_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q43900
ParseContext.parse
train
def parse(self, **global_args):
    """Entry point to parsing a BUILD file.

    Args:
      **global_args: Variables to include in the parsing environment.

    The BUILD file is executed at most once per ParseContext (tracked in the
    class-level ``ParseContext._parsed`` set), with the working directory
    temporarily switched to the BUILD file's directory.
    """
    # http://en.wikipedia.org/wiki/Abstract_syntax_tree
    # http://martinfowler.com/books/dsl.html
    if self.build_file not in ParseContext._parsed:
        # Build the base execution environment from the preamble snippets.
        butcher_context = {}
        for str_to_exec in self._strs_to_exec:
            ast = compile(str_to_exec, '<string>', 'exec')
            exec_function(ast, butcher_context)
        with ParseContext.activate(self):
            startdir = os.path.abspath(os.curdir)
            try:
                os.chdir(self.build_file.path_on_disk)
                # Re-check after activate(): another parse may have run meanwhile.
                if self.build_file not in ParseContext._parsed:
                    ParseContext._parsed.add(self.build_file)
                    eval_globals = copy.copy(butcher_context)
                    eval_globals.update(
                        {'ROOT_DIR': self.build_file.path_on_disk,
                         '__file__': 'bogus please fix this'})  # NOTE(review): placeholder __file__ — confirm intended
                    eval_globals.update(global_args)
                    exec_function(self.build_file.code, eval_globals)
            finally:
                # Always restore the original working directory.
                os.chdir(startdir)
python
{ "resource": "" }
q43901
ClientManager.add_client
train
def add_client(self, client):
    """Register *client* with this manager.

    :param client: The client to add into this manager.
    :type client: :class:`revision.client.Client`
    :raises InvalidArgType: if *client* is not a ``Client`` instance.
    :return: This manager, to allow method chaining.
    :rtype: :class:`revision.client_manager.ClientManager`
    """
    if not isinstance(client, Client):
        raise InvalidArgType()
    # Silently keep the existing entry when the key is already registered.
    already_registered = self.has_client(client.key)
    if not already_registered:
        self[client.key] = client
    return self
python
{ "resource": "" }
q43902
generate_conf_file
train
def generate_conf_file(argv: List[str]) -> bool:
    """Generate a template SQL db configuration file.

    (Docstring fixed: it previously claimed to convert FHIR resources into
    i2b2 counterparts, which is unrelated to what this function does.)

    :param argv: Command line arguments (``-f`` / ``--configfile``).
    :return: True if the template was written, False if the target exists.
    """
    parser = ArgumentParser(description="Generate SQL db_conf file template")
    parser.add_argument("-f", "--configfile", help="File name to generate (Default: db_conf)",
                        metavar="Config File", default="db_conf")
    opts = parser.parse_args(argv)
    if os.path.exists(opts.configfile):
        # Refuse to clobber an existing configuration file.
        print(f"{opts.configfile} already exists!")
        return False
    with open(opts.configfile, 'w') as f:
        f.write(conf_template)
    print(f"{opts.configfile} generated")
    return True
python
{ "resource": "" }
q43903
Chain.compute
train
def compute(self, *args, **kwargs) -> Any:
    """Compose the chained tokens and evaluate the resulting function.

    (Annotation fixed: ``-> [Any, None]`` was a list literal, not a valid
    type; the method returns whatever the composed call produces, possibly
    ``None``.)
    """
    return super().compute(self.compose, *args, **kwargs)
python
{ "resource": "" }
q43904
Chain.copy
train
def copy(self, klass=None):
    """Return a new chain duplicating this one.

    :param klass: optional class to instantiate; defaults to this chain's
        own class.  The new instance gets the same constructor arguments
        and a shallow copy of the token list.
    """
    factory = klass or self.__class__
    duplicate = factory(*self._args, **self._kwargs)
    duplicate._tokens = self._tokens.copy()
    return duplicate
python
{ "resource": "" }
q43905
ThisComposer.call
train
def call(self, tokens, *args, **kwargs):
    """Record a call step on the token list.

    Appends an ``[evaluate, [args, kwargs], {}]`` entry so the positional
    and keyword arguments are replayed when the chain is evaluated.
    """
    entry = [evaluate, [args, kwargs], {}]
    tokens.append(entry)
    return tokens
python
{ "resource": "" }
q43906
_this.copy
train
def copy(self, klass=_x):
    """A new chain beginning with the current chain tokens and argument.

    The parent copy's composed form becomes the single seed token of the
    new chain, which is built from the first constructor argument.
    """
    chain = super().copy()
    # Wrap the copied chain's compose as the only token of the new chain.
    new_chain = klass(chain._args[0])
    new_chain._tokens = [[chain.compose, [], {}, ]]
    return new_chain
python
{ "resource": "" }
q43907
prefix
train
def prefix(filename):
    """Return the basename of *filename* with common fMRI dataset
    suffixes stripped (per the module-level ``_afni_suffix_regex``)."""
    stripped = re.sub(_afni_suffix_regex, "", str(filename))
    _, base = os.path.split(stripped)
    return base
python
{ "resource": "" }
q43908
suffix
train
def suffix(filename, suffix):
    """Return a filename with ``suffix`` inserted before the dataset suffix.

    NOTE: *suffix* is %-formatted into a regex replacement string, so
    backslashes in it would be interpreted as group references.
    """
    # Raw string so "\g<1>" is a literal backreference for re.sub rather
    # than an (invalid) string escape sequence.
    replacement = r"%s\g<1>" % suffix
    return os.path.split(re.sub(_afni_suffix_regex, replacement, str(filename)))[1]
python
{ "resource": "" }
q43909
afni_copy
train
def afni_copy(filename):
    ''' creates a ``+orig`` copy of the given dataset and returns the
    filename as a string

    Returns None (implicitly) when the AFNI package is unavailable.
    The copy is only created if the ``.HEAD`` file does not already exist.
    '''
    if nl.pkg_available('afni',True):
        afni_filename = "%s+orig" % nl.prefix(filename)
        if not os.path.exists(afni_filename + ".HEAD"):
            # 3dcalc with expression 'a' performs a plain format conversion.
            nl.calc(filename,'a',prefix=nl.prefix(filename))
        return afni_filename
python
{ "resource": "" }
q43910
nifti_copy
train
def nifti_copy(filename, prefix=None, gzip=True):
    ''' creates a ``.nii`` copy of the given dataset and returns the
    filename as a string (``None`` on conversion failure) '''
    # The parameter ``prefix`` shadows the module-level prefix() function,
    # hence the globals() lookup below.
    if prefix is None:  # fixed: identity comparison instead of "==None"
        prefix = filename
    nifti_filename = globals()['prefix'](prefix) + ".nii"
    if gzip:
        nifti_filename += '.gz'
    if not os.path.exists(nifti_filename):
        try:
            subprocess.check_call(['3dAFNItoNIFTI','-prefix',nifti_filename,str(filename)])
        except subprocess.CalledProcessError:
            nl.notify('Error: could not convert "%s" to NIFTI dset!' % filename,level=nl.level.error)
            return None
    return nifti_filename
python
{ "resource": "" }
q43911
_dset_info_afni
train
def _dset_info_afni(dset):
    ''' returns a DsetInfo populated from the raw output of ``3dinfo -verb``

    Returns None when 3dinfo cannot be run or produces no output.
    NOTE(review): on Python 3 check_output returns bytes, which would break
    the re.findall calls below — this code assumes Python 2 str output.
    '''
    info = DsetInfo()
    try:
        raw_info = subprocess.check_output(['3dinfo','-verb',str(dset)],stderr=subprocess.STDOUT)
    except:
        return None
    if raw_info==None:
        return None
    # Subbrick info: index, label, datum type, optional min/max and stat info.
    sub_pattern = r'At sub-brick #(\d+) \'([^\']+)\' datum type is (\w+)(:\s+(.*)\s+to\s+(.*))?\n(.*statcode = (\w+); statpar = (.*)|)'
    sub_info = re.findall(sub_pattern,raw_info)
    for brick in sub_info:
        brick_info = {
            'index': int(brick[0]),
            'label': brick[1],
            'datum': brick[2]
        }
        # Optional "min to max" range group.
        if brick[3]!='':
            brick_info.update({
                'min': float(brick[4]),
                'max': float(brick[5])
            })
        # Optional statistics code/parameters group.
        if brick[6]!='':
            brick_info.update({
                'stat': brick[7],
                'params': brick[8].split()
            })
        info.subbricks.append(brick_info)
    info.reps = len(info.subbricks)
    # Dimensions:
    orient = re.search('\[-orient ([A-Z]+)\]',raw_info)
    if orient:
        info.orient = orient.group(1)
    # Spatial extent / voxel size / voxel count per axis.
    for axis in ['RL','AP','IS']:
        m = re.search(r'%s-to-%s extent:\s+([0-9-.]+) \[.\] -to-\s+([0-9-.]+) \[.\] -step-\s+([0-9-.]+) mm \[\s*([0-9]+) voxels\]' % (axis[0],axis[1]),raw_info)
        if m:
            info.spatial_from.append(float(m.group(1)))
            info.spatial_to.append(float(m.group(2)))
            info.voxel_size.append(float(m.group(3)))
            info.voxel_dims.append(float(m.group(4)))
    # Volume only defined when all three axes were parsed.
    if len(info.voxel_size)==3:
        info.voxel_volume = reduce(mul,info.voxel_size)
    slice_timing = re.findall('-time:[tz][tz] \d+ \d+ [0-9.]+ (.*?) ',raw_info)
    if len(slice_timing):
        info.slice_timing = slice_timing[0]
    TR = re.findall('Time step = ([0-9.]+)s',raw_info)
    if len(TR):
        info.TR = float(TR[0])
    # Other info..
    details_regex = {
        'identifier': r'Identifier Code:\s+(.*)',
        'filetype': r'Storage Mode:\s+(.*)',
        'space': r'Template Space:\s+(.*)'
    }
    for d in details_regex:
        m = re.search(details_regex[d],raw_info)
        if m:
            setattr(info,d,m.group(1))
    return info
python
{ "resource": "" }
q43912
subbrick
train
def subbrick(dset, label, coef=False, tstat=False, fstat=False, rstat=False, number_only=False):
    ''' returns a string referencing the given subbrick within a dset

    This method reads the header of the dataset ``dset``, finds the subbrick whose
    label matches ``label`` and returns a string of type ``dataset[X]``, which can
    be used by most AFNI programs to refer to a subbrick within a file

    The options coef, tstat, fstat, and rstat will add the suffix that is appended
    to the label by 3dDeconvolve

    :coef:  "#0_Coef"
    :tstat: "#0_Tstat"
    :fstat: "_Fstat"
    :rstat: "_R^2"

    If ``coef`` or ``tstat`` are set to a number, it will use that parameter number
    (instead of 0), for models that use multiple parameters (e.g., "TENT").

    if ``number_only`` is set to ``True``, will only return the subbrick number
    instead of a string
    '''
    if coef is not False:
        if coef is True:
            coef = 0
        label += "#%d_Coef" % coef
    elif tstat is not False:
        # fixed: was "tstat != False", which silently skipped tstat=0 even
        # though the docstring promises numeric parameter selection
        if tstat is True:
            tstat = 0
        label += "#%d_Tstat" % tstat
    elif fstat:
        label += "_Fstat"
    elif rstat:
        label += "_R^2"
    info = nl.dset_info(dset)
    if info is None:
        nl.notify('Error: Couldn\'t get info from dset "%s"'%dset,level=nl.level.error)
        return None
    i = info.subbrick_labeled(label)
    if number_only:
        return i
    return '%s[%d]' % (dset,i)
python
{ "resource": "" }
q43913
dset_grids_equal
train
def dset_grids_equal(dsets):
    '''Tests if each dataset in the ``list`` ``dsets`` has the same number
    of voxels and voxel-widths

    Compares voxel_size and voxel_dims across all three spatial axes.
    NOTE: uses ``xrange`` — Python 2 code.
    '''
    infos = [dset_info(dset) for dset in dsets]
    for i in xrange(3):
        # More than one distinct value on any axis means grids differ.
        if len(set([x.voxel_size[i] for x in infos]))>1 or len(set([x.voxel_dims[i] for x in infos]))>1:
            return False
    return True
python
{ "resource": "" }
q43914
resample_dset
train
def resample_dset(dset, template, prefix=None, resam='NN'):
    '''Resamples ``dset`` to the grid of ``template`` using resampling mode
    ``resam``. Default prefix is to suffix ``_resam`` at the end of ``dset``

    Available resampling modes:

    :NN: Nearest Neighbor
    :Li: Linear
    :Cu: Cubic
    :Bk: Blocky
    '''
    if prefix is None:  # fixed: identity comparison instead of "==None"
        prefix = nl.suffix(dset, '_resam')
    nl.run(['3dresample','-master',template,'-rmode',resam,'-prefix',prefix,'-inset',dset])
python
{ "resource": "" }
q43915
ijk_to_xyz
train
def ijk_to_xyz(dset,ijk):
    '''convert the dset indices ``ijk`` to RAI coordinates ``xyz``

    Uses ``@AfniOrient2RAImap`` to map the dataset's orientation code to
    signed RAI axis indices, then scales/offsets each index by the voxel
    size and spatial extent.  NOTE: uses ``xrange`` — Python 2 code.
    '''
    i = nl.dset_info(dset)
    # Signed axis map: sign gives direction, magnitude gives source axis (1-based).
    orient_codes = [int(x) for x in nl.run(['@AfniOrient2RAImap',i.orient]).output.split()]
    orient_is = [abs(x)-1 for x in orient_codes]
    rai = []
    for rai_i in xrange(3):
        ijk_i = orient_is[rai_i]
        if orient_codes[rai_i] > 0:
            # Axis runs in the same direction: offset from the "from" edge.
            rai.append(ijk[ijk_i]*i.voxel_size[rai_i] + i.spatial_from[rai_i])
        else:
            # Axis is flipped: count back from the "to" edge.
            rai.append(i.spatial_to[rai_i] - ijk[ijk_i]*i.voxel_size[rai_i])
    return rai
python
{ "resource": "" }
q43916
value_at_coord
train
def value_at_coord(dset, coords):
    '''returns value at specified coordinate in ``dset`` (via 3dmaskave -dbox)'''
    command = ['3dmaskave', '-q', '-dbox']
    command.extend(coords)
    command.append(dset)
    run_result = nl.run(command, stderr=None)
    return nl.numberize(run_result.output)
python
{ "resource": "" }
q43917
AwsAutoScalingGroup.do_printActivities
train
def do_printActivities(self,args):
    """Print scaling activities

    Fetches activities from AWS when -r/--refresh is given or when no
    activities have been cached yet; otherwise prints the cached list.
    NOTE: Python 2 print statements.
    """
    parser = CommandArgumentParser("printActivities")
    parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh');
    args = vars(parser.parse_args(args))
    refresh = args['refresh'] or not self.activities
    if refresh:
        response = self.client.describe_scaling_activities(AutoScalingGroupName=self.scalingGroup)
        self.activities = response['Activities']
    index = 0
    for activity in self.activities:
        # EndTime may be absent for in-flight activities; default to ''.
        print "{}: {} -> {} {}: {}".format(index,activity['StartTime'],stdplus.defaultifyDict(activity,'EndTime',''),activity['StatusCode'],activity['Description'])
        index = index + 1
python
{ "resource": "" }
q43918
AwsAutoScalingGroup.do_printActivity
train
def do_printActivity(self,args):
    """Print scaling activity details.

    Expects a single integer index into the cached ``self.activities``
    list (as shown by printActivities).
    """
    parser = CommandArgumentParser("printActivity")
    # help text fixed: was "refresh", copy-pasted from printActivities
    parser.add_argument(dest='index',type=int,help='index of the activity to print')
    parsed = vars(parser.parse_args(args))
    activity = self.activities[parsed['index']]
    pprint(activity)
python
{ "resource": "" }
q43919
AwsAutoScalingGroup.do_printInstances
train
def do_printInstances(self,args):
    """Print the list of instances in this auto scaling group. printInstances -h for detailed help

    Supports glob filters on instance id, optional availability-zone
    filtering, and per-instance detail printing (addresses/tags/full
    description).  NOTE: Python 2 print statements.
    """
    parser = CommandArgumentParser("printInstances")
    parser.add_argument(dest='filters',nargs='*',default=["*"],help='Filter instances');
    parser.add_argument('-a','--addresses',action='store_true',dest='addresses',help='list all ip addresses');
    parser.add_argument('-t','--tags',action='store_true',dest='tags',help='list all instance tags');
    parser.add_argument('-d','--allDetails',action='store_true',dest='details',help='print all instance details');
    parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh');
    parser.add_argument('-z','--zones',dest='availabilityZones',nargs='+',help='Only include specified availability zones');
    args = vars(parser.parse_args(args))
    client = AwsConnectionFactory.getEc2Client()
    filters = args['filters']
    addresses = args['addresses']
    tags = args['tags']
    details = args['details']
    availabilityZones = args['availabilityZones']
    # describe_instances is only needed when some per-instance detail is requested.
    needDescription = addresses or tags or details
    if args['refresh']:
        self.scalingGroupDescription = self.client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.scalingGroup])
    # print "AutoScaling Group:{}".format(self.scalingGroup)
    print "=== Instances ==="
    instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
    instances = filter( lambda x: fnmatches(x['InstanceId'],filters),instances)
    if availabilityZones:
        instances = filter( lambda x: fnmatches(x['AvailabilityZone'],availabilityZones),instances)
    index = 0
    for instance in instances:
        # Remember the display index so other commands can reference it.
        instance['index'] = index
        print "* {0:3d} {1} {2} {3}".format(index,instance['HealthStatus'],instance['AvailabilityZone'],instance['InstanceId'])
        description = None
        if needDescription:
            description = client.describe_instances(InstanceIds=[instance['InstanceId']])
        if addresses:
            networkInterfaces = description['Reservations'][0]['Instances'][0]['NetworkInterfaces']
            number = 0
            print "    Network Interfaces:"
            for interface in networkInterfaces:
                print "    * {0:3d} {1}".format(number, interface['PrivateIpAddress'])
                number +=1
        if tags:
            # NOTE(review): this rebinds the boolean flag 'tags' to the tag
            # list; if any instance has an empty tag list, later instances
            # will silently skip tag printing — confirm intended.
            tags = description['Reservations'][0]['Instances'][0]['Tags']
            print "    Tags:"
            for tag in tags:
                print "    * {0} {1}".format(tag['Key'],tag['Value'])
        if details:
            pprint(description)
        index += 1
python
{ "resource": "" }
q43920
AwsAutoScalingGroup.do_printPolicy
train
def do_printPolicy(self,args):
    """Print the autoscaling policy for this scaling group."""
    parser = CommandArgumentParser("printPolicy")
    # Parse (and thereby validate) the arguments even though none are used.
    vars(parser.parse_args(args))
    pprint(self.client.describe_policies(AutoScalingGroupName=self.scalingGroup))
python
{ "resource": "" }
q43921
AwsAutoScalingGroup.do_rebootInstance
train
def do_rebootInstance(self,args):
    """Restart the specified instance.

    The argument is either an integer index into this scaling group's
    instance list, or a literal EC2 instance id.
    """
    parser = CommandArgumentParser("rebootInstance")
    parser.add_argument(dest='instance',help='instance index or name')
    args = vars(parser.parse_args(args))
    instanceId = args['instance']
    try:
        index = int(instanceId)
        instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
        # fixed: previously the whole instance dict was kept and later
        # subscripted; when a literal id (string) was passed instead of an
        # index, instanceId['InstanceId'] raised TypeError.
        instanceId = instances[index]['InstanceId']
    except ValueError:
        # Not an integer: treat the argument as a literal instance id.
        pass
    client = AwsConnectionFactory.getEc2Client()
    client.reboot_instances(InstanceIds=[instanceId])
python
{ "resource": "" }
q43922
AwsAutoScalingGroup.do_run
train
def do_run(self,args):
    """SSH to each instance in turn and run specified command

    Supports host-key replacement/keyscan, parallel fan-out (-j), skipping
    hosts (-s), and named command macros (-m) looked up in
    Config.config['ssh-macros'].
    """
    parser = CommandArgumentParser("run")
    parser.add_argument('-R','--replace-key',dest='replaceKey',default=False,action='store_true',help="Replace the host's key. This is useful when AWS recycles an IP address you've seen before.")
    parser.add_argument('-Y','--keyscan',dest='keyscan',default=False,action='store_true',help="Perform a keyscan to avoid having to say 'yes' for a new host. Implies -R.")
    parser.add_argument('-ii','--ignore-host-key',dest='ignore-host-key',default=False,action='store_true',help='Ignore host key')
    parser.add_argument('-ne','--no-echo',dest='no-echo',default=False,action='store_true',help='Do not echo command')
    parser.add_argument(dest='command',nargs='+',help="Command to run on all hosts.")
    # consider adding a filter option later
    parser.add_argument('-v',dest='verbosity',default=0,action=VAction,nargs='?',help='Verbosity. The more instances, the more verbose');
    parser.add_argument('-j',dest='jobs',type=int,default=1,help='Number of hosts to contact in parallel');
    parser.add_argument('-s',dest='skip',type=int,default=0,help='Skip this many hosts');
    parser.add_argument('-m',dest='macro',default=False,action='store_true',help='{command} is a series of macros to execute, not the actual command to run on the host');
    args = vars(parser.parse_args(args))
    replaceKey = args['replaceKey']
    keyscan = args['keyscan']
    verbosity = args['verbosity']
    jobs = args['jobs']
    skip = args['skip']
    ignoreHostKey = args['ignore-host-key']
    noEcho = args['no-echo']
    instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
    # Honor -s by dropping the first 'skip' hosts.
    instances = instances[skip:]
    # if replaceKey or keyscan:
    #     for instance in instances:
    #         stdplus.resetKnownHost(instance)
    if args['macro']:
        if len(args['command']) > 1:
            print("Only one macro may be specified with the -m switch.")
            return
        else:
            macro = args['command'][0]
            print("Macro:{}".format(macro))
            command = Config.config['ssh-macros'][macro]
    else:
        command = ' '.join(args['command'])
    # Fan the ssh invocations out across up to 'jobs' parallel workers.
    Parallel(n_jobs=jobs)(
        delayed(ssh)(instance['InstanceId'],0,[],replaceKey,keyscan,False,verbosity,command,ignoreHostKey=ignoreHostKey,echoCommand=not noEcho,name="{}:{}: ".format(instance['index'],instance['InstanceId']))
        for instance in instances )
python
{ "resource": "" }
q43923
AwsAutoScalingGroup.do_startInstance
train
def do_startInstance(self,args):
    """Start the specified instance.

    The argument is either an integer index into this scaling group's
    instance list, or a literal EC2 instance id.
    """
    parser = CommandArgumentParser("startInstance")
    parser.add_argument(dest='instance',help='instance index or name')
    args = vars(parser.parse_args(args))
    instanceId = args['instance']
    # fixed: previously read args['force'] even though no --force option was
    # registered (guaranteed KeyError); EC2 start_instances has no Force
    # parameter, so the flag is simply dropped.
    try:
        index = int(instanceId)
        instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
        # fixed: resolve to the instance id string so the literal-id path works too
        instanceId = instances[index]['InstanceId']
    except ValueError:
        pass
    client = AwsConnectionFactory.getEc2Client()
    client.start_instances(InstanceIds=[instanceId])
python
{ "resource": "" }
q43924
AwsAutoScalingGroup.do_stopInstance
train
def do_stopInstance(self,args):
    """Stop the specified instance.

    The argument is either an integer index into this scaling group's
    instance list, or a literal EC2 instance id.  -f/--force is passed
    through to EC2's stop_instances.
    """
    parser = CommandArgumentParser("stopInstance")
    parser.add_argument(dest='instance',help='instance index or name')
    parser.add_argument('-f','--force',action='store_true',dest='force',help='force the instance to stop')
    args = vars(parser.parse_args(args))
    instanceId = args['instance']
    force = args['force']
    try:
        index = int(instanceId)
        instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
        # fixed: resolve to the instance id string; previously a literal id
        # (string) argument crashed on instanceId['InstanceId'] below.
        instanceId = instances[index]['InstanceId']
    except ValueError:
        pass
    client = AwsConnectionFactory.getEc2Client()
    client.stop_instances(InstanceIds=[instanceId],Force=force)
python
{ "resource": "" }
q43925
AwsAutoScalingGroup.do_terminateInstance
train
def do_terminateInstance(self,args):
    """Terminate an EC2 instance, then refresh the instance listing.

    The argument is either an integer index into this scaling group's
    instance list, or a literal EC2 instance id.
    """
    parser = CommandArgumentParser("terminateInstance")
    parser.add_argument(dest='instance',help='instance index or name')
    args = vars(parser.parse_args(args))
    instanceId = args['instance']
    try:
        index = int(instanceId)
        instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
        # fixed: resolve to the instance id string; previously a literal id
        # (string) argument crashed on instanceId['InstanceId'] below.
        instanceId = instances[index]['InstanceId']
    except ValueError:
        pass
    client = AwsConnectionFactory.getEc2Client()
    client.terminate_instances(InstanceIds=[instanceId])
    self.do_printInstances("-r")
python
{ "resource": "" }
q43926
cli
train
def cli(sequencepath, report, refseq_database):
    """Pass command line arguments through to the feature extraction
    pipeline, using one worker per available CPU."""
    thread_count = multiprocessing.cpu_count()
    main(sequencepath, report, refseq_database, num_threads=thread_count)
python
{ "resource": "" }
q43927
grouplabelencode
train
def grouplabelencode(data, mapping, nacode=None, nastate=False):
    """Encode data array with grouped labels

    Parameters:
    -----------
    data : list
        array with labels

    mapping : dict, list of list
        the index of each element is used as encoding. Each element is a
        single label (str) or list of labels that are mapped to the encoding.

    nacode : integer
        (Default: None) Encoding for unmapped states.

    nastate : bool
        If False (Default) unmatched data labels are encoded as nacode.
        If nastate=True (and nacode=None) then unmatched data labels are
        encoded with the integer nacode=len(mapping).

    Raises
    ------
    TypeError
        when *mapping* is neither a list nor a dict.
    """
    # What value is used for missing data?
    if nastate:
        if nacode is None:
            nacode = len(mapping)
    # Process depending on the data type of the mapping variable
    if isinstance(mapping, list):
        m = mapping
        e = range(len(mapping))
    elif isinstance(mapping, dict):
        m = list(mapping.values())
        e = list(mapping.keys())
    else:
        # fixed: the message blamed 'data' although the check is on
        # 'mapping'; TypeError (a subclass of Exception) is more precise.
        raise TypeError("'mapping' must be list-of-list or dict.")
    # Loop over 'data' array
    return grouplabelencode_loop(data, m, e, nacode=nacode)
python
{ "resource": "" }
q43928
get_csv_col_headers
train
def get_csv_col_headers(rows, row_headers_count_value=0):
    """Return the column-header rows of a parsed csv.

    Header rows are the leading rows whose first *row_headers_count_value*
    cells are all empty; when every row qualifies, fall back to a single
    header row.  The row-header columns are stripped from the result.
    """
    header_count = 0
    if rows:
        for position, row in enumerate(rows):
            if exclude_empty_values(row[:row_headers_count_value]):
                header_count = position
                break
        else:
            header_count = len(rows)
        if header_count == len(rows):
            header_count = 1  # by default
    return [r[row_headers_count_value:] for r in rows[:header_count]]
python
{ "resource": "" }
q43929
populate_csv_headers
train
def populate_csv_headers(rows, partial_headers, column_headers_count=1):
    """Fill empty row-header cells and join each header row into a string.

    Empty cells inherit the nearest non-empty value from the rows above
    (mutating *partial_headers* in place); each completed header row is
    then space-joined into the returned list.
    """
    joined = [''] * (len(rows) - column_headers_count)
    for row_idx, header_row in enumerate(partial_headers):
        for col_idx in range(len(header_row)):
            if not header_row[col_idx] and row_idx > 0:
                # Walk upward until a non-empty value is found for this column.
                for upper_idx in range(row_idx - 1, -1, -1):
                    inherited = partial_headers[upper_idx][col_idx]
                    if inherited:
                        header_row[col_idx] = inherited
                        break
        joined[row_idx] = " ".join(map(str, header_row))
    return joined
python
{ "resource": "" }
q43930
get_row_headers
train
def get_row_headers(rows, row_headers_count_value=0, column_headers_count=1): """ Return row headers. Assume that by default it has one column header. Assume that there is only one father row header. """ # TODO: REFACTOR ALGORITHM NEEDED partial_headers = [] if row_headers_count_value: # Take partial data for k_index in range(0, len(rows) - column_headers_count): header = rows[k_index + column_headers_count][ :row_headers_count_value] partial_headers.append(remove_list_duplicates(force_list(header))) # Populate headers populated_headers = populate_csv_headers( rows, partial_headers, column_headers_count) return populated_headers
python
{ "resource": "" }
q43931
retrieve_csv_data
train
def retrieve_csv_data(rows, row_header=0, column_header=0, limit_column=0): """ Take the data from the rows. """ return [row[row_header:limit_column] for row in rows[column_header:]]
python
{ "resource": "" }
q43932
csv_tolist
train
def csv_tolist(path_to_file, **kwargs):
    """ Parse the csv file to a list of rows.

    Recognized kwargs: encoding (default 'utf-8'), delimiter (default ','),
    dialect (default csv.excel).  Returns [] and logs on any failure.
    """
    encoding = kwargs.get('encoding', 'utf-8')
    delimiter = kwargs.get('delimiter', ',')
    dialect = kwargs.get('dialect', csv.excel)
    try:
        # fixed: previously a codecs-decoded stream was wrapped again in
        # io.TextIOWrapper (which expects a byte buffer), and an unused
        # path_to_file.split('.', 1) raised ValueError for extensionless
        # paths outside the try block.  Open the file as text once, with
        # newline='' as the csv module requires.
        with io.open(path_to_file, 'r', encoding=encoding, newline='') as items_file:
            result = list(
                csv.reader(items_file, delimiter=delimiter, dialect=dialect))
    except Exception as ex:
        result = []
        logger.error('Fail parsing csv to list of rows - {}'.format(ex))
    return result
python
{ "resource": "" }
q43933
excel_todictlist
train
def excel_todictlist(path_to_file, **kwargs):
    """ Parse excel file to a dict list of sheets, rows.

    Returns an OrderedDict mapping sheet name -> list of row value lists.
    Empty cells inside merged ranges inherit the merged range's value.
    formatting_info is only requested for legacy .xls files (xlrd does not
    support it for .xlsx).
    """
    result = collections.OrderedDict()
    encoding = kwargs.get('encoding', 'utf-8')
    # .xlsx workbooks cannot be opened with formatting_info=True in xlrd.
    formatting_info = '.xlsx' not in path_to_file
    count = 0
    with xlrd.open_workbook(
            path_to_file, encoding_override=encoding,
            formatting_info=formatting_info) \
            as _excelfile:
        for sheet_name_raw in _excelfile.sheet_names():
            # if empty sheet name put sheet# as name
            sheet_name = sheet_name_raw or "sheet{}".format(count)
            result[sheet_name] = []
            xl_sheet = _excelfile.sheet_by_name(sheet_name_raw)
            for row_idx in range(0, xl_sheet.nrows):
                col_data = []
                for col_idx in range(0, xl_sheet.ncols):
                    # Get cell object by row, col
                    cell_obj = xl_sheet.cell(row_idx, col_idx)
                    merged_info = is_merged(xl_sheet, row_idx, col_idx)
                    # Search for value in merged_info
                    if not cell_obj.value and merged_info:
                        cell_obj = search_mergedcell_value(
                            xl_sheet, merged_info[1])
                        col_data.append(cell_obj.value if cell_obj else '')
                    else:
                        col_data.append(cell_obj.value)
                result[sheet_name].append(col_data)
            count += 1  # increase sheet counter
    return result
python
{ "resource": "" }
q43934
search_mergedcell_value
train
def search_mergedcell_value(xl_sheet, merged_range):
    """ Return the first cell within *merged_range* that holds a truthy
    value, or False when every cell in the range is empty.

    merged_range is an xlrd-style (row_lo, row_hi, col_lo, col_hi) tuple
    with half-open bounds.
    """
    row_lo, row_hi, col_lo, col_hi = merged_range[:4]
    for row_idx in range(row_lo, row_hi):
        for col_idx in range(col_lo, col_hi):
            cell = xl_sheet.cell(row_idx, col_idx)
            if cell.value:
                return cell
    return False
python
{ "resource": "" }
q43935
is_merged
train
def is_merged(sheet, row, column):
    """ Return (True, merged_range) when (row, column) lies inside one of
    the sheet's merged-cell ranges, else False.

    Ranges spanning (almost) the full sheet width AND height are ignored,
    mirroring the original heuristic.
    """
    for merged in sheet.merged_cells:
        row_lo, row_hi, col_lo, col_hi = merged
        inside = (row_lo <= row < row_hi) and (col_lo <= column < col_hi)
        if not inside:
            continue
        # TODO: IS NECESARY THIS IF? (kept from the original implementation)
        narrow_enough = (col_hi - col_lo) < sheet.ncols - 1
        short_enough = (row_hi - row_lo) < sheet.nrows - 1
        if narrow_enough and short_enough:
            return (True, merged)
    return False
python
{ "resource": "" }
q43936
populate_headers
train
def populate_headers(headers):
    """ Concatenate headers with subheaders

    Builds one combined header string per column by joining the non-empty
    cells of each header row with '-'.
    NOTE(review): the exclude_empty_values(result) length check decides
    whether to prefix '-'; confirm it behaves as intended when earlier
    columns are still empty.
    """
    result = [''] * len(headers[0])
    values = [''] * len(headers)
    for k_index in range(0, len(headers)):
        for i_index in range(0, len(headers[k_index])):
            if headers[k_index][i_index]:
                values[k_index] = normalizer(
                    str(headers[k_index][i_index]))  # pass to str
                if len(exclude_empty_values(result)) > i_index:
                    result[i_index] += "-{}".format(values[k_index])
                else:
                    result[i_index] += str(values[k_index])
    return result
python
{ "resource": "" }
q43937
row_csv_limiter
train
def row_csv_limiter(rows, limits=None):
    """ Clip *rows* to [upper, lower).

    Limits may be passed explicitly; any missing limit is auto-detected
    (best effort) via row_iter_limiter.
    """
    if limits is None:
        limits = [None, None]
    provided = len(exclude_empty_values(limits))
    if provided == 2:
        upper_limit, lower_limit = limits[0], limits[1]
    elif provided == 1:
        upper_limit = limits[0]
        lower_limit = row_iter_limiter(rows, 1, -1, 1)
    else:
        upper_limit = row_iter_limiter(rows, 0, 1, 0)
        lower_limit = row_iter_limiter(rows, 1, -1, 1)
    return rows[upper_limit:lower_limit]
python
{ "resource": "" }
q43938
row_iter_limiter
train
def row_iter_limiter(rows, begin_row, way, c_value):
    """ Alghoritm to detect row limits when row have more that one column.
    Depending the init params find from the begin or behind.

    way=1 scans forward from the top, way=-1 scans backward from the end
    (via negative indexing); c_value adjusts the returned boundary.
    NOT SURE THAT IT WORKS WELL..
    """
    limit = None
    for index in range(begin_row, len(rows)):
        # Stop at the first row that does not collapse to a single value.
        if not len(exclude_empty_values(rows[way * index])) == 1:
            # Guard against returning a degenerate boundary (0 or the full length).
            limit = way * index + c_value if way * index + \
                c_value not in [way * len(rows), 0] else None
            break
    return limit
python
{ "resource": "" }
q43939
csv_dict_format
train
def csv_dict_format(csv_data, c_headers=None, r_headers=None):
    """ Format parsed csv rows as dicts keyed by the column headers.

    With r_headers: returns {row_header: OrderedDict(col -> value)},
    skipping rows whose header is empty.  Without row headers (square
    csv): returns the list of per-row OrderedDicts wrapped in an outer
    list.
    """
    if r_headers:
        formatted = {}
        for row_idx, row in enumerate(csv_data):
            row_key = r_headers[row_idx]
            if row_key:
                formatted[row_key] = collections.OrderedDict(
                    zip(c_headers, row))
        return formatted
    row_dicts = [collections.OrderedDict(zip(c_headers, row))
                 for row in csv_data]
    return [row_dicts]
python
{ "resource": "" }
q43940
csv_array_clean_format
train
def csv_array_clean_format(csv_data, c_headers=None, r_headers=None):
    """ Format parsed csv rows as a clean rectangular array.

    The first row is the column headers, left-padded with one empty cell
    per row-header column; each data row is prefixed with its row header
    when row headers exist.
    """
    header_width = len(force_list(r_headers[0])) if r_headers else 0
    cleaned = [[""] * header_width + c_headers]
    for row_idx, row in enumerate(csv_data):
        if r_headers:
            cleaned.append(
                list(itertools.chain([r_headers[row_idx]], row)))
        else:
            cleaned.append(row)
    return cleaned
python
{ "resource": "" }
q43941
csv_format
train
def csv_format(csv_data, c_headers=None, r_headers=None, rows=None, **kwargs):
    """ Format parsed csv rows as Dict or Array.

    kwargs['result_format'] selects DICT_FORMAT, ARRAY_RAW_FORMAT
    (default, returns *rows* as-is) or ARRAY_CLEAN_FORMAT; unknown formats
    yield None.  Non-dict results are wrapped in an extra list.
    """
    if c_headers is None:
        c_headers = []
    if r_headers is None:
        r_headers = []
    if rows is None:
        rows = []
    result_format = kwargs.get('result_format', ARRAY_RAW_FORMAT)
    if result_format == DICT_FORMAT:
        result = csv_dict_format(csv_data, c_headers, r_headers)
    elif result_format == ARRAY_RAW_FORMAT:
        result = rows
    elif result_format == ARRAY_CLEAN_FORMAT:
        result = csv_array_clean_format(csv_data, c_headers, r_headers)
    else:
        result = None
    # Array-style results (formats below DICT_FORMAT) get an extra wrapper.
    if result and result_format < DICT_FORMAT:
        result = [result]
    return result
python
{ "resource": "" }
q43942
Notification._notify_on_condition
train
def _notify_on_condition(self, test_message=None, **kwargs):
    """Return True for test messages; otherwise whether this notification
    is enabled AND its `notify_on_condition` callback fires for **kwargs.
    """
    if test_message:
        return True
    return self.enabled and self.notify_on_condition(**kwargs)
python
{ "resource": "" }
q43943
Notification.enabled
train
def enabled(self):
    """Returns True if this notification is enabled based on the value
    of Notification model instance.

    Note: Notification names/display_names are persisted in the
    "Notification" model where each mode instance can be flagged as
    enabled or not, and are selected/subscribed to by each user in their
    user profile.

    See also: `site_notifications.update_notification_list`

    NOTE(review): the cache check is truthiness-based, so a False value
    is never cached and the model is re-queried on every call — confirm
    whether that refresh-on-disabled behavior is intended.
    """
    if not self._notification_enabled:
        self._notification_enabled = self.notification_model.enabled
    return self._notification_enabled
python
{ "resource": "" }
q43944
Notification.notification_model
train
def notification_model(self):
    """Returns the Notification 'model' instance associated with this
    notification, creating the registry entry on demand.
    """
    NotificationModel = django_apps.get_model("edc_notification.notification")
    # trigger exception if this class is not registered.
    site_notifications.get(self.name)
    try:
        notification_model = NotificationModel.objects.get(name=self.name)
    except ObjectDoesNotExist:
        # Model row missing: rebuild the notification list and retry once.
        site_notifications.update_notification_list()
        notification_model = NotificationModel.objects.get(name=self.name)
    return notification_model
python
{ "resource": "" }
q43945
Notification.get_template_options
train
def get_template_options(self, instance=None, test_message=None, **kwargs):
    """Returns a dictionary of message template options.

    Extend using `extra_template_options`.

    NOTE(review): the "not in template_options" guards below are always
    True for a dict built without those keys — presumably intended as
    extension points for subclasses that pre-populate them; confirm.
    """
    protocol_name = django_apps.get_app_config("edc_protocol").protocol_name
    test_message = test_message or self.test_message
    template_options = dict(
        name=self.name,
        protocol_name=protocol_name,
        display_name=self.display_name,
        email_from=self.email_from,
        # Test markers are blank for real (non-test) messages.
        test_subject_line=(
            self.email_test_subject_line if test_message else ""
        ).strip(),
        test_body_line=self.email_test_body_line if test_message else "",
        test_line=self.sms_test_line if test_message else "",
        message_datetime=get_utcnow(),
        message_reference="",
    )
    if "subject_identifier" not in template_options:
        try:
            template_options.update(subject_identifier=instance.subject_identifier)
        except AttributeError:
            # instance is None or lacks the attribute; leave the key out.
            pass
    if "site_name" not in template_options:
        try:
            template_options.update(site_name=instance.site.name.title())
        except AttributeError:
            pass
    return template_options
python
{ "resource": "" }
q43946
Notification.sms_recipients
train
def sms_recipients(self):
    """Returns a list of recipients subscribed to receive SMS's
    for this "notifications" class.

    Only active staff users who subscribed to this notification AND have
    a mobile number on their profile are included.

    See also: edc_auth.UserProfile.
    """
    sms_recipients = []
    UserProfile = django_apps.get_model("edc_auth.UserProfile")
    for user_profile in UserProfile.objects.filter(
        user__is_active=True, user__is_staff=True
    ):
        try:
            user_profile.sms_notifications.get(name=self.name)
        except ObjectDoesNotExist:
            # Not subscribed to this notification; skip.
            pass
        else:
            if user_profile.mobile:
                sms_recipients.append(user_profile.mobile)
    return sms_recipients
python
{ "resource": "" }
q43947
sign_filter_permissions
train
def sign_filter_permissions(permissions):
    """ Return a compressed, signed dump of the json blob.

    This function expects a json blob that is a dictionary containing
    model dotted names as keys. Those keys each have a value that is a
    list of dictionaries, each of which contains the keys 'filters' and
    'actions':

    The key 'filters' key is a dict that is a filter to be applied to a
    django queryset.

    The key 'actions' is a list of DRF methods that can be called for
    this model's viewset. For example:

    {
        'accounts.Account': [
            {
                'filters': {
                    'email': 'marcel@chewse.com',
                    'organizations__name': 'Chewse'
                },
                'actions': ['create', 'partial_update']
            }
        ]
    }

    NOTE: uses dict.iteritems() — Python 2 code.
    """
    # Normalize the dotted model names to lower case before signing.
    permissions = {key.lower(): value for key, value in permissions.iteritems()}
    return signing.dumps(permissions, compress=True)
python
{ "resource": "" }
q43948
unsign_filters_and_actions
train
def unsign_filters_and_actions(sign, dotted_model_name):
    """Return the list of filters and actions for dotted_model_name.

    An empty list is returned when the model has no entry in the
    signed permissions blob.
    """
    return signing.loads(sign).get(dotted_model_name, [])
python
{ "resource": "" }
q43949
Comparable.equality
train
def equality(self, other):
    """Compare two objects attribute-by-attribute for equality.

    @param self: first object to compare
    @param other: second object to compare
    @return: boolean result of comparison
    """
    cname = self.__class__.__name__
    for aname in self.attributes:
        try:
            mine = getattr(self, aname)
            theirs = getattr(other, aname)
        except AttributeError as error:
            # a missing attribute on either side means "not equal"
            logging.debug("%s.%s: %s", cname, aname, error)
            return False
        # log without a result first (opens an indent level), then with it
        self.log(mine, theirs, '==', cname=cname, aname=aname)
        eql = (mine == theirs)
        self.log(mine, theirs, '==', cname=cname, aname=aname, result=eql)
        if not eql:
            return False
    return True
python
{ "resource": "" }
q43950
Comparable.similarity
train
def similarity(self, other):
    """Compare two objects for similarity.

    Each attribute named in ``self.attributes`` (a name -> weight
    mapping) contributes its own similarity, weighted, to the total;
    the sum is then normalized so the weights total 1.0.

    @param self: first object to compare
    @param other: second object to compare
    @return: L{Similarity} result of comparison
    """
    sim = self.Similarity()
    total = 0.0
    # Calculate similarity ratio for each attribute
    cname = self.__class__.__name__
    for aname, weight in self.attributes.items():
        attr1 = getattr(self, aname, None)
        attr2 = getattr(other, aname, None)
        self.log(attr1, attr2, '%', cname=cname, aname=aname)
        # Similarity is ignored if None on both objects
        if attr1 is None and attr2 is None:
            self.log(attr1, attr2, '%', cname=cname, aname=aname,
                     result="attributes are both None")
            continue
        # Similarity is 0 if either attribute is non-Comparable
        # (weight still counts toward the total, dragging the score down)
        if not all((isinstance(attr1, Comparable),
                    isinstance(attr2, Comparable))):
            self.log(attr1, attr2, '%', cname=cname, aname=aname,
                     result="attributes not Comparable")
            total += weight
            continue
        # Calculate similarity between the attributes
        attr_sim = (attr1 % attr2)
        self.log(attr1, attr2, '%', cname=cname, aname=aname, result=attr_sim)
        # Add the similarity to the total
        sim += attr_sim * weight
        total += weight
    # Scale the similarity so the total is 1.0
    if total:
        sim *= (1.0 / total)
    return sim
python
{ "resource": "" }
q43951
Comparable.Similarity
train
def Similarity(self, value=None):  # pylint: disable=C0103
    """Constructor for new default Similarities.

    Builds a module-level ``Similarity`` carrying this object's threshold;
    a missing value defaults to 0.0.
    """
    return Similarity(0.0 if value is None else value,
                      threshold=self.threshold)
python
{ "resource": "" }
q43952
Comparable.log
train
def log(obj1, obj2, sym, cname=None, aname=None, result=None):  # pylint: disable=R0913
    """Log the objects being compared and the result.

    When no result object is specified, subsequence calls will have an
    increased indentation level. The indentation level is decreased
    once a result object is provided.

    @param obj1: first object
    @param obj2: second object
    @param sym: operation being performed ('==' or '%')
    @param cname: name of class (when attributes are being compared)
    @param aname: name of attribute (when attributes are being compared)
    @param result: outcome of comparison
    """
    fmt = "{o1} {sym} {o2} : {r}"
    if cname or aname:
        assert cname and aname  # both must be specified
        fmt = "{c}.{a}: " + fmt
    if result is None:
        # opening call: indent first, then push a level for nested calls
        result = '...'
        fmt = _Indent.indent(fmt)
        _Indent.more()
    else:
        # closing call: pop the level pushed by the matching opening call
        _Indent.less()
        fmt = _Indent.indent(fmt)
    msg = fmt.format(o1=repr(obj1), o2=repr(obj2),
                     c=cname, a=aname, sym=sym, r=result)
    logging.info(msg)
python
{ "resource": "" }
q43953
PkgFileGroup.translate_path
train
def translate_path(self, dep_file, dep_rule):
    """Translate dep_file from dep_rule into this rule's output path.

    The dependency's repo/path prefix (or the configured strip_prefix,
    when set) is removed, then the remainder is re-rooted under this
    rule's repo/path with the configured prefix.
    """
    dep_root = os.path.join(dep_rule.address.repo, dep_rule.address.path)
    relative = dep_file.split(dep_root, 1)[-1]
    strip_prefix = self.params['strip_prefix']
    if strip_prefix:
        relative = dep_file.split(strip_prefix, 1)[-1]
    return os.path.join(self.address.repo,
                        self.address.path,
                        self.params['prefix'].lstrip('/'),
                        relative.lstrip('/'))
python
{ "resource": "" }
q43954
new
train
def new(ruletype, **kwargs):
    """Instantiate a new build rule based on kwargs.

    Appropriate args list varies with rule type.
    Minimum args required: [... fill this in ...]
    """
    if ruletype not in TYPE_MAP:
        raise error.InvalidRule('Unrecognized rule type: %s' % ruletype)
    ruleclass = TYPE_MAP[ruletype]
    try:
        return ruleclass(**kwargs)
    except TypeError:
        # signature mismatch between the rule class and the supplied kwargs
        log.error('BADNESS. ruletype: %s, data: %s', ruletype, kwargs)
        raise
python
{ "resource": "" }
q43955
_countdown
train
def _countdown(seconds):
    """
    Wait `seconds`, displaying a two-digit in-place countdown.
    """
    for i in range(seconds, 0, -1):
        sys.stdout.write("%02d" % i)
        # Flush BEFORE sleeping — without this the number is buffered and
        # never visible while the process is blocked in sleep().
        sys.stdout.flush()
        time.sleep(1)
        # erase the two digits just written
        sys.stdout.write("\b\b")
    sys.stdout.flush()
python
{ "resource": "" }
q43956
post_process
train
def post_process(table, post_processors):
    """Feed ``table`` through each post processor in order.

    Returns the final processed value; with an empty processor list the
    input is returned unchanged.
    """
    result = table
    for step in post_processors:
        result = step(result)
    return result
python
{ "resource": "" }
q43957
describe
train
def describe(cls, full=False):
    """Prints a description of the table based on the provided
    documentation and, when ``full`` is set, its post processors."""
    divider_double = "=" * 80
    divider_single = "-" * 80
    description = cls.__doc__
    message = []
    message.append(divider_double)
    message.append(cls.__name__ + ':')
    message.append(description)
    # the processor section is only emitted when requested AND non-empty
    if full and cls.post_processors(cls):
        message.append(divider_single)
        message.append("Post processors:")
        message.append(divider_single)
        for processor in cls.post_processors(cls):
            message.append(">" + " " * 3 + processor.__name__ + ':')
            message.append(" " * 4 + processor.__doc__)
        message.append('')
    message.append(divider_double)
    message.append('')
    for line in message:
        print(line)
python
{ "resource": "" }
q43958
BaseTableABC.describe_processors
train
def describe_processors(cls):
    """Yield a dict per post processor: its name, docstring and the
    callable itself."""
    # TODO: Add dependencies to this dictionary
    for step in cls.post_processors(cls):
        yield {
            'name': step.__name__,
            'description': step.__doc__,
            'processor': step,
        }
python
{ "resource": "" }
q43959
BaseTableABC.dependencies
train
def dependencies(cls):
    """Returns a list of all dependent tables, in the order they are defined.

    Add new dependencies for source and every post processor like this::

        source.dependencies = [PersonalData]
        some_post_processor.dependencies = [SomeOtherTable, AnotherTable]

    `some_post_processor.dependencies` needs to be placed after
    `some_post_processor` is defined.
    """
    _missing = object()
    deps = []
    # a missing `source` or missing `source.dependencies` is tolerated
    deps.extend(getattr(getattr(cls, 'source', None), 'dependencies', []))
    for step in cls.post_processors(cls):
        step_deps = getattr(step, 'dependencies', _missing)
        if step_deps is _missing:
            continue
        assert isinstance(step_deps, list), \
            "{}.dependencies must be a list".format(step.__name__)
        deps.extend(step_deps)
    return deps
python
{ "resource": "" }
q43960
BaseTableABC.get_settings_list
train
def get_settings_list(self):
    """The settings list used for building the cache id."""
    return [self.source, self.output, self.kwargs, self.post_processors]
python
{ "resource": "" }
q43961
BaseTableABC.get_hash
train
def get_hash(self):
    """Return an md5 hex digest derived from this table's class source,
    its args/kwargs, and the hashes of its dependent tables — so the
    hash changes when the code, the arguments, or any dependency change."""
    dependency_hashes = [dependency.get_hash() for dependency in self.dep()]
    payload = [
        inspect.getsourcelines(self.__class__),
        self.args,
        self.kwargs,
    ] + dependency_hashes
    return hashlib.md5(pickle.dumps(payload)).hexdigest()
python
{ "resource": "" }
q43962
BaseTableABC.get_cached_filename
train
def get_cached_filename(self, filename, extention, settings_list=None):
    """Creates a filename with md5 cache string based on settings list

    Args:
        filename (str): the filename without extention
        extention (str): the file extention without dot. (i.e. 'pkl')
        settings_list (dict|list): the settings list as list (optional)

    NB! The dictionaries have to be sorted or hash id will change
    arbitrarely.
    """
    stem = "_".join((filename, self.get_hash()))
    return ".".join((stem, extention))
python
{ "resource": "" }
q43963
Table._process_table
train
def _process_table(self, cache=True):
    """Run the source, apply the post processors and (optionally)
    cache the result before returning it."""
    result = self.source()
    assert result is not None, \
        "{}.source needs to return something, not None".format(self.__class__.__name__)
    result = post_process(result, self.post_processors())
    if cache:
        self.to_cache(result)
    return result
python
{ "resource": "" }
q43964
APIGenerator.generate
train
def generate(self):
    """Runs generation process: walk the whole source tree and convert
    every file found."""
    for dirpath, _, filenames in os.walk(self.source_dir):
        for filename in filenames:
            self.generate_api_for_source(os.path.join(dirpath, filename))
python
{ "resource": "" }
q43965
APIGenerator.generate_api_for_source
train
def generate_api_for_source(self, source_fpath: str):
    """Generate the end json api file (with directory structure) for a
    concrete source file. Unsupported sources (converter returns None)
    are skipped silently."""
    payload = self.convert_content(source_fpath)
    if payload is None:
        return
    target = self.dest_fpath(source_fpath)
    self.create_fpath_dir(target)
    with open(target, 'w+') as out:
        json.dump(payload, out, cls=DateTimeJsonEncoder)
python
{ "resource": "" }
q43966
APIGenerator.convert_content
train
def convert_content(self, fpath: str) -> typing.Optional[dict]:
    """Convert content of a source file using ``self.loader_cls``.

    Returns the converted dict, or None when the loader class does not
    support the source file's extension."""
    try:
        loader = self.loader_cls(fpath)
    except UnsupportedExtensionError:
        return None
    return loader.convert_content()
python
{ "resource": "" }
q43967
APIGenerator.dest_fpath
train
def dest_fpath(self, source_fpath: str) -> str:
    """Calculate the full path of the end json-api file from the source
    file path: the leading component (the source dir) is dropped and the
    extension is replaced with .json under ``self.dest_dir``."""
    parts = source_fpath.split(os.sep)[1:]
    relative_fpath = os.path.join(*parts)
    base_fname = parts[-1].split('.')[0]
    return os.path.join(self.dest_dir,
                        os.path.dirname(relative_fpath),
                        f'{base_fname}.json')
python
{ "resource": "" }
q43968
APIGenerator.create_fpath_dir
train
def create_fpath_dir(self, fpath: str):
    """Ensure the parent directory of ``fpath`` exists."""
    parent = os.path.dirname(fpath)
    os.makedirs(parent, exist_ok=True)
python
{ "resource": "" }
q43969
FilenameChecker.add_options
train
def add_options(cls, parser):
    """Required by flake8: register the --filename_checkN options,
    called first.

    Args:
        parser (OptionsManager):
    """
    option_kwargs = {
        'action': 'store',
        'default': '',
        'parse_from_config': True,
        'comma_separated_list': True,
    }
    for num in range(cls.min_check, cls.max_check):
        parser.add_option(None, "--filename_check{}".format(num), **option_kwargs)
python
{ "resource": "" }
q43970
FilenameChecker.parse_options
train
def parse_options(cls, options):
    """Required by flake8: parse the options, called after add_options.

    Pulls each configured rule's lines off ``options``, keeps only the
    whitelisted params (filter_regex / filename_regex), then drops any
    rule left with no params.

    Args:
        options (dict): options to be parsed
    """
    parsed = {}
    for rule_name in cls.filename_checks:
        option_lines = getattr(options, rule_name)
        if not option_lines:
            continue
        params = {}
        for line in option_lines:
            pieces = [piece.strip() for piece in line.split('=')]
            # whitelist the acceptable params
            if pieces[0] in ('filter_regex', 'filename_regex'):
                params[pieces[0]] = pieces[1]
        parsed[rule_name] = params
    cls.filename_checks.update(parsed)
    # delete any empty rules
    cls.filename_checks = {name: cfg
                           for name, cfg in cls.filename_checks.items()
                           if cfg}
python
{ "resource": "" }
q43971
FilenameChecker.run
train
def run(self):
    """Required by flake8

    Will be called after add_options and parse_options.

    Yields:
        tuple: (int, int, str, type) the tuple used by flake8 to
        construct a violation
    """
    if not self.filename_checks:
        message = "N401 no configuration found for {}, " \
                  "please provide filename configuration in a flake8 config".format(self.name)
        yield (0, 0, message, type(self))
    for rule_func in [rules.rule_n5xx]:
        for rule_name, configured_rule in self.filename_checks.items():
            yield from rule_func(self.filename, rule_name, configured_rule,
                                 type(self))
python
{ "resource": "" }
q43972
get_my_ips
train
def get_my_ips():
    """Return the IPv4 address of each non-loopback network interface.

    Highly OS specific - works only in modern linux kernels (reads
    /sys/class/net); on any other platform ['127.0.0.1'] is returned.
    Interfaces without an assigned IPv4 address are skipped.
    """
    if not os.path.exists("/sys/class/net"):
        # not linux
        return ['127.0.0.1']
    ips = []
    for ifdev in os.listdir("/sys/class/net"):
        if ifdev == "lo":
            continue
        try:
            # The socket is only needed as an fd to issue the ioctl on;
            # `with` closes it (the original leaked one fd per interface).
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
                ips.append(socket.inet_ntoa(fcntl.ioctl(
                    sock.fileno(),
                    0x8915,  # SIOCGIFADDR
                    struct.pack('256s', ifdev[:15].encode())
                )[20:24]))
        except OSError:
            # interface has no IPv4 address assigned
            pass
    return ips
python
{ "resource": "" }
q43973
get_identity_document
train
def get_identity_document(current_block: dict, uid: str, salt: str, password: str) -> Identity: """ Get an Identity document :param current_block: Current block data :param uid: Unique IDentifier :param salt: Passphrase of the account :param password: Password of the account :rtype: Identity """ # get current block BlockStamp timestamp = BlockUID(current_block['number'], current_block['hash']) # create keys from credentials key = SigningKey.from_credentials(salt, password) # create identity document identity = Identity( version=10, currency=current_block['currency'], pubkey=key.pubkey, uid=uid, ts=timestamp, signature=None ) # sign document identity.sign([key]) return identity
python
{ "resource": "" }
q43974
make_hash_id
train
def make_hash_id():
    """
    Compute the `datetime.now` based SHA-1 hash of a string.

    :return: Returns the sha1 hash as a string.
    :rtype: str
    """
    stamp = datetime.datetime.now().strftime(DATETIME_FORMAT)
    return hashlib.sha1(stamp.encode('utf-8')).hexdigest()
python
{ "resource": "" }
q43975
read_header
train
def read_header(filename):
    ''' returns a dictionary of values in the header of the given file

    Returns None (implicitly) when no "*** Header End ***" marker is found.
    '''
    header = {}
    # NOTE(review): in_header is set but never consulted, so "key: value"
    # lines are collected even before the start marker — preserved as-is.
    in_header = False
    raw = nl.universal_read(filename)
    for line in (chunk.strip() for chunk in raw.split('\n')):
        if line == "*** Header Start ***":
            in_header = True
            continue
        if line == "*** Header End ***":
            return header
        fields = line.split(": ")
        if len(fields) == 2:
            header[fields[0]] = fields[1]
python
{ "resource": "" }
q43976
CustomLabelCondition.appointment
train
def appointment(self):
    """Returns the appointment instance for this request.

    Looks up the pk from the ``appointment`` GET parameter; raises
    ObjectDoesNotExist when the pk does not match a record.
    """
    return django_apps.get_model(self.appointment_model).objects.get(
        pk=self.request.GET.get("appointment")
    )
python
{ "resource": "" }
q43977
CustomLabelCondition.previous_visit
train
def previous_visit(self):
    """Returns the previous visit for this request or None.

    Walks backwards through the appointment timepoints and returns the
    visit attached to the first (most recent) appointment that has one.

    Requires attr `visit_model_cls`.
    """
    previous_visit = None
    if self.appointment:
        appointment = self.appointment
        # step back one timepoint at a time until a visit is found
        while appointment.previous_by_timepoint:
            try:
                previous_visit = self.model.visit_model_cls().objects.get(
                    appointment=appointment.previous_by_timepoint
                )
            except ObjectDoesNotExist:
                # no visit at this timepoint; keep walking back
                pass
            else:
                break
            appointment = appointment.previous_by_timepoint
    return previous_visit
python
{ "resource": "" }
q43978
CustomLabelCondition.previous_obj
train
def previous_obj(self):
    """Returns a model obj that is the first occurrence of a previous
    obj relative to this object's appointment, or None.

    Override this method if not an EDC subject model / CRF.
    """
    previous_obj = None
    if self.previous_visit:
        try:
            # look up this model's instance attached to the previous visit
            previous_obj = self.model.objects.get(
                **{f"{self.model.visit_model_attr()}": self.previous_visit}
            )
        except ObjectDoesNotExist:
            pass
    return previous_obj
python
{ "resource": "" }
q43979
read
train
def read(fname):
    """ utility function to read and return file contents

    The file is resolved relative to this module's directory and decoded
    as utf8; surrounding whitespace is stripped.
    """
    full_path = os.path.join(os.path.dirname(__file__), fname)
    with codecs.open(full_path, 'r', 'utf8') as handle:
        return handle.read().strip()
python
{ "resource": "" }
q43980
create_files
train
def create_files(filedef, cleanup=True):
    """Contextmanager that creates a directory structure from a yaml
    descripttion.

    Yields the temporary directory (which is also made the cwd for the
    duration of the block). The original cwd is always restored; the
    temp dir is removed afterwards unless ``cleanup`` is False.
    """
    cwd = os.getcwd()
    tmpdir = tempfile.mkdtemp()
    try:
        # materialize the yaml-described tree inside the temp dir
        Filemaker(tmpdir, filedef)
        if not cleanup:   # pragma: nocover
            pass  # print("TMPDIR =", tmpdir)
        os.chdir(tmpdir)
        yield tmpdir
    finally:
        os.chdir(cwd)
        if cleanup:  # pragma: nocover
            shutil.rmtree(tmpdir, ignore_errors=True)
python
{ "resource": "" }
q43981
Filemaker.make_file
train
def make_file(self, filename, content):
    """Create a new file with name ``filename`` and content ``content``."""
    with open(filename, 'w') as out:
        out.write(content)
python
{ "resource": "" }
q43982
insert_data_frame
train
def insert_data_frame(col, df, int_col=None, binary_col=None, minimal_size=5):
    """Insert ``pandas.DataFrame`` into a mongodb collection.

    :param col: :class:`pymongo.collection.Collection` instance.
    :param df: :class:`pandas.DataFrame` instance.
    :param int_col: list of integer-type column.
    :param binary_col: list of binary-type column.
    """
    records = transform.to_dict_list_generic_type(
        df, int_col=int_col, binary_col=binary_col)
    smart_insert(col, records, minimal_size)
python
{ "resource": "" }
q43983
AsciiArmor._remove_trailing_spaces
train
def _remove_trailing_spaces(text: str) -> str:
    """
    Remove trailing spaces and tabs

    :param text: Text to clean up
    :return:
    """
    # NOTE(review): rstrip("\x09\x20") stops at the newline kept by
    # splitlines(True), so lines that end in "\n" keep their trailing
    # spaces; only a final unterminated line is actually stripped.
    # Behavior preserved as-is — confirm against the armor spec.
    return "".join(line.rstrip("\x09\x20")
                   for line in text.splitlines(True))
python
{ "resource": "" }
q43984
AsciiArmor._parse_dash_escaped_line
train
def _parse_dash_escaped_line(dash_escaped_line: str) -> str:
    """
    Parse a dash-escaped text line

    :param dash_escaped_line: Dash escaped text line
    :return: the line without its escape prefix, or '' when the line
        is not dash-escaped
    """
    if compile('^' + DASH_ESCAPE_PREFIX).match(dash_escaped_line):
        # remove dash '-' (0x2D) and space ' ' (0x20) prefix
        return dash_escaped_line[2:]
    return ''
python
{ "resource": "" }
q43985
AsciiArmor._decrypt
train
def _decrypt(ascii_armor_message: str, signing_key: SigningKey) -> str:
    """
    Decrypt a message from ascii armor format

    :param ascii_armor_message: base64-encoded sealed utf-8 message
    :param signing_key: SigningKey instance created from credentials
    :return: the decrypted text
    """
    sealed = base64.b64decode(ascii_armor_message)
    return signing_key.decrypt_seal(sealed).decode('utf-8')
python
{ "resource": "" }
q43986
render
train
def render(request, template_name, context=None, content_type=None,
           status=None, using=None, logs=None):
    """
    Wrapper around Django render method. Can take one or a list of logs
    and logs the response. No overhead if no logs are passed.
    """
    if logs:
        obj_logger = ObjectLogger()
        # normalize a single log object into a list
        if not isinstance(logs, list):
            logs = [logs, ]
        for log in logs:
            # record the rendered response details against each log entry
            log = obj_logger.log_response(
                log,
                context,
                status=str(status),
                headers='',
                content_type=str(content_type))
            log.save()
    return django_render(
        request,
        template_name,
        context=context,
        content_type=content_type,
        status=status,
        using=using)
python
{ "resource": "" }
q43987
entify_main
train
def entify_main(args):
    '''
    Main function. This function is created in this way so as to let other
    applications make use of the full configuration capabilities of the
    application.
    '''
    # Recovering the logger
    # Calling the logger when being imported
    i3visiotools.logger.setupLogger(loggerName="entify", verbosity=args.verbose, logFolder=args.logfolder)
    # From now on, the logger can be recovered like this:
    logger = logging.getLogger("entify")

    logger.info("""entify-launcher.py Copyright (C) F. Brezo and Y. Rubio (i3visio) 2014
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions.
For details, run:
\tpython entify-launcher.py --license""")

    logger.info("Selecting the regular expressions to be analysed...")

    listRegexp = []
    if args.regexp:
        listRegexp = config.getRegexpsByName(args.regexp)
    elif args.new_regexp:
        # BUGFIX: was `list.Regexp.append(...)` (an AttributeError on the
        # builtin `list`) and passed the whole args.new_regexp list instead
        # of the current expression `r`.
        for i, r in enumerate(args.new_regexp):
            listRegexp.append(RegexpObject(name="NewRegexp" + str(i), reg_exp=r))

    if not args.web:
        results = scanFolderForRegexp(folder=args.input_folder, listRegexp=listRegexp, recursive=args.recursive, verbosity=args.verbose, logFolder=args.logfolder)
    else:
        results = scanResource(uri=args.web, listRegexp=listRegexp, verbosity=args.verbose, logFolder=args.logfolder)
    logger.info("Printing the results:\n" + general.dictToJson(results))

    if args.output_folder:
        logger.info("Preparing the output folder...")
        if not os.path.exists(args.output_folder):
            logger.warning("The output folder \'" + args.output_folder + "\' does not exist. The system will try to create it.")
            os.makedirs(args.output_folder)

        logger.info("Storing the results...")
        # TODO: CSV export (resultsToCSV) is not implemented yet.
        if "json" in args.extension:
            with open(os.path.join(args.output_folder, "results.json"), "w") as oF:
                oF.write(general.dictToJson(results))
python
{ "resource": "" }
q43988
grouper_df
train
def grouper_df(df, chunksize):
    """Evenly divide pd.DataFrame into n-row pieces; the last piece may
    hold fewer rows (no fill values).

    :param df: ``pandas.DataFrame`` instance.
    :param chunksize: number of rows of each small DataFrame.

    (Original Chinese note: split a ``pandas.DataFrame`` into equally
    sized sub-DataFrames.)
    """
    buffer = []
    # df.items() replaces df.iteritems(), which was removed in pandas 2.0
    for row in zip(*(values for _col, values in df.items())):
        buffer.append(row)
        if len(buffer) == chunksize:
            yield pd.DataFrame(buffer, columns=df.columns)
            buffer = []
    if buffer:
        yield pd.DataFrame(buffer, columns=df.columns)
python
{ "resource": "" }
q43989
to_index_row_dict
train
def to_index_row_dict(df, index_col=None, use_ordered_dict=True):
    """Transform a data frame into a row-oriented dict for fast per-row
    access, keyed by ``index_col`` (or by the frame index when None).

    :param index_col: None or str, the column that used as index.
    :param use_ordered_dict: if True, row dict has same order as df.columns.
    """
    keys = df[index_col] if index_col else df.index
    columns = df.columns
    table = OrderedDict() if use_ordered_dict else dict()
    for key, row in zip(keys, itertuple(df)):
        table[key] = dict(zip(columns, row))
    return table
python
{ "resource": "" }
q43990
to_dict_list
train
def to_dict_list(df, use_ordered_dict=True):
    """Transform each row to a dict and collect them into a list.

    The list has one entry per row; each entry maps column name to value
    (like a ``pandas.Series`` viewed as a mapping).
    """
    row_factory = OrderedDict if use_ordered_dict else dict
    columns = df.columns
    return [row_factory(zip(columns, row)) for row in itertuple(df)]
python
{ "resource": "" }
q43991
to_dict_list_generic_type
train
def to_dict_list_generic_type(df, int_col=None, binary_col=None): """Transform each row to dict, and put them into a list. And automatically convert ``np.int64`` to ``int``, ``pandas.tslib.Timestamp`` to ``datetime.datetime``, ``np.nan`` to ``None``. :param df: ``pandas.DataFrame`` instance. :param int_col: integer type columns. :param binary_col: binary type type columns. **中文文档** 由于 ``pandas.Series`` 中的值的整数数据类型是 ``numpy.int64``, 时间数据类型是 ``pandas.tslib.Timestamp``, None的数据类型是 ``np.nan``。 虽然从访问和计算的角度来说没有什么问题, 但会和很多数据库的操作不兼容。 此函数能将 ``pandas.DataFrame`` 转化成字典的列表。数据类型能正确的获得int, bytes和datetime.datetime。 """ # Pre-process int_col, binary_col and datetime_col if (int_col is not None) and (not isinstance(int_col, (list, tuple))): int_col = [int_col, ] if (binary_col is not None) and (not isinstance(binary_col, (list, tuple))): binary_col = [binary_col, ] datetime_col = list() for col, dtype in dict(df.dtypes).items(): if "datetime64" in str(dtype): datetime_col.append(col) if len(datetime_col) == 0: datetime_col = None # Pre-process binary column dataframe def b64_encode(b): try: return base64.b64encode(b) except: return b if binary_col is not None: for col in binary_col: df[col] = df[col].apply(b64_encode) data = json.loads(df.to_json(orient="records", date_format="iso")) if int_col is not None: for row in data: for col in int_col: try: row[col] = int(row[col]) except: pass if binary_col is not None: for row in data: for col in binary_col: try: row[col] = base64.b64decode(row[col].encode("ascii")) except: pass if datetime_col is not None: for row in data: for col in datetime_col: try: row[col] = rolex.str2datetime(row[col]) except: pass return data
python
{ "resource": "" }
q43992
add_connection_args
train
def add_connection_args(parser: FileAwareParser, strong_config_file: bool=True) -> FileAwareParser: """ Add the database connection arguments to the supplied parser :param parser: parser to add arguments to :param strong_config_file: If True, force --conf to be processed. This is strictly a test for programming errors, and has to be skipped due to removefacts function. :return: parser """ # TODO: Decide what to do with this parser.add_file_argument("--conf", metavar="CONFIG FILE", help="Configuration file", action=ConfigFile if strong_config_file else None) parser.add_argument("-db", "--dburl", help="Default database URL", default=Default_DB_Connection) parser.add_argument("--user", help="Default user name", default=Default_User) parser.add_argument("--password", help="Default password", default=Default_Password) parser.add_argument("--crcdb", help="CRC database URL. (default: dburl)") parser.add_argument("--crcuser", help="User name for CRC database. (default: user)") parser.add_argument("--crcpassword", help="Password for CRC database. (default: password)") parser.add_argument("--ontodb", help="Ontology database URL. (default: dburl)") parser.add_argument("--ontouser", help="User name for ontology database. (default: user)") parser.add_argument("--ontopassword", help="Password for ontology database. (default: password)") parser.add_argument("--onttable", metavar="ONTOLOGY TABLE NAME", help="Ontology table name (default: {})".format(DEFAULT_ONTOLOGY_TABLE), default=DEFAULT_ONTOLOGY_TABLE) return parser
python
{ "resource": "" }
q43993
versioned_storage.build_dir_tree
train
def build_dir_tree(self, files):
    """ Convert a flat file dict (path -> file_info) into the nested
    {'files': {...}, 'dirs': {...}} tree format used for storage.

    Paths are expected to start with '/', so the leading empty split
    component is dropped. Each stored file_info['path'] is rewritten to
    just the file name. (Python 2 code: uses dict.iteritems.)
    """
    def helper(split_files):
        this_dir = {'files' : {}, 'dirs' : {}}
        dirs = defaultdict(list)
        for fle in split_files:
            index = fle[0]; fileinfo = fle[1]
            if len(index) == 1:
                # leaf: a file directly in this directory
                fileinfo['path'] = index[0] # store only the file name instead of the whole path
                this_dir['files'][fileinfo['path']] = fileinfo
            elif len(index) > 1:
                # descend: group remaining path components by child dir
                dirs[index[0]].append((index[1:], fileinfo))

        for name,info in dirs.iteritems():
            this_dir['dirs'][name] = helper(info)
        return this_dir
    return helper([(name.split('/')[1:], file_info) for name, file_info in files.iteritems()])
python
{ "resource": "" }
q43994
versioned_storage.flatten_dir_tree
train
def flatten_dir_tree(self, tree):
    """ Convert a file tree (as built by build_dir_tree) back into a
    flat dict of '/full/path' -> file_info. Each file_info['path'] is
    rewritten to the full path. (Python 2 code: uses dict.iteritems.)
    """
    result = {}
    def helper(tree, leading_path = ''):
        dirs = tree['dirs']; files = tree['files']
        for name, file_info in files.iteritems():
            # restore the absolute path on the file_info itself
            file_info['path'] = leading_path + '/' + name
            result[file_info['path']] = file_info
        for name, contents in dirs.iteritems():
            helper(contents, leading_path +'/'+ name)
    helper(tree); return result
python
{ "resource": "" }
q43995
versioned_storage.read_dir_tree
train
def read_dir_tree(self, file_hash):
    """ Recursively read the directory structure beginning at hash.

    Each stored 'tree' object holds its files inline and its child
    directories as hashes, which are resolved recursively.
    (Python 2 code: uses dict.iteritems.)
    """
    json_d = self.read_index_object(file_hash, 'tree')
    node = {'files' : json_d['files'], 'dirs' : {}}
    for name, hsh in json_d['dirs'].iteritems():
        node['dirs'][name] = self.read_dir_tree(hsh)
    return node
python
{ "resource": "" }
q43996
versioned_storage.write_dir_tree
train
def write_dir_tree(self, tree):
    """ Recur through dir tree data structure and write it as a set of
    objects; children are written first so each 'tree' object stores
    its child directories as hashes. Returns this node's hash.
    (Python 2 code: uses dict.iteritems.)
    """
    dirs = tree['dirs']; files = tree['files']
    child_dirs = {name : self.write_dir_tree(contents) for name, contents in dirs.iteritems()}
    return self.write_index_object('tree', {'files' : files, 'dirs': child_dirs})
python
{ "resource": "" }
q43997
versioned_storage.have_active_commit
train
def have_active_commit(self):
    """ Checks if there is an active commit owned by the specified user.

    The check is the presence of the 'active_commit' marker file under
    base_path.
    """
    marker = sfs.file_or_default(
        sfs.cpjoin(self.base_path, 'active_commit'), None)
    return marker is not None
python
{ "resource": "" }
q43998
AV.set_env_var
train
def set_env_var(key: str, value: str):
    """
    Sets environment variable on AV (AppVeyor) via the `appveyor` CLI,
    then logs the assignment.

    Args:
        key: variable name
        value: variable value
    """
    elib_run.run(f'appveyor SetVariable -Name {key} -Value {value}')
    AV.info('Env', f'set "{key}" -> "{value}"')
python
{ "resource": "" }
q43999
BuildFile.validate_internal_deps
train
def validate_internal_deps(self):
    """Freak out if there are missing local references.

    A node is satisfied when it has a resolved 'target_obj' or appears
    in this file's crossrefs; anything else is a broken graph.
    """
    for node_name, node_data in self.node.items():
        if 'target_obj' in node_data or node_name in self.crossrefs:
            continue
        raise error.BrokenGraph('Missing target: %s referenced from %s'
                                ' but not defined there.' %
                                (node_name, self.name))
python
{ "resource": "" }