Dataset schema: content (string, 35 to 416k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def proc_avrgs(lst):
    """Multiply every average-fitness value in a list by 100.

    Arguments:
        lst {list} -- list of average fitness values

    Returns:
        list -- the same values with every element multiplied by 100
    """
    return list(map(lambda x: x * 100, lst))
594641841d7fd2cd5500fc133be459104b099a80
17,713
from typing import OrderedDict import re import os def read_properties(prop_file_path, recognize_vars=False): """Read a file of name=value pairs and load them into a dictionary. Name and value must be separated by =. Spaces are stripped around both name and value. Quotes around value are removed. Comment lines starting with # are ignored. Parameters ---------- prop_file_path : str Path to the property file. recognize_vars : bool, optional If True, values containing dollar prefixed tokens are expanded to previously read property values or environment variable values if possible. Returns ------- properties : dict Dictionary mapping property names to str values. Examples -------- # Setup tests >>> from tempfile import NamedTemporaryFile # Create a property file >>> f = NamedTemporaryFile(delete=False, mode='w') >>> filepath = f.name >>> print(" # Comment", file=f) # space before comment >>> print("Aaa=bbb", file=f) # no spaces around =, no quotes around value >>> print(" Bbb = '2' ", file=f) # leading space, spaces around =, single quoted value >>> print("Ccc = \\"33\\"", file=f) # double quotes around value >>> print("Ddd=", file=f) # no value >>> print("Eee='5\\"'", file=f) # double quote within single-quoted value >>> print("Fff=\\"6'\\"", file=f) # single quote within double-quoted value >>> print("Ggg=$Aaa", file=f) # ignore other parameter when recognize_vars=False >>> f.close() # Read the properties >>> read_properties(filepath) OrderedDict([('Aaa', 'bbb'), ('Bbb', '2'), ('Ccc', '33'), ('Ddd', ''), ('Eee', '5"'), ('Fff', "6'"), ('Ggg', '$Aaa')]) >>> os.unlink(filepath) # Create a property file >>> os.environ["DUMMY_ENV_VAR"] = "D ummy" >>> f = NamedTemporaryFile(delete=False, mode='w') >>> filepath = f.name >>> print("Aaa = bbb", file=f) # define Aaa property >>> print("Bbb = $Aaa ", file=f) # substitute property >>> print("Ccc=$DUMMY_ENV_VAR", file=f) # substitute environment variable >>> print("Ddd=$DUMMY_ENV_VAR $more", file=f) # substitute environment variable, ignore something that cannot be resolved >>> print("Eee=$DUMMY_ENV_VAR$Aaa $Bbb", file=f) # substitute two properties and one environment variable >>> f.close() # Read the properties >>> read_properties(filepath, recognize_vars=True) OrderedDict([('Aaa', 'bbb'), ('Bbb', 'bbb'), ('Ccc', 'D ummy'), ('Ddd', 'D ummy $more'), ('Eee', 'D ummybbb bbb')]) >>> os.unlink(filepath) """ properties = OrderedDict() with open(prop_file_path) as f: for line in f: line = line.strip() if line.startswith('#'): continue assign_op_idx = line.find('=') if assign_op_idx < 0: continue key = line[:assign_op_idx] value = line[assign_op_idx + 1:] key = key.strip() value = value.strip() if value.startswith('"') and value.endswith('"'): value = value.strip('"') elif value.startswith("'") and value.endswith("'"): value = value.strip("'") # expand known tokens if recognize_vars: old_value = None token_regex = "[$][a-zA-Z_]+[a-zA-Z0-9_]*" while value != old_value: old_value = value match = re.search(token_regex, value) if match: token = match.group(0)[1:] # drop the leading $ if token in properties: token_value = properties[token] value = re.sub(token_regex, token_value, value, count=1) elif token in os.environ: token_value = os.environ[token] value = re.sub(token_regex, token_value, value, count=1) properties[key] = value return properties
3b3f91d32accce9c9cfcfb20ab0e4d3259bd4bda
17,714
def make_bwa_map_cmds(mapping_ob, ngsLib, cores, samtools_exe, bwa_exe, genome_fasta, add_args='-L 0,0 -U 0 -a', logger=None): """ make bwa sys commands. maps PE and S reads separately, then combines them into a X_mapped.bam file return a list of commands to run. """ # index the reference cmdindex = str("{0} index {1}").format( bwa_exe, genome_fasta) # map paired end reads to reference index. bwacommands = [cmdindex] if "pe" in ngsLib.libtype: cmdmap = str('{0} mem -t {1} {2} -k 15 ' + '{3} {4} {5} | {6} view -bh - | ' + '{6} sort -o ' + '{7} - ').format(bwa_exe, # 0 cores, # 1 add_args, # 2 genome_fasta, # 3 ngsLib.readF, # 4 ngsLib.readR, # 5 samtools_exe, # 6 mapping_ob.pe_map_bam) # 7) bwacommands.append(cmdmap) else: assert ngsLib.readS0 is not None, \ str("No readS0 attribute found, cannot run mapping with " + "any reads in .readS0 or .readF and .readR") # if singletons are present, map those too. Index is already made if ngsLib.readS0 is not None: # and not ignore_singletons: cmdmapS = str( '{0} mem -t {1} {2} -k 15 ' + '{3} {4} | {5} view -bh - | ' + '{5} sort -o {6} - ').format(bwa_exe, # 0 cores, # 1 add_args, # 2 genome_fasta, # 3 ngsLib.readS0, # 4 samtools_exe, # 5 mapping_ob.s_map_bam) # 5) # merge together the singleton and pe reads, if there are any if "s_1" == ngsLib.libtype: cmdmergeS = str( "{0} view -bh {1} > {2}" ).format(samtools_exe, mapping_ob.s_map_bam, mapping_ob.mapped_bam_unfiltered) else: assert ngsLib.libtype == "pe_s", "error parsing libtype" cmdmergeS = '{0} merge -f {3} {1} {2}'.format( samtools_exe, mapping_ob.pe_map_bam, mapping_ob.s_map_bam, mapping_ob.mapped_bam_unfiltered) bwacommands.extend([cmdmapS, cmdmergeS]) else: # if not already none, set to None when ignoring singleton ngsLib.readS0 = None cmdmerge = str("{0} view -bh {1} > " + "{2}").format(samtools_exe, mapping_ob.pe_map_bam, mapping_ob.mapped_bam_unfiltered) bwacommands.extend([cmdmerge]) return bwacommands
77c0d6d40a8ec0995fbfbe8558e479e464fd392b
17,716
import zipfile
import os

def extract_zip(zip_path, extract_path=None, password=None):
    """ Opens a zip archive in order to extract its files. """
    zip_status = True
    try:
        zip_archive = zipfile.ZipFile(zip_path, 'r')
        if password is not None:
            zip_archive.setpassword(password)
        if extract_path is None:
            extract_path = os.path.dirname(zip_path)
        zip_archive.extractall(extract_path)
        zip_archive.close()
    except Exception:  # avoid a bare except so KeyboardInterrupt/SystemExit still propagate
        zip_status = False
    return zip_status
2166f5465fb28181a230634409b870a019a21b51
17,717
import hashlib

def hash_large_file(file, chunk_size=1280000000, digest=True):
    """fast hashing of file
    see https://stackoverflow.com/a/1131238/5172579
    """
    with open(file, "rb") as f:
        file_hash = hashlib.sha1()
        chunk = f.read(chunk_size)
        while chunk:
            file_hash.update(chunk)
            chunk = f.read(chunk_size)
    if digest:
        return file_hash.hexdigest()
    return file_hash
7603d2464e48fac70bc74cd3af8c5748ebeb2760
17,719
from pathlib import Path

def path_exists(filename: str) -> bool:
    """ Checks whether the given file path exists. """
    return Path(filename).exists()
689c20e3417923e3538a62e35c861d2fd0cc11c5
17,721
def get_segments_between_timestamps(data_array, tag_timestamps, pre_and_post_event_marker_len=60*60, segments=[]): """ Extract sensor segment for the not-stress class between event markers. For a given event marker timestamp we extract sensor segment until one hour before the event marker and one hour after the event marker. Param ================================ data_array -- sensor data array tag_timestamps -- timestamps of tags to extract data around of pre_and_post_event_marker_len -- Time duration to skip data points pre and post event marker segments -- Array to store the extracted segments """ if(len(data_array) == 0): return segments if len(tag_timestamps) == 0: segments.append(data_array[2:]) return segments else: # extract start time, sampling freq, and n_observations start_time = data_array[0] sampling_freq = data_array[1] try: if len(start_time): start_time = start_time[0] except: start_time = start_time try: if len(sampling_freq): sampling_freq = sampling_freq[0] except: sampling_freq = sampling_freq # number of samples to skip before and after the event n_observation = int(pre_and_post_event_marker_len * sampling_freq) # create the tags, add the start and end time into tags tags = [start_time] tags.extend(tag_timestamps) tags.append(tags[0] + len(data_array) / sampling_freq) # sensor data and the length data = data_array[2:] data_length = len(data) # for each tag in the tags array for i in range(len(tags)): j = i + 1 if j >= len(tags): # if at the end, break free break # get the starting and end point for the sensor segment. start_tag = tags[i] # this is the position of start tag end_tag = tags[j] # this is the position of the end tag # print("Current tags pair ", (start_tag, end_tag)) # the positions in the array here_ = int((start_tag - start_time) * sampling_freq + n_observation) # pre_and_post_event_marker_len after the event there_ = int((end_tag - start_time) * sampling_freq - n_observation) # pre_and_post_event_marker_len before the event # print("Indices ", (here_, there_)) # if there are data points between the start and end points, extract those data points else ignore them if((there_ - here_) > 0): pp = data[here_:there_] segments.append(pp) return segments
4113634f8f3be987b377fc98911b0f1b0d7e62ee
17,722
def checker(z):
    """
    :param z: a 2d array of complex type. Each entry of z lies in a unique unit
        square whose bottom-left corner is the lattice site (m, n), where m and n
        are both integers. Return a bool array of the same shape whose corresponding
        entry is True if m and n have the same parity, else False.
    """
    fx = ((z.real / 2) % 1) * 2 - 1
    fy = ((z.imag / 2) % 1) * 2 - 1
    return fx * fy > 0
3c5e17272441c5d33380ee217e1e518de67199b2
17,723
def compute_daily_returns(df):
    """Compute and return the daily return values."""
    daily_returns = df.copy()  # copy given dataframe to match size and column names
    # compute daily returns starting from row 1, as no data is present for the day before day 0
    # another way to do this is: (df[1:] / df[:-1].values) - 1
    daily_returns[1:] = (df / df.shift(1)) - 1
    daily_returns.iloc[0, :] = 0  # set daily returns for row 0 to 0
    return daily_returns
2b73673dab8bcb0dee4e8a890d720ca933b8981d
17,724
def _literal_to_r_str(value):
    """Convert a python value to a corresponding R string.

    >>> _literal_to_r_str(True)
    'TRUE'
    >>> _literal_to_r_str(6)
    '6'
    >>> _literal_to_r_str("test")
    "'test'"
    """
    _literal_to_str = {True: "TRUE", False: "FALSE", None: "NULL"}
    try:
        return _literal_to_str[value]
    except KeyError:
        # quote a string
        if isinstance(value, str):
            return "'{}'".format(value)
        else:
            return str(value)
6fc6445fb4295458973dd977363536f741ab05f1
17,725
def weight_height_score(lists):
    """ Score table.
    :param lists: object list of Class Horse or Cat.
    :return: avg_weight_score, avg_height_score, least_height, most_height in a tuple.
    """
    # I am aware that two of these vars are not being used.
    # This is only for future scalability.
    total_weight_score = 0
    avg_weight_score = 0
    total_height_score = 0
    avg_height_score = 0
    entries = 0
    # lower and upper height vars
    least_height = None
    most_height = 0
    for i in lists:
        # print(i.weight)
        total_weight_score += int(i.weight)
        total_height_score += int(i.height)
        entries += 1
        if int(i.height) > most_height:
            most_height = int(i.height)
        # Implementation to avoid hard-coded upper & lower bounds.
        if least_height is None:
            least_height = int(i.height)
        elif int(i.height) < least_height:
            least_height = int(i.height)
        # print(least_height)
    # This just creates the average score of both animals or any other provided objects list.
    avg_weight_score = total_weight_score / entries
    avg_height_score = total_height_score / entries
    # Returns all the information; a tuple could be created here using (x, y, z, c) vars for readability.
    return avg_weight_score, avg_height_score, least_height, most_height
ee14dae891afa9a1de2101e0063239e68c7928dc
17,726
def drop_quasi_zero(df, thresh=0.05):
    """
    Drop Quasi Zero Features

    Returns a passed pandas DataFrame without columns containing too few
    non-zero values.

    Parameters
    ----------
    df : pandas DataFrame
        Dataset whose columns will be dropped.
    thresh : float, optional
        Minimum percentage of non-zero values in any given column for it
        to be kept in the dataset. Default value is 0.05.

    Returns
    -------
    pandas DataFrame
    """
    drop_list = []
    for el in df.columns.values:
        non_zero = df[el][df[el] != 0].shape[0] / df.shape[0]
        if non_zero < thresh:
            drop_list.append(el)
            print('Dropping column: {} | Non-zero values ratio: {}%'.format(
                el, round(100 * non_zero, 3)))
    return df.drop(drop_list, axis=1)
187254b59b34f7c788ea5a3c162d6b869e852e9a
17,728
def append_ordering_info_to_docstring(fields): """Class decorator factory which creates viewset decorator adding documentation on ordering of viewset results. Documentation is displayed either via Browseable API or upon receiving OPTIONS request. Parameters: - fields #list: The list of field names which are available for ordering. """ assert len(fields) > 0, "At least one ordering field is required" def _wrapped_append(cls): if cls.__doc__ is not None: appended_doc = 'Specify "?ordering=<fields to order by here>" query parameter to order results.\n\n' \ 'You can use following fields for ordering: {}.\n\n' \ "To reverse ordering of a field prefix it with hyphen '-': ?ordering=-{}.\n" \ .format(', '.join(fields), fields[0]) if len(fields) > 1: appended_doc = '{}' \ 'You can specify multiple orderings by separating them using comma: ?ordering={}.\n' \ .format(appended_doc, ','.join(fields[:2])) cls.__doc__ = '{}\n{}'.format(cls.__doc__, appended_doc) return cls return _wrapped_append
9065cbdeb13e80275363ef8848eb4d73e8dfa3c8
17,729
def _tokenize_with_entity_markers(tokens, tokenizer, e1, e2): """Apply wordpiece tokenization with entity markers around entities.""" def tokenize(start, end): return tokenizer.tokenize(" ".join(tokens[start:end])) if e1[0] < e2[0]: return (tokenize(0, e1[0]) + ["[E1]"] + tokenize(e1[0], e1[1] + 1) + ["[/E1]"] + tokenize(e1[1] + 1, e2[0]) + ["[E2]"] + tokenize(e2[0], e2[1] + 1) + ["[/E2]"] + tokenize(e2[1] + 1, None)) else: return (tokenize(0, e2[0]) + ["[E2]"] + tokenize(e2[0], e2[1] + 1) + ["[/E2]"] + tokenize(e2[1] + 1, e1[0]) + ["[E1]"] + tokenize(e1[0], e1[1] + 1) + ["[/E1]"] + tokenize(e1[1] + 1, None))
3dd49697366b24855711a4e46c6f9e944cf9fc61
17,731
import pkg_resources
import json

def get_MWDD_info(name):
    """ Return a dict with the parameters of the star given as input. """
    MWDD_table = pkg_resources.resource_stream(__name__, 'MWDD_table.json')
    data = json.load(MWDD_table)
    for entry in data['data']:
        try:
            namelist = entry['allnames'] + entry['wdid'] + entry['name']
        except KeyError:
            namelist = entry['allnames'] + entry['wdid']
        if name.lower().replace(' ', '') in namelist.lower().replace(' ', ''):
            return entry
1a8d0ba084f91c4f9b7e7d5dd0c6dc940e5936a0
17,732
import os

def checkPath():
    """Create a path name for the config file depending on the location of the program."""
    currentPath = os.getcwd()
    # check both system install prefixes explicitly
    if "/usr" in currentPath or "/opt" in currentPath:
        pathName = os.path.expanduser("~/.config/tuxle-triad/config.txt")
    else:
        pathName = os.path.join(os.getcwd(), "config.txt")
    return pathName
02a96f46a55af2a845ce7e3780cf58e5b74dad85
17,733
def parseHeaderText(header): """ parseHeaderText(): Go through our parsed headers, and create text descriptions based on them. """ retval = {} if header["qr"] == 0: retval["qr"] = "Question" elif header["qr"] == 1: retval["qr"] = "Response" else: retval["qr"] = "Unknown! (%s)" % header["qr"] if header["opcode"] == 0: retval["opcode_text"] = "Standard query" elif header["opcode"] == 1: retval["opcode_text"] = "Inverse query" elif header["opcode"] == 2: retval["opcode_text"] = "Server status request" else: retval["opcode_text"] = "Unknown! (%s)" % header["opcode"] if header["aa"] == 0: retval["aa"] = "Server isn't an authority" elif header["aa"] == 1: retval["aa"] = "Server is an authority" else: retval["aa"] = "Unknown! (%s)" % header["aa"] if header["tc"] == 0: retval["tc"] = "Message not truncated" elif header["tc"] == 1: retval["tc"] = "Message truncated" else: retval["tc"] = "Unknown! (%s)" % header["tc"] if header["rd"] == 0: retval["rd"] = "Recursion not requested" elif header["rd"] == 1: retval["rd"] = "Recursion requested" else: retval["rd"] = "Unknown! (%s)" % header["rd"] if header["ra"] == 0: retval["ra"] = "Recursion not available" elif header["ra"] == 1: retval["ra"] = "Recursion available!" else: retval["ra"] = "Unknown! (%s)" % header["ra"] if header["rcode"] == 0: retval["rcode_text"] = "No errors reported" elif header["rcode"] == 1: retval["rcode_text"] = "Format error (nameserver couldn't interpret this query)" elif header["rcode"] == 2: retval["rcode_text"] = "Server failure" elif header["rcode"] == 3: retval["rcode_text"] = "Name error (name does not exist!)" elif header["rcode"] == 4: retval["rcode_text"] = "Not implemented (nameserver doesn't support this type of query)" elif header["rcode"] == 5: retval["rcode_text"] = "Refused (the server refused to answer our question!)" else: retval["rcode_text"] = "Error code %s" % header["rcode"] return(retval)
9fea4101de686bb86b89abfb82cde7a212e4b8e2
17,734
import os

def write_service(service_name, contents):
    """Write a service file in a "safe" manner.

    If the contents of the file are the same as what is desired to be
    written, do nothing.

    First writes to a temporary file in the same directory as the target,
    then moves that temporary file into place.

    Return a boolean: True if the file was changed, else False.
    """
    assert '/' not in service_name
    path = '/lib/systemd/system/' + service_name
    if os.path.exists(path):
        with open(path, 'r') as f:
            existing = f.read()
        if existing == contents:
            return False
    tmppath = '/lib/systemd/system/.' + service_name + '.tmp'
    with open(tmppath, 'w') as f:
        f.write(contents)
    os.rename(tmppath, path)
    return True
5ba3e1bc4edad239e17d2daeaa198f395bab7c5c
17,735
import re

def pascal_to_snake(string: str) -> str:
    """
    Converts pascal-case to snake-case.
    >>> pascal_to_snake(string="HelloAndGoodMorning") # Returns "hello_and_good_morning"
    """
    words = re.findall(pattern="[A-Z][^A-Z]*", string=string)
    words_lower_cased = list(map(str.lower, words))
    return "_".join(words_lower_cased)
2ef5f0056d099194ffbd1aa8062fefea7664f4eb
17,737
def fluid_properties(fluid_str):
    """ Return the physical density and kinematic viscosity for the prescribed fluid. """
    fluid_lib = {'water': (1000., 1.0e-6),
                 'glycol': (965.3, 0.06 / 965.3),
                 'glycerin': (1260.0, 1.49 / 1260.0)}
    if fluid_str in list(fluid_lib.keys()):
        return fluid_lib[fluid_str]
    else:
        print('valid fluids are:')
        for keys in fluid_lib:
            print(" '%s' " % keys)
        raise KeyError('invalid fluid specified')
ea2784b5b9c9767e43dba787910fd5c32da8a266
17,739
def reduce_datetimes(row):
    """ Receives a row, converts datetimes to strings. """
    row = list(row)
    for i, iterrow in enumerate(row):
        if hasattr(iterrow, 'isoformat'):
            row[i] = iterrow.isoformat()
    return tuple(row)
8cf4c4bee5e3a0f656c0a883a5325de3dea4e8fe
17,740
def losocv_split(subjectIDs):
    """Create leave-one-subject-out cross-validation train/test splits.

    Args:
        subjectIDs (list): subjectID corresponding to each example.

    Returns:
        splits (list of lists): each fold's train and test indices.
        subjectIDset (list): unique IDs, in held-out-test-set order
    """
    subjectIDset = list(set(subjectIDs))
    splits = []
    for subject in subjectIDset:
        test_idx = [i for i in range(len(subjectIDs)) if subjectIDs[i] == subject]
        train_idx = [i for i in range(len(subjectIDs)) if subjectIDs[i] != subject]
        splits.append([train_idx, test_idx])
    return splits, subjectIDset
9f7d5051e34669a5c085cf08e65bcfd66eaaa9c7
17,741
def get_isilon_smartquota_parameters(): """This method provides parameters required for the ansible Smart Quota module on Isilon""" return dict( path=dict(required=True, type='str'), user_name=dict(type='str'), group_name=dict(type='str'), access_zone=dict(type='str', default='system'), provider_type=dict(type='str', default='local', choices=['local', 'file', 'ldap', 'ads']), quota_type=dict(required=True, type='str', choices=['user', 'group', 'directory']), quota=dict( type='dict', options=dict( include_snapshots=dict(type='bool', default=False), include_overheads=dict(type='bool'), advisory_limit_size=dict(type='int'), soft_limit_size=dict(type='int'), hard_limit_size=dict(type='int'), soft_grace_period=dict(type='int'), period_unit=dict(type='str', choices=['days', 'weeks', 'months']), cap_unit=dict(type='str', choices=['GB', 'TB']) ), required_together=[ ['soft_grace_period', 'period_unit'], ['soft_grace_period', 'soft_limit_size'] ] ), state=dict(required=True, type='str', choices=['present', 'absent']) )
88bf752234bd7280550753f60e8510ade98b34c7
17,742
def unauthenticated_json():
    """Return a failed authentication response."""
    return {"statusCode": 2, "message": "Not Authenticated !"}
e1ebd455f60534eb9b9609024d45d1fc324bca41
17,743
import string

def letter_for(index):
    """Convert an index into a letter (or letter pair). a-z, then aa-az-zz"""
    if index < 26:
        return string.ascii_lowercase[index]
    return (string.ascii_lowercase[(index // 26) - 1]   # First letter in pair
            + string.ascii_lowercase[index % 26])       # Second letter
aad7edbbd8941339e93b8a732857258d28d73033
17,744
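A minimal usage sketch for letter_for above; the index-to-letter mappings shown in the comments were worked out by hand and are illustrative only.

print(letter_for(0))    # 'a'
print(letter_for(25))   # 'z'
print(letter_for(26))   # 'aa' (first letter pair)
print(letter_for(51))   # 'az'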
def KJKGtoBTULB(hkjkg):
    """ Convert enthalpy from kJ/kg to Btu/lb.

    Conversion: 1 kJ/kg = 0.429923 Btu/lb
    :param hkjkg: Enthalpy [kJ/kg]
    :return hbtulb: Enthalpy [Btu/lb]
    """
    hbtulb = hkjkg * 0.429923
    return hbtulb
3b26742b2e56c7e265bf94d0af15f485e091f395
17,745
def getOperation(value, operations):
    """Get an L{Operation} from an C{int} representation.

    @param value: The C{int} operation value.
    @param operations: A sequence of possible L{Operation}s to match.
    @return: The matching L{Operation} or C{None} if one isn't available.
    """
    for operation in operations:
        if operation.id == value:
            return operation
34d2ef3cf0965285d2e694d8e5ea2df41cfe5892
17,746
import argparse def get_parser(): """ get_parser - a helper function for the argparse module """ parser = argparse.ArgumentParser(description="Spell correct and create PDF") parser.add_argument( "-i", "--input-dir", required=True, type=str, help="path to directory containing input files", ) parser.add_argument( "-o", "--output-dir", required=False, default=None, type=str, help="path to directory to write output files (new folder created). Defaults to input-dir", ) parser.add_argument( "-kw", "--keywords", required=False, default=None, type=str, help="keywords identifying files to be processed", ) parser.add_argument( "-e", "--ewriter-notes", required=False, default=False, action="store_true", help="if set, will write the output to ewriter format (narrow text width)", ) parser.add_argument( "--no-split", required=False, default=False, action="store_true", help="if set, will not split the text into paragraphs (faster)", ) parser.add_argument( "--no-punkt", required=False, default=False, action="store_true", help="if set, will not use nltk punkt tokenizer", ) parser.add_argument( "-r", "--recursive", required=False, default=False, action="store_true", help="whether to load files recursively from the input directory", ) parser.add_argument( "-v", "--verbose", required=False, default=False, action="store_true", help="whether to print verbose output", ) return parser
80a2786ffb93198d4de7dab2494567ee9a9c5198
17,747
def convert_date_time(date, time=None):
    """ Concatenate the date and time, unless no time is given. """
    if time is None:
        return date
    else:
        return "%s %s" % (date, time)
6196c224137f2be158d8580777e9651714613433
17,748
def _checkinst(inst, disp, fpu, insttable): """ Check observation instrument requirements against tonight's instrument configuration. Parameters ---------- inst : str Observation instrument disp : str Observation instrument disperser (or 'null') fpu : str Observation instrument focal plane unit (or 'null') insttable : '~astropy.table.Table' Instrument configuration calender. i_cal : int Index of row for tonights instrument configuration. Returns ------- boolean. True or False if the observation requirements are satisfied by the current configuration. """ # print('\ninst, disp, fpu: ',inst,disp,fpu) if inst not in insttable['insts']: # print('Not available') return False elif inst in insttable['insts']: # print('Available') if 'GMOS' in inst: # print('GMOS') return ((disp in insttable['gmos_disp']) or ('null' in insttable['gmos_disp'])) and \ ((fpu in insttable['gmos_fpu']) or ('null' in insttable['gmos_fpu'])) elif 'Flamingos' in inst: # print('Flamingos') return (fpu == insttable['f2_fpu']) or (insttable['f2_fpu'] == 'null') else: # print('Not GMOS or F2') return True
225064720de710933894c951be25c18be89f0181
17,749
def merge_list(lst, *to_merged_list):
    """
    Merge multiple lists into the first list
    :param lst:
    :param to_merged_list:
    :return:
    """
    for item in to_merged_list:
        lst.extend(item)
    return lst
29b17583c73854363277a65862efc130ae19346a
17,750
def isstr(obj):
    """Return whether an object is instance of `str`."""
    return isinstance(obj, str)
33e1cea9b8a60d224dc395f4446f175c0b967dd0
17,751
def convert_clip_ids_to_windows(clip_ids): """ Inverse function of convert_windows_to_clip_ids Args: clip_ids: list(int), each is a index of a clip, starting from 0 Returns: list(list(int)), each sublist contains two integers which are clip indices. [10, 19] meaning a 9 clip window [20, 40] (seconds), if each clip is 2 seconds. >>> test_clip_ids = [56, 57, 58, 59, 60, 61, 62] + [64, ] + [67, 68, 69, 70, 71] >>> convert_clip_ids_to_windows(test_clip_ids) [[56, 62], [64, 64], [67, 71]] """ windows = [] _window = [clip_ids[0], None] last_clip_id = clip_ids[0] for clip_id in clip_ids: if clip_id - last_clip_id > 1: # find gap _window[1] = last_clip_id windows.append(_window) _window = [clip_id, None] last_clip_id = clip_id _window[1] = last_clip_id windows.append(_window) return windows
05600f2eb248ce61359a02cbcd4bd75035c6a55f
17,752
import torch

def kron(*matrices):
    """ Kronecker product between matrices """
    for m in matrices:
        assert m.dim() == 2
    if len(matrices) == 0:
        return torch.ones(1, 1)
    if len(matrices) == 1:
        return matrices[0]
    x, y, *matrices = matrices
    z = torch.einsum("ij,kl->ikjl", x, y).reshape(x.size(0) * y.size(0), x.size(1) * y.size(1))
    if matrices:
        return kron(z, *matrices)
    return z
5d2e732a1b4fe3581522ae16a91833428f928db3
17,753
def M2_string(params, errors): """ Return string describing a single set of beam measurements. Args: z: array of axial position of beam measurements [m] d: array of beam diameters [m] lambda0: wavelength of the laser [m] Returns: Formatted string suitable for printing. """ d0, z0, Theta, M2, zR = params d0_std, z0_std, Theta_std, M2_std, zR_std = errors s = '' s += " M^2 = %.2f ± %.2f\n" % (M2, M2_std) s += "\n" s += " d_0 = %.0f ± %.0f µm\n" % (d0*1e6, d0_std*1e6) s += " w_0 = %.0f ± %.0f µm\n" % (d0/2*1e6, d0_std/2*1e6) s += "\n" s += " z_0 = %.0f ± %.0f mm\n" % (z0*1e3, z0_std*1e3) s += " z_R = %.0f ± %.0f mm\n" % (zR*1e3, zR_std*1e3) s += "\n" s += " Theta = %.2f ± %.2f mrad\n" % (Theta*1e3, Theta_std*1e3) return s
19f97d54e4d12957f79b8c4207660f73c648b2f6
17,755
def check_approved(userName, userArn):
    """Summary

    Args:
        userName (TYPE): Description
        userArn (TYPE): Description

    Returns:
        TYPE: Description
    """
    # Default
    approved = False
    # Connect change record DDB
    # Check if approved for adding users
    # Check how many users added
    # Determine if account should be locked
    approved = True
    return approved
d827e1f05e69aa22088d3545e82da38fb0398748
17,756
import string
import random

def seed(length=32):
    """
    Generate a random string of the given length.
    :param length: desired length of the generated string
    """
    base_str = string.digits + string.ascii_letters
    return ''.join([random.choice(base_str) for _ in range(length)])
32563ef10dd76a2e44bf0d0a90ba60e21ffb03f5
17,758
def _is_compiled(url):
    """
    Returns True if the wheel with the provided url is precompiled.

    The logic in this method is a less efficient version of -cp[0-9]{2}- regex matching.
    """
    prefix = "-cp"
    start = 0
    for _ in range(len(url)):
        start = url.find(prefix, start)
        if start == -1 or start + 6 >= len(url):
            break
        if url[start + len(prefix)].isdigit() and \
                url[start + len(prefix) + 1].isdigit() and \
                url[start + len(prefix) + 2] == "-":
            return True
        start += len(prefix)
    return False
fada4e14da5c92f7189737a3c50041f9a93acfe1
17,760
def first_and_last_n_chars(s, n1=30, n2=30):
    """
    Utility function to display first n1 characters and last n2 characters of a long string
    (Adjusts display if string is less than n1+n2 char long)
    :param s: string
    :return: string for display
    """
    first_len = min(len(s), n1)
    first = s[:first_len]
    last_len = min(len(s) - len(first), n2)
    last = s[-last_len:] if last_len > 0 else ''
    if first_len == len(s):
        return first
    elif first_len + last_len == len(s):
        return "{}{}".format(first, last)
    else:
        return "{}...{}".format(first, last)
f23b02a65f1c8c03a71498b0bcb9cc2941fd8060
17,761
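An illustrative call of first_and_last_n_chars above, using a made-up string and shortened n1/n2 so the truncation is visible:

s = "abcdefghij"
print(first_and_last_n_chars(s, n1=3, n2=3))    # 'abc...hij'
print(first_and_last_n_chars(s, n1=10, n2=3))   # 'abcdefghij' (whole string fits within n1)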
from pathlib import Path def create_folder(folder_name, folder_path=""): """ Create a folder with a given name in a given path. Also creates all non-existing parent folders. Parameters ---------- folder_name : str Name of the folder to be created. folder_path : str Optional; default: current path. Either relative or absolute path of the folder to be created. Returns ------- pathlib.Path Full path of the created folder. """ path = Path(folder_path) / folder_name # Creating the folder and all non-existing parent folders. path.mkdir(parents=True, exist_ok=True) return path
ed6f239c210cc9697fe6b4bb45189cc54abda970
17,764
def log_json(request_id, message, context={}):
    """Create JSON object for logging data."""
    stmt = {"message": message, "request_id": request_id}
    stmt.update(context)
    return stmt
b04c7101fcbc8bd800bd888e726ff131da198854
17,766
def claim_account_legacy(request):
    """Render a page explaining that claim links are no longer valid."""
    return {}
d48db70e437fad3d8048902e640226780d3f4fb4
17,767
def comma_conjoin(inlist, conjunction):
    """Parses the elements of a list into a string joined by commas,
    with an 'and' before the final element. Oxford comma!
    """
    if len(inlist) == 0:
        return ""
    elif len(inlist) == 1:
        return str(inlist.pop())
    elif len(inlist) == 2:
        return (" " + conjunction + " ").join(inlist)
    text = ", " + conjunction + " " + inlist.pop()
    text = ", ".join(inlist) + text
    return text
1a60d7e8752436796fc518832bfece1a97914ff0
17,768
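A short, illustrative call of comma_conjoin above; note that it mutates the list it receives because of the pop() calls.

items = ["red", "green", "blue"]
print(comma_conjoin(items, "and"))  # 'red, green, and blue'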
def fillna(df, column, value):
    """
    Can fill NaN values from a column
    Args:
        df
        column
        value
    """
    if column in df.columns:
        df[column] = df[column].fillna(value)
    else:
        df[column] = value
    return df
8f14bf01017b62817736c3ae137798fbc63a01d7
17,769
def min_user():
    """Represent a valid user with minimal data. """
    return {
        'email': 'minimal@example.com',
        'name': 'Primus Minimus',
        'age': 18,
    }
9ffe9567a86242830ce3f162ca6effda341a8ae0
17,770
import warnings

def naive_dump_json(x, indent=None):
    """Dumb, not safe! Works for the purposes of this specific script as quotes
    never appear in the data set. Parameter indent is ignored."""
    warnings.warn('about to dump rough read_json')
    assert isinstance(x, dict)
    # could use pprint for the purposes of this specific script
    return repr(x).replace("'", '"')
46854a807ffb83ce4c83090ae6f13208fac3d158
17,771
def same_container(cont1, cont2): """ Return True if cont1 and cont2 are the same containers.We assume that processes that share the same PID are the same container even if their name differ. We assume that files that are located in the same directory and share the same inode are the same containers too even if their name differ. In reality this should not be limited to files in the same directory but located in the same partition. """ partition_list = ["/data", "/system", "/mnt/sdcard", "/sdcard"] if (cont1 == cont2): return True if (cont1[0] == cont2[0]): if (cont1[0] == 'process'): return cont1[2] == cont2[2] elif (cont1[0] == 'file') and (cont1[2] == cont2[2]): s1 = cont1[1].split("/") s2 = cont2[1].split("/") if len(s1) == len (s2): i = 0 equal = True while equal and (i < (len(s1) - 2)): if not (s1[i] == s2[i]): equal = False i += 1 if equal: return True elif (cont1[0] == 'socket') : return cont1[1] == cont2[1] return False
88a5abc5547b7ee28ca9136425b7a52f532a23a1
17,772
def compare_data(plt_type, correct, given): """ Determines whether the given data matches any of the data found in the correct data. This handles plots of different types: if a histogram was plotted with the expected data for a line plot, it will return True. Args: plt_type (str): The expected type of this plot correct (List of Int or List of List of Int): The expected data. given (Dict): The actual plotted data and information Returns: bool: Whether the correct data was found in the given plot. """ # Infer arguments if plt_type == 'hist': correct_xs = None correct_ys = correct elif not correct: correct_xs = [] correct_ys = [] elif isinstance(correct[0], (tuple, list)): # We were given a list of lists of ints correct_xs, correct_ys = correct else: # Assume it is a singular list correct_xs = list(range(len(correct))) correct_ys = correct if given['type'] == 'hist': return correct_ys == given['values'] elif plt_type == 'hist': return correct_ys == given['y'] else: return correct_xs == given['x'] and correct_ys == given['y']
0bbd217906d86c2117c8c1b7a66a768386ca116b
17,773
def UINT(value):  # noqa: N802
    r"""Converts a value that matches \d+ into an integer."""
    if value is None:
        raise ValueError('None is not a valid integer')
    if not value.isdigit():
        raise ValueError('Only positive numbers are allowed')
    return int(value)
3d58dadf97fe26d7bfa00acb3451a39a5e5845bb
17,777
import math

def get_distance(loc1, loc2):
    """ Computes the Euclidean distance between two 2D points."""
    x_diff = loc1.x - loc2.x
    y_diff = loc1.y - loc2.y
    return math.sqrt(x_diff**2 + y_diff**2)
3a603ace039ea887cabd8b16d2b04d84d939c112
17,778
def get_rank(target, ranks):
    """ Get rank of a target entity within all ranked entities.

    Args:
        target (str): Target entity whose rank should be determined.
        ranks (list): List of tuples of an entity and its rank.

    Returns:
        int: Rank of entity or -1 if entity is not present in ranks.
    """
    for i in range(len(ranks)):
        word, rank = ranks[i]
        if word == target:
            return i
    return -1
0e247669af757a5ffa5fff016262eb677f7c3cb8
17,781
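A minimal illustration of get_rank above using made-up (entity, score) pairs:

ranks = [("apple", 0.9), ("banana", 0.7), ("cherry", 0.4)]
print(get_rank("banana", ranks))   # 1
print(get_rank("durian", ranks))   # -1 (not present)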
from subprocess import call  # run ffmpeg as an external process

def convert_video_audio(title, video_filename):
    """ Convert given video to the mp3 """
    audio_filename = 'audios/{title}.mp3'.format(title=title)
    call([
        'ffmpeg', '-i', video_filename,
        '-b:a', '192k', '-vn', audio_filename
    ])
    return audio_filename
3a52e957eb07be7b932db5667a693117f125aadf
17,784
import torch

def _as_tensor(x):
    """
    An equivalent of `torch.as_tensor`, but works under tracing.
    """
    if isinstance(x, (list, tuple)) and all([isinstance(t, torch.Tensor) for t in x]):
        return torch.stack(x)
    return torch.as_tensor(x)
601795d4fb1b7cb31b91d103075db57aedcfa874
17,786
import sys

def file_opener() -> str:
    """The shell program to use to open a file with the default program."""
    return {'linux': 'xdg-open', 'win32': 'start'}[sys.platform]
93317f4a55999b3e5a8f0cc28dba24e1dccc4946
17,787
import os
import torch

def load_model(name, dir_path, device='cuda'):
    """ Load a pytorch model from the given path.

    :param device: device to load models.
    :param name: model file name
    :param dir_path: model directory
    :return: loaded model
    """
    model_path = os.path.join(dir_path, f"{name}.pth")
    if not os.path.exists(model_path):
        return None
    model = torch.load(model_path, map_location=device)
    return model
4bdf9863fe23358e3737db343ace7c29aca109ad
17,789
def get_attributes(obj):
    """
    Fetches the attributes from an object.

    :param obj: The object.
    :type obj: object
    :returns: A dictionary of attributes and their values from the object.
    :rtype: dict
    """
    return {k: getattr(obj, k) for k in dir(obj) if not k.startswith("__")}
6e1cea3ed8ad2fa1c00f7c2eb86efb4a629e9f06
17,790
def evaluate_func2(x):
    """a - b + 2a^2 + 2ab + b^2

    :param x: numpy ndarray, with shape (2,)
    :return:
    """
    a = x[0]
    b = x[1]
    return a - b + 2 * a ** 2 + 2 * a * b + b ** 2
0f100254b05137a30f5423b95756f3df03a87cef
17,791
def define_args(parser, *args_builders):
    """ Set program args"""
    for args_builder in args_builders:
        parser = args_builder(parser)
    return parser
36febf6c80d2e66b34d26ea9b522c09144cd2876
17,794
def bernoulli_lh(ho, ha, s, n): """ Returns the likelihood ratio for independently distributed bernoulli random variables. Parameters ---------- ho : float null hypothesis ha : float alternative hypothesis s : float or int number of successes in sample n : float or int total number of elements in sample Returns ------- float likelihood ratio of model """ null_lh = (ho ** s) * (1 - ho)**(n - s) alt_lh = (ha ** s) * (1 - ha)**(n - s) return alt_lh / null_lh
95b52f3f48a173a6a40fab8ae7c776a7082ebb02
17,796
def extend_hit_reference(seq1, start1, end1, seq2, start2, end2): """ Extend the hit on seq1, assuming missing content is identical to seq2 If the hit covers all of seq1, but seq1 is shorter than seq2, assume the missing sequences from seq1 are identical, and add those sequences. """ # If the hit covers all of seq2, we simply return the region from seq1 that # matches if len(seq2) == end1 - start1 + 1: return seq1[start1 -1: end1] # If the full length of seq1 matches if len(seq1) == end1 - start1 + 1: # If seq2 extend beyond the blast hit if len(seq2) > end2: return seq1[start1 - 1: end1] + seq2[end2:] # If seq2 extend before the blast hit if start2 > 1: return seq2[:start2 - 1] + seq1[start1 - 1: end1] # If seq1 partially matches if len(seq1) > end1 - start1 + 1: # If seq2 extend beyond the end of the blast hit, and seq1 doesn't if len(seq2) > end2 and end1 == len(seq1): return seq1[start1 - 1: end1] + seq2[end2:] # If seq2 extend before the start of the blast hit, and seq1 doesn't elif start2 > 1 and start1 == 1: return seq2[:start2 - 1] + seq1[start1 - 1: end1] else: return seq1[start1 - 1: end1]
d58dcea4117d1826a434c87092b229a271d4b775
17,799
def get_predicted_gender(spanish_sent):
    """ Return the gender of the first entity in the Spanish translation. """
    first_word = spanish_sent.split()[0].lower()
    if first_word == "el":
        return "male"
    elif first_word == "la":
        return "female"
    else:
        return "neutral"
94f43d37f29af4e3f1314a5e177d2e8036036a0a
17,800
import torch

def make_onehot_kernel(kernel_size, index):
    """
    Make a 2D one-hot square kernel, i.e. h = w.
    k[kernel_size, kernel_size] = 0 except k.view(-1)[index] = 1
    """
    kernel = torch.zeros(kernel_size, kernel_size)
    kernel.view(-1)[index] = 1
    return kernel.view(1, 1, kernel_size, kernel_size)
be324887a77f454e9f2c306e1bc9eddd8c001bb8
17,801
def manual_expo_mode(state):
    """ Bool to express if mode is manual. """
    return state["expo_mode"] == "manual"
b9797a0e85ef9474b8819555afc33cf020031fc1
17,802
from datetime import datetime
import logging

def get_current():
    """ Grabs the current month in integer / string formats and the year. """
    # format month and year for datetime comparison
    month = datetime.now().strftime('%m')
    month_word = datetime.now().strftime('%B')
    year = datetime.now().year
    logging.info(f'Current: {month_word}, {month}-{year}')
    return month, month_word, year
28666b9f643e8d4b9879aeb84dd653adafb4f6e4
17,803
def whitelist(squat_candidates, whitelist_filename="whitelist.txt"): """Remove whitelisted packages from typosquat candidate list. Args: squat_candidates (dict): dict of packages and potential typosquatters whitelist_filename (str): file location for whitelist Returns: dict: packages and post-whitelist potential typosquatters """ # Create whitelist whitelist = [] with open(whitelist_filename, "r") as file: for line in file: # Strip out end of line character whitelist.append(line.strip("\n")) # Remove packages contained in whitelist whitelist_set = set(whitelist) for pkg in squat_candidates: new_squat_candidates_set = set(squat_candidates[pkg]) - whitelist_set new_squat_candidates_list = list(new_squat_candidates_set) # Update typosquat candidate list squat_candidates[pkg] = new_squat_candidates_list return squat_candidates
5d8305d7ee721988420a6035d2df9e0b52404b7b
17,804
import sys import textwrap import itertools def format_record_restructuredtext( record, out=sys.stdout, structured_sort=lambda k: k.name, unstructured_sort=lambda k: k.group_by if k.group_by is not None else "", ): """Convert a record to a reStructuedText document. :param record: Record to format. :param out: A file type object to write the record in. Default `stdout`. :param structured_sort: A function of one argument that will be used to sort the structured data items. The default is to sort by the name of the item. :param unstructured_sort: A function of one argument that will be used to sort the unstructured data items. The default is to sort on `group_by`. """ def print_table_header(columns): for name, width in columns: print(f"+-{'-'*width}-", end="", file=out) print("+", file=out) for name, width in columns: print(f"| {name}{' '*(width-len(name))} ", end="", file=out) print("|", file=out) for name, width in columns: print(f"+={'='*width}=", end="", file=out) print("+", file=out) def print_table_entry(data, columns): cstrs = [(str(d), w) for d, (n, w) in zip(data, columns)] cstrs = [textwrap.wrap(s, w) for s, w in cstrs] for rstrs in itertools.zip_longest(*cstrs): for index, vstr in enumerate(rstrs): width = columns[index][1] if vstr is None: print(f"| {' '*width} ", end="", file=out) else: print(f"| {vstr:<{width}} ", end="", file=out) print("|", file=out) for _, width in columns: print(f"+-{'-'*width}-", end="", file=out) print("+", file=out) def print_structured_item_table(record): columns = compute_column_widths( record.structured_data, ["name", "data_type", "target", "value"], [16, 8, 6, 32], ) print_table_header(columns) data_items = list(record.structured_data) data_items.sort(key=structured_sort) for sdi in data_items: data = [sdi.name, sdi.data_type.name, sdi.target.name, sdi.value] print_table_entry(data, columns) def print_unstructured_item_table(record): columns = compute_column_widths( record.unstructured_data, ["file_type", "group_by", "data_uri", "accessible"], [8, 8, 8, 16], ) print_table_header(columns) data_items = list(record.unstructured_data) data_items.sort(key=unstructured_sort) for udi in data_items: data = [udi.file_type, udi.group_by, udi.data_uri, udi.accessible] print_table_entry(data, columns) def compute_column_widths(items, names, mins): widths = list(mins) for item in items: for index, width in enumerate(widths): value = getattr(item, names[index]) width = max(len(str(value)), width) widths[index] = width if sum(widths) + len(widths) * 3 + 1 > 132: reduce = sum(widths) + len(widths) * 3 + 1 - 132 maxcol = max(widths) widths[widths.index(maxcol)] = maxcol - reduce return list(zip(names, widths)) print("=" * len(str(record))) print(record) print("=" * len(str(record))) print() print("Structured Data") print("===============") print_structured_item_table(record) print(file=out) print("Unstructured Data", file=out) print("=================", file=out) print_unstructured_item_table(record)
a5aa4572dba17040f809a8e957a580ff4aa6a762
17,806
def str_to_list(text):
    """
    input: "['clouds', 'sky']" (str)
    output: ['clouds', 'sky'] (list)
    """
    # res = []
    res = [i.strip('[]\'\"\n ') for i in text.split(',')]
    return res
2f5bea8885177f89d0d745fdddf0ecd589d1d9bf
17,807
def legacy_collision_handler(slug, node1, node2):
    """Ignores all collisions, like :class:`SystemDispatcher` does.
    """
    if node1.type == 'directory' and node2.type != 'directory':
        if not node1.children:
            # Ignore empty directory
            return 'replace_first_node'
        if '' not in node1.children:
            # Allow `/bar.spt` to act as the index of `/bar/`
            return 'set_second_node_as_index_of_first_node'
    return 'ignore_second_node'
1dd7671eaae8b5889c3bd8f9461917376d73a3d6
17,808
def generate_family_characteristics(df, family_id, group_ids):
    """
    Given either an HMIS or a Connecting Point dataframe, add columns regarding family structure.

    :param df: HMIS or Connecting Point dataframe.
    :type df: Pandas.Dataframe.
    :param family_id: column name of family identifier.
    :type family_id: str.
    :param group_ids: grouping column names.
    :type group_ids: [str].
    """
    df['With Child?'] = df.groupby(group_ids)['Child?'].transform(any)
    df['With Adult?'] = df.groupby(group_ids)['Adult?'].transform(any)
    df['With Family?'] = df['With Child?'] & df['With Adult?']
    df['Family?'] = df.groupby(family_id)['With Family?'].transform(any)
    return df
f4d37d396352f6e9236e6710d8115bc6a8d3e632
17,809
import functools

def int_ip_from_string(ip_string):
    """
    Convert ip4 address from string representation into int (4 bytes).

    Parameters
    ----------
    ip_string : string
        ip4 address as string (dot-separated)

    Returns
    -------
    int
        4-byte integer ip4 address representation
    """
    addr_segments = map(int, reversed(ip_string.split('.')))
    return functools.reduce(lambda hi, lo: (hi << 8) | lo, addr_segments, 0)
d6a2cd4d93c54887697396533203d1b716e53cfe
17,811
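A quick illustrative call of int_ip_from_string above; because the octets are reversed before folding, the first octet ends up in the least-significant byte.

print(int_ip_from_string("1.0.0.0"))        # 1
print(hex(int_ip_from_string("1.2.3.4")))   # 0x4030201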
import numpy def weighted_median(data, weights): """ Args: data (list or numpy.array): data weights (list or numpy.array): weights """ data, weights = numpy.array(data).squeeze(), numpy.array(weights).squeeze() s_data, s_weights = map(numpy.array, zip(*sorted(zip(data, weights)))) midpoint = 0.5 * sum(s_weights) if any(weights > midpoint): w_median = (data[weights == numpy.max(weights)])[0] else: cs_weights = numpy.cumsum(s_weights) idx = numpy.where(cs_weights <= midpoint)[0][-1] if cs_weights[idx] == midpoint: w_median = numpy.mean(s_data[idx:idx+2]) else: w_median = s_data[idx+1] return w_median
dff154dd195cef523eb386888c4beddaee68542a
17,813
import re def find_pattern(text_to_search): """ Find pattern .dylib within a text """ pattern = re.compile(r'[a-zA-Z0-9_.+-]+\.dylib') matches = pattern.findall(text_to_search) pattern_path = re.compile(r'[a-zA-Z0-9_.+-/@]+\.dylib') matches_path = pattern_path.findall(text_to_search) irregular_patterns = [r'\/Python', r'\/CoreFoundation', r'\/CoreServices', ] irregular_dir_patterns = [r'[a-zA-Z0-9_.+-/]+/Python', r'[a-zA-Z0-9_.+-/]+/CoreFoundation', r'[a-zA-Z0-9_.+-/]+/CoreServices', ] list_irregular_matches = [] list_irregular_path_matches = [] for irr_pattern, irr_dir_pattern in zip(irregular_patterns,irregular_dir_patterns): pattern_irr = re.compile(irr_pattern) pattern_path_irr = re.compile(irr_dir_pattern) matches_irr = pattern_irr.findall(text_to_search) matches_path_irr = pattern_path_irr.findall(text_to_search) list_irregular_matches = list_irregular_matches + matches_irr list_irregular_path_matches = list_irregular_path_matches + matches_path_irr matches_def = matches + list_irregular_matches matches_def_path = matches_path + list_irregular_path_matches return (matches_def[0], matches_def_path[0])
4f4ec47de86f48782291cc4b7522f88a248d9405
17,814
def get_values_as_str(history, key):
    """Retrieve values from sequence of dictionaries."""
    return [i[key] for i in history]
46687f63ce519d532f44f4aa6eb836eb079cadba
17,815
def lambda_tuple_converter(func):
    """
    Converts a Python 2 function as
        lambda (x,y): x + y
    in the Python 3 format:
        lambda x,y : x + y
    """
    if func is not None and func.__code__.co_argcount == 1:
        return lambda *args: func(args[0] if len(args) == 1 else args)
    else:
        return func
e513ed89d3cfc075031a6ecb45d6b839b4b9fdf7
17,817
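A short illustrative use of lambda_tuple_converter above, wrapping a Python 2 style tuple-argument lambda so it also accepts separate arguments:

add = lambda xy: xy[0] + xy[1]     # single tuple parameter (Python 2 style)
add3 = lambda_tuple_converter(add)
print(add3(2, 3))                  # 5 (arguments are packed back into a tuple)
print(add3((2, 3)))                # 5 (a single tuple argument still works)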
def merge_compute(left, right): """ Merge two dictionnaries but computing integer values instead of overriding. Left override every right values except when both left and right value are integers then right value will be incremented by left value. Arguments: left (dict): The dict to merge into right. right (dict): The merge in the left dict. Returns: dict: Merged dict from left to right. """ for k, v in left.items(): # Only compute item if both left and right values are integers, else # left override right value if k in right and type(v) is int and type(right[k]) is int: right[k] += v else: right[k] = v return right
37823d5c3bd94c94685aed1f965c77d787900633
17,818
import torch

def find_good_mu(noise_mag):
    """computes the regularization parameter in adversarial regularizer according to their heuristic"""
    return 2 * torch.mean(noise_mag)
d7c3ff16ea2f7ce64c81afa20aad3267eacb863f
17,819
def verify_params(post_dict):
    """ Verify that a post dict contains the expected parameters. """
    if ('target' not in post_dict) or ('source' not in post_dict):
        return False
    if not post_dict['target'] or not post_dict['source']:
        return False
    return True
b0dc66873e1771a898eb0b12c778bc2c4341e69f
17,820
def isClimateMap():
    """ Uses the Climate options """
    return 1
af5801b3d7b0c7dba8995ce2446a8c31918ee440
17,821
def build_metadata_from_setuptools_dict(metadata): """Build and return metadata from a setuptools dict. This is typically from package metadata. """ # based on warehouse and virtualenv examples # https://github.com/pypa/warehouse/blob/master/warehouse/templates/packaging/detail.html # https://github.com/pypa/virtualenv/blob/master/setup.cfg project_urls = metadata["project_urls"] output = { "name": metadata["name"], "version": metadata["version"], "description": metadata["description"], "homepage": metadata["url"], "license": metadata["license"], "maintainers": metadata["author"], "repository": project_urls["Source"], "issuesurl": project_urls["Tracker"], } # contributors is a special case where they might be # specified in the conf.py so don't raise an error output["contributors"] = metadata.get("contributors", None) return output
954ec12c2b557f4a5dbad6ed5cdf3f0b05123e21
17,822
def checkConfig(config):
    """Checks that all necessary configuration fields have been set.

    Returns the names of any invalid configuration fields.

    :param config: The Configuration object to check.
    :type config: configuration.Configuration
    :return: Names of fields that have yet to be set.
    :return type: List[str]
    """
    remainingFields = [
        f for f in dir(config)
        if not f.startswith('_')
        and not f == 'fields'
        and not callable(getattr(config, f))
        and not type(getattr(config, f)) is int
    ]
    return remainingFields
e5635be1534e587b0c3612caf79147b2d67ecb30
17,823
import pickle

def load_tokenizer(path):
    """Load a tokenizer stored at the given path.

    Args:
        path (string): where the tokenizer is stored
    """
    with open(path, 'rb') as handle:
        tokenizer = pickle.load(handle)
    return tokenizer
32958c9294d4e93ac66640a150bdaee7e72fcfe1
17,825
def _get_special_ids(tokenizer):
    """Gets the ids of special [T] and [P] tokens."""
    trigger_token_id = tokenizer.convert_tokens_to_ids('[T]')
    if trigger_token_id == tokenizer.unk_token_id:
        raise ValueError('Tokenizer does not have special [T] token.')
    predict_token_id = tokenizer.convert_tokens_to_ids('[P]')
    if predict_token_id == tokenizer.unk_token_id:
        raise ValueError('Tokenizer does not have special [P] token.')
    return trigger_token_id, predict_token_id
ad3458150df641a3ae3ae3ce5d4481b438345757
17,828
import shutil def mpi_executable(preferred_executable=None): """ Return an mpi executable found on the current system. Depending on your MPI implementation, the executable name to run an MPI application may differ. This function will check which one is available and return the first valid one it finds. Valid in this case means that it can be found with methods like `which`. To override which executable to check first you can pass your preferred executable as an argument. Parameters ---------- preferred_executable : str, optional The first executable to check for on the system. If it isn't found, will continue with the regular search for valid MPI executables. Returns ------- str The name of a valid MPI executable. Raises ------ RuntimeError If no valid MPI executable could be found, a RuntimeError is raised. """ if preferred_executable: if shutil.which(preferred_executable): return preferred_executable else: raise RuntimeError( f"The given preferred mpi executable `{preferred_executable}` " "was not found on this system" ) executables = ["mpirun", "mpiexec", "srun"] for executable in executables: if shutil.which(executable): return executable raise RuntimeError( "Could not find an mpi installation. Make sure your PATH is set correctly." )
ab197a7591da0609a4af3eee57e8b8947ab19d9d
17,830
def extract_pattern(fmt): """Extracts used strings from a %(foo)s pattern.""" class FakeDict(object): def __init__(self): self.seen_keys = set() def __getitem__(self, key): self.seen_keys.add(key) return '' def keys(self): return self.seen_keys fake = FakeDict() try: fmt % fake except TypeError: # Formatting error pass return set(fake.keys())
81116122db6167322a4c7d05f6d45dc257fa5a74
17,831
def yaml_variables_subst(yaml_raw, variables=None):
    """
    Performs variables substitute on a provided raw YAML content

    :type yaml_raw str
    :type variables dict
    :rtype: str
    """
    if variables is None:
        return yaml_raw
    # replace "${VAR_NAME}"
    for key, value in variables.items():
        yaml_raw = yaml_raw.replace('${%s}' % key, value)
    return yaml_raw
de1c858712c73b6d81ed4f9db10b136a77ebea78
17,832
def illuminanceToPhotonPixelRate(illuminance, numerical_aperture=1.0, pixel_size=6.5e-6, magnification=1, sample_quantum_yield=1., **kwargs): """ Function which converts source illuminance and microscope parameters to photons / px / s. Based heavily on the publication: "When Does Computational Imaging Improve Performance?," O. Cossairt, M. Gupta and S.K. Nayar, IEEE Transactions on Image Processing, Vol. 22, No. 2, pp. 447–458, Aug. 2012. However, this function implements the same result for microscopy, replacing f/# with NA, removing reflectance, and including magnification. Args: exposure_time: Integration time, s source_illuminance: Photometric source illuminance, lux numerical_aperture: System numerical aperture pixel_size: Pixel size of detector, um magnification: Magnification of imaging system Returns: Photon counts at the camera. """ # Conversion factor from radiometric to photometric cordinates # https://www.thorlabs.de/catalogPages/506.pdf K = 1 / 680 # Planck's constant # h_bar = 6.626176e-34 h_bar = 1.054572e-34 # Speed of light c = 2.9979e8 # Average wavelength wavelength = 0.55e-6 # Constant term const = K * wavelength / h_bar / c # Calculate photon_pixel_rate photon_pixel_rate = sample_quantum_yield * const * (numerical_aperture ** 2) * illuminance * (pixel_size / magnification) ** 2 # Return return photon_pixel_rate
9bb90c07c3b58c8b82424147018d697f095303f7
17,836
def Main():
    """
    :return:
    """
    a = [1, 2, 3, 4]
    b = len(a)
    c = -424
    d = abs(c)
    m1 = 32
    m2 = 21
    m3 = min(m1, m2)
    m4 = max(b, c)
    a2 = a[2:]
    return m4 + m3
9c8c8d9836b05a3393c774a22d833eb6c5a0bc09
17,837
def abs2(numero):
    """ (num) -> num

    Compute the absolute value of a number.

    >>> abs2(10)
    10
    >>> abs2(-8)
    8
    >>> abs2(0)
    0

    :param numero: the number to evaluate
    :return: the absolute value of the number
    """
    if numero < 0:
        # The return keyword is used to return the result of our function
        return -numero
    return numero
826ab80cc1af250e9e332e72c81ffcc5a5383d83
17,838
import requests def fetch_output(input_prefix, input_value, output_prefix, enable_semantic_search=False): """Find APIs which can produce the output_prefix :arg str input_prefix: The prefix of the input, e.g. ncbigene, hgnc.symbol. :arg str output_prefix: The prefix of the output, e.g. ncbigene, hgnc.symbol. :arg str input_value: The actual value of the input :arg boolean enable_semantic_search: :returns: list -- api endpoints which can produce the output prefix """ if enable_semantic_search: response = requests.get('http://biothings.io/explorer/api/v2/semanticquery?input_prefix={{input}}&output_prefix={{output}}&input_value={{value}}'. replace("{{input}}", input_prefix).replace("{{output}}", output_prefix).replace("{{value}}", input_value)) else: response = requests.get('http://biothings.io/explorer/api/v2/directinput2output?input_prefix={{input}}&output_prefix={{output}}&input_value={{value}}'. replace("{{input}}", input_prefix).replace("{{output}}", output_prefix).replace("{{value}}", input_value)) if response.ok: doc = response.json() return doc['data'] if doc else [] else: doc = response.json() if 'error message' in doc: print(doc['error message']) else: print("No results could be found for your query!") return []
1ed8f27923695ab7587f49083070aecb040987f6
17,839
import torch

def InlierPortion(predict_weight, gt_weight, threshold=0.3):
    """
    Input: predict_weight ([Batch_size, N]), gt_weight ([Batch_size, N]), threshold for deciding inliers
    Output: portion of the ground truth inliers detected, ground truth inliers among the detected inliers
    """
    predict_weight[predict_weight >= threshold] = 1
    predict_weight[predict_weight < threshold] = 0
    por_inlier = torch.sum(predict_weight * gt_weight) / torch.sum(gt_weight)
    gt_inlier_por = torch.sum(predict_weight * gt_weight) / torch.sum(predict_weight)
    return por_inlier, gt_inlier_por
0b728a59bb880eecb1d9636a838cc4cc6040bdb8
17,840
from datetime import datetime

def timestamp_string_parse(value):
    """Convert a timestamp string to the %d/%m/%Y format."""
    return datetime.fromtimestamp(int(value)).strftime('%d/%m/%Y')
1c561fe6cebac5fba05cc2ce56bf14a2311c51ae
17,841
import sys

def exclude_selection(data, selection):
    """Makes a new dataset excluding points whose id is in selection."""
    new_data = [
        a_data_point for a_data_point in data
        if a_data_point.par.get('resonance_id', None) not in selection
    ]
    if new_data == data:
        sys.stdout.write("\n No Data removed! Aborting ...\n")
        exit(1)
    return new_data
7488d44d23b2f9bec42a138cfdd2e32d34b7d473
17,842
def collinear(p1, p2, p3):
    """
    Check if the points are collinear

    :param p1: point to check, as an indexable (x, y) pair
    :param p2: point to check, as an indexable (x, y) pair
    :param p3: point to check, as an indexable (x, y) pair
    :return bool: True if the three points are collinear
    """
    return (p1[1] - p2[1]) * (p1[0] - p3[0]) == (p1[1] - p3[1]) * (p1[0] - p2[0])
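
# Hedged usage sketch (not part of the original source): plain (x, y) tuples are
# enough, since the function only indexes the first two coordinates.
print(collinear((0, 0), (1, 1), (2, 2)))  # True
print(collinear((0, 0), (1, 1), (2, 3)))  # False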
5738649db59980742dd7fd089d8f27302eead0ac
17,843
import glob def parse_lmat_output(lmat_dirname, allow_missing=True): """ Returns dict (field_name) -> (parsed_value), see code for list of field_names. No longer used (LMAT isn't part of pipeline any more), but kept around in case it's useful to resurrect it some day. """ # Represent each taxon by a 4-tuple (nreads, score, rank, name) taxa = [ ] nreads_tot = 0 for filename in glob.glob(f"{lmat_dirname}/*.fastsummary"): for line in open(filename): line = line.rstrip('\r\n') t = line.split('\t') assert len(t) == 4 i = t[3].rfind(',') assert i >= 0 (score, nreads, ncbi_id) = (float(t[0]), int(t[1]), int(t[2])) (rank, name) = (t[3][:i], t[3][(i+1):]) assert nreads > 0 taxon = (nreads, score, rank, name) taxa.append(taxon) nreads_tot += nreads if (not allow_missing) and (nreads_tot == 0): raise RuntimeError(f"couldn't find fastsummary files in lmat dir '{lmat_dirname}'") top_taxa = [ ] top_taxa_ann = [ ] nreads_cumul = 0 for (nreads, score, rank, name) in reversed(sorted(taxa)): # Roundoff-tolerant version of (nreads_cumul >= 0.9 * nreads_tot) if 10*nreads_cumul >= 9*nreads_tot: break percentage = 100.*nreads/nreads_tot top_taxa.append(name) top_taxa_ann.append(f"{name} ({rank}, {percentage:.1f}%)") nreads_cumul += nreads # 'top_taxa_ann' = "top taxa with annotations" return { 'top_taxa': top_taxa, 'top_taxa_ann': top_taxa_ann }
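
# Hedged usage sketch (not part of the original source): writes a tiny fake
# *.fastsummary file into a temporary directory so the parser can be exercised
# without a real LMAT run; the tab-separated layout mirrors what the code reads.
import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    with open(os.path.join(d, "toy.fastsummary"), "w") as f:
        f.write("0.95\t90\t562\tspecies,Escherichia coli\n")
        f.write("0.80\t10\t543\tgenus,Escherichia\n")
    print(parse_lmat_output(d))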
33ba466c57d972652537a807fd9ad9a15398f762
17,844
def plugin_final_label(): """A plug-in function with a different label""" return "final"
eab1ec3e35099afa92ced379552db06ce5ee3576
17,845
import requests def validate_status_via_raw(recommendation, config_data): """ Runs validation checks against the recommendation status and associated reputation override. Args: recommendation (Recommendation): The recommendation object to be tested. config_data (dict): Contains configuration data for the request. Returns: bool: True if verification was successful, False if not. """ url = "{0}recommendation-service/v1/orgs/{1}/recommendation/_search".format(config_data['hostname'], config_data['org_key']) request_body = {'criteria': {'status': ['NEW', 'REJECTED', 'ACCEPTED']}, 'rows': 50} request_headers = {'X-Auth-Token': config_data['apikey']} response = requests.post(url, json=request_body, headers=request_headers) if response.status_code != 200: print(f"attempt to get recommendation data failed with code {response.status_code}") return False result_array = response.json()['results'] good_results = [block for block in result_array if block['recommendation_id'] == recommendation.recommendation_id] if len(good_results) != 1: print(f"Unable to re-locate recommendation with ID {recommendation.recommendation_id}") return False new_status = good_results[0]['workflow']['status'] if new_status != recommendation.workflow_.status: print(f"Recommendation status incorrect - is {new_status}, should be {recommendation.workflow_.status}") return False if new_status == 'ACCEPTED': new_ref_id = good_results[0]['workflow'].get('ref_id', None) if not new_ref_id: print("Reputation Override reference ID is not present when it should be") return False rep_override = recommendation.reputation_override() if not rep_override: print("Reputation Override object is not present when it should be") return False url = "{0}appservices/v6/orgs/{1}/reputations/overrides/{2}".format(config_data['hostname'], config_data['org_key'], new_ref_id) response = requests.get(url, headers=request_headers) if response.status_code != 200: print(f"attempt to get reputation override data failed with code {response.status_code}") return False raw_rep_override = response.json() if raw_rep_override['id'] != rep_override.id: print(f"Reputation override ID incorrect - is {raw_rep_override['id']}, should be {rep_override.id}") return False else: if good_results[0]['workflow'].get('ref_id', None): print("Reputation Override reference ID is present when it shouldn't be") return False if recommendation.reputation_override(): print("Reputation Override object is present when it shouldn't be") return False return True
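
# Hedged usage sketch (not part of the original source): config_data only needs
# the keys read above; the hostname (with trailing slash), org key and API token
# placeholders are hypothetical, and the recommendation object would come from
# the Carbon Black Cloud SDK in a real run.
config = {
    "hostname": "https://defense.example.com/",
    "org_key": "EXAMPLE_ORG",
    "apikey": "<API token>",
}
# ok = validate_status_via_raw(recommendation_from_sdk, config)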
3156b67eec9c67bcc1a3b1f8d46057e527d45de3
17,846
def ListToBullets(files):
    """! Convert a list of file names into a bulleted list (as a string).

    @param files: (list) names of files to be compiled into a bullet list in a string.
    @return a string with a '## Modules' heading followed by a well-formatted bullet
            list of file names, or an empty string if files is None or empty.

    ## Profile
    * line count: 7
    * characters: 149
    * returns: return '', return out
    """
    if files is None or len(files) < 1:
        return ''
    out = '## Modules\n\n'
    for f in files:
        out += '* {}\n'.format(f)
    return out
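
# Hedged usage sketch (not part of the original source): the file names are made up.
print(ListToBullets(['reader.py', 'writer.py']))
# ## Modules
#
# * reader.py
# * writer.py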
8ba84c7fc467c8927e690e7762f4412209d5499d
17,849
def _remove_duplicate_transactions(transactions, reference_transactions): """Returns a list of transactions that are not present in reference_transactions.""" return [t for t in transactions if t not in reference_transactions]
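
# Hedged usage sketch (not part of the original source): any equality-comparable
# transaction representation works, e.g. plain dicts.
new = [{'id': 1}, {'id': 2}]
already_booked = [{'id': 2}]
print(_remove_duplicate_transactions(new, already_booked))  # [{'id': 1}]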
8e2ff00dd845acd9ab63f93b8d998fe897a7d21e
17,850
from typing import Dict from typing import Any import os import json def read_json(path: str) -> Dict[str, Any]: """ Read a JSON file (assumed to be a dictionary). """ if os.path.exists(path): with open(path, "r", encoding="utf-8") as file: return json.load(file) else: return {}
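
# Hedged usage sketch (not part of the original source): the path is hypothetical;
# a missing file simply yields an empty dict, so callers can fall back to defaults.
settings = read_json("config/settings.json")
print(settings.get("debug", False))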
668d5b9adc3350cb9d5825fe45eefb9403c518e2
17,851
import argparse from pathlib import Path def parse_arguments() -> argparse.Namespace: """Parse arguments from CLI.""" parser = argparse.ArgumentParser( description="Enduro Learner", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument( "--store_path", type=Path, default="temp/", help="Place to store videos.", ) parser.add_argument( "--trial_names", nargs="+", help="List of trials to train on.", ) parser.add_argument( "--batch_size", type=int, default=8, help="Train batch size.", ) parser.add_argument( "--num_workers", type=int, default=4, help="Number of workers", ) parser.add_argument("--train", action="store_true") parser.add_argument( "--model_path", type=Path, default="models/", help="Parent directory to store models", ) parser.add_argument( "--train_run_name", type=str, required=True, help="Name of the trial", ) parser.add_argument( "--epochs", type=int, default=10, help="Number of epochs to train." ) parser.add_argument( "--learning_rate", type=float, default=0.01, help="Learning rate, what else?", ) parser.add_argument( "--opt", type=str, default="adam", help="Name of the optimizer", ) parser.add_argument( "--tune", action="store_true", help="Use optuna hyper parameter tuning.", ) parser.add_argument( "--play", action="store_true", help="Let the model play a round." ) parser.add_argument( "--watch", action=("store_true"), help=("Open window to see the model's game.") ) parser.add_argument( "--model", type=str, required=True, choices=["Big", "Simple", "ResNet"], help="Model architecture", ) return parser.parse_args()
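
# Hedged usage sketch (not part of the original source): the run name and argument
# values are made up; --train_run_name and --model are the two required options.
import sys

sys.argv = ["enduro_learner.py", "--train_run_name", "baseline",
            "--model", "Simple", "--train", "--epochs", "5"]
args = parse_arguments()
print(args.model, args.epochs, args.train)  # Simple 5 True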
b1c89265222174fa65fcd020fbc2e752235435ce
17,853