content
stringlengths
42
6.51k
def comment(s: str):
    """Wrap *s* in LISP inline-comment delimiters (``#| ... |#``)."""
    return "#|" + s + "|#"
def levenshteinDistance(s1, s2):
    """Calculate the Levenshtein edit distance between two arbitrary strings.

    Rolling single-row dynamic programming, adapted from
    https://stackoverflow.com/questions/2460177/edit-distance-in-python

    Args:
        s1: an arbitrary string
        s2: a second arbitrary string
    Return:
        int: how many single-character edits separate the two strings
    """
    # Keep the shorter string on the inner loop so the row buffer is minimal.
    shorter, longer = (s1, s2) if len(s1) <= len(s2) else (s2, s1)
    row = list(range(len(shorter) + 1))
    for j, long_char in enumerate(longer):
        new_row = [j + 1]
        for k, short_char in enumerate(shorter):
            if short_char == long_char:
                new_row.append(row[k])
            else:
                # 1 + min(substitution, deletion, insertion)
                new_row.append(1 + min(row[k], row[k + 1], new_row[-1]))
        row = new_row
    return row[-1]
def first(seq, pred=None):
    """Return the first item in *seq* for which *pred* is true.

    When *pred* is None the first item is returned regardless of value;
    when nothing satisfies the predicate, None is returned.
    """
    if pred is None:
        return next(iter(seq), None)
    return next((item for item in seq if pred(item)), None)
def chunk_version(fritzing_version):
    """Split a Fritzing version string on '.' into a tuple, converting
    all-digit parts to int and leaving mixed parts as str."""
    parts = []
    for part in fritzing_version.split('.'):
        parts.append(int(part) if part.isdigit() else part)
    return tuple(parts)
def is_matching(edges):
    """Determine whether the given set of edges is a matching.

    A matching is a subset of edges in which no node occurs more than once.

    Parameters
    ----------
    edges : iterable
        An iterable of 2-node edges.

    Returns
    -------
    is_matching : bool
        True if the given edges are a matching.
    """
    # Every edge contributes 2 nodes; duplicates shrink the union.
    nodes = set()
    for edge in edges:
        nodes.update(edge)
    return len(nodes) == len(edges) * 2
def _trim_comment(line) -> str: """Remove comment from end of line, if any""" if '#' in line: icomment = line.index('#') line = line[:icomment] return line
def num_if_possible(s):
    """Convert *s* to int if possible, else float, else return it unchanged."""
    for cast in (int, float):
        try:
            return cast(s)
        except ValueError:
            pass
    return s
def _prepare_gdal_options(options: dict, split_by_option_type: bool = False) -> dict: """ Prepares the options so they are ready to pass on to gdal. - Uppercase the option key - Check if the option types are on of the supported ones: - LAYER_CREATION: layer creation option (lco) - DATASET_CREATION: dataset creation option (dsco) - INPUT_OPEN: input dataset open option (oo) - DESTINATION_OPEN: destination dataset open option (doo) - CONFIG: config option (config) - Prepare the option values - convert bool to YES/NO - convert all values to str Args: options (dict): options to pass to gdal. split_by_option_type (optional, bool): True to split the options in a seperate dict per option type. Defaults to False. Returns: dict: prepared options. If split_by_option_type: a dict of dicts for each occuring option type. """ # Init prepared options with all existing option types option_types = [ "LAYER_CREATION", "DATASET_CREATION", "INPUT_OPEN", "DESTINATION_OPEN", "CONFIG", ] prepared_options = {option_type: {} for option_type in option_types} # Loop through options specified to add them for option, value in options.items(): # Prepare option type and name option_type, option_name = option.split(".") option_type = option_type.strip().upper() option_name = option_name.strip().upper() if option_type not in option_types: raise ValueError( f"Unsupported option type: {option_type}, should be one of {option_types}" ) # Prepare value if isinstance(value, bool): value = "YES" if value is True else "NO" # Add to prepared options if option_name in prepared_options[option_type]: raise ValueError( f"option {option_type}.{option_name} specified more than once" ) prepared_options[option_type][option_name] = str(value) # If no split is asked, convert back to original format if split_by_option_type is True: result = prepared_options else: result = {} for option_type in prepared_options: for option_name, value in prepared_options[option_type].items(): result[f"{option_type}.{option_name}"] = 
value return result
def get_yt_link_time(url) -> int:
    """Get the seconds from youtube link's &t=hms format. Returns seconds.

    Scans the URL backwards from its end until the '=' of the time
    parameter, collecting digit runs into hour/minute/second buckets based
    on the unit letter ('h'/'m'/'s') most recently seen to their right.
    NOTE(review): the scan stops at the first '=' from the right, so this
    assumes the time parameter is the last query parameter — confirm
    against callers.
    """
    hours = ""
    minuts = ""
    secs = ""
    surl = str(url)
    if "t=" in surl or "time_continue=" in surl or "start=" in surl:
        at = 0  # which bucket digits belong to: 's'/'m'/'h' (0 = none yet)
        i = len(surl)
        letter = ""
        # Walk right-to-left until the '=' that introduces the time value.
        while i > 0 and letter != "=":
            i -= 1
            letter = surl[i]
            # int() is used purely as a digit test; the bare except skips
            # any non-digit character without touching the bucket.
            if at == 's':
                try:
                    checkint = int(letter)
                    secs = surl[i] + secs
                except:
                    pass
            elif at == 'm':
                try:
                    checkint = int(letter)
                    minuts = surl[i] + minuts
                except:
                    pass
            elif at == 'h':
                try:
                    checkint = int(letter)
                    hours = surl[i] + hours
                except:
                    pass
            if i == len(surl) - 1:
                # the second letter from the end == an int, meaning there isnt an 's'
                try:
                    checkint = int(letter)
                    at = 's'
                    secs = letter
                except:
                    pass
            # A unit letter switches the bucket for the digits to its left.
            if letter == 's':
                at = 's'
            elif letter == 'm':
                at = 'm'
            elif letter == 'h':
                at = 'h'
    # Empty buckets count as zero.
    if hours == "":
        hours = 0
    else:
        hours = int(hours)
    if minuts == "":
        minuts = 0
    else:
        minuts = int(minuts)
    if secs == "":
        secs = 0
    else:
        secs = int(secs)
    return hours * 3600 + minuts * 60 + secs
def handle_cli_id(apiclient, given_instance_id=None):
    """Infer an instance ID from command-line interface arguments.

    When *given_instance_id* is provided it is returned unchanged and no
    other action is taken (eventually this might validate the ID, e.g.
    check it exists and the API token can use it). Otherwise the client is
    queried: exactly one active instance -> that instance; any other count
    -> a message is printed and None returned.
    """
    if given_instance_id is not None:
        return given_instance_id
    active_instances = apiclient.get_instances()
    if len(active_instances) == 1:
        return active_instances[0]
    message = ('ambiguous command because more than one active instance'
               if len(active_instances) > 1 else 'no active instances')
    print(message)
    return None
def search(T, w, dist, i = 0):
    """Make a recommendation with a word from the dictionary close to the
    badly spelled word *w*, walking the trie *T*.

    Params
    ------
    T: Trie structure (``T.char`` maps letters to child nodes and
       ``T.is_word`` flags word ends — assumed from usage; confirm)
    w: String
        The badly spelled word to correct (has to be in lower case)
    dist: Int
        Max edit distance to test in the trie structure
    i: Int
        Current position inside *w* (internal recursion parameter)

    Returns the suggested word's suffix from position *i*, or None when no
    word is reachable with exactly *dist* edits.
    """
    if len(w) == i:
        # Whole word consumed: accept only on a word node with the edit
        # budget exactly used up.
        if T != None and T.is_word and dist == 0:
            return ''
        else:
            return None
    if T == None:
        return None
    # Matching the current character costs nothing.
    find = search(T.char[w[i]], w, dist, i + 1)
    if find != None:
        return w[i] + find
    if dist == 0:
        return None
    # Spend one edit: try every letter as an insertion (stay at i) or as a
    # substitution for w[i] (advance to i + 1).
    for c in [chr(i) for i in range(ord('a'), ord('z') + 1)]:
        find = search(T.char[c], w, dist - 1, i)
        if find != None:
            return c + find
        find = search(T.char[c], w, dist - 1, i + 1)
        if find != None:
            return c + find
    # Follow the exact w[i] child while still paying one edit.
    # NOTE(review): since acceptance requires dist == 0 exactly, this looks
    # intended to burn leftover budget on a no-op edit — confirm.
    return search(T.char[w[i]], w, dist - 1, i + 1)
def EnforceActivityWindow(start_time, end_time, instance_events):
    """Enforce possible activity windows defined in the config file.

    *instance_events* are inter-arrival intervals; they are converted to
    absolute event times, filtered to the open window
    (start_time, end_time), and converted back to inter-arrival form.
    Returns [] when no event falls inside the window.
    """
    absolute = [0] + instance_events
    event_times = [sum(absolute[:i]) for i in range(1, len(absolute))]
    in_window = [t for t in event_times if start_time < t < end_time]
    if not in_window:
        return []
    deltas = [in_window[0]]
    for prev, cur in zip(in_window, in_window[1:]):
        deltas.append(cur - prev)
    return deltas
def cleanUpKeyValuePair(rawKey, rawValueStr):
    """Clean a raw key/value pair: strip whitespace from both, and strip
    leading/trailing '-' from the key. Returns (key, value)."""
    cleaned_key = rawKey.strip().strip('-')
    cleaned_value = rawValueStr.strip()
    return cleaned_key, cleaned_value
def dot_product(x, y, n):
    """Dot product of sequences *x* and *y* with each term reduced mod *n*.

    Note the per-term products are reduced mod n individually; the final
    sum is NOT reduced again (matches the original contract).

    Raises ValueError when the lengths differ or *n* is not an int.
    """
    if len(x) != len(y):
        raise ValueError("x and y must be the same length!")
    if not isinstance(n, int):
        raise ValueError("n must be an integer!")
    return sum((a * b) % n for a, b in zip(x, y))
def persona_from_template_values(topic: str, topic_item: str, extra_details: str = ''):
    """Generate the persona sentence for the apprentice's selection,
    appending *extra_details* on a new line when given."""
    lines = [f'My favorite {topic} is {topic_item}.']
    if extra_details:
        lines.append(extra_details)
    return '\n'.join(lines)
def molar_concentration_water_vapour(relative_humidity, saturation_pressure, pressure):
    """Molar concentration of water vapour :math:`h`.

    :param relative_humidity: Relative humidity :math:`h_r`
    :param saturation_pressure: Saturation pressure :math:`p_{sat}`
    :param pressure: Ambient pressure :math:`p`

    .. math:: h = h_r \\frac{p_{sat}}{p_a}
    """
    product = relative_humidity * saturation_pressure
    return product / pressure
def check_difference_on_lt(val1, val2):
    """Return (val1, val2) when their difference is below 20, else None.

    Helper for split_on_ranges_by_step: ranges 20 or wider would be split
    incorrectly, so only narrower pairs are passed through.
    """
    if val2 - val1 < 20:
        return val1, val2
    return None
def nonempty(str):
    """Return the text unchanged, or '(none)' when it is empty/falsy."""
    # NOTE: the parameter name shadows the builtin `str`; kept as-is so
    # keyword callers are unaffected.
    if str:
        return str
    return "(none)"
def update_result(res, new_data):
    """Merge newly collected counts into the running result dictionary.

    Structure: {is_valid: {day: {event: quantity}}}. Quantities for
    already-known day/event pairs are summed; unseen days are inserted
    wholesale. An empty/falsy *res* is replaced by *new_data* directly.

    :param res: dict accumulated so far
    :param new_data: dict freshly collected
    :return: the merged dict
    """
    if not res:
        return new_data
    for validity, days in new_data.items():
        known_days = res[validity]
        for day, events in days.items():
            if day not in known_days:
                known_days[day] = events
            else:
                for event, quantity in events.items():
                    known_days[day][event] += quantity
    return res
def _unpack_image(pixels): """Flatten out pixels and returns a tuple. The first entry is the size of each pixel.""" unpacked_pixels = [] try: for pix in pixels: for val in pix: unpacked_pixels.append(val) return len(pixels[0]), bytes(unpacked_pixels) except TypeError: return 1, bytes(pixels)
def _twos_comp(val, bits=16): """compute the 2's complement of int val with bits""" if (val & (1 << (bits - 1))) != 0: # if sign bit is set val = val - (1 << bits) # compute negative value return val
def _lowfreq_linear_filter(tumor_index, is_paired): """Linear classifier for removing low frequency false positives. Uses a logistic classifier based on 0.5% tumor only variants from the smcounter2 paper: https://github.com/bcbio/bcbio_validations/tree/master/somatic-lowfreq The classifier uses strand bias (SBF) and read mismatches (NM) and applies only for low frequency (<2%) and low depth (<30) variants. """ if is_paired: sbf = "FORMAT/SBF[%s]" % tumor_index nm = "FORMAT/NM[%s]" % tumor_index else: sbf = "INFO/SBF" nm = "INFO/NM" cmd = ("""bcftools filter --soft-filter 'LowFreqBias' --mode '+' """ """-e 'FORMAT/AF[{tumor_index}:0] < 0.02 && FORMAT/VD[{tumor_index}] < 30 """ """&& {sbf} < 0.1 && {nm} >= 2.0'""") return cmd.format(**locals())
def parse_id(hardware_id):
    """Parse a Nuki ID: the uppercase hex digits of *hardware_id* with the
    '0x' prefix removed."""
    return hex(hardware_id).rpartition("x")[2].upper()
def eq(x, y):
    """Create an SMT-LIB equality statement ``(= x y)``.

    Parameters
    ----------
    x, y: str
        Pre-rendered operand expressions to include in the statement.
    """
    # join() keeps the original behaviour of rejecting non-str operands.
    return "".join(("(= ", x, " ", y, ")"))
def parse_pvc_param_line(pvc_param):
    """Parse a pvc mount of the form
    "pvc-name/subpath/desired:/mountpath/desired[:r]".

    :param pvc_param: the pvc mount param
    :return: dict with keys name, subPath, mountPath and readOnly
    """
    claim_part, _, remainder = pvc_param.partition(":")
    mount_path, _, mode_flag = remainder.partition(":")
    pvc_name, _, sub_path = claim_part.partition("/")
    return {
        'name': pvc_name.strip(),
        'subPath': sub_path.strip(),
        'mountPath': mount_path.strip(),
        'readOnly': mode_flag == "r",
    }
def fnv1a_64(string, seed=0):
    """Return the 64-bit FNV-1a (alternate) hash of *string*.

    Fix: the multiply is now reduced mod 2**64 every round, as the FNV-1a
    reference algorithm specifies. Previously the Python int grew without
    bound, so the result was neither 64-bit nor the standard digest.

    :param string: text to hash (hashed per Unicode code point)
    :param seed: optional value added to the offset basis to vary the hash
    :return: int in [0, 2**64)
    """
    FNV_PRIME = 1099511628211
    OFFSET_BASIS = 14695981039346656037
    MASK = 0xFFFFFFFFFFFFFFFF  # keep the state 64 bits wide

    h = (OFFSET_BASIS + seed) & MASK
    for char in string:
        h ^= ord(char)
        h = (h * FNV_PRIME) & MASK
    return h
def variance(dataset):
    """Compute the population variance of a set of time differences.

    :param dataset: list of values
    :returns: the variance (mean squared deviation from the mean)
    """
    count = len(dataset)
    avg = sum(dataset) / count
    return sum((value - avg) * (value - avg) for value in dataset) / count
def build_azure_signed_url_write_headers(content_length: str, x_ms_blob_type: str = 'BlockBlob', accept: str = '*/*', accept_encoding: str = '*'):
    """Build the headers required for a SAS PUT to Azure blob storage.

    Args:
        content_length: Length of the content in bytes, as a string.
        x_ms_blob_type: Blob type (BlockBlob, PageBlob or AppendBlob).
        accept: Content types the client is able to understand.
        accept_encoding: Content encodings the client can understand.

    Returns:
        Formatted header dict to pass to the PUT request.
    """
    headers = {'x-ms-blob-type': x_ms_blob_type, 'Accept': accept}
    headers['Content-Length'] = content_length
    headers['x-ms-original-content-length'] = content_length
    headers['Accept-Encoding'] = accept_encoding
    return headers
def decodeString(s):
    """Decode a k[encoded_string] string, repeating the bracketed part k times.

    :type s: str
    :rtype: str

    The input is assumed valid: well-formed brackets, k a positive integer,
    digits appear only as repeat counts. Fixes over the previous version:
    multi-digit counts (e.g. "12[ab]") and nested brackets (e.g.
    "2[a2[b]]") are now decoded correctly — the old reverse scan paired the
    wrong brackets and read only one digit of k — and the stray debug
    print is removed.
    """
    # One (prefix, repeat_count) frame per currently-open bracket.
    stack = []
    current = ""
    repeat = 0
    for ch in s:
        if ch.isdigit():
            repeat = repeat * 10 + int(ch)  # accumulate multi-digit k
        elif ch == "[":
            stack.append((current, repeat))
            current, repeat = "", 0
        elif ch == "]":
            prefix, count = stack.pop()
            current = prefix + current * count
        else:
            current += ch
    return current
def _maybe_to_dense(obj): """ try to convert to dense """ if hasattr(obj, 'to_dense'): return obj.to_dense() return obj
def divide_numbers(numerator, denominator):
    """Divide *numerator* by *denominator* after int conversion.

    Both arguments (int/str/float) are converted with int(); a failed
    conversion re-raises the original ValueError. (Previously a fresh,
    message-less ``ValueError`` was raised, discarding the useful
    "invalid literal ..." message and chaining context.)
    Division by zero is caught and yields 0 instead of propagating
    ZeroDivisionError.
    """
    try:
        numerator = int(numerator)
        denominator = int(denominator)
    except ValueError:
        raise  # re-raise with the original message and traceback intact
    try:
        return numerator / denominator
    except ZeroDivisionError:
        return 0
def convert_hex_to_ascii(hex_string: str) -> str:
    """Decode a hex string (e.g. "68656c6c6f") into its text form."""
    decoded_bytes = bytes.fromhex(hex_string)
    return decoded_bytes.decode()
def from_mask(m, coding='big'):
    """Return the integer represented by the bit mask *m*.

    `coding` resembles `byteorder` in `int.to_bytes`: 'big' means the bits
    at the *beginning* of *m* are the most significant bits of the result;
    otherwise the bits at the *end* are the most significant.

    Examples
    ========
    >>> from_mask((1,1,0,0), coding='big')
    12
    >>> from_mask((0,0,1,1), coding='little')
    12
    """
    bits = m if coding == 'little' else reversed(m)
    value = 0
    for position, bit in enumerate(bits):
        value |= bit << position
    return value
def redo_data(data):
    """Alter the data received from the backend to our format.

    Each point's dict samples become [timestamp, value] pairs — or
    [False, timestamp, error_code] for error samples — while non-dict
    samples pass through unchanged.
    """
    output = []
    for point in data:
        converted = []
        for sample in point['values']:
            if not isinstance(sample, dict):
                converted.append(sample)
            elif 'error_code' in sample:
                converted.append([False, sample['timestamp'], sample['error_code']])
            else:
                converted.append([sample['timestamp'], sample['value']])
        output.append({'path': point['path'], 'values': converted})
    return output
def testDim(testlist, dim=0): """tests if testlist is a list and how many dimensions it has returns -1 if it is no list at all, 0 if list is empty and otherwise the dimensions of it""" if isinstance(testlist, list): if testlist == []: return dim dim = dim + 1 dim = testDim(testlist[0], dim) return dim else: if dim == 0: return -1 else: return dim
def regional_data_file_data(base_regional_data_file_data, data_file):
    """Return data file creation data for a region.

    The base dict's entries take precedence, so a "file" key in the base
    overrides *data_file*.
    """
    merged = {"file": data_file}
    merged.update(base_regional_data_file_data)
    return merged
def create_response_payload_with_status(command_request, method_name, create_user_response=None):
    """Create the (status, payload) for responding to a command request.

    Used for all method responses unless the user provides another method
    to construct responses to specific command requests.

    :param command_request: The command request being responded to.
    :param method_name: The method name being responded to (falsy when the
        method is unknown -> 404).
    :param create_user_response: Optional callable building a user-specific
        payload from the request payload.
    :return: (response_status, response_payload) tuple.
    """
    response_status = 200 if method_name else 404
    if create_user_response:
        response_payload = create_user_response(command_request.payload)
    else:
        response_payload = {
            "result": bool(method_name),
            "data": ("executed " + method_name) if method_name else "unknown method",
        }
    return (response_status, response_payload)
def deep_reverse(L):
    """Reverse L in place and reverse the ints inside each element of L.

    assumes L is a list of lists whose elements are ints.

    Fix: the previous version removed items with value-based ``L.remove``,
    which deleted the FIRST equal element — corrupting the result whenever
    L contained duplicate inner lists. This version rebuilds by position.
    As before, the inner lists in the result are fresh reversed copies and
    L itself is mutated in place; L is also returned for convenience.
    """
    L[:] = [inner[::-1] for inner in reversed(L)]
    return L
def fibonacciDP(sequenceNumber):
    """Bottom-up (dynamic programming) Fibonacci lookup.

    Uses the convention F(1) = F(2) = 1; positions 0 and 1 both return 1
    and negative positions return 0, matching the original contract.

    Args:
        sequenceNumber {integer} the place in the sequence to read
    Return:
        {integer} the number from the Fibonacci sequence
    """
    if sequenceNumber < 0:
        return 0
    if sequenceNumber < 2:
        return 1
    # Only the two most recent values are needed — no full table required.
    previous, current = 1, 1
    for _ in range(sequenceNumber - 2):
        previous, current = current, previous + current
    return current
def get_module_name(xname):
    """Parse the xname for named operations into a dotted module path.

    e.g. "VGG / Sequential[features] / Conv2d[0] / Conv_33" style names
    become "VGG.features.0"; the final op segment is dropped and bracketed
    variable names replace their type prefix.
    """
    module_parts = []
    for segment in xname.split('/')[:-1]:
        open_bracket = segment.find('[')
        if open_bracket < 0:
            module_parts.append(segment)
        else:
            # Keep only the name inside the brackets.
            module_parts.append(segment[open_bracket + 1:-1])
    return '.'.join(module_parts)
def num2str(a, n=3):
    """Turn a number into a string with *n* significant digits.

    The %g-formatted text is round-tripped through float so values like
    '1.2e+03' render as '1200.0'.

    Parameters
    ----------
    a : float
        The number to convert.
    n : int, default=3 (optional)
        Number of significant digits.

    Returns
    -------
    result : str
        The string.
    """
    fmt = '%.' + str(n) + 'g'
    return str(float(fmt % a))
def debug(value):
    """Simple tag to debug-print a variable (type, value, dir) and render
    nothing; Usage: {% debug request %}"""
    print(f"{type(value)} {value}: ")
    print(dir(value))
    print('\n\n')
    return ''
def alphabetical_position(char: str) -> int:
    """Return the 1-based alphabetical position of *char* ('a'/'A' -> 1)."""
    return ord(char.upper()) - ord('A') + 1
def filter_ps(ps):
    """Out of all the paths, keep only the lowest-weight ones per end state.

    ps -> List of paths; each path is a pair of (state, weight) hops.
    Paths are grouped by the second hop's state and ranked by the tuple
    (w4 + w5, w4); all paths tying for the minimum rank are kept.
    """
    best = {}
    for path in ps:
        weight_4 = path[0][1]
        weight_5 = path[1][1]
        rank = (weight_4 + weight_5, weight_4)
        end_state = path[1][0]
        entry = best.get(end_state)
        if entry is None or rank < entry[0]:
            best[end_state] = (rank, [path])
        elif rank == entry[0]:
            entry[1].append(path)
    return [path for _, tied in best.values() for path in tied]
def netrentalincome(annualrent, vacancyrate):
    """Return annual net rental income.

    :param annualrent: Yearly rental income if no vacancy.
    :type annualrent: double
    :param vacancyrate: Fraction of the year expected vacant.
    :type vacancyrate: double
    :return: double
    """
    occupied_fraction = 1 - vacancyrate
    return annualrent * occupied_fraction
def is_kind_of_class(obj, a_class):
    """Check whether *obj* is sort of *a_class* — directly or through
    inheritance. Raises TypeError when *a_class* is not a type."""
    if not isinstance(a_class, type):
        raise TypeError("a_class type must be 'type'")
    return isinstance(obj, a_class) or issubclass(type(obj), a_class)
def gwrap(some_string):
    """Return *some_string* wrapped in ANSI green escape codes."""
    return f"\033[92m{some_string}\033[0m"
def same_property_kind(pk_a, pk_b):
    """Return True if the two property kinds are the same, or pseudonyms
    ('permeability rock' / 'rock permeability'); False when either is None."""
    if pk_a is None or pk_b is None:
        return False
    if pk_a == pk_b:
        return True
    perm_synonyms = ('permeability rock', 'rock permeability')
    return pk_a in perm_synonyms and pk_b in perm_synonyms
def getHeight(matrix):
    """Return the length of the first row of *matrix* — the dimension you
    iterate second when walking the image by columns."""
    first_row = matrix[0]
    return len(first_row)
def is_abs_path(path_string):
    """Return True iff *path_string* is absolute: it starts with exactly
    one '/' (a '//' prefix is rejected)."""
    return path_string[:1] == '/' and path_string[:2] != '//'
def normalize_by_chrom_lengths(counts_dict, chrom_lengths_dict):
    """Normalize the number of counts by the length of the chromosome.

    Parameters:
        counts_dict (dict): count_chrom_alignments
        chrom_lengths_dict (dict): output from determine_chrom_lengths()

    Returns:
        counts_dict (dict): the same dict, each count divided by its
        chromosome length (mutated in place).
    """
    for chrom, count in counts_dict.items():
        counts_dict[chrom] = count / float(chrom_lengths_dict[chrom])
    return counts_dict
def find_first_non_none(positions):
    """Return (index, value) of the first non-None element of *positions*,
    or None when every element is None.

    Designed for pysam's get_reference_positions() output, where
    mismatched/soft-masked read positions come back as None even though
    query_alignment_start/end say they technically align.

    Parameters
    ----------
    positions: list of int or None

    Return
    ------
    tuple (index, position) or None
    """
    return next(
        ((idx, pos) for idx, pos in enumerate(positions) if pos is not None),
        None,
    )
def getSVTYPE(chr1, chr2, extend1, extend2):
    """Classify the SV type from the extend directions.

    Different chromosomes -> TRA; equal extends -> INV; (True, False) ->
    DUP; (False, True) -> DEL.
    """
    if chr1 != chr2:
        return "TRA"
    if bool(extend1) == bool(extend2):
        return "INV"
    return "DUP" if extend1 else "DEL"
def get_aoi_from_path(path):
    """Extract the AOI name from a SpaceNet7 image path.

    The layout is .../{train_or_test}/{aoi}/images_masked/{filename}, so
    the AOI is the grandparent directory of the file.
    """
    import os.path
    parent_dir = os.path.dirname(path)
    return os.path.basename(os.path.dirname(parent_dir))
def get_occ_list(lst: list) -> list:
    """Collect the "occ" (occupancy) value from each atom-info dict."""
    occupancies = []
    for doc in lst:
        occupancies.append(doc["occ"])
    return occupancies
def grid_draw(d1, d2, r, g, b):
    """Create a uniform grid for Netpbm image formatting.

    Arguments
    --------
    d1 -- number of [r, g, b] pixels per row
    d2 -- number of rows

    Every pixel is a fresh [r, g, b] list, so cells mutate independently.
    """
    return [[[r, g, b] for _ in range(d1)] for _ in range(d2)]
def insertNewlines(text, lineLength):
    """
    Given text and a desired line length, wrap the text as a typewriter
    would. Insert a newline character ("\n") after each word that reaches
    or exceeds the desired line length.

    text: a string containing the text to wrap.
    lineLength: the number of characters to include on a line before
        wrapping the next word.

    returns: a string, with newline characters inserted appropriately.
    """
    # Precondition: no single word may reach lineLength, otherwise the
    # word-boundary search below could never terminate a line.
    assert lineLength > len(max(text.split(), key = len))
    if len(text) < lineLength:
        # Remainder fits on one line — recursion base case.
        return text
    elif text[lineLength] == ' ':
        # The boundary lands on a space: break right there.
        return text[:lineLength + 1] + '\n' + insertNewlines(text[lineLength + 1:], lineLength)
    else:
        # Boundary falls mid-word: extend the line to the end of that word
        # (up to the next space after lineLength) before breaking.
        # NOTE(review): when the straddling word is the last one,
        # text.find(' ', lineLength) is -1 and the slices lean on that -1
        # arithmetic — confirm trailing-word behaviour with callers.
        return text[:lineLength] + text[lineLength:text.find(' ',lineLength) + 1] + '\n' + insertNewlines(text[text.find(' ',lineLength) + 1:], lineLength)
def _pretty_class(s): """ convert the internal class name representation into what users expect to see. Currently that just means swapping '/' for '.' """ # well that's easy. return str(s).replace("/", ".")
def is_found(params, target):
    """Search for each param in the target, element by element.

    :param params: what to search
    :param target: where to search
    :return: True if all elements of *params* are found in *target*,
        False otherwise
    """
    return all(param in target for param in params)
def move_zeros(array):
    """Move every zero-valued element of *array* to its end, in place,
    keeping the relative order of the remaining elements; returns *array*.

    Elements whose int() conversion equals 0 count as zeros (so 0, 0.0 and
    "0" all move), with the explicit exception of False, which stays put.
    Elements that cannot be int()-converted are skipped.
    """
    # Iterate a snapshot, since `array` itself is mutated below.
    original_array = array[:]
    num_zeros = 0
    for i, obj in enumerate(original_array):
        try:
            if int(obj) == 0 and not (obj is False):
                # i indexes the snapshot; subtract the zeros already moved
                # to locate the element's current position in `array`.
                # NOTE(review): the popped value is re-appended as
                # int(...), so e.g. "0" comes back as the int 0 — confirm
                # this coercion is intended.
                value = int(array.pop(i - num_zeros))
                array.append(value)
                num_zeros += 1
        except Exception:
            # Non-numeric element — leave it where it is.
            continue
    return array
def mean(collection):
    """Return the arithmetic mean of *collection* as a float."""
    total = float(sum(collection))
    return total / len(collection)
def _make_key(category, key): """Generate a binary key for the given category and key. Args: category (str): The category of the item key (str): The unique identifier for the item Returns: The key to use for storing a the value. """ return (b"TuneRegistry:" + category.encode("ascii") + b"/" + key.encode("ascii"))
def top_n_filter(peak_set, n=40):
    """Keep only the top *n* most abundant peaks, returned in m/z order.

    Parameters
    ----------
    peak_set : :class:`Iterable` of peak-like objects
        Objects exposing ``.intensity`` and ``.mz``.
    n : int, optional
        The maximum number of peaks to retain, by default 40

    Returns
    -------
    list
    """
    ranked = sorted(peak_set, key=lambda peak: peak.intensity, reverse=True)
    kept = ranked[:n]
    kept.sort(key=lambda peak: peak.mz)
    return kept
def sanitize_input(ll):
    """Return an alert based on issues with the (value, probability)
    dataset, or None if there is no issue.

    Checks that probabilities sum to 1 (rounded to 5 decimal places) and
    that every variable value is an integer.
    """
    total_p = sum(row[1] for row in ll)
    valid_distribution = round(total_p, 5) == 1
    all_integers = all(row[0] == int(row[0]) for row in ll)
    if not all_integers:
        if not valid_distribution:
            return ("It's not a valid distribution and furthermore, "
                    "one or more variable value are not integers")
        return "All the variable values should be integers"
    if not valid_distribution:
        return "It's not a valid distribution"
def map_position( offset_mapping , position , direction ):
    """Convert a character position to the closest non-skipped position.

    Use the offset mapping dictionary to convert a position to the closest
    valid character position. We include a direction for the mapping
    because it is important to consider the closest position to the right
    or left of a position when mapping the start or end position,
    respectively.

    :param offset_mapping: a dictionary mapping character positions to
        ``None`` if the character is in the skip list or to an int,
        otherwise. The str() stepping below shows the keys are
        string-typed positions.
    :param position: current character position
    :param direction: 1, if moving right; -1 if moving left
    :returns: character position if all skipped characters were removed
        from the document and positions re-assigned; 'EOF'/'SOF' when the
        walk steps outside the mapping; ``None`` for an empty mapping or a
        zero direction
    """
    if( not bool( offset_mapping ) ):
        # Nothing to map against.
        return None
    else:
        try:
            # Step one position at a time in `direction` until we land on
            # a character that was not skipped.
            while( offset_mapping[ position ] == None ):
                position = str( int( position ) + direction )
            return offset_mapping[ position ]
        except KeyError:
            # Walked past the mapping's edge.
            # NOTE(review): moving left yields 'EOF' and moving right
            # 'SOF' — verify the labels are not swapped.
            if( direction < 0 ):
                return 'EOF'
            elif( direction > 0 ):
                return 'SOF'
            else:
                return None
def filter_report(report):
    """Filter a test report dict down to only the interesting keys.

    Raises KeyError when *report* lacks one of them.
    """
    interesting = (
        'behavior',
        'behaviorClose',
        'expected',
        'received',
        'expectedClose',
        'remoteCloseCode',
    )
    return {key: report[key] for key in interesting}
def rename_list_to_dict(rlist):
    """Helper for main to parse args for the rename operator.

    Each entry is an "old:new" pair; the result maps the old document key
    (dict key) to the new key name (dict value). On a malformed entry the
    usage hints are printed and the process exits.
    """
    result = dict()
    for entry in rlist:
        pieces = entry.split(':')
        if len(pieces) != 2:
            print('Cannot parse pair defined as ', entry)
            print('-r args are expected to be pairs of keys strings with a : separator')
            print('Type dbclean --help for usage details')
            exit(-1)
        old_key, new_key = pieces
        result[old_key] = new_key
    return result
def bracket_matches(program):
    """Return a dict mapping each bracket position to its partner's
    position, in both directions.

    e.g.:

    >>> bracket_matches("[]")
    {0: 1, 1: 0}
    """
    pending = []
    pairs = {}
    for position, instruction in enumerate(program):
        if instruction == "[":
            pending.append(position)
        elif instruction == "]":
            partner = pending.pop()
            pairs[partner] = position
            pairs[position] = partner
    return pairs
def epsilonCheck(x, epsilon=1e-6):
    """Check that *x* lies strictly inside (-|epsilon|, |epsilon|)."""
    bound = abs(epsilon)
    return -bound < x < bound
def average(lst):
    """Count the average of the list of numbers."""
    total = sum(lst)
    return total / len(lst)
def hex_to_rgb(_hex):
    """Convert a HEX color representation to an RGB color representation.

    hex :: hex -> [000000, FFFFFF]

    :param _hex: The 3- or 6-char hexadecimal string representing the
        color value (a leading/trailing '#' is stripped).
    :return: RGB representation of the input HEX value.
    :rtype: tuple
    """
    digits = _hex.strip('#')
    n = len(digits) // 3
    channels = [digits[i * n:(i + 1) * n] for i in range(3)]
    if len(digits) == 3:
        # Shorthand form: each digit is doubled ('a' -> 'aa').
        channels = [c * 2 for c in channels]
    r, g, b = (int(c, 16) for c in channels)
    return r, g, b
def _comp_intron_lens(seq_type, inter_blocks, raw_inter_lens): """Returns the length of introns between fragments.""" # set opposite type, for setting introns opp_type = 'hit' if seq_type == 'query' else 'query' # list of flags to denote if an intron follows a block # it reads e.g. this line: # "ATGTT{TT} >>>> Target Intron 1 >>>> {G}TGTGTGTACATT" # and sets the opposing sequence type's intron (since this # line is present on the opposite sequence type line) has_intron_after = ['Intron' in x[seq_type] for x in inter_blocks] assert len(has_intron_after) == len(raw_inter_lens) # create list containing coord adjustments incorporating # intron lengths inter_lens = [] for flag, parsed_len in zip(has_intron_after, raw_inter_lens): if flag: # joint introns if all(parsed_len[:2]): # intron len is [0] if opp_type is query, otherwise it's [1] intron_len = int(parsed_len[0]) if opp_type == 'query' \ else int(parsed_len[1]) # single hit/query introns elif parsed_len[2]: intron_len = int(parsed_len[2]) else: raise ValueError("Unexpected intron parsing " "result: %r" % parsed_len) else: intron_len = 0 inter_lens.append(intron_len) return inter_lens
def objclass2dict(objclass):
    """Convert an objclass into a lazy dict without AttributeError.

    Meta is an objclass on Python 2.7 and has no usable ``__dict__``
    attribute; this collects its non-dunder attributes into a dict
    subclass whose attribute access falls back to ``False`` for missing
    names instead of raising.
    """
    class Dict(dict):
        # dict subclass exposing its items as attributes.
        def __init__(self, data=None):
            if data is None:
                data = {}
            super(Dict, self).__init__(data)
            # Mirror the mapping into the instance __dict__ so items are
            # reachable as plain attributes.
            self.__dict__ = dict(self.items())

        def __getattr__(self, key):
            # Only invoked when normal lookup fails; return False instead
            # of propagating AttributeError.
            try:
                return self.__getattribute__(key)
            except AttributeError:
                return False

    # Collect non-dunder attribute names and their values in dir() order.
    obj_list = [i for i in dir(objclass) if not str(i).startswith("__")]
    obj_values = []
    for objitem in obj_list:
        obj_values.append(getattr(objclass, objitem))
    return Dict(zip(obj_list, obj_values))
def simplify_alleles(*alleles):
    """Simplify alleles by stripping off common postfix bases.

    For example, simplify("AC", "GC") produces ("A", "G") as the "C" base
    is a common postfix of both alleles; simplify("AC", "GT") produces
    ("AC", "GT") as there is no common postfix. No allele is ever reduced
    to the empty string: with ['CACA', 'CA'] the longest common postfix is
    'CA', but stripping it would produce the invalid ['CA', ''], so only
    ['CAC', 'C'] is produced.

    Args:
        *alleles: A tuple of bases, each as a string, to simplify.

    Returns:
        A tuple, one entry per allele in order, with any common postfix
        bases stripped off.
    """
    shortest = min(len(allele) for allele in alleles)
    # Consider only suffix lengths that leave every allele >= 1 char.
    shared = 0
    for i in range(1, shortest):
        if len({allele[-i] for allele in alleles}) > 1:
            break
        shared = i
    if not shared:
        # Fast path: no common postfix at all.
        return alleles
    return tuple(allele[:-shared] for allele in alleles)
def _Scope(x): """Takes Numeric Code and returns String API code Input Values: 1:"", 2:"RS", 3:"S", 4:"Rookies" Used in: """ measure = {1:'',2:"RS",3:"S",4:"Rookies"} try: return measure[x] except: raise ValueError("Please enter a number between 1 and "+str(len(measure)))
def average(sum, tem):
    """
    :param sum:int,the temperature sum
    :param tem:int, the temperature that user entered.
    This function circulate the average temperature.
    """
    # NOTE(review): ``sum`` shadows the builtin but is part of the public
    # keyword-argument interface, so it is kept.
    quotient = sum / tem
    return float(quotient)
def get_vertical_index(typi, category):
    """
    Take a user input for what datatype and category

    Index reference from the user
    0: Totals
    1: Per-game
    2: Per-36-Min
    3: Per-100-Possession
    4: Advanced
    :param typi: a type of file
    :param category: a string input by the user to extract from a file
    :return: the column index for ``category``, or None when unknown
    """
    # Unknown file types yield no index.
    if typi not in (0, 1, 2, 3, 4):
        return None
    # FIXME Could add more if needed
    if category == 'AGE':
        return 2
    if category == 'GAMES':
        return 4
    # '3P' is not available for the Advanced (4) file type.
    if typi != 4 and category == '3P':
        return 10
    return None
def dictionaryDifference(minuend, subtrahend):
    """
    Compute the difference between two dictionaries, using the keys of each
    dictionary.

    Receives:
        minuend: The minuend dictionary.
        subtrahend: The subtrahend dictionary.

    Returns:
        The difference dictionary (minuend-subtrahend)
    """
    # Keys present in the minuend but absent from the subtrahend survive,
    # keeping the minuend's values.
    surviving_keys = minuend.keys() - subtrahend.keys()
    return {key: minuend[key] for key in surviving_keys}
def nested_operation(d, func, *args, **named_args):
    """
    Executes string operation on dict, list, tuple or string.

    Recursively walks ``d`` and applies ``func(value, *args, **named_args)``
    to every leaf value, mutating containers in place.

    :param d: the structure (or plain string) to transform
    :param func: callable applied to each leaf value
    :return: the transformed structure (the same object for containers)

    NOTE(review): tuples pass the isinstance checks but are immutable, so a
    tuple anywhere in the structure still raises TypeError on assignment,
    exactly as in the original — confirm whether tuple support is intended.
    """
    if isinstance(d, str):
        # Bug fix: the original passed the ``str`` *type* to ``func``
        # (``func(str, ...)``) instead of the actual value ``d``.
        return func(d, *args, **named_args)
    c = 0
    for k in d:
        # dicts are indexed by key; sequences by running position.
        update_key = k if isinstance(d, dict) else c
        v = d[update_key]
        if isinstance(v, (dict, list, tuple)):
            v = nested_operation(v, func, *args, **named_args)
        else:
            v = func(v, *args, **named_args)
        d[update_key] = v
        c = c + 1
    return d
def overlaps(i1, s1, l1, i2, s2, l2):
    """
    Checks whether two shapelets overlap (with respect to the time series
    they originated from. In order to overlap, shapelets need to share at
    least one index and originate from the same time series.
    """
    # Shapelets from different time series never overlap.
    if i1 != i2:
        return False
    # Closed index intervals [start, start + length - 1].
    first_start, first_end = s1, s1 + l1 - 1
    second_start, second_end = s2, s2 + l2 - 1
    # Intervals intersect iff each one starts before the other ends.
    return first_start <= second_end and second_start <= first_end
def parameterized_config(template):
    """
    Generates a configuration from the provided template + variables defined in
    current scope

    :param template: a config content templated with {{variables}}
    :return: the template with each ``{name}`` placeholder substituted from
        the module globals and this function's locals.
    """
    # Bug fix: the original called ``locals()`` *inside* a dict comprehension.
    # In Python 3 a comprehension has its own scope, so ``locals()`` there
    # returns only the comprehension's loop variables, never this function's
    # locals. Capture both scopes at function level instead (locals win).
    all_vars = {**globals(), **locals()}
    return template.format(**all_vars)
def parse_thumbprint_from_openssl_output(raw_fingerprint):
    """Parses the thumbprint value from the raw OpenSSL output.

    Example output from openSSL:
    SHA1 Fingerprint=3B:C3:70:46:00:0C:B2:0B:F9:86:98:CF:9D:11:DF:EB:22:B7:41:F5

    Returns:
        string : The certificate thumbprint.
    """
    marker = "SHA1 Fingerprint="
    # Everything after the marker is the colon-separated hex digest.
    hex_with_colons = raw_fingerprint.split(marker)[1]
    return hex_with_colons.replace(":", "").strip()
def percent_diff(a, b):
    """
    Calculate the percent by which `b` deviates from `a`.

    :param a: First value. (Traditionally the initial value).
    :param b: Second value. (Traditionally the final value).
    :return: Percent by which `b` deviates from `a`. (Positive iff b >= a)
    """
    # Deviation relative to the midpoint of the two values.
    # NOTE(review): raises ZeroDivisionError when a == -b — confirm callers
    # never hit that case.
    midpoint = (a + b) / 2
    return (b - a) / midpoint
def _get_approx_string_width(text, font_width, fixed_width=False): """ Get the approximate width of a string using a specific average font width. Args: text(str): Text string to calculate width of. font_width(int): Average width of font characters. fixed_width(bool): Indicates that the font is fixed width. Returns: int: Width of string in pixels. Examples: Call the function with a string and the maximum character width of the font you are using: >>> int(_get_approx_string_width('hello', 10)) 29 This example shows the comparison of simplistic calculation based on a fixed width. Given a test string and a fixed font width of 10, we can calculate the width by multiplying the length and the font character with: >>> test_string = 'GOOGLE|ijkl' >>> _get_approx_string_width(test_string, 10, fixed_width=True) 110 Since some characters in the string are thinner than others we expect that the apporximate text width will be narrower than the fixed width calculation: >>> _get_approx_string_width(test_string, 10) 77 """ if fixed_width: return len(text) * font_width size = 0.0 # A dictionary containing percentages that relate to how wide # each character will be represented in a variable width font. # These percentages can be calculated using the ``_get_character_percentage_dict`` function. char_width_percentages = { "lij|' ": 40.0, '![]fI.,:;/\\t': 50.0, '`-(){}r"': 60.0, '*^zcsJkvxy': 70.0, 'aebdhnopqug#$L+<>=?_~FZT0123456789': 70.0, 'BSPEAKVXY&UwNRCHD': 70.0, 'QGOMm%W@': 100.0 } for s in text: percentage = 100.0 for k in char_width_percentages.keys(): if s in k: percentage = char_width_percentages[k] break size += (percentage / 100.0) * float(font_width) return int(size)
def _kname(obj): """Get name or names out of json result from API server""" if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type"
def strip_meta(value: dict) -> dict:
    """Strip the "_meta" node from dict, recursively."""
    # Recurse into nested dicts; drop every "_meta" key at any depth.
    return {
        key: strip_meta(val) if isinstance(val, dict) else val
        for key, val in value.items()
        if key != "_meta"
    }
def pref(pp, name):
    """ Make prefix-appended name """
    # str() mirrors the %s coercion of the original formatting.
    return str(pp) + '_' + str(name)
def permitted_info_attributes(info_layer_name, permissions):
    """Get permitted attributes for a feature info result layer.

    :param str info_layer_name: Layer name from feature info result
    :param obj permissions: OGC service permissions
    """
    # Map the info result layer name back to its WMS layer name, falling
    # back to the info layer name itself when no alias is defined.
    aliases = permissions.get('feature_info_aliases', {})
    wms_layer_name = aliases.get(info_layer_name, info_layer_name)
    # Permitted attributes for that WMS layer (empty when unknown).
    return permissions['layers'].get(wms_layer_name, {})
def transpose(n):
    """Takes a list-style Matrix and gives back the transpose.

    Bug fix: the original swapped the range bounds
    (``range(len(n[0]))`` outer element index vs ``range(len(n))`` rows),
    so it only worked for square matrices and was wrong (or raised
    IndexError) for rectangular ones. A rows x cols matrix must transpose
    to cols x rows.

    :param n: matrix as a list of equal-length row lists (non-empty).
    :return: new list-of-lists matrix with rows and columns exchanged.
    """
    return [[n[j][i] for j in range(len(n))] for i in range(len(n[0]))]
def list_contains_round(rounds, number):
    """
    :param rounds: list - rounds played.
    :param number: int - round number.
    :return: bool - was the round played?
    """
    # Membership test uses == per element and covers the empty list.
    return number in rounds
def reduce_to_alphanum(string):
    """Removes all non alphanumeric characters from string."""
    # Keep only characters for which str.isalnum() is true.
    return ''.join(filter(str.isalnum, string))
def reorder_candidate(candidates, text):
    """Reorder the candidate list by prioritizing the closest friends.

    Every candidate is a two-element tuple (xpath, text). The neighbors of
    the candidate whose text equals ``text`` are interleaved outwards: the
    immediately preceding candidate, then the immediately following one,
    and so on, with any leftover earlier candidates appended at the end
    (closest first).

    NOTE(review): the original inline example claimed the order
    [d, c, e, b, f, a]; the implementation actually emits the preceding
    neighbor before the following one, i.e. [c, d, b, e, a, f] — behavior
    preserved here, confirm which was intended.

    Returns [] when ``text`` is not present among the candidates.
    """
    texts = [candidate[1] for candidate in candidates]
    # Make sure the current node is in the candidates, otherwise we cannot
    # locate its closest friends.
    if text not in texts:
        return []
    pivot = texts.index(text)
    preceding = candidates[:pivot]  # used as a stack: closest friend on top
    result = []
    for following in candidates[pivot + 1:]:
        if preceding:
            result.append(preceding.pop())
        result.append(following)
    # Keep any remaining earlier candidates, closest first.
    while preceding:
        result.append(preceding.pop())
    return result
def filter_jobs_to_cancel(current_job_name, current_job_id, list_of_job_info):
    """
    This filters for jobs that should be cancelled. There are two cases a job
    should be cancelled.

    1. If a job has the same name as the current job and is of a lower job_id.
    2. Has a different job name. This is because there is the possibility that
       a different workflow started before the current job but will have moved
       onto different parts of the workflow (hence a different job name). It
       also has the option to have a higher job number, if it's kicked off
       after the current job.
    """
    # A job is cancellable when its step name differs, or when it shares the
    # current step name but carries a lower job id.
    return [
        info.job_num
        for info in list_of_job_info
        if info.job_step_name != current_job_name or info.job_num < current_job_id
    ]
def splitext(p):
    """Split a path into root and extension.

    The extension is everything starting at the last dot in the last
    pathname component; the root is everything before that.
    It is always true that root + ext == p.
    """
    # ':' separates pathname components (classic-Mac style), so only a dot
    # appearing after the last ':' can start an extension.
    last_dot = p.rfind('.')
    last_colon = p.rfind(':')
    if last_dot > last_colon:
        return p[:last_dot], p[last_dot:]
    return p, ''
def round_fraction(number):
    """
    rounds the number and to the closest 0, 0.25, 0.5, 0.75, 1, 1.25, etc'
    number: float
    return integer if there's no decimal value else float
    """
    # Snap to quarters (note: round() uses banker's rounding on ties).
    quarters = round(number * 4) / 4
    # Collapse whole values down to int.
    if quarters.is_integer():
        return int(quarters)
    return quarters
def css_property(property_name, property_value):
    """Generate a CSS property:

        property_name: property_value;
    """
    return f'{property_name}: {property_value};'
def partition(array, low, high):
    """
    Lomuto partition: takes the last element as pivot, places the pivot at
    its correct position within array[low:high + 1], puts all elements
    smaller than the pivot to its left and all greater elements to its
    right. Mutates ``array`` in place and returns the pivot's final index.
    """
    pivot = array[high]
    boundary = low  # first index of the "not smaller than pivot" region
    for cursor in range(low, high):
        if array[cursor] < pivot:
            array[boundary], array[cursor] = array[cursor], array[boundary]
            boundary += 1
    # Drop the pivot between the two regions.
    array[boundary], array[high] = array[high], array[boundary]
    return boundary
def append(x, ys):
    """Returns a new list containing the contents of the given list,
    followed by the given element. The input is never mutated."""
    result = list(ys)
    result.append(x)
    return result
def lists2dict(keys, vals):
    """Creates a dictionary from `keys` and `vals`, creating lists for each
    key, and appending values to those lists. This is useful if you have many
    values per key and need to convert to a dict."""
    grouped = {}
    for key, val in zip(keys, vals):
        if key in grouped:
            grouped[key].append(val)
        else:
            grouped[key] = [val]
    return grouped
def linear_norm(x, x_arr: list):
    """
    AKA min max scaling: maps ``x`` linearly into [-1, 1] based on the
    minimum and maximum of ``x_arr``.
    """
    low, high = -1, 1
    arr_min = min(x_arr)
    arr_max = max(x_arr)
    # Same operation order as classic min-max scaling:
    # low + (x - min) * (high - low) / (max - min)
    return low + (x - arr_min) * (high - low) / (arr_max - arr_min)