def calculate_immediate_dominators(nodes, _dom, _sdom):
    """ Determine immediate dominators from dominators and strict dominators. """
    _idom = {}
    for node in nodes:
        if _sdom[node]:
            for x in _sdom[node]:
                if _dom[x] == _sdom[node]:
                    # This must be the only definition of idom:
                    assert node not in _idom
                    _idom[node] = x
    return _idom
def seconds_described(s): """ UTILITY FUNCTION returns the equivalent time in days, hours, minutes and seconds as a descriptive string. INPUT: s (int) - time in seconds. """ minutes, seconds = divmod(s,60) hours, minutes = divmod(minutes, 60) days, hours = divmod(hours, 24) if days !=0: day_print = str(days) + ' day(s) ' hour_print = str(hours) + ' hour(s) ' minute_print = str(minutes) + ' minute(s) ' sec_print = str(seconds) + ' second(s)' elif days == 0 and hours != 0: day_print = '' hour_print = str(hours) + ' hour(s) ' minute_print = str(minutes) + ' minute(s) ' sec_print = str(seconds) + ' second(s)' elif days == 0 and hours == 0 and minutes !=0: day_print = '' hour_print = '' minute_print = str(minutes) + ' minute(s) ' sec_print = str(seconds) + ' second(s)' else: day_print = '' hour_print = '' minute_print = '' sec_print = str(seconds) + ' second(s)' return day_print + hour_print + minute_print + sec_print + '.'
def IncludeCompareKey(line): """Sorting comparator key used for comparing two #include lines. Returns the filename without the #include/#import/import prefix. """ for prefix in ('#include ', '#import ', 'import '): if line.startswith(prefix): line = line[len(prefix):] break # The win32 api has all sorts of implicit include order dependencies :-/ # Give a few headers special sort keys that make sure they appear before all # other headers. if line.startswith('<windows.h>'): # Must be before e.g. shellapi.h return '0' if line.startswith('<atlbase.h>'): # Must be before atlapp.h. return '1' + line if line.startswith('<unknwn.h>'): # Must be before e.g. intshcut.h return '1' + line # C++ system headers should come after C system headers. if line.startswith('<'): if line.find('.h>') != -1: return '2' + line.lower() else: return '3' + line.lower() return '4' + line
def combinedJunctionDist(dist_0, dist_1):
    """Computes the combined genomic distance of two splice junction ends
    from the closest annotated junctions. In essence, it is finding the size
    of the indel that could have created the discrepancy between the
    reference and transcript junctions.

    Examples ('|' character represents the end of an exon):

        Reference:   ----->|          |<-----
        Transcript:    ----->|      |<-----
            dist_0 = -2, dist_1 = +2, combined dist = 4

        Reference:   ----->|          |<-----
        Transcript:  ----->|            |<-----
            dist_0 = 0, dist_1 = +2, combined dist = 2

        Reference:   ----->|          |<-----
        Transcript:   ----->|             |<-----
            dist_0 = +1, dist_1 = +4, combined dist = 3
    """
    # If dist_0 and dist_1 have different signs, the combined distance is
    # the sum of their absolute values
    if dist_0 * dist_1 <= 0:
        combined_dist = abs(dist_0) + abs(dist_1)
    else:
        combined_dist = abs(abs(dist_0) - abs(dist_1))

    return combined_dist
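# Illustrative usage, mirroring the docstring examples above:
combinedJunctionDist(-2, 2)   # -> 4  (opposite signs: abs(-2) + abs(2))
combinedJunctionDist(0, 2)    # -> 2
combinedJunctionDist(1, 4)    # -> 3  (same sign: abs(abs(1) - abs(4)))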
def primal_gap(best_sol, feas_sol):
    """
    :param best_sol: optimal or best known solution value: c^Tx*,
    :param feas_sol: feasible solution value: c^Tx~
    """
    if abs(best_sol) == 0 and abs(feas_sol) == 0:
        return 0
    elif best_sol * feas_sol < 0:
        return 1
    else:
        return abs(best_sol - feas_sol) / max(abs(best_sol), abs(feas_sol))
def cal_proc_loc_from_rank(pnx: int, rank: int):
    """Calculate the location of a rank in a 2D Cartesian topology.

    Arguments
    ---------
    pnx : int
        Number of MPI ranks in the x direction.
    rank : int
        The rank of which we want to calculate local cell numbers.

    Returns
    -------
    pi, pj : int
        The indices of the rank in the 2D MPI topology in x and y directions.
    """
    return rank % pnx, rank // pnx
def splitbytwos(x):
    """Split a string into substrings of length two.

    Args:
        x: The string to split.

    Returns:
        A list of strings of length two
    """
    return [x[2*i:2*i+2] for i in range(len(x)//2)]
def _format_argval(argval): """Remove newlines and limit max length From Allure-pytest logger (formats argument in the CLI live logs). Consider using the same function.""" MAX_ARG_LENGTH = 100 argval = argval.replace("\n"," ") if len(argval) > MAX_ARG_LENGTH: argval = argval[:3]+" ... "+argval[-MAX_ARG_LENGTH:] return argval
def _calc_scalar_potential( coeff, cos_theta, sin_n, cos_n, legendre_poly, legendre_poly_der, a_over_r_pow, max_n, ): """ Calculates the partial scalar potential values """ B_r = 0 B_theta = 0 B_phi = 0 for n in range(1, max_n): current_a_over_r_power = a_over_r_pow[n] for m in range(n + 1): current_cos_n = cos_n[m] current_sin_n = sin_n[m] g_cos = coeff[m][n] * current_cos_n g_sin = coeff[m][n] * current_sin_n h_sin = coeff[n][m - 1] * current_sin_n h_cos = coeff[n][m - 1] * current_cos_n B_r += ( current_a_over_r_power * (n + 1) * (g_cos + h_sin) * legendre_poly[m][n] ) B_theta -= ( current_a_over_r_power * (g_cos + h_sin) * legendre_poly_der[m][n] ) B_phi -= current_a_over_r_power * m * (-g_sin + h_cos) * legendre_poly[m][n] try: B_phi *= 1 / cos_theta except ZeroDivisionError: B_phi = B_phi return B_r, B_theta, B_phi
def get_field_description(field, description_keyword='description'): """ Gets field description if available, using the description_keyword""" return getattr(field, description_keyword, '')
def compute_f1(actual, predicted): """ Computes the F1 score of your predictions. Note that we use 0.5 as the cutoff here. """ num = len(actual) true_positives = 0 false_positives = 0 false_negatives = 0 true_negatives = 0 for i in range(num): if actual[i] >= 0.5 and predicted[i] >= 0.5: true_positives += 1 elif actual[i] < 0.5 and predicted[i] >= 0.5: false_positives += 1 elif actual[i] >= 0.5 and predicted[i] < 0.5: false_negatives += 1 else: true_negatives += 1 try: precision = true_positives / (true_positives + false_positives) recall = true_positives / (true_positives + false_negatives) print("Accuracy = %f " % ((true_positives + true_negatives) / (true_positives + true_negatives + false_negatives + false_positives))) F1 = 2 * precision * recall / (precision + recall) except ZeroDivisionError: F1 = 0.0 return F1
def operation(instruction_sign, value_1, value_2):
    """Perform the operation indicated by instruction_sign ('+' or '-') on the two values."""
    if instruction_sign == '+':
        return value_1 + value_2
    elif instruction_sign == '-':
        return value_1 - value_2
def calculateHandlen(hand):
    """
    Returns the length (number of letters) in the current hand.

    hand: dictionary (string-> int)
    returns: integer
    """
    return sum(hand.values())
def _tiff(h):
    """TIFF (can be in Motorola or Intel byte order)"""
    if h[:2] in (b'MM', b'II'):
        return 'tiff'
def bytearray_to_bits(x):
    """Convert bytearray to a list of bits"""
    result = []
    for i in x:
        bits = bin(i)[2:]
        bits = '00000000'[len(bits):] + bits  # left-pad each byte to 8 bits
        result.extend([int(b) for b in bits])
    return result
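# Illustrative usage: each byte expands to its 8-bit big-endian representation.
bytearray_to_bits(bytearray([5]))       # -> [0, 0, 0, 0, 0, 1, 0, 1]
bytearray_to_bits(bytearray([1, 255]))  # -> [0, 0, 0, 0, 0, 0, 0, 1,  1, 1, 1, 1, 1, 1, 1, 1]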
def note_hash(channel, pitch): """Generate a note hash.""" return channel * 128 + pitch
def nocrc(byte_cmd): """ CRC function to provide no crc """ return ['', '']
def isCFile(filename):
    """Returns True if `filename` ends in .c, .cpp, .cc"""
    return filename.endswith((".c", ".cpp", ".cc"))
def is_install_cmd(argv): """Method checks if installation is requested Args: argv (list): command arguments Returns: bool: result """ res = False if ('install' in argv or 'bdist_egg' in argv or 'bdist_wheel' in argv): res = True return res
def ones_complement_addition(number1, number2):
    """
    Build the one's complement addition as used in the calculation of IP-, TCP- and UDP-headers.
    To see how the one's complement addition works, visit: https://youtu.be/EmUuFRMJbss

    :param number1: A 16-bit number as Integer
    :param number2: A 16-bit number as Integer
    :return: One's complement of the two numbers
    """
    if not isinstance(number1, int) and not isinstance(number2, int):
        return None
    result = bin(number1 + number2)
    # string will begin with '0b', just ignore result[0] and result[1]
    if len(result) < 18:
        # add leading zeros
        partial_result = result[2:]
        while len(partial_result) < 16:
            partial_result = '0' + partial_result
        result = '0b' + partial_result
    if len(result) > 18:
        if len(result) == 19 and result[2] == '1':
            carry_bit = '1'
            result = list(result)  # convert the string to a list
            result.pop(2)
            for i in range(1, 17):
                if result[-i] == '0' and carry_bit == '1':
                    result[-i] = '1'
                    carry_bit = '0'
                elif result[-i] == '1' and carry_bit == '1':
                    result[-i] = '0'
                    carry_bit = '1'
                elif carry_bit == '0':
                    break
                else:
                    # this should never be executed
                    carry_bit = '0'
                    raise ValueError
            result = ''.join(result)  # convert the list to a string
    return int(result, 2)
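# Illustrative worked example of the end-around carry (values chosen for illustration):
ones_complement_addition(0xFFFF, 0x0001)  # -> 1       (0x10000 overflows 16 bits; the carry wraps into the low bit)
ones_complement_addition(0x1234, 0x4321)  # -> 0x5555  (no overflow, plain 16-bit sum)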
def _int_from_val(x, default=None): """ Returns an integer from the passed in value, unless it is None or the string 'None' - in which case it returns default. """ if (x is not None and x != 'None' and x != ''): return int(x) else: return default
def _e_f(rho, rhou, e, p): """Computes the flux of the energy equation.""" return (e + p) * rhou / rho
def shatter_seq(seq, size):
    """Shatters a sequence into overlapping fragments of length = size.
    Outputs a list of fragments."""
    fragments = []
    i = 0
    while i + size <= len(seq):
        fragments.append(seq[i:i + size])
        i += 1
        # print('shatter', fragments)
    return fragments
def insertion_sort_swaps(array):
    """Number of times insertion sort performs a swap

    Args:
        array: An unsorted list that will undergo insertion sort.

    Returns:
        The number of swaps that insertion sort performed.
    """
    swap = 0
    for i, x in enumerate(array):
        k = i
        while k > 0 and array[k] < array[k-1]:
            array[k], array[k-1] = array[k-1], array[k]
            swap += 1
            k -= 1
    return swap
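# Illustrative usage: the swap count equals the number of inversions in the input.
insertion_sort_swaps([3, 1, 2])  # -> 2
insertion_sort_swaps([1, 2, 3])  # -> 0
insertion_sort_swaps([3, 2, 1])  # -> 3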
def from_bits(bin_str: str):
    """
    This function will convert a binary string (with or without prefix) into an integer.

    :param bin_str: A binary string to convert into an integer
    :return: An integer representing the given binary string
    """
    return int(bin_str, 2)
def tracklims(lims, x_i=[], y_i=[]): """ Return modified limits list. INPUT: - ``lims`` -- A list with 4 elements ``[xmin,xmax,ymin,ymax]`` - ``x_i`` -- New x values to track - ``y_i`` -- New y values to track OUTPUT: A list with 4 elements ``[xmin,xmax,ymin,ymax]`` EXAMPLES:: sage: from sage.matroids import matroids_plot_helpers sage: matroids_plot_helpers.tracklims([0,5,-1,7],[1,2,3,6,-1], ....: [-1,2,3,6]) [-1, 6, -1, 7] .. NOTE:: This method does NOT do any checks. """ if lims is not None and lims[0] is not None and lims[1] is not None and \ lims[2] is not None and lims[3] is not None: lims = [min(min(x_i), lims[0]), max(max(x_i), lims[1]), min(min(y_i), lims[2]), max(max(y_i), lims[3])] else: lims = [min(x_i), max(x_i), min(y_i), max(y_i)] return lims
def executeSQL(query): """ Dummy function to test android-api interactions. Will be deprecated. """ print(query) #result = database.query_db(query) # print([x for x in result]) #return helper.row_jsonify(result) if 'delete' in query.lower() or 'drop' in query.lower(): return "Oh you slimy cheeky little kid! Quit messing with my database!" return "Try harder, something more destructive. Maybe like little bobby tables! ;P"
def lowerconv(upperdecision: int, upperllr: float, lowerllr: float) -> float:
    """PERFORMS IN LOG DOMAIN
    llr = lowerllr * upperllr, if upperdecision == 0
    llr = lowerllr / upperllr, if upperdecision == 1
    """
    if upperdecision == 0:
        return lowerllr + upperllr
    else:
        return lowerllr - upperllr
def check(number): """check that the number of robots is between 1 and 100.""" number = int(number) if number < 1: number = 1 if number > 100: number = 100 return number
def get_sequence(center_idx, half_len, sample_rate, max_num_frames): """ Sample frames among the corresponding clip. Args: center_idx (int): center frame idx for current clip half_len (int): half of the clip length sample_rate (int): sampling rate for sampling frames inside of the clip max_num_frames (int): number of expected sampled frames Returns: seq (list): list of indexes of sampled frames in this clip. """ seq = list(range(center_idx - half_len, center_idx + half_len, sample_rate)) for seq_idx in range(len(seq)): if seq[seq_idx] < 0: seq[seq_idx] = 0 elif seq[seq_idx] >= max_num_frames: seq[seq_idx] = max_num_frames - 1 return seq
def _find_gap_positions(accession): """ In the longest sequence, find each gapped position. 0-index """ list_gap = [] gap_start = 0 gap_end = 0 current_gap = False accession_len = len(accession) for i in range(accession_len): #Get current position base call current_pos = accession[i] #Gap discovered --> store gap index, and begin recording contiguous #gap region if current_pos == '-' and current_gap is False: current_gap = True gap_start = i #If we're currently recording a gap, and a non-gap position is discovered, #then record the current position as the end and append gap coordinates #to the list. elif current_pos != '-' and current_gap is True: gap_end = i list_gap.append((gap_start, gap_end)) current_gap = False #Final case --> gap continues to the very end of the alignment. #In this case, we want to return the final gap coordinates, with the gap #end being 1 more than the current index. elif current_pos == '-' and i == accession_len-1 and current_gap is True: gap_end = i + 1 list_gap.append((gap_start, gap_end)) return list_gap
def macthing_templates(templates_peptide, templates_minipept):
    """
    Searches for the atoms of peptide bonds within the small-peptide patterns.

    Parameters
    ----------
    templates_peptide : list
        List of atoms of peptide bonds.
    templates_minipept : list
        List of atoms of parts of peptide bonds.

    Returns
    -------
    templates_minipept_cop : list
        Corrected list of atoms of parts of peptide bonds.
    """
    templates_minipept_cop = templates_minipept.copy()
    for i in templates_minipept:
        check = 0
        for tp in templates_peptide:
            if check == len(i):
                continue
            check = 0
            for si in i:
                if si in tp:
                    check += 1
        if check != len(i):
            templates_minipept_cop.remove(i)
    return templates_minipept_cop
def _subsequence(s, c):
    """
    Calculate the length of a subsequence

    Takes as parameter list like object `s` and returns the length of the
    longest subsequence of `s` constituted only by consecutive character
    `c`s.

    Example: If the string passed as parameter is "001000111100", and `c`
    is '0', then the longest subsequence of only '0's has length 3.
    """
    count = 0
    maxlen = 0
    for bit in s:
        if bit == c:
            count += 1
            if count > maxlen:
                maxlen = count
        else:
            count = 0
    return maxlen
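# Illustrative usage, matching the docstring example:
_subsequence("001000111100", '0')  # -> 3  (the run "000")
_subsequence("001000111100", '1')  # -> 4  (the run "1111")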
def __format_command(command): """ Formats the command taken by run_command/run_pipe. Examples: > __format_command("sort") 'sort' > __format_command(["sort", "-u"]) 'sort -u' > __format_command([["sort"], ["unique", "-n"]]) 'sort | unique -n' """ if isinstance(command, list): if command and isinstance(command[0], list): return " | ".join([" ".join(cmd) for cmd in command]) return " ".join(command) return command
def get_sep(file_path: str) -> str: """Figure out the sep based on file name. Only helps with tsv and csv. Args: file_path: Path of file. Returns: sep """ if file_path[-4:] == '.tsv': return '\t' elif file_path[-4:] == '.csv': return ',' raise ValueError('Input file is not a .csv or .tsv')
def catalan_recursive(n: int) -> int: """ Returns the nth catalan number with exponential time complexity. >>> catalan_recursive(5) 42 >>> catalan_recursive(10) 16796 >>> catalan_recursive(0) 1 >>> catalan_recursive(-5) 1 >>> catalan_recursive(1.5) -1 """ if not isinstance(n, int): return -1 if n <= 1: return 1 number = 0 for i in range(n): number += catalan_recursive(i) * catalan_recursive(n - i - 1) return number
def order(letter):
    """Given letter, returns its alphabetic index"""
    return ord(str.lower(letter)) - 96
def MergeTwoListsAsDic(keys, values):
    """Merge two parallel lists into a dictionary mapping keys[i] -> values[i]."""
    dic = {}
    for i in range(len(keys)):
        dic[keys[i]] = values[i]
    return dic
def n_line(file): """ Return the number of lines for a file Args: file (str): File path Returns: int: number of lines """ count = 0 buffer = 16 * 1024 * 1024 # buffer=16MB with open(file, "rb") as inf: while True: block = inf.read(buffer) if not block: break count += block.count(b"\n") return count
def volume(length, width, height):
    """ (float, float, float) -> float

    Computes the volume of a rectangular box (cuboid) defined by its
    length, width and height
    """
    return length * width * height
def checksum(buf): """ @param buf: buffer without the checksum part """ c = 0x1234567 for i, b in enumerate(buf): c += b * (i+1) return c
def as_dict_with_keys(obj, keys): """ Convert SQLAlchemy model to list of dictionary with provided keys. """ return [dict((a, b) for (a, b) in zip(keys, item)) for item in obj]
def get_email_domain_part(address):
    """
    Get the domain part from email
    ab@cd.com -> cd.com
    """
    return address[address.find("@") + 1:].strip().lower()
def display_lef(lef_dict): """ Used to display lef extracted information to user for verification of pins""" cell_name = lef_dict['cell_name'] cell_area = lef_dict['area'] pin_list = list(lef_dict['pin'].keys()) input_pins_list = [ pin for pin in pin_list if lef_dict['pin'][pin]['direction'] == 'INPUT'] output_pins_list = [ pin for pin in pin_list if lef_dict['pin'][pin]['direction'] == 'OUTPUT'] power_pins_list = [ pin for pin in pin_list if lef_dict['pin'][pin]['direction'] == 'INOUT'] clk_pins_list = [ pin for pin in pin_list if lef_dict['pin'][pin]['use'] == 'CLOCK'] input_pins_str = ' '.join(input_pins_list) output_pins_str = ' '.join(output_pins_list) power_pins_str = ' '.join(power_pins_list) print('-----Lef data------') print(f'- Cell Name: {cell_name}') print(f'- Cell Area: {cell_area}') print(f'- Input Pins: {input_pins_str}') print(f'- Output Pins: {output_pins_str}') print(f'- Power Pins: {power_pins_str}') if clk_pins_list: clk_pins_str = ' '.join(clk_pins_list) print(f'- CLK Pins: {clk_pins_str}') # Checking for input pin with signal Use input_signal_pin = [ pin for pin in pin_list if lef_dict['pin'][pin]['use'] == 'SIGNAL' and lef_dict['pin'][pin]['direction'] == 'INPUT'] return pin_list, lef_dict, input_signal_pin, output_pins_list
def bytes_endswith_range(x: bytes, suffix: bytes, start: int, end: int) -> bool:
    """Does specified slice of given bytes object end with the subsequence suffix?

    Compiling bytes.endswith with range arguments compiles this function. This
    function is only intended to be executed in this compiled form.

    Args:
        x: The bytes object to examine.
        suffix: The subsequence to look for.
        start: Beginning of slice of x. Interpreted as slice notation.
        end: End of slice of x. Interpreted as slice notation.

    Returns:
        Result of check.
    """
    if start < 0:
        start += len(x)
        if start < 0:
            start = 0
    if end < 0:
        end += len(x)
        if end < 0:
            end = 0
    elif end > len(x):
        end = len(x)
    if end - start < len(suffix):
        return False
    index = end - len(suffix)
    if index < 0:
        return False
    for i in suffix:
        if x[index] != i:
            return False
        index += 1
    return True
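# Illustrative checks against the built-in this mirrors (values chosen for illustration):
bytes_endswith_range(b"hello world", b"lo", 0, 5)      # -> True, same as b"hello world".endswith(b"lo", 0, 5)
bytes_endswith_range(b"hello world", b"world", -5, 11) # -> True, negative start is normalized like slice notation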
def line_name_to_pyneb_format(lineName): """ Takes a line name in the form similar to OIII-5007A or H-Alpha and returns the pyneb format: H1r_6563A. This function is basic and assumes tha the letter 'I' in the lineName are used only for roman numerals """ if 'H-Alpha' in lineName: atomName, ionNumber, restWave = 'H', '1r', '6563A' elif 'H-Beta' in lineName: atomName, ionNumber, restWave = 'H', '1r', '4861A' elif 'H-Gamma' in lineName: atomName, ionNumber, restWave = 'H', '1r', '4341A' elif 'H-Delta' in lineName: atomName, ionNumber, restWave = 'H', '1r', '4102A' elif 'HeIH8' in lineName: atomName, ionNumber, restWave = 'He', '1r', lineName.split('-') elif 'I-' in lineName or 'IV-' in lineName: ionName, restWave = lineName.split('-') ionNumber = '4' if 'IV' in ionName else str(ionName.count('I')) atomName = ionName.split('I')[0] restWave = restWave.split('_')[0] else: print("Unknown lineName type: %s" % lineName) return("XX_XXXXX") pynebName = "{0}{1}_{2}".format(atomName, ionNumber, restWave) return pynebName
def compare_lists(bench, val): """ Checks to see if two list like objects are the same. """ if not len(bench) == len(val): return False if not sorted(bench) == sorted(val): return False return True
def rows_to_columns(input_matrix): """ Turn matrix row into columns :param input_matrix: list of lists of numerical values of consistent length :return: """ return [[input_matrix[p][i] for p in range(len(input_matrix))] for i in range(len(input_matrix[0]))]
def less_than(values, puzzle_input):
    """If the first parameter is less than the second parameter, it stores 1
    in the position given by the third parameter. Otherwise it stores 0.
    """
    if values[0] < values[1]:
        puzzle_input[values[2]] = 1
    else:
        puzzle_input[values[2]] = 0
    return puzzle_input
def unpack(variables): """ remove first dimension and zero padding from each variable variables is a list with one tensor per variable a tensor [3, 5, 4] may be unpacked to [[5, 4], [2, 4], [1, 4]] """ all_unpacked = [] for v in variables: # note on gpu sorted; on cpu not unless specified unpacked = [item[item.ne(0).nonzero()[:, 0].unique(sorted=True)] for item in v] all_unpacked.append(unpacked) return all_unpacked
def heuristic(a, b):
    """Manhattan-distance heuristic between two grid points.
    :param a: (x, y) tuple
    :param b: (x, y) tuple
    :return: abs(x1 - x2) + abs(y1 - y2)
    """
    (x1, y1) = a
    (x2, y2) = b
    return abs(x1 - x2) + abs(y1 - y2)
def _get_single_limb(asm_str): """returns a single limb with a potential increment (e.g "*6++" or "*7")""" if len(asm_str.split()) > 1: raise SyntaxError('Unexpected separator in limb reference') if not asm_str.startswith('*'): raise SyntaxError('Missing \'*\' character at start of limb reference') if asm_str.endswith('++'): inc = True limb = asm_str[1:-2] else: inc = False limb = asm_str[1:] if not limb.isdigit(): raise SyntaxError('limb reference not a number') return int(limb), inc
def getArch(rec): """Return arch type (intel/amd/arm). """ info = rec["product"]["attributes"] if info["physicalProcessor"].startswith("Intel "): return "intel" if info["physicalProcessor"].startswith("High Frequency Intel "): return "intel" if info["physicalProcessor"].startswith("AMD EPYC "): return "amd" if info["physicalProcessor"].startswith("AWS Graviton"): return "arm" if info["physicalProcessor"].startswith("Variable"): return "intel" raise Exception("unknown cpu: %s" % info["physicalProcessor"])
def add_trailing_slash(path): """Add a trailing slash if not present Parameters ---------- path : str A string representing a path Returns ------- str A new string with a trailing / if not previously present. """ if path[-1] != '/': path += '/' return path
def _collatz_next_number(n):
    """Given a positive whole number:

    Return n // 2 if n is even.
    Return 3 * n + 1 otherwise.
    Throw an error if n <= 0 or n is not an integer.
    """
    # Input validation added to match the contract stated in the docstring.
    if not isinstance(n, int) or n <= 0:
        raise ValueError("n must be a positive integer")
    if n % 2 == 0:
        return n // 2
    else:
        return 3 * n + 1
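# Illustrative usage: iterating it produces the usual Collatz trajectory,
# e.g. 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.
n = 6
while n != 1:
    n = _collatz_next_number(n)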
def calculate_precision(total_correct: int, total_found: int) -> float: """ Calculate precision as the ratio of correct acronyms to the found acronyms. :param total_correct: :param total_found: :return: """ return total_correct / total_found if total_found != 0 else 0
def listdir_matches(match): """Returns a list of filenames contained in the named directory. Only filenames which start with `match` will be returned. Directories will have a trailing slash. """ import os last_slash = match.rfind('/') if last_slash == -1: dirname = '.' match_prefix = match result_prefix = '' else: match_prefix = match[last_slash + 1:] if last_slash == 0: dirname = '/' result_prefix = '/' else: dirname = match[0:last_slash] result_prefix = dirname + '/' def add_suffix_if_dir(filename): if (os.stat(filename)[0] & 0x4000) != 0: return filename + '/' return filename matches = [add_suffix_if_dir(result_prefix + filename) for filename in os.listdir(dirname) if filename.startswith(match_prefix)] return matches
def get_unique_characters_again(string: str) -> str:
    """Returns string with all recurring characters removed."""
    # dict.fromkeys keeps only the first occurrence of each character while
    # preserving the original order (a plain set would scramble it).
    return ''.join(dict.fromkeys(string.lower()))
def multiply_by_two(number): """Returns the given number multiplied by two The result is always a floating point number. This keyword fails if the given `number` cannot be converted to number. """ return float(number) * 2
def map_pronoun(word, male_names, female_names): """ map word with male and females names, profession, title etc. """ pronoun = "" if word in male_names or word.lower() in male_names: pronoun = "he" elif word in female_names or word.lower() in female_names: pronoun = "she" return pronoun
def get_value(registers, register): """Gets the value""" if register >= 'a' and register <= 'z': return registers[register] return int(register)
def to_weird_case(string):
    """
    Transforms a string into weird case.

    :param string: a string of words.
    :return: the same string with all even indexed characters in each word
        upper cased, and all odd indexed characters in each word lower cased.
    """
    final = []
    for x in string.split():
        result = ""
        for i, j in enumerate(x):
            if i % 2:
                result += j.lower()
            else:
                result += j.upper()
        final.append(result)
    return " ".join(final)
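# Illustrative usage:
to_weird_case("this is a test")  # -> "ThIs Is A TeSt"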
def isuzatv(znak):
    """Checks whether the character is UZATV (a closing square bracket)."""
    return znak == ']'
def getDescr(bitInfo, intVal): """Return information about the bits in an integer Inputs: - bitInfo a sequence (list or tuple) of sequences: - bit number, 0 is the least significant bit - info: string describing the associated bit - intVal an integer whose bits are to be described Returns: - infoList a list of info strings, one for each bit that is set in intVal and has a corresponding entry in bitInfo. The returned info is in the same order as it appears in bitInfo. """ return [info for bitNum, info in bitInfo if (1<<bitNum) & intVal]
def mk_number(results): """ Given a list of predicted tag values (prediction for filtered chunks is None), return a list contain the most common prediction of each interval of consecutive chunks with a prediction other than None >>> results = [None, 1, 1, 1, 1, None, None, 2, 2, 3, 2, None, None, 'banana', 'banana', 'fruit', None] >>> number = mk_number(results) >>> assert number == [1, 2, 'banana'] """ i = 0 number = [] if results[0] is not None and results[1] is not None: pressed = True tags = [results[0], results[1]] j = 1 while pressed: if j == len(results) - 1: break if results[j + 1] is not None: tags.append(results[j + 1]) else: pressed = False j += 1 if len(tags) > 2: number.append(max(set(tags), key=tags.count)) while i < len(results) - 1: if results[i] is None and results[i + 1] is not None: pressed = True tags = [results[i + 1]] j = i + 1 while pressed: if j == len(results) - 1: break if results[j + 1] is not None: tags.append(results[j + 1]) else: pressed = False j += 1 if len(tags) > 2: number.append(max(set(tags), key=tags.count)) i += 1 return number
def changed(old,new,delta,relative=True): """ Tests if a number changed significantly -) delta is the maximum change allowed -) relative decides if the delta given indicates relative changes (if True) or absolute change (if False) """ delta = abs(delta) epsilon = 1.0 if old > epsilon: if relative: notChanged = (new <= (1+delta)*old) and (new >= (1-delta)*old) else: notChanged = (new <= old+delta) and (new >= old-delta) elif old < -epsilon: if relative: notChanged = (new >= (1+delta)*old) and (new <= (1-delta)*old) else: notChanged = (new >= old-delta) and (new <= old+delta) else: notChanged = (new >= old-epsilon) and (new <= epsilon+old) return not notChanged
def KK_RC20(w, Rs, R_values, t_values):
    """
    Kramers-Kronig Function: -RC-

    Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)
    """
    # Rs in series with 20 RC elements; equivalent to writing the 20 terms out explicitly.
    return Rs + sum(R_values[i] / (1 + w * 1j * t_values[i]) for i in range(20))
def payoff_put_fprime(underlying, strike, gearing=1.0): """payoff_put_fprime derivative of payoff of call option with respect to underlying. :param underlying: :param strike: :param gearing: :return: value of derivative with respect to underlying. :rtype: float """ if underlying < strike: return -gearing else: return 0.0
def normalized_probabilistic_similarity(lang_model, query_model):
    """
    Returns the 'normalized probability' (invented by me) of a query text belonging to a language model

    :rtype : float
    :param lang_model: The model of a language (usually a dictionary of features and its values)
    :param query_model: The query text (usually a dictionary of features and its values)
    :return: The normalized probability that the given query belongs to the provided language model
    """
    # if there's no query, then there's no chance it belongs to the model
    if query_model is None:
        return 0.0
    # getting the length of the query model
    n = len(query_model)
    # if the query model is empty, then there's no chance it belongs to the model
    if n == 0:
        return 0.0
    # computing the total of the feature appearances in the language model
    lang_total = float(sum(lang_model.values()))
    # computing the total of the feature appearances in the query
    query_total = float(sum(query_model.values()))
    # lambda function to compute distance
    d = lambda x: 1.0 if x not in lang_model else abs(query_model[x] / query_total - lang_model[x] / lang_total)
    # computing the total distance of the query to the model
    query_total_distance = float(sum([d(f) for f in query_model]))
    # returning the normalized probability
    return 1.0 - query_total_distance / n
def urgency(t, base, t1, slope): """Ramping urgency function. Evaluate the ramping urgency function at point `t`. Returns ReLu(t-t1)*slope + base. """ return base + ((t-t1)*slope if t>=t1 else 0)
def cell(data, html_class='center'): """Formats table cell data for processing in jinja template.""" return { 'data': data, 'class': html_class, }
def create_GAN_hparams(generator_hidden_layers=[30, 30], discriminator_hidden_layers=[30, 30], learning_rate=None, epochs=100, batch_size=32, activation='relu', optimizer='Adam', loss='mse', patience=4, reg_term=0, generator_dropout=0, discriminator_dropout=0, filters=3): """ Creates hparam dict for input into GAN class. Contain Hyperparameter info :return: hparam dict """ names = ['generator_hidden_layers', 'discriminator_hidden_layers', 'learning_rate', 'epochs', 'batch_size', 'activation', 'optimizer', 'loss', 'patience', 'reg_term', 'generator_dropout', 'discriminator_dropout', 'filters'] values = [generator_hidden_layers, discriminator_hidden_layers, learning_rate, epochs, batch_size, activation, optimizer, loss, patience, reg_term, generator_dropout, discriminator_dropout, filters] hparams = dict(zip(names, values)) return hparams
def get_precedent_type(type1, type2): """Compare and return the most precedent type between two numeric types, i.e., int, float or complex. Parameters ---------- type1 : type The first type to be compared with. type2 : type The second type to be compared with. Returns ------- type The numeric type with most precedent order. """ if type1 == complex or type2 == complex: return complex if type1 == float or type2 == float: return float return int
def validate_required_keys_in_dict(dictionary, key_list): """ Check if the dictionary contains the required keys. If not, raise an exception. :param args: A request instance. :param key_list: The keys that should be in the json structure (only 1st level keys). :return: Returns an array of individual json blocks. """ for key in key_list: if key not in dictionary.keys(): raise ValueError("Missing JSON key: '{}'.".format(key)) return dictionary
def dist_flat_top_hex_grid(path):
    """https://www.redblobgames.com/grids/hexagons/#coordinates ."""
    x = 0
    y = 0
    z = 0
    max_distance = 0
    for direction in path.split(','):
        if direction == 'n':
            y += 1
            z += -1
        elif direction == 'ne':
            x += 1
            z += -1
        elif direction == 'se':
            x += 1
            y += -1
        elif direction == 's':
            y += -1
            z += 1
        elif direction == 'sw':
            x += -1
            z += 1
        elif direction == 'nw':
            x += -1
            y += 1
        else:
            raise ValueError('Invalid direction: {0}'.format(direction))
        max_distance = max(max_distance, (abs(x) + abs(y) + abs(z)) // 2)
    # In cube coordinates, the hex distance from the origin is half the
    # Manhattan distance: (|x| + |y| + |z|) / 2.
    # https://www.redblobgames.com/grids/hexagons/#distances
    return ((abs(x) + abs(y) + abs(z)) // 2, max_distance)
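# Illustrative walks (returns: final distance, maximum distance reached along the way):
dist_flat_top_hex_grid("ne,ne,ne")     # -> (3, 3)
dist_flat_top_hex_grid("ne,ne,sw,sw")  # -> (0, 2)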
def get_mirror_path_from_module(app_module): """ >>> app_module = {'git_repo': 'git@github.com:claranet/ghost.git'} >>> get_mirror_path_from_module(app_module) '/ghost/.mirrors/git@github.com:claranet/ghost.git' >>> app_module = {'git_repo': ' git@github.com:claranet/spaces.git '} >>> get_mirror_path_from_module(app_module) '/ghost/.mirrors/git@github.com:claranet/spaces.git' """ return "/ghost/.mirrors/{remote}".format(remote=app_module['git_repo'].strip())
def get_header(headers: dict, keyname: str, default: str) -> str: """ This function deals with the inconsistent casing of http headers :( A fine example of why we can't have nice things """ for k, v in headers.items(): if k.lower() == keyname.lower(): return v return default
def gather_squares_triangles(p1,p2,depth): """ Draw Square and Right Triangle given 2 points, Recurse on new points args: p1,p2 (float,float) : absolute position on base vertices depth (int) : decrementing counter that terminates recursion return: squares [(float,float,float,float)...] : absolute positions of vertices of squares triangles [(float,float,float)...] : absolute positions of vertices of right triangles """ # Break Recursion if depth is met if depth == 0: return [],[] # Generate Points pd = (p2[0] - p1[0]),(p1[1] - p2[1]) p3 = (p2[0] - pd[1]),(p2[1] - pd[0]) p4 = (p1[0] - pd[1]),(p1[1] - pd[0]) p5 = (p4[0] + (pd[0] - pd[1])/2),(p4[1] - (pd[0] + pd[1])/2) # Gather Points further down the tree squares_left,triangles_left = gather_squares_triangles(p4,p5,depth-1) squares_right,triangles_right = gather_squares_triangles(p5,p3,depth-1) # Merge and Return squares = [[p1,p2,p3,p4]]+squares_left+squares_right triangles = [[p3,p4,p5]]+triangles_left+triangles_right return squares,triangles
def find_max_time_overlap(hypseg, reflist): """Find reference segment which encompasses the maximum time of the hypothesis segment.""" hbeg, hend = hypseg[0], hypseg[2] times = [] for [rlbl, rseg] in reflist: b = max(hbeg, rseg[0]) e = min(hend, rseg[2]) times.append(e - b) return times.index(max(times))
def common_filters(marker=None, limit=None, sort_key=None, sort_dir=None, all_projects=False): """Generate common filters for any list request. :param all_projects: list containers in all projects or not :param marker: entity ID from which to start returning entities. :param limit: maximum number of entities to return. :param sort_key: field to use for sorting. :param sort_dir: direction of sorting: 'asc' or 'desc'. :returns: list of string filters. """ filters = [] if all_projects is True: filters.append('all_projects=1') if isinstance(limit, int): filters.append('limit=%s' % limit) if marker is not None: filters.append('marker=%s' % marker) if sort_key is not None: filters.append('sort_key=%s' % sort_key) if sort_dir is not None: filters.append('sort_dir=%s' % sort_dir) return filters
def captcha_dupes(numbers):
    """Sum only the digits that match the one next in a cyclic string."""
    total = 0
    for i in range(len(numbers)):
        if numbers[i] == numbers[i - 1]:
            total += int(numbers[i])
    return total
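# Illustrative usage (the string is treated as cyclic, so the last digit is compared with the first):
captcha_dupes("1122")  # -> 3  (the pairs 1,1 and 2,2)
captcha_dupes("1111")  # -> 4  (every digit matches its neighbour)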
def formula_search_to_dict(raw_result): """Dictionary form for API""" return [ { "id": item.cluster_id, "text": item.text, "n_entries": item.n_entries, "n_texts": item.unique_text, "verb_text": item.verb_text, } for item in raw_result ]
def escape_facet_value(value: str) -> str: """Escape and quote a facet value for an Algolia search.""" value = value.replace('"', r"\"").replace("'", r"\'") value = f'"{value}"' return value
def call_safe(*args, **kwargs): """ Safely call function that involves a report. If function raises an error, log the error with report.log_error() method. """ report, func, *args = args if not callable(func): func = getattr(report, func) try: return func(*args, **kwargs) except Exception as ex: msg = f"{type(ex).__name__}: {ex}" report.log_error(msg, code=ex) return None
def max_sub_array(nums):
    """Returns the max subarray sum of the given list of numbers.
    Returns 0 if nums is None or an empty list.

    Time Complexity: O(n)
    Space Complexity: O(1)
    """
    if nums is None:
        return 0
    if len(nums) == 0:
        return 0

    max_sum = nums[0]
    curr_sum = nums[0]
    for i in range(1, len(nums)):
        curr_sum = max(curr_sum + nums[i], nums[i])
        max_sum = max(curr_sum, max_sum)
    return max_sum
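# Illustrative usage with the classic Kadane's-algorithm example:
max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4])  # -> 6  (subarray [4, -1, 2, 1])
max_sub_array([])                               # -> 0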
def _make_sequence(x): """Convert an event _sequence to a python list""" s = [] if x: s = [x[0]] + x[1] return s
def tile_type(tile_ref, level, ground_default, sky_default): """Returns the tile type at the given column and row in the level. tile_ref is the column and row of a tile as a 2-item sequence level is the nested list of tile types representing the level map. ground_default is the tile type to return if the tile_ref is below the bottom of the level. sky_default is the tile type to return if the tile_ref is above, to the left of, or to the right of the bounds of the level.""" # # sky # default # +--------+ # sky | level | sky # default | | default # - - -+--------+- - - # ground # default # # return default 'ground' tile if reference is off the bottom of the level if tile_ref[1] >= len(level): return ground_default # look up tile type in nested level list if reference is inside bounds of level elif len(level)>0 and 0 <= tile_ref[0] < len(level[0]) and 0 <= tile_ref[1] < len(level): return level[tile_ref[1]][tile_ref[0]] # otherwise reference is above, left of or right of the level bounds. return default 'sky' tile else: return sky_default
def check_name(obj): """ Function for returning the name of a callable object. Function and class instances are handled differently, so we use a try/except clause to differentiate between the two. :param obj: An object for which we want to find the name. :return: The name of the object """ try: return obj.__name__ except AttributeError: return obj.__class__.__name__
def _compare_results_paths(truth_path: str, compare_path: str, source_particle: str, replace_particle: str) -> bool: """Handles comparing paths that need to be changed Arguments: truth_path - the path that's to be compared against compare_path - the path that is to be checked source_particle - the starting portion of compare_path that's to be replaced replace_particle - the replacement path Returns: Returns True if the paths match, and False if not Notes: If compare_path doesn't start with source_particle, False will be returned. If truth_path and compare_path start out the same, True is returned. The paths for replacement are compared by complete folder named: '/a/b/c' won't match '/a/b/condor' """ # Easy cases if truth_path == compare_path: return True if not compare_path.startswith(source_particle): return False # Check that the last folder matches exactly compare_parts = compare_path.split('/' if '/' in compare_path else '\\') source_parts = source_particle.split('/' if '/' in source_particle else '\\') last_folder_index = len(source_parts) - 1 if source_parts[last_folder_index] != compare_parts[last_folder_index]: return False # Make the path replacement and compare new_path = replace_particle.replace('\\', '/') index = len(new_path) - 1 if new_path[index] != '/': new_path += '/' new_path += '/'.join(compare_parts[last_folder_index + 1:]) return truth_path == new_path
def _split_line(s, parts): """ Parameters ---------- s: string Fixed-length string to split parts: list of (name, length) pairs Used to break up string, name '_' will be filtered from output. Returns ------- Dict of name:contents of string at given location. """ out = {} start = 0 for name, length in parts: out[name] = s[start:start + length].strip() start += length del out['_'] return out
def gndvi(b3, b8): """ Green Normalized Difference Vegetation Index \ (Gitelson, Kaufman, and Merzlyak, 1996). .. math:: GNDVI = (b8 - b3) / (b8 + b3) :param b3: Green. :type b3: numpy.ndarray or float :param b8: NIR. :type b8: numpy.ndarray or float :returns GNDVI: Index value .. Tip:: Gitelson, A., Kaufman, Y. J., Merzlyak, M. N. 1996. Use of a green \ channel in remote sensing of global vegetation from EOS-MODIS. \ Remote Sensing of Environment 58(3), 289-298. \ doi:10.1016/s0034-4257(96)00072-7. """ GNDVI = (b8 - b3) / (b8 + b3) return GNDVI
def _mapping_has_all(mapping, **key_values): """Test if a `Mapping` has all key/value pairs in `key_values` (by equality). Or: is `key_values` a "sub-mapping" of `mapping` (as sets of key/value pairs)? >>> dct = {'A': 1, 'B': 2, 'C': 3} >>> _mapping_has_all(dct, A=1, B=2) True >>> _mapping_has_all(dct, A=1, B=1) False >>> _mapping_has_all(dct, A=1, D=4) False """ for key, value in key_values.items(): if key not in mapping or mapping[key] != value: return False return True
def add_mod(x: int, y: int, modulo: int = 32) -> int:
    """
    Modular addition at a fixed bit width.

    :param x:
    :param y:
    :param modulo: bit width; the result is reduced modulo 2**modulo
    :return:
    """
    return (x + y) & ((1 << modulo) - 1)
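# Illustrative usage: with the default 32-bit width the sum wraps around.
add_mod(0xFFFFFFFF, 1)      # -> 0
add_mod(0xFF, 1, modulo=8)  # -> 0
add_mod(1, 2)               # -> 3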
def format_text(text): """Transform special chars in text to have only one line.""" return text.replace('\n', '\\n').replace('\r', '\\r')
def load_YvsX_config(config): """ the input is a list of tuples, each of them having as first element as entry key and the second a string, which should be converted to a list basically this takes the confg as a list of tuples and converts it to dictionary """ #~ keys which values should be converted to numbers using eval eval_keys = [ "beta", "m", "lambda", "x_col", "y_col" ] #~ keys which values should be lists list_keys = [ "eos_name", "beta", "m", "lambda" ] config_dict = {} for entry in config: if entry[0] in eval_keys and entry[0] in list_keys: config_dict.update( { entry[0]: [ eval(_) for _ in entry[1].split(",") if _.strip() ] } ) elif entry[0] not in eval_keys and entry[0] in list_keys: config_dict.update( { entry[0]: [ _.strip() for _ in entry[1].split(",") if _.strip() ] } ) elif entry[0] in eval_keys: config_dict.update( { entry[0]: eval(entry[1]) } ) else: config_dict.update( { entry[0]: entry[1].strip() } ) return config_dict
def remove_in_dict(d, value = 0): """ In a dictionary, remove keys which have certain value :param d: the dictionary :param value: value to remove :returns: new dictionary whithout unwanted value >>> remove_in_dict({'b': 1, 'a': 0}) == {'b': 1} True >>> remove_in_dict({'b': 1, 'a': 0}, 1) == {'a': 0} True """ new_dict = {} for (k,v) in d.items(): if v != value: new_dict[k] = v return new_dict
def format_lazy_import(names):
    """Formats lazy import lines"""
    lines = ''
    for _, name, asname in names:
        pkg, _, _ = name.partition('.')
        if asname is None:
            line = '{pkg} = _LazyModule.load({pkg!r}, {mod!r})\n'
        else:
            line = '{asname} = _LazyModule.load({pkg!r}, {mod!r}, {asname!r})\n'
        lines += line.format(pkg=pkg, mod=name, asname=asname)
    return lines
def mkinput(txid, vout): """ Create an input record with empty scriptSig, as appropriate for Transaction object. """ return {'prevout_hash':txid, 'prevout_n':vout, 'sequence':0, 'x_pubkeys': [], 'pubkeys': [], 'address': None, 'type': 'unknown', 'signatures': [], 'num_sig': 0, 'scriptSig': ''}
def parse_metadata_records(metadata_records, header):
    """Group metadata records by Experiment accession.

    metadata_records is a dict indexed by fastq_ID. If an experiment is a
    knockdown, add the related control experiments. There may be multiple
    knockdown experiments with the same controls, so controls are stored
    with a different experiment ID. The 'Controlled by' field holds pointers
    to files, e.g. '/files/ENCFF078MXU/, /files/ENCFF791HTS/', that point to
    fastq IDs directly (and not experiment IDs).

    Important: Make sure experiment IDs are not used downstream of here
    because they will remain stored in these rows.
    """
    exps = {}
    for _, rec in metadata_records.items():
        # Group fastq file entries by their experiment ID
        cur = dict(zip(header, rec))
        exp_acc = cur['Experiment accession']
        exps.setdefault(exp_acc, []).append(cur)
        # Controls
        if cur['Controlled by'] != "":
            control_ids = map(lambda e: e.split("/")[2], cur['Controlled by'].split(","))
            for cid in control_ids:
                # Find rows for these files
                if cid in metadata_records:
                    crow = dict(zip(header, metadata_records[cid]))
                    if crow not in exps[exp_acc]:
                        exps[exp_acc].append(crow)
                else:
                    raise ValueError("Controls for experiment %s recorded, but not fetched!" % exp_acc)
    print('There are {0:d} experiments.'.format(len(exps)))
    return exps
def clean_breaks(val_breaks, d): """ Merge breakpoints that are within d bp of each other. """ breaks = sorted(list(set(val_breaks))) i, j = 0, 1 while j < len(breaks): if breaks[j] - breaks[i] < d: breaks.pop(j) else: i += 1 j += 1 return breaks