content
stringlengths
42
6.51k
def parse_one_line(line):
    """Split a line like '16:17:266:2864 3:4:194:2443' into two int lists.

    Returns a tuple ([16, 17, 266, 2864], [3, 4, 194, 2443]).
    """
    trial_part, success_part = line.split(' ')

    def to_ints(chunk):
        # Colon-separated integers.
        return [int(tok) for tok in chunk.split(':')]

    return to_ints(trial_part), to_ints(success_part)
def normalize(pos, size, flip_y=False):
    """Scale pos into [0, 1] floats relative to size; optionally flip the y axis."""
    width, height = size
    nx = pos[0] / float(width)
    ny = pos[1] / float(height)
    return (nx, 1 - ny) if flip_y else (nx, ny)
def students_check_types(row: tuple) -> bool:
    """Return True if a student-data row has the expected field types.

    Expected layout: row[1] an index convertible to a 6-digit int
    (100000..999999), row[2] a str, row[3] a str or int.

    Fix: int(row[1]) raises TypeError (not ValueError) for None/list inputs,
    which escaped the original handler and crashed a predicate that is
    documented to return a bool; TypeError is now caught as well.
    """
    try:
        index = int(row[1])
        if index < 100000 or index > 999999:
            return False
    except (ValueError, TypeError):
        return False
    if not isinstance(row[2], str):
        return False
    if not isinstance(row[3], (str, int)):
        return False
    return True
def scale_p(p, L, pc, nu):
    """Apply finite-size scaling to probability values.

    :param p: value(s) p to be scaled (scalar or numpy array).
    :param L: system size.
    :param pc: critical threshold.
    :param nu: critical exponent.
    :return: scaled probabilities (p - pc) * L**(1/nu).
    """
    scale_factor = L ** (1 / nu)
    return (p - pc) * scale_factor
def get_geo_list(geo_lists, geo_filter, add_select=False, user_filter=None):
    """Get specific organisational units based on filter and list.

    :param geo_lists: iterable of dicts with 'area_id', 'area_name',
        'area_type_id' keys.
    :param geo_filter: area type id to select.
    :param add_select: prepend a '' -> 'Please Select' entry.
    :param user_filter: optional collection of allowed area ids
        (fix: was a mutable default argument ``[]``; None behaves identically
        since only truthiness was tested).
    :return: dict items view of {area_id: area_name}, or () when geo_lists is
        falsy or an error occurs.
    """
    area_detail, result = {}, ()
    if add_select:
        area_detail[''] = 'Please Select'
    if user_filter is None:
        user_filter = []
    try:
        if geo_lists:
            for geo_list in geo_lists:  # fix: dropped unused enumerate index
                area_id = geo_list['area_id']
                area_name = geo_list['area_name']
                area_type = geo_list['area_type_id']
                if geo_filter == area_type:
                    # No user filter means "accept everything".
                    if not user_filter or area_id in user_filter:
                        area_detail[area_id] = area_name
            result = area_detail.items()
    except Exception as e:
        print('Error - %s' % e)
        return ()
    return result
def digitsum(s: str) -> int:
    """Return int(s) if it equals the sum of the fifth powers of its digits, else 0.

    >>> all(digitsum(str(i)) == (1 if i == 1 else 0) for i in range(100))
    True
    """
    fifth_power_total = sum(int(ch) ** 5 for ch in s)
    if fifth_power_total == int(s):
        return fifth_power_total
    return 0
def calc_total_probe_depth(capture_data):
    """Summarise probe coverage depth across a capture dict.

    capture_data maps header -> (seq, depth), where depth is a list of
    per-position probe counts.  Returns a 6-tuple of percentages of positions
    covered by exactly 0, 1, 2, 3, 4 and by 5-or-more probes.
    """
    total = 0
    tallies = [0, 0, 0, 0, 0, 0]  # counts for depth 0..4, then 5+
    for _header, (_seq, depth) in capture_data.items():
        total += len(depth)
        for level in range(5):
            tallies[level] += depth.count(level)
        tallies[5] += sum(1 for d in depth if d >= 5)
    return tuple(round(t * 100 / total, 2) for t in tallies)
def closest_pair_strip(cluster_list, horiz_center, half_width):
    """Compute the closest pair of clusters inside a vertical strip.

    Input: cluster_list is a list of clusters produced by fast_closest_pair;
    horiz_center is the horizontal position of the strip's vertical center
    line; half_width is half the strip width (the maximum horizontal distance
    a cluster may lie from the center line).

    Output: tuple (dist, idx1, idx2) where cluster_list[idx1] and
    cluster_list[idx2] lie in the strip and have minimum distance dist,
    with idx1 < idx2.

    Fix: clusters are paired with their true positions via enumerate instead
    of cluster_list.index(cluster), which returned the FIRST occurrence and
    so mis-labelled duplicate clusters (and was O(n) per lookup).
    """
    strip = [(cluster, pos) for pos, cluster in enumerate(cluster_list)
             if abs(cluster.horiz_center() - horiz_center) < half_width]
    strip.sort(key=lambda entry: entry[0].vert_center())

    best_dist, idx1, idx2 = float('inf'), -1, -1
    length = len(strip)
    for u in range(length - 1):
        # Only the next <=3 neighbours in vertical order need checking.
        for v in range(u + 1, min(u + 4, length)):
            uv_dist = strip[u][0].distance(strip[v][0])
            if uv_dist < best_dist:
                best_dist = uv_dist
                idx1, idx2 = sorted((strip[u][1], strip[v][1]))
    return (best_dist, idx1, idx2)
def _parse_int(value, default=0): """ Attempt to cast *value* into an integer, returning *default* if it fails. """ if value is None: return default try: return int(value) except ValueError: return default
def insertion_sort(A):
    """Sort A in place into nondecreasing order (insertion sort) and return it."""
    for idx in range(1, len(A)):
        current = A[idx]
        pos = idx
        # Shift larger elements right until the hole is in position.
        while pos > 0 and A[pos - 1] > current:
            A[pos] = A[pos - 1]
            pos -= 1
        A[pos] = current
    return A
def est_bien_par_pile(word):
    """Check the validity (balance) of a word using stack semantics.

    '(' opens; any other character closes the most recent open paren.
    """
    depth = 0
    for ch in word:
        if ch == "(":
            depth += 1
        elif depth == 0:
            # Closer with nothing open: invalid.
            return False
        else:
            depth -= 1
    return depth == 0
def get_space(metric_name, maximum_metric_size):
    """Return padding spaces that align metric_name to maximum_metric_size + 1.

    :param metric_name: the metric name
    :param maximum_metric_size: the maximum size of the metric name
    :return: the spaces to be appended to the message
    """
    pad_width = maximum_metric_size - len(metric_name) + 1
    return pad_width * " "
def in_symbols(category):
    """Return True for Unicode symbol general categories (Sm, Sc, Sk, So)."""
    return category in ('Sm', 'Sc', 'Sk', 'So')
def isiterable(target):
    """Return True if target supports iteration, else False.

    Fix: iter() raises TypeError for non-iterables; the original bare
    ``except:`` also swallowed unrelated exceptions (including
    KeyboardInterrupt/SystemExit), so the handler is narrowed.
    """
    try:
        iter(target)
    except TypeError:
        return False
    return True
def numpydoc_str_param_list(iterable, indent=4):
    """Format a list of numpydoc parameters.

    Parameters
    ----------
    iterable : list
        Parameter triples (name, type, description-lines).
    indent : int
        Indent as number of spaces.

    Returns
    -------
    str
    """
    pad = indent * " "
    lines = []
    for param in iterable:
        name, ptype, desc = param[0], param[1], param[2]
        lines.append("%s : %s" % (name, ptype) if ptype else name)
        # Only emit the description block when it has visible content.
        if desc and "".join(desc).strip():
            lines.extend(pad + entry for entry in desc)
        lines.append("")
    return ("\n" + pad).join(lines)
def xor_vec(in_val, mask_vec):
    """Return the parity (XOR-fold) of the bits of in_val & mask_vec."""
    masked = in_val & mask_vec
    parity = 0
    while masked:
        parity ^= masked & 1
        masked >>= 1
    return parity
def parse_http_response(data):
    """Parse an HTTP response header block; the body (if any) is ignored.

    Returns (cmd, headers) where cmd is the status line split on spaces and
    headers maps lower-cased header names to values.
    """
    head = data.split('\r\n\r\n')[0]
    raw_lines = head.split('\r\n')
    cmd = raw_lines[0].split(' ')
    # Normalise "Name: value" to "Name:value" and drop empty lines.
    cleaned = [ln.replace(': ', ':', 1) for ln in raw_lines[1:] if len(ln) > 0]
    pairs = [ln.split(':', 1) for ln in cleaned]
    headers = {p[0].lower(): p[1] for p in pairs}
    return cmd, headers
def node_int(node):
    """Return the node's stripped text as int; None for missing/empty/'none'."""
    if node is None:
        return None
    text = node.text.strip()
    if not text or text.lower() == 'none':
        return None
    return int(text)
def provision(tup):
    """Build a well-formed otpauth:// provisioning URI from a field tuple.

    tup unpacks to (type, name, secret, issuer, algorithm, digits, period).
    """
    # Renamed first field locally: 'type' shadowed the builtin.
    otp_type, name, secret, issuer, algorithm, digits, period = tup
    return (
        f"otpauth://{otp_type}/{issuer}:{name}?"
        f"secret={secret}&issuer={issuer}"
        f"&algorithm={algorithm.upper()}&digits={digits}&period={period}"
    )
def mean(x):
    """Arithmetic mean of sample x (raises ZeroDivisionError for an empty sample)."""
    total = sum(x)
    return total / len(x)
def mass_function_abc(m, a, b, c):
    """Parametrized suppression function of the halo mass function: (1 + (a/m)**b)**c."""
    ratio_term = (a / m) ** b
    return (1 + ratio_term) ** c
def is_number(s):
    """Check whether the input can be interpreted as a number.

    Arguments:
        s {int, float, str} -- input to test

    Returns:
        bool -- True if float(s) succeeds.

    Fix: float(None) / float([]) raise TypeError, not ValueError; the
    predicate now returns False for those instead of crashing.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
def create_response(status, success=0, failure=0, repository_name=None, branch_name=None):
    """Build a JSON-style response for an EBS deletion request.

    :param status: one of 'success', 'failure', 'unauthorized', 'unsupported'.
    :param success: number of EBS volumes deleted successfully.
    :param failure: number of EBS volumes not deleted.
    :param repository_name: full repository name.
    :param branch_name: branch name that is deleted.
    :return: dict response for the given status (KeyError for unknown status).
    """
    total = success + failure
    catalogue = {
        'success': {
            'statusCode': 200,
            'body': f'[SUCCESS] All {total} EBS volumes are deleted for branch {branch_name} in repository {repository_name}',
            'isBase64Encoded': 'false',
        },
        'failure': {
            'statusCode': 500,
            'body': f'[FAILURE] Failed to delete {failure}/{total} EBS volumes for branch {branch_name} in repository {repository_name}',
            'isBase64Encoded': 'false',
        },
        'unauthorized': {
            'statusCode': 401,
            'body': 'Unauthorized',
            'isBase64Encoded': 'false',
        },
        'unsupported': {
            'statusCode': 204,
            'isBase64Encoded': 'false',
        },
    }
    return catalogue[status]
def Upper(v):
    """Transform a value's string form to upper case.

    >>> s = Schema(Upper)
    >>> s('hi')
    'HI'
    """
    text = str(v)
    return text.upper()
def get_hit_dice(monster_data) -> str:
    """Rebuild a hit-dice string including the constant bonus.

    Works around the API's hit_dice property omitting the extra HP: the bonus
    is inferred as hit_points minus the expected average roll.
    """
    count_str, sides_str = monster_data['hit_dice'].split('d')
    count = int(count_str)
    sides = int(sides_str)
    # Average roll of a d{sides} is (sides + 1) / 2.
    expected_total = int(count * (sides / 2 + 0.5))
    bonus = int(monster_data['hit_points']) - expected_total
    return f"{count}d{sides} + {bonus}"
def _label(files_info): """Get a suitable label for the files. Returns "config-2" if the openstack metadata is present. """ if any(x.startswith('openstack/') for x in files_info.values()): return 'config-2' else: return 'VMEDIA_BOOT_ISO'
def package_dir_path(path):
    """Return the package install directory path (appends '/.pkg')."""
    return f"{path}/.pkg"
def get_grand_mean(samples):
    """Grand mean over several samples (or over single-element mean lists).

    Each element of *samples* is a list: either a full sample of values, or a
    one-element list holding that sample's mean.  The grand mean is the total
    of all values divided by the total count.
    """
    grand_total = sum(sum(sample) for sample in samples)
    value_count = sum(len(sample) for sample in samples)
    return grand_total / value_count
def cds_query(ra, dec, radius=1.):
    """Build a CDS/VizieR catalog-query URL around a central position."""
    # URL-escape the sign characters and join coordinates with '+'.
    coord = f'{ra} {dec}'.replace('+', '%2B').replace('-', '%2D').replace(' ', '+')
    return (f'http://vizier.u-strasbg.fr/viz-bin/VizieR?'
            f'-c={coord}&-c.rs={radius:.1f}')
def _make_degrees_digital(parts, max_deg): """Assemble the degrees, minutes, and seconds parts from a regular expression result into a decimal number. """ num = float(parts['deg']) if num > max_deg or num < max_deg*-1: raise ValueError('degrees out of range {}/{}'.format(max_deg*-1, max_deg)) if parts['min']: min = float(parts['min']) if min >= 60.0: raise ValueError('minutes must be less than 60') else: num += min/60.0 if parts['sec']: sec = float(parts['sec']) if sec >= 60: raise ValueError('seconds must be less than 60') num += sec/3600.0 if parts.get('sign') == '-' or (parts.get('hemi') and parts['hemi'].upper() in ('S', 'W')): num *= -1 return num
def extract_from_config(config):
    """Unwrap a config that may be nested under a 'config' dictionary key."""
    if isinstance(config, dict) and 'config' in config:
        return config['config']
    return config
def divideset(rows, column, value):
    """Divide the dataset on a column threshold.

    Returns (above, at_or_below): rows with row[column] > value first.
    """
    above, below = [], []
    for row in rows:
        bucket = above if row[column] > value else below
        bucket.append(row)
    return (above, below)
def _get_type_string(attr_type): """Return a string represeting acceptable type(s).""" if isinstance(attr_type, (list, tuple)): if len(attr_type) > 1: return ( ", ".join([x.__name__ for x in attr_type[:-1]]) + " or " + attr_type[-1].__name__ ) return attr_type[0].__name__ return attr_type.__name__
def _index_store_path_overridden(copts): """Checks if index_while_building must be disabled. Index while building is disabled when the copts include a custom `-index-store-path`. Args: copts: The list of copts to be scanned. Returns: True if the index_while_building must be disabled, otherwise False. """ for opt in copts: if opt == "-index-store-path": return True return False
def bytes_hex(a):
    """Hex-encode `bytes` on either Python 2 or 3."""
    if not hasattr(a, 'hex'):
        # Python 2 str has no .hex(); use the 'hex' codec instead.
        return a.encode('hex')
    return a.hex()
def rev_comp(seq):
    """Return the reverse complement of a DNA sequence.

    Parameters
    ----------
    seq : str
        The original sequence (upper-case A/C/G/T only).

    Returns
    -------
    str
        The reverse complement.
    """
    pairs = {"A": "T", "C": "G", "G": "C", "T": "A"}
    return "".join(pairs[base] for base in reversed(seq))
def uniq(objectSequence):
    """Remove duplicates from a sequence while keeping first-occurrence order.

    Elements must be hashable.
    """
    seen = set()
    ordered = []
    for obj in objectSequence:
        if obj in seen:
            continue
        seen.add(obj)
        ordered.append(obj)
    return ordered
def create_session(user_data: dict) -> bytes:
    """Encode a session dict as b"key1=value1;key2=value2;".

    param user_data: e.g. {"username": username, "secret": password}
    """
    encoded = "".join(f"{key}={val};" for key, val in user_data.items())
    return encoded.encode()
def get_nelems(arrays):
    """Count stored elements across nested (array, triangular) pairs.

    Triangular square matrices contribute only their upper-triangle size
    n*(n+1)//2; other arrays contribute their full size.
    Raises RuntimeError when a triangular array is not square.
    """
    count = 0
    for group in arrays:
        for arr, is_triangular in group:
            if not is_triangular:
                count += arr.size
                continue
            rows = arr.shape[0]
            if rows != arr.shape[1]:
                raise RuntimeError('get_nelems: not a square matrix')
            count += rows * (rows + 1) // 2
    return count
def prefix_suffix_prep(string1, string2):
    """Strip the common prefix and suffix from two strings (logically).

    Expects len(string1) <= len(string2).

    Returns
    -------
    (len1, len2, start) : (int, int, int)
        Lengths of the middle parts after removing shared prefix/suffix,
        and the starting position of those middle parts.
    """
    n1, n2 = len(string1), len(string2)
    # Walk back over the shared suffix.
    while n1 and string1[n1 - 1] == string2[n2 - 1]:
        n1 -= 1
        n2 -= 1
    # Walk forward over the shared prefix.
    start = 0
    while start < n1 and string1[start] == string2[start]:
        start += 1
    if start:
        n1 -= start
        n2 -= start
    return n1, n2, start
def check_fit(function, xdata, ydata, func_params):
    """Residuals between data and a fitted function.

    Parameters
    ----------
    function : callable
        Model evaluated as function(xdata, *func_params).
    xdata : array_like
        Input values fed to the model.
    ydata : array_like
        Observed values, same length as the model output.
    func_params : iterable
        Parameter values unpacked into the model call.

    Returns
    -------
    np.ndarray
        ydata minus the model prediction.
    """
    predicted = function(xdata, *func_params)
    return ydata - predicted
def tex_coord(x, y, size=(32, 32)):
    """Return the bounding vertices of a texture square in a (size x size) atlas."""
    step_x = 1. / size[0]
    step_y = 1. / size[1]
    left = x * step_x
    bottom = y * step_y
    right = left + step_x
    top = bottom + step_y
    # Counter-clockwise: BL, BR, TR, TL.
    return left, bottom, right, bottom, right, top, left, top
def split_list_at_indices(the_list, indices):
    """Split a list at a given set of indices.

    >>> split_list_at_indices([1,3,5,7,9], [0, 3])
    [[], [1, 3, 5], [7, 9]]
    """
    starts = [0] + indices
    ends = indices + [None]
    return [the_list[a:b] for a, b in zip(starts, ends)]
def candidate_union(cells):
    """Return the union of `candidates` across cells (empty set for no cells)."""
    combined = set()
    for cell in cells:
        combined.update(cell.candidates)
    return combined
def ordered_dict_values(dictionary):
    """List the dictionary's unique values in order of first appearance.

    Values need not be hashable (membership is tested against the list).
    """
    out = []
    for value in dictionary.values():
        if value in out:
            continue
        out.append(value)
    return out
def is_horizontal_rule(line):
    """Return True when the stripped line is more than ten '#' characters."""
    stripped = line.strip()
    return len(stripped) > 10 and set(stripped) == {'#'}
def titleize(text: str):
    """Capitalize every whitespace-separated word in the string."""
    words = (word.strip().lower().capitalize() for word in text.split())
    return " ".join(words)
def delchars(stri, chars):
    """Return stri with all occurrences of characters in chars removed.

    Fix: the original was a Python-2 translate(table, deletechars) call whose
    TypeError fallback built dict(zip(identity, chars)) — a mapping that
    REPLACED low code points with chars rather than deleting anything, so on
    Python 3 the function did not do what its docstring promised.  The
    modern str.maketrans('', '', chars) form deletes correctly.
    """
    return stri.translate(str.maketrans('', '', chars))
def convert_plain(text):
    """Format a text/plain part: first line is the title, then a blank line,
    then the remaining lines with surrounding whitespace stripped."""
    title, *body = text.split('\n')
    stripped = [line.strip() for line in body]
    return '\n'.join((title, '', '\n'.join(stripped)))
def _extract_8_bits(long_value, shift=1): """Return an integer in the range 0, 255 by extracting 8 bits from the input long value. shift determines which 8 bits are taken, by default the first 8 bits.""" bitmask = (1 << 8 * shift) - 1 return (long_value & bitmask) >> (8 * (shift-1))
def type_check(some_input, memory):
    """Coerce a user-entered equation element to float when possible, else str.

    :param some_input: an element from the user input list.
    :param memory: user answer stored for future use (unused here; kept for
        the call-site signature).
    """
    try:
        return float(some_input)
    except ValueError:
        return str(some_input)
def Xor(b1, b2):
    """Element-wise XOR of two bit vectors (truncates to the shorter)."""
    result = []
    for a, b in zip(b1, b2):
        result.append(a ^ b)
    return result
def cost(z):
    """Cost function: 0.05 + 0.95 * z[0]**1.5."""
    power_term = z[0] ** 1.5
    return 0.05 + 0.95 * power_term
def _len_guards(M): """Handle small or incorrect window lengths""" if int(M) != M or M < 0: raise ValueError('Window length M must be a non-negative integer') return M <= 1
def line_options(strokeColor='black', fillColor='black', strokeWidth=1):
    """Translate RLD line option names to matplotlib-style keyword arguments."""
    return {
        'edgecolor': strokeColor,
        'facecolor': fillColor,
        'linewidth': strokeWidth,
    }
def scaleto255(value):
    """Scale a 0-100 input onto 0-255, clamping to that range."""
    scaled = value * 255.0 / 100.0
    return max(0, min(255, scaled))
def outputids2words(id_list, vocab):
    """Map output ids to words via the vocab's _id2word lookup."""
    return [vocab._id2word(token_id) for token_id in id_list]
def falling(n, k):
    """Compute the falling factorial of n to depth k.

    >>> falling(6, 3)  # 6 * 5 * 4
    120
    >>> falling(4, 3)  # 4 * 3 * 2
    24
    >>> falling(4, 1)  # 4
    4
    >>> falling(4, 0)
    1
    """
    product = 1
    for offset in range(k):
        product *= n - offset
    return product
def get_fileno(file):
    """Get the OS-level fileno of a file-like object.

    Unwraps common wrapper attributes (`file`, `_file`, `_fileobj`, in that
    priority order) until an object exposing `fileno` is found; raises
    AttributeError when no underlying file object can be reached.
    """
    obj = file
    while not hasattr(obj, "fileno"):
        for attr in ("file", "_file", "_fileobj"):
            if hasattr(obj, attr):
                obj = getattr(obj, attr)
                break
        else:
            raise AttributeError
    return obj.fileno()
def _number_format(count=999): """A string format for line numbers Should give a '%d' format with width big enough to `count` lines >>> assert _number_format(77) == '%2d: ' """ digits = len(str(count)) return "%%%dd: " % digits
def doArc8(arcs, domains, assignments):
    """
    Perform the ARC-8 arc checking algorithm and prune domains

    @attention: Currently unused.
    """
    # Work queue: every variable starts flagged for checking; popitem pulls
    # an arbitrary pending variable until none remain.
    check = dict.fromkeys(domains, True)
    while check:
        variable, _ = check.popitem()
        # Skip variables with no arcs or already assigned a value.
        if variable not in arcs or variable in assignments:
            continue
        domain = domains[variable]
        arcsvariable = arcs[variable]
        for othervariable in arcsvariable:
            arcconstraints = arcsvariable[othervariable]
            # An assigned neighbour pins its domain to the single chosen value.
            if othervariable in assignments:
                otherdomain = [assignments[othervariable]]
            else:
                otherdomain = domains[othervariable]
            if domain:
                changed = False
                # Iterate over a snapshot since values may be pruned mid-loop.
                for value in domain[:]:
                    # Tentatively assign so constraints can evaluate it.
                    assignments[variable] = value
                    if otherdomain:
                        for othervalue in otherdomain:
                            assignments[othervariable] = othervalue
                            for constraint, variables in arcconstraints:
                                if not constraint(variables, domains, assignments, True):
                                    break
                            else:
                                # All constraints passed. Value is safe.
                                break
                        else:
                            # All othervalues failed. Kill value.
                            # NOTE(review): `domain` must expose hideValue()
                            # (a prunable-domain object, not a plain list).
                            domain.hideValue(value)
                            changed = True
                        del assignments[othervariable]
                # Remove the tentative assignment before moving on.
                del assignments[variable]
                #if changed:
                #    check.update(dict.fromkeys(arcsvariable))
            # A wiped-out domain means the CSP is inconsistent.
            if not domain:
                return False
    return True
def all_of(*words: str) -> str:
    """Format words into a query matching results containing all of them.

    :param words: desired words
    :return: space-joined string in the format google understands

    Fix: the original `" ".join(*words)` unpacked the tuple into separate
    arguments, so any call with two or more words raised TypeError
    (str.join takes exactly one iterable).
    """
    return " ".join(words)
def iterative_grouper(ListOfListOfLabels, seed):
    """Expand a seed label set by label co-occurrence.

    Collects every label list sharing a member with `seed`, then recurses
    with the enlarged set until it stops growing.
    """
    seed_set = set(seed)
    gathered = []
    for labels in ListOfListOfLabels:
        if set(labels) & seed_set:
            gathered += labels
    merged = set(gathered) | seed_set
    # Fixed point reached: nothing new was pulled in.
    if set(gathered) == seed_set:
        return merged
    return iterative_grouper(ListOfListOfLabels, merged)
def calculate_lower_bounds(x_s, y_s):
    """Element-wise lower bounds of timesteps and returns across seeds.

    Parameters:
        x_s - list of lists of total timesteps so far, one list per seed.
        y_s - list of lists of average episodic return, one list per seed.

    Return:
        (x_low, y_low): per-iteration minima across all seeds.
    """
    x_low, y_low = x_s[0], y_s[0]
    for xs, ys in zip(x_s[1:], y_s[1:]):
        # Running minimum against each further seed's curve.
        x_low = [min(val, x_low[i]) for i, val in enumerate(xs)]
        y_low = [min(val, y_low[i]) for i, val in enumerate(ys)]
    return x_low, y_low
def _make_links_table(config): """Make Markdown links table from configuration.""" if "links" not in config: return "" return "\n\n" + "\n".join(f"[{ln['key']}]: {ln['url']}" for ln in config["links"])
def make_code_block(text: str, language: str = "GDScript") -> str:
    """Wrap text in a fenced Markdown code block tagged with language."""
    return f"```{language}\n{text}\n```"
def rs256_jwks_uri(rs256_domain):
    """Build the JWKS URI for a domain (armasec pytest-extension fixtures)."""
    return "https://{}/.well-known/jwks.json".format(rs256_domain)
def build_kwargs_dict(arg_name, value):
    """Return {arg_name: value} when value is truthy, else an empty dict."""
    return {arg_name: value} if value else {}
def ignore(text, to_ignore):
    """Return True when any fragment in to_ignore occurs within text."""
    return any(fragment in text for fragment in to_ignore)
def area_triangle(base, height):
    """Area of a triangle with the given base and height."""
    return 0.5 * (base * height)
def break_compound_words(list_of_lists_of_tokens, compound_symbol="-"):
    """Split compound word1<symbol>word2 tokens and drop empty fragments.

    Parameters
    ----------
    list_of_lists_of_tokens : list of token lists (one sublist per sentence),
        e.g. [['I', 'think', 'north-south'], ['Me', 'too']]
    compound_symbol : symbol joining compound words; default '-'

    Returns
    -------
    Token lists with compounds broken apart,
    e.g. [['I', 'think', 'north', 'south'], ['Me', 'too']]
    """
    result = []
    for sentence in list_of_lists_of_tokens:
        expanded = []
        for token in sentence:
            if compound_symbol in token:
                expanded.extend(token.split(compound_symbol))
            else:
                expanded.append(token)
        # Splitting can leave empty strings (e.g. leading '-'); drop them.
        result.append([tok for tok in expanded if tok])
    return result
def drop_logical_repo(repo: str) -> str:
    """Strip the logical suffix, keeping at most the first two path segments."""
    parts = repo.split("/", 2)
    return "/".join(parts[:2])
def huba505(voltage):
    """Convert a HUBA 505 transducer voltage (V) to pressure (bar).

    Calibration: 0.5 V -> 0 bar, 3.5 V -> 4 bar; negative results clamp to 0.
    """
    pressure = (voltage - 0.5) * 4 / 3
    return pressure if pressure > 0 else 0
def rgb_int_round(color):
    """Round every channel of a 255-scale color tuple to an int.

    e.g. (243.1, 100, 10.6) -> (243, 100, 11)
    """
    return tuple(int(round(channel)) for channel in color)
def time_to_json(pyt, manager):
    """Serialize a Python time object to a JSON-friendly dict (None passes through).

    Keys are plural (hours/minutes/...) to match the JS side; microseconds are
    converted to milliseconds.
    """
    if pyt is None:
        return None
    return {
        'hours': pyt.hour,
        'minutes': pyt.minute,
        'seconds': pyt.second,
        'milliseconds': pyt.microsecond / 1000,
    }
def get_prediction_results(ground_truths, predictions, iou_threshold):
    """Calculate the number of true positives, false positives and false
    negatives from the given ground truth and predictions."""
    true_pos, false_pos, false_neg = None, None, None
    # If there are no predictions, then everything is a false negative.
    if len(predictions) == 0:
        true_pos, false_pos = 0, 0
        false_neg = len(ground_truths)
        return true_pos, false_pos, false_neg
    # If there is no ground truth, everything is a false positive.
    if len(ground_truths) == 0:
        true_pos, false_neg = 0, 0
        false_pos = len(predictions)
        return true_pos, false_pos, false_neg
    # Iterate over the predictions and calculate the IOU of each prediction
    # with each ground truth.  Only pairs above the threshold are candidates.
    ious = []
    for i, prediction in enumerate(predictions):
        for j, ground_truth in enumerate(ground_truths):
            iou = prediction.calculate_iou(ground_truth)
            if iou > iou_threshold:
                ious.append((i, j, iou))
    # If no IOUs were over the threshold, return all predictions as false
    # positives and all ground truths as false negatives.
    if len(ious) == 0:
        true_pos = 0
        false_pos, false_neg = len(predictions), len(ground_truths)
    else:
        # Sort the IOUs and match each box only once.
        # Greedy one-to-one matching: best IOU first; ties keep the earlier
        # (i, j) pair thanks to sorted()'s stability.
        ground_truths_matched, predictions_matched = set(), set()
        matched = []
        for prediction, ground_truth, iou in sorted(ious, key=lambda x: x[-1], reverse=True):
            if (ground_truth not in ground_truths_matched
                    and prediction not in predictions_matched):
                ground_truths_matched.add(ground_truth)
                predictions_matched.add(prediction)
                matched.append((prediction, ground_truth, iou))
        # The matches are the true positives.
        true_pos = len(matched)
        # The unmatched predictions are the false positives.
        false_pos = len(predictions) - len(predictions_matched)
        # The umatched ground truths are the false negatives.
        false_neg = len(ground_truths) - len(ground_truths_matched)
    return true_pos, false_pos, false_neg
def OCEANprice(firm_valuation: float, OCEAN_supply: float) -> float:
    """Return the price of one OCEAN token in USD (valuation / supply).

    The supply must be strictly positive.
    """
    assert OCEAN_supply > 0
    return firm_valuation / OCEAN_supply
def V_d2bV_by_V_Approx(V):
    """Approximate V*d^2(bV)/dV^2 for a single-mode fiber (Marcuse 1979).

    Needed to determine waveguide dispersion; good to 1% for the fundamental
    mode when 1.4 < V < 2.4.

    Args:
        V: V-parameter of the fiber [--]

    Returns:
        V*d^2(bV)/dV^2 [--]
    """
    delta = 2.834 - V
    return 0.080 + 0.549 * delta ** 2
def substrucure_in_tree(main, sub):
    """Return True when binary tree `sub` is a substructure of tree `main`.

    :param main: root of the big tree (nodes expose val/left/right)
    :param sub: root of the candidate substructure
    :return: bool
    """
    def matches_here(node, pattern):
        # Exhausted pattern always matches.
        if not pattern:
            return True
        if not node:
            return False
        if node.val != pattern.val:
            return False
        return (matches_here(node.left, pattern.left)
                and matches_here(node.right, pattern.right))

    if not main:
        return False
    if main.val == sub.val and matches_here(main, sub):
        return True
    return substrucure_in_tree(main.left, sub) or substrucure_in_tree(main.right, sub)
def parser_cell_frequency_link_Descriptor(data,i,length,end): """\ parser_cell_frequency_link_Descriptor(data,i,length,end) -> dict(parsed descriptor elements). This descriptor is not parsed at the moment. The dict returned is: { "type": "cell_frequency_link", "contents" : unparsed_descriptor_contents } (Defined in ETSI EN 300 468 specification) """ return { "type" : "cell_frequency_link", "contents" : data[i+2:end] }
def read_run_id(run_id: str):
    """Decode a run id into (app_name, region_name, timestamp, git_commit).

    Supports the new slash-separated style
    (covid_19/central-visayas/1600644750/9fdd80c) and the old dash-only style
    (central-visayas-1600644750-9fdd80c), where the app is implicitly covid_19.
    """
    parts = run_id.split("/")
    if len(parts) >= 2:
        # New style path.
        return parts[0], parts[1], parts[2], parts[3]
    # Old style path: region may itself contain dashes.
    pieces = run_id.split("-")
    region = "-".join(pieces[:-2])
    return "covid_19", region, pieces[-2], pieces[-1]
def top_three(input_list):
    """Return up to three largest elements, largest first.

    Lists shorter than three come back fully sorted, largest to smallest.
    """
    ranked = sorted(input_list, reverse=True)
    return ranked[:3]
def prime_factors(n, primes, factors=None):
    """Return the prime factors of n in ascending order.

    :param n: integer to factor (n > 1; the final remaining cofactor is
        appended, so supplying enough primes matters for full factorization).
    :param primes: ascending primes to trial-divide by.
    :param factors: unused; retained for call-site compatibility.
        Fix: was a mutable default argument ``[]`` (immediately shadowed, so
        harmless at runtime but a classic linter-flagged pitfall); the old
        docstring also referenced a nonexistent parameter ``k``.
    """
    factors = []
    for p in primes:
        # Divide out p while a nontrivial cofactor (>= p) could remain.
        while n >= p * p:
            if n % p:
                break
            n = n // p
            factors.append(p)
    factors.append(n)
    return factors
def ex_no_host():
    """SPAMD EX_NOHOST (no host) response, as raw bytes."""
    response = b"SPAMD/1.5 68 EX_NOHOST\r\n\r\n"
    return response
def thermal_balance_2(q_in, epsilon):
    """Equilibrium temperature from Stefan-Boltzmann blackbody balance.

    Solves q_in = epsilon * sigma * T**4 for T and returns [T].
    """
    stefan_boltzmann = 5.670373E-8  # sigma, W m^-2 K^-4
    temperature = (q_in / (epsilon * stefan_boltzmann)) ** 0.25
    return [temperature]
def query_string_param_test(**request):
    """Echo the keyword arguments back under 'original_request'.

    Query string parameters are expected to be mangled based on HB rules in
    this example.
    """
    return dict(original_request=request)
def cliproi(shape, roi):
    """Clip a ROI so it cannot exceed the array bounds.

    Args:
        shape (n-tuple): array shape (n1, n2, ...)
        roi (n-2-tuple): index ranges ((a1,b1),(a2,b2),...); negative values
            count from the end, None means "full extent" on that side.

    Returns:
        n-2-list: clipped ROI [[a1,b1],[a2,b2],...]
    """
    if len(shape) != len(roi):
        raise ValueError("Dimensions for shape and ROI should be the same")
    clipped = []
    for size, (lo, hi) in zip(shape, roi):
        if lo is None:
            lo = 0
        else:
            if lo < 0:
                lo += size
            lo = max(0, min(lo, size - 1))
        if hi is None:
            hi = size
        else:
            if hi < 0:
                hi += size
            hi = max(0, min(hi, size))
        clipped.append([lo, hi])
    return clipped
def in_polygon(vertices, point, border_value=True):
    """Ray-casting point-in-polygon test.

    @param vertices: polygon vertices as (x, y) pairs
    @param point: 2-tuple (or list) of coordinates
    @param border_value: value returned when point coincides with a vertex
    @return: True if the point is inside the polygon
    """
    crossings = 0
    vertex_count = len(vertices)
    for idx, v1 in enumerate(vertices):
        # Exact vertex hit: report the border value.
        if v1[0] == point[0] and v1[1] == point[1]:
            return border_value
        v2 = vertices[(idx + 1) % vertex_count]
        # Count crossings of a rightward horizontal ray with non-horizontal edges.
        if (min(v1[1], v2[1]) < point[1] <= max(v1[1], v2[1])
                and point[0] <= max(v1[0], v2[0])
                and v1[1] != v2[1]):
            x_cross = (point[1] - v1[1]) * (v2[0] - v1[0]) / (v2[1] - v1[1]) + v1[0]
            if v1[0] == v2[0] or point[0] <= x_cross:
                crossings += 1
    return crossings % 2 == 1
def default_sid_function(id, rsid):
    """Default mapping of a Bgen (SNP) id and rsid to a pysnptools sid.

    When the rsid is '' or '0' the sid is just the id; otherwise 'ID,RSID'.

    >>> default_sid_function('SNP1','rs102343')
    'SNP1,rs102343'
    >>> default_sid_function('SNP1','0')
    'SNP1'
    """
    if rsid in ("0", ""):
        return id
    return id + "," + rsid
def get_trader_energy_offer_ramp_rate(trader_id, ramp_rates):
    """Extract the energy offer ramp rate for a given trader.

    Not all traders participate in the energy market, so the function may
    return None if no energy offer ramp rate exists.

    Args:
        trader_id: trader identifier; used as the first element of the
            (trader_id, offer_type) keys in ``ramp_rates``.
        ramp_rates: dict mapping (trader_id, offer_type) to a ramp rate,
            where offer_type is 'ENOF' (generation) or 'LDOF' (load).

    Returns:
        The ramp rate for the trader's generation or load offer, or None if
        the trader has neither.

    Raises:
        ValueError: if the trader has both generation and load offers —
            it would then be ambiguous which ramp rate should be used.
            This case shouldn't occur in practice.
    """
    generation_key = (trader_id, 'ENOF')
    load_key = (trader_id, 'LDOF')

    has_generation_offer = generation_key in ramp_rates
    has_load_offer = load_key in ramp_rates

    # Ambiguous case: cannot know which offer's ramp rate should apply.
    if has_generation_offer and has_load_offer:
        raise ValueError('Trader has both generation and load offers')

    if has_generation_offer:
        return ramp_rates[generation_key]
    if has_load_offer:
        return ramp_rates[load_key]
    return None
def get_xref_mcf(xrefs, xref_to_label):
    """Return the mcf format of a given string of xrefs.

    Converts a list of xrefs to their mcf format of
    <prop_label>: <prop_text_value>, using ``xref_to_label`` to look up the
    property label of each identifier. For this import, xref_to_label is
    either GENE_XREF_PROP_DICT or DRUG_XREF_PROP_DICT from config.py.

    Args:
        xrefs: a string representing a comma-separated list of xrefs
            enclosed by double quotes
        xref_to_label: xref name in pharmgkb to DC property label mapping

    Returns:
        a multiline mcf formatted string of all of the xrefs'
        prop labels + values
    """
    if not xrefs:
        return ''
    lines = []
    for raw_xref in xrefs.split(','):
        name, *value_parts = raw_xref.replace('"', '').strip().split(':')
        if name not in xref_to_label:
            # Unknown identifier prefix: warn and skip this entry.
            print('unexpected format in gene xrefs:' + xrefs)
            continue
        prop_value = ':'.join(value_parts).strip()
        lines.append(xref_to_label[name] + ': "' + prop_value + '"\n')
    return ''.join(lines)
def compose(shift, org):
    """Rotate ``org`` to the right by ``shift`` characters.

    :param shift: int, shift > 0. How many characters to rotate
    :param org: str, original string
    :return: str, the rotated string
    """
    cut = len(org) - shift
    # The tail of length `shift` moves to the front.
    return org[cut:] + org[:cut]
def create_document(doc, embedding, index_name):
    """Build an index action for a document.

    :param doc: mapping with at least 'text', 'title' and 'url' keys
    :param embedding: vector stored under 'text_vector'
    :param index_name: target index name
    :return: dict formatted as an index action
    """
    action = {"_op_type": "index", "_index": index_name}
    action["text"] = doc["text"]
    action["title"] = doc["title"]
    action["url"] = doc["url"]
    action["text_vector"] = embedding
    return action
def multidict_split(bundle_dict):
    """Split multi dict to retail dict.

    :param bundle_dict: a bundle of dict
    :type bundle_dict: a dict of list
    :return: retails of dict
    :rtype: list
    """
    keys = list(bundle_dict)
    # Transpose the value lists, pairing each row back with the keys.
    return [dict(zip(keys, row)) for row in zip(*bundle_dict.values())]
def quote_ident(val):
    """Quote an identifier: double every embedded ``"`` and wrap the
    whole string in double quotes.
    """
    escaped = val.replace('"', '""')
    return f'"{escaped}"'
def deduplicate(values: list) -> list:
    """Return ``values`` with duplicates removed, preserving first-seen order.

    Uses ``dict.fromkeys`` so the result is deterministic (insertion-ordered),
    unlike ``list(set(values))`` whose ordering is arbitrary. Elements must be
    hashable, as before.

    :param values: list of hashable elements
    :return: list of unique elements in first-occurrence order
    """
    return list(dict.fromkeys(values))
def values(dict):
    """Return a list of all the enumerable own properties of the supplied
    object. Note that the order of the output array is not guaranteed across
    different JS platforms.

    NOTE(review): the parameter name shadows the builtin ``dict``; kept
    unchanged for interface compatibility.
    """
    return [v for v in dict.values()]
def md_getPitch(field):
    """Return the pitch: the second comma-separated token of *field*,
    stripped of surrounding whitespace.
    """
    # maxsplit=2 is enough to isolate the second field; still raises
    # IndexError if there is no second field, as before.
    parts = field.split(',', 2)
    return parts[1].strip()
def splitNamespace(clarkName):
    """Return (namespace, localname) tuple for a property name in Clark
    Notation. Namespace defaults to ''.

    Example:
    '{DAV:}foo'  -> ('DAV:', 'foo')
    'bar'        -> ('', 'bar')
    """
    if clarkName.startswith("{"):
        ns, brace, localname = clarkName.partition("}")
        if brace:
            # Drop the leading '{' from the namespace part.
            return (ns[1:], localname)
    return ("", clarkName)
def weighted_average(xvals, yvals):
    """Determine the weighted average of a group of masses and abundances.

    :param list xvals: x values
    :param list yvals: y values
    :return: weighted average, summed intensity
    :rtype: tuple of float
    """
    total_intensity = sum(yvals)
    if total_intensity == 0:
        # No intensity: fall back to the unweighted mean.
        return sum(xvals) / len(xvals), 0.
    weighted_mz = sum(x * y for x, y in zip(xvals, yvals)) / total_intensity
    return weighted_mz, total_intensity