def h(x, fx):
    """Helper function as in the PLoS article,
    doi:10.1371/journal.pcbi.1005352.t003 pg 16/24"""
    fx = fx % 3
    x = x % 3
    if fx > x:
        return x + 1
    elif fx < x:
        return x - 1
    else:
        return x
def parse_flag(value):
    """
    Convert string to boolean (True or False).

    :param value: string value
    :return: True if the value is equal to "true" (case-insensitive),
        otherwise False
    """
    return type(value) == str and value.lower() == "true"
def post_process_botok_segmented_data(segmented_text):
    """Remove unwanted space from segmented text.

    Args:
        segmented_text (str): Botok segmented text

    Returns:
        str: clean segmented text
    """
    clean_segmented_text = segmented_text.replace('\n ', '\n')
    # collapse double spaces into single spaces
    clean_segmented_text = clean_segmented_text.replace('  ', ' ')
    return clean_segmented_text
def parse_address(text):
    """ parse name and address from WOS C1 field """
    if not text or str(text) == 'nan':
        return []
    state = 'NAME'  # NAME | ADDRESS | ADDRESS_END
    name = ''
    address = ''
    results = []
    for c in text:
        if state == 'NAME':
            if c == ']':
                state = 'ADDRESS'
                continue
            elif c == '[':
                continue
            else:
                name += c
                continue
        elif state == 'ADDRESS':
            if c == '[':
                results.append((name, address))
                state = 'NAME'
                name = ''
                address = ''
                continue
            elif c == ' ' and address == '':
                continue
            else:
                address += c
                continue
        else:
            raise ValueError(state)
    if name and address:
        results.append((name, address))
    return results
def possessive(value):
    """
    Returns the possessive form of a name according to English rules:
    Mike returns Mike's, while James returns James'
    """
    if value[-1] == 's':
        return "%s'" % value
    return "%s's" % value
def switch_position_from_wire_position(base, global_pos):
    """Each switch occupies two wire positions (pos, pos+1); given a wire
    position (plus a base for offsetting the switch within the subnetwork
    that created it), this function returns the "canonical" position for
    the switch, that is, the "upper" of the two positions. global_pos is
    assumed to be the input position for LHS switches and the output
    position for RHS switches."""
    return ((global_pos - base) & ~1) + base
def hund_case_a_landau_g_factor(o, j, s, l, gs, gl):
    """
    Hund case A Landau g-factor

    .. math::
        g = \\frac{\\Omega}{J(J+1)} \\left( g_sS + g_l\\Lambda \\right)

    Parameters:
        o (float): Omega of level
        j (float): J of level
        l (float): Lambda of level
        s (float): Sigma of level
        gs (float): relativistic spin factor of molecule
        gl (float): relativistic orbital factor of molecule

    Returns:
        g (float): Landau g-factor for Hund case A
    """
    return gs * o * s / (j * (j + 1)) + gl * o * l / (j * (j + 1))
def frac(h, w, n=5):
    """
    Takes an image shape (h, w) and an integer n, and returns the shape
    (h_out, w_out) closest in aspect ratio to the original such that
    h_out < n and w_out < n. The image can then be cut into h_out*w_out
    pieces.

    Examples:
        frac(500, 375, 5) returns (4, 3)
        frac(333, 500, 5) returns (2, 3)
    """
    fr = float(h) / w
    h_out = 1
    w_out = 1
    if fr > 1:
        for i in range(1, n):
            for j in range(1, i):
                if abs(float(i) / j - fr) < abs(float(h_out) / w_out - fr):
                    h_out = i
                    w_out = j
    else:
        for i in range(1, n):
            for j in range(i, n):
                if abs(float(i) / j - fr) < abs(float(h_out) / w_out - fr):
                    h_out = i
                    w_out = j
    return h_out, w_out
def find_in_ordered_data(value, array_1d):
    """Returns the index of value in the ordered list-like array, or None
    if not present."""
    def find_in_subset(value, array_1d, start, end):
        # recursive binary split
        if start >= end:
            return None
        mid = start + (end - start) // 2
        sample = array_1d[mid]
        if sample == value:
            while mid > 0 and array_1d[mid - 1] == value:
                mid -= 1
            return mid
        if sample > value:
            return find_in_subset(value, array_1d, start, mid)
        if mid == start:
            mid += 1
        return find_in_subset(value, array_1d, mid, end)

    return find_in_subset(value, array_1d, 0, len(array_1d))
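A minimal usage sketch for find_in_ordered_data, assuming the function above is in scope; the data is made up to show that duplicates resolve to the leftmost index:

data = [1, 2, 2, 2, 5, 9]
assert find_in_ordered_data(2, data) == 1     # leftmost of the three 2s
assert find_in_ordered_data(7, data) is None  # absent values yield None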
def calculate_rbgs(prbs):
    """Calculates the number of resource block groups."""
    if prbs % 2 == 0:
        # integer division: a count of groups must stay an int
        return prbs // 2
    else:
        return (prbs // 2) + 1
def uniform2unit(theta, vmin, vmax):
    """
    Map a uniform sample theta on the parameter space [vmin, vmax] to the
    unit hypercube.
    """
    return (theta - vmin) / (vmax - vmin)
def parse_scales_line(line):
    """
    Args:
        line: a line of the form "Scales: name1: 1.0, name2: 2.0"

    Returns:
        scales_dict: mapping from each name to its float scale
    """
    def advance_past_token(s, token):
        return s[s.find(token) + len(token):]

    scales_dict = {}
    line = advance_past_token(line, 'Scales:')
    pair_strs = line.split(',')
    for pair_str in pair_strs:
        dname, scale = pair_str.split(':')
        scales_dict[dname.strip()] = float(scale)
    return scales_dict
def loop_solution(max_number: int) -> int:
    """Loop solution: difference between the square of the sum and the
    sum of squares of 1..max_number."""
    sum_of_squares = 0
    square_of_sum = 0
    for i in range(1, max_number + 1):
        sum_of_squares += i**2
        square_of_sum += i
    square_of_sum = square_of_sum**2
    return square_of_sum - sum_of_squares
def rotate(n, rotations=1, width=32):
    """Bitwise right-rotate an int.

    bin(rotate(1, rotations=0)) -> '0b1'
    bin(rotate(1, rotations=1)) -> '0b10000000000000000000000000000000'
    bin(rotate(1, rotations=2)) -> '0b1000000000000000000000000000000'
    bin(rotate(1, rotations=32)) -> '0b1'
    bin(rotate(1, rotations=31)) -> '0b10'
    bin(rotate(1, rotations=-1)) -> '0b10'
    bin(rotate(1, rotations=1, width=8)) -> '0b10000000'
    bin(rotate(1, rotations=8, width=8)) -> '0b1'
    """
    width = max(n.bit_length(), width)
    rotations %= width
    if rotations < 1:
        return n
    mask = 2 ** width - 1
    n &= mask
    return (n >> rotations) | ((n << (width - rotations)) & mask)
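A short usage sketch of rotate, assuming the function above is in scope; the 8-bit value is arbitrary:

# right-rotating 0b10010110 by 4 within 8 bits moves the low nibble to the top
assert rotate(0b10010110, rotations=4, width=8) == 0b01101001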
def _decode_to_string(to_decode):
    """
    This function is needed for Python 3, because a subprocess can return
    bytes instead of a string.
    """
    try:
        return to_decode.decode("utf-8")
    except AttributeError:  # to_decode was of type string already
        return to_decode
def index_to_position(index, strides):
    """
    Converts a multidimensional tensor `index` into a single-dimensional
    position in storage based on strides.

    Args:
        index (array-like): index tuple of ints
        strides (array-like): tensor strides

    Returns:
        int: position in storage
    """
    position = 0
    for i, s in zip(index, strides):
        position += i * s
    return position
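A quick sketch of index_to_position, assuming the function above is in scope; the strides are made up for a row-major 2x3 tensor:

# for a 2x3 row-major tensor, strides are (3, 1); element (1, 2) sits at 1*3 + 2*1
assert index_to_position((1, 2), (3, 1)) == 5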
def is_armstrong(number):
    """
    Verifies whether the given number is an Armstrong number.

    Args:
        number: number which we want to test

    Returns:
        True if the given number is an Armstrong number.
    """
    total_sum = 0
    str_number = str(number)
    length = len(str_number)
    for num in str_number:
        total_sum += pow(int(num), length)
    return total_sum == number
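A usage sketch for is_armstrong, assuming the function above is in scope; 153 is the classic example since 1**3 + 5**3 + 3**3 == 153:

assert is_armstrong(153) is True
assert is_armstrong(154) is False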
def quantize_tick_up(tick, grid):
    """
    Quantizes a given tick number to the closest higher tick on the grid.

    For example, for ``tick=900`` and ``grid=480``, this returns ``960``.

    :param tick: The tick number.
    :param grid: The grid to be quantized to.
    :returns: The closest higher tick on the grid.
    """
    return tick - (tick % grid) + grid
def format_join(grouped):
    """Formatted output: join factors and exponents."""
    joined = []
    for group in grouped:
        group = '^'.join(map(str, group))
        joined.append(group)
    return ' * '.join(joined)
def opposite_dir(d):
    """Return opposite direction."""
    opposites = dict(n='s', e='w', s='n', w='e')
    return opposites[d]
def sorted_dict_repr(d: dict, order: list):
    """
    Representation of dictionary `d`, with key order `order`.

    Example:
    ----------
    >>> sorted_dict_repr({'a': 1, '0': 3}, ['0', 'a'])
    "{'0': 3, 'a': 1}"
    """
    return '{' + ', '.join([f"{repr(k)}: {repr(d[k])}" for k in order]) + '}'
def token_accuracy(hypotheses, references, level="word"):
    """
    Compute the accuracy of hypothesis tokens: correct tokens / all tokens.
    Tokens are correct if they appear in the same position in the reference.

    :param hypotheses: list of hypotheses (strings)
    :param references: list of references (strings)
    :param level: segmentation level, either "word", "bpe", or "char"
    :return: token accuracy as a percentage
    """
    def split_by_space(string):
        """
        Helper method to split the input on spaces.
        Follows the same structure as list(inp).
        :param string: string
        :return: list of strings
        """
        return string.split(" ")

    correct_tokens = 0
    all_tokens = 0
    split_func = split_by_space if level in ['word', 'bpe'] else list
    assert len(hypotheses) == len(references)
    for hyp, ref in zip(hypotheses, references):
        # count tokens at the chosen segmentation level, not raw characters
        all_tokens += len(split_func(hyp))
        for h_i, r_i in zip(split_func(hyp), split_func(ref)):
            # min(len(h), len(r)) tokens considered
            if h_i == r_i:
                correct_tokens += 1
    return (correct_tokens / all_tokens) * 100 if all_tokens > 0 else 0.0
def calc_benjamini_hochberg_corrected_value(p_value, index, total_num_tests):
    """
    Perform the k-calculation for Benjamini-Hochberg correction. See
    http://en.wikipedia.org/wiki/False_discovery_rate#Independent_tests
    for more detail.

    :Parameters:
    - `p_value`: the uncorrected p-value of a test
    - `index`: where in the total list of test values this value is
      [NOTE: this should be one-index based, not zero-index (e.g., the
      first element is index `1`)]
    - `total_num_tests`: the total number of tests done
    """
    if index > total_num_tests:
        raise ValueError("index is greater than the total number of tests")
    bh_corrected_value = p_value * total_num_tests / index
    if bh_corrected_value > 1:
        bh_corrected_value = 1.0
    return bh_corrected_value
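A worked sketch of the Benjamini-Hochberg correction, assuming the function above is in scope; the p-values are invented and must be sorted ascending so that rank matches position:

pvals = [0.005, 0.02, 0.04]  # sorted ascending, rank = index + 1
corrected = [calc_benjamini_hochberg_corrected_value(p, rank, len(pvals))
             for rank, p in enumerate(pvals, start=1)]
# corrected is approximately [0.015, 0.03, 0.04]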
def preprocessligpdb(Ligand_list):
    """Takes the list of ligand PDB filenames and processes them for the
    matching.

    PARAMETERS
    ----------
    Ligand_list : list of strings
        list of ligand PDB filenames

    RETURNS
    -------
    lig_aux : list of ligand PDB filenames processed
    """
    lig_aux = []
    for ligand in Ligand_list:
        lig_aux.append(ligand.replace("ligands/", "").replace(".pdb", ""))
    return lig_aux
def determine_percentage(numerator, other):
    """
    Determines a fraction.

    :param numerator: The numerator of the calculation
    :type numerator: int
    :param other: The values that will be added to the numerator to
        calculate the denominator
    :type other: int
    :return: The fraction of the numerator divided by the sum of the two
        arguments
    :rtype: float
    """
    return numerator / (numerator + other)
def merge(list1, list2):
    """
    Merge two sorted lists.

    Returns a new sorted list containing all of the elements that are in
    either list1 or list2. This function can be iterative.
    """
    merged = []
    idx1 = 0
    idx2 = 0
    while idx1 < len(list1) and idx2 < len(list2):
        if list1[idx1] <= list2[idx2]:
            merged.append(list1[idx1])
            idx1 += 1
        else:
            merged.append(list2[idx2])
            idx2 += 1
    if idx1 == len(list1):
        merged.extend(list2[idx2:])
    else:
        merged.extend(list1[idx1:])
    return merged
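A small usage sketch for merge, assuming the function above is in scope, with made-up inputs:

assert merge([1, 3, 5], [2, 4, 6]) == [1, 2, 3, 4, 5, 6]
assert merge([], [1, 2]) == [1, 2]  # an empty list is handled by the tail extend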
def crlf_lines(physical_line):
    """
    Line contains CR (e.g. as a CRLF line ending).

    Many free software projects have a strong focus on POSIX platforms
    (like Linux, *BSD, Unix, Mac OS X, etc.) and they all use LF-only
    line endings. Only the Win32 platform uses CRLF line endings. So if
    you have a Win32-only source code using CRLF line endings, you might
    want to exclude this test.
    """
    pos = physical_line.find('\r')
    if pos >= 0:
        return pos, "W293 line contains CR char(s)"
def within_threshold(pos, item):
    """Check whether the tip of the pressed finger is within the piano key
    boundaries.

    Arguments:
        pos: (x, y) pixel coordinates of the fingertip
        item: boundaries of the bbox of a particular piano key

    Returns:
        boolean value
    """
    return item[0] < pos[0] < item[2] and item[1] < pos[1] < item[3]
def ifib(n):
    """Iterative Fibonacci function."""
    a, b = 0, 1
    if n > 1:
        for i in range(n):
            a, b = b, a + b
        return a
    return 1
def map_llc_display_result_to_dictionary_list(land_charge_result):
    """Produce a list of jsonable dictionaries from an alchemy result set."""
    if not isinstance(land_charge_result, list):
        land_charge_result = [land_charge_result]
    return [land_charge.to_display_dict() for land_charge in land_charge_result]
def calculate_trend(series_range, series_list):
    """
    With the given range and list, calculate the linear-regression slope,
    whose sign indicates whether the numbers are trending positive or
    negative.

    :param series_range: A range of values against which to calculate a
        linear regression.
    :param series_list: The list of numbers to calculate the linear
        regression against.
    :return: The calculated trend of the range and list, or None if the
        determinant indicates no trend.
    """
    range_count = len(series_range)
    x = 0
    y = 0
    xx = 0
    yy = 0
    sx = 0
    for range_item, list_item in zip(series_range, series_list):
        x += range_item
        y += list_item
        xx += range_item * range_item
        yy += list_item * list_item
        sx += range_item * list_item
    d = xx * range_count - x * x
    if d != 0:
        return (sx * range_count - y * x) / d
    else:
        return None
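A quick check of calculate_trend, assuming the function above is in scope; for points lying exactly on y = 2x + 1 the returned slope should be 2.0:

assert calculate_trend(range(5), [1, 3, 5, 7, 9]) == 2.0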
def filter_out_none(dictionary, keys=None):
    """
    Filter out items whose value is None.

    If `keys` is specified, only return non-None items with a matching key.
    """
    ret = {}
    for key, value in dictionary.items():
        if value is None:
            continue
        if keys is not None and key not in keys:
            continue
        ret[key] = value
    return ret
def sort(profiles, sorting_criteria, lang_dict):
    """
    sort profiles that were filtered by the show function
    some of the categories: name a-z, name z-a, age low-high etc.
    """
    list_of_values = list()
    sorted_profiles = list()
    if sorting_criteria == lang_dict["sort_prof_tuple1"]:
        for prof in profiles:
            list_of_values.append(prof['NAME'])
        list_of_values.sort()
        for name in list_of_values:
            for prof in profiles:
                if name == prof['NAME']:
                    sorted_profiles.append(prof)
    elif sorting_criteria == lang_dict["sort_prof_tuple2"]:
        for prof in profiles:
            list_of_values.append(prof['NAME'])
        list_of_values.sort(reverse=True)
        for name in list_of_values:
            for prof in profiles:
                if name == prof['NAME']:
                    sorted_profiles.append(prof)
    elif sorting_criteria == lang_dict["sort_prof_tuple3"]:
        counter = 60
        while counter > 0:
            for prof in profiles:
                if prof["GENDER"] == "female":
                    if prof["MILKING"] != "":
                        milking = int(prof["MILKING"][0] + prof["MILKING"][2])
                        if milking == counter:
                            sorted_profiles.append(prof)
            counter -= 1
        for prof in profiles:
            if prof["GENDER"] == "female":
                if prof["MILKING"] == "":
                    sorted_profiles.append(prof)
        for prof in profiles:
            if prof["GENDER"] == "male":
                sorted_profiles.append(prof)
    elif sorting_criteria == lang_dict["sort_prof_tuple4"]:
        counter = 0
        while counter < 60:
            for prof in profiles:
                if prof["GENDER"] == "female":
                    if prof["MILKING"] != "":
                        milking = int(prof["MILKING"][0] + prof["MILKING"][2])
                        if milking == counter:
                            sorted_profiles.append(prof)
            counter += 1
        for prof in profiles:
            if prof["GENDER"] == "female":
                if prof["MILKING"] == "":
                    sorted_profiles.append(prof)
        for prof in profiles:
            if prof["GENDER"] == "male":
                sorted_profiles.append(prof)
    elif sorting_criteria == lang_dict["sort_prof_tuple5"]:
        counter = 20
        while counter > 0:
            for prof in profiles:
                if prof["DATE_OF_BIRTH"] != "":
                    age = int(
                        prof["DATE_OF_BIRTH"][11:13].strip(lang_dict["age"]))
                    if age == counter:
                        sorted_profiles.append(prof)
            counter -= 1
        for prof in profiles:
            if prof["DATE_OF_BIRTH"] == "":
                sorted_profiles.append(prof)
    elif sorting_criteria == lang_dict["sort_prof_tuple6"]:
        counter = 0
        while counter < 20:
            for prof in profiles:
                if prof["DATE_OF_BIRTH"] != "":
                    age = int(
                        prof["DATE_OF_BIRTH"][11:13].strip(lang_dict["age"]))
                    if age == counter:
                        sorted_profiles.append(prof)
            counter += 1
        for prof in profiles:
            if prof["DATE_OF_BIRTH"] == "":
                sorted_profiles.append(prof)
    elif sorting_criteria == lang_dict["sort_prof_tuple7"]:
        for prof in profiles:
            if prof['GENDER'] == 'female':
                sorted_profiles.append(prof)
        for prof in profiles:
            if prof['GENDER'] == 'male':
                sorted_profiles.append(prof)
    return sorted_profiles
def bipartite_match(graph):
    """
    Program to find the bipartite match.

    Hopcroft-Karp bipartite max-cardinality matching and max independent set
    David Eppstein, UC Irvine, 27 Apr 2002.

    Find maximum cardinality matching of a bipartite graph (U, V, E).
    The input format is a dictionary mapping members of U to a list of
    their neighbors in V. The output is a triple (M, A, B) where M is a
    dictionary mapping members of V to their matches in U, A is the part
    of the maximum independent set in U, and B is the part of the MIS in
    V. The same object may occur in both U and V, and is treated as two
    distinct vertices if this happens.
    """
    # greedy initial matching
    matching = {}
    for u in graph:
        for v in graph[u]:
            if v not in matching:
                matching[v] = u
                break
    while True:
        predictions = {}
        unmatched = []
        prediction = dict([(u, unmatched) for u in graph])
        for v in matching:
            del prediction[matching[v]]
        layer = list(prediction)
        while layer and not unmatched:
            new_layer = {}
            for u in layer:
                for v in graph[u]:
                    if v not in predictions:
                        new_layer.setdefault(v, []).append(u)
            layer = []
            for v in new_layer:
                predictions[v] = new_layer[v]
                if v in matching:
                    layer.append(matching[v])
                    prediction[matching[v]] = v
                else:
                    unmatched.append(v)
        if not unmatched:
            not_layered = {}
            for u in graph:
                for v in graph[u]:
                    if v not in predictions:
                        not_layered[v] = None
            return matching, list(prediction), list(not_layered)

        def recurse(v):
            if v in predictions:
                L = predictions[v]
                del predictions[v]
                for u in L:
                    if u in prediction:
                        pu = prediction[u]
                        del prediction[u]
                        if pu is unmatched or recurse(pu):
                            matching[v] = u
                            return 1
            return 0

        for v in unmatched:
            recurse(v)
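A tiny usage sketch for bipartite_match, assuming the function above is in scope; the made-up graph maps members of U ('a', 'b') to neighbors in V (1, 2):

matching, mis_u, mis_v = bipartite_match({'a': [1], 'b': [1, 2]})
# matching maps V to U: {1: 'a', 2: 'b'}; both U vertices are matched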
def word(i: int) -> int:
    """Return an unsigned 16 bit word of an integer."""
    if i < 0:
        # two's complement of the magnitude within 16 bits
        i = (-i ^ 0xFFFF) + 1
    return i & 0xFFFF
def flatten(S):
    """
    Helper function to recursively flatten a list.

    :param S: The nested list
    :return: The flattened list
    """
    if S == []:
        return S
    if isinstance(S[0], list):
        return flatten(S[0]) + flatten(S[1:])
    return S[:1] + flatten(S[1:])
def get_name(your_name):
    """Pass `your_name` parameter as output."""
    return {
        "name": your_name,
    }
def get_create_table_field_data(field_data):
    """
    Generates the field-wise query segments needed to create a table.

    :param field_data: List of dicts with each dict having keys 'name',
        'type' and, optionally, 'modifiers'
    :return: the comma-joined field segments as a single string
    """
    field_query_list = []
    for field in field_data:
        query_field_segment = field["name"] + " " + field["type"]
        # 'modifiers' is optional, so use .get() to avoid a KeyError
        if field.get("modifiers"):
            query_field_segment += " " + field["modifiers"]
        field_query_list.append(query_field_segment)
    return ", ".join(field_query_list)
def get_add_term_cal_Fz(uZ_nodim, membrane_geometry):
    """
    This is a proper adaptation of the particle-flux conservation law in
    the mBLA method. The actual use of it is cal_Fz, which is related to
    the under-relaxed fixed-point iteration.
    """
    if membrane_geometry == 'FMS':
        return (2/3.) * (1. - uZ_nodim)
    return 0.
def node_id(node):
    """
    mapping of name of hydrophone node to ID

    Parameter
    ---------
    node : str
        name or ID of the hydrophone node

    Returns
    -------
    str
        ID of hydrophone node
    """
    # broadband hydrophones
    if node in ("Oregon_Shelf_Base_Seafloor", "LJ01D"):
        return "LJ01D"
    if node in ("Oregon_Slope_Base_Seafloor", "LJ01A"):
        return "LJ01A"
    if node in ("Oregon_Slope_Base_Shallow", "PC01A"):
        return "PC01A"
    if node in ("Axial_Base_Shallow", "PC03A"):
        return "PC03A"
    if node in ("Oregon_Offshore_Base_Seafloor", "LJ01C"):
        return "LJ01C"
    if node in ("Axial_Base_Seafloor", "LJ03A"):
        return "LJ03A"
    # low frequency hydrophones
    if node in ("Slope_Base", "HYSB1"):
        return "HYSB1"
    if node in ("Southern_Hydrate", "HYS14"):
        return "HYS14"
    if node in ("Axial_Base", "AXBA1"):
        return "AXBA1"
    if node in ("Central_Caldera", "AXCC1"):
        return "AXCC1"
    if node in ("Eastern_Caldera", "AXEC2"):
        return "AXEC2"
    print("No node exists for name or ID " + node)
    return ""
def sfc_rad(swup_sfc, swdn_sfc, lwup_sfc, lwdn_sfc):
    """All-sky net upward radiative flux at the surface."""
    return swup_sfc - swdn_sfc + lwup_sfc - lwdn_sfc
def get_pretrained_index_weight(word_pretrained_index, words_weight):
    """
    Get the map from each word's index in the pretrained embeddings to
    that word's weight.
    """
    index_weights = {}
    for word, idx in word_pretrained_index.items():
        if word in words_weight:
            index_weights[idx] = words_weight[word]
        else:
            index_weights[idx] = 1.0
    return index_weights
def formed_bond_keys(tra):
    """ keys for bonds that are formed in the transformation """
    # print('tra test:', len(tra), tra)
    # if len(tra) == 1:
    #     frm_bnd_keys = tra
    # else:
    frm_bnd_keys, _ = tra
    return frm_bnd_keys
def has_won_recursive(board):
    """Validate NxN board recursively."""
    def find(row, col, move, count, x_or_o):
        def valid_sq():
            if -1 < row < len(board) and \
                    -1 < col < len(board) and \
                    -1 < count <= len(board) and \
                    x_or_o == board[row][col]:
                return True
            return False

        if not valid_sq():
            return False
        count += 1
        if count == len(board):
            return True
        if move == "down":
            print('down')
            return find(row + 1, col, 'down', count, x_or_o)
        elif move == 'diag':
            print('diag')
            return find(row + 1, col + 1, 'diag', count, x_or_o)
        elif move == 'revdiag':
            print('revdiag')
            return find(row - 1, col + 1, 'revdiag', count, x_or_o)
        elif move == 'right':
            print('right')
            # keep moving right so the run stays on one line
            return find(row, col + 1, 'right', count, x_or_o)
        return (find(row + 1, col, 'down', count, x_or_o) or
                find(row + 1, col + 1, 'diag', count, x_or_o) or
                find(row - 1, col + 1, 'revdiag', count, x_or_o) or
                find(row, col + 1, 'right', count, x_or_o))

    def check_winner(player):
        for row in range(len(board)):
            if find(row, 0, 'any', 0, player):
                return True
        for col in range(len(board)):
            if find(0, col, 'any', 0, player):
                return True
        return False

    x_wins = check_winner('x')
    if not x_wins:
        return 'o wins!' if check_winner('o') else 'no one wins'
    return 'x wins!'
def _gen_table_cols(col_ids):
    """Generate Dash table columns in the expected format.

    :param col_ids: list of columns; must be in format <table-alias.name>,
        like "s.serial_number", as in the SQL select statement -- except
        for derived column values which must literally use "alias." plus
        the name.
    :return: List of dictionaries, where each contains an 'id' and a
        'name' key for a Dash DataTable.
    """
    col_list = []
    for col in col_ids:
        split_col = col.partition('.')
        if split_col[0] == 'alias':
            col_list.append({'id': 'alias_{}'.format(split_col[2]),
                             'name': split_col[2]})
        else:
            col_list.append({'id': split_col[2], 'name': split_col[2]})
    return col_list
def dfs_traverse_recursive(graph, start, visited=None):
    """Traversal by recursive depth-first search."""
    if visited is None:
        visited = set()
    visited.add(start)
    for next_node in graph[start]:
        if next_node not in visited:
            dfs_traverse_recursive(graph, next_node, visited)
    return visited
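A usage sketch for dfs_traverse_recursive, assuming the function above is in scope; every node must appear as a key, even with no outgoing edges:

graph = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
assert dfs_traverse_recursive(graph, 'a') == {'a', 'b', 'c', 'd'}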
def _escape_char(in_c):
    """
    Escape some special characters in java .properties files.

    :param in_c: Input character

    >>> "\\:" == _escape_char(':')
    True
    >>> "\\=" == _escape_char('=')
    True
    >>> _escape_char('a')
    'a'
    """
    # ':' and '=' are key/value separators in .properties files and must
    # be escaped, as the doctests above require
    return '\\' + in_c if in_c in (':', '=', '"', '\'', '\\') else in_c
def test_ident(i):
    """Is the unicode string valid in a Python 3 identifier."""
    # Some characters are not valid at the start of a name, but we still
    # want to include them. So prefix with 'a', which is valid at the start.
    return ('a' + i).isidentifier()
def word_lengths(lst):
    """Return a list of word lengths."""
    return [len(str_) for str_ in lst]
def to_dict(dictish):
    """
    Given something that closely resembles a dictionary, we attempt to
    coerce it into a proper dictionary.
    """
    if hasattr(dictish, 'iterkeys'):
        m = dictish.iterkeys
    elif hasattr(dictish, 'keys'):
        m = dictish.keys
    else:
        raise ValueError(dictish)
    return dict((k, dictish[k]) for k in m())
def none_if_empty(tup):
    """Returns None if passed an empty tuple.

    This is helpful since a SimpleVar is actually an IndexedVar with a
    single index of None rather than the more intuitive empty tuple.
    """
    # use equality, not identity: `tup is ()` is unreliable (and a
    # SyntaxWarning on newer Pythons) since () may or may not be interned
    if tup == ():
        return None
    else:
        return tup
def get_store_key(key):
    """Return the key to use with homeassistant.helpers.storage.Storage."""
    return key if "/" in key else f"hacs.{key}"
def _add_dot(ext_list):
    """Add a dot (.) to the beginning of each extension in a list.

    Args:
        ext_list (list): A list of file extensions.

    Returns:
        A list of extensions, with a dot prepended to each extension,
        if it doesn't already exist.
    """
    # LOOP THROUGH EXTENSIONS
    for idx, ext in enumerate(ext_list):
        # TEST FOR DOT (.ext) >> IF NOT, ADD IT AND UPDATE LIST
        if not ext.startswith('.'):
            ext_list[idx] = '.%s' % ext
    # RETURN MODIFIED EXTENSION LIST
    return ext_list
def remove_nulls(data):
    """
    Removes those key-value pairs from data where the value is None.

    :param data:
    :return: the cleaned up data
    """
    null_keys = []
    for key, val in data.items():
        if val is None:
            null_keys.append(key)
    for key in null_keys:
        del data[key]
    return data
def multiplicative_inverse(a, b):
    """Returns the multiplicative inverse of a modulo b, computed with the
    extended Euclidean algorithm."""
    # lx = multiplicative inverse of a mod b;
    # ly = multiplicative inverse of b mod a.
    # Negative values for lx or ly are made positive mod b or a respectively.
    # The iterative version is faster and uses much less stack space.
    x = 0
    y = 1
    lx = 1
    ly = 0
    oa = a  # Remember original a/b to remove
    ob = b  # negative values from return results
    while b != 0:
        q = a // b
        (a, b) = (b, a % b)
        (x, lx) = ((lx - (q * x)), x)
        (y, ly) = ((ly - (q * y)), y)
    if lx < 0:
        lx += ob  # If negative, wrap modulo original b
    if ly < 0:
        ly += oa  # If negative, wrap modulo original a
    # return a, lx, ly  # to also return the gcd and both inverses
    return lx  # Return only the positive inverse of a mod b
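A sanity-check sketch for multiplicative_inverse, assuming the function above is in scope; 23 is the inverse of 7 modulo 40 because 7 * 23 == 161 == 4*40 + 1:

inv = multiplicative_inverse(7, 40)
assert inv == 23 and (7 * inv) % 40 == 1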
def _dict_sub(left, right):
    """
    >>> _dict_sub({'a': 21}, {'a': 7})
    {'a': 14}

    :param left:
    :param right:
    :return:
    """
    new = left.copy()
    for key, value in right.items():
        new[key] -= value
    return new
def metric_max_over_ground_truths(metric_fn, predictions, ground_truths):
    """Take the average best score against all ground truth answers.

    This is a bit different than SQuAD in that there are multiple answers
    **and** predictions that we average over. For some situations (e.g.,
    *top k* beams or multiple human references) we might want to
    calculate the average performance. In most cases, however,
    predictions will be a list of length 1.

    Args:
        metric_fn: Callable on (prediction, ground_truth).
        predictions: List of whitespace separated prediction tokens.
        ground_truths: List of whitespace separated answer tokens.

    Returns:
        max_score: Max output of metric_fn.
    """
    all_metrics = []
    for prediction in predictions:
        scores_for_ground_truths = []
        for ground_truth in ground_truths:
            score = metric_fn(prediction, ground_truth)
            scores_for_ground_truths.append(score)
        all_metrics.append(max(scores_for_ground_truths))
    return sum(all_metrics) / len(all_metrics)
def find_min_iterative(array, left, right):
    """
    Find the minimum in a rotated sorted array in O(log n) time.

    >>> find_min_iterative([1, 2, 3, 4, 5, 6], 0, 5)
    1
    >>> find_min_iterative([6, 5, 4, 3, 2, 1], 0, 5)
    1
    >>> find_min_iterative([6, 5, 1, 4, 3, 2], 0, 5)
    1
    """
    while array[left] > array[right]:
        mid = left + (right - left) // 2
        if array[mid] < array[right]:
            right = mid
        else:
            left = mid + 1
    return array[left]
def clustercenter(samples):
    """
    Computes the geometric center of a set of vectors.

    samples = [[v11, ..., v1N],
               ...
               [vn1, ..., vnN]]
    """
    N, dim = len(samples), len(samples[0])
    if N == 1:
        # singleton cluster
        return samples[0]
    # Cluster center is the average in all dimensions
    dsum = [0.0] * dim
    for d in range(dim):
        for i in range(N):
            dsum[d] += samples[i][d]
        dsum[d] /= N
    return dsum
def is_in(elements, value):
    """
    Determines if a value is in a list. It also handles degenerate cases
    when, instead of a list, elements is True, False or None.
    """
    if not elements:
        return False
    elif elements is True:
        return True
    else:
        return value in elements
def _setUnits(value):
    """
    Convert a value in bytes to a human-readable format with units.
    """
    if value > 1024.0**3:
        value = value / 1024.0**3
        unit = 'GB'
    elif value > 1024.0**2:
        value = value / 1024.0**2
        unit = 'MB'
    elif value > 1024.0**1:
        value = value / 1024.0**1
        unit = 'kB'
    else:
        unit = ' B'
    return value, unit
def verify_non_decreasing(arr: list) -> bool:
    """O(n) time & O(n) space."""
    len_arr = len(arr)
    if len_arr < 3:
        return True
    stack = []
    count_del = 0
    for element in arr:
        while stack and stack[-1] > element:
            count_del += 1
            stack.pop()
        if count_del > 1:
            return False
        stack.append(element)
    return True
def scale(pnt, frac):
    """scale a point"""
    return pnt[0] * frac, pnt[1] * frac
def get_middle_indexes(lst):
    """Fetch indexes of the one or two middle numbers of a list."""
    n = len(lst)
    if n <= 2:
        return [None]
    if n % 2 == 0:
        # integer division: indexes must be ints
        return [n // 2 - 1, n // 2]
    else:
        return [n // 2]
def subset_dict(dictionary, keys):
    """
    Returns a dict that contains all key-value pairs in dictionary where
    the key is one of the provided keys.

    This is used by some of the methods in message.py.
    """
    keys = set(keys)
    return {key: value for key, value in dictionary.items() if key in keys}
def parse_git_url(url):
    """
    Return an (organization, repository) tuple from the url line of a
    .git/config file.
    """
    if url.startswith("git@"):
        # deal with git@github.com:org/repo.git
        url = url.split(":")[1]
    if url.endswith(".git"):
        # rstrip(".git") would strip any trailing '.', 'g', 'i', 't'
        # characters, mangling names like "config" -> "conf";
        # slice off the suffix instead
        url = url[:-len(".git")]
    org, repo = url.split("/")[-2:]
    return org, repo
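A usage sketch for parse_git_url, assuming the function above is in scope; both URL styles found in a .git/config are covered:

assert parse_git_url("git@github.com:pytorch/pytorch.git") == ("pytorch", "pytorch")
assert parse_git_url("https://github.com/numpy/numpy.git") == ("numpy", "numpy")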
def tri_reflect_y(a, b, c):
    """Reflects the given triangle through the x-axis and returns the
    coordinates of the new triangle"""
    return (1 - c, 1 - b, 1 - a)
def sign(x) -> int:
    """Return the sign of the argument. [-1, 0, 1]"""
    return x and (1, -1)[x < 0]
def content_function(obj):
    """Just return content"""
    _, _, _, content = obj
    return 'content', content
def is_breakpoint(BREAKPOINTS, PC):
    """
    Determine if the current program counter is at a predetermined
    breakpoint.

    Parameters
    ----------
    BREAKPOINTS : list, mandatory
        A list of the predetermined breakpoints
    PC : int, mandatory
        The current value of the program counter

    Returns
    -------
    True if the current program counter is at a breakpoint
    False if the current program counter is not at a breakpoint

    Raises
    ------
    N/A

    Notes
    -----
    N/A
    """
    for i in BREAKPOINTS:
        if str(i) == str(PC):
            return True
    return False
def get_chunk_type(tok, idx_to_tag):
    # method implemented in
    # https://github.com/guillaumegenthial/sequence_tagging/blob/master/model/data_utils.py
    """
    Args:
        tok: id of token, ex 4
        idx_to_tag: dictionary {4: "B-PER", ...}

    Returns:
        tuple: "B", "PER"
    """
    tag_name = idx_to_tag[tok]
    tag_class = tag_name.split('-')[0]
    tag_type = tag_name.split('-')[-1]
    return tag_class, tag_type
def human_readable(additions):
    """Format additions in a human-readable way.

    Parameters
    ----------
    additions : list
        Additions to a given word

    Returns
    -------
    str
        Easily readable additions
    """
    result = ""
    for a in additions:
        result = result + a["letter"].upper() + ": " + a["word"] + "\n"
    return result
def distanceSquared(a, b):
    """Squared L2 distance"""
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    total = 0
    for i in range(len(a)):
        total = total + (a[i] - b[i]) * (a[i] - b[i])
    return total
def open_file(file):
    """
    Receives one parameter: a file path (mainly .txt). Opens the file,
    reads its lines, returns them through the "text" variable, and closes
    the file automatically.
    """
    with open(file, "r") as f:
        text = f.readlines()
    return text
def make_jinja2_filename(file_name: str) -> str:
    """
    Add .jinja2 to a filename.

    :param file_name: the filename without an extension.
    :return: the filename.
    """
    return f'{file_name}.jinja2'
def GetExonsRange(exons, first, last, full=True, min_overlap=0, min_exon_size=0):
    """get exons in range (first:last) (peptide coordinates).

    Set full to False, if you don't require full coverage.
    """
    new = []
    me = 3 * min_exon_size
    mo = 3 * min_overlap
    for e in exons:
        if e.mPeptideFrom > last or e.mPeptideTo < first:
            continue
        if full and (e.mPeptideTo > last or e.mPeptideFrom < first):
            continue
        overlap = min(e.mPeptideTo, last) - max(e.mPeptideFrom, first)
        if overlap < mo:
            continue
        if e.mPeptideTo - e.mPeptideFrom < me:
            continue
        new.append(e.GetCopy())
    return new
def l2_norm(lst):
    """
    Calculates the squared l2 norm (sum of squares) of a list of numbers.
    """
    return sum([x*x for x in lst])
def left_to_right_check(input_line: str, pivot: int) -> bool:
    """
    Check row-wise visibility from left to right.

    Return True if the number of buildings visible from the left-most
    hint looking to the right equals the hint, False otherwise.

    input_line - representing board row.
    pivot - number on the left-most hint of the input_line.

    >>> left_to_right_check("412453*", 4)
    True
    >>> left_to_right_check("452453*", 4)
    False
    """
    line = list(map(int, list(input_line[1:-1])))
    max_height = line[0]
    visibility = 1
    for building in line[1:]:
        if building > max_height:
            visibility += 1
            max_height = building
    if visibility == pivot:
        return True
    return False
def to_tterm(t):
    """Turns a term into a hashable term."""
    if not isinstance(t, tuple):
        return t
    h, bs = t
    cs = map(to_tterm, bs)
    ds = tuple(cs)
    return (h, ds)
def partition(lst, fn):
    """Partition lst by predicate.

    - lst: list of items
    - fn: function that returns True or False

    Returns new list: [a, b], where `a` are items that passed fn test,
    and `b` are items that failed fn test.

    >>> def is_even(num):
    ...     return num % 2 == 0

    >>> def is_string(el):
    ...     return isinstance(el, str)

    >>> partition([1, 2, 3, 4], is_even)
    [[2, 4], [1, 3]]

    >>> partition(["hi", None, 6, "bye"], is_string)
    [['hi', 'bye'], [None, 6]]
    """
    true_list = []
    false_list = []
    for val in lst:
        if fn(val):
            true_list.append(val)
        else:
            false_list.append(val)
    return [true_list, false_list]
def get_last_byte(buf):
    """Get the last byte as an int value."""
    return ord(buf[-1:])
def sign(x):
    """Return `-1` if `x < 0`, `0` if `x == 0` and `1` if `x > 0`."""
    return 0 if x == 0 else (1 if x > 0 else -1)
def integer_at_least(actual_value, expected_value):
    """Assert that actual_value is an integer of at least expected_value."""
    result = isinstance(actual_value, int)
    if result:
        result = actual_value >= expected_value
    if result:
        return result
    else:
        raise AssertionError(
            "{!r} is NOT an INTEGER, or is LESS than {!r}".format(
                actual_value, expected_value
            )
        )
def post_data(data):
    """
    Take a dictionary of test data (suitable for comparison to an
    instance) and return a dict suitable for POSTing.
    """
    ret = {}
    for key, value in data.items():
        if value is None:
            ret[key] = ""
        elif type(value) in (list, tuple):
            if value and hasattr(value[0], "pk"):
                # Value is a list of instances
                ret[key] = [v.pk for v in value]
            else:
                ret[key] = value
        elif hasattr(value, "pk"):
            # Value is an instance
            ret[key] = value.pk
        else:
            ret[key] = str(value)
    return ret
def mk_or_expr(expr1, expr2):
    r"""
    returns an or expression of the form (EXPR1 \/ EXPR2)
    where EXPR1 and EXPR2 are expressions
    """
    return {"type": "or",
            "expr1": expr1,
            "expr2": expr2}
def get_y_values(x: int, slopes: list, coordinates: list, edge_count: int) -> list:
    """
    Calculate the y value at the current x from each edge.

    :param x: x-coordinate of the current node
    :param slopes: a list of slopes of all edges of the polygon
    :param coordinates: a list of vertices of the polygon
    :param edge_count: no. of edges in the polygon
    :return: a list of all y-values
    """
    # Define an empty list to store all y-values
    dist = []
    for i in range(edge_count):
        dist.append(slopes[i] * (x - coordinates[i][0]) + coordinates[i][1])
    # Return the list of y-values
    return dist
def swsphericalh_A(s, l, m):
    """
    Angular separation constant at a=0.

    Eq. (50). Has no dependence on m. The formula is

        A_0 = l(l+1) - s(s+1)

    Parameters
    ----------
    s: int
        Spin-weight of interest
    l: int
        Angular quantum number of interest
    m: int
        Magnetic quantum number, ignored

    Returns
    -------
    int
        Value of A(a=0) = l(l+1) - s(s+1)
    """
    return l*(l+1) - s*(s+1)
def time_format(s):
    """
    Takes a duration in seconds and formats it into days, hours and
    minutes (seconds are dropped).
    """
    minutes, seconds = divmod(s, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    if s < 3600:
        return f'{minutes}m'
    elif s < 86400:
        return f'{hours}h {minutes}m'
    else:
        return f'{days}d {hours}h {minutes}m'
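A quick check of time_format, assuming the function above is in scope; 90061 seconds is one day, one hour, one minute and one second, with the second dropped by design:

assert time_format(90061) == '1d 1h 1m'
assert time_format(125) == '2m'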
def add_char(num, char=" "):
    """Creates a string value given a number and character.

    args:
        num (int): Amount to repeat character

    kwargs:
        char (str): Character value to loop

    Returns (str): Iterated string value for given character
    """
    string = ""
    for i in range(num):
        string += char
    return string
def turn(p1, p2, p3):
    """
    Return 0 if the points are collinear, 1 if the points define a
    left turn, and -1 if the points define a right turn.
    """
    # Compute the z-coordinate of the cross product p1p2 x p1p3
    z = (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])
    return 0 if z == 0 else int(z / abs(z))
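A small sketch of turn, assuming the function above is in scope, with made-up points:

assert turn((0, 0), (1, 0), (1, 1)) == 1    # left turn
assert turn((0, 0), (1, 0), (2, 0)) == 0    # collinear
assert turn((0, 0), (1, 0), (1, -1)) == -1  # right turn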
def remove_unique_entries(A, B):
    """
    Remove any entries that only exist in one of the two data sets.
    """
    A_common, B_common = {}, {}
    common_keys = list(set(A.keys()) & set(B.keys()))
    sorted_keys = sorted(common_keys)
    for key in sorted_keys:
        A_common[key] = {}
        B_common[key] = {}
        common_subkeys = list(set(A[key].keys()) & set(B[key].keys()))
        sorted_subkeys = sorted(common_subkeys)
        for subkey in sorted_subkeys:
            A_common[key][subkey] = A[key][subkey]
            B_common[key][subkey] = B[key][subkey]
    return A_common, B_common
def _sorted_pe(inlist):
    """
    Generate suitable inputs to ``3dQwarp``.

    Example
    -------
    >>> paths, args = _sorted_pe([
    ...     ("dir-AP_epi.nii.gz", {"PhaseEncodingDirection": "j-"}),
    ...     ("dir-AP_bold.nii.gz", {"PhaseEncodingDirection": "j-"}),
    ...     ("dir-PA_epi.nii.gz", {"PhaseEncodingDirection": "j"}),
    ...     ("dir-PA_bold.nii.gz", {"PhaseEncodingDirection": "j"}),
    ...     ("dir-AP_sbref.nii.gz", {"PhaseEncodingDirection": "j-"}),
    ...     ("dir-PA_sbref.nii.gz", {"PhaseEncodingDirection": "j"}),
    ... ])
    >>> paths[0]
    ['dir-AP_epi.nii.gz', 'dir-AP_bold.nii.gz', 'dir-AP_sbref.nii.gz']
    >>> paths[1]
    ['dir-PA_epi.nii.gz', 'dir-PA_bold.nii.gz', 'dir-PA_sbref.nii.gz']
    >>> args
    '-noXdis -noZdis'
    >>> paths, args = _sorted_pe([
    ...     ("dir-AP_epi.nii.gz", {"PhaseEncodingDirection": "j-"}),
    ...     ("dir-LR_epi.nii.gz", {"PhaseEncodingDirection": "i"}),
    ... ])  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ValueError:
    """
    out_ref = [inlist[0][0]]
    out_opp = []
    ref_pe = inlist[0][1]["PhaseEncodingDirection"]
    for d, m in inlist[1:]:
        pe = m["PhaseEncodingDirection"]
        if pe == ref_pe:
            out_ref.append(d)
        elif pe[0] == ref_pe[0]:
            out_opp.append(d)
        else:
            raise ValueError("Cannot handle orthogonal PE encodings.")

    return (
        [out_ref, out_opp],
        {"i": "-noYdis -noZdis",
         "j": "-noXdis -noZdis",
         "k": "-noXdis -noYdis"}[ref_pe[0]],
    )
def substractArray(array1, array2):
    """
    Subtracts array2 from array1: every number in array1 that is also
    present in array2 is deleted.
    """
    result = array1.copy()
    for i in range(0, len(array2)):
        while result.count(array2[i]) > 0:
            result.remove(array2[i])
    return result
def str_to_grid(sudoku, fill_possibilities=False):
    """
    Convert an 81-char string sequence representing a sudoku into a grid
    (dict) with keys as indexes [0-80] and values as sets of possible
    assignments. Valid values are [1-9] for filled cells. All other
    characters represent an empty cell.
    """
    if len(sudoku) != 81:
        return None
    # note: all empty cells share this one set object; mutate with care
    possibilities = set(range(1, 10)) if fill_possibilities else set()
    return {ndx: {int(s)} if s in '123456789' else possibilities
            for ndx, s in enumerate(sudoku)}
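A usage sketch for str_to_grid, assuming the function above is in scope; the puzzle string is padded with dots to reach the required 81 characters:

puzzle = "53..7...." + "." * 72  # hypothetical puzzle, 81 chars total
grid = str_to_grid(puzzle)
assert grid[0] == {5} and grid[1] == {3} and grid[2] == set()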
def process_contact_line(line):
    """Return the contact counts for a line of ibuContacts data."""
    return [int(field) for field in line.split()]
def get_user_full_name(username: str) -> str:
    """Returns a user's full name given a username, or the original value
    if not found."""
    full_name = username
    with open("/etc/passwd") as f:
        for line in f:
            if line.split(":")[0] == username:
                # GECOS field; strip the trailing newline ("\n", not "/n")
                full_name = line.split(":")[4].strip("\n")
                break
    if full_name:
        full_name = full_name.replace(",,,", "")
        return full_name
    return username
def linear_model(slope, x0, x):
    """Returns the function value of a linear model.

    The model is y = slope*x + x0.

    Parameters:
        * slope: the slope of the linear function
        * x0: the y-value at x=0
        * x: the x value for which the y value should be determined
    """
    return slope * x + x0
def hex_to_rgb(value="FFFFFF"):
    """Converts hex color to RGB color.

    Args:
        value (str, optional): Hex color code as a string. Defaults to
            'FFFFFF'.

    Returns:
        tuple: RGB color as a tuple.
    """
    value = value.lstrip("#")
    lv = len(value)
    return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
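A quick check of hex_to_rgb, assuming the function above is in scope:

assert hex_to_rgb("#FF8800") == (255, 136, 0)
assert hex_to_rgb() == (255, 255, 255)  # default 'FFFFFF'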
def get_cost(prices, item, quantity):
    """
    Calculates the cost of an item based on quantity.

    Args:
        prices (dict): {item_code: price}
        item (str): item_code
        quantity (int): quantity of item in basket

    Returns:
        The total cost, quantity * prices[item]
    """
    return quantity * prices[item]
def find_nouns(tagged_words, skip_words_in_nlp=0):
    """
    Compile a list of nouns in a tagged word list.

    Note: the treebank tagger works better than the standard
    nltk.pos_tag(). Tags can be compiled with
        treebank_tagger = nltk.data.load(
            'taggers/maxent_treebank_pos_tagger/english.pickle')
        tagged_words = treebank_tagger.tag(tokens)

    :param list tagged_words: Tagged word list from an NLTK tagger
    :param int skip_words_in_nlp: During noun search, if the initial
        word(s) of a sentence should not be considered, pass a value > 0.
        For example, in a sentence "define word" where "define" is an
        expected trigger word, skipping it will avoid having it
        misclassified in the noun list.
    :return: List of nouns
    :rtype: list
    """
    nouns = list()
    current = False
    for word, pos in tagged_words[skip_words_in_nlp:]:
        if pos[0:2] != 'NN' and current:
            nouns.append(current)
            current = False
        if pos[0:2] != 'NN':
            continue
        if not current:
            current = word
        else:
            # If multiple NN* tags follow, combine the words back into a
            # single noun
            current += " " + word
    if current:
        nouns.append(current)
    return nouns