def str2bool(value):
    """Convert a string value to boolean."""
    return value.lower() in ("yes", "true", "1")
def jaccard_index_pair_counts(a, b, c, d=None):
    """
    Compute the Jaccard index from pair counts; helper function.

    Arguments:
        a: number of pairs of elements that are clustered in both partitions
        b: number of pairs of elements that are clustered in first but not second partition
        c: number of pairs of elements that are clustered in second but not first partition
        d: number of pairs of elements that are clustered in neither partition
           (accepted for completeness; the Jaccard index does not use it)

    Example usage:

    In [1]: a = 1
    In [2]: b = 2
    In [3]: c = 2
    In [4]: d = 10
    In [5]: jaccard_index_pair_counts(a, b, c, d)
    Out[5]: 0.2
    """
    if a + b + c != 0:
        return float(a) / float(a + b + c)
    else:
        return 1.0
def superscript(text: str) -> str:
    """
    Return the *text* surrounded by superscript HTML tags.

    Superscript text appears half a character above the normal line,
    and is sometimes rendered in a smaller font.

    >>> superscript("foo")
    '<sup>foo</sup>'
    """
    return f"<sup>{text}</sup>"
def bits_list(bytes_l):
    """Transform a list of byte offsets to a list of bit offsets.

    :param bytes_l: list of offsets (integer)
    :return: a list
    """
    bits_l = []
    for i in bytes_l:
        bits_l.extend(range(i * 8, i * 8 + 8))
    return bits_l
def isBanned(text):
    """Check whether the response shows we were caught as a spider
    (a captcha page); if so, the caller should change its cookie."""
    captcha_url = 'http://www.dianping.com/alpaca/captcha.jpg'
    if captcha_url in text:
        return True
    return False
def clean_name_arr(name_arr, query):
    """
    Only return values from name_arr that are a substring of query.

    name_arr: list of names to match against the query
    """
    correct_names = []
    query = query + " "
    lowercase_query = query.lower()
    quote_removed_query = lowercase_query.replace('\\"', '')
    question_removed_query = lowercase_query.replace('?', '')
    quote_removed_question_query = lowercase_query.replace('"', '').replace('?', '')
    for k in name_arr:
        spaced_k = k.lower() + " "
        if spaced_k in lowercase_query or \
           spaced_k in quote_removed_query or \
           spaced_k in question_removed_query or \
           spaced_k in quote_removed_question_query:
            correct_names.append(k)
    return correct_names
def remove_superior_symbol(text):
    """Remove superior and inferior symbols from text"""
    text = text.replace(">", "")
    text = text.replace("<", "")
    return text
def timesteps2str(checkpoints):
    """
    :param checkpoints: (list of float)
    :return: (list of str)
    """
    checkpoints_str = []
    for check in checkpoints:
        if check >= 1e6:
            checkpoints_str.append("{:.1f} M".format(check / 1e6))
        elif check >= 1e3:
            checkpoints_str.append("{:.1f} K".format(check / 1e3))
        else:
            checkpoints_str.append("{:.1E}".format(check))
    return checkpoints_str
def rgb2hex(rgb):
    """
    Converts rgb color to hex

    Args:
        rgb: color in rgb, e.g. (255,0,0)
    """
    return '0x%02x%02x%02x' % rgb
def find_position(string, index, last_index, last_pos):
    """
    Given a string and index, return (line, column)
    """
    lines = string.count('\n', last_index, index)
    if lines > 0:
        column = index - string.rfind('\n', last_index, index)
    else:
        column = last_pos[1] + (index - last_index)
    return (last_pos[0] + lines, column)
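
# A hypothetical usage sketch for find_position (not from the original source):
# the (last_index, last_pos) pair lets a caller track line/column incrementally
# instead of rescanning the string from the start on every lookup.
text = "ab\ncd\nef"
pos = (1, 0)           # assume we start at line 1, column 0
last = 0
for index in (1, 4, 7):
    pos = find_position(text, index, last, pos)
    last = index
    print(index, pos)  # 1 (1, 1) / 4 (2, 2) / 7 (3, 2)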
def kelvin_to_farenheit(temp: float):
    """
    Converts kelvin to Fahrenheit
    """
    far = (temp - 273.15) * (9 / 5) + 32
    return int(far)
def append_csv_data(file_strings):
    """
    Append data from multiple csv files for the same time period

    Parameters
    -----------
    file_strings : array-like
        Lists or arrays of strings, where each string contains one file of data

    Returns
    -------
    out_string : string
        String with all data, ready for output to a file
    """
    out_lines = list()
    head_line = None

    # Cycle through the list of file strings, creating a list of line strings
    for fstrings in file_strings:
        file_lines = fstrings.split('\n')

        # Remove and save the header line (identical across files)
        head_line = file_lines.pop(0)

        # Save the data lines
        out_lines.extend(file_lines)

    # Sort the output lines by date and station (first two columns) in place
    out_lines.sort()

    # Remove all zero-length lines from front, add one to back, and add header
    i = 0
    while len(out_lines[i]) == 0:
        out_lines.pop(i)
    out_lines.insert(0, head_line)
    out_lines.append('')

    # Join the output lines into a single string
    out_string = "\n".join(out_lines)
    return out_string
def poly6(x, b0, b1, b2, b3, b4, b5, b6):
    """
    Taylor polynomial for fit

    b1 = GD
    b2 = GDD / 2
    b3 = TOD / 6
    b4 = FOD / 24
    b5 = QOD / 120
    b6 = SOD / 720
    """
    return (
        b0
        + b1 * x
        + b2 * x ** 2
        + b3 * x ** 3
        + b4 * x ** 4
        + b5 * x ** 5
        + b6 * x ** 6
    )
def unifymorphfeat(feats, percolatefeatures=None):
    """Get the sorted union of features for a sequence of feature vectors.

    :param feats: a sequence of strings of comma/dot separated feature vectors.
    :param percolatefeatures: if a set is given, select only these features;
        by default all features are used.

    >>> print(unifymorphfeat({'Def.*.*', '*.Sg.*', '*.*.Akk'}))
    Akk.Def.Sg
    >>> print(unifymorphfeat({'LID[bep,stan,rest]', 'N[soort,ev,zijd,stan]'}))
    bep,ev,rest,soort,stan,zijd"""
    sep = '.' if any('.' in a for a in feats) else ','
    result = set()
    for a in feats:
        if '[' in a:
            a = a[a.index('[') + 1:a.index(']')]
        result.update(a.split(sep))
    if percolatefeatures:
        result.intersection_update(percolatefeatures)
    return sep.join(sorted(result - {'*', '--'}))
def make_valid_did(lfn_dict):
    """
    When managing information about a LFN (such as in `rucio upload` or the
    RSE manager's upload), we add the `filename` attribute to record the name
    of the file on the local disk in addition to the remainder of the DID
    information.

    This function will take that python dictionary, and strip out the
    additional `filename` key.  If this is not done, then the dictionary will
    not pass the DID JSON schema validation.
    """
    lfn_copy = dict(lfn_dict)
    lfn_copy['name'] = lfn_copy.get('name', lfn_copy['filename'])
    del lfn_copy['filename']
    return lfn_copy
def find_zeros(matrix):
    """Returns a dict with lists of the rows and columns that contain zeros
    within the matrix

    >>> zero_coordinates = find_zeros([['0', 'w', 'e'], ['a', 's', 'd'], ['z', 'x', '0']])
    >>> sorted(zero_coordinates.items())
    [('columns', [0, 2]), ('rows', [0, 2])]
    """
    rows = []
    columns = []
    coordinates = {'rows': rows, 'columns': columns}
    for row_number, row in enumerate(matrix):  # avoid shadowing builtin `list`
        for element_number, element in enumerate(row):
            if element == '0':
                coordinates['rows'].append(row_number)
                coordinates['columns'].append(element_number)
    return coordinates
def callback(msg):
    """return success responseText"""
    return {'success': True, 'msg': msg}
def parse_single_input_size(input_size):
    """Given an int or a tuple, return a list of tuples"""
    if isinstance(input_size, tuple):
        return [input_size]
    if int(input_size) == input_size:  # is integer?
        input_size = (input_size,)
    return [input_size]
def needs_text_relocation(m_type, m_subtype):
    """
    Check whether the given filetype is text that may need relocation.
    """
    return (m_type == "text")
def is_unusual_tag(tagstr):
    """Identify unusual tags with a word to comma ratio > 3.

    Parameters
    ----------
    tagstr : str
        tag string to check.

    Returns
    -------
    bool
        True if the tag is unusual, else False.
    """
    if not tagstr:
        return False
    nwords = len(tagstr.split())
    ncommas = tagstr.count(",") + 1
    if nwords / ncommas > 3:
        return True
    return False
def safe_cast(t, s, default=None):
    """Cast s to type t.

    Return `default` if a ValueError or TypeError occurs.
    """
    try:
        return t(s)
    except (ValueError, TypeError):
        return default
def set_up_default_channels(clean_up=False):
    """Block of text to populate a Text widget."""
    channels = ["@my-favorite-channel, 5",
                "@OdyseeHelp#b, 4",
                "@lbry:3f, 6"]

    if clean_up:
        channels = ["@OdyseeHelp, 2",
                    "@my-favorite-channel-vmv, 15",
                    "@lbry, 1",
                    "@The-Best-Channel-ABC, 5"]

    channels = "\n".join(channels)
    return channels
def _add_lists(list1, list2):
    """Takes polynomial sequences of two annihilators a and b and returns
    the list of polynomials of the sum of a and b.
    """
    if len(list1) <= len(list2):
        sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
    else:
        sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
    return sol
def is_prime(n):
    """
    @purpose
        Checks whether the passed number n is prime, and returns True/False
    @parameters:
        n - the number to be checked for primeness
    @complexity:
        O(n)
    @precondition:
        The function is passed a positive integer value
    @postcondition:
        Returns True/False depending on primeness
    """
    k = 3
    flag = True
    if n == 2:                   # if it's 2, it's prime
        flag = True
    elif n % 2 == 0 or n == 1:   # if even number or 1, then not prime
        flag = False
    else:
        # while (k <= math.sqrt(n)): alternative, we only have to do trial
        # division on numbers up to sqrt(n)
        while k < n:
            if n % k == 0:
                flag = False
                break
            k += 1
    return flag
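
# A small illustration (not from the original source) of the sqrt(n) bound
# mentioned in the comment above: trial division only needs to test odd
# factors up to sqrt(n), dropping the complexity from O(n) to O(sqrt(n)).
# is_prime_sqrt is a hypothetical variant added here for comparison.
import math

def is_prime_sqrt(n):
    """Hypothetical variant of is_prime using the sqrt(n) cutoff."""
    if n == 2:
        return True
    if n < 2 or n % 2 == 0:
        return False
    for k in range(3, math.isqrt(n) + 1, 2):
        if n % k == 0:
            return False
    return True

assert all(is_prime(n) == is_prime_sqrt(n) for n in range(1, 200))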
def _split(value):
    """Split input/output value into two values."""
    if isinstance(value, str):
        # iterable, but not meant for splitting
        return value, value
    try:
        invalue, outvalue = value
    except TypeError:
        invalue = outvalue = value
    except ValueError:
        raise ValueError('Only single values and pairs are allowed')
    return invalue, outvalue
def calculate_similarities(user_scores, prediction_scores):
    """
    Count the number of scores that are the same, so we know how accurate a
    computed list has been.

    :param user_scores: the scores of the user
    :param prediction_scores: the scores of the prediction
    :return: the percentage of scores that were the same
    """
    user_scores = list(user_scores)
    prediction_scores = list(prediction_scores)
    c = 0
    for i in range(len(user_scores)):
        if user_scores[i] == prediction_scores[i]:
            c += 1
    return c / float(len(user_scores)) * 100
def bool2R(value):
    """Transforms a boolean into a R boolean value T or F"""
    if value is True:
        return "T"
    elif value is False:
        return "F"
    else:
        raise ValueError("expecting a boolean value")
def file_get_contents(filename):
    """
    Returns file contents as a string.
    """
    with open(filename) as file:
        return file.read()
def validate_responses(args):
    """validate response details"""
    try:
        if args["name"] == '' or args["name"].isspace() or \
           args["email"] == '' or args["email"].isspace() or \
           args["phone"] == '' or args["phone"].isspace() or \
           args["message"] == '' or args["message"].isspace():
            return {
                "status": 401,
                "error": "Kindly note that the fields cannot be left empty"
            }, 401
        elif args["name"].isdigit() or args["email"].isdigit() or \
                args["message"].isdigit():
            return {
                "status": 401,
                "error": "The fields should be described in words"
            }, 401
        return "valid"
    except Exception as error:
        return {
            "status": 401,
            "error": "please provide all the fields, missing " + str(error)
        }, 401
def primes_upto(n):
    """Use the Sieve of Eratosthenes."""
    # In the beginning we set all numbers to be prime candidates
    candidates = [x for x in range(2, n + 1)]
    i = 0
    while i < len(candidates):
        # Remove numbers which are multiples of the current prime
        prime = candidates[i]
        candidates = [j for j in candidates if j == prime or j % prime != 0]
        i += 1
    return candidates
def normalize_sequence(sequence):
    """Normalize sequence names to a common format.

    This is required so that <1>, <Button-1>, and <ButtonPress-1> all map to
    the same handler, and so that <Alt-Shift-Control-1> and
    <Shift-Alt-Control-1> map to the same event.
    """
    # Split on the dash character
    parts = sequence[1:-1].split('-')
    if len(parts) == 1:
        # If there's only one part, it's a button press number
        normalized = ['Button', parts[-1]]
    else:
        # Look at the second last part. If it's Double, handle as a
        # double click. If it's Button or ButtonPress, handle as
        # Button. Otherwise, it's a button press.
        # Any modifiers before the bit describing the button/double
        # should be sorted alphabetically.
        if parts[-2] == 'Double':
            normalized = sorted(parts[:-2]) + parts[-2:]
        elif parts[-2] in ('Button', 'ButtonPress'):
            normalized = sorted(parts[:-2]) + ['Button', parts[-1]]
        else:
            normalized = sorted(parts[:-1]) + ['Button', parts[-1]]
    return '<%s>' % '-'.join(normalized)
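
# A quick check (not from the original source) that the equivalences promised
# in the docstring hold: all spellings of the same event normalize identically.
assert normalize_sequence('<1>') == normalize_sequence('<Button-1>')
assert normalize_sequence('<Button-1>') == normalize_sequence('<ButtonPress-1>')
assert (normalize_sequence('<Alt-Shift-Control-1>')
        == normalize_sequence('<Shift-Alt-Control-1>'))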
def compare_list(list1: list, list2: list) -> bool:
    """Compare two lists, ignoring trailing "\n" or empty entries.

    Note: trailing entries are popped from the input lists in place.
    """
    while list1 and (list1[-1] == "\n" or list1[-1] == ""):
        list1.pop()
    while list2 and (list2[-1] == "\n" or list2[-1] == ""):
        list2.pop()
    return list1 == list2
def concat(*args, sep="/"):
    """Join the given arguments with the separator `sep`."""
    return sep.join(args)
def obter_pos_c(pos):
    """
    obter_pos_c: posicao -> str

    Return the column component of the position.
    """
    if pos[0] in (1, 4, 7):
        return 'a'
    elif pos[0] in (2, 5, 8):
        return 'b'
    elif pos[0] in (3, 6, 9):
        return 'c'
def multiply(inputs_list):
    """
    Multiply input tensors list

    Parameters
    ----------
    inputs_list: Input tensors list
    """
    inputs_num = len(inputs_list)
    if inputs_num < 2:
        raise ValueError('number of tensors in inputs_list must be >= 2')
    outputs = inputs_list[0]
    for i in range(1, inputs_num):
        outputs *= inputs_list[i]
    return outputs
def determine_title_direction(source_languages_list, target_languages_list):
    """
    Function takes two language lists and determines what the direction of the
    request is. This statistic is stored in Ajos and Wenyuan uses it for
    statistics as well.

    :param source_languages_list: A list of languages that are determined to be the source.
    :param target_languages_list: A list of languages that are determined to be the target.
    :return: One of four values: (english_from, english_to, english_both, english_none)
    """
    # Create local lists to avoid changing the main function's lists
    source_languages_local = list(source_languages_list)
    target_languages_local = list(target_languages_list)

    # Exception to be made for languages with 'English' in their name like
    # "Middle English"; otherwise it will always return 'english_both'
    if (
        all("English" in item for item in source_languages_local)
        and len(source_languages_local) > 1
    ):
        source_languages_local.remove("English")
    elif (
        all("English" in item for item in target_languages_local)
        and len(target_languages_local) > 1
    ):
        target_languages_local.remove("English")

    if (
        "English" in source_languages_local
        and "English" in target_languages_local
    ):  # It's in both
        combined_list = source_languages_local + target_languages_local
        if len(list(combined_list)) >= 3:  # It's pretty long
            if len(source_languages_local) >= 2:
                source_languages_local.remove("English")
            elif len(target_languages_local) >= 2:
                target_languages_local.remove("English")

    if "English" in source_languages_local and "English" not in target_languages_local:
        return "english_from"
    elif (
        "English" in target_languages_local
        and "English" not in source_languages_local
    ):
        return "english_to"
    elif "English" in target_languages_local and "English" in source_languages_local:
        return "english_both"
    else:
        return "english_none"
def check_status(p2, p1_row, rating):
    """
    Checks and corrects inconsistencies with companions and incompatibles
    when importing data

    :param p2: str name of companion
    :param p1_row: dict species row from database
    :param rating: int rating for imported plant relation [-1, 0 or 1]
    :return: the corrected species row
    """
    species = p1_row['species_name']
    if rating < 0 and p1_row['known_companions'] is not None and p2 in p1_row['known_companions']:
        p1_row['known_companions'].remove(p2)
        print("removed", p2, "from", species, 'known_companions')
    elif rating == 0 and p1_row['incompatible_with'] is not None and p2 in p1_row['incompatible_with']:
        p1_row['incompatible_with'].remove(p2)
        print("removed", p2, "from", species, 'incompatible_with')
    elif rating == 0 and p1_row['known_companions'] is not None and p2 in p1_row['known_companions']:
        p1_row['known_companions'].remove(p2)
        print("removed", p2, "from", species, 'known_companions')
    # if rated as a companion but the plant is in incompatible_with
    # (rating is in [-1, 0, 1], so the original `rating > 1` was unreachable)
    elif rating > 0 and p1_row['incompatible_with'] is not None and p2 in p1_row['incompatible_with']:
        p1_row['incompatible_with'].remove(p2)
    return p1_row
def ip_to_int(ip: str) -> int:
    """
    Converts a dot-format IP address to an integer.
    """
    i = 0
    for x in ip.split('.'):
        i = i * 256 + int(x)
    return i
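
# For illustration only: a hypothetical inverse, int_to_ip, plus a round-trip
# check. Neither appears in the original source.
def int_to_ip(i: int) -> str:
    """Convert an integer back to a dot-format IPv4 address."""
    return '.'.join(str((i >> shift) & 0xFF) for shift in (24, 16, 8, 0))

assert ip_to_int('192.168.0.1') == 0xC0A80001
assert int_to_ip(ip_to_int('10.0.0.255')) == '10.0.0.255'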
def get_ngrams(terms, poss=None, n=1, included_tags=None, as_strings=True):
    """Returns a list of all ngrams from length 1 to n."""
    ngrams = [(s, e + 1)
              for s in range(len(terms))
              for e in range(s, min(s + n, len(terms)))]

    if poss is not None and included_tags is not None:
        # We do filtering according to pos.
        filtered_ngram = []
        for (s, e) in ngrams:
            if any([poss[i] in included_tags for i in range(s, e)]):
                filtered_ngram.append((s, e))
        ngrams = filtered_ngram

    # Concatenate into strings
    if as_strings:
        ngrams = ['{}'.format(' '.join(terms[s:e])) for (s, e) in ngrams]

    return ngrams
def render_report(jobs_with_error):
    """Build a text report for the jobs with errors."""
    output = []
    for job in jobs_with_error:
        errors_count = job.info.get('errors_count', 0)
        close_reason = job.info.get('close_reason')
        job_id = job.info["id"].split('/')
        url = 'https://app.scrapinghub.com/p/{0}/job/{1}/{2}'.format(
            job_id[0], job_id[1], job_id[2])
        error_message = ['Errors found for job "{0}" ({1}):'.format(
            job.info['spider'], url)]

        if errors_count > 0:
            error_message.append(' There were {} error{}.'.format(
                errors_count, '' if errors_count == 1 else 's'))

        success_reasons = ('no_reason', 'finished')
        if close_reason not in success_reasons:
            error_message.append(' Close reason should not be "{}".'.format(
                close_reason))

        output.append('\n'.join(error_message))
    return '\n\n'.join(output)
def chunks(_list, n):
    """
    Return n-sized chunks from a list.

    Args:
        _list - A list of elements.
        n - Number defining the size of a chunk.

    Returns:
        A list of n-sized chunks.
    """
    return [_list[i:i + n] for i in range(0, len(_list), n)]
def reduce_mapping(mapping, data):
    """Reduce data to contain only properties from mapping"""
    if mapping is True:
        return data
    if data is None:
        return data
    result = {}
    for k, v in mapping.items():  # iteritems() is Python 2 only
        if k not in data:
            continue
        prop = data[k]
        if v is True:
            result[k] = prop
        elif isinstance(v, dict):
            result[k] = reduce_mapping(v, prop)
        elif isinstance(v, list):
            if prop is not None:
                result[k] = [reduce_mapping(v[0], c) for c in prop]
            else:
                result[k] = prop
    return result
def tuple_scale(tup: tuple, factor) -> tuple:
    """Return the tuple with each entry scaled by a factor and rounded to 5 places."""
    return tuple(round(i * factor, 5) for i in tup)
def paren_matcher_less_space(s: str, open_index: int) -> int:
    """
    Solution: Iterate through s from the open paren index, keeping track of
    how many open parens remain unmatched. When the count reaches 0, return
    the index.

    Complexity:
        Time: O(n) - Iterate through our string once
        Space: O(1) - Only a counter and an index are kept
    """
    num_open = 1
    for idx in range(open_index + 1, len(s)):
        ch = s[idx]
        if ch == '(':
            num_open += 1
        elif ch == ')':
            num_open -= 1
        if num_open == 0:
            return idx
    return 0
def parse_api_container_vm(status):
    """Get the container or vm api data and return it formatted in a dictionary.

    It is implemented in this way to allow for more data to be added for
    sensors in the future.
    """
    return {"status": status["status"], "name": status["name"]}
def dict2conll(data, predict):
    """Write the conll string to a file."""
    with open(predict, 'w') as conll_file:
        # the `with` block closes the file; no explicit close() needed
        conll_file.write(data['conll_str'])
def volCube(edge: float) -> float:
    """Finds volume of a cube"""
    volume: float = edge * edge * edge
    return volume
def IsInclude(line):
    """Return True if line is an include of another file."""
    return line.startswith("@include ")
def get_url(api_data) -> str:
    """
    Returns the link to the product on the Loblaws domain; no guarantee the
    link is still accurate/active
    """
    return 'https://www.loblaws.ca' + api_data['link'].strip()
def fact_recur(n):
    """Finding factorial recursively"""
    if n <= 1:  # also handles 0! == 1
        return 1
    return n * fact_recur(n - 1)
def strip_comments(lines):
    """
    Returns the lines from a list of lines with comments and trailing
    whitespace removed.

    >>> strip_comments(['abc', ' ', '# def', 'egh '])
    ['abc', '', '', 'egh']

    It should not remove leading whitespace

    >>> strip_comments([' bar # baz'])
    [' bar']

    It should also strip trailing comments.

    >>> strip_comments(['abc #foo'])
    ['abc']
    """
    return [line.partition('#')[0].rstrip() for line in lines]
def _fx_list_to_dict(fx_vars):
    """Convert fx list to dictionary.

    To be deprecated at some point.
    """
    user_fx_vars = {}
    for fx_var in fx_vars:
        if isinstance(fx_var, dict):
            short_name = fx_var['short_name']
            user_fx_vars.update({short_name: fx_var})
            continue
        user_fx_vars.update({fx_var: None})
    return user_fx_vars
def has_just_kanji(word_reading):
    """Returns True if there is just kanji in the word reading.

    Args:
        word_reading: A dict that might have a 'word' or 'reading' key.

    Returns:
        True if 'reading' is not a key and 'word' is a key in the dict.
    """
    return 'reading' not in word_reading and 'word' in word_reading
def Check_Location(current_table, side, x_ref, y_ref):
    """Check whether it is legal to place a piece at a given location.

    According to the reversi game rule, a legal position is a position where
    there exists at least one straight (horizontal, vertical, or diagonal)
    occupied line between the new piece and another piece of the same side,
    with one or more contiguous pieces from the opposite side between them.

    Args:
        current_table (2D array): 8*8 values indicating the current condition
            of the board.
        side (int): 1 if it is the black side to play, -1 if it is the white
            side to play.
        x_ref: x coordinate of the given location.
        y_ref: y coordinate of the given location.

    Returns:
        Return True if the given location is legal, otherwise return False.
    """
    # when the given location is already occupied, we cannot place a piece
    if current_table[x_ref][y_ref] != 0:
        return False

    # 8 directions
    dx = [-1, 0, 1, -1, 1, -1, 0, 1]
    dy = [-1, -1, -1, 0, 0, 1, 1, 1]

    for k in range(8):
        x = x_ref + dx[k]
        y = y_ref + dy[k]
        # whether there is a piece of the other side in between
        flag = False
        while 0 <= x < 8 and 0 <= y < 8:
            if current_table[x][y] == 0:
                break
            elif current_table[x][y] != side:
                flag = True
                x += dx[k]
                y += dy[k]
            else:
                if flag:
                    return True
                else:
                    break
    return False
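
# A quick sanity check (not from the original source) on the standard reversi
# opening position: black (1) to move has exactly four legal squares.
board = [[0] * 8 for _ in range(8)]
board[3][3], board[4][4] = -1, -1   # white
board[3][4], board[4][3] = 1, 1     # black
legal = [(x, y) for x in range(8) for y in range(8)
         if Check_Location(board, 1, x, y)]
assert len(legal) == 4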
def add_dict_value(dict1, dict2):
    """Add dict2's values to dict1."""
    for key, value in dict2.items():
        dict1[key] = dict1.get(key, 0) + value
    return dict1
def unflatten_dict(d, separator='.'):
    """Unflatten a nested dictionary.

    Transforms {'a.b.c': 5, 'a.d': 6} into {'a': {'b': {'c': 5}, 'd': 6}}

    Args:
        d (dict): flattened dictionary to expand.
        separator (str): the separator used between keys.

    Returns:
        dict: an expanded dictionary with keys uncompressed.
    """
    ret = dict()
    for key, value in d.items():
        parts = key.split(separator)
        node = ret  # avoid shadowing the input dict `d`
        for part in parts[:-1]:
            if part not in node:
                node[part] = dict()
            node = node[part]
        node[parts[-1]] = value
    return ret
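
# For illustration only: a hypothetical flatten_dict counterpart and a
# round-trip check. flatten_dict does not appear in the original source.
def flatten_dict(d, separator='.', prefix=''):
    """Flatten a nested dictionary into {'a.b.c': value} form."""
    ret = {}
    for key, value in d.items():
        full_key = prefix + separator + key if prefix else key
        if isinstance(value, dict):
            ret.update(flatten_dict(value, separator, full_key))
        else:
            ret[full_key] = value
    return ret

flat = {'a.b.c': 5, 'a.d': 6}
assert flatten_dict(unflatten_dict(flat)) == flat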
def parse_kipoi_colname(colname):
    """Parse a kipoi column name into (source_type, model, diff_type).

    input:  "KV:dir:examples/rbp:DIFF"
    output: ("dir", "examples/rbp", "DIFF")
    """
    _, source_type, model, diff_type = colname.split(":", 3)
    return source_type, model, diff_type
def merge(a, b):
    """Shallow merge two dictionaries"""
    r = a.copy()
    r.update(b)
    return r
def starting_life(level):
    """Get the initial life for the given level."""
    return 20 if level < 4 else 18
def has_parent(obj):
    """Return True if obj appears to have a parent, i.e. its __qualname__
    differs from its __name__ (it has an __objclass__ or __self__).

    One could also try inspect.ismethoddescriptor or
    hasattr(obj, '__objclass__').
    """
    return getattr(obj, '__qualname__', str(obj)) != getattr(obj, '__name__')
def rotate_matrix(A, m, n):
    """
    Rotates the given m by n matrix A 90 degrees clockwise.
    """
    B = [[0] * m for i in range(n)]
    for i in range(m):
        for j in range(n):
            B[j][m - i - 1] = A[i][j]
    return B
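
# A small check (not from the original source): rotating a 2x3 matrix
# clockwise yields a 3x2 matrix whose last column is the original first row.
A = [[1, 2, 3],
     [4, 5, 6]]
assert rotate_matrix(A, 2, 3) == [[4, 1],
                                  [5, 2],
                                  [6, 3]]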
def process_one_sentence_to_get_ui_bi_tri_gram(sentence, n_gram=3):
    """
    :param sentence: string. example: 'w17314 w5521 w7729 w767 w10147 w111'
    :param n_gram:
    :return: string. example: 'w17314 w17314w5521 w17314w5521w7729 w5521 w5521w7729 w5521w7729w767 w7729 w7729w767 w7729w767w10147 w767 w767w10147 w767w10147w111 w10147 w10147w111 w111'
    """
    result = []
    word_list = sentence.split(" ")
    length_sentence = len(word_list)
    for i, word in enumerate(word_list):
        word_i = word  # uni-gram
        if n_gram >= 2 and i + 2 <= length_sentence:  # bi-gram
            bigram = "".join(word_list[i:i + 2])
            word_i = word_i + ' ' + bigram
        if n_gram >= 3 and i + 3 <= length_sentence:  # tri-gram
            trigram = "".join(word_list[i:i + 3])
            word_i = word_i + ' ' + trigram
        if n_gram >= 4 and i + 4 <= length_sentence:  # four-gram
            fourgram = "".join(word_list[i:i + 4])
            word_i = word_i + ' ' + fourgram
        if n_gram >= 5 and i + 5 <= length_sentence:  # five-gram
            fivegram = "".join(word_list[i:i + 5])
            word_i = word_i + ' ' + fivegram
        result.append(word_i)
    return " ".join(result)
def _json_key_int(x):
    """
    This attempts to cast the JSON key to an Integer, assuming the JSON
    schema is similar to:

        json = {"inputs": {
            "chemical_name": {
                "0": "ChemX",
                "1": "ChemY",
                "2": "ChemZ"
            }
        }, ...

    This is done because the Pandas DataFrame created from the json.loads()
    dictionary needs to be indexed with Integers, but since JSON keeps its
    keys as Strings, the Python dicts had Strings for keys. This led to the
    Pandas DataFrame from user JSON payloads being indexed with unicode
    Strings, which is unreliable when the default is Integers.

    :param x: Dictionary
    :return: Dictionary
    """
    if isinstance(x, dict):
        try:
            # Try to cast JSON keys to Integers
            return {int(k): v for k, v in x.items()}
        except ValueError:
            # If unable to, keep keys as Strings
            return {k: v for k, v in x.items()}
    return x
def compute_jaccard_index(set1: set, set2: set) -> float:
    """Given two sets, compute the Jaccard index.

    Parameters
    ----------
    set1 : set
        The first set.
    set2 : set
        The second set.

    Returns
    -------
    float
        The Jaccard index.
    """
    if len(set1) + len(set2) == 0:
        raise ValueError('There should be at least one element in set1 or set2.')
    return len(set1.intersection(set2)) / float(len(set1.union(set2)))
def paths_to_endnode(paths):
    """Take a dictionary of paths (keys) and their probability (values) and
    remove everything except the last node of the path from the key.

    Useful for assigning genotypes to clusters based on their relative
    probability of reaching a certain peak.
    """
    endnode_dict = {}
    for path, prob in paths.items():
        try:
            endnode_dict[path[-1]] += prob
        except KeyError:
            endnode_dict[path[-1]] = prob
    return endnode_dict
def findClosestElements(arr, k, x):
    """
    :type arr: List[int]
    :type k: int
    :type x: int
    :rtype: List[int]
    """
    # Set left and right pointers; right starts k short of the end
    left, right = 0, len(arr) - k
    # As long as left is less than right
    while left < right:
        # Find the mid point of left and right
        mid = (left + right) // 2
        # If x is farther from arr[mid] than from arr[mid + k],
        # the window must move right
        if (x - arr[mid]) > (arr[mid + k] - x):
            left = mid + 1
        else:
            right = mid
    return arr[left:left + k]
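
# A quick check (not from the original source), as in the classic
# "k closest elements" problem: the k elements nearest x, in ascending order.
assert findClosestElements([1, 2, 3, 4, 5], k=4, x=3) == [1, 2, 3, 4]
assert findClosestElements([1, 2, 3, 4, 5], k=4, x=-1) == [1, 2, 3, 4]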
def easy_gain(sample_rate, frequency, gain):
    """
    Create an easily interpolated calibration gain value for testing.

    :type sample_rate: float
    :param sample_rate: Sample rate in samples per second
    :type frequency: float
    :param frequency: Frequency in hertz
    :type gain: int
    :param gain: Signal analyzer gain setting in dB
    :rtype: float
    """
    return gain + (sample_rate / 1e6) + (frequency / 1e9)
def formatfloat(x):
    """Convert x to a %.3f-format string."""
    ret = "%.3f" % float(x)
    if float(x) >= 0.0:
        return f" {ret}"
    return ret
def intersection_ll(ll1, ll2):
    """Compute the intersection between two lists of lists."""
    assert type(ll1) == list
    assert type(ll2) == list
    counts = 0
    for list_i in ll1:
        if list_i in ll2:
            counts += 1
    return counts
def flist(start, stop, step):
    """
    Takes in: start, stop, step = integers or floats
    Returns: zlist = list from start to stop with step as increment
    """
    i = 0
    zlist = [start]
    while zlist[i] < stop:
        nextvalue = zlist[i] + step
        zlist.append(nextvalue)
        i += 1
    return zlist
def bts(boolean, y="Y", n="N"):
    """
    Converts a boolean value to a string

    :param boolean: The boolean to be converted
    :param y: [string] the value to be returned if boolean is True
    :param n: [string] the value to be returned if boolean is False
    :return [string]:
    """
    if boolean:
        return y
    return n
def expandtabs(charindex, line):
    """Expand the tabs in line.

    Return the equivalent character index to charindex and the expanded line.
    """
    expandedline = line.expandtabs()
    prefix = line[:charindex].expandtabs()
    expandedcharindex = len(prefix)
    return (expandedcharindex, expandedline)
def is_tweet_valid(tweet: str, key_words: list) -> bool:
    """
    :param tweet: str
    :param key_words: list
    :return: bool
    """
    for key_word in key_words:
        if key_word in tweet:
            return True
    return False
def stackhunter_helper(cookie=0x7afceb58):
    """Args: [cookie = 0x7afceb58]

    The helper for the stackhunter, which prepends the cookie at different
    alignments with suitable jumps.
    """
    cookie = int(cookie)
    return """
stackhunter_helper:
    dd 0x%08x
    jmp short stackhunter_helper_end
    dd 0x%08x
    jmp short stackhunter_helper_end
    db 0xff
    dd 0x%08x
    jmp short stackhunter_helper_end
    dd 0x%08x
stackhunter_helper_end:
""" % (cookie, cookie, cookie, cookie)
def mock_token_verification(permission=None):
    """Mock payload with custom permission.

    Parameters:
        permission (string): e.g. "read:posts,create:posts"

    Returns:
        payload (dict)
    """
    if permission is None:
        permission = []
    else:
        permission = permission.split(',')

    return {
        'iat': 1589041232,
        'exp': 1589048432,
        'sub': "auth0|test_user_id",
        'is_verified': True,
        'permissions': permission
    }
def _digi_nan(fmt):
    """
    Return the WFDB digital value used to store NAN for the format type.

    Parameters
    ----------
    fmt : str, list
        The WFDB dat format, or a list of them.

    Returns
    -------
    int
        The WFDB digital value per format type.
    """
    if isinstance(fmt, list):
        return [_digi_nan(f) for f in fmt]

    if fmt == '80':
        return -128
    elif fmt == '310':
        return -512
    elif fmt == '311':
        return -512
    elif fmt == '212':
        return -2048
    elif fmt == '16':
        return -32768
    elif fmt == '61':
        return -32768
    elif fmt == '160':
        return -32768
    elif fmt == '24':
        return -8388608
    elif fmt == '32':
        return -2147483648
def filter_line(l):
    """Return True if the line is non-empty and is not a comment."""
    return len(l) > 0 and l[0] != "#"
def rosenbrock(phenome):
    """The bare-bones Rosenbrock function."""
    ret = 0.0
    for i in range(len(phenome) - 1):
        x = phenome[i]
        ret += 100.0 * (x ** 2 - phenome[i + 1]) ** 2 + (x - 1.0) ** 2
    return ret
def compute_lps_array(pattern):
    """Return the `lps` array for the given string `pattern`.

    This means that `lps[i]` is the longest proper prefix of `pattern[0:i]`
    which is also a suffix of `pattern[0:i]`. The efficiency of this
    algorithm is O(M), where M is the length of pattern.

    Example:
        pattern: | a | b | a | b | a | b | c | a |
        index:   | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
        value:   | 0 | 0 | 1 | 2 | 3 | 4 | 0 | 1 |
    """
    m = len(pattern)
    lps = [0] * m
    j = 0
    i = 1
    while i < m:
        if pattern[i] == pattern[j]:
            j += 1
            lps[i] = j
            i += 1
        else:
            if j != 0:
                j = lps[j - 1]
            else:
                lps[i] = 0
                i += 1
    return lps
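
# For illustration only: a minimal KMP search built on compute_lps_array.
# kmp_search is a hypothetical helper, not part of the original source.
def kmp_search(text, pattern):
    """Return the start indices of all occurrences of pattern in text."""
    lps = compute_lps_array(pattern)
    matches, j = [], 0
    for i, ch in enumerate(text):
        while j > 0 and ch != pattern[j]:
            j = lps[j - 1]          # fall back along the prefix table
        if ch == pattern[j]:
            j += 1
        if j == len(pattern):       # full match ending at i
            matches.append(i - len(pattern) + 1)
            j = lps[j - 1]
    return matches

assert kmp_search("abababcab", "ababc") == [2]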
def validate_password_positions(policy, password):
    """Validate password based on positions in policy."""
    positions, char = policy
    password_chars = set(password[position] for position in positions)
    return len(password_chars) == 2 and char in password_chars
def create_unique_id(words, key):
    """
    Create unique ids which represent keys in structure nodes, allowing us to
    store the same key names even when they exist on different levels of the
    doc structure.
    """
    if key == 0:  # parent
        try:
            name = words[-2]
            return name + str(len(words) - 1)
        except IndexError:
            return None
    elif key == 1:  # children
        return words[-1] + str(len(words))
def toUtf8(s, encoding="gbk"):
    """
    toUtf8(s: bytes, encoding="gbk")

    Re-encode s from `encoding` to utf-8.
    """
    return s.decode(encoding).encode("utf-8")
def gcd(a: int, b: int) -> int:
    """
    Calculate the greatest common divisor of a and b.

    :param a: any integer
    :param b: any integer
    :return: the greatest common divisor
    """
    if b == 0:
        return a
    return gcd(b, a % b)
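
# For illustration only: a least common multiple built on gcd above.
# lcm is a hypothetical helper, not part of the original source.
def lcm(a: int, b: int) -> int:
    """Calculate the least common multiple via lcm(a, b) = |a*b| / gcd(a, b)."""
    return abs(a * b) // gcd(a, b) if a and b else 0

assert gcd(12, 18) == 6
assert lcm(4, 6) == 12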
def player_rank(player):
    """Calculate a rank value for a player"""
    rank = float(((player['WIN'] * 25.0)
                  + (player['CTF_SCORE'] * 5.0)
                  + (player['SLAYER_SCORE'] / 2.0))
                 - (player['LOSS'] * 30.0))
    if rank < 0:
        rank = 0
    return rank
def validate_names(names: list, name: str, command: str):
    """
    Simple function to validate existence of name within names list.

    Args:
        names (list): selected index
        name (str): name
        command (str): name of command for "tailored" help message
    """
    if name not in names:
        raise IndexError(f"{command} {name} does not exist... Run `kaos {command} list` again")
    return name
def local_v_star(phase, aRstar, inclination, vsini, l):
    """This is the rigid-body, circular-orbit approximation of the local
    velocity occulted by the planet as it goes through transit, as per
    Cegla et al. 2016. No tests are done to keep this fast."""
    import numpy as np
    xp = aRstar * np.sin(2.0 * np.pi * phase)
    yp = (-1.0) * aRstar * np.cos(2.0 * np.pi * phase) * np.cos(np.deg2rad(inclination))
    x_per = xp * np.cos(np.deg2rad(l)) - yp * np.sin(np.deg2rad(l))
    return (x_per * vsini)
def unique(l):
    """Return a list with unique elements"""
    return list(set(l))
def to_dict_of_lists(G, nodelist=None):
    """Return adjacency representation of graph as a dictionary of lists.

    Parameters
    ----------
    G : graph
        A NetworkX graph
    nodelist : list
        Use only nodes specified in nodelist

    Notes
    -----
    Completely ignores edge data for MultiGraph and MultiDiGraph.
    """
    if nodelist is None:
        nodelist = G
    d = {}
    for n in nodelist:
        d[n] = [nbr for nbr in G.neighbors(n) if nbr in nodelist]
    return d
def experiment_subdir(exp_id):
    """Return the subdirectory name for the given experiment id, e.g. 'exp_0042'."""
    assert 0 <= exp_id <= 9999
    return f'exp_{exp_id:04d}'
def seg_text2listxy(text):
    """
    Purpose:
        parse x, y coordinates from the text of a seg (segmentation)
        annotation in xml

    Args:
        text: text of seg (segmentation) annotation in xml,
              "[x0,y0, x1,y1, x2,y2, x3,y3, ...]"

    Returns:
        lists storing x and y coordinates,
        x: [x0, x1, x2, x3, ...]
        y: [y0, y1, y2, y3, ...]
    """
    strList = text[1:-1].split(", ")
    x = list(map(int, strList[::2]))
    y = list(map(int, strList[1::2]))
    return x, y
def remove_code_parameter_from_uri(url):
    """
    This removes the "code" parameter added by the first ORCID call if it is
    there, and trims off the trailing '/?' if it is there.
    """
    return url.split("code")[0].strip("&").strip("/?")
def read_conjugation_pronunciation(arr, urls, idx):
    """Reads the URLs of conjugations' pronunciations."""
    if idx == -1:
        return -1
    for pron_info in arr:
        [cur_url, url_text] = urls[idx]
        if url_text.endswith(pron_info['pronunciation']):
            pron_info['url'] = cur_url
            idx += 1
            if idx >= len(urls):
                return -1
    return idx
def aws_tags_to_dict(tags):
    """Convert a list of AWS tag dicts to a single dict with corresponding
    keys and values."""
    return {tag.get('Key'): tag.get('Value') for tag in tags or []}
def is_float(string):
    """
    Return True if the string represents a number.

    :param string: the string to be checked
    :return: True or False
    """
    if not isinstance(string, str):
        raise TypeError("the input should be a string")
    try:
        float(string)
        return True
    except ValueError:
        return False
def abspath(path):
    """Convert the given path to an absolute path.

    Since FS objects have no concept of a 'current directory' this simply
    adds a leading '/' character if the path doesn't already have one.
    """
    if not path.startswith('/'):
        return '/' + path
    return path
def whitelist(d, fields):
    """Whitelists a dictionary by keeping ONLY the selected `fields`.

    Non-destructive (creates and returns a new dict).
    """
    ret = type(d)()
    for f in fields:
        if f in d:
            ret[f] = d[f]
    return ret
def validate_tax_id(nip_str):
    """Check a Polish tax id (NIP) using its weighted checksum digit."""
    if '-' in nip_str:
        nip_str = nip_str.replace('-', '')
    if len(nip_str) != 10 or not nip_str.isdigit():
        return False
    digits = [int(i) for i in nip_str]
    weights = (6, 5, 7, 2, 3, 4, 5, 6, 7)
    check_sum = sum(d * w for d, w in zip(digits, weights)) % 11
    return check_sum == digits[9]
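
# A usage sketch (the sample numbers below are made up for illustration):
# "1234563218" satisfies the weighted checksum, changing the last digit
# breaks it, and dashes are stripped before validation.
assert validate_tax_id("123-456-32-18") is True
assert validate_tax_id("1234563217") is False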
def selected_only(bpy_data):
    """Filter out all the objects that are not selected."""
    def selected_in_subtree(parent_obj):
        return parent_obj.select or any(selected_in_subtree(child)
                                        for child in parent_obj.children)

    bpy_data['objects'] = [obj for obj in bpy_data['objects']
                           if selected_in_subtree(obj)]
    return bpy_data
def sort_categories(categories):
    """Apply our special sort order (Misc is last) to the category list."""
    out = sorted(categories)
    if 'Misc' in out:
        out.remove('Misc')
        out.append('Misc')
    return out
def getSumProp(prop):
    """
    This sums the proportion of every column and returns a dict with just the
    total proportion for each column.
    """
    final_proportions = {}
    total = sum([prop[account].sum() for account in prop.keys()])
    for account in prop.keys():
        final_proportions[account] = prop[account].sum() / total
    return final_proportions