content
stringlengths
42
6.51k
def listify(iterable):
    """Best-effort conversion of an arbitrary value into a list.

    Lists pass through unchanged, scalars are wrapped, falsy values become
    an empty list, and callables are invoked and their result listified.
    """
    if isinstance(iterable, list):
        return iterable
    if isinstance(iterable, (str, int, float)):
        # Wrap scalars instead of iterating them (a str would explode
        # into characters otherwise).
        return [iterable]
    if not iterable:
        return []
    source = iterable() if callable(iterable) else iterable
    return [element for element in source]
def strip_resolution(key):
    """Drop the trailing resolution component from a lookup key.

    Args:
        key (str): Lookup key of the form ``col&exp&chan&resolution``.

    Returns:
        (str): The key without its final ``&``-separated component; keys
        containing no ``&`` are returned unchanged.
    """
    head, sep, _resolution = key.rpartition('&')
    return head if sep else key
def create_import_error_msg(
    extra_module: str,
    forte_module: str,
    component_name: str,
    pip_installable: bool = True,
):
    """Build the error message shown when a forte extra dependency is missing.

    Args:
        extra_module: Module name that should be installed by pip.
        forte_module: Forte extra group; users install all extras for it via
            ``pip install forte[forte_module]``.
        component_name: The forte component that needs the module.
        pip_installable: Whether to include the direct pip-install hint.

    Returns:
        The assembled error message string.
    """
    fragments = [
        f" `{extra_module}` is not installed correctly."
        f" Consider install {extra_module}"
    ]
    if pip_installable:
        fragments.append(f" via `pip install {extra_module}` ")
    fragments.append(
        f" or refer to extra requirement for {component_name}"
        " at https://github.com/asyml/forte#installation"
        f" for more information about installing {forte_module}. "
    )
    return "".join(fragments)
def peal_speed_to_blow_interval(peal_minutes: float, num_bells: int) -> float:
    """Derive the interval between blows from a peal speed.

    Assumes a standard peal of 5040 changes, i.e. 2520 whole pulls.
    """
    whole_pull_seconds = peal_minutes * 60 / 2520
    # Each whole pull contains two blows per bell plus one handstroke gap.
    return whole_pull_seconds / (num_bells * 2 + 1)
def num_to_emoji(n):
    """Convert a number (1-10) to its discord emoji string.

    Parameters
    ----------
    n : str
        string number

    Returns
    -------
    str
        discord emoji if valid, False otherwise
    """
    emoji_by_number = {
        "1": ":one:",
        "2": ":two:",
        "3": ":three:",
        "4": ":four:",
        "5": ":five:",
        "6": ":six:",
        "7": ":seven:",
        "8": ":eight:",
        "9": ":nine:",
        "10": ":ten:",
    }
    return emoji_by_number.get(str(n), False)
def flatten_list(input_list):
    """Flatten a 2-d list into a 1-d list.

    Args:
        input_list: 2-d list

    Returns:
        1-d list
    """
    return [item for sublist in input_list for item in sublist]
def expand_dotted_dict(root):
    """ Expand dotted dictionary keys.

    Keys like ``'a.b.c'`` are expanded into nested dictionaries
    (``{'a': {'b': {'c': value}}}``); plain keys are copied as-is.

    Parameters
    ----------
    root : dict
        The dictionary to expand.

    Returns
    -------
    dct : dict
        The expanded dictionary.

    Raises
    ------
    ValueError
        If ``root`` is not a dict, a dotted key contains an empty part, or a
        dotted key path collides with a non-dict value.
    KeyError
        If a non-dotted key is specified more than once at the same level.
    """
    if not root:
        return {}
    if not isinstance(root, dict):
        raise ValueError('A dictionary is required')
    res = {}

    def expand_key(dct, key, value):
        # Walk (creating as needed) the nested dicts named by the dotted key,
        # returning the innermost dict the value/sub-dict should land in.
        if isinstance(value, dict):
            # specifies a sub-dict; use full dotted name
            parts = key.split('.')
        else:
            # specifies a value; last part refers to a value
            parts = key.split('.')[:-1]
        for part in parts:
            if not part:
                raise ValueError('Dotted key cannot contain empty part '
                                 f'({key})')
            if part not in dct:
                dct[part] = {}
            elif not isinstance(dct[part], dict):
                raise ValueError('Dotted key does not refer to a dictionary '
                                 f'({part} of {key})')
            dct = dct[part]
        return dct

    def set_values(dct, value):
        # Non-dotted keys are assigned first, then dotted keys are expanded
        # into (possibly pre-existing) nested dicts.
        dotted_keys = set(key for key in value if '.' in key)
        non_dotted = set(value) - dotted_keys
        for key in non_dotted:
            if key in dct:
                raise KeyError(f'Key specified multiple times: {key}')
            dct[key] = value[key]
        for key in dotted_keys:
            sub_value = value[key]
            sub_dict = expand_key(dct, key, sub_value)
            if isinstance(sub_value, dict):
                # Dotted key names a sub-dict: recurse to expand its contents.
                set_values(sub_dict, sub_value)
            else:
                # Dotted key names a value: last path part is the value's key.
                last_part = key.split('.')[-1]
                sub_dict[last_part] = sub_value
        return dct

    return set_values(res, root)
def verify_approval_skip(data, env, env_configs):
    """Determine whether an approval stage may be skipped for an environment.

    Defaults to False; administrators must enable skips per environment via
    the ``enable_approval_skip`` flag in the environment configs.

    Args:
        data (dict): environment config data from pipeline files
        env (str): Name of environment
        env_configs (dict): environment configs from foremast files

    Returns:
        bool: result of approval skip check/verification and setting
    """
    approval_skip = False
    if 'approval_skip' in data['app']:
        # .get() guards against environments missing entirely or missing the
        # admin flag — the previous direct indexing raised KeyError for
        # env configs without 'enable_approval_skip'.
        env_config = env_configs.get(env, {})
        if env_config.get('enable_approval_skip'):
            approval_skip = data['app']['approval_skip']
    return approval_skip
def _generate_command_by_dict( mydict: dict, # Debug verbose: bool = False, ): """ Generate an array based on dictionary with keys and values """ array_command = [] # append to a list for key, value in mydict.items(): array_command.extend([key, value]) if verbose: print(array_command) # return values return array_command
def reverse_log(log):
    """Concatenates lines in reverse order"""
    lines = log.split("\n")
    lines.reverse()
    return "\n".join(lines)
def partition3_direct_dp(arr):
    # (Max time used: 0.16/10.00, max memory used: 19165184/536870912.)
    """Decide whether ``arr`` can be split into 3 bags of equal sum (1/0).

    In order to equally partition the souvenirs into 3 bags, each bag must
    have size = sum(arr) / 3; since the input are all integers, the partition
    size must be an integer. Let A be a 3-dimensional table where
    A[k][size1][size2] means: given the first k numbers in arr, can we
    partition so that the 1st bag has size1 and the 2nd bag has size2?
    With num = arr[k-1] (the k-th number), the recurrence is:
        A[k][size1][size2] = A[k-1][size1-num][size2]
                           | A[k-1][size1][size2-num]
                           | A[k-1][size1][size2]
    (the k-th number goes into bag1, bag2, or the implicit 3rd bag).
    The answer is A[len(arr)][sum(arr)/3][sum(arr)/3].
    Cubic in size, so time- and space-consuming.

    # NOTE(review): assumes arr is non-empty and arr[0] <= sum(arr)/3;
    # otherwise the base-case assignments below raise IndexError/KeyError —
    # confirm callers guarantee this.
    """
    if sum(arr) % 3 != 0:
        # Total not divisible by 3: no equal partition exists.
        return 0
    else:
        partition_size = int(sum(arr) / 3)
        # 2-step initialize: base table for the first element only.
        A = {1: {}}
        for size1 in range(partition_size + 1):
            A[1][size1] = {}
            for size2 in range(partition_size + 1):
                A[1][size1][size2] = False
        # First element placed in bag1 or bag2 respectively.
        # (Placing it in the 3rd bag — A[1][0][0] — is not marked; by bag
        # symmetry the final answer is unaffected.)
        A[1][arr[0]][0] = True
        A[1][0][arr[0]] = True
        # recurrence computation
        for k in range(2, len(arr) + 1):
            A[k] = {}
            for size1 in range(partition_size + 1):
                A[k][size1] = {}
                for size2 in range(partition_size + 1):
                    num = arr[k - 1]
                    # Size guards keep the lookup indices non-negative.
                    case1 = (size1 >= num) and A[k - 1][size1 - num][size2]
                    case2 = (size2 >= num) and A[k - 1][size1][size2 - num]
                    case3 = A[k - 1][size1][size2]
                    A[k][size1][size2] = case1 | case2 | case3  # "or" relationship
        return (1 if A[len(arr)][partition_size][partition_size] else 0)
def parse_hpo_gene(hpo_line):
    """Parse hpo gene information.

    Args:
        hpo_line(str): A tab-separated hpo phenotype line.

    Returns:
        hpo_info(dict): Parsed fields, or {} for lines with too few fields.
    """
    # Split first and guard on the number of tab-separated fields — the
    # previous code checked len() of the raw string, so a short malformed
    # line (e.g. "a\tb") could pass the guard and raise IndexError below.
    fields = hpo_line.rstrip().split("\t")
    if len(fields) <= 3:
        return {}
    return {
        "hgnc_symbol": fields[1],
        "description": fields[2],
        "hpo_id": fields[3],
    }
def _str(s): """ Convert PTB tokens to normal tokens """ if (s.lower() == '-lrb-'): s = '(' elif (s.lower() == '-rrb-'): s = ')' elif (s.lower() == '-lsb-'): s = '[' elif (s.lower() == '-rsb-'): s = ']' elif (s.lower() == '-lcb-'): s = '{' elif (s.lower() == '-rcb-'): s = '}' return s
def is_number(item: str) -> bool:
    """Return True if the string parses as a float, False otherwise."""
    try:
        float(item)
    except (TypeError, ValueError):
        return False
    return True
def recursiveFactorial(num):
    """assumes num is a positive int
    returns an int, num! (the factorial of num)
    """
    return 1 if num == 0 else num * recursiveFactorial(num - 1)
def t_name_to_flan_pattern_name(t_name: str) -> str:
    """Converts `t_name` to flan `PATTERN` key.

    Some seqio tasks use the same flan patterns.

    Args:
        t_name: Task config name.

    Returns:
        a key for `PATTERNS`.
    """
    # Substring-matched families first.
    for family in ('para_crawl', 'wmt16_translate'):
        if family in t_name:
            return family
    # Exact-name groupings; anything else maps to itself.
    grouped = {
        'arc_challenge': 'arc',
        'arc_easy': 'arc',
        'anli_r1': 'anli',
        'anli_r2': 'anli',
        'anli_r3': 'anli',
        'mnli_matched': 'mnli',
        'mnli_mismatched': 'mnli',
    }
    return grouped.get(t_name, t_name)
def index_in_complete_solution(individual, complete_solution):
    """Return the index of the first entry of ``complete_solution`` whose
    exact type matches ``individual``'s type, or None when nothing matches."""
    for position, candidate in enumerate(complete_solution):
        if type(candidate) == type(individual):
            return position
    return None
def merge_two_dicts(x, y):
    """Merges two dicts into a new copy; y's entries win on key conflicts."""
    return {**x, **y}
def _static_folder_path(static_url, static_folder, static_asset): """ Returns a path to a file based on the static folder, and not on the filesystem holding the file. Returns a path relative to static_url for static_asset """ # first get the asset path relative to the static folder. # static_asset is not simply a filename because it could be # sub-directory then file etc. if not static_asset.startswith(static_folder): raise ValueError("%s static asset must be under %s static folder" % (static_asset, static_folder)) rel_asset = static_asset[len(static_folder):] # Now bolt the static url path and the relative asset location together return u'%s/%s' % (static_url.rstrip('/'), rel_asset.lstrip('/'))
def tflops_per_second(flops, dt):
    """
    Computes an effective processing rate in TFLOPS per second.

    TFLOP/S = flops / (dt * 1E12)

    Args:
        flops: Estimated FLOPS in the computation.
        dt: Elapsed time in seconds.

    Returns:
        The estimate.
    """
    tera = 1E12
    return flops / (tera * dt)
def bindingType(b):
    """
    Return the type of a variable binding, commonly 'uri' or 'literal'.

    Typed literals carrying the XML Schema string datatype are normalized
    to plain 'literal'.
    """
    # Renamed the local from ``type`` — it shadowed the builtin.
    binding_type = b['type']
    if (binding_type == "typed-literal"
            and b['datatype'] == "http://www.w3.org/2001/XMLSchema#string"):
        binding_type = 'literal'
    return binding_type
def suffix_unless_suffixed(text: str, suffix: str) -> str:
    """
    Append ``suffix`` to ``text`` unless ``text`` already ends with it.

    Parameters
    ----------
    text : str
        String to be conditionally suffixed.
    suffix : str
        Suffix to be conditionally appended to this string.

    Returns
    ----------
    str
        ``text`` suffixed by ``suffix``, or ``text`` unchanged when it is
        already so suffixed.
    """
    assert isinstance(text, str), f'{repr(text)} not string.'
    assert isinstance(suffix, str), f'{repr(suffix)} not string.'
    if text.endswith(suffix):
        return text
    return text + suffix
def _find_tools_paths(full_args): """Finds all paths where the script should look for additional tools.""" paths = [] for idx, arg in enumerate(full_args): if arg in ['-B', '--prefix']: paths.append(full_args[idx + 1]) elif arg.startswith('-B'): paths.append(arg[2:]) elif arg.startswith('--prefix='): paths.append(arg[9:]) return paths
def zellers_congruence(day, month, year):
    """
    Return the ISO weekday (1 = Monday, 2 = Tuesday, ...) of year/month/day.

    For details see https://en.wikipedia.org/wiki/Zeller%27s_congruence
    """
    # Variable names follow the formula on Wikipedia.
    q = day
    m = month
    if month < 3:
        # January and February count as months 13 and 14 of the prior year.
        m = month + 12
        year -= 1
    K = year % 100
    J = year // 100
    h = (q + (13 * (m + 1)) // 5 + K + K // 4 + J // 4 + 5 * J) % 7
    # Zeller's h numbers Saturday as 0; shift to ISO numbering.
    return ((h + 5) % 7) + 1
def rand_bytes(number_of_bytes: int) -> bytes:
    """
    Generate cryptographically random bytes.

    :param number_of_bytes: the number of bytes
    :return: the bytes
    """
    import os
    return os.urandom(number_of_bytes)
def reversetext(contenttoreverse, reconvert=True):
    """
    Reverse any content.

    :type contenttoreverse: string
    :param contenttoreverse: The content to be reversed
    :type reconvert: boolean
    :param reconvert: Whether or not to reconvert the object back into its
        initial type. Default is "True".
    """
    # If reconvert is specified
    if reconvert is True:
        # Rebuild the value through its own type's constructor instead of
        # eval()-ing a synthesized expression: same result for str/int/float,
        # without executing constructed source code.
        return type(contenttoreverse)(str(contenttoreverse)[::-1])
    # Return the raw reversed sequence
    return contenttoreverse[::-1]
def horizon_error(ground_truth_horizon, detected_horizon, image_dims):
    """Calculates error in a detected horizon.

    The error is the larger of the two endpoint gaps (at x=0 and x=width)
    between the detected and ground-truth lines, normalized by image height.

    Args:
        ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line.
        detected_horizon: Tuple with (slope, intercept) for the detected horizon line.
        image_dims: Tuple of integers, (width, height) of the image, in pixels.

    Returns:
        Float, or None if a horizon is missing altogether.
    """
    if ground_truth_horizon is None or detected_horizon is None:
        return None
    width, height = image_dims
    def line_at(params, x):
        slope, intercept = params
        return slope * x + intercept
    gap_left = abs(line_at(ground_truth_horizon, 0) - line_at(detected_horizon, 0))
    gap_right = abs(line_at(ground_truth_horizon, width) - line_at(detected_horizon, width))
    return max(gap_left, gap_right) / height
def scale_axis(axis_bounds, lower_scale=None, upper_scale=None):
    """
    Expand axis bounds by fractions of the current span.

    Each new bound is the old bound moved outward by the requested fraction
    of the current (upper - lower) difference.

    Parameters
    ----------
    axis_bounds : tuple(float, float)
        The current lower and upper axis bounds.
    lower_scale : float, optional
        Fraction by which to extend the lower bound; None leaves it as-is.
    upper_scale : float, optional
        Fraction by which to extend the upper bound; None leaves it as-is.

    Returns
    -------
    lower_bound : float
        The lower bound after scaling.
    upper_bound : float
        The upper bound after scaling.
    """
    lower, upper = axis_bounds
    span = upper - lower
    lower_bound = lower - ((lower_scale if lower_scale is not None else 0) * span)
    upper_bound = upper + ((upper_scale if upper_scale is not None else 0) * span)
    return lower_bound, upper_bound
def upper(s):
    """
    Number of upper case letters in a string.

    Solution for day 4.

    >>> upper("UpPer")
    2
    >>> upper("alllower")
    0
    """
    return sum(1 for letter in s if letter.isupper())
def remove_nested_parens(input_str):
    """
    Return ``(balanced, text)`` where ``text`` is a copy of ``input_str``
    with any parenthesized (..) or bracketed [..] content removed (nesting
    handled), and ``balanced`` reports whether both delimiter kinds were
    well balanced (True) or not (False).
    """
    def strip_delimited(text, opener, closer):
        # Drop everything between matched opener/closer pairs; return the
        # remaining text and the final nesting depth (0 means balanced).
        kept = []
        depth = 0
        for ch in text:
            if ch == opener:
                depth += 1
            elif ch == closer and depth:
                depth -= 1
            elif not depth:
                kept.append(ch)
        return ''.join(kept), depth

    without_parens, paren_depth = strip_delimited(input_str, '(', ')')
    without_brackets, bracket_depth = strip_delimited(without_parens, '[', ']')
    balanced = paren_depth == 0 and bracket_depth == 0
    return (balanced, without_brackets)
def get_at(doc, path, create_anyway=False):
    """Get the value, if any, of the document at the given path, optionally
    mutating the document to create nested dictionaries as necessary.

    Args:
        doc: Nested dict to traverse.
        path: Sequence of keys naming the nested location.
        create_anyway: When True, create missing intermediate dicts in-place
            (the final key is still never created).

    Returns:
        The value at the path, or None when the path is absent.
    """
    node = doc
    last = len(path) - 1
    # Fast path for single-key lookups; create_anyway is irrelevant here
    # since there are no intermediate nodes to create.
    if last == 0:
        return doc.get(path[0])
    for index, edge in enumerate(path):
        if edge in node:
            node = node[edge]
        elif index == last or not create_anyway:
            # the key doesn't exist, and this is the end of the path:
            return None
        else:
            # create anyway will create any missing nodes:
            node = node[edge] = {}
    return node
def longest_target_sentence_length(sentence_aligned_corpus):
    """
    :param sentence_aligned_corpus: Parallel corpus under consideration
    :type sentence_aligned_corpus: list(AlignedSent)
    :return: Number of words in the longest target language sentence of
        ``sentence_aligned_corpus``, or 0 for an empty corpus
    """
    return max(
        (len(sentence.words) for sentence in sentence_aligned_corpus),
        default=0,
    )
def make_histogram(s: list) -> dict:
    """Takes a string or a list, finds its elements frequency and returns a
    dictionary with element-frequency as key-value pairs.
    """
    frequencies = {}
    for element in s:
        if element in frequencies:
            frequencies[element] += 1
        else:
            frequencies[element] = 1
    return frequencies
def parse_mygene_src_version(d):
    """
    Parse source information, annotating each entry as a release or a
    timestamp; sources that are neither kind are dropped.

    d: looks like:
        {"ensembl" : 84, "cpdb" : 31, "netaffy" : "na35", "ucsc" : "20160620", .. }

    :return: dict, looks like:
        {'ensembl': {'id': 'ensembl', 'release': '87'},
         'entrez': {'id': 'entrez', 'timestamp': '20161204'}}
    """
    release_sources = {'ensembl', 'refseq'}
    timestamp_sources = {'uniprot', 'entrez', 'ucsc'}
    parsed = {}
    for source, version in d.items():
        if source in release_sources:
            parsed[source] = {'id': source, 'release': str(version)}
        elif source in timestamp_sources:
            parsed[source] = {'id': source, 'timestamp': str(version)}
    return parsed
def round_steps(stop, steps):
    """Return number of round steps from '0' to 'stop'."""
    largest_multiple = stop // steps * steps
    return int(round(largest_multiple))
def parse_drive_size(line):
    """Parse a ``drive:<size>`` line from the partition information file.

    Raises ValueError for any line not of exactly that form.
    """
    try:
        label, size = line.split(":")
    except ValueError:
        # Wrong number of colon-separated parts.
        raise ValueError("Drive size line format is 'drive:<size>'") from None
    if label != "drive":
        raise ValueError("Drive size line format is 'drive:<size>'")
    return size
def fmt_time(value):
    """
    Format a duration given in seconds.

    < 60 seconds    -> seconds (1 decimal digit for floats, plain for ints;
                       values under 0.1 s get 3 decimals)
    < 3600 seconds  -> X m Y s
    >= 3600 seconds -> X h Y m Z s

    :param value: seconds or None
    :return: Formatted value
    """
    if value is None:
        return "N/A"
    if not isinstance(value, (int, float)):
        raise TypeError(f"Expected int or float, got {type(value)}")
    if 0 < value < 0.1:
        return f"{value:.3f} s"
    if value < 60:
        return f"{value} s" if isinstance(value, int) else f"{value:.1f} s"
    if value < 3600:
        return f"{value//60:.0f} m {value%60:.0f} s"
    return f"{value//3600:.0f} h {(value%3600)//60:.0f} m {value%60:.0f} s"
def storage_format(number: int) -> str:
    """Format a number representing bytes of storage according to our convention.

    Uses convention that 1 GB = 1000^3 bytes.  Values above 100 GB are
    rounded to whole gigabytes; smaller values keep one decimal place.

    Parameters
    ----------
    number : int
        A number of bytes to format

    Returns
    -------
    str
        The formatted storage string
    """
    gbs = number / 1000 ** 3
    if gbs > 100:
        # Apply thousands separators here too — previously only the
        # fractional branch used them, so e.g. 1500 GB lacked a comma.
        return f"{round(gbs):,} GB"
    return f"{gbs:,.1f} GB"
def process_purchase(purchase):
    """
    Group purchased items by store.

    Takes a list of (item, store) tuples and returns a dict mapping each
    store to the list of items bought there, in input order.

    >>> process_purchase([('rice', 'mitsuwa'), ('msg', '99ranch'), \
    ('eggs', 'costco')])
    {'mitsuwa': ['rice'], '99ranch': ['msg'], 'costco': ['eggs']}
    >>> process_purchase([('milk', 'ralphs'), ('carrot', 'ralphs'), \
    ('milk', 'ralphs'), ('carrot', 'costco')])
    {'ralphs': ['milk', 'carrot', 'milk'], 'costco': ['carrot']}
    >>> process_purchase([])
    {}
    >>> process_purchase([('rice', 'mitsuwa'), ('eggs', 'mitsuwa'), \
    ('rice', 'mitsuwa'), ('rice', 'costco')])
    {'mitsuwa': ['rice', 'eggs', 'rice'], 'costco': ['rice']}
    >>> process_purchase([('rice', 'costco'), ('eggs', 'mitsuwa')])
    {'costco': ['rice'], 'mitsuwa': ['eggs']}
    """
    grouped = {}
    for item, store in purchase:
        grouped.setdefault(store, []).append(item)
    return grouped
def trim(txt):
    """Repeatedly remove empty delimiter pairs ({} [] <> ()) until stable."""
    empty_pairs = ("{}", "[]", "<>", "()")
    while True:
        edited = txt
        for pair in empty_pairs:
            edited = edited.replace(pair, "")
        if edited == txt:
            return txt
        txt = edited
def equal_division(input_string, length_of_division):
    """
    Divide a string into a list of substrings, each exactly
    ``length_of_division`` characters long.  Any remainder is discarded.

    Raises:
        ValueError: If the string is shorter than 2 characters, or if
            ``length_of_division`` is not positive (a non-positive length
            would otherwise never consume input and loop forever).
    """
    if len(input_string) < 2:
        raise ValueError('A single character cannot be divided')
    if length_of_division < 1:
        raise ValueError('Length of division must be a positive integer')
    divisions = []
    for start in range(0, len(input_string), length_of_division):
        chunk = input_string[start:start + length_of_division]
        # Drop the trailing partial chunk, keeping only full-length pieces.
        if len(chunk) == length_of_division:
            divisions.append(chunk)
    return divisions
def _has_method(obj, method): """ Given an object determine if it supports the method. Args: obj: Object which needs to be inspected. method: Method whose presence needs to be determined. Returns: Boolean depending upon whether the method is available or not. """ return getattr(obj, method, None) is not None
def decode(current_output: bytes) -> str:
    """Decode bytes to str, trying sjis, utf8 then ascii in order.

    Returns an empty string when none of the encodings apply.
    """
    encodings = ["sjis", "utf8", "ascii"]
    for enc in encodings:
        try:
            return current_output.decode(enc)
        except UnicodeDecodeError:
            # Only decoding failures should move us to the next candidate;
            # the previous bare ``except`` swallowed every error.
            continue
    return ""
def isprefix(path1, path2):
    """Return true if path1 is a prefix of path2.

    :param path1: An FS path
    :param path2: An FS path

    >>> isprefix("foo/bar", "foo/bar/spam.txt")
    True
    >>> isprefix("foo/bar/", "foo/bar")
    True
    >>> isprefix("foo/barry", "foo/baz/bar")
    False
    >>> isprefix("foo/bar/baz/", "foo/baz/bar")
    False
    """
    bits1 = path1.split("/")
    bits2 = path2.split("/")
    # Trailing slashes produce empty components — ignore them on path1.
    while bits1 and not bits1[-1]:
        bits1.pop()
    if len(bits1) > len(bits2):
        return False
    return all(a == b for a, b in zip(bits1, bits2))
def parse_epic_link(el):
    """Extract key of epic this issue belongs to (if given), else ''.

    Example XML:

    <customfields>
      <customfield id="customfield_10730"
                   key="com.pyxis.greenhopper.jira:gh-epic-link">
        <customfieldname>Epic Link</customfieldname>
        <customfieldvalues>
          <customfieldvalue>IRS-4</customfieldvalue>
        </customfieldvalues>
      </customfield>
      ...
    </customfields>
    """
    if el is None:
        return ''
    for customfield in el.findall('./customfield'):
        if customfield.attrib['id'] != "customfield_10730":
            continue
        return customfield.find('./customfieldvalues/customfieldvalue').text
    return ''
def binary_search_rotated(key, arr, left, right):
    """Search for ``key`` in a sorted, rotated array between indices ``left``
    and ``right`` (inclusive).

    Returns True when the key is present, False otherwise.
    """
    if left > right:
        return False
    # Integer division is required: ``/`` yields a float on Python 3 and
    # raised TypeError when used as a list index.
    middle = (left + right) // 2
    if key in (arr[left], arr[middle], arr[right]):
        return True
    if arr[middle] <= arr[right]:
        # Right side is sorted.
        if arr[middle] < key < arr[right]:
            return binary_search_rotated(key, arr, middle + 1, right - 1)
        return binary_search_rotated(key, arr, left + 1, middle - 1)
    # Otherwise the left side must be sorted.
    if arr[left] < key < arr[middle]:
        return binary_search_rotated(key, arr, left + 1, middle - 1)
    return binary_search_rotated(key, arr, middle + 1, right - 1)
def get_digit(n, i):
    """ i=0 for units, i=1 for tens, i=2 for hundreds... """
    reversed_digits = str(n)[::-1]
    return int(reversed_digits[i])
def filter(record):
    """ Filter for testing: drop records whose "str" field is "abcdef". """
    if record["str"] == "abcdef":
        return None
    return record
def update_inc(initial, key, count):
    """Update or create a dict of `int` counters, for JSONField."""
    counters = initial if initial else {}
    counters[key] = counters.get(key, 0) + count
    return counters
def _standardize_and_copy_config(config): """Returns a shallow copy of config with lists turned to tuples. Keras serialization uses nest to listify everything. This causes problems with the NumericColumn shape, which becomes unhashable. We could try to solve this on the Keras side, but that would require lots of tracking to avoid changing existing behavior. Instead, we ensure here that we revive correctly. Args: config: dict that will be used to revive a Feature Column Returns: Shallow copy of config with lists turned to tuples. """ kwargs = config.copy() for k, v in kwargs.items(): if isinstance(v, list): kwargs[k] = tuple(v) return kwargs
def _parse_date(s): """'31/05/11' --> '2011-05-31'""" day, month, year = [int(x) for x in s.split('/')] if year < 15: year = 2000 + year if (year < 2001 or year > 2015): return None if (month < 1 or month > 12) or (day < 1 or day > 31): return None return '%04d-%02d-%02d' % (year, month, day)
def remap(x, oldmin, oldmax, newmin, newmax):
    """Remap the float x from the range oldmin-oldmax to the range
    newmin-newmax.

    Does not clamp values that exceed min or max.

    For example, to make a sine wave that goes between 0 and 256:
        remap(math.sin(time.time()), -1, 1, 0, 256)
    """
    fraction = (x - oldmin) / (oldmax - oldmin)
    return newmin + fraction * (newmax - newmin)
def _modulo_ab(x: float, a: float, b: float) -> float: """Map a real number onto the interval [a, b).""" if a >= b: raise ValueError("Incorrect interval ends.") y = (x - a) % (b - a) return y + b if y < 0 else y + a
def merge_dicts(*dict_args):
    """
    Credits: https://stackoverflow.com/a/26853961
    Ref: https://stackoverflow.com/questions/38987/how-do-i-merge-two-dictionaries-in-a-single-expression

    Given any number of dicts, shallow copy and merge into a new dict;
    precedence goes to key/value pairs in later dicts.

    Performance Analysis:

        import timeit
        min(timeit.repeat(lambda: merge_dicts(x, y)))
    """
    result = {}
    for dictionary in dict_args:
        # dict.update already copies the key/value pairs into ``result`` —
        # the previous ``dictionary.copy()`` allocated a throwaway dict
        # per argument for no behavioral difference.
        result.update(dictionary)
    return result
def check_xlsx(b64_data):
    """Check that the data begins with the PK\\x03\\x04 zip signature."""
    zip_magic = bytes.fromhex('504b0304')
    return len(b64_data) >= 4 and b64_data[:4] == zip_magic
def join_year_month(year, month):
    """Joins year and month for parsing with pd.to_datetime."""
    return f"{year}-{month}"
def rec_key_change(key):
    """ Change recommendation key to numerical values """
    ranking = {'none': 0, 'hold': 1, 'buy': 2, 'strong_buy': 3}
    # Unknown keys yield None, matching the original fall-through behavior.
    return ranking.get(key)
def calc_term_overlap(termsA, termsB):
    """ Calculate the reciprocal overlap between two lists of HPO terms """
    if not termsA or not termsB:
        return 0
    shared = set(termsA).intersection(set(termsB))
    overlap_a = len(shared) / len(termsA)
    overlap_b = len(shared) / len(termsB)
    return min(overlap_a, overlap_b)
def multiFind(string, substring):
    """Return a list of indices where substring begins in string.

    Matches may not overlap themselves: multiFind('pppp','pp') == [0, 2].
    Returns [] when there are no matches.
    """
    indices = []
    position = string.find(substring)
    while position != -1:
        indices.append(position)
        position = string.find(substring, position + len(substring))
    return indices
def replace_char_at_index(org_str, index, replacement):
    """Replace character at index in string org_str with the given
    replacement character; out-of-range indices leave the string unchanged."""
    if index >= len(org_str):
        return org_str
    return org_str[:index] + replacement + org_str[index + 1:]
def parse_boolean(s):
    """Parse an environment variable string into a boolean.

    Considers empty string, '0', 'no', or 'false' (case insensitive) to be
    ``False``; all other values are ``True``.
    """
    falsy_values = {'', '0', 'no', 'false'}
    return s.lower() not in falsy_values
def flatten_component_tree(tree: dict) -> list:
    """Flattens a component tree into a list of its final leaves.

    Traverses the tree recursively until it meets terminating nodes, then
    collects all those terminating nodes into a list.

    :param tree: A dictionary of dictionaries any number of levels deep as
        returned by get_subcomponents.
    :return: A list of all entries in the input tree that had no lower-level
        components, as represented by containing only an empty dictionary
        for a value.
    """
    leaves = []
    for name, subtree in tree.items():
        nested = flatten_component_tree(subtree)
        if nested:
            leaves.extend(nested)
        else:
            # Empty subtree: this key is itself a leaf.
            leaves.append(name)
    return leaves
def detect_code_block(lines, index, limit):
    """
    Detects a code block and returns its last line +1's index, or the source
    index if not found.

    Parameters
    ----------
    lines : `list` of `str`
        The lines of the section.
    index : `int`
        The starting index of the code block.
    limit : `int`
        The last element's index.

    Returns
    -------
    index : `int`
        The index where the code block is over. If there was no code block,
        returns the initial index.
    """
    source_index = index
    found_starter = False
    while True:
        if index == limit:
            # Hit the end of the section: an unterminated opened block still
            # counts; otherwise report "no block found".
            if found_starter:
                return index
            else:
                return source_index
        line = lines[index]
        if not line:
            # Blank lines are skipped both before and inside the block.
            index += 1
            continue
        # Before the block opens we look for a line starting with ```;
        # once opened, we look for a line ending with ``` to close it.
        if line.endswith('```') if found_starter else line.startswith('```'):
            index += 1
            if found_starter:
                # Closing fence found: block ends after this line.
                return index
            found_starter = True
            continue
        if found_starter:
            # Content line inside the block.
            index += 1
            continue
        # Non-blank, non-fence line before any opener: no code block here.
        return source_index
def modular_geometric_sum(x, n, mod):
    """
    Compute a_n = (1 + x^1 + ... + x^{n-1}) % mod using the recurrences

        a_{2n}   = ((x^n + 1) * a_n) % mod
        a_{2n+1} = (x^{2n} + a_{2n}) % mod
    """
    if n == 1:
        return 1 % mod
    if n % 2:
        # Odd length: peel off the leading x^{n-1} term.
        return (pow(x, n - 1, mod) + modular_geometric_sum(x, n - 1, mod)) % mod
    # Even length: combine two halves of length n // 2.
    half_sum = modular_geometric_sum(x, n // 2, mod)
    return ((pow(x, n // 2, mod) + 1) * half_sum) % mod
def dict_of_list__to__list_of_dicts(dict, n_items):
    """
    Transpose a dict of equal-length lists into a list of dicts.

    ```
    x = {'foo': [3, 4, 5], 'bar': [1, 2, 3]}
    ppp.dict_of_list__to__list_of_dicts(x, 3)
    # Output:
    # [
    #     {'foo': 3, 'bar': 1},
    #     {'foo': 4, 'bar': 2},
    #     {'foo': 5, 'bar': 3},
    # ]
    ```
    :param dict:
    :param n_items:
    :return:
    """
    return [
        {key: values[i] for key, values in dict.items()}
        for i in range(n_items)
    ]
def find_workflow_steps(tool_id, steps):
    """
    Finds appropriate steps in workflow given tool id.

    :param tool_id: The tool id to search.
    :param steps: Dictionary of steps in workflow.
    :return: List of matching steps.
    """
    return [step for step in steps.values() if step['tool_id'] == tool_id]
def is_pandigital(number, include_zero=False):
    """Determine if a number (as a string) is pandigital.

    A 1-9 pandigital uses each digit 1..9 exactly once; with
    ``include_zero=True``, each digit 0..9 must appear exactly once.

    Args:
        number: The number, given as a string of digits.
        include_zero: Whether '0' belongs to the required digit set.

    Returns:
        bool: True when every required digit appears exactly once.
    """
    digits = set(number)
    if include_zero:
        required_length = 10
    else:
        required_length = 9
        # Only reject '0' in the zero-less case — the previous code rejected
        # it unconditionally, so include_zero=True could never return True.
        if '0' in digits:
            return False
    # Equal lengths ⇒ no repeats and exactly the required digit count.
    return len(digits) == len(number) == required_length
def remove_leading_spaces(data):
    """
    Remove the leading spaces (indentation) of lines of code.

    :param data: input data as a list of files, where each file is a list of
        strings
    :return: input data with leading spaces removed (mutated in place)
    """
    print('Removing leading spaces ...')
    for file_index in range(len(data)):
        for line_index in range(len(data[file_index])):
            # lstrip, not strip: the contract is leading-whitespace removal
            # only; the previous strip() also discarded trailing whitespace.
            data[file_index][line_index] = data[file_index][line_index].lstrip()
    return data
def build_cmd(seq, out_file, query_id, args):
    """
    Builds a command to run blast.py on the command line.

    :param seq: A fasta sequence to BLAST
    :param out_file: The name of the file to store the results in
    :param query_id: The id of the query
    :param args: A dictionary of arguments need for the BLAST and EC search
        from online databases.
    :return: The command to run blast.py on the command line.
    """
    return [
        "py", "blast.py",
        "--fasta_sequence", seq,
        "--program", args['--program'],
        "--email", args["--email"],
        "--out_file", out_file,
        "--id", str(query_id),
        "--min_pct_idnt", str(args["--min_pct_idnt"]),
        "--min_qry_cvr", str(args["--min_qry_cvr"]),
        "--max_blast_hits", str(args["--max_blast_hits"]),
        "--max_uniprot_hits", str(args["--max_uniprot_hits"]),
    ]
def words_from_text(s, words_to_ignore=None):
    """
    Split a string into its alphabetic words, dropping all non-alphabetic
    characters and any word listed in ``words_to_ignore``.

    Note: no case normalization is performed — words keep their original
    case (the previous docstring claimed lowercasing that never happened).
    """
    # None sentinel instead of a mutable default argument.
    if words_to_ignore is None:
        words_to_ignore = ()
    words = []
    word = ''
    for c in ' '.join(s.split()):
        if c.isalpha():
            word += c
        elif word:
            # Non-letter ends the current word.
            if word not in words_to_ignore:
                words.append(word)
            word = ''
    # Flush a trailing word with no terminating separator.
    if word and word not in words_to_ignore:
        words.append(word)
    return words
def get_minutes(minutes):
    """ (number) -> number

    Return the minutes component left after converting into hh:mm:ss format.

    Precondition: minutes >= 0

    >>> get_minutes(3800)
    3
    """
    total_minutes = minutes // 60
    return total_minutes % 60
def format_bird(ip_version, bird_version, cmd):
    """Prefixes BIRD command with the appropriate BIRD CLI command.

    Arguments:
        ip_version {int} -- IPv4/IPv6
        bird_version {int} -- BIRD version
        cmd {str} -- Unprefixed command

    Returns:
        {str} -- Prefixed command
    """
    # BIRD 1.x ships a separate CLI binary for IPv6.
    if bird_version == 1 and ip_version == 6:
        cli = "birdc6"
    else:
        cli = "birdc"
    return f'{cli} "{cmd}"'
def _default_init(targ_prob: float, acc_max: float, num_inp: int, num_para: int): """Decide the default integrator chain methods and arguments depending on the problem Parameters ---------- targ_prob : float target failure probability acc_max : float target tolerance for the estimation num_inp : int number of stochastic inputs of the constraints num_para : int number of parallel processes to use Returns ------- integrators : list Integrator classes, that are to be initiated int_args : dict Keyword arguments to pass to integrators """ if targ_prob * acc_max >= 1e-5: if targ_prob * acc_max >= 1e-4: integrators = ["MC"] else: integrators = ["SUSE", "MC"] int_args = {"num_starts": 1, "batch_size": 1e5} elif num_inp < 15: integrators = ["SUSE", "DS"] int_args = {"num_starts": 1} else: integrators = ["SUSE"] int_args = {"num_starts": num_para} print("Using", integrators, "as default chain.") return integrators, int_args
def shQuote(text):
    """quote the given text so that it is a single, safe string in sh code.

    Each embedded single quote is rewritten as '\\'' (close quote, escaped
    quote, reopen quote). Note that this leaves literal newlines alone
    (sh and bash are fine with that, but other tools may mess them up and
    need to do some special handling on the output of this function).
    """
    escaped = text.replace("'", "'\\''")
    return "'" + escaped + "'"
def render_operation_progress_summary_stages(stages):
    """
    Renders the provided operation progress stages.

    :param stages: progress stages to render (mapping of stage name to a
        dict with a 'steps' sequence)
    :return: rendered string, or '-' when there are no stages
    """
    lines = [
        '{}: [{}] step(s) done'.format(name, len(info['steps']))
        for name, info in stages.items()
    ]
    if not lines:
        return '-'
    return '\n|\t' + '\n|\t'.join(lines)
def cover_phone_number(no):
    """Mask all but the first five characters of a phone number.

    Characters at positions 5, 8 and 11 get a leading space, producing
    groups of three asterisks.

    >>> cover_phone_number('01234 567 890')
    '01234 *** *** **'
    """
    masked = []
    for position, char in enumerate(no):
        if position < 5:
            masked.append(char)
        else:
            if position in (5, 8, 11):
                masked.append(' ')
            masked.append('*')
    return ''.join(masked)
def merge_two_dicts_shallow(x, y):
    """Shallow-merge two dicts into a new one; values from ``y`` win.

    Neither input is modified. Implemented with copy()/update() rather
    than ``{**x, **y}`` so it also parses on Python 2
    (see https://stackoverflow.com/a/26853961/2212582).
    """
    merged = x.copy()  # .copy() keeps subclass-specific copy semantics
    merged.update(y)
    return merged
def makeRevisionOptionStr(revision):
    """Build the ``-r`` option string for an svn command.

    :param revision: a revision number, or string ('HEAD', 'BASE',
        'COMMITTED', 'PREV'), or a (start, end) revision range
        tuple/list; falsy values yield an empty string
    """
    if not revision:
        return ''
    # Some commands (svn log, ...) support a revision range.
    if isinstance(revision, (tuple, list)):
        return '-r %s:%s' % (revision[0], revision[1])
    return '-r %s' % revision
def score_distance(d, ka, coop=1):
    """Score a distance ``d`` onto [0, 1) with a Hill-equation curve.

    d == 0 scores 0; the score approaches 1 as d grows; d == ka scores
    exactly 0.5. ``coop`` is the Hill (cooperativity) coefficient that
    controls the steepness around ka.

    Args:
        d: The value to score
        ka: The value at which the score is 0.5
        coop: the cooperativity coefficient

    Returns:
        float
    """
    numerator = d ** coop
    return numerator / (ka ** coop + numerator)
def labeloff(lines, splice_from=5):
    """Strip the first ``splice_from`` characters from each line.

    Warning: no validation — lines shorter than ``splice_from`` simply
    become empty strings.
    """
    trimmed = []
    for line in lines:
        trimmed.append(line[splice_from:])
    return trimmed
def set_windows_slashes(directory):
    """
    Set all the slashes in a name so they use Windows slashes (\\)

    :param directory: str
    :return: str
    """
    # The original chained .replace('//', '\\') after this call, but that
    # was dead code: once every '/' has been replaced there is no '//'
    # left to match. Behavior is unchanged — 'a//b' still becomes two
    # backslashes ('a\\\\b').
    return directory.replace('/', '\\')
def _suitable_samples(pred_folders, gold_folders): """Returns the path of each sample contained in both prediction and gold standard folders.""" gold_samples = [] pred_samples = [folder.split('/')[-2] for folder in pred_folders] # some folders in Larson's gold standard have a folder named 'Registered', # which contains more information that we need for comparison. We need a # special condition to be able to get the right path on these folders. for folder in gold_folders: aux = folder.split('/') if 'Registered' in aux: gold_samples.append(aux[-3]) else: gold_samples.append(aux[-2]) valid_samples = list(set(pred_samples) & set(gold_samples)) pred_idx = [pred_samples.index(sample) for sample in valid_samples] gold_idx = [gold_samples.index(sample) for sample in valid_samples] pred_valid = [pred_folders[idx] for idx in pred_idx] gold_valid = [gold_folders[idx] for idx in gold_idx] return pred_valid, gold_valid
def split_schema_obj(obj, sch=None):
    """Return a (schema, object) tuple given a possibly schema-qualified name

    A qualification embedded in ``obj`` always wins over ``sch``.
    Surrounding double quotes are stripped from the object name.

    :param obj: object name or schema.object
    :param sch: schema name used when obj is unqualified (defaults to 'public')
    :return: tuple
    """
    schema = sch if sch is not None else 'public'
    if '.' in obj:
        # maxsplit=1 so names containing more than one dot no longer raise
        # ValueError (the original obj.split('.') produced 3+ values there).
        schema, obj = obj.split('.', 1)
    if obj[0] == '"' and obj[-1:] == '"':
        obj = obj[1:-1]
    return (schema, obj)
def cmset_and(x, y):
    """
    Usage:
       >>> cmset_and(x,y)

    returns the index of the elements of array x which are also present in
    the array y.

    This is equivalent to using the IDL command
       >>> botha=cmset_op(namea, 'AND', nameb, /index)
    i.e. performs the same thing as the IDL routine
    `cmset_op <http://cow.physics.wisc.edu/~craigm/idl/idl.html>`_.
    """
    # Build the membership set once: O(len(x)+len(y)) instead of the
    # original O(len(x)*len(y)) repeated list scans.
    # NOTE(review): assumes elements of y are hashable (strings/numbers).
    lookup = set(y)
    return [i for i, xx in enumerate(x) if xx in lookup]
def solver_problem2(inputs):
    """Count increases between successive width-3 sliding-window sums.

    Adjacent windows share two elements, so comparing their sums reduces
    to comparing inputs[j + 3] with inputs[j].
    """
    return sum(
        1
        for j in range(len(inputs) - 3)
        if inputs[j + 3] > inputs[j]
    )
def get_caption(attributes, feature, label, group=None):
    """Construct caption from plotting attributes for (feature, label) pair.

    Parameters
    ----------
    attributes : dict
        Plot attributes.
    feature : str
        Feature.
    label : str
        Label.
    group : str, optional
        Group.

    Returns
    -------
    str
        Caption.

    Raises
    ------
    KeyError
        ``attributes`` does not include necessary keys.
    """
    # Presence checks, in the same order the original performed them.
    for kind, name in (('feature', feature), ('label', label)):
        if name not in attributes:
            raise KeyError(
                f"Attributes do not include necessary key for {kind} '{name}'")

    required_keys = (
        ('feature', feature, 'plot_title'),
        ('feature', feature, 'plot_xlabel'),
        ('label', label, 'plot_ylabel'),
    )
    for kind, name, attr_key in required_keys:
        if attr_key not in attributes[name]:
            raise KeyError(
                f"Attributes for {kind} '{name}' does not include necessary "
                f"key '{attr_key}'")

    suffix = '' if group is None else f' ({group})'
    return (f"{attributes[feature]['plot_title']}: "
            f"{attributes[label]['plot_ylabel']} vs. "
            f"{attributes[feature]['plot_xlabel']}{suffix}.")
def row_to_dict(field_names, data, null_to_empty_string=False):
    """
    Converts a tuple result of a cx_Oracle cursor execution to a dict,
    with the keys being the column names

    :param field_names: The names of the columns in the result set (list)
    :param data: The data in this row (tup)
    :param null_to_empty_string: Whether or not to convert nulls to empty
        strings (bool)
    :return: A row of results (dict)
    """
    if null_to_empty_string:
        data = ["" if value is None else value for value in data]
    return dict(zip(field_names, data))
def _validate_other_libs(other_libs): """ Validates the other_libs parameter. Makes it a list, if it isn't already and verifies that all the items in the list are python modules with the required functions. Raises a TypeError, if the other_libs parameter is not valid. :param other_libs: parameter to validate :return: validated other_libs parameter """ if other_libs is not None: if not isinstance(other_libs, list): other_libs = [other_libs] import types # todo: formalize and document the 'other_libs' for integration with spark-tk required_functions = ["get_loaders","get_main_object","get_library_dirs"] for lib in other_libs: if not isinstance(lib, types.ModuleType): raise TypeError("Expected other_libs to contain python modules, but received %s." % type(lib) ) for required_function in required_functions: if not hasattr(lib, required_function): raise TypeError("other_lib '%s' is missing %s() function." % (lib.__name__,required_function)) return other_libs
def calc_tile_locations(tile_size, image_size):
    """
    Divide an image into tiles to help us cover classes that are spread out.

    tile_size: size of tile to distribute
    image_size: original image size as (height, width)
    return: list of (x_offset, y_offset) tile origins, row-major
    """
    height, width = image_size
    return [
        (col * tile_size, row * tile_size)
        for row in range(height // tile_size)
        for col in range(width // tile_size)
    ]
def get_fileid_val(file_identifier, key_value_data, fileid_value):
    """
    Get file identifier value.

    Scans key_value_data for the first entry containing file_identifier
    and returns that entry's second element; if no entry matches (or
    file_identifier is falsy), returns fileid_value incremented by one,
    as a string.
    """
    if file_identifier:
        for entry in key_value_data:
            if file_identifier in entry:
                # First match wins, exactly as the original's found-flag did.
                return entry[1]
    return str(int(fileid_value) + 1)
def get_value(obj, key, default=None):
    """
    Returns dictionary item value by name for dictionary objects or property
    value by name for other types. Also list of lists obj is supported.

    :param obj: dict, iterable of (key, value) pairs, or plain object
    :param key: dict item or property name
    :param default: default value
    :return: the looked-up value, or ``default`` when not found
    """
    if isinstance(obj, dict):
        return obj.get(key, default)
    elif hasattr(obj, '__iter__'):
        for item in obj:
            # BUG FIX: the original tested hasattr(obj, '__iter__') here,
            # which is always True inside this branch, so non-sized items
            # (e.g. plain ints mixed into the list) crashed on len(item).
            # Test the item itself (via __len__, which len() requires).
            if hasattr(item, '__len__') and len(item) > 1 and item[0] == key:
                return item[1]
        return default
    else:
        return getattr(obj, key, default)
def qual(clazz):
    """ Return full import path of a class """
    return f"{clazz.__module__}.{clazz.__name__}"
def validate_token(token):
    """ validate a token against oauth2.Token object

    None is accepted and passed through; any other value must expose a
    ``key`` attribute, otherwise ValueError is raised.
    """
    if token is None:
        return None
    if not hasattr(token, "key"):
        raise ValueError("Invalid token.")
    return token
def _clean_list(id_list): """ return a list where all elements are unique """ id_list.sort() r_list = [] last_element = None for x in id_list: if x != last_element: r_list.append(x) last_element = x return r_list
def get_mu_tilda(x_i, r, n):
    r"""Calculates the conditional descendant normal distribution
    *expectation* for generation-gap n.

    Latex equation: \tilde{\mu}_{i+n} = r^n X_i

    (Raw docstring so the backslash sequences are not treated as escapes.
    See the paper for the derivation.)"""
    decay = r ** n
    return decay * x_i
def _set_development_risk_icon(risk): """ Function to find the index risk level icon for development environment risk. :param float risk: the Software development environment risk factor. :return: _index :rtype: int """ _index = 0 if risk == 0.5: _index = 1 elif risk == 1.0: _index = 2 elif risk == 2.0: _index = 3 return _index
def get_test_name(name):
    """Map a PHPBench benchmark name to the name used across the teams.

    Args:
        name (str): Name of the benchmark

    Returns:
        The mapped name

    Raises:
        KeyError when the name passed in does not match any of the keys
    """
    name_map = {
        # sqlsrv driver benchmarks
        'SqlsrvConnectionBench': 'connection',
        'SqlsrvCreateDbTableProcBench': 'create',
        'SqlsrvCRUDBench': 'crud',
        'SqlsrvInsertBench': 'crud-create',
        'SqlsrvFetchBench': 'crud-retrieve',
        'SqlsrvUpdateBench': 'crud-update',
        'SqlsrvDeleteBench': 'crud-delete',
        'SqlsrvFetchLargeBench': 'large',
        'SqlsrvSelectVersionBench': 'version',
        # pdo_sqlsrv driver benchmarks
        'PDOConnectionBench': 'connection',
        'PDOCreateDbTableProcBench': 'create',
        'PDOCRUDBench': 'crud',
        'PDOInsertBench': 'crud-create',
        'PDOFetchBench': 'crud-retrieve',
        'PDOUpdateBench': 'crud-update',
        'PDODeleteBench': 'crud-delete',
        'PDOFetchLargeBench': 'large',
        'PDOSelectVersionBench': 'version',
    }
    return name_map[name]
def _get_location(location_text): """Used to preprocess the input location_text for URL encoding. Doesn't do much right now. But provides a place to add such steps in future. """ return location_text.strip().lower()
def clamp(x, x0, x1):
    """Clamp the value x to be within x0, x1 (inclusive)."""
    # Apply the upper bound first, then the lower bound — same evaluation
    # order as the original max(min(x, x1), x0).
    upper_bounded = min(x, x1)
    return max(upper_bounded, x0)
def sum_abs(number_list):
    """Return the sum of the absolute values of ``number_list``.

    Parameters
    ----------
    number_list : list of int

    Returns
    -------
    int
    """
    return sum(map(abs, number_list))