content
stringlengths
42
6.51k
def tts_version(version):
    """Convert a version string into one the TTS will pronounce correctly.

    Args:
        version (str): The version string, e.g. '1.1.2'

    Returns:
        str: A pronounceable version string, e.g. '1 punto 1 punto 2'.
        The separator word is Spanish ("punto"); the original docstring
        example said "point", contradicting the code.
    """
    # Replace every dot with the spoken separator word.
    return version.replace('.', ' punto ')
def mean_and_median(values):
    """Get the mean and median of a list of `values`.

    Args:
        values (iterable of float): A list of numbers.

    Returns:
        tuple (float, float): The mean and median.
    """
    values = list(values)
    mean = sum(values) / len(values)
    # The median is only defined on ordered data; the original indexed the
    # unsorted input. Sort a copy so the caller's list is untouched.
    ordered = sorted(values)
    midpoint = len(ordered) // 2
    if len(ordered) % 2 == 0:
        median = (ordered[midpoint - 1] + ordered[midpoint]) / 2
    else:
        median = ordered[midpoint]
    return mean, median
def generic_summary_provider(value_obj, internal_dict, class_synthetic_provider):
    """
    Checks value type and returns summary.

    :param lldb.SBValue value_obj: LLDB object.
    :param dict internal_dict: Internal LLDB dictionary.
    :param class class_synthetic_provider: Synthetic provider class.
    :return: Value summary.
    :rtype: str
    """
    # Delegate to the synthetic provider class; fall back to a placeholder
    # when no provider instance is produced. (Commented-out debug logging
    # from the original has been removed.)
    provider = class_synthetic_provider(value_obj, internal_dict)
    if provider is not None:
        return provider.summary()
    return "Summary Unavailable"
def apply_matrix_norm(m, v):
    """Equivalent to apply_matrix_pt(M, (p,q)) - apply_matrix_pt(M, (0,0))"""
    # Only the linear part (a..d) matters; the translation terms cancel.
    a, b, c, d, _e, _f = m
    p, q = v
    x = a * p + c * q
    y = b * p + d * q
    return (x, y)
def bin_to_int(n1, n2):
    """Takes two binary numbers, concatenates the second to the first, and returns the int representation"""
    first, second = int(n1), int(n2)
    # appending one bit doubles the first value and adds the second
    return first * 2 + second
def remove_non_ascii(input_string):
    """remove non-ascii: http://stackoverflow.com/a/1342373"""
    kept = [ch for ch in input_string if ord(ch) < 128]
    return "".join(kept)
def isNested(myList):
    """
    Check whether a list contains any nested list.

    @param myList: the (possibly nested) list representing a tree
    @return: 1 if any element is itself a list, 0 otherwise.

    The original returned after inspecting only the FIRST element, so
    e.g. [1, [2]] was wrongly reported as flat, and an empty list fell
    through to an implicit None.
    """
    for elem in myList:
        if isinstance(elem, list):
            return 1
    return 0
def generate_traefik_host_labels(hostname, segment=None, priority=1):
    """Generates a traefik path url with necessary redirects

    :hostname: Hostname that gets assigned by the label
    :segment: Optional traefik segment when using multiple rules
    :priority: Priority of frontend rule
    :returns: list of labels for traefik
    """
    # An unset segment collapses to an empty infix, keeping the plain
    # 'traefik.frontend.*' label names.
    infix = '' if segment is None else f'.{segment}'
    return [
        f'traefik{infix}.frontend.rule=HostRegexp:{{domain:{hostname}}}',
        f'traefik{infix}.frontend.priority={priority}',
    ]
def skip_item(item):
    """
    Determines whether a particular item in the parse needs to be skipped.

    Returns a small integer code:
      1 - marker token (starts with '~' or '('), 2 - stop word,
      3 - number word, 4 - literal integer, 5 - currency word,
      0 - keep the item.

    The original crashed (IndexError) on an empty string and used a bare
    except that swallowed unrelated errors.
    """
    if item and item[0] in "~(":
        return 1
    if item in ["THE", "A", "AN", "IT", "HE", "THEY", "HER", "HAS", "HAD",
                "HAVE", "SOME", "FEW", "THAT"]:
        return 2
    if item in ["HUNDRED", "THOUSAND", "MILLION", "BILLION", "TRILLION",
                "DOZEN", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX",
                "SEVEN", "EIGHT", "NINE"]:
        return 3
    if item in ["DOLLAR", "DUCAT"]:
        return 5
    # Only a failed int() conversion means "keep the item".
    try:
        int(item)
    except (ValueError, TypeError):
        return 0
    return 4
def valid_string(val: str) -> bool:
    """Return True when *val* is a non-empty string."""
    # The comparison already yields a bool; the original's
    # `True if ... else False` ternary was redundant.
    return len(val) > 0
def get_resource_string(arn):
    """
    Given an ARN, return the string after the account ID, no matter the ARN format.

    :param arn: arn:partition:service:region:account-id:resourcetype/resource
    :return: resourcetype/resource
    """
    # Split on the first five colons only; the tail may itself contain
    # colons and must be preserved verbatim.
    if arn.count(":") < 5:
        return ""
    return arn.split(":", 5)[5]
def only_largest(all_substr):
    """
    Function that returns only the largest substring in a list.

    Scans `all_substr` (a sequence of single characters) and collects runs
    whose character codes increase by exactly 1 from element to element;
    returns the longest such run. On equal lengths the final run wins
    (the closing comparison uses a strict '>').

    all_substr: list
    return: list
    """
    last_substr = []   # run currently being built
    auxiliar = []      # longest completed run seen so far
    for character in range(len(all_substr)):
        if (character > 0):
            if ((ord(all_substr[character - 1]) == ord(all_substr[character]) - 1)):
                # consecutive code point: extend the current run
                last_substr.append(all_substr[character])
            else:
                # run broken: keep it if it beats the stored best, then restart
                if len(last_substr) > len(auxiliar):
                    auxiliar = last_substr[:]
                    last_substr = []
                    last_substr.append(all_substr[character])
                else:
                    last_substr = []
                    last_substr.append(all_substr[character])
        else:
            # the first element always starts the first run
            last_substr.append(all_substr[character])
    # the final (possibly unfinished) run competes with the stored best
    if len(auxiliar) > len(last_substr):
        return auxiliar
    else:
        return last_substr
def text_cleanup(str_inn):
    """Clean up free text: strip markup remnants, normalise whitespace and
    punctuation spacing, and guarantee terminal punctuation.

    :param str_inn: raw text; falsy input (None, '') is returned unchanged
    :return: the cleaned text

    Note: in the original, the multi-space replacements had been
    whitespace-collapsed into no-ops (`replace(' ', ' ')`); they are
    restored here per their own inline comments.
    """
    if str_inn:
        str_inn = str_inn.rstrip()
        str_inn = str_inn.replace('\n', '')
        str_inn = str_inn.replace('\t', '')
        str_inn = str_inn.replace('<br/><br/>', ' ')
        # collapse runs of spaces (4, 3, then 2) down to a single space
        str_inn = str_inn.replace('    ', ' ')
        str_inn = str_inn.replace('   ', ' ')
        str_inn = str_inn.replace('  ', ' ')
        # normalise spaced hyphens to plain hyphens
        str_inn = str_inn.replace(' - ', '-')
        str_inn = str_inn.replace(' -', '-')
        str_inn = str_inn.replace('- ', '-')
        # re-space commas: exactly one space after each comma
        if ',' in str_inn:
            parts = [s.strip() for s in str_inn.split(',')]
            str_inn = ', '.join(parts)
        # re-space full stops the same way
        if '.' in str_inn:
            parts = [s.strip() for s in str_inn.split('.')]
            str_inn = '. '.join(parts)
        # remove spaces to left and right
        str_inn = str_inn.strip()
        # if there is no terminal punctuation or exclamation, add one
        if not (str_inn.endswith('.') or str_inn.endswith('!')):
            str_inn = str_inn + '.'
    return str_inn
def get_index_duplicates(lst, item) -> list:
    """
    Return every index at which *item* occurs in *lst*.

    Like list.index(), but list.index() stops at the first match; this
    returns all matching positions.

    :param lst: The list to search for an item or items in.
    :param item: The item to find indexes for in list.
    """
    hits = []
    for position, candidate in enumerate(lst):
        if candidate == item:
            hits.append(position)
    return hits
def analyze_data(all_data):
    """Calculate the total cost and prescriber count for each drug.

    Args:
        all_data (dict): maps drug name (str) -> dict mapping prescriber
            name (str) -> list of float drug costs.

    Returns:
        dict: maps drug name -> tuple of
            (number of prescribers (int), total cost (float)).

    The original built pointless set comprehensions over `.keys()` and
    counted prescribers with `sum([1 for ...])`; `.items()` and `len()`
    do the same work directly.
    """
    processed_data = {}
    for drug, prescribers in all_data.items():
        # Each prescriber's costs are summed, then totalled per drug.
        total_cost = sum(sum(costs) for costs in prescribers.values())
        processed_data[drug] = (len(prescribers), total_cost)
    return processed_data
def sorted_by_pid(processes):
    """
    Sort the given processes by their process ID.

    :param processes: An iterable of :class:`Process` objects.
    :returns: A list of :class:`Process` objects sorted by their process ID.
    """
    def by_pid(process):
        # sort key: the numeric process ID
        return process.pid
    return sorted(processes, key=by_pid)
def bool_flag(value):
    """
    Interpret a flag parameter that may be passed with or without a value.

    * None (flag absent)             -> False
    * ''   (flag present, no value)  -> True
    * 'true'/'TRUE'/'True'/'1'       -> True
    * 'false'/'FALSE'/'False'/'0'    -> False
    * an actual bool                 -> returned as-is (the original
      crashed on .lower() here, despite documenting `param=True`)
    * anything else                  -> False (the original silently fell
      through to an implicit None; truthiness is unchanged)
    """
    if isinstance(value, bool):
        return value
    if value is None:
        return False
    if value == '':
        return True
    lowered = value.lower()
    if lowered == 'true' or value == '1':
        return True
    if lowered == 'false' or value == '0':
        return False
    return False
def getText(nodelist):
    """
    Get the text value of an XML tag (from the minidom doc)
    """
    # Concatenate the data of every TEXT_NODE child, skipping elements,
    # comments, etc.
    return ''.join(node.data for node in nodelist
                   if node.nodeType == node.TEXT_NODE)
def php_substr_count(_haystack, _needle, _offset=0, _length=None):
    """Count non-overlapping occurrences of *_needle*, PHP-style.

    >>> php_substr_count('This is a test', 'is')
    2
    >>> php_substr_count('This is a test', 'is', 3)
    1
    >>> php_substr_count('This is a test', 'is', 3, 3)
    0
    >>> php_substr_count('gcdgcdgcd', 'gcdgcd')
    1
    """
    # A missing length means "to the end of the haystack".
    end = _offset + (len(_haystack) if _length is None else _length)
    return _haystack.count(_needle, _offset, end)
def create_rock_question(index):
    """create_rock_question"""
    # str.format with {} stringifies the index exactly like %s did.
    return "Rock Question {}".format(index)
def normalize_user_type(type_: str) -> str:
    """Normalize the JIRA user type name."""
    # Self-hosted instances report "on-prem"; map that to "atlassian".
    return "atlassian" if type_ == "on-prem" else type_
def where(cond, x1, x2):
    """
    Differentiable equivalent of np.where (or tf.where)

    Note that type of three variables should be same.

    Args:
        cond: condition
        x1: selected value if condition is 1 (True)
        x2: selected value if condition is 0 (False)
    """
    # Blend rather than branch so gradients flow through both inputs.
    keep = cond * x1
    drop = (1 - cond) * x2
    return keep + drop
def iou(box_a, box_b):
    """Apply intersection-over-union overlap between box_a and box_b"""
    def corners(box):
        # order the coordinates so (x0, y0) is the min corner
        x0, x1 = min(box[0], box[2]), max(box[0], box[2])
        y0, y1 = min(box[1], box[3]), max(box[1], box[3])
        return x0, y0, x1, y1

    ax0, ay0, ax1, ay1 = corners(box_a)
    bx0, by0, bx1, by1 = corners(box_b)

    area_a = (ay1 - ay0) * (ax1 - ax0)
    area_b = (by1 - by0) * (bx1 - bx0)
    # both boxes degenerate: define the overlap as zero
    if area_a <= 0 and area_b <= 0:
        return 0.0

    inter_w = max(min(ax1, bx1) - max(ax0, bx0), 0.0)
    inter_h = max(min(ay1, by1) - max(ay0, by0), 0.0)
    inter_area = inter_w * inter_h
    return inter_area / (area_a + area_b - inter_area)
def get_sig(pval):
    """Returns asterisk depending on the magnitude of significance"""
    # Conventional significance stars, checked from strictest threshold down.
    for threshold, stars in ((0.001, '***'), (0.01, '**'), (0.05, '*')):
        if pval < threshold:
            return stars
    return 'ns'  # non-significant
def check_for_winner(cards):
    """Return indices of winning cards i.e. has a completed row or column."""
    winners = []
    for card_num, card in enumerate(cards):
        # A line "wins" when all its entries are identical (fully marked).
        # Columns are only examined when no row already won (short-circuit).
        if any(len(set(row)) == 1 for row in card) or any(
            len({row[col] for row in card}) == 1
            for col in range(len(card[0]))
        ):
            winners.append(card_num)
    return winners
def _ParseTensorName(tensor_name): """Parses a tensor name into an operation name and output index. This function will canonicalize tensor names as follows: * "foo:0" -> ("foo", 0) * "foo:7" -> ("foo", 7) * "foo" -> ("foo", 0) * "foo:bar:baz" -> ValueError Args: tensor_name: The name of a tensor. Returns: A tuple containing the operation name, and the output index. Raises: ValueError: If `tensor_name' cannot be interpreted as the name of a tensor. """ components = tensor_name.split(':') if len(components) == 2: # Expected format: 'operation_name:output_index'. try: output_index = int(components[1]) except ValueError: raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,)) return components[0], output_index elif len(components) == 1: # Expected format: 'operation_name' (implicit 0th output). return components[0], 0 else: raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
def _repr(s): """ return hexadecimal representation of a string '-/=+!!!!@@' -> '2D2F3D2B 21212121 4040' """ return "".join(["%02X" % ord(c) for c in s]).rstrip()
def normalize_go_id(identifier: str) -> str:
    """If a GO term does not start with the ``GO:`` prefix, add it."""
    prefix = 'GO:'
    return identifier if identifier.startswith(prefix) else prefix + identifier
def get_param(value, sep, pos, defval=''):
    """
    Extract the *pos*-th field of *value* split on *sep* (pos is 1-based).

    Returns *defval* when *pos* is out of range. The original accepted
    pos <= 0 and silently wrapped to fields counted from the end (e.g.
    pos=0 returned the last field); positions below 1 now return the
    default.
    """
    parts = value.split(sep)
    if 1 <= pos <= len(parts):
        return parts[pos - 1].strip()
    return defval
def _is_water_file(f): """ Is this the filename of a water file? :type f: str :rtype: bool >>> _is_water_file('LS7_ETM_WATER_144_-037_2007-11-09T23-59-30.500467.tif') True >>> _is_water_file('createWaterExtents_r3450_3752.log') False >>> _is_water_file('LC81130742014337LGN00_B1.tif') False >>> _is_water_file('LS8_OLITIRS_OTH_P51_GALPGS01-032_113_074_20141203') False >>> # We only currently care about the Tiffs: >>> _is_water_file('LS_WATER_150_-022_1987-05-27T23-23-00.443_2014-03-10T23-55-40.796.nc') False """ return 'WATER' in f and f.endswith('.tif')
def update_info(info_dict: dict, name: str, info_to_add: dict) -> dict:
    """Helper function for summary dictionary generation.
    Updates the dictionary for each split."""
    # Copy each metric under a name-prefixed key; 'r' is squared on the way in.
    additions = {
        "_N": info_to_add["N"],
        "_MAE": info_to_add["MAE"],
        "_slope": info_to_add["slope"],
        "_int": info_to_add["intercept"],
        "_r_sq": info_to_add["r"] ** 2,
    }
    for suffix, metric in additions.items():
        info_dict[name + suffix] = metric
    return info_dict
def move_board(board, move, player):
    """Calculate the board obtained by making a move by the given player.

    Args:
        board: A bitboard (pair of ints: player-0 stones, player-1 stones).
        move: An integer position, or None for no move.
        player: A player number (0 or 1).
    """
    if move is None:
        stone = 0
    else:
        stone = 1 << move
    # The stone lands on side 0 when player == 0 and side 1 when player == 1.
    side0 = board[0] | stone * (1 - player)
    side1 = board[1] | stone * player
    return (side0, side1)
def psf_sample_to_pupil_sample(psf_sample, samples, wavelength, efl):
    """Convert PSF sample spacing to pupil sample spacing.

    Parameters
    ----------
    psf_sample : float
        sample spacing in the PSF plane
    samples : int
        number of samples present in both planes (must be equal)
    wavelength : float
        wavelength of light, in microns
    efl : float
        effective focal length of the optical system in mm

    Returns
    -------
    float
        the sample spacing in the pupil plane
    """
    numerator = efl * wavelength
    denominator = psf_sample * samples
    return numerator / denominator
def is_contiguous_sum(pl: list, target: int) -> int:
    """Searches parm list, summing integers starting from first item, and
    summing items in order trying to equal target. If target equals the
    contiguous sum, then it returns the sum of the max and min items in
    the sum. If contiguous sum fails, the function returns -1."""
    running = 0
    for end, value in enumerate(pl):
        running += value
        if running == target:
            window = pl[:end + 1]
            return min(window) + max(window)
        if running > target:
            # overshoot: no prefix can match
            break
    return -1
def _build_col_var_list_str(col_names, var_names): """ Builds the string that contains the list of column to parameterized variable assignments for SQL statements. Args: col_names ([str]): The list of column names. Order and length MUST match that of `var_names`! var_names ([str]): The list of var names, likely the keys in the dict returned by `_prep_sanitized_vars()`. Order and length MUST match that of `col_names`! Returns: (str): The single string that contains the list of all <col> = <var>` items in comma-separated format, where the vars are parameterized inputs (i.e. `%(<>)s`). An emptry string if no col/var names """ assert len(col_names) == len(var_names), 'Col and vars must be same length!' return ', '.join([f'{col_names[i]} = %({var_names[i]})s' for i in range(len(col_names))])
def csv_format(s):
    """
    Clean syntax problems for CSV output and wrap the value in quotes.

    Double quotes become single quotes, newlines become spaces, and
    carriage returns are dropped. The original removed the two-character
    sequence NUL+'D' ("\\x00D"), almost certainly a typo for the
    carriage-return escape "\\x0D".
    """
    s = s.replace('"', "'")
    s = s.replace("\n", " ")
    s = s.replace("\x0D", "")
    return '"%s"' % s
def format_docstring(docstr: str) -> str:
    r"""Remove newlines ('\n') and 4-space indent runs from a docstring.

    The original docstring was not a raw string, so its '\n' rendered as
    an actual line break; and the 4-space replacement had been
    whitespace-collapsed into a no-op `replace(' ', ' ')` — restored here
    per the documented intent ("removes \n and 4 consecutive spaces").
    """
    return docstr.replace('\n', '').replace('    ', '')
def list2lookup(lst):
    """
    Create a dict where each key is lst[i] and value is i.
    """
    return {element: index for index, element in enumerate(lst)}
def _get_new_box(src_w, src_h, bbox, scale):
    """ change the new face boundries into based on the zoomout scale

    Grows the (x, y, w, h) face box around its centre by `scale`, capping
    the scale so the result fits the image, then shifts the box back
    inside the image when it spills over an edge.

    Args:
        src_w (int): Original Image width
        src_h (int): Original Image height
        bbox (tuple): face boundries in (xywh) format
        scale (float): the zoomout scale

    Returns:
        [tuple]: the new face boundries as ints
        (left, top, right, bottom), clamped inside the source image.
    """
    x = bbox[0]
    y = bbox[1]
    box_w = bbox[2]
    box_h = bbox[3]
    # cap the zoom so the scaled box can never exceed the image extents
    scale = min((src_h-1)/box_h, min((src_w-1)/box_w, scale))
    new_width = box_w * scale
    new_height = box_h * scale
    # grow the box symmetrically around its centre point
    center_x, center_y = box_w/2+x, box_h/2+y
    left_top_x = center_x-new_width/2
    left_top_y = center_y-new_height/2
    right_bottom_x = center_x+new_width/2
    right_bottom_y = center_y+new_height/2
    # shift the whole box back inside the image when it spills over an edge
    if left_top_x < 0:
        right_bottom_x -= left_top_x
        left_top_x = 0
    if left_top_y < 0:
        right_bottom_y -= left_top_y
        left_top_y = 0
    if right_bottom_x > src_w-1:
        left_top_x -= right_bottom_x-src_w+1
        right_bottom_x = src_w-1
    if right_bottom_y > src_h-1:
        left_top_y -= right_bottom_y-src_h+1
        right_bottom_y = src_h-1
    return int(left_top_x), int(left_top_y),\
        int(right_bottom_x), int(right_bottom_y)
def sumDigits(number):
    """ sum_digits == PEP8 (camelCase forced by CodeWars) """
    total = 0
    # iterate the decimal digits of the absolute value
    for digit in str(abs(number)):
        total += int(digit)
    return total
def get_fragment(uri):
    """
    Get the final part of the URI.

    The final part is the text after the last hash (#) or slash (/), and
    is typically the part that varies and is appended to the namespace.

    Args:
        uri: The full URI to extract the fragment from.

    Returns:
        The final part of the given URI.
    """
    candidates = []
    for delimiter in ('#', '/'):
        if delimiter in uri:
            candidates.append(uri.rsplit(delimiter, 1)[-1])
    # The shortest tail is the one cut at the right-most delimiter.
    return min(candidates, key=len)
def getScore(tweet):
    """Return annotated score from the given annotation dictionary (for a tweet)"""
    # annotator names checked in fixed priority order, 'mean' last
    annotators = ("ahmad", "severin", "sina", "ute", "mean")
    for name in annotators:
        if name in tweet:
            return int(tweet[name])
    raise KeyError("tweet-dict doesn't contain any of our names nor 'mean'")
def dms_to_dd(degree, minute, second) -> float:
    """Convert degrees/minutes/seconds to a decimal degree for storage in
    a Point.

    The sign of *degree* gives the sign of the result; minutes and seconds
    are treated as magnitudes. The original multiplied the (already
    negative) degree value by the sign again, so e.g. (-10, 30, 0)
    yielded +9.5 instead of -10.5.
    """
    sign = -1 if degree < 0 else 1
    magnitude = abs(int(degree)) + float(minute) / 60 + float(second) / 3600
    return sign * magnitude
def dependencies_order_of_build(target_contract, dependencies_map):
    """ Return an ordered list of contracts that is sufficient to
    successfully deploy the target contract.

    Performs a breadth-first walk from `target_contract`, inserting each
    visited contract before the earliest of its dependencies already in
    the order; the final reverse yields dependencies-first order.

    Note: This function assumes that the `dependencies_map` is an acyclic
    graph. NOTE(review): contracts pulled from the todo queue are looked
    up in `dependencies_map` without a guard — presumably every reachable
    contract has an entry; confirm with callers.
    """
    if not dependencies_map:
        return [target_contract]
    if target_contract not in dependencies_map:
        raise ValueError("no dependencies defined for {}".format(target_contract))
    order = [target_contract]
    todo = list(dependencies_map[target_contract])
    while todo:
        target_contract = todo.pop(0)
        # default position: append at the end of the current order
        target_pos = len(order)
        for dependency in dependencies_map[target_contract]:
            # we need to add the current contract before all its dependencies
            if dependency in order:
                target_pos = order.index(dependency)
            else:
                todo.append(dependency)
        order.insert(target_pos, target_contract)
    # reverse so dependencies come before their dependents
    order.reverse()
    return order
def safe_mul(val1, val2):
    """
    Safely multiplies two values. If either value is -1, then the result
    is -1 (unknown).
    """
    # -1 is the "unknown" sentinel and propagates through the product.
    if -1 in (val1, val2):
        return -1
    return val1 * val2
def clean(params: dict) -> str:
    """ Build clean rules for Makefile """
    lines = ["\t@$(RM) -rf $(BUILDDIR)\n"]
    # propagate the clean target into vendored library folders when used
    if params["library_libft"]:
        lines.append("\t@make $@ -C " + params["folder_libft"] + "\n")
    if params["library_mlx"] and params["compile_mlx"]:
        lines.append("\t@make $@ -C " + params["folder_mlx"] + "\n")
    return "".join(lines)
def check_output_filepath(filepath):
    """
    Check and return an appropriate output_filepath parameter.

    Ensures the file is a csv file and that a value is set. If a value is
    not set or is not a csv, a default value is returned. (The original
    documented None handling but crashed with AttributeError when
    filepath was None.)

    :param filepath: string filepath name, or None
    :returns: a string representing a filepath location.
    """
    if filepath and filepath.endswith('.csv'):
        return filepath
    return "clean_rules_report.csv"
def wavelen2wavenum(l):
    """Converts from wavelength in nm to wavenumber in cm^-1"""
    # 1e7 is the nm -> cm^-1 conversion factor; the operation order
    # (1 / l) * 1.0e7 matches the original to keep floats bit-identical.
    reciprocal = 1 / l
    return reciprocal * 1.0e7
def print_tree(ifaces):
    """ Prints a list of iface trees """
    trees = [iface.get_tree() for iface in ifaces]
    return " ".join(trees)
def try_int(obj):
    """Try conversion of object to int."""
    try:
        return int(obj)
    except ValueError:
        # not numeric: hand the object back untouched
        return obj
def deserialize_list_str(string):
    """Parse a string version of recursive lists into recursive lists of strings.

    The outer '[' and ']' are stripped, the remainder is split at
    top-level commas (commas inside nested brackets are ignored), and any
    element that itself looks like a list is parsed recursively.
    """
    assert string.startswith("[") and string.endswith("]")
    string = string[1:-1]
    bracket_count = 0
    # indices of top-level commas; 0 stands in for the start of the string
    punctuations = [0]
    for i, letter in enumerate(string):
        if letter == "," and bracket_count == 0:
            punctuations.append(i)
        elif letter == "[":
            bracket_count += 1
        elif letter == "]":
            bracket_count -= 1
    if len(punctuations) == 1:
        # no top-level comma: a single (possibly empty) element
        return [string]
    List = []
    for i in range(len(punctuations)):
        # slice out the element between consecutive top-level commas
        if i == 0:
            element = string[:punctuations[1]]
        elif i == len(punctuations) - 1:
            element = string[punctuations[i] + 1:].strip()
        else:
            element = string[punctuations[i] + 1: punctuations[i+1]].strip()
        if element.startswith("[") and element.endswith("]"):
            # nested list: recurse
            List.append(deserialize_list_str(element))
        else:
            List.append(element)
    return List
def caller(n=1):
    """Return the name of the calling function n levels up in the frame stack.

    >>> caller(0)
    'caller'
    >>> def f():
    ...     return caller()
    >>> f()
    'f'
    """
    import inspect
    outer_frames = inspect.getouterframes(inspect.currentframe())
    # element 3 of each frame record is the function name
    return outer_frames[n][3]
def shape_of_vertical_links(shape):
    """Shape of vertical link grid.

    Number of rows and columns of *vertical* links that connect nodes in
    a structured grid of quadrilaterals.

    Parameters
    ----------
    shape : tuple of int
        Shape of grid of nodes.

    Returns
    -------
    tuple of int :
        Shape of the vertical links in grid.

    Examples
    --------
    >>> shape_of_vertical_links((3, 4))
    (2, 4)
    """
    n_rows, n_cols = shape
    # one fewer row of links than of nodes; same number of columns
    return (n_rows - 1, n_cols)
def find_homology(long_seq, short_seq):
    """
    Find the window of *long_seq* most similar to *short_seq*.

    :param long_seq: str, the base DNA sequence user wants to search in
    :param short_seq: str, the DNA sequence user wants to match
    :return: str, the best-matching window (first one on ties); '' when
        no window shares any position with short_seq.

    Rewritten with slicing/zip; the original rebuilt each window
    character by character.
    """
    best_window = ''
    best_score = 0
    for start in range(len(long_seq) - len(short_seq) + 1):
        window = long_seq[start:start + len(short_seq)]
        # count positions where the window agrees with short_seq
        score = sum(1 for a, b in zip(window, short_seq) if a == b)
        if score > best_score:
            best_score = score
            best_window = window
    return best_window
def isGRAVSG(filename):
    """
    Check whether a file is in ASCII SG (TSF) file format.

    Reads only the first line and tests for the '[TSF-file]' marker.
    The original leaked the file handle, used bare excepts, and wrapped
    the startswith check in a redundant second try block.
    """
    try:
        with open(filename, 'rt') as handle:
            first_line = handle.readline()
    except (OSError, UnicodeDecodeError, TypeError):
        # unreadable / undecodable / bogus filenames are simply not SG files
        return False
    return first_line.startswith('[TSF-file]')
def tile_tuples(w, h):
    """ Return tile sizes for resizing ASCII Images. """
    def proper_divisors(n):
        # divisors of n strictly between 1 and n
        return [d for d in range(2, n) if n % d == 0]
    # zip truncates to the shorter divisor list, pairing widths with heights
    return list(zip(proper_divisors(w), proper_divisors(h)))
def urlpath2(url: bytes) -> bytes:
    """ Get url's path(strip params) """
    # partition keeps everything before the first b'?' and never raises
    return url.partition(b'?')[0]
def _check_type( value ): """ Makes sure that, upon reading a value, it gets assigned the correct type. """ try: int(value) except ValueError: try: float(value) except ValueError: return value else: return float(value) else: return int(value)
def rmvsuffix(subject):
    """
    Remove the suffix from *subject*.
    """
    dot = subject.rfind('.')
    # only strip when the dot lies in the basename, not in a directory part
    last_sep = subject.replace('\\', '/').rfind('/')
    if dot > last_sep:
        return subject[:dot]
    return subject
def sort_bl(p):
    """Sort a tuple that starts with a pair of antennas, and may have stuff after."""
    first, second = p[0], p[1]
    if first > second:
        # swap the antenna pair, keeping any trailing payload
        return (second, first) + p[2:]
    return p
def _scale8_video_LEAVING_R1_DIRTY(i, scale): """Internal Use Only""" nonzeroscale = 0 if scale != 0: nonzeroscale = 1 if i != 0: i = ((i * scale) >> 8) + nonzeroscale return i
def titlecase_keys(d):
    """
    Takes a dict with keys of type str and returns a new dict with all
    keys titlecased.
    """
    result = {}
    for key, value in d.items():
        result[key.title()] = value
    return result
def constructQuery(column_lst, case_id):
    """ Construct the query to public dataset: aketari-covid19-public.covid19.ISMIR

    Args:
        column_lst: list - ["*"] or ["column_name1", "column_name2" ...]
        case_id: str - Optional e.g "case1"

    Returns:
        query string

    NOTE(security): case_id and column names are interpolated directly
    into the SQL text; only pass trusted values, or switch callers to
    BigQuery query parameters.
    """
    # ["*"] joins to plain "*", so both cases share one template
    # (the original duplicated the whole statement across two branches).
    columns_str = ", ".join(column_lst)
    return ('SELECT {} FROM `aketari-covid19-public.covid19.ISMIR` '
            'WHERE `case`="{}" '.format(columns_str, case_id))
def get_is_verbose(ctx):
    """Returns whether or not verbose mode is enabled"""
    # Prefer the context's own options, then its parent's; default to
    # verbose when neither carries riptide_options.
    for holder in (ctx, getattr(ctx, "parent", None)):
        if holder is not None and hasattr(holder, "riptide_options"):
            return holder.riptide_options["verbose"]
    return True
def fn_example(values):
    """ function / method with built-in unit test example; this example
    computes the arithmetic mean of a list of numbers.

    Args:
        values: list of integers

    Returns:
        float: the arithmetic mean

    Examples:
        >>> print(fn_example([20, 30, 70]))
        40.0
    """
    total = sum(values)
    count = len(values)
    return total / count
def format_to_string(obj):
    """
    Formatter to print strings and bytes without leading/trailing quotes
    """
    if isinstance(obj, (bytes, str)):
        # decode bytes first, then strip the quotes repr() adds
        text = obj.decode() if isinstance(obj, bytes) else obj
        return repr(text).strip('"\'')
    return obj
def expandRanges(ranges, size):
    """Expand Range sets, given those sets and the length of the resource.
    Expansion means relative start values and open ends
    """
    expanded = []
    for start, end in ranges:
        if start < 0:
            # negative start is relative to the end of the resource
            start += size
        # a falsy end (None or 0) means "to the end"; clamp to size
        end = min(end or size, size)
        if start < size:
            # only keep satisfiable ranges
            expanded.append((start, end))
    return expanded
def _clean_path(path): """ Clean filename so that it is command line friendly. Currently just escapes spaces. """ return path.replace(' ', '\ ')
def parseDMS(dec_in): """Decode an angular value in 'funky SOSS format' (see convertToFloat()), and return a tuple containing sign (+1 or -1), degrees, minutes and seconds of arc.""" # make sure that the input value is numeric dec_in = float(dec_in) # then convert it into the formatted string input_str = '%+011.3f' % (dec_in) # break down the parts if input_str[0] == '-': input_sign = -1 else: input_sign = 1 return (input_sign, int (input_str[1:3]), int (input_str[3:5]), float(input_str[5:]))
def anonymize(labels, unique_ordered_labels):
    """Renames labels using index of unique_ordered_labels"""
    positions = {label: i for i, label in enumerate(unique_ordered_labels)}
    return [positions[label] for label in labels]
def connect_type(word_list):
    """
    This function takes a list of words, then, depending which key word,
    returns the corresponding internet connection type as a string,
    e.g. 'ethernet'.
    """
    # wifi interfaces win over ethernet when both are present
    if 'wlan0' in word_list or 'wlan1' in word_list:
        return 'wifi'
    if 'eth0' in word_list:
        return 'ethernet'
    return 'current'
def ang_overlap_stark(l_1, l_2, m_1, m_2, field_orientation, dm_allow):
    """ Angular overlap <l1, m| cos(theta) |l2, m>. For Stark interaction

    Selection rules: only dl = +/-1 couples; which dm values contribute
    depends on the field orientation ('parallel' restricts dm to the
    `dm_allow` set, 'crossed' couples dm = +/-1). All non-matching
    combinations return 0.0. NOTE(review): the coefficient formulas are
    assumed to follow the standard spherical-harmonic matrix elements —
    confirm against the source derivation.
    """
    dl = l_2 - l_1
    dm = m_2 - m_1
    l, m = int(l_1), int(m_1)
    if field_orientation=='parallel':
        # parallel field: dm in dm_allow gates each branch
        if (dm == 0) and (dm in dm_allow):
            if dl == +1:
                return +(((l+1)**2-m**2)/((2*l+3)*(2*l+1)))**0.5
            elif dl == -1:
                return +((l**2-m**2)/((2*l+1)*(2*l-1)))**0.5
        elif (dm == +1) and (dm in dm_allow):
            if dl == +1:
                return -((l+m+2)*(l+m+1)/(2*(2*l+3)*(2*l+1)))**0.5
            elif dl == -1:
                return +((l-m)*(l-m-1)/(2*(2*l+1)*(2*l-1)))**0.5
        elif (dm == -1) and (dm in dm_allow):
            if dl == +1:
                return +((l-m+2)*(l-m+1)/(2*(2*l+3)*(2*l+1)))**0.5
            elif dl == -1:
                return -((l+m)*(l+m-1)/(2*(2*l+1)*(2*l-1)))**0.5
    elif field_orientation=='crossed':
        # crossed field: only dm = +/-1 couples, with (-1)^... phase factors
        if dm == +1:
            if dl == +1:
                return +(0.5*(-1)**(m-2*l)) * (((l+m+1)*(l+m+2))/((2*l+1)*(2*l+3)))**0.5
            elif dl == -1:
                return -(0.5*(-1)**(-m+2*l)) * (((l-m-1)*(l-m)) /((2*l-1)*(2*l+1)))**0.5
        elif dm == -1:
            if dl == +1:
                return +(0.5*(-1)**(m-2*l)) * (((l-m+1)*(l-m+2))/((2*l+1)*(2*l+3)))**0.5
            elif dl == -1:
                return -(0.5*(-1)**(-m+2*l)) * (((l+m-1)*(l+m)) /((2*l-1)*(2*l+1)))**0.5
    # no selection rule matched
    return 0.0
def check_calculate_mean(mean_anual_tmp):
    """Check that the given mean annual temperature is a float.

    Parameters
    ----------
    mean_anual_tmp: float
        the average temperature of a certain period which should be checked

    Raises
    ------
    TypeError
        if the type of the input parameter isn't float

    Returns
    -------
    mean_anual_tmp: float
        the unchanged input when it has the right type
    """
    # isinstance is the idiomatic type check; the original compared
    # type() == float directly.
    if isinstance(mean_anual_tmp, float):
        return mean_anual_tmp
    raise TypeError('The mean anual temperature has to be of type float')
def findBestServer(hiveData):
    """
    Sweeps the dictionary and finds the best server by first finding the
    minimum average CPU usage and then finding the one with the fewest
    users.

    `hiveData` maps server name -> per-server dict carrying at least
    'load_avgs' (indexable, the 1-minute/5-minute averages) and 'users'
    (a sized collection). Servers with falsy data are skipped. The
    return value is a dict of the tied-best servers' name -> data.
    """
    def findMin(valueFunction, fullData):
        """
        Finds the minimum value in the data set fullData according to a
        value obtained by applying valueFunction to those values.
        Returns the dict of ALL entries tied at the minimum.
        """
        minValue = None
        minValues = {}
        for data in fullData:
            if fullData[data]:
                averageValue = valueFunction(data, fullData)
                if minValue == None or averageValue < minValue:
                    # strictly better: restart the tie set
                    minValue = averageValue
                    minValues = {}
                    minValues[data] = fullData[data]
                elif averageValue == minValue:
                    # tie: keep this server as well
                    minValues[data] = fullData[data]
        return minValues
    # First, get the best servers by lowest average CPU usage, as
    # Hivemind's code does (load_avgs[1] is the chosen average).
    bestCPU = findMin((lambda x, dataSet: dataSet[x]['load_avgs'][1]), hiveData)
    # Then, get the best servers by fewest number of online users
    return findMin((lambda x, dataSet: len(dataSet[x]['users'])), bestCPU)
def _load_type(type_name): """ _load_type('exceptions.KeyError') -> KeyError """ module_name, name = type_name.rsplit('.', 1) mod = __import__(module_name, fromlist = [str(name)]) return getattr(mod, name)
def ci_equals(left, right):
    """Check if @left string is case-insensitively equal to @right string.

    :returns: left.lower() == right.lower() if @left and @right are str,
        or left == right in any other case.
    """
    both_strings = isinstance(left, str) and isinstance(right, str)
    if both_strings:
        return left.lower() == right.lower()
    return left == right
def bias_param(name, learn_all=True):
    """
    Creates a named param for bias of a conv/fc layer

    Example of named param for bias:
      param {
        name: "conv1_b"
        lr_mult: 2
        decay_mult: 0
      }

    (The previous docstring example showed lr_mult 1 / decay_mult 1,
    which this function never produces: biases learn at twice the base
    rate and are not weight-decayed; lr_mult is 0 when frozen.)

    :param name: str
    :param learn_all: bool. If True, sets the weights of that layer to be
        modified during the training process
    :returns: dict with params
    """
    lr_mult = 2 if learn_all else 0
    return dict(name=name, lr_mult=lr_mult, decay_mult=0)
def format_perc_3(value):
    """Format a number in [0, 1] to a percentage number with 3 digits
    after the dot."""
    return f"{value * 100:.3f}%"
def densityMultipleFiles(rootName, fileIndex):
    """ Returns the name of the density file 'fileIndex' when a result is
    saved in multiple binary files. It takes 2 arguments: root name of
    the files and the file number whose name is requested (from 0 to
    DensityHeader.noDensityFiles-1)."""
    return "{}.{:d}".format(rootName, fileIndex)
def extract_key_vals(data_str):
    """
    Extract key -> value pairs into a dictionary from a string.

    Entries are ':'-separated; each entry is 'key>value' with a float
    value. Entries that don't split into exactly two pieces are skipped.
    """
    key_vals = {}
    for entry in data_str.split(':'):
        pieces = entry.split('>')
        if len(pieces) == 2:
            key, raw_value = pieces
            key_vals[key] = float(raw_value)
    return key_vals
def TypeCodeToType(typeCode):
    """
    Convert a type code to the class it represents.

    'b', 'd', 'f', 's' -> float; 'i', 'l' -> int; 'c' -> str.
    Raises Exception for anything else. (An unreachable `return` after
    the raise has been removed.)
    """
    if typeCode in ("b", "d", "f", "s"):
        return float
    if typeCode in ("i", "l"):
        return int
    if typeCode == "c":
        return str
    raise Exception("Unrecognised type code: " + typeCode)
def safe_reldiff(a, b):
    """Return the relative difference (b - a) / a, guarding degenerate inputs.

    Returns 1 when either operand is exactly 0.
    NOTE(review): the ``b == 0`` guard does not actually avoid a divide by
    zero ((b - a) / a would be -1); preserved from the original — confirm
    whether that short-circuit is intentional.
    """
    if a == 0 or b == 0:
        return 1
    return (b - a) / a
def nearest_nonmixing_event(event_name, i):
    """Return the indexes of the nearest non-mixing events
    (SpectralEvent and ConstantDurationEvent) around the mixing event at
    index ``i``.

    Args:
        event_name: List of event class names.
        i: Int index of the mixing event.

    Returns:
        [index below i, index at/above i]; when no target exists on a
        side, the distance defaults to 0 (giving i - 1 and i).
    """
    targets = ("SpectralEvent", "ConstantDurationEvent")
    before = event_name[:i]
    after = event_name[i:]

    # Distance from i to the closest target on each side; 0 when absent.
    dist_up = min((after.index(name) for name in targets if name in after), default=0)
    reversed_before = before[::-1]
    dist_down = min(
        (reversed_before.index(name) for name in targets if name in before), default=0
    )

    return [i - 1 - dist_down, i + dist_up]
def flatten_nested_lists(activation_maps):
    """Flatten a depth-3 nested list in row-major order.

    Args:
        activation_maps: list of list of list of z3.ExprRef with dimensions
            (channels, activation_map_size, activation_map_size).

    Returns:
        Flat list of z3.ExprRef, channels-then-rows-then-columns order.
    """
    return [
        element
        for channel in activation_maps
        for row in channel
        for element in row
    ]
def _bytes_to_str(value: bytes) -> str: """Convert ``bytes`` to ``str``""" return value.decode("utf-8")
def anonymize(tumor_list, prefix="CS"):
    """Anonymize a list of tumor names.

    Fixed: the original returned an identity mapping ``{tumor: tumor}``
    and ignored ``prefix`` entirely, so nothing was anonymized.  Each
    tumor now maps to a sequential label built from ``prefix``.

    :param tumor_list: iterable of tumor names; input order determines
        the numbering.
    :param prefix: str prepended to the 1-based running number.
    :returns: dict mapping each original name to ``"<prefix><n>"``.
    """
    return {
        tumor: f"{prefix}{index}"
        for index, tumor in enumerate(tumor_list, start=1)
    }
def characters(String, returnString=False):
    """Validate a string against the DL naming rules.

    The compliant form is: uppercased, whitespace-trimmed, with each of
    ``, ( ) = + # . ;`` and space mapped to ``_``, ``&`` expanded to
    ``_AND_``, runs of ``_`` collapsed, and leading/trailing ``_``
    stripped.

    Parameters
    ----------
    String : the string to be validated
    returnString : when True return the compliant string; when False
        return whether the input was already compliant.

    Returns
    -------
    bool or str, per ``returnString``.
    """
    # Uppercase, trim, then map every special character to '_' in one pass.
    specials = str.maketrans({c: '_' for c in ',()=+#.; '})
    cleaned = String.upper().strip().translate(specials)
    # '&' becomes the word separator '_AND_'.
    cleaned = cleaned.replace('&', '_AND_')
    # Collapse runs of '_' down to a single one.
    while '__' in cleaned:
        cleaned = cleaned.replace('__', '_')
    cleaned = cleaned.strip('_')
    return cleaned if returnString else cleaned == String
def attrib_basic(_sample, class_id):
    """Build the basic attribute dict for a data sample.

    Args:
        _sample: data sample (unused here; kept so all attribute builders
            share one signature)
        class_id: class label associated with the data (sometimes
            indicating which subset the data are drawn from)
    """
    return dict(class_id=class_id)
def read_cols_query(col_names, table_name, schema_name):
    """Build the SELECT statement that reads columns from a table.

    :param col_names: (list of strs) column names to read
    :param table_name: (str) table to read from
    :param schema_name: (str) schema the table lives in
    :return: pSQL query string for the task
    """
    return "SELECT " + ", ".join(col_names) + " FROM " + schema_name + "." + table_name
def is_leap(year: int) -> bool:
    """Return True for a leap year, False otherwise."""
    if year % 4:
        return False
    if year % 100:
        return True
    # Century years are leap only when divisible by 400.
    return year % 400 == 0
def add(x, y, z):
    """Sum three values.

    Dummy function used only to exercise the multicore helper.  It lives
    at module level (rather than in the test script) because the worker
    serializes it by reference to an importable module, which the test
    script itself is not.
    """
    return x + y + z
def readable_size(n: int) -> str:
    """Return a readable size string using base-1000 K/M/G suffixes.

    Values below 1000 are rendered without a suffix; values at or above
    10**12 stay in 'G'.
    """
    suffixes = ['K', 'M', 'G']
    value, suffix = n, ''
    for power, candidate in enumerate(suffixes, start=1):
        scaled = n / (1000 ** power)
        if scaled < 1:
            break
        value, suffix = scaled, candidate
    return '%.2f%s' % (value, suffix)
def fibonacci(nth: int) -> int:
    """
    >>> fibonacci(0)
    0
    >>> fibonacci(1)
    1
    >>> fibonacci(2)
    1
    >>> fibonacci(9)
    34
    """
    # Two rolling values instead of a full table: O(1) space, same results.
    previous, current = 0, 1
    for _ in range(nth):
        previous, current = current, previous + current
    return previous
def findORF_all(dna_seq):
    """Find open reading frames in reading frame 0 of the DNA sequence.

    Every codon boundary (multiple of 3) holding 'ATG' opens a candidate
    ORF, reported up to (excluding) its first in-frame stop codon
    ('TAA'/'TAG'/'TGA').  Candidates with no in-frame stop are omitted;
    overlapping/nested ORFs are each reported.
    """
    sequence = dna_seq.upper()
    stop_codons = ('TAA', 'TAG', 'TGA')
    found = []
    for start in range(0, len(sequence), 3):
        if sequence[start:start + 3] != 'ATG':
            continue
        candidate = sequence[start:]
        for offset in range(0, len(candidate), 3):
            if candidate[offset:offset + 3] in stop_codons:
                found.append(candidate[:offset])
                break
    return found
def dot_product(tf1, tf2):
    """
    Calculates the dot product of two term frequency vectors.

    Returns the dot product of the frequencies of matching terms in two
    term frequency vectors. The term frequency vectors are dictionaries
    with the term as the key and the frequency as the value.

    Fixed: the original shadowed the builtin ``sum`` with a local name,
    and its docstring claimed an ``int`` return although the accumulator
    started at 0.0 (the result is always a float).

    Parameters:
        tf1 (dict): Term frequency vector 1 {(term, ): frequency}
        tf2 (dict): Term frequency vector 2 {(term, ): frequency}

    Returns:
        float: Dot product of the frequencies of matching terms
    """
    # Dict key views support set intersection; iterate only shared terms.
    return float(sum(tf1[term] * tf2[term] for term in tf1.keys() & tf2.keys()))
def _romberg_diff(b, c, k): """Compute the differences for the Romberg quadrature corrections. See Forman Acton's "Real Computing Made Real," p 143. :param b: R(n-1, m-1) of Rombergs method. :param c: R(n, m-1) of Rombergs method. :param k: The parameter m of Rombergs method. :type b: float or array[float] :type c: float or array[float] :type k: int :returns: R(n, m) of Rombergs method. :rtype: float or array[float] """ tmp = 4.0**k diff = (tmp * c - b) / (tmp - 1.0) return diff
def get_stage_info(total_tokens, num_tasks):
    """Get the number of tokens each task receives at each training stage.

    Based on ERNIE 2.0's continual multi-task learning: there are as many
    stages as tasks, each stage larger than the previous one.

    :param total_tokens: total number of tokens to train on
    :param num_tasks: number of tasks
    :return: list of per-stage lists; entry [stage][task] is the token
        count for that task at that stage (0 for tasks not yet started,
        one subunit for already-seen tasks, a larger share for the task
        introduced at that stage).
    """
    per_task = total_tokens / num_tasks
    subunit = per_task / (num_tasks + 1)
    schedule = []
    for stage in range(num_tasks):
        row = []
        for task in range(num_tasks):
            if task > stage:
                row.append(0)            # task not introduced yet
            elif task < stage:
                row.append(subunit)      # ongoing task: one subunit
            else:
                row.append(subunit * (stage + 2))  # newly introduced task
        schedule.append(row)
    return schedule
def bsearch(arr, t):
    """Binary search in a sorted sequence.

    arr: Iterable, sorted.
    t: target.
    return: index of ``t``, or None when absent (or when ``arr`` is
        empty/falsy or ``t`` is None).
    """
    if not arr or t is None:
        return None
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        # Python ints don't overflow, so the plain midpoint is safe.
        mid = (lo + hi) // 2
        value = arr[mid]
        if value == t:
            return mid
        if value < t:
            lo = mid + 1
        else:
            hi = mid - 1
    return None
def matrix(mat,nrow=1,ncol=1,byrow=False):
    """Given a two dimensional array, write the array in a matrix form

    Builds an R script snippet ``m<-matrix(data=c(...), nrow=..., ncol=...)``
    assigning the data in ``mat`` to the R variable ``m``.

    Two input shapes are supported, distinguished by the try/except below:
    - 2-D (list of rows): nrow/ncol are taken from the data itself and
      byrow=TRUE is always emitted; the ``nrow``/``ncol``/``byrow``
      parameters are ignored in this branch.
      NOTE(review): ignoring the caller's byrow here may be intentional
      (rows are written in row-major order) — confirm.
    - flat sequence of numbers: the caller-supplied ``nrow``/``ncol`` are
      used and byrow=TRUE is appended only when ``byrow`` is truthy.
    """
    nr=len(mat)
    rscript='m<-matrix(data=c('
    try:
        # len(mat[0]) raises TypeError when mat[0] is a scalar, which is
        # how the flat-sequence case is detected (EAFP).
        nc=len(mat[0])
        for m in mat:
            # str(m)[1:-1] strips the surrounding brackets of each row's
            # repr, leaving comma-separated values.
            rscript+=str(m)[1:-1]+ ', '
        # Drop the trailing ', ' and close the c(...) vector.
        rscript=rscript[:-2]+'), nrow=%d, ncol=%d, byrow=TRUE,' %(nr,nc)
    except TypeError:
        # Flat input: strip the list brackets of the whole sequence.
        rscript+=str(mat)[1:-1]+','
        rscript=rscript[:-1]+'), nrow=%d, ncol=%d,' %(nrow,ncol)
    if byrow:
        rscript+='byrow=TRUE,'
    # Replace the trailing comma with the closing parenthesis.
    rscript=rscript[:-1]+')\n'
    return rscript
def samesideofline(pos, dest, line):
    """checks if pos and dest is on the same side of line

    The test works on axis-aligned bounding boxes: the segment pos->dest
    and the segment ``line`` are reduced to their x/y extents, and the
    result encodes how the two boxes relate.

    :param pos: a pair/vector of numbers as a Vec2D (e.g. as returned by pos())
    :param dest: a pair/vector of numbers as a Vec2D (e.g. as returned by pos())
    :param line: a list of two pairs/vectors of numbers as two Vec2D
    :return: a number that can be used as bool
        0 = pos and dest is on each side of line
        1 = pos and dest is left of line
        2 = pos and dest is over line
        4 = pos and dest is right of line
        8 = pos and dest is under line
        16 = pos and dest is outside of line, should be considered as True
        as movement is allowed"""
    xli = min(line[0][0], line[1][0])  # min of x line cord
    xla = max(line[0][0], line[1][0])  # max of x line cord
    yli = min(line[0][1], line[1][1])  # min of y line cord
    yla = max(line[0][1], line[1][1])  # max of y line cord
    xpi = min(pos[0], dest[0])  # min of x pos and dest
    xpa = max(pos[0], dest[0])  # max of x pos and dest
    ypi = min(pos[1], dest[1])  # min of y pos and dest
    ypa = max(pos[1], dest[1])  # max of y pos and dest
    # Case 1: the movement's x-extent overlaps the line's x-extent, so the
    # segments can only be separated vertically.
    if xli < xpi < xla or xli < xpa < xla:
        if ypa < yli:  # pos and dest is under line
            result = 8
        elif ypi > yla:  # pos and dest is over line
            result = 2
        else:
            result = 0
    # Case 2: the y-extents overlap, so separation can only be horizontal.
    # NOTE(review): the second clause `yli > ypa > yla` can never be true
    # (yli <= yla by construction); it looks like it was meant to be
    # `yli < ypa < yla` — TODO confirm against callers before changing.
    elif yli < ypi < yla or yli > ypa > yla:
        if xpa < xli:  # pos and dest is left of line
            result = 1
        elif xpi > xla:  # pos and dest is right of line
            result = 4
        else:
            result = 0
    else:  # pos and dest is outside of line
        result = 16
    return result