content
stringlengths
42
6.51k
def get_bin_number(bins, val):
    """Locate the bin that contains *val* within a bin space.

    Parameters
    ==========
    bins :: list
        The bin edges.
    val :: real number
        The value to locate.

    Returns
    =======
    The bin number of the test value; -1 if it lies outside the bin space.
    """
    for bin_idx, (lower, upper) in enumerate(zip(bins, bins[1:])):
        if lower <= val <= upper:
            return bin_idx
    return -1
def make_recommendation(inner_knee_angle, ideal_angle=145, buffer=5):
    """Recommend a seat adjustment from the measured knee angle.

    Args:
        inner_knee_angle: actual angle of the user
        ideal_angle: target angle
        buffer: accepted tolerance above and below ideal_angle

    Returns:
        str: 'UP', 'DOWN', or 'NOOP'
    """
    lower_bound = ideal_angle - buffer
    upper_bound = ideal_angle + buffer
    if inner_knee_angle < lower_bound:
        return "UP"
    if inner_knee_angle > upper_bound:
        return "DOWN"
    return "NOOP"
def avoids(word, letters):
    """Return True if *word* uses none of the characters in *letters*."""
    return all(letter not in word for letter in letters)
def parse_alpm_dict(blob):
    """Parse a blob of text as an ALPM file.

    Section headers look like ``%KEY%``; subsequent non-empty lines are
    that key's values. Returns a dict mapping each key to its value list.
    """
    result = {}
    key, values = None, []
    for line in blob.splitlines():
        if not line:
            continue  # blank lines separate sections and carry no data
        if line.startswith('%') and line.endswith('%'):
            if key is not None:
                result[key] = values
            key, values = line[1:-1], []
        else:
            values.append(line)
    if key is not None:  # flush the final section
        result[key] = values
    return result
def clean_access_dict(dict):
    """Return an access dict with unicode values replaced by str.

    >>> dirty = {u'one': u'alpha', 'two': [u'beta', 'gamma']}
    >>> clean = clean_access_dict(dirty)
    >>> sorted(list(clean.items()))
    [('one', 'alpha'), ('two', ['beta', 'gamma'])]
    """
    cleaned = {}
    for key, value in dict.items():
        if type(value) is list:  # exact list check, as in the original contract
            cleaned[str(key)] = [str(item) for item in value]
        else:
            cleaned[str(key)] = str(value)
    return cleaned
def merge_outputs(*outputs):
    """Merge model outputs for logging.

    Parameters
    ----------
    outputs : tuple of dict
        Outputs to be merged.

    Returns
    -------
    output : dict
        Dictionary with a "metrics" key (sub-dicts combined across outputs)
        plus every other key except "loss", which is handled elsewhere.
    """
    ignored_keys = ['loss']
    combined_keys = ['metrics']
    merged = {key: {} for key in combined_keys}
    for output in outputs:
        for key, val in output.items():
            if key in combined_keys:
                # Fold sub-dicts together, refusing duplicates
                for sub_key, sub_val in val.items():
                    assert sub_key not in merged[key].keys(), \
                        'Combining duplicated key {} to {}'.format(sub_key, key)
                    merged[key][sub_key] = sub_val
            elif key not in ignored_keys:
                assert key not in merged.keys(), \
                    'Adding duplicated key {}'.format(key)
                merged[key] = val
    return merged
def bold(s):
    """Wrap *s* in a LaTeX bold command.

    Source: http://stackoverflow.com/a/16264094/2570866
    """
    return ''.join([r'\textbf{', s, '}'])
def generate_dat_header(name, description, version):
    """Return the textual clrmamepro DAT header for the given
    <name>, <description>, and <version>."""
    lines = [
        'clrmamepro (',
        '\tname "%s"' % name,
        '\tdescription "%s"' % description,
        '\tversion %s' % version,
        ')',
    ]
    return '\n'.join(lines)
def get_index_of_user_by_id(id_value, data):
    """Return the index of the user with id *id_value* in *data*, or None."""
    return next(
        (idx for idx, record in enumerate(data) if record['id'] == id_value),
        None,
    )
def make_dict_lowercase(d):
    """Return a copy of dict *d* with every key and value lowercased.

    Args:
        `d` (:obj:`dict`): dictionary whose string keys and string values
            are converted to lowercase.

    Returns:
        A new dict mirroring *d* with lowercased keys and values.
    """
    return {key.lower(): value.lower() for key, value in d.items()}
def find_pivot(matrix, col: int) -> int:
    """Given the matrix and a column index, find the line that should be
    swapped with the "current" pivot line (the one with the largest
    absolute value in that column, at or below the diagonal).

    Returns the index of that line.
    """
    magnitudes = [abs(matrix[row][col]) for row in range(col, len(matrix))]
    return col + magnitudes.index(max(magnitudes))
def get_multiple_ids_string(queryset):
    """Return a comma-separated string of ids from a queryset for use in a
    SQL query."""
    return ", ".join("%s" % obj.id for obj in queryset)
def validate_string(value):
    """Validate a string input.

    Parameters:
        value (any): input value

    Returns:
        boolean: True if *value* is a str instance
    """
    is_string = isinstance(value, str)
    return is_string
def andlist(list, conjunction="and"):
    """Render a list of strings as English prose with a conjunction."""
    count = len(list)
    if count == 0:
        return "(empty list!)"
    if count == 1:
        return list[0]
    if count == 2:
        return (' ' + conjunction + ' ').join(list)
    # Oxford-comma style: "a, b, and c"
    return ', '.join(list[:-1] + [conjunction + ' ' + list[-1]])
def _make_tied_note_id(prev_id):  # non-public
    """Create a derived note ID for newly created notes, by appending
    letters to the ID. If the original ID has the form X-Y (e.g. n1-1),
    then the letter will be appended to the X part.

    Parameters
    ----------
    prev_id : str
        Original note ID

    Returns
    -------
    str
        Derived note ID (None when the X part is empty)

    Examples
    --------
    >>> _make_tied_note_id('n0')
    'n0a'
    >>> _make_tied_note_id('n0a')
    'n0b'
    >>> _make_tied_note_id('n0-1')
    'n0a-1'
    >>> _make_tied_note_id('n0a-1')
    'n0b-1'
    """
    prev_id_parts = prev_id.split("-", 1)
    prev_id_p1 = prev_id_parts[0]
    if not prev_id_p1:
        return None
    if ord(prev_id_p1[-1]) < ord("a") - 1:
        # No letter suffix yet: start the sequence at 'a'
        return "-".join(["{}a".format(prev_id_p1)] + prev_id_parts[1:])
    # Bug fix: increment the last char of the X part (prev_id_p1), not of the
    # whole ID — otherwise 'n0a-1' would bump the '1' instead of the 'a'.
    return "-".join(
        ["{}{}".format(prev_id_p1[:-1], chr(ord(prev_id_p1[-1]) + 1))]
        + prev_id_parts[1:]
    )
def find_ngrams(single_words, n):
    """Build n-grams from a word list.

    Args:
        single_words: list of lowercase words in text order
        n: length of the n-gram window

    Returns:
        list of space-joined n-gram strings, or an empty list when n is
        not a valid window size for the input.
    """
    if n <= 0 or n > len(single_words):
        return []
    return [
        " ".join(single_words[start:start + n])
        for start in range(len(single_words) - n + 1)
    ]
def hk_modes(hier_num):
    """Generate modes in the HK hierarchy.

    Parameters
    ----------
    hier_num : int
        Model number in the HK hierarchy.

    Returns
    -------
    p_modes : list
        Psi modes as (horizontal, vertical) wavenumber tuples.
        Base (Lorenz) case: [(0,1), (1,1)].
    t_modes : list
        Theta modes as tuples. Base (Lorenz) case: [(0,2), (1,1)].
    """
    p_modes = [(0, 1), (1, 1)]
    t_modes = [(0, 2), (1, 1)]
    current = (1, 1)
    for _ in range(1, hier_num):
        if current[1] == 1:
            # Start a new anti-diagonal of the mode lattice
            level = current[0] + 1
            current = (1, level)
            p_modes.append((0, 2 * level - 1))
            t_modes.append((0, 2 * level))
        else:
            # Walk down the current anti-diagonal
            current = (current[0] + 1, current[1] - 1)
            p_modes.append(current)
            t_modes.append(current)
    p_modes.sort()
    t_modes.sort()
    return p_modes, t_modes
def validate_result(aocQuestion: str, value, correct_value) -> str:
    """Display a part's result, tagged CORRECT/WRONG against the answer.

    Args:
        aocQuestion (str): the AOC question text.
        value: the value that was found.
        correct_value: the known-correct value (None -> no tag).

    Returns:
        str: '{aocQuestion} {value} {check}', right-stripped.
    """
    if correct_value is None:
        check = ''
    elif correct_value == value:
        check = 'CORRECT'
    else:
        check = 'WRONG'
    return f"{aocQuestion} {value} {check}".rstrip()
def correct_byteorder(ptype, byteorder):
    """Fix the byteorder depending on the PyTables type: single-byte and
    object types have no meaningful byte order."""
    single_byte_types = ('string', 'bool', 'int8', 'uint8', 'object')
    return "irrelevant" if ptype in single_byte_types else byteorder
def in_letter_digits(category):
    """Category test for code points informally described as
    "language characters".

    Args:
        category (str): Unicode general category.

    Returns:
        bool: True if *category* is in the language-character set.
    """
    return category in frozenset(('Ll', 'Lu', 'Lo', 'Nd', 'Lm', 'Mn', 'Mc'))
def memory_index(indices, t):
    """Location of an item in the underlying memory, given a layout tuple
    (memlen, itemsize, ndim, shape, strides, offset)."""
    memlen, itemsize, ndim, shape, strides, offset = t
    return offset + sum(strides[dim] * indices[dim] for dim in range(ndim))
def segment_segment_intersection_2(x1, y1, x2, y2, t):
    """Return the point at parameter *t* along the segment (x1,y1)-(x2,y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return [x1 + t * dx, y1 + t * dy]
def composekey(*keys):
    """Compose a sequence of keys into one key.

    Each key may contain whitespace-separated alternatives; the result is
    every dotted combination, space-separated.

    Example: composekey("attr1.attr2", "attr3") == "attr1.attr2.attr3"
    """
    alternative_lists = [key.split() for key in keys]
    combos = [[]]
    for alternatives in alternative_lists:
        # Note the loop order: alternatives outermost, preserving the
        # original combination ordering.
        combos = [prefix + [alt] for alt in alternatives for prefix in combos]
    return " ".join(".".join(combo) for combo in combos)
def get_DFG_name(job_name):
    """Return the DFG name: the second '-'-separated field of *job_name*."""
    fields = job_name.split('-')
    return fields[1]
def histogram(text):
    """Return a character histogram dictionary for the given text."""
    counts = dict()
    for character in text:
        counts[character] = counts.get(character, 0) + 1
    return counts
def rewrite_name(fname):
    """Strip common suffix words that aren't worth including in the URL:
    *-capture.png -> *.png, *-recording.mp4 -> *.mp4, *-clip.txt -> *.txt.
    """
    replacements = (
        ('-capture.png', '.png'),
        ('-recording.mp4', '.mp4'),
        ('-clip.txt', '.txt'),
    )
    for old, new in replacements:
        fname = fname.replace(old, new)
    return fname
def dumb_property_dict(style):
    """Parse an inline CSS style string into a {attribute: value} dict."""
    pairs = (
        chunk.split(':', 1)
        for chunk in style.split(';')
        if ':' in chunk
    )
    return {name.strip(): value.strip() for name, value in pairs}
def get_none_list(size):
    """Create a list of *size* entries, all None."""
    return [None] * size
def _service_and_endpoint_labels_from_method(method_name):
    """Get a normalized (service_label, endpoint_label) tuple from a
    gRPC-style method name '/service/endpoint'; dots become underscores."""
    parts = method_name.split("/")
    well_formed = (
        len(parts) == 3 and parts[0] == "" and parts[1] != "" and parts[2] != ""
    )
    if not well_formed:
        raise AssertionError("Invalid method name: {}".format(method_name))
    return parts[1].replace(".", "_"), parts[2].replace(".", "_")
def exporter_line(
        auth: str,
        dst: str,
        proto: str,
        count: str,
        port: int = 4242
):
    """Format a Prometheus-style exporter metric line with the wanted
    arguments."""
    labels = f'dst="{dst}",port="{port}",proto="{proto}"'
    return f'honeypot_{auth}_connections_total{{{labels}}} {count}'
def binary_search(f, lo=0, hi=None):
    """Return a value x such that f(x) is true, assuming f flips exactly
    once between lo and hi (asserts f(lo) != f(hi) when hi is given).

    When hi is None, an upper bound is found by doubling an offset from lo
    until f changes value.
    """
    lo_val = f(lo)
    if hi is None:
        # Exponential search for a point where f differs from f(lo)
        offset = 1
        while f(lo + offset) == lo_val:
            offset *= 2
        hi = lo + offset
    else:
        assert f(hi) != lo_val
    best = lo if lo_val else hi
    while lo <= hi:
        mid = (lo + hi) // 2
        mid_val = f(mid)
        if mid_val:
            best = mid
        if mid_val == lo_val:
            lo = mid + 1
        else:
            hi = mid - 1
    return best
def halley_newton(fun, x, deriv1, deriv2=None, fx=None, args=()):
    """Single step of the Newton (or, with deriv2, Halley) algorithm.

    Parameters
    ----------
    fun : callable
        The function whose root is sought.
    x : float
        Current guess for the root position.
    deriv1 : callable
        First derivative (Newton's and Halley's method).
    deriv2 : callable, optional
        Second derivative; when given, Halley's correction is applied.
    fx : float, optional
        Precomputed value of fun(x); computed when None.
    args : tuple, optional
        Extra positional arguments for the function/derivative calls.

    Returns
    -------
    The next approximation to the root, or None when the first
    derivative vanishes at *x*.

    References
    ----------
    - https://en.wikipedia.org/wiki/Halley%27s_method
    - https://en.wikipedia.org/wiki/Newton%27s_method
    """
    d1 = float(deriv1(x, *args))
    if fx is None:
        fx = float(fun(x, *args))
    if not d1:
        return None  # Newton step undefined: zero derivative
    step = fx / d1  # Newton correction
    if deriv2:
        d2 = float(deriv2(x, *args))
        if d2:
            # Halley's correction to the Newton step
            step /= 1.0 - 0.5 * step * d2 / d1
    return x - step
def sanitize_str(s):
    """Ensure *s* is a string.

    Lists/tuples are recursively joined with single spaces; any other
    non-string value is converted with str().
    """
    if isinstance(s, (list, tuple)):
        return ' '.join(sanitize_str(item) for item in s)
    return s if isinstance(s, str) else str(s)
def __gen_t(dt: float, num_timesteps: int) -> list:
    """Generate a time vector starting at 0.

    Args:
        dt (float): time between saved timesteps
        num_timesteps (int): number of total timesteps

    Returns:
        t (list): list of times [0, dt, 2*dt, ...]
    """
    return [float(step) * dt for step in range(num_timesteps)]
def content_encode(text: str) -> bytes:
    """Encode the target text to bytes (default UTF-8).

    :param text: the target text
    :return: the encoded bytes
    :rtype: bytes
    """
    return bytes(text, 'utf-8')
def replace_space(value, replace_string):
    """Replace every space character in *value* with *replace_string*
    (basically the inverse of a space-restore)."""
    return replace_string.join(value.split(' '))
def get_spiral(iterations=100):
    """Walk an outward square spiral on a uniform grid.

    :param iterations: (int) number of spiral steps to take
    :return: list of (x, y) tuples visited that fall inside the
        iterations/2 bounding box
    """
    points = []
    x, y = 0, 0
    dx, dy = 0, -1
    half = iterations / 2
    for _ in range(iterations):
        if (-half < x <= half) and (-half < y <= half):
            points.append((x, y))
        # Turn 90 degrees at the spiral's corner cells
        if x == y or (x < 0 and x == -y) or (x > 0 and x == 1 - y):
            dx, dy = -dy, dx
        x, y = x + dx, y + dy
    return points
def _convert_to_F(temp: float) -> float:
    """Convert a Celsius temperature to Fahrenheit.

    param temp: temperature in degrees C
    return: temperature in degrees F, rounded to 1 decimal place
    """
    fahrenheit = temp * 9 / 5 + 32
    return round(fahrenheit, 1)
def potential_temp(t, pres, pres_ref=100000, k=0.286):
    """Calculate potential temperature.

    Args:
        t (float): temperature [K]
        pres (float): pressure [Pa]
        pres_ref (float): standard reference pressure [Pa]
        k (float): R / cp [dimensionless]

    Returns:
        theta: potential temperature [K]
    """
    return t * (pres_ref / pres) ** k
def get_line_by_index(index, lines):
    """Calculate the chess-board line (1-based) for a flat array index,
    where the board has *lines* columns per line."""
    if index < lines:
        return 1
    if index == lines:
        return 2
    if index >= (lines - 1) * lines:
        return lines
    # General case: find the line whose index window contains *index*
    for line in range(2, lines):
        if (line - 1) * lines <= index < line * lines:
            return line
    return -1
def add_fixated_events(plants):
    """Pair each automaton with the events not used by automata further
    down the list.

    @param plants: Ordered automata.
    @type  plants: C{list} of L{BaseAutomaton}

    @return: Ordered pairs of (automaton, fixated events).
    @rtype:  C{list} of C{tuple} (L{BaseAutomaton}, C{set} of L{Event})
    """
    total_alphabet = set()
    for plant in plants:
        total_alphabet.update(plant.alphabet)

    # Note: could be computed faster by walking from the end backwards.
    result = []
    for idx, plant in enumerate(plants):
        fixated = total_alphabet.copy()
        for later_plant in plants[idx + 1:]:
            fixated.difference_update(later_plant.alphabet)
        result.append((plant, fixated))

    total_alphabet.clear()
    return result
def todaysDate(formatting="standard"):
    """Return today's date as a string.

    formatting="standard" -> DD/MM/YYYY; "underscore" -> YYYY_MM_DD.
    Any other value returns None.

    References
    ----------
    https://www.programiz.com/python-programming/datetime/current-datetime
    """
    from datetime import date
    patterns = {"standard": "%d/%m/%Y", "underscore": "%Y_%m_%d"}
    pattern = patterns.get(formatting)
    return date.today().strftime(pattern) if pattern else None
def summerC(n: int) -> int:
    """Sum the union of the 3-multiples and 5-multiples below *n*.

    (Set union avoids double-counting multiples of 15.)
    """
    multiples = set(range(0, n, 3))
    multiples.update(range(0, n, 5))
    return sum(multiples)
def chunk_amino_acid(sequence):
    """Divide a DNA sequence into codons (substrings of length 3),
    ignoring any "extra DNA" at the far end.

    :param sequence: the DNA sequence
    :return: a list of three-character chunks
    """
    usable_length = len(sequence) - len(sequence) % 3
    return [sequence[i:i + 3] for i in range(0, usable_length, 3)]
def mixing_dict(xy, normalized=False):
    """Return a dictionary representation of a mixing matrix.

    Parameters
    ----------
    xy : list or container of two-tuples
        Pairs of (x, y) items.
    normalized : bool (default=False)
        Return counts if False or joint probabilities if True.

    Returns
    -------
    d : dictionary
        Counts (or joint probability) of occurrence of values in xy.
        Every y value also appears as a top-level key.
    """
    d = {}
    total = 0.0
    for x, y in xy:
        d.setdefault(x, {})
        d.setdefault(y, {})
        d[x][y] = d[x].get(y, 0) + 1
        total += 1
    if normalized:
        for jdict in d.values():
            for j in jdict:
                jdict[j] /= total
    return d
def transform_key_to_list(key):
    """Convert a hex key string into a list of byte values (one int per
    two hex digits).

    :param key: input key string
    :return: key as an integer list
    """
    return [int(key[i:i + 2], 16) for i in range(0, len(key), 2)]
def overlap(a, b, min_length=3):
    """Return the length of the longest suffix of *a* matching a prefix of
    *b* that is at least *min_length* characters long; 0 when no such
    overlap exists.

    Bug fix: the original looped ``for i in range(min_length, len(a))``,
    which (a) could stop one candidate position short, and (b) fell off the
    end returning an implicit None instead of the documented 0.
    """
    assert a != '' and b != ''
    start = 0  # start searching at the left end of a
    while True:
        start = a.find(b[:min_length], start)  # look for b's prefix seed in a
        if start == -1:  # no more occurrences to the right
            return 0
        # Seed found; verify the full suffix/prefix match
        if b.startswith(a[start:]):
            return len(a) - start
        start += 1
def format_time(start, end):
    """Format the length of time between ``start`` and ``end``.

    Args:
        start (:class:`time.time`): The start time.
        end (:class:`time.time`): The end time.

    Returns:
        str: "HH:MM:SS.ss" formatted hours, minutes, and seconds.
    """
    elapsed = end - start
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds)
def isprime(n):
    """Return True if n is prime.

    Trial division with a 6k±1 wheel. Bug fix: values below 2 (including
    0, 1, and negatives) are now explicitly non-prime — previously
    isprime(1) and isprime(-1) returned True.
    """
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # All remaining primes are of the form 6k ± 1
    i, step = 5, 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += step
        step = 6 - step
    return True
def nabisco_cereals(cereals):
    """Return the cereal rows manufactured by Nabisco (mfr code 'N')."""
    nabisco = []
    for row in cereals:
        if row["mfr"] == "N":
            nabisco.append(row)
    return nabisco
def seasons(month):
    """Decide which season a month number (1-12) belongs to.

    :param month: month number
    :return: 'spring', 'summer', 'autumn', or 'winter'
    """
    if 3 <= month <= 5:
        return 'spring'
    elif 6 <= month <= 8:
        return 'summer'
    elif 9 <= month <= 11:
        return 'autumn'
    else:
        return 'winter'
def deciphered_index_with_key(index, key):
    """Use *key* to find the deciphered alphabet index.

    :param index: int, index of a ciphered char in ALPHABET (0-25).
    :param key: int, the secret shift amount.
    :return: int, the deciphered index in 0-25.

    Generalized to modular arithmetic: identical to the original
    single-subtraction wrap for 0 <= index + key < 52, and also correct
    for negative shifts or shifts larger than one alphabet.
    """
    return (index + key) % 26
def format_money(amount, pos):
    """Format money for the 'y' axis of a plot.

    :param amount: the amount
    :type amount: int
    :param pos: the position argument (required by the formatter API)
    :type pos: int
    :return: a formatted money string like '$1,234'
    :rtype: str
    """
    # pylint: disable=unused-argument
    return f'${amount:,.0f}'
def get_resource_full_name(res):
    """Build the full name of a resource by walking parent links up to the
    root; the root's segment comes first.

    NOTE(review): each level contributes ``type + '/' + type + '/'`` —
    the doubled type looks suspicious (perhaps ``res.type + '/' + res.name``
    was intended), but it is preserved as-is; confirm against callers.
    """
    segments = []
    node = res
    while node:
        segments.append(node.type + '/' + node.type + '/')
        node = node.parent
    return ''.join(reversed(segments))
def maximum(*args: float) -> float:
    """Demo wrapper around max() (only to show a norm definition);
    prints a banner before returning."""
    print('--- max1 ---')
    return max(args)
def map_to_unit_interval(x, lo=0., hi=1.):
    """Linearly map a value in [lo, hi] onto [0, 1]."""
    span = hi - lo
    return (x - lo) / span
def document_metas_from_data(document_data, claimant):
    """
    Return a list of document meta dicts for the given document data.

    Returns one document meta dict for each document metadata claim in
    document_data.

    Each dict can be used to init a DocumentMeta object directly::

        document_meta = DocumentMeta(**document_meta_dict)

    :param document_data: the "document" sub-object that the client POSTed to
        the API as part of a new or updated annotation
    :type document_data: dict

    :param claimant: the URI that the browser was at when this annotation was
        created (the top-level "uri" field of the annotation)
    :type claimant: unicode

    :returns: a list of zero or more document meta dicts
    :rtype: list of dicts
    """
    def transform_meta_(document_meta_dicts, items, path_prefix=None):
        """Fill document_meta_dicts with document meta dicts for the items."""
        if path_prefix is None:
            path_prefix = []
        for key, value in items.items():
            # Extend the dotted key path (copy: siblings must not share it)
            keypath = path_prefix[:]
            keypath.append(key)
            if isinstance(value, dict):
                # Nested object: recurse, accumulating the key path
                transform_meta_(document_meta_dicts, value, path_prefix=keypath)
            else:
                # Leaf values are normalized to lists
                if not isinstance(value, list):
                    value = [value]
                type_ = ".".join(keypath)
                if type_ == "title":
                    # We don't allow None, empty strings, whitespace-only
                    # strings, leading or trailing whitespaces, or empty arrays
                    # in document title values.
                    value = [v.strip() for v in value if v and v.strip()]
                    if not value:
                        continue
                document_meta_dicts.append(
                    {"type": type_, "value": value, "claimant": claimant}
                )
    # "link" entries are handled elsewhere, so drop them before transforming
    items = {k: v for k, v in document_data.items() if k != "link"}
    document_meta_dicts = []
    transform_meta_(document_meta_dicts, items)
    return document_meta_dicts
def paginated_table(kwargs):
    """Build the template context for a paginated table.

    Args:
        kwargs (dict): with keys
            headers (list): column headers for the table
            rows (list): rows (lists of cells matching headers)
            pages (list): (name, url) for each pagination link
            idx (int): index of the current page in the pages list

    Returns:
        dict: rendering context with headers, rows, pagination state.
    """
    headers = kwargs['headers']
    rows = kwargs['rows']
    pages = kwargs['pages']
    idx = kwargs['idx']

    last_idx = len(pages) - 1
    prev_url = pages[idx - 1][1] if idx > 0 else ""
    next_url = pages[idx + 1][1] if idx < last_idx else ""
    decorated_pages = [
        ("active" if i == idx else "", name, url)
        for i, (name, url) in enumerate(pages)
    ]

    return {
        'headers': headers,
        'rows': rows,
        'show_pagination': len(pages) != 1,  # hide the bar for a single page
        'pages': decorated_pages,
        'previous_class': "disabled" if idx == 0 else "",
        'previous_url': prev_url,
        'next_class': "disabled" if idx == last_idx else "",
        'next_url': next_url,
    }
def levenshtein(a, b):
    """Calculate the Levenshtein edit distance between a and b."""
    if len(a) > len(b):
        a, b = b, a  # keep a the shorter string: O(min(n, m)) space
    prev_row = list(range(len(a) + 1))
    for i, ch_b in enumerate(b, start=1):
        row = [i] + [0] * len(a)
        for j, ch_a in enumerate(a, start=1):
            insert_cost = prev_row[j] + 1
            delete_cost = row[j - 1] + 1
            substitute_cost = prev_row[j - 1] + (ch_a != ch_b)
            row[j] = min(insert_cost, delete_cost, substitute_cost)
        prev_row = row
    return prev_row[len(a)]
def profit(K, L, w, r, alpha, A):
    """Compute a Cobb-Douglas firm's profit:
    A * K^alpha * L^(1-alpha) - r*K - w*L.

    Args:
        K (float): level of capital
        L (float): level of labor
        w (float): wage
        r (float): rental rate of capital
        alpha (float): output elasticity of capital
        A (float): total factor productivity

    Returns:
        (float): profit of the firm
    """
    revenue = A * (K ** alpha) * (L ** (1 - alpha))
    return revenue - r * K - w * L
def get_areas():
    """Return the list of all area descriptors known by the platform."""
    area_titles = [
        ("country", "Countries"),
        ("NUTS1", "Region NUTS1"),
        ("NUTS2", "Region NUTS2"),
        ("NUTS3", "Region NUTS3"),
        ("LAU", "Cities"),
    ]
    return [{"id": area_id, "title": title} for area_id, title in area_titles]
def rounder(money_dist: list, pot: int, to_coin: int = 2) -> list:
    """Round a money distribution to coin multiples while preserving the
    pot total.

    Adapted from https://stackoverflow.com/a/44740221
    """
    def to_nearest_coin(value):
        """Round *value* to the nearest multiple of to_coin."""
        return int(to_coin * round(value / to_coin))

    rounded = [to_nearest_coin(amount) for amount in money_dist]
    shortfall = pot - sum(rounded)
    assert shortfall == to_nearest_coin(shortfall)
    # Give the leftover units to the entries that lost the most in rounding
    residuals = [amount - to_nearest_coin(amount) for amount in money_dist]
    ranked = reversed(sorted((res, i) for i, res in enumerate(residuals)))
    bump = {
        i for order, (res, i) in enumerate(ranked) if order < shortfall
    }
    return [r + 1 if i in bump else r for i, r in enumerate(rounded)]
def round_off_rating(number):
    """Round a number to the closest half integer.

    >>> round_off_rating(1.3)
    1.5
    >>> round_off_rating(2.6)
    2.5
    >>> round_off_rating(3.0)
    3.0
    >>> round_off_rating(4.1)
    4.0"""
    doubled = round(number * 2)
    return doubled / 2
def get_port_str(config, port):
    """Return a port string of the form ':portnum'.

    Falls back to the config's XNATPORT key when *port* is None; will
    raise KeyError if the config does not define XNATPORT either.
    """
    if port is None:
        port = config.get_key('XNATPORT')
    text = str(port)
    return text if text.startswith(':') else ':{}'.format(text)
def population_to_groups(population, groups):
    """Assign each participant in a population to a stratified group.

    Currently hardcoded on two variables: var_1 maps to 'A' or 'B'
    (anything not 'A' counts as 'B'), var_2 to 'X' or 'Y'. The
    stratification variables should eventually be fed in for dynamic
    allocation.
    """
    for participant in population:
        first_key = 'A' if participant.var_1 == 'A' else 'B'
        second_key = 'X' if participant.var_2 == 'X' else 'Y'
        groups[(first_key, second_key)].append(participant.participant_id)
    return groups
def flatten_nested_dictionary(dictionary=None, base=""):
    """Flatten a nested dictionary into {'a.b.c': value} form.

    Args:
        dictionary (dict): the dict to flatten (defaults to empty; the
            previous mutable ``{}`` default was replaced with a None
            sentinel — same observable behavior, no shared state)
        base (str): key-path prefix used during recursion

    Returns:
        dict: flat mapping with dot-joined key paths

    Raises:
        TypeError: when *dictionary* is not a dict (isinstance check, so
            dict subclasses are now accepted as well)
    """
    if dictionary is None:  # avoid a mutable default argument
        dictionary = {}
    if not isinstance(dictionary, dict):
        raise TypeError(
            "The argument passed into flatten_nested_dictionary must be of type 'dict'")
    flattened = {}
    for key, value in dictionary.items():
        path = f"{base}.{key}" if base else key
        if isinstance(value, dict):
            # Depth-first: nested entries land at this key's position
            flattened.update(flatten_nested_dictionary(value, path))
        else:
            flattened[path] = value
    return flattened
def position(x, arr):
    """
    Definition
    ---
    Get the row and column of an element in a 2D list.

    Parameters
    ---
    x : element to be searched
    arr : 2D list

    Returns
    ---
    row, col : indices of the first occurrence, or (-1, -1) when absent
    (an error message is printed in that case)
    """
    for row_idx, row in enumerate(arr):
        if x in row:
            return row_idx, row.index(x)
    print("Error: Zero not found in node")
    return -1, -1
def clean_task_output(output: str) -> str:
    """Remove CNC metadata from task output.

    In some cases data is returned from a task to the cnc application by
    injecting metadata lines prefixed with 'CNC:' into the text output.
    This strips those lines so the output can be shown to the user.

    :param output: str of output possibly containing metadata lines
    :return: str of output with no metadata present (newline-terminated
        per kept line)
    """
    kept_lines = [
        line for line in output.splitlines() if not line.startswith('CNC:')
    ]
    return ''.join(f'{line}\n' for line in kept_lines)
def reverse_range(object):
    """Yield the indices of *object* in reverse:
    list(reverse_range([1, 2, 3])) -> [2, 1, 0]."""
    last_index = len(object) - 1
    return range(last_index, -1, -1)
def try_int(text, default=0):
    """Try to parse an integer, returning *default* when parsing fails
    with a ValueError (other exception types still propagate)."""
    try:
        result = int(text)
    except ValueError:
        result = default
    return result
def _issubclass(thing, cls):
    """issubclass() that returns False instead of raising TypeError when
    *thing* is not a class. (LOL PYTHON)"""
    try:
        return issubclass(thing, cls)
    except TypeError:
        # issubclass raises for non-class arguments; treat that as "no"
        return False
def counter(aIntervals):
    """Count bases in every interval and sum across them.

    Each interval is indexable as (..., start, end) with an inclusive
    0-based end — hence the +1 per interval.

    Returns (total_base_coverage, interval_count).
    """
    total_bases = 0
    interval_count = 0
    for interval in aIntervals:
        total_bases += interval[2] - interval[1] + 1  # +1: inclusive end
        interval_count += 1
    return total_bases, interval_count
def search(d, key, default=None):
    """Return the dict (possibly nested within *d*) that contains *key*,
    or *default* if no such dict exists."""
    pending = [d]
    while pending:
        current = pending.pop()
        for k, v in current.items():
            if k == key:
                return current
            if isinstance(v, dict):
                pending.append(v)
    return default
def get_size(bytes, suffix="B"):
    """
    Scale bytes to a human-readable format, e.g.:
        1253656 => '1.20MB'
        1253656678 => '1.17GB'

    Note: the result carries a trailing newline, and values of 1024 PB or
    more fall through the unit list and return None (behavior preserved).
    """
    factor = 1024
    value = bytes
    for unit in ("", "K", "M", "G", "T", "P"):
        if value < factor:
            return f"{value:.2f}{unit}{suffix}\n"
        value /= factor
def closest_ref_length(references, hyp_len):
    """
    Find the reference whose length is closest to the hypothesis; this is
    the *r* variable from the brevity penalty formula in Papineni et al.
    (2002). Ties go to the shorter reference.

    :param references: A list of reference translations.
    :type references: list(list(str))
    :param hyp_len: The length of the hypothesis.
    :type hyp_len: int
    :return: The length of the reference that's closest to the hypothesis.
    :rtype: int
    """
    return min(
        (len(reference) for reference in references),
        key=lambda ref_len: (abs(ref_len - hyp_len), ref_len),
    )
def update_step_size(error_estimate, prev_step_size, safety=0.9, facmin=0.2,
                     facmax=1.4, prev_error_ratio=None):
    """Adaptively propose the next step size based on estimated errors.

    A PI-style controller: the exponents depend on whether the step was
    rejected (error_estimate > 1) or accepted (<= 1); accepted steps are
    never shrunk (facmin is raised to 1).
    """
    if error_estimate > 1:
        # Rejected step: pure integral control
        pfactor, ifactor = 0, 1 / 1.5
    else:
        # Accepted step: proportional-integral control
        pfactor, ifactor = 0.13, 1 / 4.5
    error_ratio = safety / error_estimate
    if prev_error_ratio is None:
        prev_error_ratio = error_ratio
    factor = error_ratio ** ifactor * (error_ratio / prev_error_ratio) ** pfactor
    if error_estimate <= 1:
        prev_error_ratio = error_ratio
        facmin = 1.0  # never shrink an accepted step
    factor = min(facmax, max(facmin, factor))
    return prev_step_size * factor, prev_error_ratio
def main(n):
    """Calculate the sum of all numbers that can be written as the sum of
    the <n>-th powers of their digits.

    Args:
        n (int): the power

    Returns:
        int: the sum of every matching number in [2**n, 10**(n+1))
    """
    digit_powers = {str(d): d ** n for d in range(10)}
    matches = []
    for num in range(2 ** n, 10 ** (n + 1)):
        if num == sum(digit_powers[ch] for ch in str(num)):
            matches.append(num)
    return sum(matches)
def is_valid_description(s):
    """Validate a description string: empty is valid, and otherwise none
    of the disallowed punctuation characters may appear."""
    INVALID_CHARS = '~`^*{}_+=/\\><|\'\"'
    if s == '':
        return True
    return not any(char in s for char in INVALID_CHARS)
def integer(maybe_string, base):
    """Make an integer out of a number or a string (parsed with *base*)."""
    if isinstance(maybe_string, int):
        return maybe_string
    return int(maybe_string, base)
def get_scale_from_size(input_size, output_size):
    """Get the scale factor given input size and output size.

    Args:
        input_size (tuple(int)): the size of the input image.
        output_size (tuple(int)): the size of the output image.

    Returns:
        list[float]: the scale factor of each dimension.
    """
    return [
        out_dim / in_dim
        for in_dim, out_dim in zip(input_size, output_size)
    ]
def make_ascii(s):
    """Convert text to ASCII by dropping all non-ASCII characters."""
    return "".join(filter(lambda ch: ord(ch) < 128, s))
def canonical_name(k, config):
    """Return the canonical name for a key.

    Handles user choice of '-' or '_' conventions by standardizing on
    whichever version was set first: an existing hyphen/underscore
    spelling in *config* wins; otherwise *k* is returned as-is.
    """
    try:
        if k in config:
            return k
    except TypeError:
        # config does not support membership tests; nothing to canonicalize
        return k
    alternate = k.replace('_', '-') if '_' in k else k.replace('-', '_')
    return alternate if alternate in config else k
def keepElementNodes(nodes):
    """Filter DOM *nodes* down to element nodes only."""
    return [node for node in nodes if node.nodeType == node.ELEMENT_NODE]
def filter_ports(ports, ports_to_filter):
    """Remove timing relations from selected ports of a model.

    Input/output ports named in *ports_to_filter* — either directly or via
    their generic name (the name minus a trailing '_0'/'_1') — get their
    associated clock cleared. Clock ports are copied through untouched.
    """
    filtered = {"input": [], "clock": [], "output": []}
    for key in ("input", "clock", "output"):
        for name, width, assoc_clock in ports[key]:
            if key in ("input", "output"):
                if name.endswith("_0") or name.endswith("_1"):
                    if name[:-2] in ports_to_filter:  # generic-name match
                        assoc_clock = None
                if name in ports_to_filter:
                    assoc_clock = None
            filtered[key].append((name, width, assoc_clock))
    return filtered
def get_field_num(multi_hot_flags, multi_hot_len):
    """Infer the field number from one-hot/multi-hot flags.

    Example:
        get_field_num([False,False,True,True,True,False], 3)  # 4
        get_field_num([False,True,True,True,True,False], 2)   # 4
        get_field_num([False,True,True,True,True,False], 4)   # 3
    """
    one_hot_count = sum(not flag for flag in multi_hot_flags)
    multi_hot_total = sum(multi_hot_flags)
    if multi_hot_total % multi_hot_len != 0:
        raise ValueError("cannot infer field number. please check input!")
    return one_hot_count + multi_hot_total // multi_hot_len
def power(session, Type='Real64', RepCap='', AttrID=1150003, buffsize=0, action=None):
    """[Power (dBm)] Expected RMS power of the input signal, in dBm.
    Limits depend on hardware configuration. Absolute max is +30 dBm.
    Reset value: -10 dBm.

    Returns the attribute descriptor tuple
    ``(session, Type, RepCap, AttrID, buffsize, action)``.
    """
    # Default for ``action`` is built per call: the old mutable default
    # (action=['Get', '']) was returned to callers, so mutating the
    # returned list would have corrupted every later default invocation.
    if action is None:
        action = ['Get', '']
    return session, Type, RepCap, AttrID, buffsize, action
def add_operator(solr_Q1, solr_Q2, operator):
    """Join two Solr Q objects with the requested boolean operator.

    :param solr_Q1: Solr Q object (may be None)
    :param solr_Q2: Solr Q object (may be None)
    :param operator: String, should be AND, OR, AND NOT
    """
    # With a missing operand there is nothing to combine: return the
    # other one (or None when both are missing).
    if solr_Q1 is None:
        return solr_Q2
    if solr_Q2 is None:
        return solr_Q1
    op = operator.strip()
    if op == 'AND NOT':
        return solr_Q1 & ~solr_Q2
    if op == 'OR':
        return solr_Q1 | solr_Q2
    # Anything else defaults to AND
    return solr_Q1 & solr_Q2
def convert_oct_set(oct_set, og_names):
    """Translate an OCT set on the preprocessed graph back to its
    original vertex names.

    Returns the translated list, or None (after printing diagnostics)
    when any lookup fails.
    """
    try:
        return [og_names[vertex] for vertex in oct_set]
    except Exception:
        # Best effort: report what we were translating and fall through,
        # implicitly returning None.
        print('Error: OCT set', oct_set)
        print('Error: og_names', og_names)
        print('Problem looking up OCT set')
def generate_body(du_dept_dict):
    """Return HTML that will be used for the body content of the box
    report BlogPage: one <li> per department with its usage in GB."""
    items = []
    for dept, usage in du_dept_dict.items():
        # Convert to GB
        items.append('<li>{}: {} GB</li>'.format(dept, int(usage) / 1000000))
    return '<ul>' + ''.join(items) + '</ul>'
def random(d, params):
    """Random path: draw a uniform random value for path noise.

    Args:
        d: Unused here; kept for signature compatibility with the other
           path functions that accept the same (d, params) arguments.
        params: Optional dict with ``'min'``/``'max'`` bounds for the
            noise.  When None, bounds default to ``[0, 2*pi)``.

    Returns:
        float: A uniform random value between the bounds.
    """
    # NOTE: this function shadows the stdlib ``random`` module name at
    # module level, hence the function-local imports.
    import random
    import math

    # Identity comparison with None (PEP 8) instead of ``== None``.
    if params is None:
        b_min = 0.
        b_max = 2 * math.pi
    else:
        b_min = params['min']
        b_max = params['max']
    return random.uniform(b_min, b_max)
def rev_slice(i):
    """Return True when *i* is a slice with a negative (reversing) step."""
    if not isinstance(i, slice):
        return False
    step = i.step
    return step is not None and step < 0
def maybe_obj(str_or_obj):
    """Resolve a dotted name to an object, or pass non-strings through.

    If the argument is not a string it is returned unchanged.  Otherwise
    it is treated as a dotted path: successively longer module prefixes
    are imported until an import fails, after which the remaining
    components are resolved as attributes of the top-level module.
    """
    if not isinstance(str_or_obj, str):
        return str_or_obj

    components = str_or_obj.split(".")

    # Import progressively longer prefixes; stop at the first failure.
    module = None
    prefix = None
    for component in components:
        prefix = component if prefix is None else prefix + "." + component
        try:
            # __import__ returns the TOP-LEVEL package regardless of depth.
            module = __import__(prefix)
        except ImportError:
            if module is None:
                # Not even the first component was importable.
                raise
            break

    # Walk the remaining dotted path via attribute access.
    target = module
    for component in components[1:]:
        target = getattr(target, component)
    return target
def combinefn(x, inx):
    """Increment the count at position *inx* of counts array *x* in place.

    e.g. [0, 0, ...], 0 -> [1, 0, ...]

    Returns the (mutated) array so calls can be chained / folded.
    """
    x[inx] = x[inx] + 1
    return x
def format_bytes(n):
    """Format bytes as text

    Copied from dask to avoid dependency.
    """
    # (strict lower bound, format string, divisor) from largest to
    # smallest; strictly-greater comparison matches the original dask code.
    scales = (
        (1e15, "%0.2f PB", 1e15),
        (1e12, "%0.2f TB", 1e12),
        (1e9, "%0.2f GB", 1e9),
        (1e6, "%0.2f MB", 1e6),
        (1e3, "%0.2f kB", 1000),
    )
    for bound, fmt, divisor in scales:
        if n > bound:
            return fmt % (n / divisor)
    return "%d B" % n
def XmlEscape(s):
    """XML-escapes the given string, replacing magic characters (&<>")
    with their escaped equivalents."""
    # str.translate does a single pass, so the '&' inside the inserted
    # entities is never re-escaped (same effect as escaping '&' first).
    return s.translate({
        ord("&"): "&amp;",
        ord("<"): "&lt;",
        ord('"'): "&quot;",
        ord(">"): "&gt;",
    })
def str_match_list_item(s, l):
    """Return True when *s* has exactly the same character set as some
    item of *l*.

    Comparison ignores character order and multiplicity (set equality).

    :param s: string to match
    :param l: list of candidate strings
    :return: bool
    """
    target = set(s)
    return any(target == set(candidate) for candidate in l)
def isnum(s):
    """Return True if *s* can be interpreted as a number.

    Accepts anything ``float()`` accepts: ints, floats, and numeric
    strings such as ``"1e3"`` or ``"-7.5"``.
    """
    try:
        float(s)
    except (ValueError, TypeError):
        # TypeError covers non-string/number inputs (None, lists, ...),
        # which previously escaped and crashed the caller instead of
        # being reported as "not a number".
        return False
    return True
def bytes2human(n):
    """Convert a byte count into a human-readable string.

    Uses binary (1024-based) multiples and truncates to an integer.
    Values below 1024 are reported as "0 Bytes".

    >>> bytes2human(10000)
    '9 KB'
    >>> bytes2human(100001221)
    '95 MB'
    """
    # NOTE: the old docstring examples ('9.8 K/s', '95.4 M/s') did not
    # match the actual output format; they are corrected above.
    symbols = ('KB', 'MB', 'GB', 'TB', 'PB', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols):
        # 1 << 10 == 1024, so each symbol is the next power of 1024.
        prefix[s] = 1 << (i + 1) * 10
    for s in reversed(symbols):
        if n >= prefix[s]:
            # Integer truncation is intentional ('9 KB', not '9.77 KB').
            value = int(float(n) / prefix[s])
            return '%s %s' % (value, s)
    return "0 Bytes"
def is_pandigital(value):
    """
    Check that each digit appears once only.

    :param value: integer value (can also be negative)
    :retuns: true when given number is pandigital

    see http://en.wikipedia.org/wiki/Pandigital_number

    >>> is_pandigital(1234567890)
    True
    >>> is_pandigital(12345678900)
    False
    >>> is_pandigital(9876543210)
    True
    >>> is_pandigital(10240)
    False
    """
    # A number is pandigital (in this sense) when no digit repeats, i.e.
    # its decimal string has as many distinct characters as total digits.
    digits = str(abs(value))
    return len(digits) == len(set(digits))
def standardize_names_groundings(stmts):
    """Standardize the names of Concepts with respect to an ontology.

    NOTE: this function is currently optimized for Influence Statements
    obtained from Eidos, Hume, Sofia and CWMS. It will possibly yield
    unexpected results for biology-specific Statements.

    Parameters
    ----------
    stmts : list[indra.statements.Statement]
        A list of statements whose Concept names should be standardized
        (modified in place and also returned).
    """
    print('Standardize names to groundings')
    for stmt in stmts:
        for concept in stmt.agent_list():
            _, raw_id = concept.get_grounding()
            if raw_id is None:
                # No grounding available; leave the name untouched.
                continue
            # Groundings may come as a ranked list of (id, score) pairs;
            # keep only the top entry's identifier.
            if isinstance(raw_id, list):
                raw_id = raw_id[0][0]
            # The last URI path component is the human-relevant label.
            label = raw_id.split('/')[-1]
            for old, new in (('|', ' '), ('_', ' '), ('ONT::', '')):
                label = label.replace(old, new)
            concept.name = label.capitalize()
    return stmts