content
stringlengths
42
6.51k
def is_floatable(value):
    """Return True if `value` can be cast to a float, else False.

    Fixes: the original used a bare `except:`, which also swallowed
    SystemExit/KeyboardInterrupt; float() only raises TypeError or
    ValueError for bad inputs.
    """
    try:
        float(value)
    except (TypeError, ValueError):
        return False
    return True
def lookahead_contains(rule, t):
    """True if the lookahead restriction `rule` allows the terminal `t`.

    A `rule` of None permits everything; otherwise membership of `t` in
    `rule.set` is required (positive rule) or forbidden (negative rule).
    """
    if rule is None:
        return True
    member = t in rule.set
    return member if rule.positive else not member
def percent_non_alpha(text):
    """Return (as a string) the fraction of characters in `text` that are
    neither alphabetic nor whitespace.

    Based on findings from spam.csv that spam texts have, on average,
    significantly more non alphabetical characters than ham texts
    (see: avg_non_alpha()).

    Fixes: the original raised ZeroDivisionError for empty text; now
    returns "0.0".
    """
    if not text:
        return "0.0"
    non_alpha_count = sum(
        1 for char in text if not char.isalpha() and not char.isspace()
    )
    return str(non_alpha_count / len(text))
def dataset_name(fuzzer_name):
    """Get the stats dataset name for the given |fuzzer_name|."""
    normalized = fuzzer_name.replace('-', '_')
    return '{}_stats'.format(normalized)
def get_form_field_chart_url(url, field):
    """Append a `field_name` query parameter for `field` to `url`."""
    return u'{}?field_name={}'.format(url, field)
def _convert_keyword(keywords): """Convert keywords collection.""" if isinstance(keywords, (list, tuple)): return keywords if isinstance(keywords, dict): return keywords.keys()
def is_total_homomorphism(elements, mapping):
    """Return True if the mapping is total, i.e. its key set equals the
    element set."""
    return set(mapping) == set(elements)
def prime_sieve(n):
    """Return the list of primes from 2 up to (but below) n.

    Odd-only sieve of Eratosthenes; very fast (n<10,000,000 in ~0.4 s).
    Algorithm & Python source: Robert William Hanks,
    http://stackoverflow.com/questions/17773352/python-sieve-prime-numbers

    >>> prime_sieve(25)
    [2, 3, 5, 7, 11, 13, 17, 19, 23]
    """
    if n <= 1:
        return []
    half = n // 2
    # flags[k] tracks the odd number 2*k + 1 (flags[0] stands in for 1/2).
    flags = [True] * half
    for odd in range(3, int(n ** 0.5) + 1, 2):
        if flags[odd // 2]:
            first = odd * odd // 2
            count = (n - odd * odd - 1) // (2 * odd) + 1
            flags[first::odd] = [False] * count
    return [2] + [2 * k + 1 for k in range(1, half) if flags[k]]
def calc_mean_midstance_toe_angles(toe_angles, mid_stance_index):
    """Mean of the three toe angles centred on the mid-stance index.

    Returns NaN when fewer than three angles are available.  Means above
    90 degrees are folded back (180 - mean).
    """
    import numpy as np

    if len(toe_angles) <= 2:
        return np.nan
    window = [toe_angles[mid_stance_index + offset] for offset in (-1, 0, 1)]
    mean_value = np.mean(window)
    # Fold angles past vertical back into the 0-90 range.
    if mean_value > 90.:
        mean_value = 180. - mean_value
    return mean_value
def file_of_interest(file_name, supported_extensions):
    """Filter for CONTENT_EXTENSIONS_SUPPORTED.

    True when any supported extension occurs as a substring of the
    file name.
    """
    return any(extension in file_name for extension in supported_extensions)
def flatten_semilist(x):
    """Convert a semi-nested list - a list of (lists and scalars) - to a
    flat list."""
    flat = []
    for item in x:
        if isinstance(item, list):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
def validate(string):
    """Return int(string) if string represents an integer; otherwise
    print an error message and return string unmodified."""
    try:
        value = int(string)
    except ValueError:
        print("\nError: invalid value -> %s\n" % string)
        return string
    return value
def insertion_sort(L):
    """In-place insertion sort; returns the same (now sorted) list.

    Complexity: O(n ** 2).
    """
    for end in range(1, len(L)):
        # Bubble L[end] left until the prefix L[:end+1] is sorted.
        pos = end
        while pos > 0 and L[pos] < L[pos - 1]:
            L[pos], L[pos - 1] = L[pos - 1], L[pos]
            pos -= 1
    return L
def quote(text):
    """Backslash-escape Discord's MarkDown special characters."""
    # Backslash must be escaped first so later escapes are not doubled.
    for special in ("\\", "*", "`", "[", "_", "~", ":", "<"):
        text = text.replace(special, "\\" + special)
    return text
def getSqTransMoment(system):
    """Return the square electronic transition moment |Re|^2 for the given
    molecular species + band "system" string.

    Needed for "Line strength", S = |R_e|^2*q_v'v" or just |R_e|^2.
    Values: Allen's Astrophysical Quantities, 4.12.2 - 4.13.1 (band-head
    R_00^2 from last column of table, as of Feb 2017); TiO rotational &
    vibrational constants: p. 87, Table 4.17.  Unknown systems give 0.0.
    """
    band_head_r00_squared = {
        "TiO_C3Delta_X3Delta": 0.84,   # TiO alpha system
        "TiO_c1Phi_a1Delta": 4.63,     # TiO beta system
        "TiO_A3Phi_X3Delta": 5.24,     # TiO gamma system
        "CH_A2Delta_X2Pi": 0.081,      # CH G band (4300 A); mean of two values
    }
    return band_head_r00_squared.get(system, 0.0)
def assure_tuple_or_list(obj):
    """Given an object, wrap it into a 1-tuple unless it is already a
    list or tuple."""
    return obj if isinstance(obj, (list, tuple)) else (obj,)
def get_whereclause(params):
    """Given a dictionary of params {key1: val1, key2: val2} return a
    partial query like: WHERE key1 = 'val1' AND key2 = 'val2' ...

    NOTE(review): values are interpolated directly into the SQL text —
    vulnerable to SQL injection if inputs are untrusted; prefer
    parameterized queries in that case.
    """
    clauses = []
    for position, (key, val) in enumerate(params.items()):
        keyword = "WHERE" if position == 0 else "AND"
        clauses.append("%s %s = '%s'" % (keyword, key, val))
    return " ".join(clauses)
def last_vowel(s):
    """(str) -> str

    Return the last vowel in s if one exists; otherwise, return None.

    >>> last_vowel("cauliflower")
    'e'
    >>> last_vowel("pfft")
    """
    for ch in reversed(s):
        if ch in 'aeiouAEIOU':
            return ch
    return None
def add_content_line_and_get_leading_whitespace_length(
    line, snippet_name, snippets, leading_whitespace_length
):
    """Add a code line to a snippet (mutating `snippets` in place) and
    return the leading-whitespace length to use for subsequent lines.

    The whitespace length is measured only from the snippet's first code
    line; later lines reuse it so that deeper indent levels inside the
    snippet keep their relative indentation.

    Args:
        line: New code line to add.
        snippet_name: Name of the snippet.
        snippets: Existing snippets (mapping of name -> {"body": [...]}).
        leading_whitespace_length: Whitespace length for adjusting indents.

    Returns:
        The leading-whitespace length in effect for this snippet.
    """
    body = snippets[snippet_name]["body"]
    if not body:
        # First actual code line: measure its indent once.  It is never
        # recalculated afterwards — doing so would break relative
        # indentation for snippets mixing several indent levels.
        leading_whitespace_length = len(line) - len(line.lstrip())
    # Strip the snippet-definition indent from the left, and trailing
    # whitespace from the right.
    body.append(line.rstrip()[leading_whitespace_length:])
    return leading_whitespace_length
def tiempo_a_segundos(horas: int, minutos: int, segundos: int) -> int:
    """Convert a time given as hours, minutes and seconds into total
    seconds.

    :param horas: hours
    :param minutos: minutes
    :param segundos: seconds
    :return: the time expressed in seconds
    :rtype: int
    """
    return (horas * 60 + minutos) * 60 + segundos
def can_receive_blood_from(blood_group):
    """Return the list of donor blood groups a recipient with the given
    blood group can receive from.

    Raises KeyError for an unrecognised blood group.
    """
    compatibility = {
        'A+': ['A+', 'A-', 'O+', 'O-'],
        'O+': ['O+', 'O-'],
        'B+': ['B+', 'B-', 'O+', 'O-'],
        'AB+': ['A+', 'O+', 'B+', 'AB+', 'A-', 'O-', 'B-', 'AB-'],
        'A-': ['O-', 'A-'],
        'O-': ['O-'],
        'B-': ['B-', 'O-'],
        'AB-': ['AB-', 'A-', 'B-', 'O-'],
    }
    return compatibility[blood_group]
def count_letters(text):
    """Count letter frequencies in `text`, case-insensitively.

    Only letters are counted — spaces, digits and punctuation are
    skipped.  For example, count_letters("This is a sentence.") returns
    {'t': 2, 'h': 1, 'i': 2, 's': 3, 'a': 1, 'e': 3, 'n': 2, 'c': 1}.
    """
    counts = {}
    for ch in text:
        if not ch.isalpha():
            continue
        key = ch.lower()
        counts[key] = counts.get(key, 0) + 1
    return counts
def is_palindrome(s):
    """Return True if the letters of s form a palindrome.

    Non-letters and capitalization are ignored.

    :param s: str
    :return: True if letters in s form a palindrome; False otherwise
    """
    letters = [c for c in s.lower() if c in 'abcdefghijklmnopqrstuvwxyz']
    return letters == letters[::-1]
def timeout(*args, **kwargs):
    """ Limit the function execution time to the given timeout (in seconds).

        Example:
            >>> fib = lambda n: 1 if n <= 1 else fib(n - 1) + fib(n - 2)
            >>> timeout(0.25, fib, 10)  # wait at most 0.25 seconds
            89
            >>> timeout(0.25, fib, 50)  # stops before the thermal death of the universe
            Traceback (most recent call last)
            ...
            TimeoutError:

        Args:
            timeout (float, seconds): The maximum duration of function execution
            func: Function to be executed

        The positional arguments are consumed as:
            timeout(seconds, func, *func_args)
        or, with an optional executor selector string in second position:
            timeout(seconds, "thread"|"process", func, *func_args)
        Remaining positional and all keyword arguments are forwarded to func.

        NOTE(review): the default ProcessPoolExecutor requires func and its
        arguments to be picklable; the worker keeps running after a timeout.
    """
    from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
    # Default to a separate process so CPU-bound work can actually be cut off.
    executor = ProcessPoolExecutor
    timeout, func, *args = args
    # An optional "thread"/"process" selector may sit where func would be;
    # in that case the real callable is the next positional argument.
    if func in ("thread", "process"):
        executor = ThreadPoolExecutor if func == "thread" else ProcessPoolExecutor
        func, *args = args
    with executor() as e:
        future = e.submit(func, *args, **kwargs)
        # Raises concurrent.futures.TimeoutError when not done in time.
        return future.result(timeout=timeout)
def _instance_name(instance): """Shortcut to get instance name.""" return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None)
def read_file_list(filename):
    """Read a trajectory from a text file.

    File format: "stamp d1 d2 d3 ...", where stamp denotes the time
    stamp (to be matched) and "d1 d2 d3.." is arbitrary data (e.g., a 3D
    position and 3D orientation) associated to this timestamp.  Lines
    starting with '#' are comments; commas and tabs act as separators.

    Input:
        filename -- File name

    Output:
        dict -- dictionary of (stamp, data) tuples

    Fixes: closes the file via a context manager (the original leaked
    the handle) and no longer shadows the builtins `file` and `list`.
    """
    with open(filename) as f:
        raw = f.read()
    lines = raw.replace(",", " ").replace("\t", " ").split("\n")
    rows = [
        [v.strip() for v in line.split(" ") if v.strip() != ""]
        for line in lines
        if len(line) > 0 and line[0] != "#"
    ]
    return {float(row[0]): row[1:] for row in rows if len(row) > 1}
def acker(m, n):
    """Compute the Ackermann function for the integers m and n.

    :param int m: First number.
    :param int n: Second number.
    :returns: Computed value.
    """
    if m == 0:
        return n + 1
    if m > 0 and n == 0:
        return acker(m - 1, 1)
    return acker(m - 1, acker(m, n - 1))
def get_output(fname, mystring):
    """Parse `fname` and extract iteration / simulation / function-value
    numbers from every line containing `mystring`.

    Expects whitespace-separated lines where fields 2, 4 and 6 hold the
    values (possibly with thousands commas, and Fortran 'D' exponents
    for the float).

    Returns:
        (iters, simuls, fc) lists on success, or None (after printing a
        message) when the file cannot be opened.

    Fixes: catches OSError instead of a bare except and closes the file
    via a context manager.
    """
    iters = []
    simuls = []
    fc = []
    try:
        f = open(fname)
    except OSError:
        # Best-effort behavior preserved: report and return None.
        print(fname + " does not exist, continuing")
        return None
    with f:
        for line in f:
            if mystring in line:
                ll = line.split()
                iters.append(int(ll[2].replace(',', '')))
                simuls.append(int(ll[4].replace(',', '')))
                fc.append(float(ll[6].replace('D', 'e').replace(',', '')))
    return iters, simuls, fc
def tabunescape(escaped):
    """Unescape a byte string using the Dovecot tabescape scheme.

    See: https://github.com/dovecot/core/blob/master/src/lib/strescape.c
    """
    # \x011 -> \x01 must run last so freshly produced \x01 bytes are not
    # re-read as escape introducers by the earlier replacements.
    for sequence, raw in ((b"\x01r", b"\r"), (b"\x01n", b"\n"),
                          (b"\x01t", b"\t"), (b"\x010", b"\x00"),
                          (b"\x011", b"\x01")):
        escaped = escaped.replace(sequence, raw)
    return escaped
def colourfulness_correlate(C, F_L):
    """Return the *colourfulness* correlate :math:`M`.

    Parameters
    ----------
    C : numeric
        *Chroma* correlate :math:`C`.
    F_L : numeric
        *Luminance* level adaptation factor :math:`F_L`.

    Returns
    -------
    numeric
        *Colourfulness* correlate :math:`M`.

    Examples
    --------
    >>> colourfulness_correlate(0.104707757171, 1.16754446415)  # doctest: +ELLIPSIS
    0.1088421...
    """
    return C * F_L ** 0.25
def extract_aliases(phase_models):
    """Map possible phase name aliases to the desired phase model phase name.

    Each alias may be claimed by only one phase; a duplicate claim raises
    ValueError.  Every phase name is also registered as an alias of
    itself ("identity" mapping).

    Parameters
    ----------
    phase_models
        Phase models ESPEI uses to initialize databases.  Must contain a
        mapping of phase names to phase data (sublattices, site ratios,
        aliases).

    Returns
    -------
    Dict[str, str]
    """
    phases = phase_models["phases"]
    # Seed with the identity mapping before processing declared aliases.
    aliases = {name: name for name in phases}
    for phase_name, phase_dict in phases.items():
        for alias in phase_dict.get("aliases", []):
            if alias in aliases:
                raise ValueError(f"Cannot add {alias} as an alias for {phase_name} because it is already used by {aliases[alias]}")
            aliases[alias] = phase_name
    return aliases
def remove_empty_param_keys(params):
    """Return a copy of ``params`` without keys whose value is ``None``
    or ``""`` (empty string)."""
    cleaned = {}
    for key, value in params.items():
        if value is None or value == "":
            continue
        cleaned[key] = value
    return cleaned
def is_every_letter_guessed(word, guesses):
    """Return True when every letter of the target word appears in the
    user's guesses."""
    return all(letter in guesses for letter in word)
def rotate90(grid):
    """Rotate a 2d grid 90 degrees.

    Args:
        grid (list): a 2d list representing a grid

    Returns:
        list: rotated copy of the grid (rows become tuples)
    """
    bottom_up = grid[::-1]
    return list(zip(*bottom_up))
def join(seq, sep=''):
    """Join the elements of `seq` (converted via str()) with the string
    `sep`."""
    pieces = [str(item) for item in seq]
    return sep.join(pieces)
def findMinValue(theList):
    """Find the minimum value in a list.

    :param theList: The list in which to find a minimum
    :type theList: list
    :return: The minimum value in theList, or "" for an empty list
    :rtype: float or int

    Fixes: the original used "" as its "unset" sentinel, so a genuine ""
    element reset the search mid-scan and could return a wrong answer
    (e.g. ["b", "", "a"] -> "a" instead of "").  min() with default=""
    keeps the empty-list behavior without the sentinel bug.
    """
    return min(theList, default="")
def eulerToQuaternion(pitch, roll, yaw):
    """Return a quaternion (4-element list) corresponding to the rotation
    given by the Euler angles pitch, roll, yaw.

    NOTE(review): component ordering/convention matches the original
    implementation; confirm against the caller's expected convention.
    """
    import numpy as np

    half_yaw, half_roll, half_pitch = yaw * 0.5, roll * 0.5, pitch * 0.5
    cy, sy = np.cos(half_yaw), np.sin(half_yaw)
    cr, sr = np.cos(half_roll), np.sin(half_roll)
    cp, sp = np.cos(half_pitch), np.sin(half_pitch)
    return [
        cy * cr * cp + sy * sr * sp,
        cy * sr * cp - sy * cr * sp,
        cy * cr * sp + sy * sr * cp,
        sy * cr * cp - cy * sr * sp,
    ]
def with_step_ids(steps: list, inputs_offset: int = 0):
    """Walk over a list of step dicts and give each a numeric "id" if it
    is missing (its list index plus `inputs_offset`).

    Steps that gain an id are shallow-copied; the others are reused
    as-is.  Returns a new list.
    """
    assert isinstance(steps, list)
    result = []
    for index, step in enumerate(steps):
        if "id" not in step:
            step = dict(step)
            step["id"] = index + inputs_offset
        assert step["id"] is not None
        result.append(step)
    return result
def gilbert_damping(x, alpha, df0):
    """Linear model f = x -> alpha * x + df0.

    Returns the frequency-domain half width at half maximum (HWHM).
    """
    frequency = x
    return alpha * frequency + df0
def natural_orbital_indicator(natural_orbital_iteration):
    """Construct the natural-orbital indicator string ("" when the
    iteration is None)."""
    if natural_orbital_iteration is None:
        return ""
    return "-no{:1d}".format(natural_orbital_iteration)
def segmentOnLine(seg, line):
    """Determine whether `seg` is a proper part of `line`, i.e. its two
    points are consecutive points on the line (in either direction).

    Args:
        seg: list of two points comprising the segment to check
        line: list of any number of points comprising the full line

    Returns:
        True if the two segment points are consecutive on the line,
        False otherwise.
    """
    def same_point(p, q):
        # Only x and y are compared — points may carry extra coordinates.
        return p[0] == q[0] and p[1] == q[1]

    a, b = seg[0], seg[1]
    for start, end in zip(line, line[1:]):
        # Forward direction.
        if same_point(start, a) and same_point(end, b):
            return True
        # Backward direction.
        if same_point(start, b) and same_point(end, a):
            return True
    return False
def to_namelist(nested_dict):
    """Non-recursive conversion to names: a list maps to the .name of
    each element, a dict to its keys, anything else to its .name."""
    if isinstance(nested_dict, dict):
        return list(nested_dict)
    if isinstance(nested_dict, list):
        return [element.name for element in nested_dict]
    return nested_dict.name
def indentation(s, tabsize=4):
    """Return the indentation level (column count) of line `s`, with tabs
    expanded; whitespace-only lines yield 0."""
    expanded = s.expandtabs(tabsize)
    if expanded.isspace():
        return 0
    return len(expanded) - len(expanded.lstrip())
def read_file_lines(filename):
    """Open `filename` and read its lines from the file system.

    Returns the list of lines on success, or None on a read error
    (missing file, permissions, undecodable content).

    Fixes: uses a context manager (the original leaked the handle when
    readlines() failed) and narrows the bare except.
    """
    try:
        with open(filename) as fhnd:
            return fhnd.readlines()
    except (OSError, UnicodeDecodeError):
        return None
def returnSlotNumbers(parking_lot, driver_age=0):
    """Return all slot numbers occupied by drivers of the given age.

    Returns -1 (after printing a message) for an invalid age, 0 when no
    slot matches, otherwise the list of matching slot numbers.  Slots
    are looked up as parking_lot[1] .. parking_lot[len(parking_lot)].
    """
    if not isinstance(driver_age, int) or driver_age < 18:
        print("Driver Age Should be atleast 18")
        return -1
    matches = [
        slot
        for slot in range(1, len(parking_lot) + 1)
        if 'driverAge' in parking_lot[slot]
        and parking_lot[slot]['driverAge'] == driver_age
    ]
    return matches if matches else 0
def number_next(ascii_file, skip_blank_lines = True, comment_char = '!'):
    """Returns True if next token in file is a number.

    Peeks ahead without consuming the token: the file position is
    restored before returning (to just before the token's line when a
    decision is made, or — with skip_blank_lines — past any skipped
    blank/comment lines).

    Args:
        ascii_file: seekable text file object, read from its current position.
        skip_blank_lines: when True, blank and comment lines are consumed.
        comment_char: first character marking a comment line.
    """
    file_pos = ascii_file.tell()
    while True:
        if (skip_blank_lines):
            file_pos = ascii_file.tell()  # will advance file pos to next non-blank line
        line = ascii_file.readline()
        if len(line) == 0:  # end of file
            ascii_file.seek(file_pos)
            return False
        words = line.split()
        if len(words) == 0 or words[0][0] == comment_char:
            continue  # blank line or comment
        ascii_file.seek(file_pos)  # restore file position whether or not number found
        # The first whitespace-delimited word decides: number iff float() accepts it.
        try:
            float(words[0])
        except Exception:  # todo: should only catch a particular exception (type conversion)
            return False
        else:
            return True
def get_warning_level(alert):
    """Return the warning level as an int for communication to mqtt
    clients (2 = red, 1 = amber; None for cases with no rule).
    """
    if alert['InteractionValue'].lower() == 'detected':
        # Default red for any detection as shark is <500m.
        return 2
    unit = alert['DistanceUnit']
    if unit == 'm offshore':
        # Sightings within 1km of shore are more likely to result in a
        # beach closure.
        return 2 if int(alert['Distance']) <= 1000 else 1
    if unit == 'km offshore':
        distance = int(alert['Distance'])
        if distance <= 1:
            return 2
        if distance <= 2:
            return 1
    # Falls through (None) for distant km-offshore sightings or other
    # units — preserved from the original behavior.
def info_from_asc_samples_line(line_txt):
    """Get sample info from an EyeLink asc SAMPLES line.

    Parameters
    ----------
    line_txt : str
        A single line from an EyeLink asc file describing what the
        sample lines contain.

    Returns
    -------
    tuple
        (side, has_velocity, has_resolution, has_htarg, has_input) where
        side is 'l', 'r' or 'b' and the rest are bools for velocity,
        resolution, head-target and input fields in the samples.
    """
    words = line_txt.split()
    has_velocity = "VEL" in words
    has_resolution = "RES" in words
    has_htarg = "HTARGET" in words
    has_input = "INPUT" in words
    left = "LEFT" in words
    right = "RIGHT" in words
    if left and not right:
        sample_side = 'l'
    elif right and not left:
        sample_side = 'r'
    else:
        # Both or neither eye mentioned: binocular.
        sample_side = 'b'
    return sample_side, has_velocity, has_resolution, has_htarg, has_input
def isEventHandlerDeclaration(attName):
    """Check if an XML attribute name represents an event-handler
    declaration (e.g. "onClick": "on" prefix + uppercase letter).

    Fixes: the original indexed attName[2] unconditionally, raising
    IndexError for names of length 2 or less (e.g. "on", "id").
    """
    return len(attName) > 2 and attName[:2] == "on" and attName[2].isupper()
def denormalise_data(norm_vals, norm_pars):
    """Undo columnwise min/max normalisation.

    Inputs:
        norm_vals - 1d np array of normalised values
        norm_pars - 2 element list with min and max values
    Outputs:
        1d np array of denormalised values
    """
    lo, hi = norm_pars[0], norm_pars[1]
    return lo + norm_vals * (hi - lo)
def elision_normalize(s):
    """Turn unicode characters which look similar to U+2019 (right single
    quotation mark) into U+2019 itself."""
    # U+02BC modifier apostrophe, U+1FBF Greek psili, U+0027 ASCII
    # apostrophe, U+1FBD Greek koronis.
    lookalikes = str.maketrans({
        "\u02BC": "\u2019",
        "\u1FBF": "\u2019",
        "\u0027": "\u2019",
        "\u1FBD": "\u2019",
    })
    return s.translate(lookalikes)
def morph_name(file_path, common_prefix):
    """Return a unique name for a file: the part of its path not covered
    by `common_prefix`, joined with hyphens."""
    unique_part = file_path.replace(common_prefix, "")
    dir_levels = unique_part.count("/")
    if dir_levels == 0:
        return unique_part
    # The common prefix may have split a directory name, so derive the
    # new name from the original path to ensure full names are used.
    return "-".join(file_path.split("/")[-(dir_levels + 1):])
def get_providers(targets, provider, map_fn = None):
    """Return the given provider (or a field) from each target in the list.

    Targets that do not contain the requested provider are skipped, so
    the result may be shorter than `targets`; this is not an error.

    Args:
        targets: A list of targets.
        provider: The provider to retrieve.
        map_fn: Optional single-argument function applied to each
            retrieved provider before it is returned.

    Returns:
        A list of the providers requested from the targets.
    """
    if map_fn:
        return [map_fn(t[provider]) for t in targets if provider in t]
    return [t[provider] for t in targets if provider in t]
def tenchi_split(list1):
    """Split a list of per-rod codes into *Ten* and *Chi* bead codes.

    Each code is split into its tens digit (Ten bead) and units digit
    (Chi beads); the concatenated digit pairs plus '.svg' form the
    output file name.

    :param list1: list of integer codes, one per rod
    :returns: tuple (ten, chi, name)

    :example:
    >>> t1, c1, name = tenchi_split([0, 0, 2, 3])
    >>> (t1, c1, name)
    ([0, 0, 0, 0], [0, 0, 2, 3], '00000203.svg')
    """
    ten = []
    chi = []
    name_parts = []
    for code in list1:
        t, c = divmod(code, 10)
        ten.append(t)
        chi.append(c)
        name_parts.append(repr(t) + repr(c))
    return ten, chi, ''.join(name_parts) + '.svg'
def normalize(message: str, **kwargs) -> str:
    """Normalize the message according to the kwargs.

    Without any additional keyword arguments the message is returned
    unmodified.  Each modifier is a boolean keyword argument; enabled
    modifiers are applied in the declaration order below.  Each modifier
    name corresponds to a module-level function of the same name.

    Supported modifiers (all default to False):
        accents_to_ascii, lower, upper, remove_lower_alpha,
        remove_upper_alpha, remove_alpha, remove_accents, remove_nums,
        remove_puncts, remove_quotes, remove_brackets, remove_spaces.

    Raises:
        ValueError: when a kwarg is not a recognized modifier.
        TypeError: when a kwarg value is not boolean.

    Returns:
        str: the normalized message.

    Fixes: the original applied a modifier as soon as it appeared in
    kwargs — even when explicitly passed as False, and in kwargs order
    rather than the documented modifier order.
    """
    if any(type(value) != bool for value in kwargs.values()):
        raise TypeError("Got non bool values")
    modifiers = {
        'accents_to_ascii': False,
        'lower': False,
        'upper': False,
        'remove_lower_alpha': False,
        'remove_upper_alpha': False,
        'remove_alpha': False,
        'remove_accents': False,
        'remove_nums': False,
        'remove_puncts': False,
        'remove_quotes': False,
        'remove_brackets': False,
        'remove_spaces': False
    }
    # Validate and record every requested modifier first.
    for key, value in kwargs.items():
        if key not in modifiers:
            raise ValueError(f"Unexpected argument: {key}")
        modifiers[key] = value
    # Apply only the enabled modifiers, in declaration order.
    for key, enabled in modifiers.items():
        if enabled:
            message = globals()[key](message)
    return message
def convert_int_to_binary(int_to_convert):
    """Convert a non-negative int to its binary string representation.

    I'm aware that in python I could've just done:
        '{0:b}'.format(int_to_convert)
    But that's a little less informative :)

    Args:
        int_to_convert (int): The int to convert.

    Returns:
        str

    Fixes: the original returned "" for 0; now returns "0", matching
    '{0:b}'.format(0).
    """
    if int_to_convert == 0:
        return '0'
    bits = []
    while int_to_convert != 0:
        bits.append(str(int_to_convert % 2))
        int_to_convert //= 2
    return ''.join(reversed(bits))
def lb_lookup(session, lb_name):
    """Look up an ELB by name.

    Args:
        session (Session|None): Boto3 session used to query AWS.  When
            None, no lookup is performed and None is returned.
        lb_name (string): Name of the ELB to look up (dots become
            hyphens).

    Returns:
        (bool): whether lb_name is a valid ELB name (classic or v2).
    """
    if session is None:
        return None
    lb_name = lb_name.replace('.', '-')
    # Classic ELB.
    response = session.client('elb').describe_load_balancers()
    if any(lb['LoadBalancerName'] == lb_name
           for lb in response['LoadBalancerDescriptions']):
        return True
    # ELB v2 (ALB/NLB).
    response = session.client('elbv2').describe_load_balancers()
    if any(lb['LoadBalancerName'] == lb_name
           for lb in response['LoadBalancers']):
        return True
    return False
def find_repeated_frequency(changes):
    """Return the first cumulative frequency reached twice, cycling
    through `changes` as many times as needed."""
    seen = set()
    frequency = 0
    index = 0
    total = len(changes)
    while frequency not in seen:
        seen.add(frequency)
        frequency += changes[index % total]
        index += 1
    return frequency
def get_primary_segments(segments, num_points):
    """Return the segments supported by at least `num_points` points.

    Parameters
    ----------
    segments : list of BoundarySegment
        The boundary (wall) segments of the building (part).
    num_points : int
        The minimum number of supporting points for a segment to be
        considered primary.

    Returns
    -------
    list of segments
    """
    def is_primary(segment):
        return len(segment.points) >= num_points

    return list(filter(is_primary, segments))
def primefactors(x):
    """Return the prime factorisation of x as a list of primes.

    multiply.reduce(primefactors(x)) == x by definition.

    Fixes: the original used true division (x = x / prime), which turns
    x into a float in Python 3 and risks precision errors for large
    inputs; floor division keeps the arithmetic exact.
    """
    results = []
    prime = 2
    while prime <= x:
        if x % prime == 0:
            results.append(prime)
            x //= prime
        else:
            prime += 1
    return results
def xor_blocks(block_1, block_2):
    """XOR two blocks written in hexadecimal as strings.

    Parameters
    ----------
    block_1, block_2 : string
        Blocks of bytes written in hexadecimal.

    Returns
    -------
    string
        The XORed block, zero-padded to 32 hex digits.
    """
    xored = int(block_1, 16) ^ int(block_2, 16)
    return '{:032x}'.format(xored)
def sub_dict_in_dict(sub_dict, dict_list, remove_keys=None):
    """Return the dicts in `dict_list` matching the single-key condition
    in `sub_dict` (compared via str()), with `remove_keys` stripped.

    Parameters
    ----------
    sub_dict : dict
        Single-entry dictionary giving the key and required value.
    dict_list : list
        List of dictionaries to filter.
    remove_keys : list, optional
        Keys removed from the returned dictionary copies.

    Example
    -------
    sub_dict_in_dict({"x": 1}, [{"x": 2, ...}, {"x": 1, "z": 2}])
        --> [{"x": 1, "z": 2}]

    Returns
    -------
    list
        Copies of the matching dictionaries.

    Fixes: replaced the mutable default argument `remove_keys=[]` with a
    None sentinel.
    """
    if remove_keys is None:
        remove_keys = []
    assert len(sub_dict) == 1
    key = next(iter(sub_dict))
    matched_dicts = []
    for one_dict in dict_list:
        d = one_dict.copy()
        for k in remove_keys:
            d.pop(k)
        if str(one_dict[key]) == str(sub_dict[key]):
            matched_dicts.append(d)
    return matched_dicts
def get_ingest_frequency(frequencies):
    """Calculate the amount of seconds in a retention unit.

    Returns the smallest configured interval in seconds, or 0 when no
    frequencies are configured.
    """
    unit_seconds = {
        'minute': 60,
        'hourly': 3600,
        'daily': 86400,
        'weekly': 604800,
        'monthly': 2592000,
        'quarterly': 7776000,
        'yearly': 31536000,
    }
    intervals = [unit_seconds[unit] * data['frequency']
                 for unit, data in frequencies.items()]
    return min(intervals) if intervals else 0
def calculateFreq(counted_char_dict):
    """Calculate per-character probability (with replacement) based on
    character frequencies.

    Returns a dict mapping each character to count/total.  Because
    Shannon entropy cannot take log(0), an underscore entry with zero
    probability is replaced by a tiny value (1e-100).

    Fixes: the original indexed probability_dict["_"] unconditionally,
    raising KeyError when "_" was never counted, and divided by zero for
    an empty input dict.
    """
    totalChars = sum(counted_char_dict.values())
    if totalChars == 0:
        return {}
    probability_dict = {
        char: count / totalChars
        for char, count in counted_char_dict.items()
    }
    # Only adjust "_" when it is present with a zero count.
    if probability_dict.get("_") == 0:
        probability_dict["_"] = 1e-100
    return probability_dict
def calc_chunksize(n_workers, len_iterable, factor=4):
    """Calculate chunksize argument for Pool-methods.

    Resembles source-code within `multiprocessing.pool.Pool._map_async`.
    """
    quotient, remainder = divmod(len_iterable, n_workers * factor)
    return quotient + 1 if remainder else quotient
def sequence_search(arr: list, k: int) -> int:
    """Linear search for `k` in `arr`.

    :param arr: list to search
    :param k: item to search for
    :return: index of the first occurrence of k, or -1 if absent

    Fixes: the original appended k to arr as a loop sentinel and never
    removed it, permanently mutating the caller's list on every call.
    """
    for index, item in enumerate(arr):
        if item == k:
            return index
    return -1
def inverse_mod(a, m):
    """Return the multiplicative inverse of a modulo m.

    Uses the extended Euclidean algorithm; requires gcd(a, m) == 1
    (enforced by the assert below).
    """
    # Reduce a into the range [0, m).
    if a < 0 or m <= a:
        a = a % m
    # From Ferguson and Schneier, roughly:
    c, d = a, m
    uc, vc, ud, vd = 1, 0, 0, 1
    while c != 0:
        # Invariant: uc*a + vc*m == c and ud*a + vd*m == d.
        q, c, d = divmod(d, c) + (c,)
        uc, vc, ud, vd = ud - q * uc, vd - q * vc, uc, vc
    # At this point, d is the GCD, and ud*a+vd*m = d.
    # If d == 1, this means that ud is a inverse.
    assert d == 1
    # Normalise the inverse into [0, m).
    if ud > 0:
        return ud
    else:
        return ud + m
def saludar(nombre: str) -> str:
    """Welcome greeting for members of the 'Ayuda en Python' community.

    :param nombre: user's name
    :return: welcome message
    :rtype: str
    """
    capitalizado = nombre.capitalize()
    return (f"Hola {capitalizado}, "
            "'Ayuda en Python' te da la bienvenida!")
def _validate_static_url_path(static_url_path): """ Validates the given static folder value. Parameters ---------- static_url_path : `str` Static url path value to validate. Returns ------- static_url_path : `None` or `str` The validated static url path value. Raises ------ TypeError If `static_url_path` was not given either as `None` or `str` instance. """ if static_url_path is not None: if type(static_url_path) is str: pass elif isinstance(static_url_path, str): static_url_path = str(static_url_path) else: raise TypeError( f'`static_url_path` can be given as `str` instance, got ' f'{static_url_path.__class__.__name__}.' ) return static_url_path
def test_2(input): """ >>> test_2("hijklmmn") False >>> test_2("abcdffaa") True >>> test_2("") True >>> test_2("abdfasdf") True """ if "i" in input or "o" in input or "l" in input: return False return True
def filter_duplicates(steps, losses, accuracies):
    """Return copies of the three parallel lists with consecutive
    duplicate steps removed (the first occurrence is kept)."""
    assert steps
    assert len(steps) == len(losses) == len(accuracies)
    out_steps = [steps[0]]
    out_losses = [losses[0]]
    out_accuracies = [accuracies[0]]
    for step, loss, acc in zip(steps[1:], losses[1:], accuracies[1:]):
        # A step equal to the last kept step is a duplicate run.
        if step != out_steps[-1]:
            out_steps.append(step)
            out_losses.append(loss)
            out_accuracies.append(acc)
    return out_steps, out_losses, out_accuracies
def str_time(seconds, milliseconds=0):
    """Format a duration into a human-readable string.

    Produces h:mm:ss, m:ss, 0:ss or ms-only forms, appending
    .milliseconds (3 digits) when a millisecond count is given.
    """
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours:
        template = ("{0}:{1:0>2}:{2:0>2}.{3:0>3}" if milliseconds
                    else "{0}:{1:0>2}:{2:0>2}")
        return template.format(hours, minutes, seconds, milliseconds)
    if minutes:
        template = "{0}:{1:0>2}.{2:0>3}" if milliseconds else "{0}:{1:0>2}"
        return template.format(minutes, seconds, milliseconds)
    if seconds:
        if milliseconds:
            return "{0}.{1:0>3}".format(seconds, milliseconds)
        return "0:{0:0>2}".format(seconds)
    return "0.{0:0>3}".format(milliseconds)
def is_sorted(t):
    """Check whether a list is in non-decreasing order.

    t: list
    returns: boolean
    """
    ordered = list(t)
    ordered.sort()
    return t == ordered
def kernel_launch_name(length, precision):
    """Return the generated rocFFT internal kernel name for the given
    transform length and precision."""
    return 'rocfft_internal_dfn_{}_ci_ci_stoc_{}'.format(precision, length)
def break_camel(s):
    """Insert a space before every uppercase letter of `s`.

    Time complexity: O(n). Space complexity: O(n).
    """
    return ''.join(' ' + ch if ch.isupper() else ch for ch in s)
def lr_schedule(epoch):
    """Learning Rate Schedule.

    Starts at 1e-3 and applies the decay factor of the first epoch
    threshold that `epoch` exceeds; prints and returns the result.
    """
    # (threshold, decay factor) pairs, checked from largest threshold down.
    decay_table = ((100, 0.5e-3), (50, 1e-3), (40, 1e-2), (20, 1e-1))
    lr = 1e-3
    for threshold, factor in decay_table:
        if epoch > threshold:
            lr *= factor
            break
    print('Learning rate: ', lr)
    return lr
def harmonic_mean(x, y):
    """Return the harmonic mean of x and y.

    >>> harmonic_mean(2, 6)
    3.0
    >>> harmonic_mean(1, 1)
    1.0
    >>> harmonic_mean(2.5, 7.5)
    3.75
    >>> harmonic_mean(4, 12)
    6.0
    """
    # Same evaluation order as 2 / (1/x + 1/y), just named for clarity.
    reciprocal_sum = 1 / x + 1 / y
    return 2 / reciprocal_sum
def sp_get_dict(sub_dict, param, default):
    """Standard interface to list-of-list params of an items sub-dictionary.

    Parameters
    -----------
    sub_dict : dict
        Items sub-dictionary of the network dictionary.
    param : str
        String specifying the parameter to extract.
    default : None, int, etc.
        The default value for the parameter if none given.
    """
    value = sub_dict.get(param, None)
    # Flatten the per-group source lists to count total sources.
    flattened = [src for group in sub_dict["source"] for src in group]

    if len(flattened) == 1:
        # Single source: normalize scalars / missing values to [[...]] form.
        if value is None:
            return [[default]]
        if not isinstance(value, list):
            return [[value]]
        return value

    # Multiple (or zero) sources: fill missing value with one default per
    # source, mirroring the source list-of-lists layout.
    if value is None:
        return [[default for _ in group] for group in sub_dict["source"]]
    return value
def sanitize(string):
    """Remove quirks of corpora (especially wiki).

    Normalizes odd space characters to plain 0x20 and collapses runs of
    spaces down to a single space.
    """
    # Non-breaking space (0xA0) and thin space (0x2009) become regular spaces.
    string = string.replace('\xa0', ' ')
    string = string.replace('\u2009', ' ')
    # Each pass halves the run lengths, so this terminates quickly.
    while '  ' in string:
        string = string.replace('  ', ' ')
    return string
def dec_to_another(to_param, number: int):
    """
    Convert a non-negative decimal integer to another number system.

    :param to_param: Target number system base (2..16 supported by the digit table)
    :param number: Non-negative decimal number to convert
    :type number: int
    :return: The number written in base ``to_param``
    :rtype: str
    """
    # Digit lookup by remainder; supports bases up to 16.
    digits = '0123456789ABCDEF'
    # Bug fix: the original loop never ran for 0 and returned '' instead of '0'.
    if number == 0:
        return '0'
    remaining = number
    converted_number = ''
    while remaining > 0:
        # Peel off the least-significant digit each iteration.
        remaining, rest = divmod(remaining, to_param)
        converted_number = digits[rest] + converted_number
    return converted_number
def _GetWithPrefix(item, attribute, prefix): """Convenience method to get an attribute from an item and attach a prefix. Args: item: Any dict-like object with attribute fetching provided by 'get'. attribute: Attribute to retrieve from item. prefix: A string prefix for the returned string, if item.attribute has a value. Returns: Grabs the attribute from item if it exists and returns it with prefix prepended. """ value = item.get(attribute, None) if value: value = str(prefix) + str(value) return value
def kwargs_to_string(kwargs):
    """
    Given a set of kwargs, turns them into a string which can then be passed
    to a command.

    :param kwargs: kwargs from a function call.
    :return: outstr: A string, which is '' if no kwargs were given, and the
        kwargs in ' -key value' format otherwise.
    """
    return ''.join(' -{} {}'.format(key, value) for key, value in kwargs.items())
def ChangeBackslashToSlashInPatch(diff_text):
    """Formats file paths in the given text to unix-style paths.

    Only the '--- ' and '+++ ' header lines of a diff are rewritten; all
    other lines (including hunk bodies) are left untouched. Returns None
    for empty/None input.
    """
    if not diff_text:
        return None
    fixed_lines = [
        line.replace('\\', '/') if line.startswith(('--- ', '+++ ')) else line
        for line in diff_text.split('\n')
    ]
    return '\n'.join(fixed_lines)
def _translate_keys(inst): """ Coerces into dictionary format, excluding all model attributes save for id and name """ return dict(server=dict(id=inst['id'], name=inst['display_name']))
def camel_to_snake(camel):
    """
    Convert `camel` to snake case.
    """
    pieces = []
    for ch in camel:
        if ch.isupper():
            # Every uppercase letter gets an underscore prefix; a leading
            # underscore is stripped at the end.
            pieces.append('_')
            pieces.append(ch.lower())
        else:
            pieces.append(ch)
    return ''.join(pieces).strip('_')
def minor_version_tuple(bz_target):
    """
    Turns '4.5' or '4.5.z' into numeric (4, 5)
    Assume the target version begins with numbers 'x.y' - explode otherwise

    :param bz_target: A string like "4.5.0"
    :return: A tuple like (4, 5)
    """
    if bz_target == '---':
        return (0, 0)
    # Append '.z' so a bare 'x.y' still splits into three pieces.
    parts = f"{bz_target}.z".split('.', 2)
    return (int(parts[0]), int(parts[1]))
def get_photometric_columns(bands, photo_type, err_type, idx, corr=None):
    """ Returns the photometric columns selected by Photoz Trainning

    Args:
        bands (list): Band list
        photo_type (string): string containing magnitude with {} to
            concatenate the band.
        err_type (string): string containing magnitude erro with {} to
            concatenate the band.
        idx (string): index column name
        corr (string, optional): column name to calculate the correction.
            Defaults to None.

    Returns:
        list: selected photometric columns
    """
    columns = [idx]
    if corr:
        columns.append(corr)
    for band in bands:
        # One magnitude column followed by its error column, per band.
        columns.extend((photo_type.format(band), err_type.format(band)))
    return columns
def get_cpp_type(typeTuple):
    """
    Take a list of dbus types and perform validity checking, such as:
        [ variant [ dict [ int32, int32 ], double ] ]
    This function then converts the type-list into a C++ type string.

    Each entry is a 2-element sequence: (type name, parameter list).
    Container types (array, dict) are expanded recursively into
    template syntax; scalar types must have an empty parameter list.
    """
    propertyMap = {
        'byte': {'cppName': 'uint8_t', 'params': 0},
        'boolean': {'cppName': 'bool', 'params': 0},
        'int16': {'cppName': 'int16_t', 'params': 0},
        'uint16': {'cppName': 'uint16_t', 'params': 0},
        'int32': {'cppName': 'int32_t', 'params': 0},
        'uint32': {'cppName': 'uint32_t', 'params': 0},
        'int64': {'cppName': 'int64_t', 'params': 0},
        'uint64': {'cppName': 'uint64_t', 'params': 0},
        'double': {'cppName': 'double', 'params': 0},
        'string': {'cppName': 'std::string', 'params': 0},
        'array': {'cppName': 'std::vector', 'params': 1},
        'dict': {'cppName': 'std::map', 'params': 2}}

    if len(typeTuple) != 2:
        raise RuntimeError("Invalid typeTuple %s" % typeTuple)

    first, rest = typeTuple
    entry = propertyMap[first]
    cpp_name = entry['cppName']
    param_count = entry['params']

    # Scalar types: the parameter list must be empty.
    if param_count == 0:
        if len(rest) != 0:
            raise RuntimeError("Invalid typeTuple %s" % typeTuple)
        return cpp_name

    # Container types: parameter count must match (-1 means "any").
    if (param_count != -1) and (param_count != len(rest)):
        raise RuntimeError("Invalid entry count for %s : %s" % (first, rest))

    if entry.get('noparse'):
        # Do not parse the parameter list; use the first element of each
        # tuple and ignore possible parameters.
        inner = ", ".join(e[0] for e in rest)
    else:
        inner = ", ".join(get_cpp_type(e) for e in rest)
    return cpp_name + '<' + inner + '>'
def _dijkstra(graph, start, end):
    """Dijkstra's algorithm Python implementation (PRIVATE).

    Algorithm adapted from
    http://thomas.pelletier.im/2010/02/dijkstras-algorithm-python-implementation/.
    However, an obvious bug in::

        if D[child_node] >(<) D[node] + child_value:

    is fixed.

    Arguments:
     - graph: Dictionary of dictionary (keys are vertices); inner dicts map
       a neighbour vertex to the edge weight.
     - start: Start vertex.
     - end: End vertex.

    Returns:
        The summed edge-weight distance of the reconstructed start-to-end
        path (an int/float, NOT the list of vertices -- the path list is
        internal only).

    NOTE(review): unreachable vertices are initialised to the magic value
    100 rather than infinity, so total path distances >= 100 would be
    handled incorrectly -- confirm edge weights keep sums below this cap.
    """
    D = {}  # Final distances dict
    P = {}  # Predecessor dict
    # Fill the dicts with default values
    for node in graph.keys():
        D[node] = 100  # Vertices are unreachable (sentinel, see NOTE above)
        P[node] = ""  # Vertices have no predecessors
    D[start] = 0  # The start vertex needs no move
    unseen_nodes = list(graph.keys())  # All nodes are unseen
    while len(unseen_nodes) > 0:
        # Select the node with the lowest value in D (final distance).
        # Linear scan, so the overall algorithm is O(V^2).
        shortest = None
        node = ""
        for temp_node in unseen_nodes:
            if shortest is None:
                shortest = D[temp_node]
                node = temp_node
            elif D[temp_node] < shortest:
                shortest = D[temp_node]
                node = temp_node
        # Remove the selected node from unseen_nodes
        unseen_nodes.remove(node)
        # For each child (ie: connected vertex) of the current node,
        # relax the edge if a shorter route through `node` was found.
        for child_node, child_value in graph[node].items():
            if D[child_node] > D[node] + child_value:
                D[child_node] = D[node] + child_value
                # To go to child_node, you have to go through node
                P[child_node] = node
        if node == end:
            break  # Early exit: the end vertex has been finalised
    # Set a clean path
    path = []
    # We begin from the end
    node = end
    distance = 0
    # While we are not arrived at the beginning, walk predecessors backwards
    while not (node == start):
        if path.count(node) == 0:
            path.insert(0, node)
            # Insert the predecessor of the current node
            node = P[node]  # The current node becomes its predecessor
        else:
            # Seeing a repeated vertex means a predecessor cycle; bail out
            # rather than loop forever.
            break
    path.insert(0, start)  # Finally, insert the start vertex
    # Sum the edge weights along the reconstructed path.
    for i in range(len(path) - 1):
        distance += graph[path[i]][path[i + 1]]
    return distance
def get_original_keys(keys, keymap, strict=False):
    """
    Get original keys from normalized keys.

    Args:
        keys: The collection of keys that identify a value in the dataset.
        keymap: The keymap returned from :meth:`load` or :meth:`loads`.
        strict: If true, a KeyError will be raised if the given keys are not
            found in the keymap. Otherwise, the given key will be returned
            rather than the original key. This is helpful when reporting
            errors on required keys that do not exist in the data set.

    Returns:
        A tuple containing the original key names.
    """
    recovered = []
    for position, key in enumerate(keys):
        prefix = tuple(keys[:position + 1])
        try:
            recovered.append(keymap[prefix].key_line.key)
        except AttributeError:
            # this occurs normally for list indexes
            recovered.append(key)
        except (KeyError, IndexError):
            if strict:
                raise
            recovered.append(key)
    return tuple(recovered)
def _get_enabled_components(cluster): """Get all components with non-zero "count".""" return [ (key, value) for key, value in cluster["specification"]["components"].items() if int(value["count"]) > 0 ]
def CalcE(U):
    """Return the result of the non-linear variable E = 0.5U**2."""
    # Evaluate as (0.5 * U) * U, identical operation order to the original.
    half_u = 0.5 * U
    return half_u * U
def abbrev(line, ab, nchar):
    """Check whether `line` is a valid abbreviation of `ab`.

    A valid abbreviation is a prefix of `ab` that is at least `nchar`
    characters long.

    :param line: candidate abbreviation
    :param ab: the full word being abbreviated
    :param nchar: minimum number of characters the abbreviation must have
    :return: True if `line` validly abbreviates `ab`, else False

    Bug fixes versus the original:
    - when ``len(line) < nchar`` the original indexed ``line`` past its end
      and raised IndexError; too-short candidates now return False.
    - the return value is now consistently bool (the original mixed 0,
      False and True; all callers relying on truthiness are unaffected).
    """
    # Candidate longer than the full word can never be an abbreviation.
    if len(line) > len(ab):
        return False
    # Candidate shorter than the required minimum is rejected.
    if len(line) < nchar:
        return False
    # Every character of the candidate must match the word's prefix.
    return line == ab[:len(line)]
def _from_proc_output(output: bytes) -> str: """ Convert proc output from bytes to str, and trim heading- and tailing-spaces :param output: output in bytes :return: output in str """ return str(output, encoding='utf-8').strip(' \t\n')
def param_val(val, values):
    """Return the mapped value for `val` from `values`, or `val` itself
    when no mapping exists."""
    if val in values:
        return values[val]
    return val
def getFileDialogTitle(msg, title):
    """
    Create nicely-formatted string based on arguments msg and title

    :param msg: the msg to be displayed
    :param title: the window title
    :return: "title - msg", just one of them, or None when both are falsy
    """
    if msg:
        return "%s - %s" % (title, msg) if title else str(msg)
    if title:
        return str(title)
    return None
def capitalize(s):
    """
    Returns a capitalized version of the string `s`.

    >>> capitalize("test")
    'Test'

    .. seealso:: http://stackoverflow.com/a/352513/1913780
    """
    if not s:
        return s
    return s[0].upper() + s[1:]
def invert_reduce_to_nonempty(objs, ids, objs_reduced):
    """Inverse of :func:`reduce_to_nonempty`.

    Copies `objs` and writes each reduced object back at its recorded index.
    """
    restored = list(objs)
    for position, replacement in zip(ids, objs_reduced):
        restored[position] = replacement
    return restored
def _compute_yield_pf(metrics): """ Compute the yield (number of bases passing filter) from a populated metrics object generated by get_illumina_sequencing_metrics() """ if not metrics.get('num_clusters_pf'): return None total_length = 0 total_length += len(metrics.get('read1_q30_fraction_by_cycle') or []) total_length += len(metrics.get('read2_q30_fraction_by_cycle') or []) total_length += len(metrics.get('index1_q30_fraction_by_cycle') or []) total_length += len(metrics.get('index2_q30_fraction_by_cycle') or []) return int(metrics.get('num_clusters_pf')*total_length)
def o_n_solution(arr1, arr2):
    """Median of two sorted arrays of equal length via a half merge.

    Time complexity: half of a merge, O(n).

    Args:
        arr1: first sorted array (length n).
        arr2: second sorted array (length n).

    Returns:
        The median of the combined 2n elements, i.e. the average of the
        (n-1)-th and n-th merged elements.

    Bug fix: the original loop also required ``i < len(arr1) and
    j < len(arr2)``, so it stopped as soon as either array was exhausted
    and raised IndexError whenever the n+1 smallest elements did not
    interleave (e.g. arr1 = [1, 2, 3], arr2 = [4, 5, 6]).
    """
    merged = []
    i = j = 0
    n = len(arr1)
    # Merge only the first n+1 elements -- enough to read positions n-1 and n.
    while len(merged) < n + 1:
        # Take from arr1 when it still has elements and its head is smaller
        # (or arr2 is exhausted); otherwise take from arr2.
        if i < len(arr1) and (j >= len(arr2) or arr1[i] < arr2[j]):
            merged.append(arr1[i])
            i += 1
        else:
            merged.append(arr2[j])
            j += 1
    return (merged[n - 1] + merged[n]) / 2