content
stringlengths
42
6.51k
def is_filler(sym):
    """Return True if C{sym} is a filler word such as C{<sil>} or C{+um+}.

    The sentence markers C{<s>} and C{</s>} are explicitly excluded.

    @param sym: Word string to test
    @type sym: string
    @return: True if C{sym} is a filler word (but not <s> or </s>)
    @rtype: boolean
    """
    if sym in ('<s>', '</s>'):
        return False
    # Fillers are delimited either by angle brackets or by plus signs.
    return (sym[0], sym[-1]) in (('<', '>'), ('+', '+'))
def cast_element_ids_to_s(hash_element_ids):
    """Serialise a hash of element ids into an id-filter string.

    :param hash_element_ids: node/relation/way ids
    :type hash_element_ids: hash
    :returns: a string of node/relation/way ids, e.g. ``'node(id:1,2);'``
    :rtype: str
    """
    parts = []
    for element_type in ('node', 'relation', 'way'):
        ids = hash_element_ids[element_type]
        if ids:
            joined_ids = ','.join(str(element) for element in ids)
            parts.append('{}(id:{});'.format(element_type, joined_ids))
    return ''.join(parts)
def deg_to_compass(deg):
    """Convert an angle in degrees to one of 16 compass directions.

    Each sector spans 360/16 = 22.5 degrees; 0 degrees sits in the *middle*
    of sector 'N', so half a sector (11.25) is added before dividing.
    Modulo 16 wraps out-of-range inputs.
    """
    directions = (
        'N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
        'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW',
    )
    return directions[int((deg + 11.25) // 22.5) % 16]
def add_json_datetime_property(i, date='_date', time='_time', format='{d}T{t}:00', datetime_field='time'):
    """Format datetime field based on given date, time, and format.

    Rationale: Some applications require a joint date-time variable in a
    specific format. Given an input json feature with properties and the
    names of fields representing 'date' and 'time', a 'datetime' field is
    added in place.

    Bug fix: the property names '_date' and '_time' were previously
    hard-coded in the body, so the `date` and `time` parameters were
    ignored. The defaults keep the old behavior for existing callers.

    Args:
        i (geojson feature): feature whose properties are read and updated
        date (string): Property name of date string
        time (string): Property name of time string
        format (string): A parameterised format combining the two fields
        datetime_field (string): Property name for the formatted result

    Returns:
        geojson feature: the same feature with the datetime property set
    """
    t = i['properties'][time]
    d = i['properties'][date]
    i['properties'][datetime_field] = format.format(d=d, t=t)
    return i
def primes(n):
    """Return dict mapping each prime factor of n to its multiplicity.

    primes(20) -> {2: 2, 5: 1}

    Bug fix: the old code recorded ``primfac[d] = 0`` for *every* trial
    divisor, so non-dividing primes leaked in with multiplicity zero
    (e.g. primes(15) returned {2: 0, 3: 1, 5: 1}). Entries are now only
    created when the divisor actually divides n.
    """
    primfac = {}
    d = 2
    while d * d <= n:
        while n % d == 0:
            primfac[d] = primfac.get(d, 0) + 1
            n //= d
        d += 1
    if n > 1:
        # Whatever remains is itself prime.
        primfac[n] = 1
    return primfac
def print_scores_result(result, verbose=False):
    """Print a precision/recall summary of `result` when verbose.

    Expected layout of `result` (six floats):
    [min_prec, c_avg_prec, t_avg_prec, min_rec, c_avg_rec, t_avg_rec]

    Bug fix: the c-avg row previously printed result[2]/result[5] — the
    same indices as the t-avg row — leaving result[1] and result[4]
    unused. It now prints result[1]/result[4].

    :param result: sequence of six precision/recall values
    :param verbose: print nothing unless True
    :returns: None
    """
    spacers = 4
    if verbose:
        print(f"\nOverall stats: \n{spacers*' '}min. prec.: {result[0]:6.3f};"
              f"{spacers*' '}min. rec.: {result[3]:6.3f}")
        print(f"{(spacers-2)*' '}c-avg. prec.: {result[1]:6.3f};"
              f"{(spacers-2)*' '}c-avg. rec.: {result[4]:6.3f}")
        print(f"{(spacers-2)*' '}t-avg. prec.: {result[2]:6.3f};"
              f"{(spacers-2)*' '}t-avg. rec.: {result[5]:6.3f}")
    return None
def intersect(a, b):
    """Return the intersection of two lists (order not preserved)."""
    return list(set(a).intersection(b))
def _create_col_dict(allowed_output_cols, req_cols): """ Creates dictionary to apply check condition while extracting tld. """ col_dict = {col: True for col in allowed_output_cols} if req_cols != allowed_output_cols: for col in allowed_output_cols ^ req_cols: col_dict[col] = False return col_dict
def isKey(value):
    """Check if a value is a key (lexicographically between "a" and "z")."""
    return "a" <= value <= "z"
def compute_nbins(max_diff, binsize, refine_factor=1, max_nbins=None):
    """
    Helper utility to find the number of bins that satisfies the
    constraints of (binsize, refine_factor, and max_nbins).

    Parameters
    ------------
    max_diff : double
        Max. difference (spatial or angular) to be spanned,
        (i.e., range of allowed domain values)
    binsize : double
        Min. allowed binsize (spatial or angular)
    refine_factor : integer, default 1
        How many times to refine the bins. The refinements occurs after
        ``nbins`` has already been determined (with ``refine_factor-1``).
    max_nbins : integer, default None
        Max number of allowed cells

    Returns
    --------
    nbins : integer, >= 1
        Number of bins satisfying bin size >= ``binsize``, the
        refinement factor, and nbins <= ``max_nbins``.

    Example
    --------
    >>> from Corrfunc.utils import compute_nbins
    >>> max_diff = 180
    >>> binsize = 10
    >>> compute_nbins(max_diff, binsize)
    18
    >>> refine_factor=2
    >>> max_nbins = 20
    >>> compute_nbins(max_diff, binsize, refine_factor=refine_factor,
    ...               max_nbins=max_nbins)
    20
    """
    if max_diff <= 0 or binsize <= 0:
        raise ValueError('Error: Invalid value for max_diff = {0} or binsize '
                         '= {1}. Both must be positive'.format(max_diff, binsize))
    if max_nbins is not None and max_nbins < 1:
        raise ValueError('Error: Invalid for the max. number of bins allowed '
                         '= {0}.Max. nbins must be >= 1'.format(max_nbins))
    if refine_factor < 1:
        raise ValueError('Error: Refine factor must be >=1. Found '
                         'refine_factor = {0}'.format(refine_factor))
    # At least 1 bin, then refine.
    nbins = max(1, int(max_diff / binsize)) * refine_factor
    # Clamp to max number of bins (if passed as a parameter).
    if max_nbins:
        nbins = min(int(max_nbins), nbins)
    return nbins
def flatten_dictionary(dictionary):
    """Get a list of (key, value) tuples from a dict of iterables."""
    return [(key, value) for key, values in dictionary.items() for value in values]
def compare(key, f, x, y):
    """compare(key, f, x, y) -> f(key(x), key(y))

    Compare key(x) with key(y) using f.
    """
    kx = key(x)
    ky = key(y)
    return f(kx, ky)
def get_published_time(_stories):
    """Extract the 'published' timestamp from each story.

    :param _stories: list of story dicts (title, link, id, published time)
    :return: list of published timestamps
    """
    return [story['published'] for story in _stories]
def apply_haste(target, rules, added_effects, left):
    """Apply haste: next turn, the target's attack beats an opposing attack
    (no clash).

    :param target: The character being affected
    :param rules: The ruleset to edit (mutated in place)
    :param added_effects: Additional ability effects
    :param left: True if the target is on the left side
    :return: (target, rules, added_effects)
    """
    beats = rules["attack"]["beats"]
    loses = rules["attack"]["loses"]
    if left:
        # Left side hasted: attack now beats (never loses to) attack.
        if "attack" in loses:
            loses.remove("attack")
        if "attack" not in beats:
            beats.append("attack")
    else:
        # Right side hasted: from this ruleset's view, attack loses to attack.
        if "attack" in beats:
            beats.remove("attack")
        if "attack" not in loses:
            loses.append("attack")
    return target, rules, added_effects
def getBasesLinear(cls, stop_at=object):
    """Return the linear chain of first base classes of cls, down to and
    including stop_at (follows only __bases__[0] at each level)."""
    chain = [cls]
    base = cls.__bases__[0]
    while base != stop_at:
        chain.append(base)
        base = base.__bases__[0]
    chain.append(base)
    return chain
def containers_result(containers):
    """Generate a stub resembling the result of "docker.Client.containers()".

    Unlike "docker.Client.images()", the result from "containers()" is just
    a stub; full information requires "docker.Client.inspect_container()".
    """
    return [{'Id': entry['Id'], 'ImageID': entry['Image']} for entry in containers]
def xf_list(lst):
    """Convert list into space-separated string for CSV; falsy -> ''."""
    return " ".join(lst) if lst else ""
def quick_sort(array):
    """Quick sort algorithm.

    Arg:
        array: a list
    Return:
        a new sorted list (input returned unchanged when len <= 1)
    """
    if len(array) <= 1:
        return array
    pivot = array[0]
    below = [item for item in array if item < pivot]
    above = [item for item in array if item > pivot]
    # Everything neither below nor above the pivot stays in the middle.
    middle = [item for item in array if not (item < pivot or item > pivot)]
    return quick_sort(below) + middle + quick_sort(above)
def is_toggle_cmd(msg):
    """Return True if msg is the string "TOGGLE" or an ISO 8601 datetime.

    Recognised datetime shapes:
    - sensor_reporter RpiGpioSensor, e.g. 2021-10-24T16:23:41.500792 (26 chars)
    - openHAB with timezone, e.g. 2022-02-27T17:58:45.165491+0100 (31 chars)
    """
    if msg == "TOGGLE":
        return True
    return len(msg) in (26, 31) and msg[10] == "T"
def in_range(size, x, y):
    """Return True if (x, y) lies inside a size-by-size grid."""
    return 0 <= x < size and 0 <= y < size
def _get_service_account_removal_reasons(service_account_validity): """ Get service account removal reason Args: service_account_validity(GoogleServiceAccountValidity): service account validity Returns: List[str]: the reason(s) the service account was removed """ removal_reasons = [] if service_account_validity is None: return removal_reasons if service_account_validity["valid_type"] is False: removal_reasons.append( "It must be a Compute Engine service account or an user-managed service account." ) if service_account_validity["no_external_access"] is False: removal_reasons.append( "It has either roles attached to it or service account keys generated. We do not allow this because we need to restrict external access." ) if service_account_validity["owned_by_project"] is False: removal_reasons.append("It is not owned by the project.") return removal_reasons
def realign_seqs(block, gap_char='.', align_indels=False): """Add gaps to a block so all residues in a column are equivalent. Given a block, containing a list of "sequences" (dicts) each containing a "seq" (actual string sequence, where upper=match, lower=insert, dash=gap), insert gaps (- or .) into the sequences s.t. 1. columns line up properly, and 2. all resulting sequences have the same length The reason this needs to be done is that the query/consensus sequence is not assigned gaps to account for inserts in the other sequences. We need to add the gaps back to obtain a normal alignment. `return`: a list of realigned sequence strings. """ # ENH: align inserts using an external tool (if align_indels) all_chars = [list(sq['seq']) for sq in block['sequences']] # NB: If speed is an issue here, consider Numpy or Cython # main problem: list.insert is O(n) -- would OrderedDict help? nrows = len(all_chars) i = 0 while i < len(all_chars[0]): rows_need_gaps = [r for r in all_chars if not r[i].islower()] if len(rows_need_gaps) != nrows: for row in rows_need_gaps: row.insert(i, gap_char) i += 1 # special attention should be taken to the last several columns len_set = set(map(len, all_chars)) if len(len_set) == 1: return [''.join(row) for row in all_chars] else: max_len = max(len_set) for row in all_chars: gap_num = max_len - len(row) if gap_num != 0: row.append(gap_char*gap_num) return [''.join(row) for row in all_chars]
def fixed(b1, b2):
    """XOR two equal-length byte sequences together, returning a bytearray."""
    # Refuse unequal lengths rather than silently truncating via zip.
    if len(b1) != len(b2):
        raise Exception('Byte arrays are not of equal length.')
    return bytearray(x ^ y for x, y in zip(b1, b2))
def prepare_querystring(*query_arguments, **kw_query_arguments):
    """Prepare a querystring dict from all dict positional arguments plus
    keyword arguments (keywords win on key collisions; non-dicts skipped).

    :return: Querystring dict.
    :rtype: dict
    """
    querystring = {}
    for candidate in query_arguments:
        if isinstance(candidate, dict):
            querystring.update(candidate)
    querystring.update(kw_query_arguments)
    return querystring
def steps_to_list(string_literal: str) -> list:
    """Takes a comma separated string and returns its items as a list."""
    return string_literal.split(',')
def homogenize(vectors, w=1.0):
    """Homogenise a list of vectors.

    Parameters
    ----------
    vectors : list
        A list of XYZ vectors.
    w : float, optional
        Homogenisation parameter. Defaults to ``1.0``; when zero, the
        appended component is ``0.0`` and XYZ are left unscaled.

    Returns
    -------
    list
        Homogenised vectors.

    Examples
    --------
    >>> homogenize([[1.0, 0.0, 0.0]])
    [[1.0, 0.0, 0.0, 1.0]]
    """
    if w:
        return [[x * w, y * w, z * w, w] for x, y, z in vectors]
    return [[x, y, z, 0.0] for x, y, z in vectors]
def create_violation_list(security_group_identifier, rule, cidr_ip, cidr_violations):
    """Append a violation record for (group, rule, cidr) and return the list."""
    violation = {
        "groupIdentifier": security_group_identifier,
        "ipProtocol": rule["ipProtocol"],
        "toPort": rule["toPort"],
        "fromPort": rule["fromPort"],
        "cidrIp": cidr_ip,
    }
    cidr_violations.append(violation)
    return cidr_violations
def ExtractCommand(line):
    """Extract the command from a line, stripping comments and whitespace.

    Input: a line that might contain a command and a '//' comment.
    Output: the command followed by a newline ('\\n' alone if no command).

    Bug fix: ``line.find("//")`` returns -1 when there is no comment, and
    ``line[:-1]`` then chopped the last character off comment-free
    commands (e.g. "D=M" became "D=\\n"). The unused ``result`` variable
    was also removed.
    """
    line = line.lstrip()
    comment_start = line.find("//")
    if comment_start != -1:
        line = line[:comment_start]
    return line.rstrip() + '\n'
def rename_element_charge(element, charge):
    """Format an element with a signed charge suffix, e.g. 'Na+1', 'Cl-1';
    a zero charge yields the bare element name."""
    if charge == 0:
        return f'{element}'
    sign = '+' if charge > 0 else '-'
    return f'{element}{sign}{abs(charge)}'
def extract_args(argv):
    """
    take sys.argv that is used to call a command-line script and return a
    correctly split list of arguments

    for example, this input:
        ["eqarea.py", "-f", "infile", "-F", "outfile", "-A"]
    will return this output:
        [['f', 'infile'], ['F', 'outfile'], ['A']]
    """
    joined = " ".join(argv)
    # chunk 0 is the program name; each later chunk is one flag + its values
    chunks = joined.split(' -')
    return [chunk.split() for chunk in chunks[1:]]
def _validate_func_only(func_only, where): """Helper for get_params()""" if func_only is None: return False if not isinstance(func_only, bool): raise ValueError('Invalid func_only value %s from %s' % (func_only, where)) return func_only
def minimum_edit_distance(s1, s2):
    """
    Levenshtein distance between two strings.
    From: https://rosettacode.org/wiki/Levenshtein_distance#Python
    """
    # Keep the shorter string as the DP row for less memory.
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s1) + 1))
    for row, char2 in enumerate(s2):
        current = [row + 1]
        for col, char1 in enumerate(s1):
            if char1 == char2:
                current.append(previous[col])
            else:
                current.append(1 + min(previous[col],
                                       previous[col + 1],
                                       current[-1]))
        previous = current
    return previous[-1]
def float_seconds_from_string(str_hh_mm_ss_ms):
    """Convert a string in format hh:mm:ss.ms to seconds as a float.

    Args:
        str_hh_mm_ss_ms (str): format hh:mm:ss.ms

    Returns:
        float: timedelta in seconds
    """
    hours, minutes, seconds = (float(part) for part in str_hh_mm_ss_ms.split(':'))
    return seconds + minutes * 60 + hours * 60 * 60
def YUVtoYDbDr(Y, U, V):
    """
    convert YUV (PAL) to YDbDr
    :param Y: Y value (0;1)
    :param U: U value (-0.436-0.436)
    :param V: V value (-0.615-0.615)
    :return: YDbDr tuple (Y 0;1, D -1.333-1.333)
    """
    # Y passes through unchanged; Db/Dr are linear rescalings of U/V.
    return Y, 3.059 * U, -2.169 * V
def c_bool_value(value):
    """Return the C representation of a Boolean value."""
    if value:
        return 'true'
    return 'false'
def first_true(iterable, default=False, pred=None):
    """Return the first true value in the iterable, or *default* if none.

    If *pred* is not None, return the first item for which pred(item)
    is true instead.

    :param iterable: source of values
    :param default: value to return if no true value is found
    :param pred: optional predicate applied to each value
    """
    # first_true([a,b,c], x) --> a or b or c or x
    # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
    for item in filter(pred, iterable):
        return item
    return default
def simple_shunting_yard(sequence, operators_dict, operators_precedence):
    """Simplified shunting yard algorithm by Djikstra to convert inline to RPN

    Despite the name, the expression is fully *evaluated* while being
    converted: numeric tokens go onto the output queue as ints, and each
    popped operator immediately reduces its two operands, so the single
    remaining queue entry is returned as the result.

    :param sequence: mutable list of tokens (consumed destructively)
    :param operators_dict: maps operator token -> binary callable
    :param operators_precedence: maps operator token -> precedence number

    NOTE(review): popop applies op(second_pop, first_pop), i.e. the
    operands arrive in right-then-left order — fine for commutative
    operators; confirm operators_dict compensates for non-commutative
    ones (e.g. subtraction).
    """
    outqueue = []
    opstack = []
    def popop():
        # Reduce: pop one operator and its two operands, push the result.
        op = opstack.pop()
        outqueue.append(operators_dict[op](outqueue.pop(), outqueue.pop()))
    while sequence:
        token = sequence.pop(0)
        if token.isnumeric():
            outqueue.append(int(token))
        elif token == '(':
            opstack.append(token)
        elif token == ')':
            # Unwind back to the matching parenthesis.
            while opstack[-1] != '(':
                popop()
            opstack.pop()  # Pop left parenthesis
        else:
            precedence = operators_precedence[token]
            # Higher/equal-precedence operators on the stack bind first.
            while (opstack and opstack[-1] != '(' and
                   operators_precedence[opstack[-1]] >= precedence):
                popop()
            opstack.append(token)
    while opstack:
        popop()
    assert len(outqueue) == 1
    return outqueue[0]
def head_tail(line):
    """Return (first word of 'line', rest of 'line').

    The tail is '' when the line holds a single word. An entirely empty /
    whitespace line raises StopIteration, matching the original contract.
    """
    parts = iter(token.strip() for token in line.split(None, 1))
    head = next(parts)
    tail = next(parts, '')
    return head, tail
def format_size(size_in_bytes, leading=8, trailing=1):
    """
    Format a byte count as a right-aligned string with a B/KB/MB/GB unit.

    :param size_in_bytes: integer byte count
    :param leading: field width for the number
    :param trailing: decimal places (forced to 0 at or below 1024 bytes)
    :return: formatted string
    """
    if size_in_bytes <= 1024:
        trailing = 0
    fmt = '{:%d.%df}' % (leading, trailing)
    kib = 1024
    mib = 1024 * 1024
    gib = 1024 * 1024 * 1024
    if size_in_bytes >= gib:
        return fmt.format(size_in_bytes / gib) + ' GB'
    if size_in_bytes >= mib:
        return fmt.format(size_in_bytes / mib) + ' MB'
    if size_in_bytes >= kib:
        return fmt.format(size_in_bytes / kib) + ' KB'
    return fmt.format(size_in_bytes) + ' B'
def detect_robot_context(code: str, cursor_pos: int):
    """Return the robot code context at the cursor position
    (__root__, __settings__, __tasks__ or __keywords__)."""
    before_cursor = code[:cursor_pos]
    current_line = before_cursor.rsplit("\n")[-1]
    # A section header looks like "*** Name ***"; splitting from the right
    # on "***" isolates the most recent section name.
    sections = before_cursor.rsplit("***", 2)
    if len(sections) != 3:
        return "__root__"
    section_name = sections[1].strip().lower()
    if section_name == "settings":
        return "__settings__"
    # An unindented line is top-level regardless of section.
    if current_line.lstrip() == current_line:
        return "__root__"
    if section_name in ("tasks", "test cases"):
        return "__tasks__"
    if section_name == "keywords":
        return "__keywords__"
    return "__root__"
def is_exposed(func):
    """Check whether the function is marked as RPC exposed
    (via an 'exposed' attribute; missing attribute means False)."""
    exposed = getattr(func, 'exposed', False)
    return exposed
def _order_tiebreak(winners, n=1): """ Given an iterable of possibly tied `winners`, select the lowest-numbered `n` candidates. """ return sorted(winners)[:n]
def quote_string(prop):
    """
    RedisGraph strings must be quoted: wrap a string prop in double quotes
    unless it already starts/ends with one. Non-strings pass through.

    Robustness fix: an empty string previously raised IndexError on
    ``prop[0]``; it now returns '""'.
    """
    if not isinstance(prop, str):
        return prop
    if not prop:
        return '""'
    if prop[0] != '"':
        prop = '"' + prop
    if prop[-1] != '"':
        prop = prop + '"'
    return prop
def _IsSelfClosing(lines): """Given pretty-printed xml, returns whether first node is self-closing.""" for l in lines: idx = l.find('>') if idx != -1: return l[idx - 1] == '/' raise RuntimeError('Did not find end of tag:\n%s' % '\n'.join(lines))
def count_occupied(seats):
    """Count the '#' (occupied) markers across all rows of seats."""
    total = 0
    for row in seats:
        total += row.count('#')
    return total
def set_odd_parity(buf):
    """
    Modify the supplied bytearray to set odd parity on the last bit of
    each byte. This function requires the last (parity) bit is zero.

    :param bytearray buf: a 192-bit buffer containing a 168-bit key
    :rtype: bytearray
    """
    for idx, byte in enumerate(buf):
        # Fold the byte's parity into its low 4 bits.
        folded = (byte ^ (byte >> 4)) & 0xf
        # 0x9669 is a 16-entry bit table: bit n is the parity complement
        # needed for a nibble with value n.
        buf[idx] |= (0x9669 >> folded) & 1
    return buf
def countDigits(number: int):
    """Count the characters in the decimal representation of a number.

    Args:
        number (int): any number

    Returns:
        int: number of digits (NOTE: for negatives the '-' sign is
        counted too, matching len(str(number)))
    """
    digits = str(number)
    return len(digits)
def get_filename(file):
    """Return the final path component of *file* (text after the last '/').

    Dead-code fix: ``str.split('/')`` always returns at least one element,
    so the former ``except IndexError: "Unknown file"`` branch was
    unreachable and has been removed. Behavior for all string inputs is
    unchanged.
    """
    return file.split('/')[-1]
def time_units_from_node(node):
    """Return standard time units string from the node's stripped text,
    or 'unknown' for a missing node / empty text."""
    if node is None or node.text in ('', '\n'):
        return 'unknown'
    return node.text.strip()
def s_star_index_node(i):
    """
    Given an index in the sequence, get the name of the node corresponding
    to this index in s_star.

    :param i: Index in the sequence.
    :return: Name of node, e.g. s_star_index_node(3) == 's_star_3'.
    """
    return f's_star_{i}'
def extract_items(topitems_or_libraryitems):
    """
    Extract a sequence of items from a sequence of TopItem or
    LibraryItem objects (reads each object's .item attribute).
    """
    return [entry.item for entry in topitems_or_libraryitems]
def create_transaction(day, value, type, description):
    """
    :return: a dictionary that contains the data of a transaction
    """
    return dict(day=day, value=value, type=type, description=description)
def default_settings_structure(settings):
    """
    Define default settings for optimisation.

    Parameters
    ----------
    'settings' (dictionary): default settings for the genetic algorithm

    Returns
    -------
    'settings' (dictionary): the same dict updated in place with the
    default SDM settings (used for run_filter_model, userdefined)
    """
    settings.update(interference="squaredproduct", logit=False, threshold="max")
    return settings
def get_hosts_ram_usage_ceilo(ceilo, hosts_ram_total):
    """Get (real) ram usage for each host from ceilometer.

    :param ceilo: A Ceilometer client.
    :type ceilo: *
    :param hosts_ram_total: A dictionary of (host, total_ram)
    :type hosts_ram_total: dict(str: *)
    :return: A dictionary of (host, ram_usage); hosts with no samples
             are omitted from the result.
    :rtype: dict(str: *)
    """
    hosts_ram_usage = dict()  # dict of (host, ram_usage)
    for host in hosts_ram_total:  # keys are actually hostname_nodename
        # Ceilometer resource ids are "<hostname>_<nodename>"; here both
        # parts are the same host key.
        host_res_id = "_".join([host, host])
        # Latest sample of ram usage, in percentage of total.
        host_mem_usage = ceilo.samples.list(meter_name='host.memory.usage',
                                            limit=1,
                                            q=[{'field': 'resource_id',
                                                'op': 'eq',
                                                'value': host_res_id}])
        if host_mem_usage:
            host_mem_usage = host_mem_usage[0].counter_volume
            host_mem_total = hosts_ram_total[host]
            # Convert percentage to an absolute (truncated int) amount.
            hosts_ram_usage[host] = (int)((host_mem_usage/100)*host_mem_total)
    return hosts_ram_usage
def convert_refdex_to_single_filename_refdex(input_refdex):
    """Collapse each 'filenames' list to its last entry under 'filename'.

    Note that this makes a partially shallow copy: entries without
    'filenames' are shared with the input dict, not copied.
    """
    converted = {}
    for key, entry in input_refdex.items():
        if 'filenames' in entry:
            converted[key] = {
                'filename': entry['filenames'][-1],
                'anchor': entry['anchor'],
            }
        else:
            converted[key] = entry
    return converted
def get_bool(value):
    """Get boolean from string; raises ValueError for unrecognised input."""
    normalized = value.upper()
    if normalized in ("1", "T", "TRUE"):
        return True
    if normalized in ("0", "F", "FALSE"):
        return False
    raise ValueError(f"Unable to convert {value} to boolean.")
def _xml_escape(data): """Escape &, <, >, ", ', etc. in a string of data.""" # ampersand must be replaced first from_symbols = '&><"\'' to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split()) for from_, to_ in zip(from_symbols, to_symbols): data = data.replace(from_, to_) return data
def overlap(rect1, rect2):
    """Intersection-over-union of two [y1, x1, y2, x2] boxes
    (inclusive integer pixel coordinates, hence the +1 terms)."""
    height1 = abs(rect1[0] - rect1[2]) + 1
    width1 = abs(rect1[1] - rect1[3]) + 1
    height2 = abs(rect2[0] - rect2[2]) + 1
    width2 = abs(rect2[1] - rect2[3]) + 1
    area1 = width1 * height1
    area2 = width2 * height2
    inter = 0
    # Boxes intersect only when each one's max exceeds the other's min.
    if (rect1[2] > rect2[0] and rect2[2] > rect1[0]
            and rect1[3] > rect2[1] and rect2[3] > rect1[1]):
        top = max(rect1[0], rect2[0])
        left = max(rect1[1], rect2[1])
        bottom = min(rect1[2], rect2[2])
        right = min(rect1[3], rect2[3])
        inter = (right - left + 1) * (bottom - top + 1)
    return inter / float(area1 + area2 - inter)
def recvall(sock, size):
    """Receive exactly `size` bytes from a socket connection.

    Raises RuntimeError if the peer closes before `size` bytes arrive.
    """
    received = b""
    while len(received) < size:
        chunk = sock.recv(size - len(received))
        received += chunk
        # Empty chunk means the connection closed early.
        if not chunk:
            raise RuntimeError("did not receive data from socket "
                               f"(size {size}, got only {received !r})")
    return received
def check_int(integer, name):
    """Verify that an object is an integer, or losslessly coercible to one.

    Parameters
    ----------
    integer : int
        The value to check.
    name : str
        The name to print in the error message if it fails.
    """
    if not isinstance(integer, int):
        coerced = int(integer)
        # Reject anything that loses information when coerced (e.g. 2.5).
        if coerced != integer:
            raise ValueError(f"'{name}' must be an integer.")
        return coerced
    return integer
def footprintBridge(ra, dec):
    """
    Special selection for pointings near the SMC Northern Overdensity
    (SMCNOD). Uses & so it also works elementwise on array-like inputs.
    """
    in_ra_band = (ra > 30.) & (ra < 60.)
    return in_ra_band & (dec < -65.)
def __getItemIndex__(testObj, firstElementValue): """ Returns the index of list testObj whose first element is equal to firstElementValue. If it cannot be found, then the next free index is returned. """ assert isinstance(testObj, list), 'The provided object is not a list!' for itemIndex in range(len(testObj)): if len(testObj[itemIndex]) > 0 and testObj[itemIndex][0] == firstElementValue: return itemIndex return len(testObj)
def isPalindrome(string):
    """
    Checks if the given string is a palindrome.

    Params
    ======
        string: str

    Returns
    =======
        result: bool
    """
    # Two-pointer walk from both ends toward the middle; the pointers
    # meeting (odd length) or crossing (even length) means all mirrored
    # pairs matched.
    left, right = 0, len(string) - 1
    while left < right:
        if string[left] != string[right]:
            return False
        left += 1
        right -= 1
    return True
def ensure_ref_position_is_valid(ref_position, num_alts, param_title):
    """
    Ensure that `ref_position` is None or an integer in
    `[0, num_alts - 1]`. If None, ensure that intercepts are not the
    parameters being estimated. Raise a helpful error otherwise.

    Parameters
    ----------
    ref_position : int or None
        Position in the parameter array constrained for identification.
    num_alts : int
        Total number of alternatives in the universal choice set.
    param_title : {'intercept_names', 'shape_names'}
        Name of the parameters being estimated with a constraint.

    Returns
    -------
    None.
    """
    assert param_title in ['intercept_names', 'shape_names']
    # Type check first, mirroring the original check order.
    if not (ref_position is None or isinstance(ref_position, int)):
        msg = "ref_position for {} must be an int or None."
        raise TypeError(msg.format(param_title))
    # Intercepts require at least one constrained position.
    if param_title == "intercept_names" and ref_position is None:
        raise ValueError("At least one intercept should be constrained.")
    if ref_position is not None and not (0 <= ref_position <= num_alts - 1):
        raise ValueError("ref_position must be between 0 and num_alts - 1.")
    return None
def standardise(field_name):
    """
    Standardise a field name to lower case for case-insensitive matching;
    returns None for empty or non-string input.
    """
    if isinstance(field_name, str) and field_name:
        return field_name.lower().strip()
    return None
def _print_res(res): """Prints the results on the screen :param res: The dictionary of the results. :returns: 0 for success. """ tot_length = 120 n_fields = 5 fmt = ''.join(['{:^', str(tot_length // n_fields), '}']) * n_fields print(fmt.format( 'Parameter', 'Result', 'Value', 'Derivative', 'Condition N' )) for k, v in res.items(): print( fmt.format(*(k + tuple(v))) ) continue return 0
def task_combine(iterator, creator_func, combiner_func):
    """
    Combine elements of an iterator (partition) by key in a dictionary.

    :param iterator: iterable of (key, value) pairs
    :param creator_func: applied to the value on its key's first occurrence
    :param combiner_func: combines the accumulator with later occurrences
    :return: dict of combined values per key
    """
    combined = {}
    for key, value in iterator:
        if key in combined:
            combined[key] = combiner_func(combined[key], value)
        else:
            combined[key] = creator_func(value)
    return combined
def poly4(x, b0, b1, b2, b3, b4):
    """
    Taylor polynomial for fit:
    b1 = GD, b2 = GDD / 2, b3 = TOD / 6, b4 = FOD / 24
    """
    # Terms summed left-to-right, matching plain addition order.
    return sum((b0, b1 * x, b2 * x ** 2, b3 * x ** 3, b4 * x ** 4))
def apply_to_structure(op, param_dict):
    """Apply the op operation to every value in the dict, returning a new dict."""
    return {key: op(value) for key, value in param_dict.items()}
def is_list_unique(x):
    """Return True if all elements are distinct (elements must be hashable)."""
    return len(set(x)) == len(x)
def remove_link_tags(string):
    """Replace every <a href...>...</a> span in string with ' <i>link</i> '."""
    start = string.find("<a href")
    while start != -1:
        close = string.find("</a>", start)
        # Splice out everything from the opening tag through '</a>'.
        string = string[:start] + " <i>link</i> " + string[close + 4:]
        start = string.find("<a href")
    return string
def preconvert_float(value, name, lower_limit, upper_limit):
    """
    Converts the given `value` to an acceptable float by the wrapper.

    Bug fix: float *subclass* instances were previously converted with
    ``int(value)``, truncating the fractional part and producing an int —
    contradicting the documented float return. They are now converted
    with ``float(value)``.

    Parameters
    ----------
    value : `Any`
        The value to convert.
    name : `str`
        The name of the value.
    lower_limit : `float`
        The minimal value of `value`.
    upper_limit : `float`
        The maximal value of `value`.

    Returns
    -------
    value : `float`

    Raises
    ------
    TypeError
        If `value` was not given as a `float` instance (and has no
        ``__float__``).
    ValueError
        If `value` is less than `lower_limit`, or is higher than the
        `upper_limit`.
    """
    if type(value) is float:
        pass
    elif isinstance(value, float):
        # Normalise float subclasses to plain float (was int(value)).
        value = float(value)
    else:
        float_converter = getattr(type(value), '__float__', None)
        if float_converter is None:
            raise TypeError(f'`{name}` can be `float` instance, got {value.__class__.__name__}.')
        value = float_converter(value)
    if (value < lower_limit) or (value > upper_limit):
        raise ValueError(f'`{name}` can be between {lower_limit} and {upper_limit}, got {value!r}.')
    return value
def test_bit(int_type, offset): """Return HIGH if the bit at 'offset' is one, otherwise return LOW.""" mask = 1 << offset return int_type & mask
def roundup(val, width):
    """Round val up to the next multiple of (width / 8) bytes
    (width given in bits; must make width>>3 a power of two)."""
    mask = (width >> 3) - 1
    return (val + mask) & ~mask
def _verify_none_type(args): """Function for safely handling if a user specifies "None" from the command line. ---------------------------------------------------------------------------- Args: args: argparse object returned by ArgumentParser.parse_args() Returns: argparse object returned by ArgumentParser.parse_args() """ for arg in vars(args): if getattr(args, arg) == 'None' or getattr(args, arg) == 'none' or \ getattr(args, arg) == 'NONE': # Set argument value to None. setattr(args, arg, None) return args
def CsvEscape(text):
    """Escape a data entry for consistency with the CSV format.

    Rules applied:
      - embedded double quotes are doubled ("")
      - entries containing commas, quotes, line breaks, or leading /
        trailing spaces are wrapped in double quotes

    Args:
        text: str Data entry.

    Returns:
        str CSV encoded data entry ('' for falsy input).
    """
    if not text:
        return ''
    if '"' in text:
        text = text.replace('"', '""')
    needs_quoting = (
        text == '' or
        ',' in text or
        '"' in text or
        '\n' in text or
        '\r' in text or
        text[0] == ' ' or
        text[-1] == ' '
    )
    if needs_quoting:
        text = '"%s"' % text
    return text
def sanitize_path_input(user_input: str) -> bool:
    """Validate user input that will be used to build a path, intended to
    guard against RCE and XSS attacks.

    Args:
        user_input: any user input which is used to write a path

    Returns:
        bool: currently always True — see SECURITY NOTE below.
    """
    # SECURITY NOTE(review): validation is disabled, so every input —
    # including path traversal sequences — is accepted. The whitelist
    # regex below was removed because it broke some TAK clients; until a
    # compatible pattern is agreed, callers must NOT trust this check and
    # should sanitise paths themselves.
    return True # temporarily removed as it broke some tak clients
    #if re.match("^[A-Za-z0-9_-]*$", user_input):
    # return True
    #else:
    # return False
def check_letters_line(row):
    """
    Check whether the input is a legal four-letter string and organize it.

    :param row: (str) Input letters
    :return: (str) input with spaces removed, lower-cased — when legal
    :return: (bool) False — when not exactly four alphabetic characters
    """
    cleaned = row.replace(' ', '').lower()
    if cleaned.isalpha() and len(cleaned) == 4:
        return cleaned
    return False
def reverse(s):
    """Given a sequence, return a string of its elements in reverse order."""
    # join(reversed(...)) matches the original list-then-slice approach:
    # a sequence of strings always comes back as one joined string.
    return ''.join(reversed(s))
def longest_common_subsequence(str1: str, str2: str) -> int:
    """Finds the length of the longest common subsequence of two strings.

    Parameters : Two strings str1 and str2
    Returns : length of the LCS found
    """
    width = len(str2)
    # Rolling single-row DP: `prev` holds the LCS lengths against every
    # prefix of str2 for the prefix of str1 processed so far.
    prev = [0] * (width + 1)
    for ch1 in str1:
        curr = [0] * (width + 1)
        for j, ch2 in enumerate(str2, start=1):
            if ch1 == ch2:
                # Matching characters extend the LCS of both prefixes by one.
                curr[j] = prev[j - 1] + 1
            else:
                # Otherwise take the best of dropping a character from
                # either string.
                curr[j] = max(prev[j], curr[j - 1])
        prev = curr
    return prev[width]
def is_null_slice(obj):
    """Return True when obj is the all-default slice ``[:]`` (a null slice)."""
    if not isinstance(obj, slice):
        return False
    return obj.start is None and obj.stop is None and obj.step is None
def find_if(cond, seq):
    """Return the first x in seq such that cond(x) holds, if there is one.
    Otherwise return None.
    """
    # next() with a default stops at the first match, just like the loop.
    return next((item for item in seq if cond(item)), None)
def star(func, args):
    """Allow function to take piped sequence as arguments.

    Args:
        func: any function
        args: a sequence of positional arguments for the function

    Returns:
        evaluated function

    >>> star(lambda x, y: x * y, [2, 3])
    6
    """
    # Fix: the original doctest called star(func)([2, 3]), which passes only
    # one argument to a two-argument function and would raise TypeError.
    return func(*args)
def to_dict(param_list):
    """Convert a list of key=value strings to dict[key]=value.

    Args:
        param_list: iterable of strings of the form ``key=value``. The value
            part may itself contain ``=`` characters.

    Returns:
        dict mapping each key to its value, or None when ``param_list`` is
        falsy (None or empty), preserving the original contract.
    """
    if not param_list:
        return None
    # Fix: split on the first '=' only, so a value like 'k=v=w' keeps its
    # embedded '=' (the original unbounded split raised ValueError there).
    return dict(param.split('=', 1) for param in param_list)
def remove_troublesome_chars(string: str):
    """Remove chars that cause trouble when pushed into postgres.

    Double quotes and apostrophes are dropped; newlines become spaces.
    Non-string inputs are returned unchanged.
    """
    # Exact type check preserved: str subclasses are also processed only
    # when type(string) is exactly str, matching the original.
    if type(string) is not str:
        return string
    # One C-level pass instead of three chained .replace() calls.
    return string.translate(str.maketrans({'"': '', "'": '', '\n': ' '}))
def extractAlgoAndDigest(checksum):
    """Given a checksum string formatted as ``<algo>:<digest>`` returns the
    tuple ``(algo, digest)``.

    ``<algo>`` is expected to be `SHA256`, `SHA512`, or `MD5`.
    ``<digest>`` is expected to be the full length hexdecimal digest.

    Returns ``(None, None)`` when checksum is None.

    :raises ValueError: if checksum is incorrectly formatted.
    """
    if checksum is None:
        return None, None
    parts = checksum.split(':')
    if len(parts) != 2:
        raise ValueError("invalid checksum '%s'. Expected format is '<algo>:<digest>'." % checksum)
    algo, digest = parts
    expected_algos = ['SHA256', 'SHA512', 'MD5']
    if algo not in expected_algos:
        raise ValueError("invalid algo '%s'. Algo must be one of %s" % (algo, ", ".join(expected_algos)))
    # Each supported algorithm has a fixed hex-digest length.
    expected_digest_length = {'SHA256': 64, 'SHA512': 128, 'MD5': 32}
    if len(digest) != expected_digest_length[algo]:
        raise ValueError("invalid digest length %d. Expected digest length for %s is %d" % (len(digest), algo, expected_digest_length[algo]))
    return algo, digest
def control_start(cmd):
    """Controls the start state.

    Returns "instructions" for an affirmative answer (y/yes, any case),
    otherwise "game".
    """
    return "instructions" if cmd.lower() in ("y", "yes") else "game"
def color_code_for_rgb(red, green, blue):
    """Returns the hexadecimal color code given rgb values."""
    # Two uppercase hex digits per channel, zero-padded.
    return f"{red:02X}{green:02X}{blue:02X}"
def stitch_health_data(health_data):
    """Fill in time steps with same infection status.

    For each node, when two consecutive recorded time steps carry the same
    status, every missing integer step between them is filled with that
    status. The input dict is mutated in place and also returned.
    """
    for states in health_data.values():
        # Snapshot the recorded times before inserting new keys.
        recorded = sorted(states.keys())
        for earlier, later in zip(recorded, recorded[1:]):
            if states[earlier] == states[later]:
                for step in range(earlier + 1, later):
                    states[step] = states[later]
    return health_data
def _extension_element_validator(values): """Validate extension element values. From https://www.hl7.org/fhir/extensibility.html#Extension : "An extension SHALL have either a value (i.e. a value[x] element) or sub-extensions, but not both. If present, the value[x] element SHALL have content (value attribute or other elements)." """ err_msg = "An extension SHALL have either a value or sub-extensions, but not both." if values.get("extension") is not None: for key, value in values.items(): if key.startswith("value_"): assert value is None, err_msg return values
def valid_name(name, error_chars=()):
    """Check that no forbidden character appears in a name.

    Historically the blacklist was hard-coded empty ("voorlopig mag alles" —
    for now everything is allowed). The blacklist is now a parameter so
    callers can opt in, while the default stays fully permissive.

    Args:
        name: the name to validate.
        error_chars: iterable of characters that must not occur in name
            (defaults to empty, i.e. every name is valid).

    Returns:
        True when none of the forbidden characters occur in name, else False.
    """
    return not any(char in name for char in error_chars)
def get_num_suffix(number, max_number):
    """Returns formatted number with number of padding zeros depending on
    maximum number, used for creating suffix for data series. Does not
    include the suffix separator.

    :param number: number to be formatted as map suffix
    :param max_number: maximum number of the series to get number of digits

    >>> get_num_suffix(10, 1000)
    '0010'
    >>> get_num_suffix(10, 10)
    '10'
    """
    # Pad with zeros to the width of the largest number in the series.
    width = len(str(max_number))
    return str(number).zfill(width)
def assure_list_from_str(s, sep='\n'):
    """Given a multiline string convert it to a list; return None if empty.

    Parameters
    ----------
    s: str or list
        A list is passed through unchanged; a string is split on `sep`.
    sep: str
        Separator used when splitting a string (default newline).
    """
    if not s:
        return None
    return s if isinstance(s, list) else s.split(sep)
def factorial(n):
    """Return the factorial of n.

    Parameters
    ----------
    n : an integer of which the factorial is evaluated.

    Returns
    -------
    result : The factorial of n (1 when n < 2, as in the original).
    """
    product = 1
    # Multiply the factors downward; range is empty for n < 2.
    for factor in range(n, 1, -1):
        product *= factor
    return product
def fix_quotes(tree: str):
    """Replace all quotes in the tree with single apostrophes."""
    # TeX-style paired quotes first, then plain double quotes.
    for old, new in (("''", "'"), ("``", "'"), ('"', "'")):
        tree = tree.replace(old, new)
    return tree
def car(lst):
    """Return the first element of a list.

    Non-lists and empty lists are returned unchanged.
    """
    # Exact type check kept on purpose: list subclasses fall through
    # and are returned whole, matching the original behavior.
    if type(lst) == list and len(lst) > 0:
        return lst[0]
    return lst
def backends_mapping(backend):
    """Mapping our custom backend to the service.

    Returns a dict routing the root path "/" to the given backend.
    """
    mapping = {"/": backend}
    return mapping
def encode_edd25519_xmr_const(arr):
    """Converts Monero based ed25519 constants to int32_t constants.

    :param arr: sequence of signed limb values (at most 10 entries)
    :return: list of limbs with negative values borrowed from the next limb
    """
    # Alternating limb widths of the 25.5-bit radix representation.
    bits = [26, 25, 26, 25, 26, 25, 26, 25, 26, 25]
    limbs = []
    carry = 0
    for idx, value in enumerate(arr):
        limb = value + carry
        if value < 0:
            # Lift the negative limb into range and borrow from the next one.
            limb += 1 << bits[idx]
            carry = value >> bits[idx]  # arithmetic shift preserves the sign
        else:
            carry = 0
        limbs.append(limb)
    return limbs
def identify_contentType(url):
    """
    Given a URL for a content, it identifies the type of the content

    :param url(str): URL
    :returns: Type of the content: "youtube", "pdf", "ecml", or "unknown"
    """
    # Fix: 'wav' appeared twice in the original extension list; duplicate
    # removed. str.endswith accepts a tuple, replacing the any() scan.
    ecml_extensions = ('mp3', 'wav', 'jpeg', 'zip', 'jpg', 'mp4',
                       'webm', 'ecar', 'png')
    if ('youtu.be' in url) or ('youtube' in url):
        return "youtube"
    if url.endswith('pdf'):
        return "pdf"
    if url.endswith(ecml_extensions):
        return "ecml"
    return "unknown"
def isLeapYear(year):
    """Given a year returns True if it's a leap year, else False."""
    # Divisible by 4, except century years, except every fourth century.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)