content
stringlengths
42
6.51k
def Euler005(n):
    """Solution of fifth Euler problem.

    Return the smallest positive number evenly divisible by every integer
    from 1 to ``n`` (i.e. the least common multiple of 1..n).

    :param n: inclusive upper bound of the divisor range.
    :return: lcm(1, 2, ..., n)
    """
    from math import gcd

    # The original trial loop tested every multiple of n; the identity
    # lcm(a, b) = a * b // gcd(a, b) gives the same answer in O(n) steps.
    result = 1
    for i in range(2, n + 1):
        result = result * i // gcd(result, i)
    return result
def facet_operator(q_key, field_lookup):
    """Return the field lookup operator to use for a facet query key.

    ``field_lookup`` is honoured only when it is valid for the given
    ``q_key``; otherwise a safe default ("iexact" or "exact") is returned.
    """
    value_lookups = {
        "exact", "iexact", "icontains", "contains", "in",
        "startswith", "istartswith", "endswith", "iendswith",
    }
    numeric_lookups = {"exact", "gt", "gte", "lt", "lte"}
    date_lookups = {"day", "month", "year", "iso_year",
                    "gt", "gte", "lt", "lte", "exact"}

    if q_key in ("type", "subtype"):
        return "iexact"
    if q_key == "value":
        return field_lookup if field_lookup in value_lookups else "iexact"
    if q_key in ("indexable_int", "indexable_float"):
        return field_lookup if field_lookup in numeric_lookups else "exact"
    if q_key in ("indexable_date_range_start", "indexable_date_range_year"):
        return field_lookup if field_lookup in date_lookups else "exact"
    return "iexact"
def lower(string):
    """Return the given value, coerced to ``str``, in lower case."""
    text = str(string)
    return text.lower()
def GetRegressionRangeFromSpike(spike_index, versions):
    """Get the regression range based on spike_index and versions.

    Returns the (previous_version, spiked_version) pair, or None when
    spike_index has no predecessor or is out of range.
    """
    if 1 <= spike_index < len(versions):
        return (versions[spike_index - 1], versions[spike_index])
    return None
def add_token(token_sequence: str, tokens: str) -> str:
    """Adds the tokens from 'tokens' that are not already contained in
    `token_sequence` to the end of `token_sequence`::

        >>> add_token('', 'italic')
        'italic'
        >>> add_token('bold italic', 'large')
        'bold italic large'
        >>> add_token('bold italic', 'bold')
        'bold italic'
        >>> add_token('red thin', 'stroked red')
        'red thin stroked'
    """
    # Compare whole tokens, not substrings: the original used str.find(),
    # which wrongly treated e.g. 'old' as already present in 'bold'.
    present = set(token_sequence.split())
    for tk in tokens.split(' '):
        if tk and tk not in present:
            token_sequence += ' ' + tk
            present.add(tk)
    return token_sequence.lstrip()
def _format_servers_list_networks(networks): """Return a formatted string of a server's networks :param networks: a Server.networks field :rtype: a string of formatted network addresses """ output = [] for (network, addresses) in networks.items(): if not addresses: continue addresses_csv = ', '.join(addresses) group = "%s=%s" % (network, addresses_csv) output.append(group) return '; '.join(output)
def list_any_items_inside(_list, *items):
    """Return True when at least one of ``items`` occurs in ``_list``."""
    for candidate in items:
        if candidate in _list:
            return True
    return False
def convert_py_to_cpp_namespace(python_namespace: str) -> str:
    """Convert a Python namespace name to a C++ namespace.

    Parameters
    ----------
    python_namespace : `str`
        A string describing a Python namespace. For example,
        ``'lsst.example'``.

    Returns
    -------
    cpp_namespace : `str`
        A C++ namespace. For example: ``'lsst::example'``.
    """
    return "::".join(python_namespace.split("."))
def _Flatten(list_of_list): """Creates set of all items in the sublists.""" flattened = set() for item in list_of_list: flattened.update(item) return flattened
def _UpperCaseAlleles(alleles): """Convert the list of allele strings to upper case.""" upper_alleles = [] for allele in alleles: upper_alleles.append(str(allele).upper()) return upper_alleles
def dataset_split_names(dataset):
    """Convention of naming dataset splits: append '_1' and '_2'."""
    first, second = dataset + '_1', dataset + '_2'
    return first, second
def debug_skillet(skillet: dict) -> list:
    """Verify the structure of a skillet.

    :param skillet: loaded skillet
    :return: list of error / warning strings; empty when no issues found
    """
    if skillet is None:
        return ['Skillet is blank or could not be loaded']
    if type(skillet) is not dict:
        return ['Skillet is malformed']

    errs = []

    # the labels stanza must exist and declare a collection
    if 'labels' not in skillet:
        errs.append('No labels attribute present in skillet')
    elif 'collection' not in skillet['labels']:
        errs.append('No collection defined in skillet')

    if 'label' not in skillet:
        errs.append('No label attribute in skillet')

    if 'type' not in skillet:
        errs.append('No type attribute in skillet')
    else:
        valid_types = ('panos', 'panorama', 'panorama-gpcs', 'pan_validation',
                       'python3', 'rest', 'terraform', 'template', 'workflow',
                       'docker')
        if skillet['type'] not in valid_types:
            errs.append(f'Unknown type {skillet["type"]} in skillet')

    return errs
def rotated_array_search(input_list, number):
    """
    Find the index by searching in a rotated sorted array

    The array is assumed to be sorted ascending then rotated at an unknown
    pivot; a modified binary search keeps O(log n) time.

    Args:
       input_list(array), number(int): Input array to search and the target
    Returns:
       int: Index or -1
    """
    first, last = 0, len(input_list) - 1
    try:
        while first<=last:
            mid = (first + last)//2
            if input_list[mid] == number :
                return mid
            elif input_list[mid]<number:
                # Target is larger than mid: when the left end is <= target
                # yet greater than mid, the left half is the rotated one and
                # the target can only be there; otherwise search right.
                if input_list[first]<= number and input_list[first] > input_list[mid]:
                    last = mid - 1
                else:
                    first = mid + 1
            elif input_list[mid]>number:
                # Target is smaller than mid: when the right end is >= target
                # yet smaller than mid, the right half is the rotated one and
                # the target can only be there; otherwise search left.
                if input_list[last] >= number and input_list[last] < input_list[mid]:
                    first = mid + 1
                else:
                    last = mid - 1
        return -1
    except:
        # NOTE(review): bare except maps *any* failure (e.g. None input,
        # incomparable types) to -1 — consider narrowing to
        # (TypeError, IndexError).
        return -1
def compute_color_for_labels(label):
    """
    Simple function that adds fixed color depending on the class #
    (1 -> green, 2 -> red, anything else -> white; BGR-ordered tuples)
    """
    fixed_colors = {
        1: (0, 255, 0),
        2: (0, 0, 255),
    }
    return fixed_colors.get(label, (255, 255, 255))
def merge_dict(d1: dict, d2: dict, keep_none=False) -> dict:
    """Merge d2 into d1, ignoring extra keys in d2.

    Returns a new dict; neither input is mutated. Unless ``keep_none`` is
    True, None values in d2 do not overwrite existing values.
    """
    merged = d1.copy()
    for key in (k for k in d1 if k in d2):
        replacement = d2[key]
        if replacement is None and not keep_none:
            continue
        merged[key] = replacement
    return merged
def RB_decay_to_gate_fidelity(rb_decay, dimension):
    """
    Derived from eq. 5 of [RB] arxiv paper. Note that 'gate' here typically
    means an element of the Clifford group, which comprise standard rb
    sequences.

    :param rb_decay: Observed decay parameter in standard rb experiment.
    :param dimension: Dimension of the Hilbert space, 2**num_qubits
    :return: The gate fidelity corresponding to the input decay.
    """
    inverse_dim = 1 / dimension
    return inverse_dim - rb_decay * (inverse_dim - 1)
def pkcs7pad(ciphertext, blocksize, value=b'\x04'):
    """
    ciphertext: bytes string, blocksize: int, value: bytes char

    Return ciphertext padded with value until it reach blocksize length.

    NOTE(review): true PKCS#7 pads with the pad-length byte itself, not a
    fixed 0x04 — callers rely on the current default, so it is kept as-is.
    """
    pad_len = blocksize - (len(ciphertext) % blocksize)
    return ciphertext + value * pad_len
def check_analysis_type(options):
    """Determines what type of analysis to do based on the command line options."""
    return 'cterminus' if options['cterm_models'] else 'nterminus'
def fuel_moisture_index(t, h):
    """Compute the Fuel moisture index after Sharples et al. (2009a, 2009b).

    Parameters
    ----------
    t : array
        temperature [C].
    h : array
        relative humidity [%].

    Returns
    -------
    array
        Fuel moisture index value at current timestep.
    """
    spread = t - h
    return 10 - 0.25 * spread
def merge_dicts(d1, d2):
    """Merge ``d2`` into ``d1`` in place and return the mutated ``d1``.

    NOTE: unlike a pure merge, the first argument is modified.
    """
    d1.update(d2)
    return d1
def clamp(value, minimum, maximum):
    """Reset value between minimum and maximum"""
    upper_bounded = min(value, maximum)
    return max(upper_bounded, minimum)
def get_hash_of_dirs(directory):
    """
    Recursively hash the contents of the given directory.

    Args:
        directory (str): The root directory we want to hash.

    Returns:
        A sha512 hex digest (str) of all file contents in the directory,
        or -1 (int, kept for backward compatibility) when the directory
        does not exist.
    """
    import hashlib
    import os

    if not os.path.exists(directory):
        return -1

    sha = hashlib.sha512()
    # Sort directories and files so the digest is deterministic: os.walk
    # order is filesystem-dependent, which previously let the same tree
    # hash differently across platforms/runs.
    for root, dirs, files in os.walk(directory):
        dirs.sort()
        for name in sorted(files):
            with open(os.path.join(root, name), 'rb') as next_file:
                # read in fixed chunks instead of line-splitting binary data
                for chunk in iter(lambda: next_file.read(65536), b''):
                    sha.update(chunk)
    return sha.hexdigest()
def _centroid(x): """ Calculate the centroid from a vectorset x """ return sum(x) / len(x)
def find_paths(paths):
    """
    @param paths: paths to guess.
    @type paths: list
    @return: the first existing directory, or '' when none is found.
    @rtype: str
    """
    import os
    return next((path for path in paths if os.path.isdir(path)), '')
def calcEaster(year):
    """Calculate the date of Easter
    from: http://www.oremus.org/liturgy/etc/ktf/app/easter.html

    param year
    returns date tuple (day, month, year)
    """
    # Golden number: the year's position in the 19-year Metonic cycle.
    gold = year % 19 + 1
    # Solar correction for the Gregorian century leap-year rule.
    sol = (year - 1600) // 100 - (year - 1600) // 400
    # Lunar correction for the slow drift of the lunar cycle.
    lun = (((year - 1400) // 100) * 8) // 25
    # Days from March 21 to the Paschal full moon (before adjustment).
    _pasch = (3 - 11 * gold + sol - lun) % 30
    if (_pasch == 29) or (_pasch == 28 and gold > 11):
        # Adjust the exceptional cases so two full moons never coincide.
        pasch = _pasch - 1
    else:
        pasch = _pasch
    # Day of week of March 21 for this year.
    dom = (year + (year // 4) - (year // 100) + (year // 400)) % 7
    # Easter is the Sunday strictly after the Paschal full moon.
    easter = pasch + 1 + (4 - dom - pasch) % 7
    if easter < 11:
        return (easter + 21, 3, year)  # date falls in March
    else:
        return (easter - 10, 4, year)  # date falls in April
def calculate_distance(p1, p2):
    """
    Calculates distance between two [x,y] coordinates.

    p1: the point (as list containing x and y values) from which to measure
    p2: the point (as list containing x and y values) to which to measure
    returns: the distance
    """
    delta_x, delta_y = p2[0] - p1[0], p2[1] - p1[1]
    squared = delta_x ** 2 + delta_y ** 2
    return squared ** 0.5
def encode_intent(encoded_account_identifier: str, currency_code: str, amount: int) -> str:
    """
    Encode account identifier string(encoded), currency code and amount into
    Diem intent identifier (https://dip.diem.com/dip-5/)
    """
    return f"diem://{encoded_account_identifier}?c={currency_code}&am={amount:d}"
def byteToHex(byteStr):
    """Convert a byte string to its hex string representation e.g. for output.

    Accepts either a ``bytes`` object (Python 3: iteration yields ints) or a
    legacy ``str`` of single characters, returning space-separated upper-case
    hex pairs, e.g. ``'01 FF'``.
    """
    # The original called ord() on every element, which fails on Python 3
    # where iterating bytes yields ints already.
    return ' '.join(
        '%02X' % (b if isinstance(b, int) else ord(b))
        for b in byteStr
    )
def split_list(input_list, num_sub_lists):
    """
    Distribute items round-robin over ``num_sub_lists`` sub lists.

    :param input_list: List to be split
    :param num_sub_lists: Number of sub lists to be split into
    :return: list containing sub lists
    """
    sublists = [[] for _ in range(num_sub_lists)]
    for index, item in enumerate(input_list):
        sublists[index % num_sub_lists].append(item)
    return sublists
def parse_branch_ADO(key, value):
    """Parse a Filter branch according to the following grammar: (snip)

    Dispatches on the type of ``value``:
      dict  -> AND of the parsed sub-branches (empty dict -> "True")
      list  -> OR of alternatives via short_or_ADO (empty list -> "False")
      tuple -> operator/value pair via opval_ADO
      None  -> IsNull test on the column
      other -> simple equality against the quoted value
    """
    if type(value) == type({}):
        if len(value) == 0:
            return "True"
        # Join every sub-condition with "and" and parenthesise the result.
        # NOTE(review): relies on a global `reduce` — on Python 3 this needs
        # `from functools import reduce` somewhere in the module.
        return "(%s)" % reduce(lambda x,y: "%s and %s" % (x, y), [parse_branch_ADO(eachKey, eachValue) for eachKey, eachValue in value.items()])
    elif type(value) == type([]):
        if len(value) == 0:
            return "False"
        return short_or_ADO(key, value)
    elif type(value) == type(()):
        return opval_ADO(key, value)
    else:
        if value is None:
            return "IsNull([%s])" % (key)
        else:
            return "[%s] = %s" % (key, quoted_field(value))
def rejoin_words_with_hyphen(input_str):
    """rejoin words that are split across lines with a hyphen in a text file"""
    return input_str.replace("-\n", "")
def after_space(s):
    """
    Returns a copy of s after the first space

    Parameter s: the string to slice
    Precondition: s is a string with at least one space
    """
    first_space = s.find(' ')
    return s[first_space + 1:]
def get_client_from_scope(scope):
    """HACK: extract client IP from uvicorn scope dict"""
    client = scope.get("client")
    return client[0] if client else ""
def pattern_loc(start, end, mask, segment_labels):
    """
    Considering that a time series is characterized by regions belonging to
    two different labels.

    Args:
        start: The starting index of the pattern.
        end: The ending index of the pattern.
        mask: Binary mask used to annotate the time series.
        segment_labels: List of the two labels that characterize the time
            series; the first is used for True regions, the second for False.

    Return:
        The label name of the region that the pattern is contained in, or
        'both' when the pattern spans both regions.
    """
    if len(segment_labels) != 2:
        raise ValueError('segment_labels must contain exactly 2 labels')
    true_label, false_label = segment_labels[0], segment_labels[1]
    if mask[start] != mask[end]:
        # the pattern spans both regions
        return 'both'
    return true_label if mask[start] == True else false_label
def comp(DNA: str, pat_len: int) -> list:
    """Sort all substrings of pat_len length

    :param DNA: the string to pull substrings from
    :type DNA: str
    :param pat_len: the length of substrings to pull
    :type pat_len: int
    :returns: all substrings, sorted
    :rtype: list (of strs)
    """
    if not DNA:
        raise ValueError('Cannot pull substrings from empty string')
    if pat_len < 1:
        raise ValueError('Substrings must be at least length 1')
    total = len(DNA)
    if total < pat_len:
        raise ValueError('No substrings of that length')
    windows = [DNA[start:start + pat_len] for start in range(total - pat_len + 1)]
    windows.sort()
    return windows
def dict_to_str(d):
    """
    Format a dict as a nice human-readable string,
    e.g. {'a': 3, 'b': 'aa'} becomes "a:3, b:'aa'".

    :param dict d: A dict
    :return str: A nice, formatted version of this dict.
    """
    parts = ['{}:{}'.format(key, repr(val)) for key, val in d.items()]
    return ', '.join(parts)
def Fm(fb, fc):
    """
    Compute a Bark filter around a certain center frequency in bark.

    Args:
        fb (int): frequency in Bark.
        fc (int): center frequency in Bark.

    Returns:
        (float) : associated Bark filter value/amplitude.
    """
    lower, upper = fc - 0.5, fc + 0.5
    if fc - 2.5 <= fb <= lower:
        # rising skirt
        return 10**(2.5 * (fb - fc + 0.5))
    if lower < fb < upper:
        # flat top of the filter
        return 1
    if upper <= fb <= fc + 1.3:
        # falling skirt
        return 10**(-2.5 * (fb - fc - 0.5))
    return 0
def _remove_keys(results: dict, remove: list) -> dict: """ Remove a set of keys from a dictionary. Args: results (dict): The dictionary of results remove (list): the keys to remove Returns: dict: The altered dictionary """ removed = {} for key, val in results.items(): if key not in remove: removed[key] = val return removed
def right_pad(string: str, count: int) -> str:
    """
    Add spaces to the end of a string.

    :param string: The string to pad.
    :param count: The number of characters to which the given string should
        be padded. When ``count`` is shorter than the string, the string is
        returned unchanged.
    """
    if not string:
        raise ValueError("Parameter string is required.")
    if not isinstance(string, str):
        raise TypeError(f"Parameter string must be a string. Received {type(string)}")
    if count < 0:
        raise ValueError(f"Parameter count must be positive. Received {count}")
    padding_needed = count - len(string)
    return string + " " * padding_needed
def constrain_angle(x):
    """Constrains an angle in degrees to the range [0, 360).

    In Python, ``%`` with a positive modulus already returns a non-negative
    result (even for negative ``x``), so the original ``if x < 0`` branch
    was dead code.
    """
    return x % 360
def absoluteValue(value):
    """Calculates the absolute value for a given value.

    :param value: The value to be calculated
    :return: The absolute value of value
    :rtype: int
    """
    return -value if value < 0 else value
def wrapped_list(list, which):
    """Selects an element from a list, wrapping in either direction.

    Python's ``%`` with a positive modulus already yields an index in
    ``[0, len(list))`` for any ``which``; the original negative branch
    added ``len(list)`` a second time and raised IndexError for every
    negative ``which``.
    """
    return list[which % len(list)]
def infer_version(iri, version_iri):
    """Infer version from IRI and versionIRI.

    Raises ValueError when the collected version still contains a '/',
    i.e. the two IRIs do not share a consistent base.
    """
    # Fast path: version_iri is simply iri plus a trailing version segment.
    if str(version_iri[:len(iri)]) == str(iri):
        version = version_iri[len(iri):].lstrip('/')
    else:
        # Otherwise the version is embedded inside version_iri: walk both
        # strings in lockstep and collect the extra characters (tracked by
        # offset j) that version_iri has over iri.
        j = 0
        v = []
        for i in range(len(iri)):
            while i + j < len(version_iri) and iri[i] != version_iri[i + j]:
                v.append(version_iri[i + j])
                j += 1
        version = ''.join(v).lstrip('/').rstrip('/#')
    if '/' in version:
        raise ValueError('version IRI %r is not consistent with base IRI '
                         '%r' % (version_iri, iri))
    return version
def parse_rank_points(r):
    """ Used to parse the amount of points collected based on one's W/L/D. """
    raw = r.get("wedPunt", 0)
    return int(raw)
def di(i, a, dt, tau):
    """
    Computes the change of inhibitor concentration over one time step.

    Args:
        a (float): the initial concentration value of the activator
        i (float): the initial concentration value of the inhibitor
        dt (float): the time increment
        tau (float): the time constant tau

    Returns:
        (float): the change of concentration
    """
    scale = dt / tau
    return scale * (a - i)
def PackageFromFilename(fname: str) -> str:
    """! If filename has a folder use that as the package designation.

    @param fname (str): name of file (with path) to extract the package name
        (enclosing folder).
    @return (str) the extracted package name, if found in supplied path
        (fname); '' when the path has no folder component.
    """
    if '/' not in fname:
        # no folder = no package for us.
        return ''
    # The package is the directory immediately containing the file,
    # i.e. the second-to-last '/'-separated component.
    return fname.rsplit('/', 2)[-2]
def read_doc(doc_path):
    """
    Read a text file and return its entire contents.

    Args:
        doc_path: path of the document to read.

    Returns:
        full_text: the file contents, or '' when the file does not exist
        (a message is printed in that case).
    """
    try:
        with open(doc_path, 'r') as doc:
            return doc.read()
    except FileNotFoundError:
        print("{} file does not exist".format(doc_path))
        return ''
def is_a_blank_line(line):
    """
    @fn is_a_blank_line
    @return True when every character in `line` is an ASCII space (0x20);
            note a trailing newline makes the line non-blank.
    """
    return all(ch == ' ' for ch in line)
def toTab(data):
    """newFields helper method: join the dict's values with tabs."""
    fields = (str(data[key]) for key in data)
    return "\t".join(fields)
def other_tiles(tiles, tile_id):
    """
    Return a new dict of the tiles whose ids are not in ``tile_id``.

    NOTE(review): `not in tile_id` is a membership test, so despite the
    singular name, tile_id is expected to be a collection of ids; a plain
    string here would do substring matching — confirm against callers.
    """
    return {key: tile for key, tile in tiles.items() if key not in tile_id}
def downloading_complete(dct, flag=None):
    """
    Utility function: check if all files in the dictionary have been
    successfully downloaded. You can set the flag to:

    * 'op' to only check for final products (forecast are ignored)
    * 'fc' to only check for forecast products (final are ignored)
    * None to either check final or forecast, i.e. a file is considered
      successfully downloaded in either case

    :return: True when every entry satisfies the chosen criterion.
    :raises RuntimeError: when flag is not None, 'fc' or 'op'.
    """
    if not flag:
        # Bug fix: the original computed this all() but never returned it,
        # so the no-flag call always yielded None.
        return all(dct[x]['fc'] or dct[x]['op'] for x in dct)
    if flag == 'fc':
        return all(dct[x]['fc'] for x in dct)
    if flag == 'op':
        return all(dct[x]['op'] for x in dct)
    raise RuntimeError('[ERROR] Invalid argument to downloading_complete function!')
def update_config(original, update, override=True): """ Recursively update a dict. Subdict's won't be overwritten but also updated. """ for key, value in update.items(): if key not in original or (not isinstance(value, dict) and override): original[key] = value else: update_config(original[key], value) return original
def _preprocess(sentences, preprocess_pipeline, word_tokenize=None): """ Helper function to preprocess a list of paragraphs. Args: param (Tuple): params are tuple of (a list of strings, a list of preprocessing functions, and function to tokenize setences into words). A paragraph is represented with a single string with multiple setnences. Returns: list of list of strings, where each string is a token or word. """ if preprocess_pipeline is not None: for function in preprocess_pipeline: sentences = function(sentences) if word_tokenize is None: return sentences else: return sentences, [word_tokenize(sentence) for sentence in sentences]
def haspropriety(obj, name):
    """Check if propriety `name` was defined in obj.

    Returns True when `name` exists on `obj` and is not callable
    (i.e. it is a data attribute, not a method).
    """
    missing = object()
    attr = getattr(obj, name, missing)
    # Bug fix: the original tested the attribute's truthiness, so falsy
    # values (0, '', False, None) wrongly reported as "not defined";
    # a sentinel distinguishes "absent" from "falsy".
    return attr is not missing and not callable(attr)
def approx_pretty_size(total_bytes) -> str:
    """
    Return a humane and pretty size approximation.

    This looks silly bellow 1KB but I'm OK with that.
    Don't call this with negative total_bytes or your pet hamster will go bald.

    >>> approx_pretty_size(50)
    '1KB'
    >>> approx_pretty_size(2000)
    '2KB'
    >>> approx_pretty_size(2048)
    '2KB'
    >>> approx_pretty_size(3000000)
    '3MB'
    >>> approx_pretty_size(4000000000)
    '4GB'
    >>> approx_pretty_size(-314)
    Traceback (most recent call last):
        ...
    ValueError: total_bytes may not be negative
    """
    if total_bytes < 0:
        raise ValueError("total_bytes may not be negative")
    scales = ((1024 ** 3, "GB"), (1024 ** 2, "MB"), (1024, "KB"))
    for scale, unit in scales:
        count, remainder = divmod(total_bytes, scale)
        if count > 0:
            if remainder > 0:
                count += 1  # always round up
            return f"{count:d}{unit}"
    # anything below 1KB is reported as 1KB
    return "1KB"
def parse_namespace(ns):
    """Split a dotted namespace into (head, rest) at the first '.'.

    Raises IndexError when `ns` contains no dot (original behaviour kept).
    """
    parts = ns.split('.', 1)
    return parts[0], parts[1]
def validate_list(value, element_type=None, length=None, broadcast_scalars=True, allow_tuples=True, name=None):
    """Validates that value is a list with the specified characteristics.

    Args:
        value: The value to validate.
        element_type: A `type` or tuple of `type`s. The expected type for
            elements of the input list. Can be a tuple to allow more than one
            type. If `None`, the element type is not enforced.
        length: An `int`. The expected length of the list. If `None`, the
            length is not enforced.
        broadcast_scalars: A `bool`. If `True`, scalar inputs are converted to
            lists of length `length`, if `length` is not `None`, or length 1
            otherwise. If `False`, an error is raised on scalar inputs.
        allow_tuples: A `bool`. If `True`, inputs of type `tuple` are accepted
            and converted to `list`s. If `False`, an error is raised on tuple
            inputs.
        name: A `string`. The name of the argument being validated. This is
            only used to format error messages.

    Returns:
        A valid `list`.

    Raises:
        TypeError: When `value` does not meet the type requirements.
        ValueError: When `value` does not meet the length requirements.
    """
    # Handle tuples.
    if allow_tuples and isinstance(value, tuple):
        value = list(value)
    # Handle scalars. A value already matching element_type (or any non-list
    # when element_type is None) is treated as a scalar and broadcast.
    if broadcast_scalars:
        if ((element_type is not None and isinstance(value, element_type)) or
            (element_type is None and not isinstance(value, list))):
            value = [value] * (length if length is not None else 1)
    # We've handled tuples and scalars. If not a list by now, this is an error.
    if not isinstance(value, list):
        raise TypeError(
            f"Argument `{name}` must be a `list`, but received type: {type(value)}")
    # It's a list! Now check the length.
    if length is not None and not len(value) == length:
        raise ValueError(
            f"Argument `{name}` must be a `list` of length {length}, but received a "
            f"`list` of length {len(value)}")
    # It's a list with the correct length! Check element types.
    if element_type is not None:
        # Normalise element_type into a tuple of acceptable types.
        if not isinstance(element_type, (list, tuple)):
            element_types = (element_type,)
        else:
            element_types = element_type
        for element in value:
            # NOTE(review): exact type comparison (not isinstance), so
            # subclasses are rejected — presumably deliberate; confirm.
            if type(element) not in element_types:
                raise TypeError(
                    f"Argument `{name}` must be a `list` of elements of type "
                    f"`{element_type}`, but received type: `{type(element)}`")
    return value
def _flatten_dicts(dicts): """Flatten an iterable of dicts to a single dict.""" return {k: v for d in dicts for k, v in d.items()}
def prepare_data_dirlist(data_dir):
    """Return the four train/test encoder/decoder file paths under data_dir."""
    suffixes = ("train.enc", "train.dec", "test.enc", "test.dec")
    return [data_dir + suffix for suffix in suffixes]
def categorizeClass(h1, h2):
    """Greeble classification with respect to horns [0 down, 1 up]

    h1 |h2 |class
    0  |0  |1
    0  |1  |2
    1  |0  |3
    1  |1  |4

    NOTE(review): the table above lists classes 1-4 but the code returns
    0-3 (and "" for unexpected inputs) — behaviour preserved as-is;
    confirm which numbering is intended.
    """
    class_map = {(0, 0): 0, (0, 1): 1, (1, 0): 2, (1, 1): 3}
    return class_map.get((h1, h2), "")
def delOsmation(db: list, name: str):
    """Delete (in place) the first osmation with the given name.

    Args:
        db (list): List of osmations.
        name (str): Name of osmation.

    Returns:
        db: The same list, with the first matching entry removed.
    """
    for index, entry in enumerate(db):
        if entry["Name"] == name:
            db.pop(index)
            break
    return db
def extended_gcd(a, b): """Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb """ # r = gcd(a,b) i = multiplicitive inverse of a mod b # or j = multiplicitive inverse of b mod a # Neg return values for i or j are made positive mod b or a respectively # Iterateive Version is faster and uses much less stack space x = 0 y = 1 lx = 1 ly = 0 oa = a # Remember original a/b to remove ob = b # negative values from return results while b != 0: q = a // b (a, b) = (b, a % b) (x, lx) = ((lx - (q * x)), x) (y, ly) = ((ly - (q * y)), y) if lx < 0: lx += ob # If neg wrap modulo orignal b if ly < 0: ly += oa # If neg wrap modulo orignal a return a, lx, ly
def only1(l):
    """
    Checks if the list 'l' of booleans has one and only one True value

    :param l: list of booleans
    :return: True if list has one and only one True value, False otherwise
    """
    seen_true = False
    for item in l:
        if not item:
            continue
        if seen_true:
            # second truthy value — short-circuit
            return False
        seen_true = True
    return seen_true
def hass_brightness(device):
    """Home Assistant logic for determining brightness"""
    if 'level' not in device:
        return 0
    # Map a 0-100 % level onto Home Assistant's 0-255 brightness scale.
    return int((int(device['level']) / 100) * 255)
def get_deriv_indices(grad):
    """
    Returns the indices of the derivatives involved in the grid derivatives

    Examples
    --------
    >>> get_deriv_indices(1)
    ['x', 'y', 'z']
    """
    by_order = {
        0: [],
        1: ["x", "y", "z"],
        2: ["x", "y", "z", "xx", "xy", "xz", "yy", "yz", "zz"],
    }
    if grad not in by_order:
        raise ValueError("Only grid derivatives up to Hessians is supported (grad=2).")
    return by_order[grad]
def get_project_name(project_id, projects):
    """Retrieves project name for given project id

    Args:
        projects: List of projects
        project_id: project id

    Returns:
        Project name or None if there is no match
    """
    return next((p.name for p in projects if p.id == project_id), None)
def remap_constraint_indices(tokenized_sequence, detokenized_sequence, constraint_indices):
    """
    Map the constraint indices of a tokenized sequence to the indices of a
    detokenized sequence

    Any time there was '@@ ' in the tokenized sequence, we removed it
    - the detokenized sequence has fewer spaces than the tokenized sequence
    """
    # Lookup tables: constraint start -> end and end -> start positions
    # (positions are indices into the tokenized sequence).
    constraint_idx_starts = {start: end for start, end in constraint_indices}
    constraint_idx_ends = {end: start for start, end in constraint_indices}
    remapped_indices = []
    tokenized_idx = 0
    # current_offset counts how many tokenized characters have been skipped
    # so far; subtracting it converts a tokenized index to a detokenized one.
    current_offset = 0
    true_start = None
    for true_idx, output_char in enumerate(detokenized_sequence):
        if tokenized_idx in constraint_idx_starts:
            true_start = tokenized_idx - current_offset
        elif tokenized_idx in constraint_idx_ends:
            assert true_start is not None, 'if we found an end, we also need a start'
            true_end = tokenized_idx - current_offset
            remapped_indices.append([true_start, true_end])
            true_start = None
        # this logic assumes that post-processing did not _change_ any characters
        # I.e. no characters were substituted for other characters
        while output_char != tokenized_sequence[tokenized_idx]:
            # Skip tokenized characters that were removed by detokenization.
            tokenized_idx += 1
            current_offset += 1
            # NOTE(review): this guard uses `>` — an off-by-one read at
            # exactly len(tokenized_sequence) would raise IndexError on the
            # while condition first; confirm `>=` was not intended.
            if tokenized_idx > len(tokenized_sequence):
                raise IndexError('We went beyond the end of the longer sequence: {}, when comparing with: {}'.format(
                    tokenized_sequence, detokenized_sequence
                ))
            # Constraint boundaries may fall inside a skipped region too.
            if tokenized_idx in constraint_idx_starts:
                true_start = tokenized_idx - current_offset
            elif tokenized_idx in constraint_idx_ends:
                assert true_start is not None, 'if we found an end, we also need a start'
                true_end = tokenized_idx - current_offset
                remapped_indices.append([true_start, true_end])
                true_start = None
        tokenized_idx += 1
    # Close a constraint that extends to the very end of the sequence.
    if true_start is not None:
        true_end = tokenized_idx - current_offset
        remapped_indices.append([true_start, true_end])
    return remapped_indices
def trim_precision(value: float, precision: int = 4) -> float:
    """
    Trim the precision of the given floating point value.

    For example, if you have the value `170.10000000000002` but really only
    care about it being ``\u2248 179.1``:

    .. code-block:: python

        >>> trim_precision(170.10000000000002, 2)
        170.1
        >>> type(trim_precision(170.10000000000002, 2))
        <class 'float'>

    .. versionadded:: 2.0.0

    :param value:
    :param precision: The number of decimal places to leave in the output.
    """
    rendered = f"{value:0.{precision}f}"
    return float(rendered)
def binary_sum(S, start, stop):
    """Return the sum of the numbers in implicit slice S[start:stop],
    computed by recursive halving."""
    span = stop - start
    if span <= 0:
        # zero elements in slice
        return 0
    if span == 1:
        # one element in slice
        return S[start]
    # two or more elements: split at the midpoint and recurse
    mid = (start + stop) // 2
    return binary_sum(S, start, mid) + binary_sum(S, mid, stop)
def is_int(val):
    """
    Check if value is able to be cast to int.

    :param val: String to be tested to int
    :return: Boolean, True if success
    """
    # Only ValueError is caught: non-string/non-number inputs (TypeError)
    # still propagate, matching the original behaviour.
    try:
        int(val)
    except ValueError:
        return False
    return True
def modulo_space(space):
    """Translate spaces greater than 16 to a space on the board

    Parameters
    ----------
    space : int
        Space the camel was on

    Returns
    -------
    int
        Space the camel will be displayed on (1..16; multiples of 16 map
        to 16 rather than 0)
    """
    remainder = space % 16
    return remainder if remainder else 16
def get_unique_sublist(inlist):
    """return a copy of inlist, but where elements are unique
    (first occurrence wins; order preserved)"""
    # Membership is checked against the output list rather than a set so
    # that unhashable elements keep working, as in the original.
    unique = []
    for element in inlist:
        if element not in unique:
            unique.append(element)
    return unique
def flatten_list(list_to_flatten):
    """Flatten exactly one level of nesting.

    >>> flatten_list([[1, 2], [3, 4, 5]])
    [1, 2, 3, 4, 5]
    >>> flatten_list([[1], [[2]]])
    [1, [2]]
    >>> flatten_list([1, [2]])
    Traceback (most recent call last):
    ...
    TypeError: 'int' object is not iterable
    """
    flattened = []
    for sublist in list_to_flatten:
        flattened.extend(sublist)
    return flattened
def generate_ctrlpts2d_weights(ctrlpts2d):
    """ Generates unweighted control points from weighted ones in 2-D.

    This function

    #. Takes in 2-D control points list whose coordinates are organized
       like (x*w, y*w, z*w, w)
    #. Converts the input control points list into (x, y, z, w) format
    #. Returns the result

    :param ctrlpts2d: 2-D control points (P)
    :type ctrlpts2d: list
    :return: 2-D weighted control points (Pw)
    :rtype: list
    """
    converted = []
    for row in ctrlpts2d:
        converted_row = []
        for point in row:
            weight = point[-1]
            # divide each coordinate by the weight, then keep the weight
            unweighted = [float(coord / weight) for coord in point[:-1]]
            unweighted.append(float(weight))
            converted_row.append(unweighted)
        converted.append(converted_row)
    return converted
def get_log_file_name(file_path):
    """
    Return the lower-cased final path component of `file_path`, with a
    trailing '.gz' extension removed.

    NOTE(review): the original docstring claimed the enclosing subdirectory
    was kept, but the code keeps only the file name — behaviour preserved;
    confirm which was intended.
    """
    file_name = file_path.lower().rsplit('/', 1)[-1]
    if len(file_name) > 3 and file_name.endswith(".gz"):
        file_name = file_name[:-3]
    return file_name
def any(seq, pred=None):
    """Returns True if pred(x) is true for at least one element in the iterable

    NOTE: shadows the built-in ``any`` and takes (seq, pred) in that order;
    with pred=None, truthiness of the elements is tested.
    """
    sentinel = object()
    first_match = next(iter(filter(pred, seq)), sentinel)
    return first_match is not sentinel
def process_result(result: dict):
    """Processes single surveyjs result

    Flattens one level of nested dicts, joining parent and child keys
    with '_'.
    """
    flat = {}
    for variable, answer in result.items():
        if isinstance(answer, dict):
            for sub_key, sub_val in answer.items():
                flat[f"{variable}_{sub_key}"] = sub_val
        else:
            flat[variable] = answer
    return flat
def get_backup_filename(block_height: int) -> str:
    """
    :param block_height: the height of the block where we want to go back
    :return: zero-padded (10 digit) backup file name, e.g. '0000000042.bak'
    """
    return "%010d.bak" % block_height
def merge_dicts(items):
    """
    Merge dictionaries into a new dict (new keys overwrite the old ones).

    This is obsoleted by `flyingcircus.join()`.

    Args:
        items (Iterable[dict]): Dictionaries to be merged together.

    Returns:
        merged (dict): The merged dict (new keys overwrite the old ones).

    Examples:
        >>> d1 = {1: 2, 3: 4, 5: 6}
        >>> d2 = {2: 1, 4: 3, 6: 5}
        >>> d3 = {1: 1, 3: 3, 6: 5}
        >>> dd = merge_dicts((d1, d2))
        >>> print(tuple(sorted(dd.items())))
        ((1, 2), (2, 1), (3, 4), (4, 3), (5, 6), (6, 5))
        >>> dd = merge_dicts((d1, d3))
        >>> print(tuple(sorted(dd.items())))
        ((1, 1), (3, 3), (5, 6), (6, 5))
    """
    return {key: val for mapping in items for key, val in mapping.items()}
def _covered_spikes_criterion(occ_superset, size_superset, occ_subset, size_subset, l_covered_spikes): """ evaluate covered spikes criterion (see pattern_set_reduction) """ reject_superset = True reject_subset = True score_superset = (size_superset - l_covered_spikes) * occ_superset score_subset = (size_subset - l_covered_spikes) * occ_subset if score_superset >= score_subset: reject_superset = False else: reject_subset = False return reject_superset, reject_subset
def truncatestring(name, unicodemode, maxlength, ecutf8):
    """Return a name truncated to no more than maxlength BYTES.

    name is the candidate string
    unicodemode identifies whether in Unicode mode or not
    maxlength is the maximum byte count allowed. It must be a positive integer
    ecutf8 is a utf-8 codec

    If name is a (code page) string, truncation is straightforward.
    If it is Unicode utf-8, the utf-8 byte representation must be used to
    figure this out but still truncate on a character boundary."""
    if not unicodemode:
        # Byte-oriented mode: one character == one byte, so a plain slice
        # works; three bytes are reserved for the "..." marker.
        if len(name) > maxlength:
            name = name[:maxlength-3] + "..."
    else:
        newname = []
        nnlen = 0
        # In Unicode mode, length must be calculated in terms of utf-8 bytes
        for c in name:
            c8 = ecutf8(c)[0]  # one character in utf-8
            nnlen += len(c8)
            if nnlen <= maxlength:
                newname.append(c)
            else:
                # Over budget: drop the last few kept characters to make
                # room for the marker.
                # NOTE(review): dropping 4 characters (not bytes) is an
                # approximation — multi-byte characters may free more than
                # needed; confirm the intended budget.
                newname = newname[:-4]
                newname.append("...")
                break
        name = "".join(newname)
    return name
def tokenize_version(version_string):
    """Tokenize a version string to a tuple.

    Truncates qualifiers like ``-dev``.

    :param str version_string: A version string
    :return: A tuple representing the version string
    :rtype: tuple

    >>> tokenize_version('0.1.2-dev')
    (0, 1, 2)
    """
    core = version_string.partition('-')[0]
    # take only the first 3 components in case there's an extension like -dev.0
    pieces = core.split('.')[:3]
    return tuple(int(piece) for piece in pieces)
def replace_leaf(arg, leaves, new_leaves, op, neg):
    """
    Attempt to replace a leaf of a multiplication tree.

    We search for a leaf in `leaves` whose argument is `arg`, and if we find
    one, we remove it from `leaves` and add to `new_leaves` a leaf with
    argument `arg` and variable `op(arg)`.

    Parameters
    ----------
    arg
        The argument of the leaf we are looking for.
    leaves
        List of leaves to look into. Each leaf should be a pair (x, l) with
        `x` the argument of the Op found in the leaf, and `l` the actual leaf
        as found in a multiplication tree output by `parse_mul_tree` (i.e. a
        pair [boolean, variable]).
    new_leaves
        If a replacement occurred, then the leaf is removed from `leaves` and
        added to the list `new_leaves` (after being modified by `op`).
    op
        A function that, when applied to `arg`, returns the Variable we want
        to replace the original leaf variable with.
    neg : bool
        If True, then the boolean value associated to the leaf should be
        swapped. If False, then this value should remain unchanged.

    Returns
    -------
    bool
        True if a replacement occurred, or False otherwise.
    """
    for position, leaf in enumerate(leaves):
        leaf_arg, payload = leaf
        if leaf_arg != arg:
            continue
        # Mutate the leaf in place: flip the negation flag if requested and
        # substitute the variable with op(arg).
        payload[0] = payload[0] ^ neg
        payload[1] = op(arg)
        # Move the (mutated) leaf from `leaves` to `new_leaves`.
        del leaves[position]
        new_leaves.append(leaf)
        return True
    return False
def dummy_test_case(_, returns, raise_exception=None):
    """Dummy function that echoes back `returns`.

    If a truthy `raise_exception` is supplied, it is raised instead of
    returning.
    """
    if raise_exception:
        raise raise_exception
    return returns
def _encode(coord, max_coord, chars): """Returns a binary geohash of longitude or latitude as an array of 0s and 1s.""" encoding = [] coord += max_coord for p in range(chars): if coord >= max_coord: coord -= max_coord encoding.append('1') else: encoding.append('0') max_coord /= 2 return encoding
def pad_word(start_index: int, word: str, max_length: int, pad_symbol = '.'):
    """
    >>> pad_word(1, 'foo', 5)
    '.foo.'
    >>> pad_word(2, 'foo', 5)
    '..foo'
    """
    left = pad_symbol * start_index
    right = pad_symbol * (max_length - start_index - len(word))
    return left + word + right
def meta_command(connection, user_input):
    """Translate a sqlite3 shell meta command into an equivalent SQL query.

    Args:
        connection: An open ``sqlite3`` connection; only used by ``.dump``
            with no table argument (via ``connection.iterdump()``).
        user_input: The raw command line typed by the user, e.g. ``.tables``
            or ``.schema mytable``.

    Returns:
        str: The SQL query to run, or an empty string when the command was
        handled directly (``.dump`` with no table), malformed, or unknown.
    """
    user_parts = user_input.split()
    command = user_parts[0].lower()  # normalize once instead of per-branch
    replaced = ''
    # .tables: print list of tables in database
    if command in ('.tables', '.table'):
        replaced = 'SELECT name FROM sqlite_master WHERE type = "table";'
    # .schema: print details about a tables column
    elif command == '.schema':
        if len(user_parts) == 1:
            replaced = 'SELECT sql FROM sqlite_master WHERE type = "table";'
        elif len(user_parts) == 2:
            s = 'SELECT sql FROM sqlite_master WHERE type = "table" AND name = "{[1]}";'
            replaced = s.format(user_parts)
        else:
            print('Usage: .schema [table]')
    # .dump: print SQL commands to recreate table
    # ToDo: http://www.sqlitetutorial.net/sqlite-dump/
    elif command == '.dump':
        if len(user_parts) == 1:
            for line in connection.iterdump():
                print(line)
        elif len(user_parts) == 2:
            replaced = 'SELECT * from {[1]};'.format(user_parts)
        else:
            print('Usage: .dump [table]')
    else:
        # Fixed typo: "implimented" -> "implemented".
        print('That sqlite3 meta command has not been implemented')
    return replaced
def Artifact(file_path, view_url=None, fetch_url=None,
             content_type='application/octet-stream'):
    """Build an Artifact dict.

    Args:
        file_path: A string with the absolute path where the artifact is
            stored.
        view_url: An optional string with a URL where the artifact has been
            uploaded to as a human-viewable link.
        fetch_url: An optional string with a URL where the artifact has been
            uploaded to as a machine downloadable link.
        content_type: An optional string with the MIME type of the artifact.
    """
    artifact = {'filePath': file_path, 'contentType': content_type}
    # Only include the optional URL fields when they were actually provided.
    optional = {'viewUrl': view_url, 'fetchUrl': fetch_url}
    artifact.update({key: url for key, url in optional.items()
                     if url is not None})
    return artifact
def Strip(txt):
    """Return *txt* stripped of surrounding whitespace; can handle None.

    Args:
        txt: The value to strip; typically a string or ``None``.

    Returns:
        str or None: ``txt.strip()`` when *txt* is string-like, otherwise
        ``None`` (preserving the old "return None on failure" contract).
    """
    try:
        return txt.strip()
    # Only catch the "no .strip() method" case; the old bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit and hid real bugs.
    except AttributeError:
        return None
def getDrowAnnotationLine(ann):
    """
    levels:
    0: eye state (right)
    1: eye state(left)
    """
    bodyAnnId = ann[0]
    # Eye-state annotation mirrors the body annotation for the known ids
    # (0=open, 1=close, 2=opening, 3=closing, 4=undefined); any other id
    # falls back to 0, matching the original if/elif chain.
    a1Ann = bodyAnnId if bodyAnnId in (0, 1, 2, 3, 4) else 0
    # Keep the remaining annotation fields (from index 2 onward) unchanged.
    return [bodyAnnId, a1Ann] + list(ann[2:])
def _prediction_file_to_ckpt(path): """Extract the global step from a prediction filename.""" return int(path.split("_")[-2])
def bits(b):
    """Returns the bits in a byte as a tuple, least-significant bit first."""
    value = ord(b) if isinstance(b, str) else b
    return tuple((value >> shift) & 1 for shift in range(8))
def interpret_numbers(user_range):
    """
    :param user_range: A string specifying a range of numbers.  Eg.
        interpret_numbers('4-6')==[4,5,6]
        interpret_numbers('4,6')==[4,6]
        interpret_numbers('4,6-9')==[4,6,7,8,9]
    :return: A list of integers, or None if the input is not numeric
    """
    # Reject anything containing characters other than digits, '-' and ','.
    if not all(ch in '0123456789-,' for ch in user_range):
        return None
    numbers = []
    try:
        for part in user_range.split(','):
            if '-' in part:
                # An inclusive range "a-b".
                start, stop = part.split('-', 1)
                numbers.extend(range(int(start), int(stop) + 1))
            else:
                numbers.append(int(part))
    except ValueError:
        # Malformed pieces such as '4-', '1-2-3', or '' used to raise out of
        # this function; treat them as "not numeric" per the contract above.
        return None
    return numbers
def get_min_build_version(version: str) -> str:
    """
    Get min version 5 micro releases lower than `version`. Minimum micro is 0.
    """
    # Versions with more than two dots carry a trailing qualifier that we
    # discard; otherwise the string is exactly "major.minor.micro".
    if version.count(".") > 2:
        major, minor, micro = version.split(".", 3)[:3]
    else:
        major, minor, micro = version.split(".", 2)
    floor_micro = max(int(micro) - 5, 0)
    return f"{major}.{minor}.{floor_micro}"
def fscr_score(ftr_t_1, ftr_t, n):
    """Feature Selection Change Rate

    The percentage of selected features that changed with respect to the
    previous time window.

    :param ftr_t_1: selected features in t-1
    :param ftr_t: selected features in t (current time window)
    :param n: number of selected features
    :return: fscr
    :rtype: float
    """
    # Features selected previously but no longer selected now.
    dropped = set(ftr_t_1) - set(ftr_t)
    return len(dropped) / n
def a10n(string):
    """That thing where "internationalization" becomes "i18n", what's it
    called? Abbreviation? Oh wait, no: ``a10n``. (It's actually a form of
    `numeronym`_.)

    >>> a10n('abbreviation')
    'a10n'
    >>> a10n('internationalization')
    'i18n'
    >>> a10n('')
    ''

    .. _numeronym: http://en.wikipedia.org/wiki/Numeronym
    """
    # Strings shorter than 3 characters have no interior to abbreviate.
    if len(string) < 3:
        return string
    interior_count = len(string) - 2
    return f'{string[0]}{interior_count}{string[-1]}'
def tree_attribute(identifier):
    """
    Predicate that returns True for custom attributes added to AttrTrees
    that are not methods, properties or internal attributes.

    These custom attributes start with a capitalized character when
    applicable (not applicable to underscore or certain unicode characters)
    """
    first = identifier[0]
    # An uncased leading character (digit, many unicode symbols) that is not
    # an underscore cannot follow the capitalization convention, so it is
    # always treated as a custom attribute.
    if first != '_' and not first.upper().isupper():
        return True
    # Otherwise the convention applies: custom attributes are capitalized.
    return first.isupper()
def listToString(data):
    """
    Return a comma-separated string built from the given list.

    Items are converted with ``str`` and joined with ", ", so item text is
    preserved exactly. (The previous implementation round-tripped through
    ``str(list)`` and globally stripped ``[ ] ' "`` characters, which also
    corrupted any item containing those characters.)

    Example:
        print listToString(['apple', 'pear', 'cherry'])
        # apple, pear, cherry

    :type data: list
    :rtype: str
    """
    return ", ".join(str(item) for item in data)
def transpose_list(lst):
    """
    From list of lists (of same length) lst
    Returns transposed list lst2 where lst[i][j] = lst2[j][i]

    A flat (non-nested) list is returned unchanged.
    """
    # Only transpose when the first element shows this is a list of lists.
    if not isinstance(lst[0], list):
        return lst
    return [list(column) for column in zip(*lst)]
def computeMetrics(groundtruth, prediction):
    """Compute the precision, recall and F1 score (all as percentages).

    Args:
        groundtruth : set. Ground truth (correct) target values.
        prediction : set. Estimated targets.

    Returns:
        (precision, recall, score) : tuple of floats.

    Example:
        >>> from seqeval.metrics import f1_score
        >>> groundtruth = set([("Phosphorylation", ("Phosphorylation", "phosphorylates", "ACE1"))])
        >>> prediction = set([("Phosphorylation", ("Phosphorylation", "phosphorylates", "ACE1"))])
        >>> f1_score(groundtruth, prediction)
        1.00
    """
    n_correct = len(groundtruth & prediction)
    n_pred = len(prediction)
    n_true = len(groundtruth)
    # Guard every ratio against empty sets (division by zero -> 0).
    precision = 100.0 * n_correct / n_pred if n_pred else 0
    recall = 100.0 * n_correct / n_true if n_true else 0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall else 0)
    return precision, recall, f1