content
stringlengths
42
6.51k
def classix(a, b, c):
    """Placeholder routine: runs a trivial inner helper and returns its
    result (always None).  Parameters a, b, c are accepted but unused."""
    def _inner(_value):
        # This branch is always taken; the helper prints 3 and returns None.
        if 2 > 1:
            print(3)
    return _inner(23123)
def get_deletion_offset(alt_pos, trans_queue, changes):
    """Calculate the offset mapping an ALT position back to the original
    position in the reference genome.

    Sums deletion lengths from *changes* that apply up to ``alt_pos`` (the
    scan stops at the first non-TRANS change past it), plus the lengths of
    translocated sequences from *trans_queue* inserted before ``alt_pos``.
    """
    offset = 0
    for change in changes:
        # Stop at the first non-translocation change beyond alt_pos.
        if change[2] > alt_pos and change[0] != "TRANS":
            break
        if change[0] == "DEL":
            offset += change[3]
    for trans in trans_queue:
        if trans[2] > alt_pos:
            return offset
        offset += len(trans[1])
    return offset
def sorted_options(sort_options):
    """Sort sort options for display.

    :param sort_options: dict mapping field name to its option dict
        (with ``title``, optional ``order`` and ``default_order``).
    :returns: list of ``title``/``value`` dicts for Invenio-Search-JS.
    """
    ordered = sorted(sort_options.items(), key=lambda item: item[1].get("order", 0))
    options = []
    for key, opts in ordered:
        if opts.get("default_order", "asc") == "desc":
            value = "-{0}".format(key)
        else:
            value = key
        options.append({"title": opts["title"], "value": value})
    return options
def make_comment(text):
    """Format *text* as a reStructuredText comment line.

    Args:
        text: Text string to format.

    Returns:
        Formatted text string (with trailing newline).
    """
    template = '.. {}\n'
    return template.format(text)
def problem_1_1(data):
    """Determine whether a string has all unique characters.

    Args:
        data: string (or any iterable of hashable characters).

    Returns:
        bool: True if all characters are unique, False otherwise.
    """
    seen = set()  # idiomatic empty set instead of set([])
    for ch in data:
        if ch in seen:
            return False
        seen.add(ch)
    return True
def get_interfaces_from_domain(domain_xml):
    """From the ElementTree of a domain, get a map of all network interfaces.

    Parameters
    ----------
    domain_xml: ElementTree
        The xml representation of the domain.

    Returns
    -------
    dict
        All the network interfaces, as {mac_address: device_name}.
    """
    if domain_xml is None:
        return {}
    devices = domain_xml.find('./devices')
    if devices is None:
        return {}
    ifaces = {}
    for iface in devices.findall('./interface'):
        mac = iface.find('./mac')
        if mac is None or 'address' not in mac.attrib:
            # Malformed interface entry: no MAC address to key on.
            # (The original raised AttributeError here.)
            continue
        source = iface.find('./source')
        dev = source.attrib.get('dev', '') if source is not None else ''
        ifaces[mac.attrib['address'].lower()] = dev
    return ifaces
def shape3d(a):
    """Ensure a 3D shape.

    Args:
        a: an int or a tuple/list of length 3.

    Returns:
        list: of length 3. If ``a`` is an int, return ``[a, a, a]``.

    Raises:
        RuntimeError: if ``a`` is neither an int nor a length-3 sequence.
    """
    # isinstance instead of type() ==; bool is excluded to keep the
    # original exact-type behavior (bool is a subclass of int).
    if isinstance(a, int) and not isinstance(a, bool):
        return [a, a, a]
    if isinstance(a, (list, tuple)):
        assert len(a) == 3, "Illegal shape: {}".format(a)
        return list(a)
    raise RuntimeError("Illegal shape: {}".format(a))
def preprocess(data, ops=None):
    """Run *data* through each operation in *ops* in order.

    Returns the transformed data, or None as soon as any op yields None.
    """
    for op in (ops or []):
        data = op(data)
        if data is None:
            return None
    return data
def split_sample_name(name):
    """Split *name* into a leading part plus a trailing number.

    Returns (leading, number); number is None when there are no trailing
    digits or the whole name is digits.
    """
    idx = len(name)
    while idx and name[idx - 1].isdigit():
        idx -= 1
    if 0 < idx < len(name):
        return (name[:idx], int(name[idx:]))
    return (name, None)
def try_noisy_test_up_to_n_times(noisy_test, n=3, print=print):
    """
    Run a probabilistic test up to *n* times, returning True on the first
    success (a run succeeds when ``noisy_test()`` returns True or raises
    nothing and returns None-equivalent success).  On exhaustion, re-raises
    the last AssertionError, or returns the last non-True value.
    """
    failure_value = None
    for attempt in range(n):
        try:
            outcome = noisy_test()
            failure_value = None if outcome is True else outcome
        except AssertionError as exc:
            failure_value = exc
        if failure_value is None:
            return True
        if print is not None:
            print("Test failed: " + str(failure_value))
            if attempt < n - 1:
                print("That may have been spurious, so will try again.")
    if isinstance(failure_value, Exception):
        raise failure_value
    return failure_value
def make_symmetry_matrix(upper_triangle):
    """Fill three or six integers/rationals into a 3x3 (or 4x4) symmetric
    matrix with a unit diagonal."""
    n = len(upper_triangle)
    if n == 3:
        a, b, c = upper_triangle
        return [[1, a, b],
                [a, 1, c],
                [b, c, 1]]
    if n == 6:
        a, b, c, d, e, f = upper_triangle
        return [[1, a, b, c],
                [a, 1, d, e],
                [b, d, 1, f],
                [c, e, f, 1]]
    raise ValueError("Three or six inputs are expected.")
def rgb_to_bw(r, g, b):
    """Convert an RGB triple to a single BW intensity value."""
    red_w, green_w, blue_w = 0.35, 0.45, 0.2
    return red_w * r + green_w * g + blue_w * b
def space_indentation(s):
    """The number of leading spaces in a string.

    :param str s: input string
    :rtype: int
    :return: number of leading spaces
    """
    without_indent = s.lstrip(' ')
    return len(s) - len(without_indent)
def total_absorptivity(absorptivity: float, reflectivity: float) -> float:
    """Compute the total internal balloon absorptivity/emissivity factor.

    Radiation entering (or emitted inside) the balloon sphere is partially
    absorbed (A), partially reflected internally (R), and partially
    transmitted out (T = 1 - A - R).  Summing the geometric series of
    repeated internal reflections gives

        A_total = A + T*A*(1 + R + R^2 + ...) = A * (1 + T / (1 - R))

    The same series describes outgoing emission, so this single factor
    serves as both total absorptivity and total emissivity.

    Args:
        absorptivity: Balloon film's absorptivity/emissivity (A).
        reflectivity: Balloon film's reflectivity (R).

    Returns:
        total_absorptivity_factor: Factor of radiation absorbed/emitted by
        balloon, expected in [0, 1].

    Raises:
        ValueError: if the computed factor is outside [0, 1].
    """
    transmissivity = 1.0 - absorptivity - reflectivity
    factor = absorptivity * (1.0 + transmissivity / (1.0 - reflectivity))
    if factor < 0.0 or factor > 1.0:
        raise ValueError(
            'total_absorptivity: '
            'Computed total absorptivity factor out of expected range [0, 1].')
    return factor
def check_valid_word(word):
    """A valid word is non-empty and contains only letters and apostrophes."""
    if not word:
        return False
    return all(ch.isalpha() or ch == "'" for ch in word)
def _macro_defn_action(_s, _l, tokens): """ Builds a dictionary structure which defines the given macro. """ assert len(tokens) == 3 assert tokens[0] == "@" return {tokens[1]: tokens[2]}
def namestr(obj, namespace=None):
    """Return the list of names bound to *obj* in *namespace*.

    The default namespace is resolved to ``globals()`` at call time instead
    of being evaluated once at definition time (default-argument pitfall).

    >>> a = 'some var'
    >>> namestr(a, globals())
    ['a']
    """
    if namespace is None:
        namespace = globals()
    return [name for name in namespace if namespace[name] is obj]
def _key_id_or_name_n(key, index): """Internal helper function for key ID and name transforms. Args: key: A datastore key. index: The depth in the key to return, where 0 is the root key and -1 is the leaf key. Returns: The ID or name of the nth deep sub key in key. """ if not key: return None path = key.to_path() if not path: return None path_index = (index * 2) + 1 return path[path_index]
def key_values_to_tags(dicts):
    """Convert "key:value" strings into AWS tag dicts.

    Example: ["mykey:myValue"] -> [{'Key': 'mykey', 'Value': 'myValue'}]
    """
    tags = []
    for option in dicts:
        key, value = option.split(":", 1)
        tags.append({'Key': key, 'Value': value})
    return tags
def parse(string, start, divider, type=str):
    """General-purpose tokenizer, used below.

    Strips the *start* prefix, then splits once on *divider*; the token is
    converted with *type*.  Returns (token_or_None, remaining_string).
    """
    if not string.startswith(start):
        return None, string
    string = string[len(start):]
    try:
        token, rest = string.split(divider, 1)
    except ValueError:
        # No divider after the start token: the whole input is the token.
        token, rest = string, ''
    return type(token), rest
def fib(num):
    """Return the num-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative O(n) implementation replacing the naive exponential-time
    recursion.  Mirrors the original contract: returns None for negative
    input.
    """
    if num < 0:
        return None
    a, b = 0, 1
    for _ in range(num):
        a, b = b, a + b
    return a
def _scrub_headers(headers): """scrub auth info from headers""" headers = dict(headers) if 'Authorization' in headers: auth = headers['Authorization'] if auth.startswith('token '): headers['Authorization'] = 'token [secret]' return headers
def degminsec2dec(degrees, minutes, seconds):
    """
    Convert a triple (int degrees, int minutes, float seconds) to a
    floating-point number of degrees.

    .. code-block:: python

        >>> assert dec2degminsec(degminsec2dec(30,30,0.0)) == (30,30,0.0)
    """
    dec = float(degrees)
    dec += float(minutes) / 60 if minutes else 0.0
    dec += float(seconds) / 3600 if seconds else 0.0
    return dec
def has_any_letters(text):
    """Check whether *text* contains at least one letter (any alphabet,
    not just English)."""
    for ch in text:
        if ch.isalpha():
            return True
    return False
def parse_manytomany_object_list(object_list, model):
    """Normalize one instance/id (or a list thereof) into a list of model
    instances; integer ids are resolved via ``model.objects.filter``."""
    items = object_list if isinstance(object_list, list) else [object_list]
    if items and isinstance(items[0], int):
        items = list(model.objects.filter(id__in=items))
    return items
def yellow(string: str) -> str:
    """Wrap *string* in the ANSI terminal color codes for yellow."""
    return "".join(("\033[93m", string, "\033[0m"))
def sizeof_fmt(num, suffix='B'):
    """Format `num` bytes as a human-readable string (binary prefixes)."""
    value = num
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, unit, suffix)
        value /= 1024.0
    # Beyond zebibytes everything is expressed in YiB.
    return "%.1f%s%s" % (value, 'Yi', suffix)
def _checkInstall(install): """ Helper function for checking the installation / upgrade process of cpanm.installed """ ret = { 'errors': [], 'changes': {} } for mod, cha in install.items(): if 'error' in cha: ret["errors"].append(mod) else: ret["changes"] = {mod: cha} return ret
def version_to_str(version):
    """Return a dotted version string from a version tuple."""
    return '.'.join(map(str, version))
def eval_f1(ref, pred):
    """
    Compute the mean F1 score over paired references and predictions.

    :param ref: list(list(list(any))), one entry per example; each entry is
        a list of reference sequences.
    :param pred: list(list(any)), one predicted sequence per example.
    :return: F1 score (0.0 when both precision and recall are 0).
    """
    assert len(ref) == len(pred) > 0
    precisions, recalls = [], []
    for refs, hyp in zip(ref, pred):
        ref_set = {w for rs in refs for w in rs}
        pred_set = set(hyp)
        # Precision: fraction of predicted tokens found in any reference.
        hits = sum(1 for w in hyp if w in ref_set)
        precisions.append(hits / len(hyp) if hyp else 0)
        # Recall: fraction of reference tokens covered by the prediction.
        covered = sum(1 for rs in refs for w in rs if w in pred_set)
        total = sum(len(rs) for rs in refs)
        recalls.append(covered / total if total else 0)
    precision = sum(precisions) / len(precisions)
    recall = sum(recalls) / len(recalls)
    if precision == recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)
def all_indices(sequence, sub_sequence, offset=0):
    """
    Return the list of all indices at which *sub_sequence* occurs in
    *sequence*, searching from *offset* (overlapping matches included).
    """
    indices = []
    pos = sequence.find(sub_sequence, offset)
    while pos != -1:
        indices.append(pos)
        pos = sequence.find(sub_sequence, pos + 1)
    return indices
def overlaps(s1, e1, s2, e2):
    """Return True when the half-open intervals [s1, e1) and [s2, e2)
    intersect.

    >>> overlaps(2, 4, 3, 5)
    True
    >>> overlaps(2, 4, 4, 5)
    False
    >>> overlaps(2, 4, 2, 4)
    True
    """
    return s1 < e2 and s2 < e1
def steering2(course, power):
    """
    Compute how fast each motor in a pair should turn to achieve the
    specified steering.

    Input:
        course [-100, 100]: -100 turns left as fast as possible, 0 drives
            straight, 100 turns right as fast as possible.  Values beyond
            the range zero the inner motor entirely.
        power: power applied to the outer (faster) motor; the inner motor's
            power is derived from it.

    Output:
        (left_power, right_power) tuple of ints.
    """
    if course >= 0:
        # Turning right: the left motor is the outer one.
        power_left = power
        power_right = 0 if course > 100 else power - ((power * course) / 100)
    else:
        # Turning left: the right motor is the outer one.
        power_right = power
        power_left = 0 if course < -100 else power + ((power * course) / 100)
    return (int(power_left), int(power_right))
def is_valid_bool(val):
    """Validate whether the value passed is a boolean.

    Args:
        val (any type): value to be tested.

    Returns:
        bool: True if *val* is a bool, else False.
    """
    # isinstance is the idiomatic check; since bool cannot be subclassed,
    # it is exactly equivalent to the original type() comparison.
    return isinstance(val, bool)
def tolerance(a, b, e):
    """Return whether ``a - b`` is within tolerance *e*.

    When *a* is nonzero the tolerance is relative (scaled by |a|);
    otherwise *e* is used as-is.
    """
    diff = abs(a - b)
    bound = abs(e * a) if a != 0 else e
    return diff <= bound
def align_self(keyword):
    """``align-self`` property validation."""
    valid_keywords = {'auto', 'flex-start', 'flex-end', 'center', 'baseline', 'stretch'}
    return keyword in valid_keywords
def unsnake(st):
    """Turn SNAKE_CASE into Title Case words."""
    spaced = st.replace("_", " ")
    return spaced.title()
def get_route_name(route):
    """Return the route name: everything after the first '/', or the whole
    string when there is no '/'."""
    parts = route.split("/", 1)
    return parts[-1]
def lcs(X, Y):
    """
    Find the longest common subsequence between two lists.

    In this script the function is used for aligning the ground-truth
    output string with the predicted string (for visualization).

    Args:
        X: a list
        Y: a list

    Returns:
        A list: the longest common subsequence of X and Y.
    """
    m, n = len(X), len(Y)
    # table[i][j] holds the LCS length of X[:i] and Y[:j].
    table = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    # Backtrack from the bottom-right corner, collecting matches in reverse.
    # The tie-breaking (prefer moving up when strictly larger) matches the
    # original implementation exactly.
    result = []
    i, j = m, n
    while i > 0 and j > 0:
        if X[i - 1] == Y[j - 1]:
            result.append(X[i - 1])
            i -= 1
            j -= 1
        elif table[i - 1][j] > table[i][j - 1]:
            i -= 1
        else:
            j -= 1
    result.reverse()
    return result
def make_items(times, leading='+'):
    """Make list-item lines with a leading marker, repeated *times* times.

    :param times: how many item lines you need.
    :param leading: leading character; the special value ``1`` produces a
        numbered (ordered) list instead of a bulleted one.
    """
    count = int(times)
    if leading == 1:
        numbered = ["%s. Item\n" % i for i in range(1, count + 1)]
        return "".join(numbered)
    return ("%s Item\n" % leading) * count
def angle_dist_sqr(a1, a2):
    """Squared distance between two points in angle space."""
    dx = a1[0] - a2[0]
    dy = a1[1] - a2[1]
    return dx * dx + dy * dy
def convertToFrLocationFormat(faceLocations):
    """Convert (x, y, w, h) face boxes into face_recognition's
    (top, right, bottom, left) format."""
    return [(y, x + w, y + h, x) for (x, y, w, h) in faceLocations]
def _isnotsuite(test): """A crude way to tell apart testcases and suites with duck-typing :param test: :return: """ try: iter(test) except TypeError: return True return False
def select_paging_modes(paging_modes):
    """Read the paging modes specified by the user so that only those modes
    are generated, not every possible paging mode.

    Accepts a tuple of modes or a delimited string (spaces and/or commas).
    Returns a sorted list of the unique selected modes, defaulting to
    ['sv39'] when nothing was selected.
    """
    mode = []
    if isinstance(paging_modes, tuple):
        mode = sorted(paging_modes)
    elif paging_modes is not None:
        # Normalize whitespace separators to commas, split, drop empties.
        # The original called mode.remove('') inside a try block; when ''
        # was absent, the ValueError skipped the sort as well.
        normalized = paging_modes.replace(' ', ',')
        mode = sorted(set(normalized.split(",")) - {''})
    if not mode:
        mode.append('sv39')
    return mode
def gen_tooltips(data):
    """Generate the tooltip text for the variable described in *data*.

    @param[in] data A dictionary with the characteristics of the variable
    @param[out] Tooltip string taken from the "desc" entry of *data*
    """
    tooltip = data["desc"]
    return tooltip
def get_host_finding_threat_hr(threats):
    """Prepare human readable json for the
    "risksense-get-host-finding-detail" command, including threat details.

    :param threats: threat details from the response.
    :return: list of dicts.
    """
    rows = []
    for threat in threats:
        rows.append({
            'Title': threat.get('title', ''),
            'Category': threat.get('category', ''),
            'Source': threat.get('source', ''),
            'CVEs': ', '.join(threat.get('cves', '')),
            'Published': threat.get('published', ''),
            'Updated': threat.get('updated', '')
        })
    # A trailing empty row makes a single threat render horizontally.
    if len(rows) == 1:
        rows.append({})
    return rows
def _preprocess(param): """ Helper function to preprocess a list of paragraphs. Args: param (Tuple): params are tuple of (a list of strings, a list of preprocessing functions, and function to tokenize setences into words). A paragraph is represented with a single string with multiple setnences. Returns: list of list of strings, where each string is a token or word. """ sentences, preprocess_pipeline, word_tokenize = param for function in preprocess_pipeline: sentences = function(sentences) return [word_tokenize(sentence) for sentence in sentences]
def denorm(x):
    """De-normalize from [-1, 1] to [0, 1]."""
    return (x + 1) / 2
def SortEdge(edge):
    """Return the edge's pair of nodes sorted alphabetically, as a tuple."""
    ordered = sorted(edge)
    return tuple(ordered)
def _is_callable(var_or_fn): """Returns whether an object is callable or not.""" # Python 2.7 as well as Python 3.x with x > 2 support 'callable'. # In between, callable was removed hence we need to do a more expansive check if hasattr(var_or_fn, '__call__'): return True try: return callable(var_or_fn) except NameError: return False
def left_to_right_check(input_line: str, pivot: int) -> bool:
    """
    Check row-wise visibility from left to right.

    Return True when the number of buildings visible from the left-most
    hint equals *pivot*, False otherwise.

    input_line - board row (first and last characters are hints/borders).
    pivot - number on the left-most hint of the input_line.

    >>> left_to_right_check("412453*", 4)
    True
    >>> left_to_right_check("452453*", 5)
    False
    >>> left_to_right_check("512345*", 5)
    True
    >>> left_to_right_check("4124531", 4)
    True
    """
    visible = 0
    tallest = 0
    for char in input_line[1:-1]:
        # '*' marks an empty border cell; skip it.
        if char == "*":
            continue
        height = int(char)
        if height > tallest:
            tallest = height
            visible += 1
    # Compare the final count.  The original returned True as soon as the
    # count reached pivot, wrongly accepting rows where MORE buildings
    # than the hint are visible.
    return visible == pivot
def check_uniqueness_in_rows(board: list):
    """
    Check that buildings in each interior row have unique heights.

    Return True if every row holds unique digits ('*' cells ignored),
    False otherwise.

    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*543215',\
 '*35214*', '*41532*', '*2*1***'])
    True
    >>> check_uniqueness_in_rows(['***21**', '452453*', '423145*', '*543215',\
 '*35214*', '*41532*', '*2*1***'])
    False
    """
    for row in board[1:-1]:
        interior = row[1:-1].replace('*', '')
        if len(interior) != len(set(interior)):
            return False
    return True
def canonicalize_revision_range(lower, upper):
    """Return a canonical revision specifier string for the given range,
    collapsing "degenerate" ranges to a single revision number."""
    if lower == upper:
        return '#%s' % (lower,)
    return '#%s,%s' % (lower, upper)
def kumaraswamy_invcdf(a, b, u):
    """Inverse CDF of the Kumaraswamy(a, b) distribution evaluated at u."""
    inner = (1.0 - u) ** (1.0 / b)
    return (1.0 - inner) ** (1.0 / a)
def aascore(aa1, aa2, match=1, penaltyA=-10000, penaltyX=0, gap_penalty=-1):
    """Return the matching score for a pair of amino-acid letters.

    Args:
        aa1, aa2: single amino-acid characters (case-insensitive).
        match: score for identical residues.
        penaltyA: penalty for a mismatch between two known residues.
        penaltyX: penalty when either residue is the unknown 'X'.
        gap_penalty: score when either symbol is not a letter (a gap).
    """
    aa1 = aa1.upper()
    aa2 = aa2.upper()
    if not aa1.isalpha() or not aa2.isalpha():
        return gap_penalty
    if aa1 == aa2:
        # Bug fix: honor the `match` parameter; the original hard-coded
        # `return 1`, silently ignoring non-default match scores.
        return match
    if aa1 == 'X' or aa2 == 'X':
        return penaltyX
    return penaltyA
def listify(s, sep=None, as_int=True):
    """Build a correctly formatted list out of pasted text (Jupyter
    Notebook helper).

    Detects the separator when *sep* is None, strips surrounding square
    brackets, and converts elements to int (or float when ``as_int`` is
    False), leaving unconvertible elements as strings.
    """
    convert = int if as_int else float
    if sep is None:
        # Probe everything but the last two chars so a trailing separator
        # does not skew detection.
        body = s[:-2]
        for candidate in ("\n", ",", "\t"):
            if candidate in body:
                sep = candidate
                break
        else:
            sep = " "
    if s.startswith("["):
        s = s[1:]
    if s.endswith("]"):
        s = s[:-1]
    items = []
    for raw in s.split(sep):
        if not raw:
            continue
        try:
            items.append(convert(raw))
        except ValueError:
            items.append(raw)
    return items
def decode(part):
    """Decode a percent-encoded URI component.

    Params:
        - part (str): percent-encoded URI component.  Integers (used for
          ports) are returned unchanged.
    """
    if type(part) is int:  # for port
        return part
    pieces = part.split("%")
    decoded = [pieces[0]]
    for piece in pieces[1:]:
        code, remainder = piece[:2], piece[2:]
        value = int(code, 16)
        # Codes outside a single byte are left percent-escaped.
        if 0 <= value < 256:
            decoded.append(chr(value) + remainder)
        else:
            decoded.append("%" + code + remainder)
    return "".join(decoded)
def counting_sort(arr, element_range):
    """Sort *arr* in place (and return it) by counting element frequencies.

    element_range is [lower, upper] inclusive, e.g. [0, 9].  The lower
    bound is used as an offset so ranges not starting at 0 work too
    (the original indexed count[arr[i]] directly, which breaks for a
    nonzero lower bound).
    """
    n = len(arr)
    low = element_range[0]
    span = element_range[1] - low + 1
    output = [0] * n
    count = [0] * span
    # Tally each value relative to the range's lower bound.
    for value in arr:
        count[value - low] += 1
    # Prefix sums turn tallies into final positions.
    for i in range(1, span):
        count[i] += count[i - 1]
    # Walk backwards to keep the sort stable.
    for i in range(n - 1, -1, -1):
        value = arr[i]
        count[value - low] -= 1
        output[count[value - low]] = value
    for i in range(n):
        arr[i] = output[i]
    return arr
def transpose_dataset(dataset):
    """Convert a list of per-demo tuples into per-attribute lists.

    For example, one entry might contain time steps and poses; the result
    contains one list of all time steps and one list of all poses.

    Parameters
    ----------
    dataset : list of tuples
        One entry per demonstration; each entry holds all of the demo's
        attributes (e.g. a time array and a trajectory array).

    Returns
    -------
    dataset : list of lists
        One list per attribute, each of length n_demos; the i-th element
        corresponds to the i-th demo.

    Raises
    ------
    ValueError
        If the dataset is empty.
    """
    if not dataset:
        raise ValueError("Empty dataset")
    n_attributes = len(dataset[0])
    return [[demo[i] for demo in dataset] for i in range(n_attributes)]
def lookup_key_from_chunk_key(chunk_key):
    """Break the lookup_key out of a chunk_key.

    Args:
        chunk_key (str): volumetric chunk key
            "hash&num_items&col_id&exp_id&ch_id&res&x&y&z"

    Returns (str): lookup_key "col_id&exp_id&ch_id"
    """
    return "&".join(chunk_key.split('&')[2:5])
def aggregate_teams(players):
    """Compute per-team aggregate stats for the tournament."""
    summed_stats = {'kills', 'deaths'}     # summed across a team's players
    copied_stats = {'n', 'duration (s)'}   # copied from the first player seen
    teams = {}
    for player in players:
        name = player['team']
        team = teams.get(name)
        if team is None:
            team = {'team': name}
            team.update({k: 0 for k in summed_stats})
            team.update({k: player[k] for k in copied_stats})
            teams[name] = team
        for k in summed_stats:
            team[k] += player[k]
    # Derived stats.
    for team in teams.values():
        kills, deaths = team['kills'], team['deaths']
        team['k/d'] = '{:.2f}'.format(kills / deaths)
        team['+/-'] = '{}{}'.format('+' if kills > deaths else '', kills - deaths)
        team['k per 10min'] = '{:.1f}'.format(600 * kills / team['duration (s)'])
    return sorted(teams.values(), key=lambda t: t['team'].lower())
def any(p, xs):
    """``any :: (a -> Bool) -> [a] -> Bool``

    Applied to a predicate and a list, determines whether any element of
    the list satisfies the predicate.  Short-circuits on the first hit, so
    it can succeed on an infinite list; a False result requires a finite
    list.
    """
    for x in xs:
        # Mirrors the original ``True in (...)`` membership test, which
        # compares the predicate result with == True (not mere truthiness).
        if p(x) == True:
            return True
    return False
def normalize_suffix(suffix: str) -> str:
    """
    Normalize a filename suffix for lookups: lowercase, trimmed, and with
    one leading dot removed.

    Arguments:
        suffix: Filename suffix.
    """
    cleaned = suffix.strip().lower()
    return cleaned[1:] if cleaned.startswith(".") else cleaned
def fetch_process(id, data, children):
    """Fetch a tuple of available process info.

    Returns (process_data, child_handle):
      - process_data: the 'process-<id>' entry of *data* ({} when absent;
        any spawned process should have an entry).
      - child_handle: the live child from *children*, or False when the
        process was spawned by a previous server run (orphaned: only its
        output files remain readable; no return code, cannot terminate it).
    """
    record = data.get('process-%d' % id, {})
    child = children.get(id, False)
    return (record, child)
def pep426_name(name):
    """Normalize a package name to the PEP 426 style standard."""
    return name.replace('_', '-').lower()
def get_digit_number(n, count):
    """Count the digits of a positive integer.

    :param n: int, a positive integer number.
    :param count: int, initial count (usually 1).
    :return: int, the number of digits in n.
    """
    if n < 10:
        return count
    # Floor division keeps n an int; the original used true division,
    # producing floats and risking precision loss for large numbers.
    return get_digit_number(n // 10, count + 1)
def slugify(text: str) -> str:
    """Trim surrounding whitespace, join words with hyphens, and lowercase
    the result."""
    trimmed = text.strip()
    return trimmed.replace(" ", "-").lower()
def _bool(text: str): """Convert str to bool""" if text.lower() in ["0", "false", "none", "null", "n/a", ""]: return False return True
def to_iterable(x, preferred_type=list):
    """
    Convert an object into an iterable collection of *preferred_type*,
    wrapping non-iterable values in a single-element collection.

    >>> to_iterable(1)
    [1]
    >>> to_iterable(1, preferred_type=tuple)
    (1,)
    >>> to_iterable((1,))
    [1]
    """
    try:
        iter(x)
    except TypeError:
        return preferred_type([x])
    return preferred_type(x)
def convert_frac(ratio):
    """Convert a ratio string into a float, e.g. '1.0/2.0' -> 0.5."""
    try:
        return float(ratio)
    except ValueError:
        numerator, denominator = ratio.split('/')
        return float(numerator) / float(denominator)
def clip(x: float, lower_cap: float = -1, higher_cap: float = 1):
    """Return *x* clipped to the range [lower_cap, higher_cap]."""
    return max(lower_cap, min(x, higher_cap))
def calc_quotient(num1, num2):
    """Return num1 divided by num2.

    Raises:
        ZeroDivisionError: when num2 is 0.
    """
    # The original stub assigned None and returned it, never dividing;
    # actually perform the division the docstring promises.
    return num1 / num2
def upper(value):
    """Functor returning the upper-cased string."""
    result = value.upper()
    return result
def convert_value(value):
    """Convert the input string to its native type (bool, float, or int).

    Strings are treated separately in parse_card().
    """
    if value == "T":
        return True
    if value == "F":
        return False
    if "." in value:
        return float(value)
    return int(value)
def general_acquisition_info(metadata):
    """General sentence on data acquisition — the first sentence of the MRI
    data acquisition section.

    Parameters
    ----------
    metadata : :obj:`dict`
        The metadata for the dataset.

    Returns
    -------
    out_str : :obj:`str`
        Output string with scanner information.
    """
    tesla = metadata.get("MagneticFieldStrength", "UNKNOWN")
    manufacturer = metadata.get("Manufacturer", "MANUFACTURER")
    model = metadata.get("ManufacturersModelName", "MODEL")
    return (
        "MR data were acquired using a {tesla}-Tesla {manu} {model} MRI "
        "scanner.".format(tesla=tesla, manu=manufacturer, model=model)
    )
def experiment_config(max_trial, benchmark_algorithms):
    """Return an experiment template configuration dict."""
    vcs_info = {
        "type": "git",
        "is_dirty": False,
        "HEAD_sha": "test",
        "active_branch": None,
        "diff_sha": "diff",
    }
    return dict(
        name="experiment-name",
        space={"x": "uniform(0, 200)"},
        metadata={
            "user": "test-user",
            "orion_version": "XYZ",
            "VCS": vcs_info,
        },
        version=1,
        pool_size=1,
        max_trials=max_trial,
        working_dir="",
        algorithms=benchmark_algorithms[0]["algorithm"],
        producer={"strategy": "NoParallelStrategy"},
    )
def get_exif_data(exif_data, exif_param):
    """Return an EXIF parameter's raw value(s) from EXIF data, as a list.

    @param:
        - exif_data : all EXIF data from the file
        - exif_param : EXIF tag to retrieve

    Returns None when the tag is not present.
    """
    try:
        exif_value = exif_data[exif_param].raw_value
    except KeyError:
        return None
    # Normalize scalar values to a single-element list.  (The original had
    # an unreachable trailing `return None` after the try/except.)
    return exif_value if isinstance(exif_value, list) else [exif_value]
def unflatten(dict_, separator="."):
    """Turn back a flattened dict created by :py:meth:`flatten()` into a
    nested dict.

    >>> unflatten({"my.sub.path": True, "another.path": 3, "my.home": 4})
    {'my': {'sub': {'path': True}, 'home': 4}, 'another': {'path': 3}}
    """
    nested = {}
    for flat_key, value in dict_.items():
        *parents, leaf = flat_key.split(separator)
        node = nested
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value
    return nested
def rotate(times, index, length):
    """Rotate *index* forward by *times* positions, wrapping at *length*."""
    shifted = index + times
    return shifted % length
def zero_counter(number):
    """Count the number of consecutive 0 bits at the end of *number*.

    Raises:
        ValueError: for 0, whose trailing-zero count is unbounded (the
            original implementation looped forever on this input).
    """
    if number == 0:
        raise ValueError("zero_counter(0) is undefined")
    count = 0
    while (number >> count) & 1 == 0:
        count += 1
    return count
def _index_spec_params(spec_params): """ Makes an index of the spec parameters. It dict-ifies the list of spec params provided by the SpecManager, and also returns the set of param ids that are used in groups. This gets returned as a tuple (indexed params, group param ids) """ spec_params_dict = dict() grouped_parents = dict() for p in spec_params: spec_params_dict[p["id"]] = p # groupify the parameters - identify params that are part of groups, and don't include # them in the list separately. children = p.get("parameter_ids") if children: for child in children: grouped_parents[child] = p["id"] return (spec_params_dict, grouped_parents)
def object_belongs_to_module(obj, module_name):
    """
    Check whether *obj*'s type was defined in the given module (or some
    sub-module of it).

    @param obj: Object to be analysed
    @param module_name: Name of the module we want to check
    @return: True if obj belongs to the given module, False otherwise
    """
    module_parts = type(obj).__module__.split('.')
    return module_name in module_parts
def create_formula_map(latex_post_map, slt_post_map):
    """Build a map of {latex-formula: slt-formula-content}.

    latex_post_map and slt_post_map are dicts of
    {formula_id: [single formula item]}; ids present in only one of the
    maps are skipped.
    """
    formula_map = {}
    for formula_id, latex_items in latex_post_map.items():
        slt_items = slt_post_map.get(formula_id)
        if slt_items is None:
            continue
        latex_content = latex_items[0]
        # Copy latex metadata, then swap in the SLT representation of the
        # formula itself.
        entry = {k: v for k, v in latex_content.items() if k != "formula"}
        entry["formula"] = slt_items[0]["formula"]
        formula_map[latex_content["formula"]] = entry
    return formula_map
def technologies_set(projects, sorted=True):
    """Return a list of unique technologies across all given projects.

    :param projects: list of project dicts (each with a 'technologies' key)
    :type projects: list
    :param sorted: whether or not to return a sorted list
    :type sorted: bool
    :return: list
    """
    technologies = set()
    for project in projects:
        technologies.update(project['technologies'])
    result = list(technologies)
    if sorted:
        result.sort()
    return result
def _validate_list_of_or_raise(a_list, t): """Validates a List of items of a specific type""" if not isinstance(a_list, (list, tuple)): raise TypeError("Expected list, got {t}".format(t=type(a_list))) for item in a_list: if not isinstance(item, t): raise TypeError("Expected type {t}, Got {x}".format(t=t, x=type(item))) return a_list
def cardlist_leq(cardlist1, cardlist2):
    """Check whether cardlist1 is contained within cardlist2 (the
    '__filename__' bookkeeping key is ignored)."""
    for card, count in cardlist1.items():
        if card == '__filename__':
            continue
        if card not in cardlist2 or cardlist2[card] < count:
            return False
    return True
def group_by_label(X, labels):
    """Group the elements of *X* according to the parallel list *labels*.

    Returns a dict mapping each label to the list of elements carrying it.
    """
    grouped = {label: [] for label in set(labels)}
    for element, label in zip(X, labels):
        grouped[label].append(element)
    return grouped
def decompose(field):
    """Decompose a string vector-field name like 'gyroscope_1' into the
    tuple ('gyroscope', 1); names without a numeric suffix give
    (field, None)."""
    prefix, sep, suffix = field.rpartition('_')
    if sep and suffix.isdigit():
        return prefix, int(suffix)
    return field, None
def quoted(s):
    """Return *s* wrapped in double quotes."""
    template = '"%s"'
    return template % s
def check_and_remove_file_extension(filename):
    """
    Remove the file extension from *filename*: everything from the first
    dot onward is dropped; names without a dot are returned unchanged.

    :param filename:
    :return:
    """
    # partition keeps the part before the first '.', or the whole string
    # when there is none — identical to the original split-based logic.
    return filename.partition('.')[0]
def convert_from(input, source):
    """Convert a digit string in an arbitrary base to an int.

    :param input: String of digits.
    :param source: all digits for the base, in value order (index = value).
    :return: Int
    """
    base = len(source)
    total = 0
    # Horner's scheme: scan left-to-right instead of powering per digit.
    for digit in input:
        total = total * base + source.index(digit)
    return total
def _is_string_or_bytes(s): """Returns True if input argument is string (unicode or not) or bytes. """ return isinstance(s, str) or isinstance(s, bytes)
def from_experience_to_summary(experience: dict) -> dict:
    """Create an experience summary object, keyed as
    'experience_summary.<field>'."""
    fields = (
        "uid", "title", "description", "external_url", "status", "category",
        "audience", "location", "is_free", "is_eco", "eco_features",
        "hashtags",
    )
    return {
        "experience_summary.{}".format(field): experience.get(field)
        for field in fields
    }
def almost_equal(value1, value2, slope):
    """
    Determine whether two values are close, with *slope* as the degree of
    freedom.
    ----
    Parameters:
        value1 (np.double) : first value
        value2 (np.double) : second value
        slope (np.double)  : tolerance
    ----
    Returns:
        (bool)
    """
    # De Morgan of the original rejection test: value2 must lie inside
    # [value1 - slope, value1 + slope].
    within_lower = (value1 - slope) <= value2
    within_upper = (value1 + slope) >= value2
    return within_lower and within_upper
def validate_HR_data(in_data):  # test
    """Validate input to add_heart_rate for the correct fields.

    Args:
        in_data: dictionary received from a POST request.

    Returns:
        boolean: True only when in_data contains exactly the expected
        fields.  (The original only rejected extra keys, silently
        accepting dicts with required fields missing.)
    """
    expected_keys = {"patient_id", "heart_rate"}
    return set(in_data.keys()) == expected_keys
def folder_name(counter):
    """Return the name of a folder generated by this plugin for *counter*."""
    return "fs{}".format(counter)
def _simplex_dot3D(g, x, y, z): """ 3D dot product """ return g[0] * x + g[1] * y + g[2] * z
def lower(text: str):
    """Lowercase *text*; part of the text preprocessing pipeline."""
    lowered = text.lower()
    return lowered
def get_error_kind(code):
    """Get the kind of the error based on the HTTP error code
    (None for unmapped codes, as before)."""
    kinds = {
        400: 'BadRequest',
        401: 'UnAuthorized',
        403: 'Forbidden',
        404: 'NotFound',
    }
    return kinds.get(code)
def list_startswith(lst, prefix):
    """Check whether `lst` starts with the given `prefix` list."""
    if len(prefix) > len(lst):
        return False
    # Element-wise comparison works across sequence types (list vs tuple).
    for item, expected in zip(lst, prefix):
        if item != expected:
            return False
    return True