content
stringlengths
42
6.51k
def api_logic_to_percentage(api_points: int):
    """Translate an API progress value into a percentage.

    The API reports progress in half-percent steps (0 -> 0%, 1 -> 2%, ...,
    49 -> 98%) and uses -1 as a sentinel meaning "done" (100%).
    """
    DONE_SENTINEL = -1
    if api_points == DONE_SENTINEL:
        return 100
    return 2 * api_points
def filter_data(data, *args, **kwargs):
    """Filter a list of mapping objects by attribute values.

    Positional form: ``filter_data(data, attr, val1, val2, ...)`` keeps items
    whose ``d[attr]`` is one of the given values.
    Keyword form: each ``k=v`` pair keeps items whose ``d.get(k)`` equals v
    (or is contained in v when v is a list).  Both filters are AND-ed.

    Args:
        data: iterable of mapping objects.
        *args: optional (attr, value, ...) positional filter.
        **kwargs: attr=value filters.

    Returns:
        list: the items satisfying every active filter.
    """
    def as_list(v):
        # The original `isinstance(v, list) and v or [v]` mis-wraps a falsy
        # list ([] -> [[]]); a real conditional lets lists pass through.
        return v if isinstance(v, list) else [v]

    def positional_ok(d):
        return True

    def keyword_ok(d):
        return True

    if len(args) >= 2:
        attr, vals = args[0], set(args[1:])

        def positional_ok(d):  # noqa: F811 - deliberate override
            return d[attr] in vals

    if kwargs:
        def keyword_ok(d):  # noqa: F811 - deliberate override
            return all(d.get(k, None) in as_list(v) for k, v in kwargs.items())

    return [d for d in data if positional_ok(d) and keyword_ok(d)]
def hex_to_rgb(hex_color: str) -> tuple:
    """Convert a hex color string (e.g. "#ff8800" or "#f80") to an RGB tuple.

    Shorthand 3-digit colors expand per CSS rules: each digit is doubled
    ("f80" -> "ff8800").  The original used ``hex_color * 2`` which produced
    "f80f80" and therefore decoded to the wrong color.

    Args:
        hex_color: hex color, with or without a leading "#".

    Returns:
        tuple: (red, green, blue) ints in 0..255.
    """
    hex_color = hex_color.lstrip("#")
    if len(hex_color) == 3:
        # CSS shorthand: double each digit, not the whole string.
        hex_color = "".join(ch * 2 for ch in hex_color)
    return int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16)
def specimen_reporter(server_name, url_base, families):
    """Build a newline-joined report of specimen URLs per family.

    Args:
        server_name: label used in the report header.
        url_base: format string with one placeholder for the family name
            (spaces are replaced with '+').
        families: iterable of family names, reported in sorted order.

    Returns:
        str: the report; contains "No urls to inspect" when families is empty.
    """
    lines = [f"***{server_name} specimens to inspect***"]
    lines.extend(url_base.format(family.replace(" ", "+"))
                 for family in sorted(families))
    if len(lines) == 1:  # header only: nothing was added
        lines.append("No urls to inspect")
    lines.append("\n")
    return "\n".join(lines)
def get_mpt_sb_mem_addrs_from_coords(x, y, p):
    """Compute the byte offset for entry (x, y, p).

    Entries advance by 18 per y step and 18 * 8 per x step, two bytes each.

    :param x: x coordinate
    :param y: y coordinate
    :param p: index within the (x, y) cell
    :return: byte offset of the entry
    """
    ENTRIES_PER_Y = 18
    ENTRIES_PER_X = 18 * 8
    BYTES_PER_ENTRY = 2
    entry_index = p + ENTRIES_PER_Y * y + ENTRIES_PER_X * x
    return entry_index * BYTES_PER_ENTRY
def func_ab_pq(a=2, b=3, *, p="p", q="q"):
    """Demo function mixing positional and keyword-only parameters.

    Parameters
    ----------
    a, b: int
    p, q: str, optional

    Returns
    -------
    tuple
        (None, None, a, b, None, p, q, None) — inputs interleaved with
        None placeholders.
    """
    placeholders = (None, None, a, b, None, p, q, None)
    return placeholders
def mk2mass_ben_poly(mk):
    """Benedict et al. (2016) polynomial: absolute 2MASS K magnitude -> mass.

    Evaluates the paper's original "reverse model" polynomial in (mk - 7.5)
    via Horner's scheme.  The domain of applicability is NOT checked here;
    that is left to the caller.

    Usage: mass = mk2mass_ben_poly(mk)

    References: Benedict et al. (2016) AJ 152 141

    :param mk: absolute 2MASS K magnitude
    :return: mass in solar masses
    """
    dm = mk - 7.5
    # Horner evaluation; coefficients from highest to lowest order.
    return (((-0.0032 * dm + 0.0038) * dm + 0.0400) * dm - 0.1352) * dm + 0.2311
def bench1(x):
    """Benchmark function f(x) = x[0] ** 2 for test purposes.

    Single minimum: f(x*) = 0 at x* = 0.
    """
    first = x[0]
    return first * first
def subdirectory_for_variant(os: str, abi: str):
    """Calculate the relative directory for a particular LiteCore variant.

    Parameters
    ----------
    os : str
        The normalized OS
    abi : str
        The normalized ABI

    Returns
    -------
    str
        The relative path at which to store the variant
    """
    # macOS builds are stored under a single ABI regardless of input.
    effective_abi = "x86_64" if os == "macos" else abi
    return f"{os}/{effective_abi}"
def find_isolated_nodes(graph):
    """Return a list of nodes that have no neighbours.

    Args:
        graph: mapping of node -> adjacency collection.

    Returns:
        list: the nodes whose adjacency collection is empty.
    """
    # The original used `isolated += node`, which splices an iterable node
    # (e.g. a multi-character string key) in character by character;
    # collecting the node itself is what was intended.
    return [node for node in graph if not graph[node]]
def pipe(data, *funcs):
    """Thread a value through a sequence of functions.

    ``pipe(data, f, g, h)`` is equivalent to ``h(g(f(data)))`` — like a
    UNIX pipeline ``$ cat data | f | g | h``.

    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'

    See Also:
        compose
        thread_first
        thread_last
    """
    result = data
    for stage in funcs:
        result = stage(result)
    return result
def _create_environment_var_name(reference_name, suffix=None):
    """Build an environment-variable name from a reference name and suffix.

    Dashes become underscores, the optional suffix is appended with an
    underscore separator, and the result is upper-cased.
    """
    base = reference_name.replace('-', '_')
    if not suffix:
        return base.upper()
    return '{}_{}'.format(base, suffix).upper()
def is_square(words):
    """Check that the word list forms a word square, column by column.

    Every later word's first letter must appear in the first word, and the
    same must hold recursively once the first column is stripped off.
    """
    if len(words) <= 1:
        return True
    column_ok = all(word[0] in words[0] for word in words[1:])
    return column_ok and is_square([word[1:] for word in words[1:]])
def duplicates(doc):
    """Predicate: True only for XFormDuplicate documents."""
    DUPLICATE_TYPE = "XFormDuplicate"
    return doc["doc_type"] == DUPLICATE_TYPE
def flatten_dict(dct, separator='-->', allowed_types=(int, float, bool)):
    """Return string identifiers for every allowed-type leaf in dct.

    Recursively scans dct and records a path string for each value whose
    exact type is in allowed_types; nested dict keys are joined with
    separator.  Keys starting with '__' are skipped.

    eg:
        dct = {'a': 'a string', 'b': {'c': 1.0, 'd': True}}
        flatten_dict(dct) -> ['b-->c', 'b-->d']

    Args:
        dct: dictionary to scan.
        separator: string joining nested key paths.
        allowed_types: value types to record.  The default is now a tuple
            (the original mutable list default risks shared state), and it
            is forwarded to the recursive call (the original silently fell
            back to the defaults for nested dicts).

    Returns:
        list of str identifiers, in sorted-key order.
    """
    flat_list = []
    for key in sorted(dct):
        if key[:2] == '__':
            continue  # private/internal entries are skipped
        value = dct[key]
        if type(value) in allowed_types:
            flat_list.append(str(key))
        elif type(value) is dict:
            sub_list = flatten_dict(value, separator, allowed_types)
            flat_list += [str(key) + separator + sub for sub in sub_list]
    return flat_list
def map_range(x, in_min, in_max, out_min, out_max):
    """Linearly map x from [in_min, in_max] onto [out_min, out_max].

    The result is clamped to the output range, whichever way round the
    output bounds are given.

    :return: mapped and clamped value
    :rtype: float
    """
    mapped = (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
    # Normalize the clamp bounds so one expression handles both orderings.
    lo, hi = (out_min, out_max) if out_min <= out_max else (out_max, out_min)
    return min(max(mapped, lo), hi)
def sort_object_properties(object):
    """Return the sort rank for a property name of an MSTR object.

    Known names get a fixed rank (id first, ACL info last); anything else
    gets the middle rank 50.
    """
    RANK_BY_NAME = {
        'id': 1, 'name': 2, 'description': 3, 'alias': 4, 'type': 5,
        'subtype': 6, 'ext_type': 7, 'acg': 101, 'acl': 102,
    }
    DEFAULT_RANK = 50
    return RANK_BY_NAME.get(object, DEFAULT_RANK)
def get_tokenized_imdb(data):
    """Tokenize IMDB reviews by lower-casing and splitting on single spaces.

    data: list of [review_text, label] pairs; labels are discarded.
    """
    return [[token.lower() for token in review.split(' ')]
            for review, _ in data]
def _trim_to_bounds(corners, image_shape):
    """Clamp a rect to the image bounds.

    corners: plain tuple in (top, right, bottom, left) order
    image_shape: (height, width, ...) array shape
    """
    top, right, bottom, left = corners[0], corners[1], corners[2], corners[3]
    height, width = image_shape[0], image_shape[1]
    return max(top, 0), min(right, width), min(bottom, height), max(left, 0)
def get_analysis_runner_metadata(
    timestamp,
    dataset,
    user,
    access_level,
    repo,
    commit,
    script,
    description,
    output_prefix,
    driver_image,
    config_path,
    cwd,
    **kwargs,
):
    """Build well-formed analysis-runner metadata.

    The listed keys are required; any extra metadata supplied via **kwargs
    is merged into the result.
    """
    # 'test' access writes to the test bucket, everything else to main.
    bucket_type = 'test' if access_level == 'test' else 'main'
    metadata = {
        'timestamp': timestamp,
        'dataset': dataset,
        'user': user,
        'accessLevel': access_level,
        'repo': repo,
        'commit': commit,
        'script': script,
        'description': description,
        'output': f'gs://cpg-{dataset}-{bucket_type}/{output_prefix}',
        'driverImage': driver_image,
        'configPath': config_path,
        'cwd': cwd,
    }
    metadata.update(kwargs)
    return metadata
def find_arg_index_by_name(decls, name):
    """Return the index of the declaration named `name`, or -1.

    Args:
        decls - list of Declaration objects, or None
        name - argument name to find
    """
    if decls is None:
        return -1
    return next((i for i, decl in enumerate(decls) if decl.name == name), -1)
def unpad(val: str) -> str:
    """Strip any trailing '=' padding from a base64 value."""
    while val.endswith("="):
        val = val[:-1]
    return val
def binary_search(array, element):
    """Recursive binary search for membership in a sorted list.

    :param array: sorted list
    :param element: value to look for
    :return: True if element is in array, else False
    """
    size = len(array)
    if size == 0:
        return False
    if size == 1:
        return True if array[0] == element else False
    mid = size // 2
    if size == 2:
        half = array[:1] if array[mid] > element else array[1:]
    elif array[mid] >= element:
        half = array[:mid + 1]
    else:
        half = array[mid:]
    return binary_search(half, element)
def mean_arterial_pressure(diastolic_bp, systolic_bp):
    """ Mean arterial pressure (MAP) via the standard approximation
    MAP = (2 * diastolic + systolic) / 3, weighting diastole twice because
    the heart spends roughly two thirds of each cycle in diastole.

    args:
        diastolic_bp (float): diastolic blood pressure (e.g. mmHg)
        systolic_bp (float): systolic blood pressure, same units

    Returns:
        float: mean arterial pressure, same units as the inputs

    NOTE(review): the original docstring described the Karvonen target
    heart-rate formula, which does not match this computation.
    """
    return ((2 * diastolic_bp) + systolic_bp) / 3
def symptom_similarity(symptoms_tuple_A, symptoms_tuple_B):
    """Score how similar two patients' symptom profiles are.

    Each argument is a tuple (present_set, absent_set).  Agreements
    (present/present and absent/absent) add to the score; disagreements
    (present in one, absent in the other) subtract.

    Returns:
        int: present_present + absent_absent - present_absent - absent_present
    """
    present_a, absent_a = symptoms_tuple_A
    present_b, absent_b = symptoms_tuple_B
    agreements = len(present_a & present_b) + len(absent_a & absent_b)
    disagreements = len(present_a & absent_b) + len(absent_a & present_b)
    return agreements - disagreements
def checkBC(bc):
    """ Validate a boundary-condition spec.

    A single string is broadcast to both sides.  Each entry must be either
    'dirichlet' or 'neumann'.

    :param bc: str, or two-element list of str
    :return: two-element list of validated bc strings
    :raises AssertionError: if bc is malformed (contract kept from the
        original; isinstance replaces the non-idiomatic `type(x) is` checks)
    """
    if isinstance(bc, str):
        bc = [bc, bc]
    assert isinstance(bc, list), 'bc must be a list'
    assert len(bc) == 2, 'bc must have two elements'
    for bc_i in bc:
        assert isinstance(bc_i, str), "each bc must be a string"
        assert bc_i in ['dirichlet', 'neumann'], "each bc must be either, 'dirichlet' or 'neumann'"
    return bc
def identity(x):
    """Return x unchanged.

    The `import sys` is retained on purpose: per the original note, calling
    this has the side-effect of surfacing exceptions we failed to process
    in C land.
    """
    import sys  # noqa: F401 - intentional side-effect import
    return x
def remove_dups(seq):
    """Remove duplicates from a sequence, preserving first-seen order."""
    # dict preserves insertion order (Python 3.7+), so its keys are exactly
    # the unique elements in original order.
    return list(dict.fromkeys(seq))
def get_params_size(search_string):
    """Build MediaWiki API parameters that fetch revision sizes for the
    given titles (used for everything linked to the initial term article).
    """
    params = dict(
        action="query",
        prop="revisions",
        rvprop="size",
        titles=search_string,
        format="json",
    )
    return params
def credentials_url(url, token):
    """Insert `token@` credentials right after the scheme's '//'."""
    insert_at = url.find("//") + 2
    prefix, suffix = url[:insert_at], url[insert_at:]
    return prefix + token + "@" + suffix
def digits(f):
    """Count the significant fractional digits of a number.

    Trailing zeros are not counted.  Values whose str() has no decimal
    point (ints, or anything without a '.') now count as 0 digits — the
    original raised IndexError for them.

    :param f: a number (or anything whose str() looks like one)
    :return: int count of significant fractional digits
    """
    text = str(f)
    if '.' not in text:
        return 0
    return len(text.split('.')[1].rstrip('0'))
def split_lines(x):
    """Split a string (or nested lists of strings) into lists of lines.

    Raises:
        TypeError: for anything that is not a str or list.
    """
    if isinstance(x, str):
        return x.splitlines()
    if isinstance(x, list):
        return [split_lines(item) for item in x]
    raise TypeError(f"{type(x)} {str(x)[:20]}")
def first(iterable, default=None, key=None):
    """Return the first element of *iterable* that evaluates true, else
    ``None`` or the optional *default*.  Similar to :func:`one`.

    >>> first([0, False, None, [], (), 42])
    42
    >>> first([0, False, None, [], ()]) is None
    True
    >>> first([0, False, None, [], ()], default='ohai')
    'ohai'

    The optional *key* is a one-argument predicate like the one used for
    *filter()*; supply it in keyword form.  E.g. first even number:

    >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
    4

    Contributed by Hynek Schlawack, author of `the original standalone
    module`_.

    .. _the original standalone module: https://github.com/hynek/first
    """
    predicate = bool if key is None else key
    for item in iterable:
        if predicate(item):
            return item
    return default
def heap_sort(li):
    """ [list of int] => [list of int]

    In-place heap sort, ascending.  A max-heap is built over the array
    (children of index k live at 2k+1 and 2k+2, so lst[k] >= both), then
    the root (current maximum) is repeatedly swapped to the end of the
    shrinking unsorted region and the heap is restored.

    Fixes vs. the original: the stray debug ``print(li)`` is removed, and
    heap construction starts at the last internal node (len // 2 - 1)
    instead of len(li) — indices >= len // 2 are leaves, so heapifying
    them was a no-op.
    """
    def heapify(lst, heap_size, root):
        """Sift lst[root] down so the subtree at root satisfies the
        max-heap property within lst[:heap_size]."""
        largest = root
        left_child = 2 * root + 1
        right_child = 2 * root + 2
        if left_child < heap_size and lst[largest] < lst[left_child]:
            largest = left_child
        if right_child < heap_size and lst[largest] < lst[right_child]:
            largest = right_child
        if largest != root:
            lst[root], lst[largest] = lst[largest], lst[root]
            heapify(lst, heap_size, largest)

    # Build the max-heap bottom-up from the last internal node.
    for i in range(len(li) // 2 - 1, -1, -1):
        heapify(li, len(li), i)

    # Repeatedly move the current maximum to the end of the unsorted
    # region, then restore the heap over the remaining prefix.
    for end in range(len(li) - 1, 0, -1):
        li[end], li[0] = li[0], li[end]
        heapify(li, end, 0)
    return li
def analysis_vindex_usages(analysis):
    """ Return the Var-usage index keyed by (namespace, name).

    'vindex_usages' stands for 'Var index'; an empty dict is the default
    when the analysis has no such entry.
    """
    EMPTY = {}
    return analysis.get("vindex_usages", EMPTY)
def get(
    built_on=None,
    extra_params=None,
    sign=None,
):
    """ Build the request parameters for CryptoCompare's coin list.

    This is not the full list of coins in the system — just those
    CryptoCompare has researched.

    :param built_on: platform the token is built on (max 10 chars).
    :param extra_params: name of your application (recommended; max 2000 chars).
    :param sign: if true, the server signs the requests (useful for smart
        contracts; off by default).
    :return: dict of request parameters for the coin list endpoint.
    """
    params = {}
    params['builtOn'] = built_on
    params['extraParams'] = extra_params
    params['sign'] = sign
    return params
def normalize(key):
    """Normalize a secret: drop surrounding whitespace and inner spaces,
    then pad with '=' to a multiple of 8 (as base32 decoding expects)."""
    cleaned = key.strip().replace(' ', '')
    # upper-casing skipped on purpose: b32decode has a casefold argument
    remainder = len(cleaned) % 8
    if remainder:
        cleaned += '=' * (8 - remainder)
    return cleaned
def gen_end(first_instruction_address):
    """ Generate an end (E) record.

    The address is rendered as (at least) 6 zero-padded upper-case hex
    digits after the record-type column.
    """
    ADDRESS_WIDTH = 6
    address_field = format(first_instruction_address, 'X').zfill(ADDRESS_WIDTH)
    return "E" + address_field
def generate_lookup_entries(w, max_edit_distance=0):
    """Derive every string obtainable from w by deleting up to
    max_edit_distance characters (w itself included)."""
    result = {w}
    frontier = {w}
    for _ in range(max_edit_distance):
        next_frontier = set()
        for word in frontier:
            if len(word) <= 1:
                continue  # nothing useful left to delete
            for i in range(len(word)):
                shorter = word[:i] + word[i + 1:]
                if shorter not in result:
                    result.add(shorter)
                    next_frontier.add(shorter)
        frontier = next_frontier
    return result
def isentropic_sound_speed(abar, zbar, gamma, tele):
    """ Ion sound speed for an ideal gas (NRL formulary).

    Parameters:
    -----------
      - abar: mean atomic mass number
      - zbar: mean ionization
      - gamma: adiabatic index
      - tele: electron temperature [eV]

    Returns:
    -----------
      adiabatic sound speed [km/s]
    """
    PREFACTOR = 9.79  # NRL formulary coefficient for km/s with T in eV
    return PREFACTOR * (gamma * zbar * tele / abar) ** 0.5
def _copy_dir_entry(workdir: str, user_id: int, user_group: int,
                    dirname: str) -> str:
    """Return the Dockerfile snippet copying one extra subdirectory from
    the build context into the container, owned by user_id:user_group.
    """
    owner = f"{user_id}:{user_group}"
    return (
        f"# Copy {dirname} into the Docker container.\n"
        f"COPY --chown={owner} {dirname} {workdir}/{dirname}\n"
    )
def pull_tag_dict(data):
    """Pop the 'Tags' list of {'Key': ..., 'Value': ...} dicts out of data
    and return it as a plain {key: value} dict.

    :param data: dict collected from the collector (mutated: 'Tags' is
        removed).
    :returns dict: tag names mapped to their values ({} when absent/falsy).
    """
    raw_tags = data.pop('Tags', {}) or {}
    if not raw_tags:
        return raw_tags  # already normalized to {}
    return {tag['Key']: tag['Value'] for tag in raw_tags}
def handle_not_found_error(e):
    """Handle a metric-not-found exception with a 404 response tuple."""
    body = "Couldn't find resource:\n%s" % e
    return body, 404
def format_input_version(input_version):
    """Render a version as a string left-padded with zeroes to width 2."""
    WIDTH = 2
    return str(input_version).zfill(WIDTH)
def slugify(s):
    """Lower-case s, replacing every non-alphanumeric character with '_'."""
    normalized = [ch if ch.isalnum() else "_" for ch in s]
    return "".join(normalized).lower()
def minCardinalityTLCovers(tlcovs):
    """ Prune top-level covers down to those of minimum cardinality.

    Inputs:
      tlcovs: list of top-level covers as returned by explain; each entry
        is a 5-tuple whose first element u determines cardinality len(u).

    Outputs:
      tlcovs_mc: the covers whose len(u) equals the minimum.
      mc: the minimum cardinality found.
    """
    mc = min(len(cover[0]) for cover in tlcovs)
    tlcovs_mc = [cover for cover in tlcovs if len(cover[0]) == mc]
    return tlcovs_mc, mc
def normalizeToKwargs(expectedKeys, argsTuple, kwargsDict):
    """ Normalize call arguments to a single keyword dict.

    For functions that accept either positional or keyword arguments:
    positional values are zipped onto expectedKeys in order; keys not
    supplied are filled with None.

    :param expectedKeys: ordered iterable of parameter names
    :param argsTuple: positional arguments (tuple), possibly empty
    :param kwargsDict: keyword arguments (dict), possibly empty
    :return: dict with every expected key present
    :raises RuntimeError: when both positional and keyword args are given
    """
    normalized = {key: None for key in expectedKeys}
    if argsTuple and kwargsDict:
        # message typo fixed: "positional of keyword" -> "positional or keyword"
        errMsg = 'mixed argument types - must be either positional or keyword'
        raise RuntimeError(errMsg)
    if argsTuple:
        kwargsDict = dict(zip(expectedKeys, argsTuple))
    normalized.update(kwargsDict)
    return normalized
def clean_movie_title(movie_title: str) -> str:
    """ Clean up a movie title for search: collapse whitespace and
    capitalize each word. """
    words = movie_title.split()
    capitalized = [word.capitalize() for word in words]
    return " ".join(capitalized).strip()
def update_dropdown(selected_data):
    """Update dropdown status after a neighborhood map selection.

    Reads ``selected_data['points'][0]['pointIndex']``; when selected_data
    is None (no selection -> TypeError on subscripting), falls back to 92
    (University District).

    Parameters
    ----------
    selected_data : dict
        Selected data in neighborhood map.

    Returns
    -------
    int
        Index of selected neighborhood (0-102).
    """
    DEFAULT_NEIGHBORHOOD = 92
    try:
        first_point = selected_data['points'][0]
        return first_point['pointIndex']
    except TypeError:
        return DEFAULT_NEIGHBORHOOD
def tuple_from_s(s):
    """ Parse a card string (e.g. "10h", "Qs") into a (rank, suit) tuple.

    Ranks map 2..10 -> 0..8 and J/Q/K/A -> 9..12; suits c/d/h/s -> 0..3
    (case-insensitive).  Returns False for anything unparsable, including
    the empty string.
    """
    if s == "":
        return False
    rank_text, suit_char = s[:-1], s[-1]
    SUIT_CODES = {'c': 0, 'd': 1, 'h': 2, 's': 3}
    suit = SUIT_CODES.get(suit_char.lower())
    if suit is None:
        return False
    FACE_CODES = {'j': 9, 'q': 10, 'k': 11, 'a': 12}
    if rank_text.isdigit() and 1 < int(rank_text) < 11:
        rank = int(rank_text) - 2
    elif rank_text.lower() in FACE_CODES:
        rank = FACE_CODES[rank_text.lower()]
    else:
        return False
    return (rank, suit)
def ffmpeg_command(images_dirname, output_filename, width, height, fps):
    """Assemble the ffmpeg command line encoding numbered PNG frames into
    an H.264 video."""
    parts = [
        "ffmpeg -y -r ", str(fps),
        " -f image2 -s ", str(width), "x", str(height),
        " -i ", images_dirname, "/%05d.png ",
        "-threads 2 -vcodec libx264 -crf 25 -pix_fmt yuv420p ",
        output_filename,
    ]
    return "".join(parts)
def strip_whitespace(value):
    """Strip surrounding whitespace when value supports .strip();
    otherwise return value unchanged."""
    stripper = getattr(value, 'strip', None)
    if stripper is None:
        return value
    return stripper()
def split(attr):
    """ Split '<key>=<val>' into ('<key>', '<val>'); a bare string becomes
    ('id', <string>). Only the first two '='-separated pieces are used."""
    if '=' not in attr:
        return 'id', attr
    pieces = attr.split('=')
    return pieces[0], pieces[1]
def get_unique_filename(names: list, name: str, ext: str) -> str:
    """ Return "<name>.<ext>" (or "<name><i>.<ext>") not present in names.

    :param names: list of taken filenames, WITH extensions
    :param name: desired base name
    :param ext: extension WITHOUT the dot
    :return: a unique filename not present in names
    """
    candidate = f"{name}.{ext}"
    counter = 0
    while candidate in names:
        counter += 1
        candidate = f"{name}{counter}.{ext}"
    return candidate
def max_subarray_sum(a):
    """ Maximum sum over contiguous subarrays of a, floored at 0.

    Taking no elements is allowed, so an all-negative (or empty) input
    yields 0.  E.g. [34, -50, 42, 14, -5, 86] -> 137 (42 + 14 - 5 + 86)
    and [-5, -1, -8, -9] -> 0.

    This is Kadane's algorithm: best_ending is the best sum of a subarray
    ending at the current element.  The original recomputed suffix sums at
    every index (accidental O(n^2)) and raised ValueError on an empty
    list; this version is O(n), returns 0 for empty input, and produces
    identical results otherwise.
    """
    best = 0
    best_ending = 0
    for value in a:
        # Either extend the best subarray ending just before, or restart.
        best_ending = max(value, best_ending + value)
        best = max(best, best_ending)
    return best
def set_bit(bb, bit):
    """ Return bb with the given bit position set to 1. """
    mask = 1 << bit
    return bb | mask
def _rack_data_conv(data):
    """ Convert a racktivity CSV field to a dict.

    Input format: "<client>;<port>[;<powermodule>]" (powermodule optional).
    Output: {'client': ..., 'port': int, 'powermodule': ... or None}

    Raises RuntimeError when the field has fewer than 2 or more than 3
    segments.

    Arguments:
        data str -- data in the CSV field

    Returns:
        dict -- data in dict form
    """
    segments = data.split(";")
    if len(segments) < 2:
        raise RuntimeError("Not enough segments in racktivity data. Found: %s" % data)
    if len(segments) > 3:
        raise RuntimeError("too many segments in racktivity data. Found: %s" % data)
    if len(segments) == 2:
        segments.append(None)  # powermodule is optional
    return {
        'client': segments[0],
        'port': int(segments[1]),
        'powermodule': segments[2],
    }
def check_auth(username, password):
    """Check whether a username/password combination is valid.

    Uses hmac.compare_digest so the comparison takes constant time and
    does not leak how many leading characters matched.

    NOTE(review): credentials are hard-coded; move them to configuration
    or a secrets store for anything beyond a demo.
    """
    import hmac
    user_ok = hmac.compare_digest(username, 'admin')
    pass_ok = hmac.compare_digest(password, 'secret')
    return user_ok and pass_ok
def have_program(program, path=None):
    """ Return ``True`` if a ``program`` executable is found on ``path``.

    INPUT:

    - ``program`` - a string, the name of the program to check.

    - ``path`` - string or None. Paths to search for ``program``,
      separated by ``os.pathsep``. If ``None``, use the :envvar:`PATH`
      environment variable.

    OUTPUT: bool

    EXAMPLES::

        sage: from sage.misc.sage_ostools import have_program
        sage: have_program('ls')
        True
        sage: have_program('there_is_not_a_program_with_this_name')
        False
    """
    import os
    search_path = os.environ.get('PATH', "") if path is None else path
    for directory in search_path.split(os.pathsep):
        candidate = os.path.join(directory, program)
        try:
            if os.access(candidate, os.X_OK):
                return True
        except OSError:
            pass  # unreadable directory entries are simply skipped
    return False
def ndre2(b5, b7):
    """ Normalized Difference Red-edge 2 (Barnes et al., 2000).

    .. math:: NDRE2 = (b7 - b5)/(b7 + b5)

    :param b5: Red-edge 1.
    :type b5: numpy.ndarray or float
    :param b7: Red-edge 3.
    :type b7: numpy.ndarray or float
    :returns NDRE2: Index value

    .. Tip:: Barnes, E., et al. (2000). Coincident detection of crop water
        stress, nitrogen status and canopy density using ground based
        multispectral data. Proceedings of the 5th International
        Conference on Precision Agriculture, 16-19 July 2000.
    """
    difference = b7 - b5
    total = b7 + b5
    return difference / total
def convert(arg_in):
    """Convert arg_in to int or float when possible; return it as-is otherwise.

    Falsy inputs (None, '', 0, ...) return None — note the *number* 0 also
    maps to None, preserving the original behaviour.  Integral values
    ("3", "3.0") collapse to int; other numerics to float.
    """
    if not arg_in:
        return None
    try:
        as_float = float(arg_in)
        as_int = int(as_float)
    except (TypeError, ValueError, OverflowError):
        # bare `except:` replaced with the conversions' actual failure
        # modes: non-numeric strings (ValueError), unsupported types
        # (TypeError), NaN/inf (ValueError/OverflowError on int()).
        return arg_in
    return as_int if as_int == as_float else as_float
def is_license_plate(length, code):
    """ Check that a license plate is in the proper format.

    `length` must be all digits, and every character of `code` must be a
    letter or digit — but never one of the nucleotide letters A/T/G/C.

    :param length: Encoded sequence length (string)
    :param code: license plate for the sequence
    :return: True when valid, else False
    """
    if not length.isdigit():
        return False
    FORBIDDEN = ('A', 'T', 'G', 'C')
    for ch in code:
        if ch in FORBIDDEN:
            return False
        if not (ch.isalpha() or ch.isdigit()):
            return False
    return True
def e_norte(arg):
    """ e_norte: direction --> bool

    True when arg is the element 'N' (north), False otherwise.
    (Docstring translated from the original Portuguese.)
    """
    NORTH = 'N'
    return arg == NORTH
def if_error_false(cb, *args, **kargs):
    """Call cb(*args, **kargs); return False when it raises.

    The broad Exception swallow is the function's documented contract
    ("if generate error return false") — a deliberate best-effort call.

    Args:
        cb (function): callable to invoke

    Returns:
        Any: cb's return value, or False on any Exception
    """
    try:
        result = cb(*args, **kargs)
    except Exception:
        return False
    return result
def weightWENO(d, s, w, epsilon):
    """ deriv = weightWENO(d, s, w, epsilon)

    Combine three candidate WENO derivatives into one weighted value.
    Each candidate d[i] receives weight alpha_i = w[i] / (s[i] + epsilon)^2,
    normalized over the three.  See (3.39 - 3.41) Osher and Fedkiw.

    :param d: three candidate derivative values
    :param s: three smoothness indicators
    :param w: three optimal weights
    :param epsilon: small regularizer avoiding division by zero
    :return: the weighted derivative
    """
    alpha1 = w[0] / (s[0] + epsilon) ** 2
    alpha2 = w[1] / (s[1] + epsilon) ** 2
    alpha3 = w[2] / (s[2] + epsilon) ** 2
    weighted_sum = alpha1 * d[0] + alpha2 * d[1] + alpha3 * d[2]
    return weighted_sum / (alpha1 + alpha2 + alpha3)
def _translate_keys(cons):
    """Coerce a console record into the API's nested dict format."""
    info = {
        'id': cons['id'],
        'console_type': cons['pool']['console_type'],
    }
    return {'console': info}
def parse_header(line):
    """Parse a header line.

    Args:
        line: A header line as a string.

    Returns:
        None at end of headers (empty string or "\\r\\n"); the stripped
        continuation text when the line starts with whitespace; otherwise
        a (name, value) tuple.

    Raises:
        ValueError: If the line has no ':' separator.
    """
    if not line or line == "\r\n":
        return None
    if line.startswith((" ", "\t")):
        return line[1:].rstrip()
    # split (not partition) so a missing ':' raises ValueError, as documented
    name, value = line.split(":", 1)
    return (name.strip(), value.strip())
def parse_distro_info_path(path):
    """Break a distro_info path into (file, repo, remote-flag).

    The repo is everything before the last '/', the file everything after;
    repos starting with "http" are flagged remote.
    """
    pieces = path.strip().rsplit("/", 1)
    info_repo = pieces[0]
    info_file = pieces[1]
    remote = info_repo.startswith("http")
    return info_file, info_repo, remote
def get_wildcard_values(config):
    """Parse user-supplied 'key=value' wildcard strings into a dict."""
    pairs = [wc.split("=") for wc in config.get("wildcards", [])]
    return dict(pairs)
def break_string_sequence_to_words(seq):
    """ Break a sequence of multi-word strings into a set of single words.

    :param seq: The sequence containing multi-word strings
    :return: A set containing all the individual words

    >>> break_string_sequence_to_words(['hello world', 'foo bar', 'hello', 'red']) \
    == set({'hello', 'world', 'foo', 'bar', 'red'})
    True
    """
    words = set()
    for text in seq:
        words.update(text.split())
    return words
def shorten_url(s, len_limit=75):
    """ Shorten a long URL for display by collapsing middle path segments
    into "/.../" so the result stays near len_limit.

    URLs at or under len_limit are returned unchanged.  The leading
    segment and the final segment are always kept.

    Fixes the original's fall-through: when no middle segment pushed the
    prefix past the limit, the loop completed without a return and the
    function yielded None; the fully reassembled URL is now returned.
    """
    if len(s) <= len_limit:
        return s
    segments = s.split("/")
    head, middles, tail = segments[0], segments[1:-1], segments[-1]
    short = head
    for segment in middles:
        # once appending the tail would break the limit, elide the rest
        if len(short + tail) + 5 > len_limit:
            return short + "/.../" + tail
        short += "/" + segment
    return short + "/" + tail
def convert_device_size(unformatted_size, units_to_covert_to):
    """ Convert a size string to a number in the requested units.

    Args:
        unformatted_size (str): size with a two-char binary suffix,
            e.g. '1Gi' or '100Mi' (Ti/Gi/Mi/Ki)
        units_to_covert_to (str): target units (TB/GB/MB/KB/B)

    Returns:
        int: the converted size
    """
    suffix = unformatted_size[-2:]
    magnitude = int(unformatted_size[:-2])
    # Table kept literal (rather than computed from factors) so the exact
    # arithmetic — and therefore float rounding — matches expectations.
    conversion = {
        "TB": {"Ti": magnitude, "Gi": magnitude / 1000, "Mi": magnitude / 1e6, "Ki": magnitude / 1e9},
        "GB": {"Ti": magnitude * 1000, "Gi": magnitude, "Mi": magnitude / 1000, "Ki": magnitude / 1e6},
        "MB": {"Ti": magnitude * 1e6, "Gi": magnitude * 1000, "Mi": magnitude, "Ki": magnitude / 1000},
        "KB": {"Ti": magnitude * 1e9, "Gi": magnitude * 1e6, "Mi": magnitude * 1000, "Ki": magnitude},
        "B": {"Ti": magnitude * 1e12, "Gi": magnitude * 1e9, "Mi": magnitude * 1e6, "Ki": magnitude * 1000},
    }
    return conversion[units_to_covert_to][suffix]
def cdn_cache_control(val):
    """Translate a cdn_cache setting into a Cache-Control HTTP header.

    Accepts None (default 1 hour), a literal header string, a bool
    (True -> 1 hour, False -> no-cache), or a non-negative int of seconds
    (0 -> no-cache).

    Raises:
        ValueError: for a negative int.
        NotImplementedError: for unsupported types.  (The original built
        this message as ``type(val) + str``, which itself raised
        TypeError instead of the intended error.)
    """
    DEFAULT_HEADER = 'max-age=3600, s-max-age=3600'
    if val is None:
        return DEFAULT_HEADER
    if type(val) is str:
        return val
    if type(val) is bool:
        return DEFAULT_HEADER if val else 'no-cache'
    if type(val) is int:
        if val < 0:
            raise ValueError('cdn_cache must be a positive integer, boolean, or string. Got: ' + str(val))
        if val == 0:
            return 'no-cache'
        return 'max-age={}, s-max-age={}'.format(val, val)
    raise NotImplementedError(str(type(val)) + ' is not a supported cache_control setting.')
def boolean_from_str(src):
    """Decode a boolean value from src.

    Returns python constants True or False for "true"/"1" and "false"/"0".
    As a convenience, if src is None then None is returned; anything else
    raises ValueError."""
    if src is None:
        return None
    MAPPING = {"true": True, "1": True, "false": False, "0": False}
    if src in MAPPING:
        return MAPPING[src]
    raise ValueError("Can't read boolean from %s" % src)
def _nd(name):
    r""" @return: Returns a named decimal regex fragment, e.g.
    _nd('x') -> r'(?P<x>\d+)'.

    Uses a raw string: '\d' in a plain literal is an invalid escape
    sequence (a DeprecationWarning today, a SyntaxError eventually).
    """
    return r'(?P<%s>\d+)' % name
def get_seconds(minstr, secstr):
    """Convert minute and second strings to total integer seconds."""
    SECONDS_PER_MINUTE = 60
    return int(minstr) * SECONDS_PER_MINUTE + int(secstr)
def get_resource_and_action(action, pluralized=None):
    """Extract (resource, is_write) from an API operation like 'get_user'.

    The verb before the first '_' decides write-ness (anything but 'get'
    counts as a write); the remainder is pluralized with 's' unless an
    explicit pluralized name is supplied.  Anything after a ':' is ignored.
    """
    verb_and_noun = action.split(':', 1)[0].split('_', 1)
    resource = pluralized if pluralized else "%ss" % verb_and_noun[-1]
    is_write = verb_and_noun[0] != 'get'
    return (resource, is_write)
def _bool(x):
    """Convert a string to boolean: "yes"/"true"/"1" (any case) are True,
    everything else is False."""
    TRUTHY = ("yes", "true", "1")
    return x.lower() in TRUTHY
def takefirst(levels, **_):
    """ Return the first element of `levels` as a singleton sequence of
    the same type (lists and tuples slice fine; deques do not).

    >>> takefirst([1, 2, 3, 4])
    [1]

    Args:
        levels: A slicable sequence of Level objects

    Returns:
        A singleton (or empty) sequence of the input's type containing
        the first element
    """
    FIRST_ONLY = slice(0, 1)
    return levels[FIRST_ONLY]
def compute_col(line):
    """Binary-partition the seat column from a boarding-pass line.

    Characters after position 7 narrow the 0..7 column range: 'L' keeps
    the lower half, 'R' keeps the upper half; the final high bound is the
    column."""
    low, high = 0, 7
    for char in line[7:]:
        half = int((high - low) / 2 + 0.5)  # round the half-range up
        if char == 'L':
            high -= half
        elif char == 'R':
            low += half
    return high
def inverse_filter_dict(dictionary, keys):
    """Filter a dictionary down to the entries whose key is NOT given.

    Args:
        dictionary (dict): Dictionary.
        keys (iterable): keys to exclude.

    Return:
        dict: Filtered dictionary.
    """
    excluded = set(keys)
    return {k: v for k, v in dictionary.items() if k not in excluded}
def find_all(searchin, substr):
    """Return the indices of all non-overlapping occurrences of substr in
    searchin, scanning left to right."""
    locations = []
    step = len(substr)
    pos = searchin.find(substr)
    while pos != -1:
        locations.append(pos)
        # skip past this match so occurrences cannot overlap
        pos = searchin.find(substr, pos + step)
    return locations
def fix_deplist(deps):
    """ Lowercase a dependency list: plain-string entries become
    one-element tuples, and list/tuple entries become tuples of
    lowercased strings.
    """
    fixed = []
    for dep in deps:
        if isinstance(dep, (list, tuple)):
            fixed.append(tuple(entry.lower() for entry in dep))
        else:
            fixed.append((dep.lower(),))
    return fixed
def to_integer(given_input: str):
    """ Find the digit runs in a string using a regular expression.

    >>> to_integer('abc')
    0
    >>> to_integer('1')
    '1'
    >>> to_integer('abc123abc456')
    '123,456'

    Note: matches are returned as a comma-separated *string* — the
    original doctests wrongly showed bare ints for '1' and '123'.  The
    regex is now a raw string (plain '\\d' is an invalid escape sequence).

    Args:
        given_input (str): any text with numbers in it

    Returns:
        str | int: comma-joined digit runs; 0 when a string has no
        digits; non-string input is returned unchanged.
    """
    from re import findall
    if isinstance(given_input, str):
        matches = findall(r'\d+', given_input)
        if matches:
            return ",".join(matches)
        return 0  # default value
    return given_input
def get_element_coordinates(feeder_mapping, element_type, name):
    """Return the coordinates of the bus to which the element is attached.

    Parameters
    ----------
    feeder_mapping : dict
        dictionary created by this module at a feeder level
    element_type : str
        capacitors, lines, loads, etc.
    name : str
        Element name

    Returns
    -------
    dict | None
        None is returned if no coordinates are stored.

        Example output::

            {'x': '34374.509', 'y': '206624.15'}

        If element_type == 'lines'

            {'from': None, 'to': {'x': '34802.251', 'y': '206769.654'}}

    """
    element = feeder_mapping[element_type][name]
    coords = feeder_mapping["bus_coords"]
    # Lines span two buses, so both endpoints are reported.
    if element_type == "lines":
        return {
            "from": coords.get(element["from"]),
            "to": coords.get(element["to"]),
        }
    # Transformers store their attachment bus under the "primary" key.
    if element_type == "transformers":
        element = element["primary"]
    return coords.get(element)
def format_bed_key(key: str) -> str:
    """Clean a bedtools parameter key: drop underscores, prefix a dash."""
    return f"-{key.replace('_', '')}"
def retrieve_commands(commands):
    """Retrieve context needed for a set of commit actions.

    Returns a (config_commands, support_commit, config_verification)
    tuple; 'support_commit' is optional and falls back to None.
    """
    return (
        commands['config'],
        commands.get('support_commit'),
        commands['config_verification'],
    )
def get_nested_dict_item(dic: dict, key_list: list, level=0):
    """Get the nested dictionary item.

    Walks *key_list* (starting at index *level*) down through nested
    dictionaries and returns the item it lands on. An empty key list
    returns the root dictionary itself.

    Args:
        dic (dict): Dictionary of nested items
        key_list (list): List of keys
        level (int): Index into key_list at which to start descending.
            Defaults to 0.

    Returns:
        object: Item in the dic corresponding to the given key_list

    Example:
        >>> d = {'aa': {'x1': {'dda': '34fF'}}, 'cc': '100nm'}
        >>> get_nested_dict_item(d, ['cc'])
        '100nm'
        >>> get_nested_dict_item(d, ['aa', 'x1', 'dda'])
        '34fF'
    """
    node = dic
    for key in key_list[level:]:
        node = node[key]
    return node
def json_format(subtitle, data):
    """
    Format json to string

    :param subtitle: description to text
    :type subtitle: string
    :param data: content to format
    :type data: dictionary
    :return: 'subtitle:' followed by one 'key: value' line per entry,
        with surrounding whitespace stripped
    """
    # Build the lines once and join them (avoids quadratic += growth);
    # f-strings also stringify non-str values instead of raising a
    # TypeError as the old '+' concatenation did.
    lines = [f"{subtitle}:"]
    for name, value in data.items():
        lines.append(f"{name}: {value}")
    return "\n".join(lines).strip()
def put_path(components, value):
    """Build a nested single-key dict from *components* ending in *value*.

    All but the last component are popped from the front of the list
    (mutating the caller's list, matching the original recursive form),
    e.g. ['a', 'b', 'c'] with value 1 yields {'a': {'b': {'c': 1}}}.
    """
    outer_keys = []
    while len(components) > 1:
        outer_keys.append(components.pop(0))
    nested = {components[0]: value}
    # Wrap from the innermost key outward.
    for key in reversed(outer_keys):
        nested = {key: nested}
    return nested
def generate_all_labelings(node_list):
    """
    Returns a list of all permissible orderings of the nodes.
    A permissible ordering is one in which each child comes
    after its parent.

    See e.g. HNW1993 p. 267, Example 12.5.
    """
    root = node_list[0]
    rest = node_list[1:]
    orderings = [[root]]
    # Extend every partial ordering by one node per pass until all
    # non-root nodes have been placed.
    for _ in rest:
        extended = []
        for prefix in orderings:
            for candidate in rest:
                # A node may be appended once its parent is already
                # placed and it has not been placed itself.
                if (candidate.parent in prefix) and (candidate not in prefix):
                    extended.append(prefix + [candidate])
        orderings = extended
    return orderings
def _convert_to_int_if_possible(x): """pd.to_numeric did not correctly work.""" try: return int(x) except ValueError: return x
def insert_date(node_list, extract_date):
    """
    Stamp the JDBOR extract date onto each disorder entry in place.

    :param node_list: list of disorder objects
    :param extract_date: JDBOR extract date
    :return: the same node_list, every entry carrying a "Date" key
    """
    for entry in node_list:
        entry["Date"] = extract_date
    return node_list
def score_is_valid(module_score: float) -> bool:
    """Check whether a given score is a valid numeric value.

    A score is valid when it converts to a float in [0, 100] or equals
    the -1 sentinel. Returns False for None and for values that cannot
    be converted to float.
    """
    if module_score is None:
        return False
    try:
        value = float(module_score)
    except (ValueError, TypeError):
        return False
    # Compare the *converted* value: the previous code compared the raw
    # input, so numeric strings like "50" raised TypeError in the range
    # check and were always reported invalid.
    return 0 <= value <= 100 or value == -1
def p(x, threshold=0.5):
    """Predicts whether a probability falls into class 1.

    Args:
        x (obj): Probability that example belongs to class 1.
        threshold (float): point above which a probability is deemed
            of class 1.

    Returns:
        int: Binary value to denote class 1 or 0
    """
    return 1 if x >= threshold else 0
def gen_change(record_set, action='CREATE'):
    """Generate expected change."""
    change = dict(Action=action, ResourceRecordSet=record_set)
    return change
def block_idx(len_list: int, block_size: int = 1000) -> list:
    """
    Helper function to split length into blocks

    Args:
        len_list (int): list length.
        block_size (int, optional, default 1000): size per block.

    Returns:
        list[(int, int)]: list of (start, end) positions of blocks. The
        final block's end may exceed len_list (harmless for slicing).
    """
    blocks = []
    start = 0
    # Stop once start reaches len_list: the previous `while end <=
    # len_list` loop emitted a spurious empty trailing block whenever
    # len_list was an exact multiple of block_size, and a bogus
    # (0, block_size) block for len_list == 0.
    while start < len_list:
        end = start + block_size
        blocks.append((start, end))
        start = end
    return blocks
def gcd(a, b):
    """gcd(a,b) -> greatest common divisor of a and b"""
    if b == 0:
        return a
    return gcd(b, a % b)
def get_foldername(dict_args):
    """Build the run folder name from selected argument values.

    NOTE adjust foler name attributes
    Each (argument, short name) pair contributes a 'short-value'
    segment; segments are joined with underscores.
    """
    name_spec = [
        ["NumHead", "h"],
        ["MaxEpoch", "me"],
        ["ModelDim", "d_model"],
        ["Dropout", "dp"],
        ["TimeEmbeddingDim", "teDim"],
        ["Layer", "layer"],
        ['LearnRate', 'lr'],
        ['Seed', 'seed'],
        ['IgnoreFirst', "ignoreFirst"],
        ['ID', 'id'],
    ]
    return '_'.join(
        f"{short}-{dict_args[arg_name]}" for arg_name, short in name_spec
    )
def parse_frame_size(frame_size):
    """Parses the given frame size, ensuring that it is valid.

    Args:
        frame_size: a (width, height) tuple or list, optionally with
            dimensions that are -1 to indicate "fill-in" dimensions

    Returns:
        the frame size converted to a tuple, if necessary

    Raises:
        ValueError: if the frame size was invalid
    """
    if isinstance(frame_size, list):
        frame_size = tuple(frame_size)
    if not isinstance(frame_size, tuple):
        raise ValueError(
            "Frame size must be a tuple or list; found '%s'" % str(frame_size)
        )
    if len(frame_size) != 2:
        # Fixed the duplicated words in the original message
        # ("must be a be a").
        raise ValueError(
            "frame_size must be a (width, height) tuple; found '%s'"
            % str(frame_size)
        )
    return frame_size