content
stringlengths
42
6.51k
def has_equal_properties(obj, property_dict):
    """Check that every key of ``property_dict`` names an attribute on
    ``obj`` whose value equals the dict's value.

    Returns False when any attribute is missing or differs.
    """
    try:
        return all(getattr(obj, field) == value
                   for field, value in property_dict.items())
    except AttributeError:
        return False
def metric(num, metric_list=((10 ** 9, 'B'), (10 ** 6, 'M'), (10 ** 3, 'k')),
           additive=False):
    """Return a user-readable string representing ``num``.

    Arguments:
        num: the base value being converted.
        metric_list: iterable of (threshold, suffix) pairs, largest first.
            The default is now an immutable tuple; the original used a
            mutable default list (a classic Python pitfall).
        additive: when True, each matching unit is accumulated into the
            output (e.g. "23w6d2h53s"-style); when False only the largest
            matching unit is emitted (e.g. "345.0M").

    Returns:
        The formatted string, or ``str(num)`` when no threshold matches.
    """
    output = ''
    for metric_count, metric_char in metric_list:
        if num > metric_count:
            if additive:
                format_str = '{}{}'
            else:
                format_str = '{:.1f}{}'
            num = num / metric_count
            if not additive:
                num = float(num)
            output += format_str.format(num, metric_char)
            if not additive:
                break
    # just in case no threshold matched
    if output == '':
        output = str(num)
    return output
def set_accuracy_95(num: float) -> float:
    """Reduce floating point accuracy to 9.5 (xxxx.xxxxx).

    Used by :class:`Hedron` and :class:`Dihedron` classes writing PIC and
    SCAD files.

    :param float num: input number
    :returns: float with the specified accuracy
    """
    # Fixed-width formatting then re-parse (measurably faster than round()).
    formatted = "{:9.5f}".format(num)
    return float(formatted)
def get_single_request_str(last_r, reqs_w):
    """Build the bit-string for the request that would trigger the
    hold-on condition: a single '1' at position (last_r + 1) mod reqs_w,
    zeros elsewhere.
    """
    target = (last_r + 1) % reqs_w
    return "".join("1" if i == target else "0" for i in range(reqs_w))
def reverse_bit(num):
    """Reverse the bit order of the low 8 bits of ``num``.

    Turns an LSB byte into an MSB byte and vice versa — needed for SPI
    because the SHARP display is LSB-first while nearly all SPI
    implementations are MSB-first.
    """
    result = 0
    for shift in range(8):
        if num & (1 << shift):
            result |= 1 << (7 - shift)
    return result
def peel(iterable, count=1):
    """Return the "meat" of a sequence: drop ``count`` elements from each end.

    :param iterable: sliceable sequence.
    :param count: number of elements to remove from each end (>= 0).
    :returns: peeled sequence of the same type.
    :raises ValueError: if count is negative.
    """
    if count < 0:
        raise ValueError("peel count cannot be negative: %r" % count)
    if not iterable:
        return iterable
    # Use an explicit end index: the original's iterable[count:-count]
    # became iterable[0:0] (empty!) when count == 0.
    return iterable[count:len(iterable) - count]
def convert_to_seconds(input_str):
    """Convert a "<value> <unit>" string to a length of time in seconds.

    Accepted units: h/hr/hour, m/min/minute, s/sec/second (plural forms
    allowed). Raises AssertionError on any other unit.
    """
    value, unit = input_str.split()
    # Drop a plural trailing 's' (but not the bare unit "s" itself).
    if len(unit) > 1 and unit.endswith("s"):
        unit = unit[:-1]
    multipliers = {
        "s": 1, "sec": 1, "second": 1,
        "m": 60, "min": 60, "minute": 60,
        "h": 3600, "hr": 3600, "hour": 3600,
    }
    if unit not in multipliers:
        raise AssertionError(
            "Invalid unit. Units must be hours, mins, or seconds. Received '{}'".format(
                unit
            )
        )
    return float(value) * multipliers[unit]
def join_columns_with_divider(table, decorator):
    """Join the cells of each row of ``table`` with ``decorator``."""
    joined = []
    for row in table:
        joined.append(decorator.join(row))
    return joined
def waiting_time(timestamp, bus):
    """Minutes until the next departure of ``bus`` at ``timestamp``.

    Returns 0 when the bus departs exactly at ``timestamp``; the original
    incorrectly rounded that case up to a full period (``bus``).
    """
    return (bus - timestamp % bus) % bus
def b2s(byteStr):
    """Decode bytes as ASCII, silently dropping undecodable bytes.

    Avoids issues with implicit unicode conversion not recognising
    certain characters.
    """
    return byteStr.decode(encoding="ascii", errors="ignore")
def _get_svtype(call): """Retrieve structural variant type from current TitanCNA events. homozygous deletion (HOMD), hemizygous deletion LOH (DLOH), copy neutral LOH (NLOH), diploid heterozygous (HET), amplified LOH (ALOH), gain/duplication of 1 allele (GAIN), allele-specific copy number amplification (ASCNA), balanced copy number amplification (BCNA), unbalanced copy number amplification (UBCNA) """ if call in set(["HOMD", "DLOH"]): return "DEL" elif call in set(["ALOH", "GAIN", "ASCNA", "BCNA", "UBCNA"]): return "DUP" elif call in set(["NLOH"]): return "LOH" else: return "CNV"
def GreatCircleDistance(u, v):
    """Great-circle distance between two (lat, lon) points given in
    degrees, returned in kilometers (Vincenty formula, spherical earth).
    """
    import math
    EARTH_R = 6372.8  # mean earth radius, km
    lat1, lon1 = math.radians(u[0]), math.radians(u[1])
    lat2, lon2 = math.radians(v[0]), math.radians(v[1])
    dlon = lon1 - lon2
    y = math.sqrt(
        (math.cos(lat2) * math.sin(dlon)) ** 2
        + (math.cos(lat1) * math.sin(lat2)
           - math.sin(lat1) * math.cos(lat2) * math.cos(dlon)) ** 2
    )
    x = (math.sin(lat1) * math.sin(lat2)
         + math.cos(lat1) * math.cos(lat2) * math.cos(dlon))
    return EARTH_R * math.atan2(y, x)
def GetNumSeqSameUserDict(joblist):#{{{
    """Calculate, per job, the total number of sequences owned by the same
    "user" (jobs sharing a non-empty ip or email) in the queue/running set.

    joblist is a list of lists with the structure:
        li = [jobid, status, jobname, ip, email, numseq_str,
              method_submission, submit_date_str, start_date_str,
              finish_date_str]

    Returns a dict {jobid: total_num_seq} where each total includes the
    sequence counts of every OTHER job with the same ip or email.
    """
    # Fixed error for getting numseq at 2015-04-11
    numseq_user_dict = {}
    for i in range(len(joblist)):
        li1 = joblist[i]
        jobid1 = li1[0]
        ip1 = li1[3]
        email1 = li1[4]
        try:
            numseq1 = int(li1[5])
        except:
            # Unparseable count: fall back to a fixed penalty value.
            # NOTE(review): 123 looks like a magic placeholder — confirm intent.
            numseq1 = 123
            pass
        if not jobid1 in numseq_user_dict:
            numseq_user_dict[jobid1] = 0
        numseq_user_dict[jobid1] += numseq1
        if ip1 == "" and email1 == "":
            # No identity to correlate on; count only this job's own sequences.
            continue
        # O(n^2): add the counts of all other jobs by the same ip or email.
        for j in range(len(joblist)):
            li2 = joblist[j]
            if i == j:
                continue
            jobid2 = li2[0]
            ip2 = li2[3]
            email2 = li2[4]
            try:
                numseq2 = int(li2[5])
            except:
                numseq2 = 123
                pass
            if ((ip2 != "" and ip2 == ip1) or (email2 != "" and email2 == email1)):
                numseq_user_dict[jobid1] += numseq2
    return numseq_user_dict
def _urljoin(*parts): """Concatenate url parts.""" return '/'.join([part.strip('/') for part in parts])
def compute_luminosity(red, green, blue):
    """Luminosity of a pixel using the NTSC formula, which weights the
    red, green and blue channels by perceptual importance."""
    weights = ((0.299, red), (0.587, green), (0.114, blue))
    return sum(w * channel for w, channel in weights)
def get_wanted_statistics(features, statistics):
    """Select the wanted statistics from the whole description.

    Parameters
    ----------
    features : list of source features
        All source features you are interested in.
    statistics : list of string
        The statistics you want to select.

    Returns
    -------
    list of tuple
        Tuples (feature, statistic) for every combination, features-major
        order.
    """
    # Single comprehension replaces the original build-then-flatten pair.
    return [(param, stat) for param in features for stat in statistics]
def contents(filename):
    """The contents of FILENAME, or the empty string if the file does not
    exist or is unreadable.

    Catches OS-level and decode errors specifically; the original bare
    ``except`` also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        with open(filename) as inp:
            return inp.read()
    except (OSError, UnicodeError):
        return ''
def encode_byte(value: int) -> bytes:
    """Protocol-encode one byte as an uppercase ASCII hex pair.

    One input byte becomes two output bytes (e.g. 255 -> b'FF').

    :raises ValueError: if value is outside 0..255. The original used a
        print + assert, which is both noisy and stripped under ``-O``.
    """
    if not 0 <= value < 256:
        raise ValueError("byte value out of range: {}".format(value))
    return "{:02X}".format(value).encode("ascii")
def stringToOrd(strList):
    """Flatten an iterable of strings into ord() values, appending
    ord(',') as a separator after every item."""
    keyOrds = []
    comma = ord(',')
    for cstr in strList:
        keyOrds.extend(ord(ch) for ch in cstr)
        keyOrds.append(comma)
    return keyOrds
def get_path(sol: dict):
    """Extract the path entries from a solver solution.

    Keeps every entry whose key does not start with "x" (those are
    solver decision variables).

    :param sol: solution dict, or None.
    :returns: dict of path entries, or None when sol is None or no path
        entries exist.
    """
    if sol is None:
        return None
    path = {key: value for key, value in sol.items()
            if not key.startswith("x")}
    # Empty path is normalised to None, matching the original behavior.
    return path or None
def alg_int(u, v):
    """Algebraic (signed) intersection number of two slopes, i.e. the
    2x2 determinant of their components."""
    (a, b), (c, d) = u, v
    return a * d - b * c
def remove_prefix(text, prefix):
    """Remove ``prefix`` from the front of ``text`` when present.

    Args:
        text: text being tokenized.
        prefix: the word (found in the dictionary) to strip.

    Returns:
        Truncated text without the leading prefix; ``text`` unchanged
        when it does not start with ``prefix``.
    """
    if text.startswith(prefix):
        return text[len(prefix):]
    return text
def bold(string):
    """Surround ``string`` with asterisks so markdown renders it bold.

    :raises TypeError: when the argument is not a str.
    """
    if not isinstance(string, str):
        raise TypeError("Must be given a string")
    return "*{}*".format(string)
def city_country(city, country):
    """Return "City, Country" with both parts title-cased.

    Fixes an accidental leading space that the original format string
    prepended to every result.
    """
    return f"{city.title()}, {country.title()}"
def find_object(name, blacklist=None, whitelist=None):
    """Import and return an object given a fully qualified name.

    >>> find_object("whoosh.analysis.StopFilter")
    <class 'whoosh.analysis.StopFilter'>

    :param name: fully qualified "module.attr" name.
    :param blacklist: prefixes that must not be instantiated.
    :param whitelist: when given, name must start with one of these.
    :raises TypeError: when black/whitelist checks fail.
    :raises ValueError: when name is not fully qualified (the original
        used ``assert``, which disappears under ``-O``).
    """
    if blacklist:
        for pre in blacklist:
            if name.startswith(pre):
                raise TypeError("%r: can't instantiate names starting with %r"
                                % (name, pre))
    if whitelist:
        if not any(name.startswith(pre) for pre in whitelist):
            raise TypeError("Can't instantiate %r" % name)
    lastdot = name.rfind(".")
    if lastdot == -1:
        raise ValueError("Name %r must be fully qualified" % name)
    modname = name[:lastdot]
    clsname = name[lastdot + 1:]
    mod = __import__(modname, fromlist=[clsname])
    return getattr(mod, clsname)
def reduce_tree(f, parent_unit_node, unit_node, get_children, *state):
    """Enumerate a tree, applying f in a pre-order fashion to each node.

    parent_unit_node contains the parent of unit_node. For the root of
    the tree, parent_unit_node == unit_node.

    get_children is a single-argument function applied to a unit_node to
    get a list/iterator of its children.

    state is threaded through every call of f; f may return a single
    value or a tuple, which is re-splatted into the next call. The final
    accumulated state is returned.
    """
    def as_tuple(x):
        # Normalise f's return value so it can be unpacked as *state again.
        if isinstance(x, tuple):
            return x
        else:
            return (x,)
    # Visit the current node first (pre-order) ...
    state = f(parent_unit_node, unit_node, *state)
    # ... then recurse into each child, threading the updated state along.
    for child_unit_node in get_children(unit_node):
        state = reduce_tree(f, unit_node, child_unit_node, get_children, *as_tuple(state))
    return state
def intlist(tensor):
    """Turn a 1-d tensor into a plain list of Python ints (slowly).

    Lists and tuples are passed through unchanged.

    :param tensor: a squeezable 1-d tensor, list or tuple.
    :return: the original list/tuple, or a list of ints.
    """
    if type(tensor) in (list, tuple):
        return tensor
    tensor = tensor.squeeze()
    assert len(tensor.size()) == 1
    length = tensor.size()[0]
    return [int(tensor[i]) for i in range(length)]
def header_name_of_wsgi_key(wsgi_key: str) -> str:
    """Translate a WSGI environ key into an HTTP header name.

    Returns '' for keys that are not HTTP_* headers.

    >>> header_name_of_wsgi_key('HTTP_ACCEPT_LANGUAGE')
    'Accept-Language'
    >>> header_name_of_wsgi_key('HTTP_AUTHORIZATION')
    'Authorization'
    """
    if not wsgi_key.startswith('HTTP_'):
        return ''
    # Acronyms that stay fully capitalised instead of being title-cased.
    words_for_short = {'IM', 'HTTP2', 'MD5', 'TE', 'DNT', 'ATT', 'UIDH', 'XSS'}
    pieces = []
    for word in wsgi_key[5:].split('_'):
        pieces.append(word if word in words_for_short else word.capitalize())
    return '-'.join(pieces)
def create_date_range_str(i):
    """Build the "start-end" result-range string for a Pole Emploi API page.

    Args:
        i: index of the first result of the page (pages hold 150
           results, the maximum the API returns). Both bounds are capped
           by the API's hard limits (start <= 1000, end <= 1149).

    Returns:
        str such as '0-149'. (The original docstring's unterminated
        example and the redundant ``str(str(...))`` wrapping are fixed.)
    """
    return f"{min(i, 1000)}-{min(i + 149, 1149)}"
def bending_stress(My, Iy, z, Mz=None, Iz=None, y=None):
    """Bending stress in the principal coordinate system.

    :param float My: Moment about the y axis
    :param float Iy: Area moment of inertia about the y axis
    :param float z: z coordinate of the point in the cross section
    :param float Mz: Moment about the z axis (optional; enables biaxial
        bending, in which case Iz and y must also be given)
    :param float Iz: Area moment of inertia about the z axis
    :param float y: y coordinate of the point in the cross section
    :returns: bending stress, or a tuple of the two stress components
        when Mz is given
    :rtype: float or tuple
    """
    if Mz is None:
        return (My / Iy) * z
    else:
        return (My / Iy) * z, -(Mz / Iz) * y
def _validate_emails(emails):
    """Debug passthrough for the outgoing recipient list.

    The whitelist filter below (kept commented out) restricted outgoing
    mail to the developer's own addresses while debugging; the live code
    currently returns all addresses unchanged.
    """
    return emails
    # _valid_emails = []
    # for email in emails:
    #     try:
    #         assert email.startswith('chrisburr73'), email
    #         assert email[len('chrisburr73')] in ['@', '+'], email
    #         assert email.endswith('@gmail.com'), email
    #     except AssertionError:
    #         try:
    #             assert email in ['christopher.burr@cern.ch', 'c.b@cern.ch'], email
    #         except AssertionError:
    #             logging.warning(f'Skipping sending email to {email}')
    #     else:
    #         _valid_emails.append(email)
    # return _valid_emails
def PowerOfInt2(base, exponent):
    """Integer power by repeated multiplication (slower than ``pow``).

    :param base: base value.
    :param exponent: integer exponent; negative exponents yield a float.
    :return: base ** exponent.
    :raises ZeroDivisionError: for 0 raised to a negative power (the
        original silently returned 0 for that case, and also returned 0
        for 0**0, which is 1 by convention and matches built-in pow).
    """
    if exponent == 0:
        return 1          # includes 0**0 == 1
    if base == 0:
        if exponent < 0:
            raise ZeroDivisionError("0 cannot be raised to a negative power")
        return 0
    power = 1
    for _ in range(abs(exponent)):
        power *= base
    if exponent < 0:
        return 1.0 / power
    return power
def formatOneTextRow(fields, values = None, fieldDLM="|@|"):
    """Format one delimited text row.

    Parameters in:
        fields - ordered list of field names.
        values - dict of values keyed by field name; None (default)
            produces a header row of the field names themselves.
        fieldDLM - delimiter placed between cells, default "|@|".

    Return:
        The delimited header (values is None) or the data row, with
        values emitted in the same order as fields; missing fields
        become empty cells and the "tags" field is comma-joined.
    """
    if values is None:
        # Header row.
        return fieldDLM.join(fields)
    row = []
    for field in fields:
        if field not in values:
            row.append("")
        elif field == "tags":
            row.append(",".join(values[field]))
        else:
            row.append(values[field])
    return fieldDLM.join(row)
def candidate_certificates_fixture(signed_data_fixture):
    """Extract the certificate list from the Apple Pay token fixture."""
    return signed_data_fixture["certificates"]
def get_neighbor_address(ip):
    """Peer address within a /30 subnet (the other usable host).

    Args:
        ip (str): IPv4 address in dotted-quad form.

    Returns:
        str: the neighbor's address.
    """
    octets = ip.split(".")
    host = int(octets[-1])
    # In a /30, even host numbers pair with host-1 and odd with host+1.
    octets[-1] = str(host - 1 if host % 2 == 0 else host + 1)
    return ".".join(octets)
def win_decider(comp_and_user_choices):
    """Decide the rock-paper-scissors outcome for (player, computer).

    Returns a (result, message) tuple describing the round from the
    player's point of view.
    """
    player, computer = comp_and_user_choices[0], comp_and_user_choices[1]
    beats = {"Rock": "Scissors", "Scissors": "Paper", "Paper": "Rock"}
    if beats.get(player) == computer:
        return ("Win", "You win!")
    if player == computer:
        return ("Draw", "You both drew!")
    return ("Loss", "You lose!")
def autodoc_skip_member(app, what, name, obj, skip, options):
    """Sphinx ``autodoc-skip-member`` hook: always document __init__,
    otherwise keep autodoc's own decision."""
    return False if name == "__init__" else skip
def is_num(n):
    """is_num utility function.

    Args:
        n (str): Candidate string.

    Returns:
        bool: Whether the string represents a float.

    The unused local from the original is removed, and TypeError is also
    caught so non-string, non-numeric inputs (e.g. None) return False
    instead of raising.
    """
    try:
        float(n)
    except (ValueError, TypeError):
        return False
    return True
def describe_class(obj):
    """Describe the class of the object passed as argument, returning its
    methods (and printing the class name).

    In Python 3, plain methods retrieved from a class are functions, not
    bound methods, so the original ``inspect.ismethod`` check matched
    almost nothing; both kinds are now accepted.
    """
    import inspect
    methods = []
    cl = obj.__class__
    print('Class: %s' % cl.__name__)
    for name in cl.__dict__:
        item = getattr(cl, name)
        if inspect.isfunction(item) or inspect.ismethod(item):
            methods.append(item)
    if not methods:
        print('No members')
    return methods
def chop_ns_prefix(element):
    """Return the local part of a fully namespace-qualified ET tag.

    element: a tag of the form "{namespace-uri}localname".
    Raises ValueError when no '}' is present, as before.
    """
    brace = element.rindex("}")
    return element[brace + 1:]
def _spline(t, p0, p1, p2, p3): """ Catmull-Rom cubic spline to interpolate 4 given points. :param t: Time index through the spline (must be 0-1). :param p0: The previous point in the curve (for continuity). :param p1: The first point to interpolate. :param p2: The second point to interpolate. :param p3: The last point to interpolate. """ return ( t * ((2 - t) * t - 1) * p0 + (t * t * (3 * t - 5) + 2) * p1 + t * ((4 - 3 * t) * t + 1) * p2 + (t - 1) * t * t * p3) / 2
def s2n(s):
    """String (or bytes) to number: interpret the data as a big-endian
    unsigned integer.

    The original used the Python-2-only ``s.encode("hex")`` path and
    crashed on ``bytes`` input in Python 3; bytes are now handled with
    ``int.from_bytes`` and str falls back to the per-character path.
    """
    if not len(s):
        return 0
    if isinstance(s, (bytes, bytearray)):
        return int.from_bytes(s, "big")
    enc = "".join("%02x" % ord(c) for c in s)
    return int(enc, 16)
def factors(number):
    """List every positive divisor of ``number`` in ascending order.

    number: the value to factorise.
    """
    return [i for i in range(1, number + 1) if number % i == 0]
def nodesort(ops):
    """Return the ops sorted by their ``name`` attribute."""
    return sorted(ops, key=lambda node: node.name)
def is_stellar(nearZTF, nearPS1, stellarPS1, stellarZTF):
    """Get if object is stellar.

    Uses bitwise &, | and ~ rather than ``and``/``or``/``not`` so the
    expression presumably also works elementwise on boolean arrays —
    do not "simplify" to logical operators.

    :param nearZTF:
    :type nearZTF: bool
    :param nearPS1:
    :type nearPS1: bool
    :param stellarPS1:
    :type stellarPS1: bool
    :param stellarZTF:
    :type stellarZTF: bool
    :return: if the object is stellar: (near both, PS1 says stellar) or
        (near ZTF only, ZTF says stellar)
    :rtype: bool
    """
    return (nearZTF & nearPS1 & stellarPS1) | (nearZTF & ~nearPS1 & stellarZTF)
def functionIsCompilable(f):
    """Is a python function 'f' compilable?

    We cordon off functions we know will not compile so they can be
    represented as plain python objects: standard numpy and scipy
    functions are rejected, everything else is assumed compilable.
    """
    module = f.__module__
    for pkg in ("numpy", "scipy"):
        if module == pkg or module.startswith(pkg + "."):
            return False
    return True
def firstOccurenceInStr(aList, aString):
    """Return the first element of aList that occurs as a substring of
    aString, or None when none does."""
    match = None
    for candidate in aList:
        if candidate in aString:
            match = candidate
            break
    return match
def construct_index_dict(field_names, index_start=0):
    """Construct a dictionary used to retrieve indexes for cursors.

    :param field_names: list of field-name strings to use as keys.
    :param index_start: the beginning index to count from (default 0).
    :return: dictionary of the form {field: index, ...}; for duplicate
        field names the first index wins (setdefault semantics).

    The local variable no longer shadows the ``dict`` builtin.
    """
    index_map = {}
    for index, field in enumerate(field_names, start=index_start):
        index_map.setdefault(field, index)
    return index_map
def ConcatenateChangelogs(revisions):
    """Concatenate the changelogs of several revisions.

    A single revision uses its ``single_scrubbed_log``; multiple
    revisions join their ``scrubbed_log`` texts with blank lines, making
    sure every log but the last ends in a newline first.
    """
    if not revisions:
        return ''
    if len(revisions) == 1:
        return revisions[0].single_scrubbed_log
    logs = [rev.scrubbed_log for rev in revisions]
    for i in range(len(logs) - 1):
        if not logs[i].endswith('\n'):
            logs[i] = logs[i] + '\n'
    return '\n'.join(logs)
def normalized_to_scaled(normalized_coords, resolution):
    """Map coordinates from the normalized range [-1, 1] to the pixel
    range [0, resolution - 1].

    @param normalized_coords batch of coordinates in [-1, 1]
    @param resolution target resolution
    @return the same batch with coordinates in [0, resolution - 1]
    """
    unit = normalized_coords * 0.5 + 0.5
    return unit * (resolution - 1)
def get_valid_step(current_step: int, max_step: int) -> int:
    """Clamp ``current_step`` into the valid range.

    :param current_step: the step to check.
    :param max_step: the maximum allowed step.
    :return: a corrected step between 1 and the maximum step.
    """
    if current_step < 1:
        return 1
    return min(current_step, max_step)
def extList(l, item):
    """Append 'item' to list 'l' in place and return its index."""
    index = len(l)
    l.append(item)
    return index
def _compute_fans(shape): """Computes the number of input and output units for a weight shape. Args: shape: Integer shape tuple or TF tensor shape. Returns: A tuple of integer scalars (fan_in, fan_out). """ if len(shape) < 1: # Just to avoid errors for constants. fan_in = fan_out = 1 elif len(shape) == 1: fan_in = fan_out = shape[0] elif len(shape) == 2: fan_in = shape[0] fan_out = shape[1] else: # Assuming convolution kernels (2D, 3D, or more). # kernel shape: (..., input_depth, depth) receptive_field_size = 1 for dim in shape[:-2]: receptive_field_size *= dim fan_in = shape[-2] * receptive_field_size fan_out = shape[-1] * receptive_field_size return int(fan_in), int(fan_out)
def mapt(fn, *args):
    """Apply ``fn`` across ``args`` (exactly like ``map``) and collect
    the results into a tuple."""
    return (*map(fn, *args),)
def find_endpoints(indices, neighbor_lists):
    """Extract endpoints from a connected set of vertices.

    An endpoint is a vertex having exactly one neighbor inside the set.

    Parameters
    ----------
    indices : list of integers
        indices to connected vertices
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    indices_endpoints : list of integers
        indices to endpoints of connected vertices
    """
    member = set(indices)
    endpoints = []
    for vertex in indices:
        if len(member.intersection(neighbor_lists[vertex])) == 1:
            endpoints.append(vertex)
    return endpoints
def nextID_in_poly(poly, id):
    """Id of the next point on a standard (closed) polygon.

    Parameters
    ----------
    poly : list of (x, y) tuples
        Polygon representation whose first and last vertices are
        identical, so "next" of the last index wraps to 1 and "next" of
        the second-to-last wraps to 0.
    id : int

    Returns
    -------
    int
    """
    last = len(poly) - 1
    if id == last:
        return 1
    if id == last - 1:
        return 0
    return id + 1
def is_chinese(string):
    """Check whether ``string`` includes at least one Chinese character.

    param: string: text to scan.

    The original fell through to ``return True`` unconditionally, so it
    reported True for every input; the fall-through now returns False.
    """
    for ch in string:
        if u'\u4e00' <= ch <= u'\u9fff':
            return True
    return False
def check_for_empty_nests_in_nest_spec(nest_spec):
    """Ensure no nest in ``nest_spec`` maps to an empty list.

    Parameters
    ----------
    nest_spec : OrderedDict, or None, optional.
        Keys are strings that define the name of the nests. Values are
        lists of alternative ids, denoting which alternatives belong to
        which nests. Each alternative id must only be associated with a
        single nest! Default == None.

    Returns
    -------
    None.

    Raises
    ------
    ValueError naming every empty nest, when any exist.
    """
    empty_nests = [name for name in nest_spec if len(nest_spec[name]) == 0]
    if empty_nests:
        msg = "The following nests are INCORRECTLY empty: {}"
        raise ValueError(msg.format(empty_nests))
    return None
def nth_sig_digit(flt: float, n: int = 1) -> float:
    """Round ``flt`` to ``n`` significant figures.

    Note: despite the name, this returns the full value rounded to n
    significant digits (via the ``g`` format spec), not a single digit.

    :param flt: value to round.
    :param n: number of significant figures to keep (default 1).
    :return: the rounded value as a float.
    """
    return float('{:.{p}g}'.format(flt, p=n))
def has_same_parameters(est_params, boundaries_id, labels_id, params):
    """Check whether the parameters in ``params`` match the estimated
    parameters in ``est_params`` and the boundaries/labels ids agree.

    ``labels_id`` may be None, in which case it is not compared.
    """
    def _key_matches(key):
        return (key in est_params
                and est_params[key] == params[key]
                and est_params["boundaries_id"] == boundaries_id
                and (labels_id is None
                     or est_params["labels_id"] == labels_id))
    return all(_key_matches(key) for key in params)
def _grand_total_value(data): """Return the grand total of the data""" # For "overall", without_mirrors is a subset of with_mirrors. # Only sum with_mirrors. if data[0]["category"] in ["with_mirrors", "without_mirrors"]: grand_total = sum( row["downloads"] for row in data if row["category"] == "with_mirrors" ) else: grand_total = sum(row["downloads"] for row in data) return grand_total
def _requirements_to_dict(rs):
    """Convert supported requirements into dictionaries for output.

    Each emitted dict has ``requirement_type`` and ``value`` keys; only
    the first occurrence of each requirement type is kept (tracked via
    the ``added`` set).
    """
    out = []
    added = set([])
    for r in rs:
        if r["class"] == "DockerRequirement" and "docker" not in added:
            added.add("docker")
            out.append({"requirement_type": "docker", "value": r["dockerImageId"]})
        elif r["class"] == "ResourceRequirement":
            # A single ResourceRequirement can contribute several entries.
            if "coresMin" in r and "cpu" not in added:
                added.add("cpu")
                out.append({"requirement_type": "cpu", "value": r["coresMin"]})
            if "ramMin" in r and "memory" not in added:
                added.add("memory")
                out.append({"requirement_type": "memory", "value": "%s MB" % r["ramMin"]})
            if "tmpdirMin" in r and "disks" not in added:
                added.add("disks")
                out.append({"requirement_type": "disks", "value": "local-disk %s HDD" % r["tmpdirMin"]})
    return out
def _check_default(value, default_value): """Return true if the new value is a match for the default parameter value """ default_present = False if str(default_value) == str(value): default_present = True return default_present
def _update_french_non_int(value): """ Update non-int values like "X" or "-". """ if value == 'X': return 99 if value == '-': return -1 return value
def is_available(list_mask, item_mask):
    """True when ``item_mask`` shares at least one set bit with
    ``list_mask`` (bitmask membership test)."""
    return bool(list_mask & item_mask)
def myfunction_bis(x):
    """Multiply an int by 2; return None for any non-int input."""
    return 2 * x if isinstance(x, int) else None
def recover_label(tags, seq_lens, id2tag):
    """Map tag ids back to tag strings (ID -> tag).

    :param tags: iterable of id sequences.
    :param seq_lens: true lengths, parallel to tags; each sequence is
        truncated to its length before mapping.
    :param id2tag: dict id -> tag; unknown ids map to 'O'.
    :return: list of tag-string lists.
    """
    return [[id2tag.get(i, 'O') for i in tag[:length]]
            for tag, length in zip(tags, seq_lens)]
def csvservermems(mems):
    """Render the server memory figures (total, free) as a
    comma-space separated string."""
    return ", ".join(map(str, mems))
def b2l(b_in):
    """First character of the value's string form: "T"/"F" for booleans
    instead of "True"/"False"."""
    return "{}".format(b_in)[0]
def add_suffix_to_str(item: str, suffix: str, divider: str = '') -> str:
    """Concatenate ``item``, ``divider`` and ``suffix``.

    Args:
        item (str): item to be modified.
        suffix (str): suffix to append.
        divider (str): separator between item and suffix. Defaults to
            '', meaning no divider is added.

    Returns:
        str: the combined string.
    """
    return item + divider + suffix
def obter_pos_l(pos):
    """obter_pos_l: posicao -> str

    Return the row component ('1', '2' or '3') of a 3x3 board position
    whose first element is a cell number 1..9; implicitly None for
    anything else.
    """
    cell = pos[0]
    if cell in (1, 2, 3):
        return '1'
    if cell in (4, 5, 6):
        return '2'
    if cell in (7, 8, 9):
        return '3'
def getManifestType(manifest):
    """Return the manifest's "Content-Type" entry."""
    return manifest["Content-Type"]
def disappear_angle_brackets(text):
    """Remove every '<' and '>' character from ``text``.

    :param text: input string.
    :return: text with angle brackets stripped.
    """
    return text.translate({ord("<"): None, ord(">"): None})
def get_prefix_from_proxy_group(proxy_group):
    """Return the prefix encoded in a proxy-group name.

    Args:
        proxy_group (str): proxy group name.

    Returns:
        str: prefix if it exists, empty string if not. The part before
        '@' is split on '-'; with more than two sections the
        third-from-last section (stripped) is the prefix.
    """
    sections = proxy_group.split("@")[0].split("-")
    if len(sections) <= 2:
        # Only two sections means there is no prefix.
        return ""
    return sections[-3].strip()
def is_header(data_in_hex: str) -> bool:
    """Check if the input is a header line.

    A header is a Qsync control-character string beginning with hex
    '1604'; recognising it tells you whether you are at the start of a
    conversation or picked it up midway.
    """
    return data_in_hex.startswith('1604')
def nested_lookup(col, key, *keys):
    """Search dictionaries nested in dictionaries nested in lists, etc, for key.

    Args:
        col (dict or list): The collection to be searched through.
        key (str): The key to locate in the collection.
        keys (iterable): The keys to iterate through before finding key.

    Returns:
        str or dict or list: The value of the key; implicitly None when
        the collection is neither a dict nor a list at the final step.
    """
    if keys:
        # More intermediate keys remain: descend one level and recurse.
        if isinstance(col, dict):
            return nested_lookup(col.get(key, {}), *keys)
        elif isinstance(col, list):
            if len(col) > 1:
                # NOTE(review): assumes `key` itself appears as an element of
                # the list and descends into the element right after it;
                # raises ValueError when it does not — confirm intended.
                return nested_lookup(col[col.index(key)+1], *keys)
            else:
                return nested_lookup(col[0].get(key, {}), *keys)
    # Base case: resolve `key` in the current collection.
    if isinstance(col, dict):
        return col.get(key)
    elif isinstance(col, list):
        if isinstance(col[0], dict):
            return col[0].get(key)
        else:
            # Non-dict list: fall back to the first element.
            return col[0]
def bisect_func_right(x, lo, hi, func):
    """Bisect `func(i)`, returning an index such that consecutive values are
    greater than `x`. If `x` is present, the returned index is past its last
    occurrence. EOF is assumed if `func` returns None.

    Assumes func is non-decreasing over [lo, hi); analogous to
    bisect.bisect_right but over a function instead of a sequence.
    """
    while lo < hi:
        mid = (lo + hi) // 2
        k = func(mid)
        # None (EOF) is treated like "not greater than x", so the search
        # moves upward past it.
        if k is not None and x < k:
            hi = mid
        else:
            lo = mid + 1
    return lo
def repeat(s, exclaim):
    """Return the string s repeated 3 times, comma separated.

    If exclaim is truthy, append exclamation marks.
    """
    result = ', '.join([s, s, s])
    if exclaim:
        result += '!!!'
    return result
def time_warp(ts):
    """Collapse consecutive duplicate elements.

    >>> time_warp('1 2 3 4 5 5 2 3 3'.split(' '))
    ['1', '2', '3', '4', '5', '2', '3']

    Accepts either a list of tokens (as the doctest shows) or a
    space-separated string; the original unconditionally called
    ``.split`` and therefore crashed on the documented list input.
    """
    if isinstance(ts, str):
        ts = ts.split(' ')
    result = []
    # Sentinel distinct from any real element, so a leading None is kept.
    previous = object()
    for element in ts:
        if element != previous:
            result.append(element)
            previous = element
    return result
def normalizeURL(url: str) -> str:
    """Strip all trailing '/' characters from the url.

    None is passed through unchanged.
    """
    if url is None:
        return url
    return url.rstrip('/')
def count_all(d):
    """Total count of all structures in the structure frequency data."""
    total = 0
    for freq in d.values():
        total += freq
    return total
def quad_get_c(x, dy, b, c_base):
    """Get the c parameter of the quad polynomial for a condensing hex."""
    correction = dy / (2 * b)
    return c_base - correction
def _GenerateMakePrivateGetter(return_type, variable_names, function_name):
    """Generate the C# ``MakeGetter`` source used by the ML.NET interop.

    Builds a ValueGetter<return_type> delegate body that fetches each
    input column's value and passes them, in order, to function_name.

    :param return_type: C# type name of the produced value.
    :param variable_names: ordered input column/parameter names.
    :param function_name: C# function invoked with the fetched values.
    :returns: the generated C# code as one string.
    """
    code = [
        "\n\nprivate Delegate MakeGetter(DataViewRow input, int iinfo)",
        "\n{",
        "\nValueGetter<{0}> result = (ref {0} dst) =>".format(return_type),
        "\n{"
    ]
    # generate holders for intermediate values
    for variable_name in variable_names:
        code.append("\nvar {0}Val = _parent._{0}.GetValue(input);".format(variable_name))
    code.append(
        "\ndst = {function_name}(".format(
            function_name = function_name
        )
    )
    length = len(variable_names)
    for index in range(length):
        # Comma-separate every argument except the last.
        code.append(
            "{0}Val{1}".format(
                variable_names[index],
                ", " if index != length - 1 else ""
            )
        )
    code.extend(
        [
            ');',
            "\n};",
            "\n\nreturn result;\n}"
        ]
    )
    return "".join(code)
def get_uncovered(intervals, start_time, end_time):
    """Complement of sorted, non-overlapping ``intervals`` within
    [start_time, end_time); degenerate (empty) gaps are skipped.

    >>> get_uncovered([(1, 3)], 0, 10)
    [(0, 1), (3, 10)]
    >>> get_uncovered([(1, 8), (9, 10)], 0, 20)
    [(0, 1), (8, 9), (10, 20)]
    >>> get_uncovered([], 0, 20)
    [(0, 20)]
    >>> get_uncovered([(1, 3), (3, 6)], 0, 10)
    [(0, 1), (6, 10)]
    """
    gaps = []
    cursor = start_time
    for begin, end in intervals:
        # Only record non-degenerate gaps.
        if cursor < begin:
            gaps.append((cursor, begin))
        cursor = end
    # Whatever remains up to end_time is uncovered too.
    if cursor < end_time:
        gaps.append((cursor, end_time))
    return gaps
def in_dict(obj, key, default=False):
    """True if ``key`` exists in ``obj.__dict__``; False if not.

    When obj has no (or an empty) __dict__, ``default`` is returned.
    """
    attrs = getattr(obj, '__dict__', None)
    if not attrs:
        return default
    return key in attrs
def out_of_range(row_mean, all_stdev, all_mean):
    """True when row_mean falls outside mean +/- 3*stdev."""
    lower = all_mean - 3 * all_stdev
    upper = all_mean + 3 * all_stdev
    return not (lower <= row_mean <= upper)
def binary_search(target, array, begin, end):
    """Perform a generic binary search for an array.

    Parameters
    ----------
    target: comparable
        An item to compare to
    array: iterable, indexable
        An array to search; assumed sorted ascending
    begin: int
        Beginning index for the array
    end: int
        Ending index for the array

    Returns
    -------
    An index that satisfies the binary search, or None for an empty array.
    """
    if array == []:
        return None
    if begin == end:
        return begin
    if begin == end - 1:
        # Two-index window: pick whichever side target falls on.
        return begin if target < array[begin] else end
    # round() keeps mid strictly inside (begin, end), guaranteeing progress.
    mid = begin + int(round((end - begin) / 2.0))
    if target < array[mid]:
        return binary_search(target, array, begin, mid)
    return binary_search(target, array, mid, end)
def pipe2glue(pcomments, pformat, rec):
    """Convert a NMRPipe table to a nmrglue table.

    Parameters:
        pcomments: list of NMRPipe comment lines.
        pformat: list of NMRPipe table column format strings (unused).
        rec: records array with named fields.

    Returns:
        (comments, rec): comment lines prefixed with "# ", and the
        unchanged records array.
    """
    prefixed = ["# " + line for line in pcomments]
    return prefixed, rec
def square_of_the_hypotenuse(first_leg, second_leg):
    """Square of the hypotenuse of a right triangle (Pythagoras).

    Args:
        first_leg (int): one leg of the right triangle.
        second_leg (int): the other leg.

    Returns:
        int: the squared hypotenuse.
    """
    return first_leg ** 2 + second_leg ** 2
def make_paths_absolute(dir_, cfg):
    """Recursively walk a config dict (intended to absolutize paths).

    NOTE(review): as written this only recurses into nested dicts and
    returns the config otherwise unchanged — ``dir_`` is never applied
    to any leaf value, so no path is actually made absolute. The
    path-joining step appears to be missing; confirm intent.

    :param dir_: base directory the paths should be made absolute against.
    :param cfg: configuration dictionary (possibly nested).
    :return: the same cfg, with nested dicts re-assigned in place.
    """
    for key in cfg.keys():
        if type(cfg[key]) is dict:
            cfg[key] = make_paths_absolute(dir_, cfg[key])
    return cfg
def to_number(s):
    """Convert a string to a number.

    Tries, in order: float, int, then the literals 'True'/'False'/'None'.
    If no conversion applies, returns the string stripped of surrounding
    single quotes and whitespace.

    Order matters: "3" becomes int 3 (the int attempt overrides the
    earlier float 3.0), while "3.5" stays a float because int("3.5")
    raises and is ignored.
    """
    ret = s
    # try converting to float
    try:
        ret = float(s)
    except ValueError:
        # Not numeric: clean up quotes/whitespace instead.
        ret = ret.strip('\'').strip()
    # try converting to uid
    try:
        ret = int(s)
    except ValueError:
        pass
    # try converting to boolean
    if ret == 'True':
        ret = True
    elif ret == 'False':
        ret = False
    elif ret == 'None':
        ret = None
    return ret
def theoretical_driving_power_lorentzian(fc, driving_frequency, driving_amplitude):
    """Compute the power expected for a given driving input.

    When driving the stage or trap, we expect to see a delta spike in the
    power density spectrum. This function returns the expected power
    contribution of the bead motion to the power spectrum. It corresponds
    to the driven power spectrum minus the thermal power spectrum
    integrated over the frequency bin corresponding to the driving input.

    fc : float
        Corner frequency [Hz]
    driving_frequency : float
        Driving frequency [Hz]
    driving_amplitude : float
        Driving amplitude [m]
    """
    # Lorentzian roll-off factor of the corner frequency relative to the
    # drive.
    rolloff = (fc / driving_frequency) ** 2
    return driving_amplitude ** 2 / (2 * (1 + rolloff))
def make_file_string(the_list):
    """Combine the strings in the_list into one file body, one entry per
    line, always ending with a trailing newline (an empty list yields
    a single newline)."""
    body = "\n".join(the_list)
    return f"{body}\n"
def twoscompl8(x):
    """Return the two's-complement negation of x, truncated to 8 bits.

    Equivalent to ((x ^ 0xff) + 1) & 0xff: inverting all 8 bits and
    adding one is arithmetic negation modulo 256.
    """
    return (-x) & 0xff
def add_(arrList1, arrList2):
    """Return the element-wise sum of two mxn arrayLists.

    Args:
        arrList1: list of row lists of addable values.
        arrList2: list of row lists, same shape as arrList1.

    Returns:
        A new list of lists where each entry is the sum of the
        corresponding entries of the inputs.

    Raises:
        AssertionError: if the two inputs do not have the same number of
            rows, or any pair of corresponding rows differs in length.

    >>> add_([[1, 2, 3]], [[4, 5, 6]])
    [[5, 7, 9]]
    >>> add_([[1, 2, 3], [4, 5, 6]], [[6, 5, 4], [3, 2, 1]])
    [[7, 7, 7], [7, 7, 7]]
    >>> add_([[]], [[]])
    [[]]
    """
    # Validate the whole shape up front.  The original indexed
    # arrList1[0], which raised IndexError on empty input ([]), and its
    # recursion was O(rows) deep; this version handles [] and iterates.
    assert len(arrList1) == len(arrList2), \
        "ArrayLists must be list of lists, with the same number of rows."
    assert all(len(r1) == len(r2) for r1, r2 in zip(arrList1, arrList2)), \
        "ArrayLists must be list of lists, with the same number of columns."
    return [[a + b for a, b in zip(row1, row2)]
            for row1, row2 in zip(arrList1, arrList2)]
def from_iban(alist: list) -> dict:
    """
    Build a two-way bank-account lookup from a list of account records.

    Each record in alist is a dict with "agent", "name" and "nib-ref"
    keys.  Returns a dict with:

    * "by-agent":   agent -> (nib-ref, name); agents must be unique.
    * "by-nib-ref": nib-ref -> list of (agent, name) sharing that ref.
    """
    lookup = {
        "by-nib-ref": {},
        "by-agent": {},
    }
    for item in alist:
        agent = item["agent"]
        name = item["name"]
        nib_ref = item["nib-ref"]
        # Agents are unique keys; a repeat indicates bad input data.
        assert agent not in lookup["by-agent"], f"Duplicate agent '{agent}', this one: {item}"
        lookup["by-agent"][agent] = (nib_ref, name)
        # Several agents may share one nib-ref, so accumulate a list.
        lookup["by-nib-ref"].setdefault(nib_ref, []).append((agent, name))
    return lookup
def get_unique_lines_in_unsymbolized_stack(symbolized_stacktrace,
                                           unsymbolized_stacktrace):
    """Return unique lines in unsymbolized stacktrace that are not in the
    symbolized stacktrace, with up to two lines of surrounding context on
    each side.

    Args:
        symbolized_stacktrace: full symbolized stacktrace string.
        unsymbolized_stacktrace: full unsymbolized stacktrace string.

    Returns:
        '\\n'-joined block of the unique lines plus context, or '' when
        nothing is unique.
    """
    if symbolized_stacktrace == unsymbolized_stacktrace:
        return ''

    # Compare whitespace-stripped lines so indentation differences alone
    # do not count as "unique".
    stripped_symbolized_lines = {
        line.strip() for line in symbolized_stacktrace.splitlines()
    }
    unsymbolized_lines = unsymbolized_stacktrace.splitlines()

    unique_indices = [
        index for index, line in enumerate(unsymbolized_lines)
        if line.strip() not in stripped_symbolized_lines
    ]
    if not unique_indices:
        # Nothing unique found, return empty string.
        return ''

    # Expand the unique region by a small context gap, clamped to the
    # trace bounds.  The original tracked `end = index` (not index + 1)
    # for later unique lines, dropping one trailing context line.
    line_gap = 2
    start = max(0, unique_indices[0] - line_gap)
    end = min(unique_indices[-1] + 1 + line_gap, len(unsymbolized_lines))
    return '\n'.join(unsymbolized_lines[start:end])
def val_closest_to(n: int, val: int) -> int:
    """Return the multiple of n closest to val (ties round up).

    :param n: int, number of partitions (the step size)
    :param val: int, value to approximate
    :return: int, the multiple of n nearest to val; an exact halfway
        point resolves to the higher multiple
    """
    # Bracket val between the two neighbouring multiples of n.
    lower = (val // n) * n
    upper = lower + n
    # Strictly-closer lower multiple wins; otherwise (including ties)
    # round up.
    if val - lower < upper - val:
        return lower
    return upper
def italicize(txt: str):
    """
    Italicize: wrap text in markdown emphasis markers, collapsing all
    internal whitespace (including newlines) to single spaces.

    :param txt: A string object.
    :return: Italicized string markdown syntax.
    """
    # str.split() with no argument already splits on any whitespace run,
    # newlines included.  The previous txt.replace("\n", "") removed
    # newlines *before* splitting, fusing words across line breaks
    # ("foo\nbar" -> "foobar").
    return "\n*" + " ".join(txt.split()) + "*\n"
def done(state):
    """Return True when no cell in the grid is still a set (i.e. every
    cell has been resolved to a concrete value)."""
    return not any(
        isinstance(cell, set) for row in state for cell in row
    )