content
stringlengths
42
6.51k
def dist(x, y):
    """Absolute difference between the "par" entries of two mappings."""
    delta = x["par"] - y["par"]
    return delta if delta >= 0 else -delta
def isprobablytitle(s):
    """Heuristically decide whether string *s* looks like a title.

    Words of four characters or fewer are capitalized first, so short
    filler words ("the", "of", ...) never break the check; the rebuilt
    string is then tested with ``str.istitle``.  Based on the Gutenberg
    Poetry corpus heuristic.
    """
    normalized = []
    for word in s.split():
        normalized.append(word if len(word) > 4 else word.capitalize())
    return ' '.join(normalized).istitle()
def is_binary(s: str) -> bool:
    """Return ``True`` when *s* is one of the binary operators '&', '|' or '->'."""
    return s in ('&', '|', '->')
def _filter_by_attr(list_in, attr_name, attr_val, match_substring=False):
    """Keep elements of *list_in* whose *attr_name* attribute matches *attr_val*.

    With ``match_substring`` the test is containment, otherwise equality.
    """
    if match_substring:
        return [item for item in list_in if attr_val in getattr(item, attr_name)]
    return [item for item in list_in if getattr(item, attr_name) == attr_val]
def handle_extensions(extensions=('html',)):
    """Organize multiple extensions that are separated with commas or
    passed by using --extension/-e multiple times.

    For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
    would result in an extension list: ['.js', '.txt', '.xhtml'].

    The original doctests showed list literals, but the function returns a
    set, so they are wrapped in sorted() here:

    >>> sorted(handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']))
    ['.html', '.js']
    >>> sorted(handle_extensions(['.html, txt,.tpl']))
    ['.html', '.tpl', '.txt']
    """
    ext_list = []
    for ext in extensions:
        ext_list.extend(ext.replace(' ', '').split(','))
    for i, ext in enumerate(ext_list):
        if not ext.startswith('.'):
            ext_list[i] = '.%s' % ext_list[i]
    # we don't want *.py files here because of the way non-*.py files
    # are handled in make_messages() (they are copied to file.ext.py files
    # to trick xgettext to parse them as Python files)
    return {x for x in ext_list if x != '.py'}
def float_vct_str(vct, format='%.5g'):
    """Convert a vector of floating-point values to a bracketed string.

    A non-iterable (scalar) argument is wrapped into a one-element list
    and formatted the same way.
    """
    try:
        return '[ ' + ', '.join(format % v for v in vct) + ' ]'
    except TypeError:
        pass
    # *vct* was not iterable: retry with it wrapped in a list, keeping the
    # caller's format.  The original retried with the *same* arguments and
    # a hard-coded format, recursing forever.
    return float_vct_str([vct], format=format)
def spline_grid_from_range(spline_size, range=2, round_to=1e-6):
    """Spline grid spacing for a one-sided *range* and *spline_size*
    activation coefficients, rounded down to a multiple of *round_to*.
    """
    half_size = spline_size // 2
    raw_spacing = range / half_size
    return (raw_spacing // round_to) * round_to
def tag2dict(tags):
    """Collapse a list of AWS ``{'Key': ..., 'Value': ...}`` tag dicts
    into one plain dict; a falsy *tags* yields an empty dict."""
    result = {}
    for tag in tags or {}:
        result[tag.get('Key')] = tag.get('Value')
    return result
def plural(attr):
    """Naive pluralization: strip any trailing 's' characters, append one 's'."""
    stem = attr.rstrip('s')
    return stem + 's'
def tag_predicate(p):
    """Replace a known namespace URI inside *p* with its short prefix.

    e.g. http://www.w3.org/1999/02/22-rdf-syntax-ns#type -> rdf:type.
    Returns None when no known namespace occurs in *p*.
    """
    ns = {
        "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf:",
        "http://www.w3.org/2000/01/rdf-schema#": "rdfs:",
        "http://www.w3.org/2001/XMLSchema#": "xsd:",
        "http://www.w3.org/2002/07/owl#": "owl:",
        "http://www.w3.org/2003/11/swrl#": "swrl:",
        "http://www.w3.org/2003/11/swrlb#": "swrlb:",
        "http://vitro.mannlib.cornell.edu/ns/vitro/0.7#": "vitro:",
        "http://purl.org/ontology/bibo/": "bibo:",
        "http://purl.org/spar/c4o/": "c4o:",
        "http://purl.org/spar/cito/": "cito:",
        "http://purl.org/dc/terms/": "dcterms:",
        "http://purl.org/NET/c4dm/event.owl#": "event:",
        "http://purl.org/spar/fabio/": "fabio:",
        "http://xmlns.com/foaf/0.1/": "foaf:",
        "http://aims.fao.org/aos/geopolitical.owl#": "geo:",
        "http://purl.obolibrary.org/obo/": "obo:",
        "http://purl.org/net/OCRe/research.owl#": "ocrer:",
        "http://purl.org/net/OCRe/study_design.owl#": "ocresd:",
        "http://www.w3.org/2004/02/skos/core#": "skos:",
        "http://vivo.ufl.edu/ontology/vivo-ufl/": "ufVivo:",
        "http://www.w3.org/2006/vcard/ns#": "vcard:",
        "http://vitro.mannlib.cornell.edu/ns/vitro/public#": "vitro-public:",
        "http://vivoweb.org/ontology/core#": "vivo:",
        "http://vivoweb.org/ontology/scientific-research#": "scires:"
    }
    for uri, tag in ns.items():
        if uri in p:
            return p.replace(uri, tag)
    return None
def get_eccF(r1_norm, r2_norm, c_norm):
    """Eccentricity component along the chord direction.

    This value stays constant for the whole problem while the boundary
    conditions are unchanged.  Equation (3) from Avanzini's report.

    Parameters
    ----------
    r1_norm : float
        Norm of the initial position vector.
    r2_norm : float
        Norm of the final position vector.
    c_norm : float
        Norm of the chord vector.

    Returns
    -------
    float
        Eccentricity component along the chord.
    """
    return (r1_norm - r2_norm) / c_norm
def minkowski_params(n_samples):
    """Generate Minkowski benchmarking parameters.

    Parameters
    ----------
    n_samples : int
        Number of samples to be used.

    Returns
    -------
    list
        One params tuple per manifold argument, each of the form
        (manifold, module, metric, n_samples, kwargs, manifold_arg, metric_arg).
    """
    manifold = "Minkowski"
    module = "geomstats.geometry.minkowski"
    metric = "MinkowskiMetric"
    manifold_args = [(3,), (5,)]
    kwargs = {}
    common = (manifold, module, metric, n_samples, kwargs)
    # metric args mirror the manifold args one-to-one
    return [common + (arg, arg) for arg in manifold_args]
def _insert_intersphinx_mapping(c):
    """Insert the ``intersphinx_mapping``, ``intersphinx_timeout`` and
    ``intersphinx_cache_limit`` variables into the configuration state *c*
    and return it.
    """
    mapping = {
        "python": ("https://docs.python.org/3/", None),
        # FIXME add local object cache
        # 'pythonloc': ('http://docs.python.org/',
        #               os.path.abspath(
        #                   os.path.join(os.path.dirname(__file__),
        #                                'local/python3_local_links.inv'))),
        "numpy": ("https://docs.scipy.org/doc/numpy/", None),
        "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
        "matplotlib": ("https://matplotlib.org/", None),
        "sklearn": ("https://scikit-learn.org/stable/", None),
        "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
        "astropy": ("http://docs.astropy.org/en/v3.0.x/", None),
        "astro_metadata_translator": (
            "https://astro-metadata-translator.lsst.io",
            None,
        ),
        "firefly_client": ("https://firefly-client.lsst.io", None),
    }
    c["intersphinx_mapping"] = mapping
    c["intersphinx_timeout"] = 10.0  # seconds
    c["intersphinx_cache_limit"] = 5  # days
    return c
def fetch_symbols_from_elements(elmnts):
    """Invert the AiiDA elements dict: map each entry's 'symbol' to its key."""
    return {value['symbol']: key for key, value in elmnts.items()}
def getblockrejectreason(sdb, propcode, blocks):
    """Look up the rejection reason for the first block matching *propcode*.

    Returns the BlockRejectedReason_Id from *sdb*, or 0 when no block in
    *blocks* carries the proposal code.
    """
    for block in blocks:
        if block[2] == propcode:
            rows = sdb.select('BlockRejectedReason_Id', 'BlockVisit',
                              'BlockVisit_Id=%i' % block[0])
            return rows[0][0]
    return 0
def mock_config(_path):
    """Stand-in for toml.load(): ignore *_path*, return a minimal valid config.

    Returns
    -------
    dict
        The interpreted TOML file.
    """
    return {"project": {"name": "valid"}}
def is_gde_label(x):
    """True when *x* is non-empty and starts with '%' or '#' (GDE label line).

    A falsy *x* ('' or None) is passed through unchanged, mirroring the
    short-circuit `x and ...` form.
    """
    if not x:
        return x
    return x[0] in "%#"
def _find_node_by_name(nodes, name):
    """Return the first node whose 'displayName' equals *name*, else None.

    Node names are unique, so no duplicate handling is needed.
    """
    return next((node for node in nodes if node['displayName'] == name), None)
def make_key(name: str, param: str) -> str:
    """Join *name* and *param* with an underscore, API key style."""
    return f"{name}_{param}"
def scaleFactor(line, sample):
    """Down-scale ratio for an image of *line* x *sample* pixels.

    The larger dimension is scaled to fit 1000; if that would shrink the
    smaller dimension below 50, the smaller dimension's 50-pixel floor
    drives the factor instead.

    Parameters
    ----------
    line : int
    sample : int

    Returns
    -------
    float
        Ratio of sample/minSample or line/minLine (or the max-based ratio).
    """
    MAX_LINE = MAX_SAMPLE = 1000
    MIN_LINE = MIN_SAMPLE = 50
    if sample < line:
        factor = line / MAX_LINE
        if int(sample / factor) < MIN_SAMPLE:
            factor = sample / MIN_SAMPLE
    else:
        factor = sample / MAX_SAMPLE
        if int(line / factor) < MIN_LINE:
            factor = line / MIN_LINE
    return factor
def which(program):
    """Locate *program*, mimicking the unix ``which`` command.

    A program name containing a directory part is checked directly;
    otherwise every entry of $PATH is tried.  Returns the full path of the
    executable, or None when nothing suitable is found.
    """
    import os
    import sys

    def _is_executable(path):
        return os.path.isfile(path) and os.access(path, os.X_OK)

    if sys.platform == "win32" and not program.endswith(".exe"):
        program += ".exe"

    directory, _basename = os.path.split(program)
    if directory:
        if _is_executable(program):
            return program
    else:
        for folder in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(folder, program)
            if _is_executable(candidate):
                return candidate
    return None
def getTypesWithName(types, names):
    """Return all types whose ``name`` attribute appears in *names*.

    Keyword arguments:
    types -- list of model.Type instances
    names -- list of strings with names
    """
    return [t for t in types if t.name in names]
def make_ordered_event_pairs(alphabet):
    """Make an ordered collection of event pairs.

    "Ordered" means the first field of each pair sorts before the second.

    @param alphabet: Alphabet to create combinations for.
    @type alphabet: C{set} of L{Event}
    @return: Set of event pairs.
    @rtype: C{set} of C{tuple} of L{Event}
    """
    events = sorted(alphabet)
    pairs = set()
    for i, first in enumerate(events):
        for second in events[i + 1:]:
            pairs.add((first, second))
    return pairs
def autonomy_out_cb(outcome_map):
    """Map concurrent state outcomes to the next transition name."""
    if outcome_map['TOGGLE_LISTEN'] == 'invalid':
        return 'enter_teleop'
    if outcome_map['AUTONOMY'] == 'aborted':
        return 'aborted'
    return 'stay'
def author_clean(author):
    """Clean an author string and return the formatted 'Last First' form.

    For 'Last, First' input, separators are stripped from the first name;
    for a single token, separators are stripped from the whole string.
    """
    separators = [".", ";", " ", ",", "_", "-"]
    parts = author.strip().split(",")
    if len(parts) >= 2:
        last_name = parts[0]
        first_name = parts[1]
        for sep in separators:
            first_name = first_name.replace(sep, "")
        return last_name + " " + first_name
    # The original restarted from `author` on every loop iteration, so only
    # the final separator ('-') was ever removed; strip them cumulatively.
    clean_author = author
    for sep in separators:
        clean_author = clean_author.replace(sep, "")
    return clean_author
def roi_pad(roi, pad, shape):
    """Grow *roi* by *pad* on every side, clamped into ``(0, ...) -> shape``.

    Accepts either a single slice (with scalar *shape*) or a tuple of
    slices (with a matching tuple *shape*).
    """
    def _padded(s, limit):
        lo = max(0, s.start - pad)
        hi = min(limit, s.stop + pad)
        return slice(lo, hi)

    if isinstance(roi, slice):
        return _padded(roi, shape)
    return tuple(_padded(s, limit) for s, limit in zip(roi, shape))
def size(grid):
    """Return the dimensions of 2-D *grid* as ``(nrows, ncols)``."""
    nrows = len(grid)
    ncols = len(grid[0])
    return nrows, ncols
def drop_command(message, command):
    """Strip the leading '<command> ' prefix from a message text."""
    prefix_length = len(command) + 1  # +1 for the separating space
    return message[prefix_length:]
def get_graph_mean(x, data):
    """Average *data* in consecutive chunks of *x* samples.

    Each output element is the sum of one chunk of ``x`` input values
    divided by ``x``.
    """
    chunk_sums = []
    for index, entry in enumerate(data):
        if index % x == 0:
            chunk_sums.append(entry)
        else:
            chunk_sums[-1] += entry
    # The original divided a chunk only when the *next* chunk started, so
    # the final chunk was returned as a raw sum; divide every chunk here.
    return [total / x for total in chunk_sums]
def scalar_multiply(c: float, v):
    """Scale each element of vector *v* by *c*, returning a new list."""
    scaled = []
    for component in v:
        scaled.append(c * component)
    return scaled
def unquote(s):
    """Undo the effects of quote(); modeled on urllib.unquote().

    Each '_XX' (two hex digits) escape becomes chr(0xXX); any other
    '_...' sequence is kept verbatim.
    """
    pieces = s.split('_')
    decoded = [pieces[0]]
    for piece in pieces[1:]:
        if len(piece) >= 2:
            try:
                decoded.append(chr(int(piece[:2], 16)) + piece[2:])
                continue
            except ValueError:
                pass
        decoded.append('_' + piece)
    return "".join(decoded)
def __same_axes(x_axis, y_axis, xlim, ylim):
    """Check whether the two axes describe the same (squared) plot.

    True only when both axis specs are equal and not None, and the limits
    match.
    """
    if x_axis is None:
        return False
    return x_axis == y_axis and xlim == ylim
def line_splitter(s):
    """Split a string at newlines.

    Parameters
    ----------
    s : str
        The string to be split.

    Returns
    -------
    List[str]
        The lines of *s*, as produced by ``str.splitlines()``.
    """
    lines = s.splitlines()
    return lines
def y_coord(row, rows, line_width):
    """y-coordinate in mils, counting rows bottom-up."""
    rows_from_bottom = rows - row - 1
    return rows_from_bottom * line_width
def validate_timestamp(val):
    """Validate a POSIX timestamp field.

    True only for a float >= 0.0 (seconds since the Epoch, see
    https://en.wikipedia.org/wiki/Unix_time); ints do not qualify.
    """
    if not isinstance(val, float):
        return False
    return val >= 0.0
def parse_results(results):
    """Parse result lines into ``{name: (wins, losses)}``.

    Each line must have exactly two whitespace-separated fields: a name
    and an outcome, 'w' or 'l'.

    Raises:
        Exception: on a malformed line or an unrecognized outcome.
    """
    results_dict = {}
    for result in results:
        parts = result.split()
        # The original only rejected lines with >2 fields, so a 1-field
        # line crashed with IndexError instead of a clear format error.
        if len(parts) != 2:
            raise Exception("the results file has a bad format")
        name, outcome = parts
        wins, losses = results_dict.get(name, (0, 0))
        if outcome == 'w':
            results_dict[name] = (wins + 1, losses)
        elif outcome == 'l':
            results_dict[name] = (wins, losses + 1)
        else:
            raise Exception("I didn't recognize the outcome")
    return results_dict
def and_join(strings):
    """Join the given ``strings`` by commas, prefixing the final item
    with 'and '.

    >>> and_join(['Korea', 'Japan', 'China', 'Taiwan'])
    'Korea, Japan, China, and Taiwan'

    :param strings: a list of words to join
    :returns: a joined string
    :rtype: :class:`str`
    """
    if not strings:
        return ''
    if len(strings) == 1:
        return strings[0]
    last_index = len(strings) - 1
    parts = []
    for i, word in enumerate(strings):
        parts.append('and ' + word if i == last_index else word)
    return ', '.join(parts)
def _get_existing_wrapped_events(method):
    """Return the pycounters event names already wrapped around *method*,
    or an empty list when none are recorded."""
    if not hasattr(method, "__get_pycounters_event_names__"):
        return []
    return method.__get_pycounters_event_names__()
def datavalidation_result_format_change(log):
    """Convert a list of row dicts into a header row plus value rows.

    e.g. [{"id": 1, "name": "hj", "quantity": 2}] ->
         [["id", "name", "quantity"], [1, "hj", 2]]

    Args:
        log: datavalidation log, a non-empty list of dicts.

    Returns:
        list: header keys followed by one value row per input dict.
    """
    table = [list(log[0].keys())]
    for row in log:
        table.append(list(row.values()))
    return table
def generateSvalues(b=10, p=15):
    """int x int -> list[int]

    Build the list of S quantities to test: every multiple i, 2i, ...,
    (b-1)i for i = b, b**2, ..., then b**p itself as the final value.
    """
    values = []
    limit = b ** p
    step = b
    while step < limit:
        values.extend(range(step, step * b, step))
        step *= b
    values.append(limit)
    return values
def mult(x, y):
    """Creates an SMTLIB multiplication statement formatted string.

    Parameters
    ----------
    x, y : str
        First and second arguments to include in the expression.  They
        must already be strings: the body concatenates them directly
        (the original docstring said float, which would raise TypeError).
    """
    return "(* " + x + " " + y + ")"
def check_string(input_string):
    """Rewrite characters that may lead to errors when written.

    Separator-like characters collapse to '_'; comparison/logic
    characters become spelled-out words.
    """
    table = str.maketrans({
        " ": "_",
        ",": "_",
        ";": "_",
        ":": "_",
        "\"": "_",
        "=": "_equal_",
        ">": "_greater_",
        "<": "_smaller_",
        "&": "_and_",
        "|": "_or_",
    })
    return input_string.translate(table)
def Iq(q, intercept, slope):
    """Linear-model intensity at *q*.

    :param q: Input q-value
    :param intercept: Intercept of the linear model
    :param slope: Slope of the linear model
    :return: intercept + slope * q
    """
    return intercept + slope * q
def clean_names(a):
    """Strip braces and turn tildes into spaces in each name.

    A None input passes through unchanged.
    """
    if a is None:
        return None
    return [name.replace("{", "").replace("}", "").replace("~", " ")
            for name in a]
def without_styl(s):
    """Return string *s* without a trailing ".styl" extension.

    Only the suffix is removed; the original used str.replace, which also
    deleted ".styl" occurring anywhere inside the string.
    """
    if s.endswith(".styl"):
        return s[:-len(".styl")]
    return s
def format_argument_version(arg_version):
    """Rewrite '.' as '-' throughout *arg_version* to match the log-message
    formatting requirements.

    Args:
        arg_version (str): Version tag to be formatted.

    Returns:
        str: Formatted version tag.
    """
    return "-".join(arg_version.split("."))
def parse_lookup_key(lookup_key):
    """Normalize *lookup_key* into a list of keys.

    A dotted string is split on '.'; a list is returned as-is.

    Raises:
        KeyError: when *lookup_key* is empty/falsy.
        TypeError: when it is neither a string nor a list.
    """
    if not lookup_key:
        raise KeyError("No lookup key specified")
    if isinstance(lookup_key, str):
        return lookup_key.split('.')
    if isinstance(lookup_key, list):
        return lookup_key
    raise TypeError('lookup must be string or list')
def is_integer(num):
    """Check whether *num* can be converted with int().

    Accepts ints, numeric strings and floats (int() truncates floats, so
    3.5 still counts as convertible).
    """
    try:
        int(num)
    except (ValueError, TypeError):
        # The original caught only ValueError, so non-numeric types such
        # as None raised TypeError instead of returning False.
        return False
    return True
def merge_two_dicts(x, y):
    """Merge two dictionaries into a new dict.

    :param x: base dict (copied, not mutated)
    :param y: overriding dict; its entries win on key clashes
    :return: merged dict
    """
    merged = dict(x)
    merged.update(y)
    return merged
def non_empty(value, question, keep=None):
    """Prompt with *question* until a non-empty answer is given.

    Prompting happens only for the sentinel value "<ask>".  Any other
    *value* is returned directly; when it is falsy and *keep* is given,
    *keep* is returned instead.
    """
    if value != "<ask>":
        if value:
            return value
        if keep is not None:
            return keep
        return value
    while True:
        answer = input("=> " + question + "\n")
        if answer.strip():
            return answer
        print("Please write a non-empty value")
def rgb_to_hex(color):
    """Format an (r, g, b) triple as a '#rrggbb' hex string."""
    r, g, b = color
    return "#%02x%02x%02x" % (r, g, b)
def milli_2_readadble(msecs):
    """Function: milli_2_readadble

    Description: Converts milliseconds into days, hours, minutes and
        seconds and renders them with appropriate tags.

    Arguments:
        (input) msecs -> Milliseconds.
    """
    total_seconds = msecs / 1000
    rest, seconds = divmod(total_seconds, 60)
    rest, minutes = divmod(rest, 60)
    days, hours = divmod(rest, 24)
    return "%d days %d hours %d minutes %d seconds" \
        % (days, hours, minutes, seconds)
def process_coords(coords, size, psize):
    """Center an object to be pasted on a card.

    Any coordinate given as -1 is replaced (in place) by the value that
    centers an object of *psize* on a canvas of *size*.

    :param coords: mutable [x, y] coords of the object to be pasted
    :param size: size of the surface we are pasting on
    :param psize: size of the object to be pasted
    :return: the (possibly updated) coords list
    """
    for axis in (0, 1):
        if coords[axis] == -1:
            coords[axis] = int((size[axis] - psize[axis]) / 2)
    return coords
def get_stats(tp, fp, tn, fn):
    """Compute accuracy, recall, precision and F1 from confusion counts.

    Parameters
    ----------
    tp, fp, tn, fn : number
        True/false positive/negative counts from check_preds().

    Returns
    -------
    tuple
        (acc, recall, precision, fscore).  With tp == 0, all three
        rate metrics are 1.0 when fp and fn are also 0 (nothing to find,
        nothing wrongly found), otherwise 0.0.
    """
    acc = (tp + tn) / (tp + tn + fp + fn)
    if tp == 0.0:
        score = 1.0 if (fp == 0.0 and fn == 0.0) else 0.0
        return acc, score, score, score
    recall = tp / (tp + fn)
    precision = tp / (tp + fp)
    fscore = 2 * precision * recall / (precision + recall)
    return acc, recall, precision, fscore
def intersect(u, v):
    """Compute the elementwise intersection of two slice lists.

    None acts as the identity (the other operand is returned unchanged).
    Both slice lists are assumed to have the same length.
    """
    if u is None:
        return v
    if v is None:
        return u
    return tuple(
        slice(max(a.start, b.start), min(a.stop, b.stop))
        for a, b in zip(u, v)
    )
def getKLDivergence(P, Q):
    """KL-divergence sum(p * log(p/q)) from distribution P to Q.

    Both sequences must have equal length and non-negative entries;
    terms with p == 0 contribute nothing.
    """
    import math
    assert len(P) == len(Q)
    total = 0
    for p, q in zip(P, Q):
        assert p >= 0
        assert q >= 0
        if p > 0:
            total += p * math.log(p / q)
    return total
def calc_tfidf(p, idf):
    """tf-idf score for each distinct word in *p*: (count/len(p)) * idf[word]."""
    counts = {}
    for word in p:
        counts[word] = counts.get(word, 0) + 1
    total = len(p)
    return {word: (count / total) * idf[word]
            for word, count in counts.items()}
def get_axe_names(image, ext_info):
    """Derive the names of all aXe products for *image*.

    The root is the image name up to '.fits'; the extension number comes
    from ext_info['axe_ext'].  Note: the 'SPC' and 'FLX' templates have no
    '.' before their suffix — preserved from the original naming.
    """
    root = image[:image.rfind('.fits')]
    ext = ext_info['axe_ext']
    templates = {
        'GOL': "{0:s}_{1:d}.cat",
        'OAF': "{0:s}_{1:d}.OAF",
        'BAF': "{0:s}_{1:d}.BAF",
        'PET': "{0:s}_{1:d}.PET.fits",
        'BCK_PET': "{0:s}_{1:d}.BCK.PET.fits",
        'DPP': "{0:s}_{1:d}.DPP.fits",
        'BCK_DPP': "{0:s}_{1:d}.BCK.DPP.fits",
        'SPC': "{0:s}_{1:d}SPC.fits",
        'STP': "{0:s}_{1:d}.STP.fits",
        'MSK': "{0:s}_{1:d}.MSK.fits",
        'NBCK': "{0:s}_{1:d}.NBCK.fits",
        'SGRI': "{0:s}_{1:d}.SGRISM.fits",
        'FLX': "{0:s}_{1:d}FLX.fits",
    }
    return {key: template.format(root, ext)
            for key, template in templates.items()}
def model_get_kwargs(feature):
    """LandUseArea get() kwargs — useful for get_or_create() — built from
    the feature's OBJECTID attribute."""
    object_id = feature['attributes']['OBJECTID']
    return {'object_id': object_id}
def all_in_charset(chars, string):
    """True when every character of *string* is contained in *chars*.

    :param chars: set of permitted characters
    :param string: the string to test
    :return bool: whether *string* is wholly within *chars*
    """
    return all(ch in chars for ch in string)
def soft_light(lower_rgb, upper_rgb):
    """Apply the soft-light blending mode of a layer on an image."""
    inverse_lower = 1.0 - lower_rgb
    multiply_term = inverse_lower * lower_rgb * upper_rgb
    screen_term = lower_rgb * (1.0 - inverse_lower * (1.0 - upper_rgb))
    return multiply_term + screen_term
def get_data(data, variable):
    """Coerce data[variable] into a supported string form.

    Lists are joined with ','; dicts and ints are str()'d.  Any other
    type (including str and missing keys) falls through and returns None
    implicitly, matching the original behavior.

    :param data: raw data dict object
    :param variable: key of the dictionary entry to transform
    :returns: transformed data (str) or None
    """
    value = data.get(variable)
    if isinstance(value, list):
        return ",".join(map(str, value))
    if isinstance(value, (dict, int)):
        return str(value)
def _dtype_equality(dtype1, dtype2):
    """Numpy-dtype-aware equality check.

    Compares the underlying ``.type`` attribute when present, so a dtype
    instance and a scalar type (e.g. np.uint8) compare equal.

    References:
        https://stackoverflow.com/questions/26921836/correct-way-to-test-for-numpy-dtype
    """
    def _canonical(dt):
        return getattr(dt, 'type', dt)
    return _canonical(dtype1) == _canonical(dtype2)
def calculate_benefit_cost_ratio(region, country_parameters):
    """Benefit/cost ratio for *region*: total revenue over summed costs.

    Returns 0 unless both revenue and cost are strictly positive.
    *country_parameters* is accepted for interface compatibility but is
    not used here.
    """
    cost_components = ('network_cost', 'spectrum_cost', 'tax', 'profit_margin')
    cost = sum(region[component] for component in cost_components)
    revenue = region['total_revenue']
    if revenue > 0 and cost > 0:
        return revenue / cost
    return 0
def finditem(search_dict, field):
    """Recursively collect every value stored under key *field* in a
    nested structure of dicts and lists.
    """
    found = []
    for key, value in search_dict.items():
        if key == field:
            found.append(value)
        elif isinstance(value, dict):
            found.extend(finditem(value, field))
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, dict):
                    found.extend(finditem(element, field))
    return found
def concat(list_a: list, list_b: list) -> list:
    """Concatenate two lists together into a new list.

    Example:
        >>> concat([1, 2, 3], [4, 5, 6])
        [1, 2, 3, 4, 5, 6]

    :param list_a: First list to concatenate
    :param list_b: Second list to concatenate
    :return: Concatenated list
    """
    result = list_a + list_b
    return result
def centerOfMass(positions, weights):
    """Calculate the center of mass of a set of weighted positions.

    Args:
        positions: A list of (x, y, z) position tuples.
        weights: A list of position weights.

    Return:
        A tuple of floats with the coordinates of the center of mass.

    Raises AssertionError when the weights sum to zero.
    """
    total_weight = sum(weights)
    assert total_weight != 0
    cx = cy = cz = 0.0
    for pos, weight in zip(positions, weights):
        cx += pos[0] * weight
        cy += pos[1] * weight
        cz += pos[2] * weight
    return (cx / total_weight, cy / total_weight, cz / total_weight)
def removeWhitespace(seqToTest):
    """Remove non-permitted characters from a sequence.

    Mainly whitespace, but dashes and asterisks are stripped as well.
    Returns ``[cleaned_sequence, changed_flag]``; the input is returned
    untouched when it contains no bad characters.
    """
    bad_chars = {'\t', ' ', '\n', '-', '*'}
    changed = False
    if any(ch in bad_chars for ch in seqToTest):
        seqToTest = ''.join(ch for ch in seqToTest if ch not in bad_chars)
        changed = True
    return [seqToTest, changed]
def nonempty_region(r):
    """Return True if the region [r0,r1] x [r2,r3] x [r4,r5] has nonzero
    extent along every axis.

    >>> nonempty_region([1,2,3,4,5,6])
    True
    >>> nonempty_region([1,2,4,4,5,6])
    False
    """
    assert type(r) == list
    assert len(r) == 6
    return all(r[i] != r[i + 1] for i in (0, 2, 4))
def get_hrefs(data):
    """Traverse *data* recursively and return the flat list of values for
    all 'href' keys; non-string 'href' values are ignored.
    """
    result = []
    if isinstance(data, list):
        for value in data:
            # The original append()ed the recursive result here, producing
            # nested lists instead of the documented flat list of hrefs.
            result.extend(get_hrefs(value))
    elif isinstance(data, dict):
        for key, value in data.items():
            if key == "href":
                if isinstance(value, str):
                    result.append(value)
            elif isinstance(value, (list, dict)):
                result.extend(get_hrefs(value))
    return result
def to_camel_case(snake_str):
    """Convert snake_string to camelCase.

    :param str snake_str:
    :returns: str
    """
    head, *tail = snake_str.split('_')
    return head + ''.join(part.title() for part in tail)
def scalar_product(v_1_i, v_2_i):
    """Scalar (dot) product of two 3-vectors."""
    a1, a2, a3 = v_1_i
    b1, b2, b3 = v_2_i
    return a1 * b1 + a2 * b2 + a3 * b3
def gap_digraph(edges):
    """Given a 0-indexed list of edges, return a 1-indexed list of twice
    the length (both directions of each edge), suitable for pasting into
    GAP."""
    directed = []
    for a, b in edges:
        directed.append([a + 1, b + 1])
        directed.append([b + 1, a + 1])
    return directed
def diff_between_angles(angle_a, angle_b):
    """Signed difference angle_b - angle_a, wrapped into (-180, 180].

    Args:
        angle_a (float): angle in degrees.
        angle_b (float): angle in degrees.

    Returns:
        float: difference between the two angles in degrees.
    """
    wrapped = (angle_b - angle_a) % 360
    return wrapped - 360 if wrapped > 180 else wrapped
def shorten_list(l, max_length=15):
    """Truncate *l* to *max_length* items, appending a '...N total' marker.

    Lists short enough are returned unchanged (same object).
    """
    total = len(l)
    if total <= max_length:
        return l
    shortened = l[:max_length]
    shortened.append("...%i total" % total)
    return shortened
def DecodeUTF8(data):
    """Decode bytes as UTF-8, replacing any bad characters.

    Non-bytes input is returned as-is.
    """
    if not isinstance(data, bytes):
        return data
    return data.decode('utf-8', errors='replace')
def ldistinct(seq):
    """Return the first occurrence of each item in *seq*, order preserved."""
    seen = set()
    unique = []
    for element in seq:
        if element in seen:
            continue
        seen.add(element)
        unique.append(element)
    return unique
def _collapse(xlink_pos, by_bc, group_by, multimax=1):
    """Report the number of cDNAs and reads on cross-link site *xlink_pos*.

    Input parameter ``by_bc`` maps each barcode to its list of hits, e.g.::

        by_bc = {
            'AAA': [hit1, hit2, ...],
            'AAT': [hit1, hit2],
        }

    Counting reads is easy - count the hits per cross-link site.  Counting
    cDNAs is also easy - count the distinct barcodes.  However, two extra
    scenarios need handling:

        * one read can be mapped to multiple sites; its "contribution" is
          divided equally between all positions it maps to, and
        * longer reads get a proportionally greater "contribution" than
          short ones.

    Together this means each read contributes::

        weight = 1 * 1/a * b/c
        # a = number of hits
        # b = read length
        # c = sum(read lengths per same barcode)

    A group of reads with equal start position and barcode may also
    represent multiple cross-links (e.g. reads straddling an exon-intron
    junction, where some map into the intron and others skip it).  This is
    solved by sub-grouping on the "second start" - the coordinate of the
    first nucleotide of the second part of the read; each group with a
    unique second start is treated as an independent cross-link event.

    Parameters
    ----------
    xlink_pos : int
        Cross-link position (genomic coordinate).
    by_bc : dict
        Dict with hits for each barcode.
    group_by : str
        Report by 'start', 'middle' or 'end' position.
    multimax : int
        Ignore reads mapped to more than ``multimax`` places.

    Returns
    -------
    dict
        Maps each reported position to ``(cDNA_weight_sum, read_count)``.
    """
    # Index used to pick the reported coordinate from (start, middle, end).
    group_by_index = ['start', 'middle', 'end'].index(group_by)
    # Container for cDNA and read counts:
    counts = {}
    for hits in by_bc.values():
        # separate in groups by second-start
        ss_groups = {}
        for read in hits:
            # NOTE(review): the original docstring describes 6-field hits
            # where index 4 is `cigar` and index 5 is `second_start`, yet
            # grouping here keys on read[4] — confirm which field actually
            # holds the second start.
            ss_groups.setdefault(read[4], []).append(read)
        for ss_group in ss_groups.values():
            # Sum of all read lengths per ss_group:
            sum_len_per_barcode = sum([i[2] for i in ss_group if i[3] <= multimax])
            # NOTE(review): this unpacks 5 fields per hit; a genuine
            # 6-tuple (as documented) would raise ValueError here — verify
            # the actual hit shape against the producer of `by_bc`.
            for middle_pos, end_pos, read_len, num_mapped, _ in ss_group:
                if num_mapped > multimax:
                    continue
                grp_pos = (xlink_pos, middle_pos, end_pos)[group_by_index]
                # weight = read_len / (hits * total length for this barcode)
                weight = read_len / (num_mapped * sum_len_per_barcode)
                current_values = counts.get(grp_pos, (0, 0))
                upadated_values = (current_values[0] + weight, current_values[1] + 1)
                counts[grp_pos] = upadated_values
    return counts
def iroot(n, r):
    """Integer r-th root of n: the largest s with s**r <= n.

    Uses integer Newton iteration, which converges monotonically once it
    falls below the true root.
    """
    current, previous = n, n + 1
    while current < previous:
        previous = current
        numerator = (r - 1) * previous + n // pow(previous, r - 1)
        current = numerator // r
    return previous
def on_storage(full_path):
    """Check whether a file path points at Google Cloud Storage.

    Args:
        ``full_path`` (string): file path.

    Returns:
        ``flag`` (bool): True for gs:// paths.
    """
    gcs_scheme = "gs://"
    return full_path.startswith(gcs_scheme)
def _get_best_indexes(logits, n_best_size):
    """Indexes of the ``n_best_size`` largest logits, best first."""
    ranked = sorted(enumerate(logits), key=lambda pair: pair[1], reverse=True)
    return [index for index, _score in ranked[:n_best_size]]
def split(word):
    """Explode *word* into a list of its single characters."""
    return [character for character in word]
def create_url(controller_ip, endpoint):
    """Create the endpoint URL (https, port 1280) to POST/PUT/GET/DELETE
    against the controller."""
    return f'https://{controller_ip}:1280/{endpoint}'
def _mel2hz(mel):
    """Inverse mel-scale mapping.

    Arguments:
        mel: Mel frequency.

    Returns:
        Frequency in Hz.
    """
    return 700 * (10 ** (mel / 2595.0) - 1)
def F(N):
    """N-th Fibonacci number, F(0)=0 and F(1)=1, computed iteratively."""
    sequence = [0, 1]
    while len(sequence) <= N:
        sequence.append(sequence[-1] + sequence[-2])
    return sequence[N]
def _findfile(path, pattern):
    """Recursively collect files under *path* whose basename matches the
    fnmatch *pattern*."""
    import fnmatch
    import os
    matches = []
    for root, _dirs, files in os.walk(path):
        matches.extend(os.path.join(root, name)
                       for name in files
                       if fnmatch.fnmatch(name, pattern))
    return matches
def factorial(num):
    """Return the factorial of *num*, computed recursively.

    :param num: the number
    :return: num! (any num below 2 yields 1, the base case)
    """
    if num < 2:
        return 1
    return num * factorial(num - 1)
def calc_offset_2(offset):
    """Part 2 offset rule: decrement offsets of three or more, otherwise increment."""
    if offset >= 3:
        return offset - 1
    return offset + 1
def anonymized(tumor_list, prefix="CS"):
    """Anonymize a list of tumor sample names.

    Samples are sorted alphabetically first so the numbering is
    deterministic regardless of input order, then each name is mapped to
    ``"<prefix><n>"`` with a 1-based index.

    The original body mapped every name to itself (``result[elem] = elem``)
    and never used *prefix*, so no anonymization happened at all.

    :param tumor_list: iterable of sample names.
    :param prefix: string prepended to the running number (default "CS").
    :return: dict mapping original name -> anonymized alias.
    """
    result = {}
    for idx, name in enumerate(sorted(tumor_list), start=1):
        result[name] = "%s%d" % (prefix, idx)
    return result
def fib(n):
    """Compute the n-th Fibonacci number.

    The naive double recursion ``fib(n-1) + fib(n-2)`` is O(2**n); this
    iterative version is O(n) with the same values and the same behavior
    for n <= 1 (returns n itself, so fib(0) == 0 and fib(1) == 1).

    :param n: index into the Fibonacci sequence.
    :return: the n-th Fibonacci number.
    """
    if n <= 1:
        return n
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b
def get_odd_palindrome_at(s, index):
    """ (str, int) -> str

    Precondition: s contains only lowercase alphabetic characters.
                  0 <= index < len(s)

    Return the longest odd-length palindrome in s that is centered at index.

    The original loop bound ``range(0, min(index, len(s) - index))`` was off
    by one and could never reach the full legal radius: for 'aba' at index 1
    it returned 'b' instead of 'aba'.

    >>> get_odd_palindrome_at('accccc', 2)
    'ccc'
    >>> get_odd_palindrome_at('accccc', 3)
    'ccccc'
    >>> get_odd_palindrome_at('aba', 1)
    'aba'
    """
    # Largest radius that keeps the window inside s on both sides.
    max_radius = min(index, len(s) - 1 - index)
    result = s[index]
    for radius in range(1, max_radius + 1):
        candidate = s[index - radius: index + radius + 1]
        # A longer centered palindrome would contain this window, so the
        # first failure ends the search.
        if candidate != candidate[::-1]:
            break
        result = candidate
    return result
def ZoneNameToRegionName(zone_name):
    """Convert a zone name to a region name: 'us-central1-a' -> 'us-central1'.

    Input without a '-' is returned unchanged.
    """
    head, sep, _zone_letter = zone_name.rpartition('-')
    return head if sep else zone_name
def list_substract(list1, list2):
    """OK [52]
    Return the element-wise difference list1[i] - list2[i].

    Returns None when either argument is None or the lengths differ.

    The original guard ``if not list1: return None`` returned None for two
    empty lists even though their lengths match (the docstring promised None
    only on a length mismatch), and raised TypeError when list2 was None.
    """
    if list1 is None or list2 is None:
        return None
    if len(list1) != len(list2):
        #print("list substract with lists of different length!!, abort")
        return None
    return [a - b for a, b in zip(list1, list2)]
def get_link_meta_3_types(source, target, new2old, old2new):
    """
    Return metadata of a node pair from the original graph before transformation.

    :param source: id of the source node in the transformed graph; must be
        registered in ``new2old`` with type 'out' or 'out_dummy'.
    :param target: id of the target node in the transformed graph.
    :param new2old: map of new ids to ``{"id": ..., "type": ...}`` records
        describing the node before the transform.
    :param old2new: map of old ids to their new ids per role
        ("out", and optionally "in+" / "in-").
    :return: (source, target_in+, target_in-, target_out, weight) as ints,
        or all -1 when source is an 'out_dummy' node. Roles missing from
        ``old2new`` are reported as -1.
    :rtype: (int, int, int, int, int)
    """
    # Reject sources that are not 'out'/'out_dummy' nodes.
    # NOTE(review): if source is absent from new2old, formatting this message
    # raises KeyError instead of the intended Exception — confirm callers
    # never pass unregistered ids.
    if (source not in new2old) or ((new2old[source]["type"] != "out") and (new2old[source]["type"] != "out_dummy")):
        raise Exception("node '%s' is not registered as 'out' but as '%s'" % (source,new2old[source]["type"]))
    # Dummy out-nodes carry no metadata: signal with a sentinel tuple.
    if new2old[source]["type"] == "out_dummy":
        return -1, -1, -1, -1, -1
    target_old = str(new2old[target]["id"])
    # Edge sign: 'in-' targets contribute negative weight.
    weight = -1 if new2old[target]["type"] == "in-" else 1
    target_out = old2new[target_old]["out"]
    # NOTE(review): both branches below are byte-identical — this looks like a
    # copy-paste where the positive/negative cases were meant to differ;
    # confirm the intended behavior before simplifying.
    if weight > 0:
        # Missing roles map to -1.
        target_in_pos = old2new[target_old]["in+"] if "in+" in old2new[target_old] else -1
        target_in_neg = old2new[target_old]["in-"] if "in-" in old2new[target_old] else -1
    else:
        target_in_pos = old2new[target_old]["in+"] if "in+" in old2new[target_old] else -1
        target_in_neg = old2new[target_old]["in-"] if "in-" in old2new[target_old] else -1
    return int(source), int(target_in_pos), int(target_in_neg), int(target_out), weight
def is_float(value):
    """Check whether `value` is an instance of :class:`float`.

    Args:
        value (mixed): Value to check.

    Returns:
        bool: Whether `value` is a float. Note that plain ints (and bools)
        are not floats.

    Example:

        >>> is_float(1.0)
        True

        >>> is_float(1)
        False

    .. versionadded:: 2.0.0
    """
    return isinstance(value, float)
def bubblesort(lst):
    """Sort *lst* in place with bubble sort and return the same list.

    Non-list inputs are rejected with an error string instead of raising.
    """
    if not isinstance(lst, list):
        return "BubbleSort takes only lists."
    # Each outer pass bubbles the largest remaining value to position `end`.
    for end in range(len(lst) - 1, 0, -1):
        for k in range(end):
            if lst[k] > lst[k + 1]:
                lst[k], lst[k + 1] = lst[k + 1], lst[k]
    return lst
def call_me_maybe(obj):
    """Invoke *obj* if it exposes ``__call__``; otherwise return it unchanged.

    See `here`_ for description.

    .. _here: https://www.youtube.com/watch?v=fWNaR-rxAic
    """
    if hasattr(obj, "__call__"):
        return obj()
    return obj
def ipstr(barray):
    """Render a sequence of integer octets as dotted-decimal IP text."""
    return ".".join(map(str, barray))
def apply_case(s, case):
    """Return *s* transformed according to the (case-insensitive) *case* name.

    Supported values: "upper", "lower", "capitalize", "title"; anything
    else leaves *s* untouched.
    """
    mode = case.upper()
    if mode == "UPPER":
        return s.upper()
    if mode == "LOWER":
        return s.lower()
    if mode == "CAPITALIZE":
        return s.capitalize()
    if mode == "TITLE":
        # Split on single spaces only (not arbitrary whitespace), matching
        # the original word-by-word capitalization.
        return " ".join(word.capitalize() for word in s.split(" "))
    return s
def sizeof_fmt(num, suffix='B'):
    """
    Utility function to convert byte amounts to human readable format
    using binary (1024-based) unit prefixes.

    Taken from http://stackoverflow.com/a/1094933/2528077
    """
    value = num
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return f"{value:3.1f}{prefix}{suffix}"
        value /= 1024.0
    # Anything past Zi collapses into yobibytes.
    return f"{value:.1f}Yi{suffix}"