content
stringlengths
42
6.51k
def stream_has_colours(stream):
    """Return True if *stream* is a TTY that supports colours.

    Based on Python Cookbook recipe #475186.  Non-TTY streams (files,
    pipes) and terminals reporting 2 or fewer colours return False.
    """
    # Colour output only makes sense on an interactive terminal.
    if not getattr(stream, "isatty", None):
        return False
    if not stream.isatty():
        return False
    try:
        import curses
        curses.setupterm()
        return curses.tigetnum("colors") > 2
    except Exception:
        # terminfo lookup failed; play it safe and report no colour support
        return False
def remove_empty_ptags(string):
    """Strip empty paragraph tags from an HTML fragment.

    Some CMSs turn every line break into a paragraph, so content pasted
    from Word-like editors accumulates empty ``<p>`` elements that break
    layout.  Removes ``<p></p>``, ``<p><br></p>`` and ``<p><br/></p>``.

    Template usage: ``{{ self.side_content|removeEmptyPs|richtext }}``
    """
    cleaned = string
    for empty in ('<p></p>', '<p><br></p>', '<p><br/></p>'):
        cleaned = cleaned.replace(empty, '')
    return cleaned
def find_word(keyword, sentence, start=0, end=-1, strict=False):
    """Locate *keyword* within a tokenised *sentence*.

    :param keyword: string to find, e.g. ``'bc'``
    :param sentence: list of tokens, e.g. ``['a', 'b', 'cd']``
    :param start: first token index to consider
    :param end: one-past-last token index (-1 means end of sentence)
    :param strict: if True, only whole-token matches are accepted
    :return: ``(start_index, end_index)`` token span, or ``(-1, -1)``
        if not found.  A whole-token match yields ``start == end``.

    When the keyword is not a whole token and ``strict`` is False, the
    tokens in the window are joined and a character-level search maps
    the hit back to token indices.
    """
    if not sentence:
        return -1, -1
    if end == -1 or end > len(sentence):
        end = len(sentence)
    if keyword in sentence[start:end]:
        # Exact token match: compute the index once (was computed twice).
        idx = sentence.index(keyword, start, end)
        return idx, idx
    if strict:
        return -1, -1
    window = sentence[start:end]
    pos = ''.join(window).find(keyword)
    s, e = -1, -1
    if pos >= 0:
        consumed = -1  # index of the last character covered so far
        for i, word in enumerate(window):
            consumed += len(word)
            if consumed >= pos and s < 0:
                s = i + start
            if consumed >= pos + len(keyword) - 1:
                e = i + start
                break
    return s, e
def _unixypath_to_uri(path): """Converts a unix-style path to a file: URL.""" return '//' + path
def _encode_data(data): """Takes data in nested dictionary form, and converts it for egta All dictionary keys must be strings. This call is non destructive. """ encoded = {} for k, val in data.items(): if isinstance(val, dict): for inner_key, inner_val in _encode_data(val).items(): encoded['{0}[{1}]'.format(k, inner_key)] = inner_val else: encoded[k] = val return encoded
def mock_dictreader(headers, data):
    """Pair up headers with a row of values, mimicking csv.DictReader.

    Lets tests exercise row-handling code without a real CSV file.
    """
    return {name: value for name, value in zip(headers, data)}
def parse_options(source):
    """Parse ``key=value`` chart-tag options into a dict.

    Values may contain spaces; the token before the last space of each
    middle segment is the value, the trailing word is the next key.
    """
    parsed = {}
    pieces = [piece.strip() for piece in source.split('=')]
    current_key = pieces[0]
    for middle in pieces[1:-1]:
        # Everything up to the final space is this key's value; the
        # final word starts the next key.
        value, current_key = middle.rsplit(' ', 1)
        parsed[current_key.strip() if False else value and None] = None  # placeholder removed below
    # rebuilt cleanly:
    parsed = {}
    current_key = pieces[0]
    for middle in pieces[1:-1]:
        value, next_key = middle.rsplit(' ', 1)
        parsed[current_key.strip()] = value
        current_key = next_key
    parsed[current_key.strip()] = pieces[-1].strip()
    return parsed
def flatten_list_of_objects_and_iterables(L):
    """Flatten one level: splice iterable elements, keep others as-is.

    Strings are treated as atomic values, not iterables.
    """
    flattened = []
    for item in L:
        is_spliceable = hasattr(item, '__iter__') and not isinstance(item, str)
        if is_spliceable:
            flattened.extend(item)
        else:
            flattened.append(item)
    return flattened
def calculate_transposition_offsets(U):
    """Return the last two distinct trailing digits of the U block.

    The final element is the second offset; scanning backwards, the
    first element that differs from it is the first offset.
    """
    backwards = list(reversed(U))
    second = backwards[0]
    first = next(x for x in backwards[1:] if x != second)
    return (first, second)
def get_n_args_channels(n_chs, model_info):
    """Get a dictionary containing arguments and values for the model.

    Args:
        n_chs (Dict): Number of channels keyed by layer name.
        model_info (collections.OrderedDict): Model information for
            auto_prune; each value must provide an ``'arg'`` key.

    Returns:
        Dict: argument-name -> channel-count for layers present in
        ``n_chs``, e.g. ``{'out_ch_fc1': 1024, 'out_ch_fc2': 1024}``.
    """
    # `k in n_chs` replaces the redundant `k in n_chs.keys()`.
    return {v['arg']: n_chs[k] for k, v in model_info.items() if k in n_chs}
def _get_tenant_id_for_message(message): """Find the tenant id in the incoming message.""" # give priority to the tenant_id in the router dict if one # exists in the message payload = message.get('payload', {}) for key in ('router', 'port', 'subnet'): if key in payload and payload[key].get('tenant_id'): val = payload[key]['tenant_id'] # LOG.debug('using tenant id from payload["%s"]["tenant_id"] = %s', # key, val) return val for key in ['_context_tenant_id', '_context_project_id']: if key in message: val = message[key] # Some notifications have None as the tenant id, but we # can't shard on None in the dispatcher, so treat those as # invalid. if val is not None: # LOG.debug('using tenant id from message["%s"] = %s', # key, val) return val return None
def convert(rel):
    """Negate positives, make negatives positive, leave zeros as zeros."""
    flipped = []
    for value in rel:
        flipped.append(-value if value > 0 else abs(value))
    return tuple(flipped)
def highest(candidates):
    """Return the highest-scoring candidate element.

    :param candidates: dict whose values are dicts with a ``'score'`` key
    :return: the value with the largest score, or None if empty

    Uses ``max`` (O(n)) instead of sorting the whole collection
    (O(n log n)); ties resolve to the first-encountered maximum, same
    as the stable reverse sort did.
    """
    if not candidates:
        return None
    return max(candidates.values(), key=lambda item: item['score'])
def all_as_list_of_dicts(list_of_dicts: list) -> list:
    """Convert a list of SQLAlchemy objects into a list of plain dicts.

    :param list_of_dicts: objects exposing an ``as_dict()`` method
    :return: list of the ``as_dict()`` results, in order
    """
    return [obj.as_dict() for obj in list_of_dicts]
def mac_to_string(mac):
    """Format the first six bytes of *mac* as XX:XX:XX:XX:XX:XX (upper-hex)."""
    return ':'.join('%02X' % octet for octet in mac[:6])
def _escapeArg(arg): """Escape the given command line argument for the shell.""" #XXX There is a *lot* more that we should escape here. #XXX This is also not right on Linux, just try putting 'p4' is a dir # with spaces. return arg.replace('"', r'\"')
def pep8_module_name(name):
    """Normalise *name* into a PEP 8 module name.

    Lowercases, converts dashes and spaces to underscores, and prefixes
    an underscore when the name starts with a digit.
    """
    prefixed = '_' + name if name[0].isnumeric() else name
    return prefixed.lower().replace('-', '_').replace(' ', '_')
def convert_to_float(frac_str):
    """Convert a plain or mixed-fraction string to a float.

    Accepts ``"2"``, ``"1/2"`` and ``"-1 1/2"``; a negative whole part
    makes the fractional part subtract.
    """
    try:
        return float(frac_str)
    except ValueError:
        pass
    numerator, denominator = frac_str.split("/")
    whole = 0
    try:
        # Mixed number: whole part separated from the fraction by a space.
        leading, numerator = numerator.split(" ")
        whole = float(leading)
    except ValueError:
        pass
    fraction = float(numerator) / float(denominator)
    return whole - fraction if whole < 0 else whole + fraction
def recipe_is_complete(r):
    """Return True when the recipe has a non-None title and instructions.

    A recipe is complete iff both keys exist and neither value is None.
    """
    # dict.get returns None both for missing keys and explicit Nones,
    # which matches the original two-step check.
    return r.get('title') is not None and r.get('instructions') is not None
def getOverviewName(viewName):
    """Return the Overview sheet name for the given view."""
    return f'Overview ({viewName})' if True else None
def merge_segdb(segdbs):
    """Concatenate a list of segdb lists into the first one.

    Note: the first list is mutated in place and returned.
    """
    first, *rest = segdbs
    for chunk in rest:
        first.extend(chunk)
    return first
def str2html(s):
    """Escape '&', '>' and '<' with their HTML entities.

    Parameters
    ----------
    s : str
        String to convert to valid displayable HTML.
    """
    # '&' must be escaped first so existing entities aren't double-escaped.
    escaped = s
    for raw, entity in (("&", "&amp;"), (">", "&gt;"), ("<", "&lt;")):
        escaped = escaped.replace(raw, entity)
    return escaped
def adjust_result(result_arr):
    """Lowercase all candidate characters in an hOCR output structure.

    Iterates the output and lowercases every candidate so matching is
    case-insensitive.  If both the uppercase and lowercase form of a
    letter exist in a slot, the lowercase entry keeps the higher
    probability of the two.

    @param result_arr: the hOCR output to lowercase — a list of words,
        each word a list of character slots, each slot a dict mapping
        candidate character -> probability.
    @return: the same structure with all candidate keys lowercase.
    """
    for i in range(len(result_arr)):  # iterate over words
        for char in range(len(result_arr[i])):  # iterate over character slots
            # snapshot the keys: the dict is mutated inside the loop
            for possible_chars in list(result_arr[i][char].keys()):
                if (possible_chars == None):
                    pass  # None candidates are left untouched
                elif possible_chars.isupper():
                    if possible_chars.lower() in result_arr[i][char]:
                        # both cases present: keep the higher probability
                        result_arr[i][char][possible_chars.lower()] = max(
                            result_arr[i][char][possible_chars],
                            result_arr[i][char][possible_chars.lower()])
                    else:
                        # only uppercase present: copy its probability over
                        result_arr[i][char][possible_chars.lower()] = result_arr[i][char][possible_chars]
                    # drop the uppercase entry now that lowercase holds the value
                    del result_arr[i][char][possible_chars]
    return result_arr
def solution(ar):
    """Check whether *ar* is a permutation of 1..N (Codility PermCheck).

    XOR trick: x ^ x == 0, so XOR-ing every array element together with
    every value 1..N cancels to zero exactly when the array is a
    permutation.  Returns 1 if it is, 0 otherwise.
    """
    acc = 0
    for expected, actual in enumerate(ar, start=1):
        acc ^= expected ^ actual
    return 1 if acc == 0 else 0
def check_keys_exist(d, keys):
    """Ensure every key in *keys* is present in dict *d*.

    Only presence is checked, not types or values.  Useful for
    validating a config dict up front so the program fails fast.

    Input:
        d (dict): the dictionary
        keys (list): keys that must be present

    Returns:
        list: empty list when all keys are present

    Raises:
        KeyError: if any key is missing, listing them all
    """
    absent = [key for key in keys if key not in d]
    if absent:
        raise KeyError("The following keys were not found: " + ' '.join(absent))
    return absent
def find_repo_item(repos, name):
    """Find a repo by full name in a {Repository: data} mapping.

    Arguments:
        repos: dict mapping Repository objects to data.
        name: (str) the full_name of the repo to find.

    Returns:
        (repo, data) tuple on a match, otherwise (None, None).
    """
    match = next(((repo, data) for repo, data in repos.items()
                  if repo.full_name == name), (None, None))
    return match
def combine_as_average_with_boost(vector1, boost, vector2):
    """Weighted average of two vectors, boosting the first.

    Each element of *vector1* is multiplied by *boost* before averaging
    with the matching element of *vector2*; the divisor grows with the
    boost so the result stays an average.

    :param vector1: list of values (boosted)
    :param boost: multiplier applied to vector1's elements
    :param vector2: list of values
    :return: list of boosted averages
    :raises RuntimeError: when the vectors differ in length
    """
    if len(vector1) != len(vector2):
        raise RuntimeError("Vectors must be of equal length!")
    return [(a * boost + b) / (2 + boost - 1) for a, b in zip(vector1, vector2)]
def parse_atom(dlstr):
    """Parse an atom line of the form 'Name core|shell'.

    :raises ValueError: when fewer than two whitespace tokens are present
    """
    tokens = dlstr.split()
    try:
        name, kind = tokens[0], tokens[1]
    except IndexError:
        raise ValueError("Unrecognised Atom: {!s}".format(dlstr))
    return {"id": "{} {}".format(name, kind)}
def image_displacement_to_defocus(dz, fno, wavelength=None):
    """Compute wavefront defocus from an image-plane displacement.

    Parameters
    ----------
    dz : `float` or `numpy.ndarray`
        displacement of the image
    fno : `float`
        f/# of the lens or system
    wavelength : `float`, optional
        wavelength of light; when given, the result is in waves,
        otherwise it keeps the units of ``dz``

    Returns
    -------
    `float`
        wavefront defocus
    """
    defocus = dz / (8 * fno ** 2)
    if wavelength is not None:
        defocus = defocus / wavelength
    return defocus
def getNameBuiltwithpip(lines):
    """Find a '%define ... pip_name <packagename>' spec line.

    Used when buildWithPip is True.  Returns the last token of the last
    matching line, or None when no such line exists.
    """
    found = None
    for spec_line in lines:
        if spec_line.startswith("%define") and "pip_name" in spec_line:
            found = spec_line.split()[-1]
    return found
def wklobjective_converged(qsum, f0, plansum, epsilon, gamma):
    """Compute the final WKL objective value after convergence."""
    return (gamma * (plansum + qsum)
            + epsilon * f0
            - (epsilon + 2 * gamma) * plansum)
def supersat_check(invaltd, invalt):
    """Check whether a dewpoint temperature exceeds the air temperature.

    :param invaltd: input dewpoint temperature
    :param invalt: input air temperature
    :type invaltd: float
    :type invalt: float
    :return: 1 if either input is None (invalid), 1 if the dewpoint is
        greater than the air temperature, 0 otherwise
    :rtype: int
    """
    # Use logical `or` instead of bitwise `|` on booleans: it
    # short-circuits and states the intent clearly.
    if invaltd is None or invalt is None:
        return 1
    if invaltd > invalt:
        return 1
    return 0
def inverse_lombardi(one_rm, r):
    """Estimate a rep weight from a one-rep max (inverse Lombardi formula).

    Conservative for low reps, optimistic for high reps.
    """
    return one_rm / r ** 0.10
def report_progress(typed, prompt, id, send):
    """Report typing progress (matching prefix ratio) to the server.

    Counts consecutive correct words from the start of *typed*, divides
    by the prompt length, sends ``{'id': ..., 'progress': ...}`` via
    *send*, and returns the progress value.
    """
    matched = 0
    while matched < len(typed) and typed[matched] == prompt[matched]:
        matched += 1
    progress = matched / len(prompt)
    send({'id': id, 'progress': progress})
    return progress
def _generate_base_ents(ents: list) -> list:  # noqa: MC0001
    """Generate a flat list of all entities from a set of nested entities.

    Walks each entity and collects, in addition to the entity itself,
    any nested dicts carrying a "$id" key — whether they appear as list
    items, as values inside those items, or directly as values of the
    entity.
    """
    base_ents = []
    for ent in ents:
        base_ents.append(ent)  # the top-level entity itself
        for _, item in ent.items():
            if isinstance(item, list):
                for prop in item:
                    # list items that are themselves identified entities
                    if isinstance(prop, dict) and "$id" in prop.keys():
                        base_ents.append(prop)
                        for val in prop:
                            if isinstance(prop[val], list):
                                # one more nesting level inside the item
                                for p in prop[val]:
                                    if isinstance(p, dict) and "$id" in p.keys():
                                        base_ents.append(p)
                            elif (
                                isinstance(prop[val], dict)
                                and "$id" in prop[val].keys()
                            ):
                                # NOTE(review): this appends the key `val`
                                # rather than the dict `prop[val]` — looks
                                # inconsistent with the other branches;
                                # confirm intended behaviour.
                                base_ents.append(val)
            elif isinstance(item, dict) and "$id" in item.keys():
                # identified entity stored directly as a value
                base_ents.append(item)
    return base_ents
def split_lhs_rhs(expr):
    """Split an equation into (lhs, rhs); lhs is None without an '='.

    >>> split_lhs_rhs(" 12 + a ")
    (None, '12 + a')
    >>> split_lhs_rhs(" c = 12 + a ")
    ('c', '12 + a')
    """
    parts = [part.strip() for part in expr.split("=")]
    if len(parts) == 1:
        return None, parts[0]
    lhs, rhs = parts
    return lhs, rhs
def get_id_str(d):
    """Return the document id, preferring 'id_str' over 'id'.

    :param d: document dict
    :return: utf-8 encoded bytes of ``d['id_str']`` when present,
        otherwise ``str(d['id'])``
    :raises KeyError: if neither 'id_str' nor 'id' is in the document

    Bug fix: the original guard was ``if not 'id_str' and not 'id' in d``,
    which evaluates ``not 'id_str'`` (always False) so the explicit
    KeyError('id not in document') could never fire.
    """
    if 'id_str' not in d and 'id' not in d:
        raise KeyError('id not in document')
    if 'id_str' in d:
        return d['id_str'].encode('utf8')
    return str(d['id'])
def cost_function(pos_neg, off_on, on):
    """Cost of a control-message sequence; lower is better.

    :param pos_neg: count of forward<->reverse transitions (cost 10 each)
    :param off_on: count of off->on transitions (cost 5 each)
    :param on: count of on states (cost 0 — accepted but unused)
    :return: total cost
    """
    POS_NEG_COST, OFF_ON_COST = 10, 5
    return POS_NEG_COST * pos_neg + OFF_ON_COST * off_on
def is_build_successful(build_obj):
    """Return True when the build object's status is 'SUCCESS'."""
    status = build_obj['status']
    return status == 'SUCCESS'
def match_indices(shortened_list, primary_list):
    """Indices in *primary_list* whose values appear in *shortened_list*.

    Assumes all values are unique (used by the External CTE monitor to
    match RA/Dec pairs, where uniqueness holds per source).

    Parameters:
        shortened_list : list of anything
            Subset of *primary_list* whose primary indices are wanted.
        primary_list : list of anything
            The full list the subset was drawn from.

    Returns:
        matched_indices : list of ints
    """
    matched_indices = []
    for index, value in enumerate(primary_list):
        if value in shortened_list:
            matched_indices.append(index)
    return matched_indices
def ldexp(x, i):
    """Return x * 2**i (scale x by a power of two)."""
    return x * (2 ** i)
def parse_param(popts):
    """Parse intrinsic-space specifications from option strings.

    Examples:
    >>> parse_param(["mass1=1.4", "mass2", "spin1z=-1.0,10"])
    ({'mass1': 1.4, 'mass2': None}, {'spin1z': (-1.0, 10.0)})

    A bare name means "expand over the full parameter space" (value
    None, not yet fully implemented); ``name=x`` fixes a point;
    ``name=a,b`` records an expansion range.  Returns
    ``(intr_prms, expand_prms)``.
    """
    if popts is None:
        return {}, {}
    intr_prms, expand_prms = {}, {}
    for spec in popts:
        parts = spec.split("=")
        if len(parts) == 1:
            # implicit full-space expansion
            intr_prms[parts[0]] = None
        elif len(parts) == 2:
            values = parts[1].split(",")
            if len(values) == 1:
                intr_prms[parts[0]] = float(values[0])
            else:
                expand_prms[parts[0]] = tuple(float(v) for v in values)
    return intr_prms, expand_prms
def header_names(hdr_tuples):
    """Return the set of lowercase header names in a (name, value) list."""
    return {name.lower() for name, _ in hdr_tuples}
def new_filename_from_old(old_filename, new_filenames):
    """Pick the new file name whose extension matches the old one.

    Returns the last matching candidate, or None when the old name has
    no usable extension or nothing matches.
    """
    try:
        wanted_ext = old_filename.split(".")[-1]
    except AttributeError:
        # old_filename is not a string (e.g. None)
        return None
    if not wanted_ext:
        return None
    match = None
    for candidate in new_filenames:
        candidate_ext = candidate.split(".")[-1]
        if candidate_ext and candidate_ext == wanted_ext:
            match = candidate
    return match
def find_stab(state, stab_xs, stab_zs): """ Find the sign of the logical operator. Args: state: stab_xs: stab_zs: Returns: """ # print(stab_xs, stab_zs) if len(stab_xs) == 0 and len(stab_zs) == 0: return True stab_xs = set(stab_xs) stab_zs = set(stab_zs) stabs = state.stabs # Attempt to build stabilizers from the stabilizers in the stabilizer state. built_up_xs = set() built_up_zs = set() for q in stab_xs: for stab_id in state.destabs.col_z[q]: built_up_xs ^= stabs.row_x[stab_id] built_up_zs ^= stabs.row_z[stab_id] for q in stab_zs: for stab_id in state.destabs.col_x[q]: built_up_xs ^= stabs.row_x[stab_id] built_up_zs ^= stabs.row_z[stab_id] # Compare with logical operator built_up_xs ^= stab_xs built_up_zs ^= stab_zs if len(built_up_xs) != 0 or len(built_up_zs) != 0: # print('x...', built_up_xs) # print('z...', built_up_zs) return False # Not found in stabilizer state else: return True
def compare_pipelines(old, new, log): """Return a tuple of raw keys for which definitions changed Parameters ---------- old : {str: dict} A {name: params} dict for the previous preprocessing pipeline. new : {str: dict} Current pipeline. log : logger Logger for logging changes. Returns ------- bad_raw : {str: str} ``{pipe_name: status}`` dictionary. Status can be 'new', 'removed' or 'changed'. bad_ica : {str: str} Same as ``bad_raw`` but only for RawICA pipes (for which ICA files might have to be removed). """ # status: good, changed, new, removed, secondary out = {k: 'new' for k in new if k not in old} out.update({k: 'removed' for k in old if k not in new}) # parameter changes to_check = set(new) - set(out) for key in tuple(to_check): if new[key] != old[key]: log.debug(" raw changed: %s %s -> %s", key, old[key], new[key]) out[key] = 'changed' to_check.remove(key) # does not need to be checked for source if 'raw' in to_check: to_check.remove('raw') out['raw'] = 'good' # secondary changes while to_check: n = len(to_check) for key in tuple(to_check): parents = [new[key][k] for k in ('source', 'ica_source') if k in new[key]] if any(p not in out for p in parents): continue elif all(out[p] == 'good' for p in parents): out[key] = 'good' else: out[key] = 'secondary' to_check.remove(key) if len(to_check) == n: raise RuntimeError("Queue not decreasing") bad_raw = {k: v for k, v in out.items() if v != 'good'} bad_ica = {k: v for k, v in bad_raw.items() if new.get(k, old.get(k))['type'] == 'RawICA'} return bad_raw, bad_ica
def sided_test(t, t_obs):
    """One-sided test: 1 if the sample assessment beats the original.

    :param t: sample assessment between the two candidates
    :param t_obs: original assessment between the two candidates
    :return: 1 when ``t > t_obs``, else 0
    """
    return 1 if t > t_obs else 0
def title_blocks(paper: dict) -> list:
    """Extract the title of a JSON-form paper as a list of text blocks.

    :param paper: parsed paper JSON with ``metadata.title``
    :return: single-element list of ``{'text': title}``
    """
    title = paper['metadata']['title']
    return [{'text': title}]
def iterable(y):
    """Check whether *y* supports iteration.

    Adapted from NumPy ~= 1.10.  Returns False if ``iter(y)`` raises,
    True otherwise.

    >>> iterable([1, 2, 3])
    True
    >>> iterable(2)
    False
    """
    try:
        iter(y)
        return True
    except Exception:
        return False
def add2(matrix1, matrix2):
    """Element-wise sum of two 2D matrices (lists of rows)."""
    return [[a + b for a, b in zip(row1, row2)]
            for row1, row2 in zip(matrix1, matrix2)]
def rename_restrictions(tags, mapping):
    """Apply 'rename' tags to a formula pipe's tag list.

    Tags are processed back-to-front so that a rename affects only the
    tags that precede it.  A ``('rename', (old, new))`` tag updates
    *mapping* (which is mutated) and is dropped from the output; other
    tag names are translated through *mapping* when present.

    Returns ``(new_tags, mapping)`` with the original tag order.
    """
    translated = []
    for tag_name, tag_value in reversed(tags):
        if tag_name == 'rename':
            old_name, new_name = tag_value[0], tag_value[1]
            if old_name in mapping:
                # chain the rename through the existing mapping
                mapping[new_name] = mapping[old_name]
                del mapping[old_name]
            else:
                mapping[new_name] = old_name
        elif tag_name in mapping:
            translated.append((mapping[tag_name], tag_value))
        else:
            translated.append((tag_name, tag_value))
    translated.reverse()
    return translated, mapping
def trim(s: str) -> str:
    """Remove leading and trailing whitespace from *s*."""
    trimmed = s.strip()
    return trimmed
def parse_boolean(value):
    """Parse a boolean from text: "1", "yes" or "true" (any case) is True."""
    TRUTHY = ("1", "yes", "true")
    return value.lower() in TRUTHY
def is_possible_triangle(triangle):
    """Check the triangle inequality: the longest side must be shorter
    than the sum of the other two.

    >>> is_possible_triangle([3, 4, 5])
    True
    >>> is_possible_triangle([5, 10, 25])
    False
    """
    longest = max(triangle)
    return longest < sum(triangle) - longest
def cver_t(verstr):
    """Convert a version string like '1.2.3' into an int tuple.

    A leading 'b' marks a beta version and is encoded as a (0,0,0,0)
    prefix before the numeric components.
    """
    if verstr.startswith("b"):
        return (0, 0, 0, 0) + cver_t(verstr[1:])
    return tuple(int(part) for part in verstr.split("."))
def create_plot_dict(altitude_levels):
    """Create a dict of per-altitude plot-info dicts, ready to be filled.

    Args:
        altitude_levels: int, number of starting altitudes (max 10);
            one key per level.

    Returns:
        dict mapping 'altitude_1'..'altitude_N' to a metadata dict that
        also holds five trajectory sub-dicts ('traj_0' is the main
        trajectory with alpha 1; 'traj_1'..'traj_4' are side
        trajectories with alpha 0.3).
    """
    assert (
        altitude_levels <= 10
    ), "It is not possible, to generate altitude plots for more than 10 different starting altitudes."

    def _trajectory(alpha):
        # fresh lists per trajectory so entries never share state
        return {
            "z": [],
            "lon": [],
            "lat": [],
            "time": [],
            "z_type": None,
            "line": None,
            "alpha": alpha,
        }

    def _level_entry():
        entry = {
            "start_time": None,
            "origin": None,
            "lon_precise": None,
            "lat_precise": None,
            "y_surf": None,
            "y_type": None,
            "alt_level": None,
            "subplot_index": None,
            "max_start_altitude": None,
            "trajectory_direction": None,
        }
        entry["traj_0"] = _trajectory(1)       # main trajectory
        for t in range(1, 5):                  # side trajectories
            entry["traj_%d" % t] = _trajectory(0.3)
        return entry

    return {"altitude_%d" % i: _level_entry()
            for i in range(1, altitude_levels + 1)}
def _is_jamo(i): """ Function for determining if a Unicode scalar value i is within the range of Jamo. :param i: Unicode scalar value to lookup :return: Boolean: True if the lookup value is within the range of Hangul syllables, otherwise False. """ if i in range(0x1100, 0x11ff + 1): # Range of Jamo as defined in Blocks.txt, "1100..11FF; Hangul Jamo" return True return False
def xieta2colrow(xi, eta, detsize, fill, npixx, npixy):
    """Convert detector xi/eta coordinates to image column and row.

    :param xi: one coordinate of the unprojected detector position
    :param eta: second coordinate of the unprojected detector position
    :param detsize: detector extent
    :param fill: ratio of the detector extent to the image extent
    :param npixx: number of pixels along x
    :param npixy: number of pixels along y
    :returns: (column, row) tuple
    """
    half_det = detsize / 2.
    # normalise to [-1, 1], shift to [0, 1], then scale to pixels
    col = ((xi / 36000.) / half_det * fill + 1.) / 2. * npixx
    row = ((eta / 36000.) / half_det * fill + 1.) / 2. * npixy
    return col, row
def _get_valid_search_dirs(search_dirs, project): """ compares param collection of search dirs with valid names, raises ValueError if invalid. maintains the order of param if any. If project is given other names are allowed than without. :param search_dirs: collection of foldernames (basename) to search for :param project: the project to search in or None :raises: ValueError """ # define valid search folders valid_global_search_dirs = ['bin', 'etc', 'include', 'lib', 'share'] valid_project_search_dirs = ['etc', 'include', 'libexec', 'share'] valid_search_dirs = (valid_global_search_dirs if project is None else valid_project_search_dirs) if not search_dirs: search_dirs = valid_search_dirs else: # make search folders a list search_dirs = list(search_dirs) # determine valid search folders all_valid_search_dirs = set(valid_global_search_dirs).union( set(valid_project_search_dirs)) # check folder name is known at all diff_dirs = set(search_dirs).difference(all_valid_search_dirs) if len(diff_dirs) > 0: raise ValueError('Unsupported search folders: ' + ', '.join(['"%s"' % i for i in diff_dirs])) # check foldername works with project arg diff_dirs = set(search_dirs).difference(valid_search_dirs) if len(diff_dirs) > 0: msg = 'Searching %s a project can not be combined with the search folders:' % ('without' if project is None else 'for') raise ValueError(msg + ', '.join(['"%s"' % i for i in diff_dirs])) return search_dirs
def unique_sorted(iterable):
    """Return unique values in first-encounter order."""
    seen = set()
    ordered = []
    for element in iterable:
        if element not in seen:
            seen.add(element)
            ordered.append(element)
    return ordered
def adjacency_from_edges(edges):
    """Construct an adjacency dictionary from a set of edges.

    Parameters
    ----------
    edges : sequence[[hashable, hashable]]
        A list of node identifier pairs.

    Returns
    -------
    dict[hashable, list[hashable]]
        Each node mapped to the list of its adjacent nodes.

    Examples
    --------
    >>> adjacency_from_edges([[0, 1], [0, 2]])
    {0: [1, 2], 1: [0], 2: [0]}
    """
    adjacency = {}
    for u, v in edges:
        # record the edge in both directions (undirected graph)
        adjacency.setdefault(u, []).append(v)
        adjacency.setdefault(v, []).append(u)
    return adjacency
def smallest_diff_key(A, B):
    """Return the smallest key k in A with ``A[k] != B.get(k)``, or None."""
    return min((k for k in A if A.get(k) != B.get(k)), default=None)
def doublecomplement(i, j):
    """Return the two elements of (1, 2, 3, 4) that are not i or j."""
    remaining = [1, 2, 3, 4]
    # remove() raises ValueError if i == j or either is out of range,
    # preserving the original strictness
    remaining.remove(i)
    remaining.remove(j)
    return remaining
def calculateZA(ZACompound, ZAOther, minus=True):
    """Remove (or add) ZAOther from/to ZACompound.

    Handles a natural compound (A == 0): in that case only the Z part
    of ZAOther is used.  A natural *other* is not supported.
    """
    is_natural = (ZACompound % 1000) == 0
    if is_natural:
        # keep only the proton-number part of the other ZA
        ZAOther = 1000 * (ZAOther // 1000)
    return ZACompound - ZAOther if minus else ZACompound + ZAOther
def is_column_valid(col):
    """Check a column definition for Vertica compliance.

    ``col[6]`` is the type name, ``col[7]`` its length.  'NULL' types
    are invalid; 'varchar' needs a numeric length in (0, 6500); any
    other type is accepted.
    """
    col_type = col[6]
    if col_type == 'NULL':
        return False
    if col_type != 'varchar':
        return True
    length_str = col[7]
    if not length_str.isdigit():
        return False
    length = int(length_str)
    return 0 < length < 6500
def clean_word(word: str) -> str:
    """Normalise a word for counting: strip common punctuation, lowercase."""
    PUNCTUATION = ",.:()&-"
    return word.strip(PUNCTUATION).lower()
def _bcc(recipient): """ Returns a query item matching messages that have certain recipients in the bcc field. Args: recipient (str): The recipient in the bcc field to match. Returns: The query string. """ return f"bcc:{recipient}"
def rescale(value, max_range=0.6, min_range=0.3):
    """Rescale a deltaG value into [min_range, max_range].

    Values at or below *min_range* are returned unchanged; larger
    values are mapped linearly into the target range.

    :param value: actual value
    :param max_range: upper bound of the target range
    :param min_range: lower bound of the target range
    :return: rescaled value
    """
    if value <= min_range:
        return value
    return value * (max_range - min_range) + min_range
def same_name(f, g):
    """True when *f* and *g* are equal or share the same ``__name__``.

    Objects without a ``__name__`` never match on name (distinct
    sentinel defaults guarantee that).
    """
    if f == g:
        return True
    return getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
def SplitHostname(hostname):
    """Split a Wiretap hostname into server name and product type.

    @param[in] hostname \c{(str)} Hostname in the form \c{name:type}
        (product type is IFFFS, Gateway, or Backburner).
    @return \c{(tuple)} (server_name, product_type); the product type
        is '' when missing.  Malformed input prints a warning.
    """
    pieces = hostname.split(':')
    if len(pieces) != 2:
        print("A Wiretap hostname should contain exactly one colon between "
              "the server name and product type.")
    if len(pieces) == 1:
        pieces = pieces + ['']
    return tuple(pieces)
def get_length(collection):
    """Return the length of *collection* as a string."""
    length = len(collection)
    return str(length)
def tab_to_string(tab):
    """Join the values of *tab* into one string, one value per line.

    Each value is formatted with ``%s`` and followed by a newline.
    Uses ``str.join`` instead of repeated ``+=`` (which is quadratic
    in the worst case).
    """
    return ''.join('%s\n' % value for value in tab)
def calculate_lost(tx, rx):
    """Return the frame-lost percentage.

    :param tx: number of frames sent
    :param rx: number of frames received
    :return: percentage lost; 100 when either counter is zero

    NOTE(review): the ratio divides by *rx* (received), not *tx* —
    confirm that is the intended denominator.
    """
    if 0 in (rx, tx):
        return 100
    return abs((tx - rx) / rx * 100)
def get_competitive_tier_mi18n(tier: int) -> str:
    """Map an API competitive tier (1-4) to its in-game display name key."""
    area_names = ("area1", "area2", "area3", "area4")
    return "bbs/" + area_names[tier - 1]
def convert_seconds_to_str(sec: float):
    """Format a duration as e.g. '1y 2d 3h 4m 5s', omitting zero units.

    The input is rounded to whole seconds first; an all-zero duration
    yields the empty string.
    """
    remaining = round(sec)
    UNITS = ((31536000, "y"), (86400, "d"), (3600, "h"), (60, "m"), (1, "s"))
    parts = []
    for size, suffix in UNITS:
        count, remaining = divmod(remaining, size)
        if count != 0:
            parts.append("%d%s" % (count, suffix))
    return " ".join(parts)
def parseSendchangeArguments(args):
    """Parse Buildbot sendchange "changed files" into key/value pairs.

    Each entry is expected as ``key: value``; the value is whitespace-
    trimmed.  Entries without a colon are skipped (best-effort, as the
    patch uploader may send arbitrary strings).

    :param args: list of ``"key: value"`` strings
    :return: dict of parsed pairs

    Fix: the bare ``except`` (which also swallowed KeyboardInterrupt
    and real bugs) is narrowed to the ValueError that a colon-less
    entry actually raises; ``lstrip().rstrip()`` becomes ``strip()``.
    """
    parsedArgs = {}
    for arg in args:
        try:
            key, value = arg.split(":", 1)
        except ValueError:
            continue  # no colon: skip, matching old best-effort behaviour
        parsedArgs[key] = value.strip()
    return parsedArgs
def is_empty(s: str) -> bool:
    """Return True when the string has no characters."""
    return len(s) == 0
def rseq(t, y, en, veff):
    """Radial Schroedinger equation (l=0) with effective potential veff(r).

    First-order system form: returns [y1', y2'] where y1' = y2 and
    y2' = -2 (en - veff(t)) y1.
    """
    u, du = y
    # factor from rewriting the second-order radial SEQ as a system
    curvature = -2 * (en - veff(t))
    return [du, curvature * u]
def semeval_PRF(y_true, y_pred):
    """Micro-averaged P/R/F for SemEval-2014 aspect detection.

    Labels come in groups of 5 per sentence; label 4 means "aspect not
    present".  Sentences with no gold aspects are skipped.

    :return: (precision, recall, f1) tuple
    """
    predicted_total = 0
    gold_total = 0
    hit_total = 0
    for sent in range(len(y_pred) // 5):
        base = sent * 5
        predicted = {j for j in range(5) if y_pred[base + j] != 4}
        gold = {j for j in range(5) if y_true[base + j] != 4}
        if not gold:
            continue
        predicted_total += len(predicted)
        gold_total += len(gold)
        hit_total += len(predicted & gold)
    p = hit_total / predicted_total
    r = hit_total / gold_total
    f = 2 * p * r / (p + r)
    return p, r, f
def fiveplates_clean_field_file(field):
    """Return the targets_clean file name for a field in the
    fiveplates_field_files zip archive.

    Parameters
    ----------
    field : str
        Field identifier, e.g. 'GG_010'.
    """
    return '{}_targets_clean.txt'.format(field)
def fibonacci_until(values_until):
    """Return the Fibonacci values strictly below *values_until*.

    Raises ValueError for string input or negative numbers.  For
    0 <= values_until <= 1 an explanatory string is returned instead of
    a list (preserved quirk of the original API).
    """
    try:
        # type(...) == str kept deliberately: behaviour must match the
        # original exactly, including for str subclasses
        if type(values_until) == str or values_until < 0:
            raise ValueError
        elif values_until > 1:
            sequence = [0, 1]
            while sequence[-1] + sequence[-2] < values_until:
                sequence.append(sequence[-2] + sequence[-1])
            return sequence
        else:
            return f"""[0, 1] For getting better results enter a number bigger than 1."""
    except ValueError:
        raise ValueError(
            'Please enter a positive number since fibonacci sequence only includes zero and positive numbers.')
def get_durations(raw_data, get_duration, is_successful):
    """Retrieve the benchmark duration data from a list of records.

    :parameter raw_data: list of records
    :parameter get_duration: function that retrieves the duration data from
                             a given record
    :parameter is_successful: function that returns True if the record
                              contains a successful benchmark result,
                              False otherwise
    :returns: list of float values corresponding to benchmark durations
    """
    durations = []
    for record in raw_data:
        if is_successful(record):
            durations.append(get_duration(record))
    return durations
def rotate_right(input, count):
    """Rotate *input* right by *count* positions.

    Assumes 0 <= count <= len(input); works for any sliceable sequence.
    """
    pivot = len(input) - count
    return input[pivot:] + input[:pivot]
def multiple_letter_count(phrase):
    """Return dict of {ltr: frequency} from phrase.

    >>> multiple_letter_count('yay')
    {'y': 2, 'a': 1}

    >>> multiple_letter_count('Yay')
    {'Y': 1, 'a': 1, 'y': 1}
    """
    # Single pass instead of calling str.count once per character,
    # which made the original O(n^2).  Key order (first occurrence)
    # is preserved either way.
    counts = {}
    for ch in phrase:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
def lucas(n):
    """Find nth Lucas number iteratively.

    input: n (int) index of the Lucas number; any n < 1 yields 2,
        matching the original recursive base case
    returns: (int) representing value of nth lucas number
    """
    # Iterative form avoids the exponential blow-up of the naive
    # double recursion (lucas(n-1) + lucas(n-2)).
    if n < 1:
        return 2
    prev, curr = 2, 1  # L(0), L(1)
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def add(a, b):
    """Return the sum of *a* and *b*."""
    return a + b
def welford(x_array, mean=None, var=None, n=0):
    """Running (Welford) sample variance of *x_array*.

    Optionally resumes from previously computed statistics: *mean* and
    *var* over *n* earlier samples.

    https://www.embeddedrelated.com/showarticle/785.php
    see also https://brenocon.com/blog/2008/11/calculating-running-variance-in-python-and-c/
    No reallocations, unlike 'cumvar' below!

    Returns the sample variance (sum of squared deviations divided by
    total count - 1).
    """
    k = 0  # samples folded in so far
    M = 0  # running mean
    S = 0  # running sum of squared deviations
    if mean is not None and var is not None:
        # Two fixes vs. the original:
        #  * `if mean and var:` silently discarded the prior statistics
        #    whenever mean or var happened to equal 0 -- compare to None.
        #  * the resumed count was `k += 1 + n` (off by one), which skewed
        #    every incremental update and the final (k-1) divisor.
        k = n
        M = mean
        S = var * (n - 1)
    for x in x_array:
        k += 1
        M_next = M + (x - M) / k
        S = S + (x - M) * (x - M_next)
        M = M_next
    return S / (k - 1)
def check_ext(file_name, ext):
    """Check the extension for a file name, and add if missing.

    Parameters
    ----------
    file_name : str
        The name of the file.
    ext : str
        The extension to check and add.

    Returns
    -------
    str
        File name with the extension added.
    """
    if file_name.endswith(ext):
        return file_name
    return file_name + ext
def is_slug(string):
    """ Function to test if a URL slug is valid """
    # Membership in a set is O(1) per character; an empty string is
    # vacuously valid, as before.
    allowed = set('0123456789-abcdefghijklmnopqrstuvwxyz')
    return all(ch in allowed for ch in string)
def core_coerce_row(row, extra_columns, result_type):
    """Trim off the extra columns and return as a correct-as-possible
    sqlalchemy Row.

    If there is nothing to trim the row is returned untouched.
    """
    if not extra_columns:
        return row
    raw = row._row
    keep = len(raw) - len(extra_columns)
    return result_type(raw[:keep])
def list_rindex(l, s):
    """Helper function: *last* matching element in a list"""
    # Search the reversed copy, then translate the offset back;
    # raises ValueError when *s* is absent, like list.index.
    offset_from_end = l[::-1].index(s)
    return len(l) - 1 - offset_from_end
def dot(v, u):
    """v and u are vectors. v and u -> list"""
    return v[0] * u[0] + v[1] * u[1]
def get_current_labels(lj):
    """ Get labels from json file

    :param lj: labels JSON
    :type lj: JSON
    :returns: list of label names
    :rtype: list
    """
    return [entry['name'] for entry in lj]
def getUniqueByID(seq):
    """Remove duplicates (keyed on each element's .id) from a sequence
    while preserving the order of first occurrence.

    Based on sort f8() by Dave Kirby benchmarked at
    https://www.peterbe.com/plog/uniqifiers-benchmark
    """
    seen = set()
    unique = []
    for item in seq:
        if item.id not in seen:
            seen.add(item.id)
            unique.append(item)
    return unique
def fail_dict(key_name: str, value: str):
    """Return a fail dictionary containing the failed request and message"""
    payload = {key_name: value}
    return {
        "status": "fail",
        "data": payload,
    }
def twe_parameter_space_getter(X, y):
    """
    generate the twe distance measure
    :param X: dataset to derive parameter ranges from
    :return: distance measure and parameter range dictionary
    """
    # Fixed grids; X and y are accepted for interface compatibility only.
    penalty_values = [0, 0.011111111, 0.022222222, 0.033333333, 0.044444444,
                      0.055555556, 0.066666667, 0.077777778, 0.088888889, 0.1]
    stiffness_values = [0.00001, 0.0001, 0.0005, 0.001, 0.005,
                        0.01, 0.05, 0.1, 0.5, 1]
    return {
        'penalty': penalty_values,
        'stiffness': stiffness_values,
    }
def parse_db_url(url):
    """ Parse database DSN to particular entities
    (host, port, database, user, password).

    NOTE(review): the replace-based split cannot handle credentials that
    themselves contain ':', '@', or '/' -- presumably callers only pass
    simple DSNs; verify before relying on this for arbitrary URLs.
    """
    tokens = (url.replace('postgres://', '')
                 .replace('@', ' ')
                 .replace(':', ' ')
                 .replace('/', ' ')
                 .split())
    fields = ['user', 'password', 'host', 'port', 'database']
    # zip truncates at the shorter input, matching the original behavior
    # for DSNs with missing components.
    return dict(zip(fields, tokens))
def get_index(array, element):
    """ Finds where an element occurs in an array or -1 if not present. """
    # Guard clause instead of if/else branches.
    if element not in array:
        return -1
    return array.index(element)
def hello(*names: str, greeting: str = "Hello") -> str:
    """Say hello."""
    recipients = ", ".join(names) if names else "stranger"
    return f"{greeting}, {recipients}!"
def get_import_path(_class):
    """ Returns import path for a given class. """
    return f'{_class.__module__}.{_class.__name__}'