content
stringlengths
42
6.51k
def is_valid_perioddata(data):
    """Check that a dictionary of period data has enough information
    (based on key names) to set up stress periods.

    Perlen must be explicitly input, or 3 of start_date_time,
    end_date_time, nper and/or freq must be specified. This mirrors the
    input requirements of pandas.date_range
    (https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html).
    """
    has_perlen = data.get('perlen') is not None
    steady = data.get('steady', False)
    # steady may be given per-period as a dict; period 0 decides.
    if isinstance(steady, dict):
        steady = steady.get(0)
    if steady:
        return data.get('nper') is not None or has_perlen
    time_keys = ('nper', 'start_date_time', 'end_date_time', 'freq')
    n_specified = sum(data.get(k) is not None for k in time_keys)
    return has_perlen or n_specified >= 3
def number_of_yang_modules_that_passed_compilation(in_dict: dict, compilation_condition: str):
    """
    Return the number of modules whose compilation status equals
    'compilation_condition'.

    Arguments:
        :param in_dict (dict) Dictionary of key:yang-model,
            value:list of compilation results (status at index 3)
        :param compilation_condition (str) Compilation result we are looking for
            - PASSED, PASSED WITH WARNINGS, FAILED
    :return: the number of YANG models which meet the 'compilation_condition'
    """
    return sum(1 for results in in_dict.values() if results[3] == compilation_condition)
def _infer_color_variable_kind(color_variable, data): """Determine whether color_variable is array, pandas dataframe, callable, or scikit-learn (fit-)transformer.""" if hasattr(color_variable, "dtype") or hasattr(color_variable, "dtypes"): if len(color_variable) != len(data): raise ValueError( "color_variable and data must have the same length.") color_variable_kind = "scalars" elif hasattr(color_variable, "transform"): color_variable_kind = "transformer" elif hasattr(color_variable, "fit_transform"): color_variable_kind = "fit_transformer" elif callable(color_variable): color_variable_kind = "callable" elif color_variable is None: color_variable_kind = "none" else: # Assume color_variable is a selection of columns color_variable_kind = "else" return color_variable_kind
def a1decimal(n):
    """Truncate a number to a single decimal place and return it as a float."""
    truncated_tenths = int(n * 10)  # truncation toward zero, not rounding
    return truncated_tenths / 10.0
def strongly_connected_components(graph):
    """
    Tarjan's Algorithm (named for its discoverer, Robert Tarjan) is a graph
    theory algorithm for finding the strongly connected components of a graph.

    Based on:
    http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm

    :param graph: mapping (or indexable) from node to iterable of successors
    :return: list of tuples, one per strongly connected component
    """
    index_counter = [0]
    stack = []
    lowlinks = {}
    index = {}
    result = []

    def strongconnect(node):
        # set the depth index for this node to the smallest unused index
        index[node] = index_counter[0]
        lowlinks[node] = index_counter[0]
        index_counter[0] += 1
        stack.append(node)
        # Consider successors of `node`.
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only lookup failures should mean "no successors".
        try:
            successors = graph[node]
        except (KeyError, IndexError, TypeError):
            successors = []
        for successor in successors:
            if successor not in lowlinks:
                # Successor has not yet been visited; recurse on it
                strongconnect(successor)
                lowlinks[node] = min(lowlinks[node], lowlinks[successor])
            elif successor in stack:
                # the successor is in the stack and hence in the current
                # strongly connected component (SCC)
                lowlinks[node] = min(lowlinks[node], index[successor])
        # If `node` is a root node, pop the stack and generate an SCC
        if lowlinks[node] == index[node]:
            connected_component = []
            while True:
                successor = stack.pop()
                connected_component.append(successor)
                if successor == node:
                    break
            result.append(tuple(connected_component))

    for node in graph:
        if node not in lowlinks:
            strongconnect(node)
    return result
def intify_and_make_intron(junction):
    """Convert start and stop strings to ints and shorten the interval.

    The interval is shortened to match the introns of SJ.out.tab files
    from STAR: 1 is added to the start and 1 subtracted from the stop.

    Parameters
    ----------
    junction : tuple
        (chrom, start, stop, strand) tuple of strings,
        e.g. ('chr1', '100', '200', '-')

    Returns
    -------
    intron : tuple
        (chrom, start, stop, strand) tuple of string, int, int, string.

    >>> intify_and_make_intron(('chr1', '100', '200', '-'))
    ('chr1', 101, 199, '-')
    """
    chrom, start, stop, strand = junction
    return chrom, int(start) + 1, int(stop) - 1, strand
def findall_lod(lst, key, val):
    """Return every dict in ``lst`` whose value under ``key`` equals ``val``."""
    return list(filter(lambda entry: entry[key] == val, lst))
def first_and_last_n_chars(s, n1=30, n2=30):
    """
    Utility function to display the first n1 characters and last n2
    characters of a long string (adjusts the display if the string is
    less than n1+n2 characters long).

    :param s: string
    :return: string for display
    """
    head = s[:min(len(s), n1)]
    tail_len = min(len(s) - len(head), n2)
    tail = s[-tail_len:] if tail_len > 0 else ''
    if len(head) == len(s):
        # Whole string fits in the head portion.
        return head
    if len(head) + tail_len == len(s):
        # Head and tail cover the string exactly; no ellipsis needed.
        return head + tail
    return head + '...' + tail
def rescale(val, in_min, in_max, out_min, out_max):
    """Linearly map ``val`` from [in_min, in_max] onto [out_min, out_max],
    mimicking the map() function of Processing and Arduino."""
    slope = (out_max - out_min) / (in_max - in_min)
    return out_min + (val - in_min) * slope
def get_post_args(data):
    """Parse an application/x-www-form-urlencoded POST body into a dict.

    :param data: raw body string, e.g. ``"a=1&b=2"``
    :return: dict mapping argument names to values

    Pairs without an ``=`` are ignored.  Only the first ``=`` splits the
    name from the value, so values may themselves contain ``=``
    (previously such pairs were silently dropped).
    NOTE(review): values are not percent-decoded here — confirm callers
    expect raw encoded values.
    """
    ret = {}
    for pair in data.split('&'):
        name, sep, val = pair.partition('=')
        if sep:
            ret[name] = val
    return ret
def get_parent_title(obj_json):
    """Return the title for an object's parent component.

    Falls back from "title" to "display_string"; if a component
    identifier is present, appends the object's capitalized level plus
    that identifier.

    :param obj_json: dict describing the object
    :return: display title string (empty when no title information exists)
    """
    # BUG FIX: previously ``.strip()`` raised AttributeError when both
    # "title" and "display_string" were missing; fall back to "".
    title = (obj_json.get("title") or obj_json.get("display_string") or "").strip()
    if obj_json.get("component_id"):
        title = "{}, {} {}".format(title,
                                   obj_json["level"].capitalize(),
                                   obj_json["component_id"])
    return title
def product(S1, S2):
    """In : S1 (set)
           S2 (set)
    Out: Cartesian product of S1 and S2 (set of pairs).
    """
    pairs = set()
    for x in S1:
        for y in S2:
            pairs.add((x, y))
    return pairs
def pvar(var, mdi, scale):
    """
    Convert a variable to a rounded integer.

    :param var: the variable to be scaled and rounded
    :param mdi: the value to return if the variable is missing (None)
    :param scale: the factor by which to scale the number before rounding
    :type var: float
    :type mdi: integer
    :type scale: integer
    """
    if var is None:
        return mdi
    return round(var * scale)
def is_integer(val):
    """Returns whether the given value is an integer.

    Args:
        val (object): value to check

    Returns:
        ``True`` if the given value is an integral number (``int``,
        ``bool``, or any type registered with ``numbers.Integral`` such
        as numpy integer types), otherwise ``False``.
    """
    import numbers
    # BUG FIX: the previous duck-typed check (``val += 1``) wrongly
    # reported True for floats, complex numbers, and anything else that
    # merely supports addition.
    return isinstance(val, numbers.Integral)
def nested_get(nested_dict, keys):
    """
    Walk ``keys`` into a nested structure; a list level is entered via
    its first element.

    :param nested_dict: dictionary (possibly containing lists of dicts)
    :param keys: list of keys
    :return: the object found at the end of the key path
    """
    current = nested_dict
    for key in keys:
        current = current[0][key] if isinstance(current, list) else current[key]
    return current
def py_edit_distance(s, t):
    """Pure-Python Levenshtein edit distance between sequences s and t."""
    # Single-row dynamic program: costs[i] holds the distance between
    # s[:i] and the prefix of t processed so far.
    costs = list(range(len(s) + 1))
    for j, t_char in enumerate(t, start=1):
        prev_diag = costs[0]
        costs[0] = j
        for i, s_char in enumerate(s, start=1):
            substitution = prev_diag + (s_char != t_char)
            prev_diag = costs[i]
            costs[i] = min(substitution, prev_diag + 1, costs[i - 1] + 1)
    return costs[-1]
def intersect_line_ray(lineSeg, raySeg):
    """Intersect the infinite line through ``lineSeg`` with the ray
    defined by ``raySeg``.

    Returns the intersection point (x, y), or None when the ray is
    parallel to the line or points away from it.
    """
    (p1x, p1y), line_end = lineSeg
    (p2x, p2y), ray_end = raySeg
    d1x = line_end[0] - p1x
    d1y = line_end[1] - p1y
    d2x = ray_end[0] - p2x
    d2y = ray_end[1] - p2y

    # Parallel when both directions are vertical, or both slopes match.
    if d1x == 0 and d2x == 0:
        return None
    if d1x != 0 and d2x != 0 and float(d1y) / d1x == float(d2y) / d2x:
        return None

    # Parametrize the ray as p2 + t*d2 and solve for t on the line.
    t = (
        float(p2y * d1x - p1y * d1x - p2x * d1y + p1x * d1y)
        / (d2x * d1y - d1x * d2y)
    )
    # Only intersections in front of the ray count.
    if t < 0:
        return None
    return (p2x + t * d2x, p2y + t * d2y)
def unflatten_dict(d, sep=':', prefix=''):
    """Inverse of flatten_dict: rebuild a nested dict from keys joined
    with ``sep``, stripping a leading ``prefix + sep`` if present."""
    nested = {}
    marker = prefix + sep
    for flat_key, value in d.items():
        if flat_key.startswith(marker):
            flat_key = flat_key[len(marker):]
        parts = flat_key.split(sep)
        cursor = nested
        for part in parts[:-1]:
            cursor = cursor.setdefault(part, {})
        cursor[parts[-1]] = value
    return nested
def inclusion_test(s1, s2):
    """
    Check that string s1 equals string s2 except that one of them
    contains an additional inner substring.

    Example: "Avenue C Berten" vs "Avenue Clovis Berten"

    Returns 1 on a match, 0 otherwise.
    """
    import os
    prefix_len = len(os.path.commonprefix([s1, s2]))
    # Common suffix length, computed by comparing the reversed strings.
    suffix_len = len(os.path.commonprefix([s1[::-1], s2[::-1]]))
    shorter = min(len(s1), len(s2))
    if prefix_len > 0 and suffix_len > 0 and prefix_len + suffix_len >= shorter:
        return 1
    return 0
def gcd(a, b):
    """Calculate the greatest common divisor (iterative Euclid's algorithm)."""
    while b != 0:
        a, b = b, a % b
    return a
def create_node_descriptor(end_point):
    """Return a node descriptor for the robot based on ``end_point``.

    The server_alive link is for the robot to check the MQTT connection
    periodically.

    Args:
        end_point (str): The ID of the robot.

    Returns:
        dict: A node descriptor of the vizier format for the robot.
    """
    vizier_request = {
        'link': 'vizier/' + end_point,
        'type': 'STREAM',
        'required': False,
    }
    return {
        'end_point': 'pd_' + end_point,
        'links': {'/status': {'type': 'DATA'}},
        'requests': [vizier_request],
    }
def find_next_available_file(fname_pattern, max_n=1000, start=1):
    """
    Return the first filename matching the pattern that does not yet
    exist on disk, or None if all candidates are taken.

    :param str fname_pattern: File name pattern using "%d" style
        formatting, e.g. "result-%03d.png"
    :param int max_n: Check at most that many files before giving up
    :param int start: Where to start counting from, default is 1
    """
    from pathlib import Path
    for n in range(start, max_n):
        candidate = fname_pattern % n
        if not Path(candidate).exists():
            return candidate
    return None
def all_done(arr):
    """Return True when every item is an int (specific to the augmented
    data array); True for an empty array."""
    return all(isinstance(item, int) for item in arr)
def isalpha(char):
    """Return True iff ``char`` is a letter (thin wrapper over str.isalpha)."""
    return char.isalpha()
def depth_first_search(query, graph, visited=None):
    """Depth-first search of a graph in adjacency-list representation.

    Each node is a dict with a 'value' and a 'neighbors' list, where
    'neighbors' is itself an adjacency list of the same shape.

    :param query: value to search for
    :param graph: list of node dicts
    :param visited: per-level bookkeeping of already-seen indices
    :return: the matching value, or None when not found

    BUG FIXES vs. the original:
    - the recursive call's result was discarded, so matches below the
      first level were lost;
    - ``visited`` entries were never set to True;
    - the top-level ``visited`` list was passed into sub-lists of a
      different length, risking IndexError.
    """
    if visited is None:
        visited = [False] * len(graph)
    for i, node in enumerate(graph):
        if visited[i]:
            # Already visited this node at this level.
            continue
        visited[i] = True
        if node['value'] == query:
            return node['value']
        # Recurse into the neighbor sub-list with its own visited array.
        found = depth_first_search(query, node['neighbors'])
        if found is not None:
            return found
    return None
def unique_annos(annotations):
    """Ensure annotation ids are unique by reassigning sequential ints in place."""
    for new_id, anno in enumerate(annotations):
        anno['id'] = new_id
    return annotations
def indent(string, prefix=" "):
    """Indent every line of a paragraph of text with ``prefix``."""
    return "\n".join(prefix + line for line in string.split("\n"))
def join_filter(sep, seq, pred=bool):
    """Join the stringified items of ``seq`` with ``sep``, keeping only
    those for which ``pred`` holds (truthiness by default)."""
    return sep.join(str(item) for item in seq if pred(item))
def scale(data, new_min, new_max):
    """Scale a normalised data series into [new_min, new_max].

    :param data: the normalised data series to be scaled
    :type data: list of numeric values
    :param new_min: the minimum value of the scaled data series
    :type new_min: numeric
    :param new_max: the new maximum of the scaled data series
    :type new_max: numeric
    :return: a scaled data series
    :rtype: list
    """
    span = new_max - new_min
    return [value * span + new_min for value in data]
def is_outlier(x, p25, p75):
    """Tukey-fence outlier check: 1.5 * IQR beyond either quartile (inclusive)."""
    iqr = p75 - p25
    lower_fence = p25 - 1.5 * iqr
    upper_fence = p75 + 1.5 * iqr
    return not (lower_fence < x < upper_fence)
def allowed_file(filename):
    """Return True iff ``filename`` has a .dae extension.

    The comparison is case-insensitive, so ``model.DAE`` is accepted
    (previously only lowercase ``.dae`` passed).
    """
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1].lower() == 'dae'
def set_value(context, step, current_value, entry_value):
    """Apply a JSON value to a context object.

    An implementation of 'steps to set a JSON encoding value'.
    <https://www.w3.org/TR/html-json-forms/#dfn-steps-to-set-a-json-encoding-value>
    """
    key, flags = step
    # Final step of the path: actually store the entry value.
    if flags.get('last', False):
        if current_value is None:
            if flags.get('append', False):
                # Appending into a fresh slot starts a one-element list.
                context[key] = [entry_value]
            else:
                # Integer keys index into a list context; grow it with
                # None holes so the index is addressable.
                if isinstance(context, list) and len(context) <= key:
                    context.extend([None] * (key - len(context) + 1))
                context[key] = entry_value
        elif isinstance(current_value, list):
            context[key].append(entry_value)
        else:
            # Collision with an existing scalar: promote to a 2-list.
            context[key] = [current_value, entry_value]
        return context
    # Intermediate step: return the nested context to keep walking into.
    if current_value is None:
        if flags.get('type') == 'array':
            context[key] = []
        else:
            if isinstance(context, list) and len(context) <= key:
                context.extend([None] * (key - len(context) + 1))
            context[key] = {}
        return context[key]
    elif isinstance(current_value, dict):
        return context[key]
    elif isinstance(current_value, list):
        if flags.get('type') == 'array':
            return current_value
        # Mixed array/object access: convert the list to a dict keyed by
        # index, dropping None holes, per the spec.
        obj = {}
        for i, item in enumerate(current_value):
            if item is not None:
                obj[i] = item
        context[key] = obj
        return obj
    else:
        # Scalar found mid-path: wrap it in an object under the '' key.
        obj = {'': current_value}
        context[key] = obj
        return obj
def add_matrices(x, y):
    """Element-wise sum of two equally-shaped matrices.

    >>> matrix1 = [[1, 3],
    ...            [2, 0]]
    >>> matrix2 = [[-3, 0],
    ...            [1, 2]]
    >>> add_matrices(matrix1, matrix2)
    [[-2, 3], [3, 2]]
    """
    return [
        [a + b for a, b in zip(row_x, row_y)]
        for row_x, row_y in zip(x, y)
    ]
def model_to_dict(instance, fields=None, exclude=None):
    """
    Returns a list of dicts describing the editable fields of ``instance``:
        {'lable': verbose_name, 'name': name, 'value': value, 'help': help_text}
    The verbose_name is capitalized; the order of ``fields`` is respected.

    ``fields`` is an optional list of field names. If provided, only the
    named fields will be included in the returned data.

    ``exclude`` is an optional list of field names. If provided, the
    named fields will be excluded, even if they are listed in ``fields``.
    """
    data = []
    if instance:
        opts = instance._meta
        for f in opts.fields:
            if not f.editable:
                continue
            if fields and f.name not in fields:
                continue
            if exclude and f.name in exclude:
                continue
            value = f.value_from_object(instance)
            # Prefer the human-readable display for choice fields.
            get_choice = 'get_' + f.name + '_display'
            if hasattr(instance, get_choice):
                value = getattr(instance, get_choice)()
            # Only keep fields that actually have a value; skip the rest.
            if value:
                data.append({
                    'lable': f.verbose_name.capitalize(),
                    'name': f.name,
                    'value': value,
                    'help': f.help_text,
                })
    if fields and data:
        # Re-order entries to match the caller-supplied field order.
        # BUG FIX: the original compared the *bound method objects*
        # ``f.lower`` and ``d['name'].lower`` (always unequal), which
        # silently emptied ``data`` whenever ``fields`` was given; the
        # methods must be called.
        sorted_data = []
        for name in fields:
            for d in data:
                if name.lower() == d['name'].lower():
                    sorted_data.append(d)
        data = sorted_data
    return data
def get_rgb_normalized(r, g, b, a=1.0):
    """Return the RGB components normalized to [0, 1], with alpha passed through."""
    def normalize(channel):
        return channel / 255.0
    return normalize(r), normalize(g), normalize(b), a
def location_transform(data, inv):
    """Convert a target location between env and agent coordinate conventions.

    Note:
        1. env location (x major), agent location (y major)
        2. if inv=True, agent->env; otherwise, env->agent

    ``data`` must provide 'target_location' (a pair or None) and
    'map_size' ([size_x, size_y]).  Returns the converted pair as a
    list, or None when no target location is set.
    """
    location, map_size = data['target_location'], data['map_size']
    if location is None:
        return location

    def location_check(x, y):
        # Clamp out-of-range coordinates (seen in corrupt replays) and warn.
        try:
            assert x < map_size[0] and y < map_size[1], 'target location out of range, corrupt replay'
        except AssertionError:
            x = min(x, map_size[0] - 1)
            y = min(y, map_size[1] - 1)
            print('[WARNING]: abnormal location-map_size: {}/{}'.format(location, map_size))
        return [x, y]

    if inv:
        # agent -> env: input is y-major [y, x]; output x-major floats.
        y, x = location  # long value
        y, x = float(y), float(x)
        y += 0.5  # building fix on .5 coordination
        x += 0.5
        # Flip the vertical axis (env origin is on the opposite edge).
        y = map_size[1] - y
        x, y = location_check(x, y)
        location = [x, y]
    else:
        # env -> agent: input is x-major [x, y]; output y-major ints.
        x, y = location
        y = map_size[1] - y
        x, y = int(x), int(y)
        x, y = location_check(x, y)
        location = [y, x]
    return location
def balance_calculation(data, centers, mapping):
    """
    Checks fairness for each of the clusters defined by k-centers and
    returns the minimum balance over all clusters.

    Args:
        data (list): points; data[i][0] == 1 marks the fairness variable (MARITAL)
        centers (list): cluster center ids
        mapping (list): tuples of the form (data_index, center)

    Returns:
        float: the minimum over clusters of min(p/q, q/p), where p is the
        count of fairness-positive points and q the rest (0 if either is 0).
    """
    fair = {center: [0, 0] for center in centers}
    for point_idx, center in mapping:
        fair[center][1] += 1
        if data[point_idx][0] == 1:  # MARITAL
            fair[center][0] += 1
    balances = []
    for positive, total in fair.values():
        negative = total - positive
        if positive == 0 or negative == 0:
            balances.append(0)
        else:
            balances.append(min(positive / negative, negative / positive))
    return min(balances)
def parse_cand_pfam_ids(pfam_string):
    """
    Get a flat list of pfam_ids from the candidate csv field 'pfam_ids'.

    :param pfam_string: pfam_ids string separated by '|' (between each
        protein) and ';' (between each pfam_id)
    :return: flat list of pfam_ids
    """
    flat = []
    for per_protein in pfam_string.split('|'):
        flat.extend(per_protein.split(';'))
    return flat
def is_abbrev(abbrev, text):
    """Check if ``abbrev`` is a potential abbreviation of ``text``.

    Case-insensitive, recursive: the first characters must match, and
    the remaining abbreviation may resume anywhere after a position
    within the first word of the text.
    """
    abbrev, text = abbrev.lower(), text.lower()
    words = text.split()
    if not abbrev:
        return True
    if not text:
        return False
    if abbrev[0] != text[0]:
        return False
    if not words:
        return False
    return any(
        is_abbrev(abbrev[1:], text[pos + 1:])
        for pos in range(len(words[0]))
    )
def fasta_from_sequences(seqs, make_seqlabel=None, line_wrap=None):
    """Return a FASTA string given a list of sequences.

    A sequence's Label attribute takes precedence over its Name.

    - seqs can be a list of sequence objects or strings.
    - make_seqlabel: callback taking the seq object, returning a label str
    - line_wrap: an integer for maximum line width
    """
    lines = []
    for idx, seq in enumerate(seqs):
        # Label precedence: callback, then Label, then Name, then index.
        if make_seqlabel is not None:
            label = make_seqlabel(seq)
        elif getattr(seq, 'Label', None):
            label = seq.Label
        elif getattr(seq, 'Name', None):
            label = seq.Name
        else:
            label = str(idx)
        lines.append('>' + label)
        seq_str = str(seq)
        if line_wrap is not None:
            # Slice into fixed-width chunks (empty sequences add no body).
            lines.extend(
                seq_str[pos:pos + line_wrap]
                for pos in range(0, len(seq_str), line_wrap)
            )
        else:
            lines.append(seq_str)
    return '\n'.join(lines)
def convert(value):
    """Change color value type: an (r, g, b) tuple becomes a '#RRGGBB'
    string, and a '#RRGGBB' string becomes an (r, g, b) tuple."""
    hex_digits = list(map(str, range(10))) + list("ABCDEF")
    if isinstance(value, tuple):
        out = '#'
        for channel in value:
            high, low = divmod(channel, 16)
            out += hex_digits[high] + hex_digits[low]
        return out
    elif isinstance(value, str):
        def channel_at(i):
            # Two hex digits -> one 0-255 channel value.
            return hex_digits.index(value[i]) * 16 + hex_digits.index(value[i + 1])
        return (channel_at(1), channel_at(3), channel_at(5))
def make_progress(row):
    """Return an HTML Bootstrap progress-bar snippet for a stats row.

    ``row`` must provide 'hits', 'dots', 'nulls' and 'tot' counts; each
    bar's width is that category's percentage of 'tot', minus a 0.05%
    fudge so the three bars never overflow the container.  Returns ''
    when row is None.
    """
    if row is None:
        return ""
    hits = row["hits"] / float(row["tot"]) * 100.0
    dots = row["dots"] / float(row["tot"]) * 100.0
    # other = row['other'] / float(row['tot']) * 100.0
    nulls = row["nulls"] / float(row["tot"]) * 100.0
    return """<div class="progress">
  <div class="progress-bar progress-bar-success" style="width: %.1f%%">
    <span>%s</span>
  </div>
  <div class="progress-bar progress-bar-info" style="width: %.1f%%">
    <span>%s</span>
  </div>
  <div class="progress-bar progress-bar-danger" style="width: %.1f%%">
    <span>%s</span>
  </div>
</div>""" % (
        hits - 0.05,
        row["hits"],
        dots - 0.05,
        row["dots"],
        # other - 0.05, row['other'],
        nulls - 0.05,
        row["nulls"],
    )
def first_true(iterable, default=False, pred=None):
    """Returns the first true value in the iterable.

    If no true value is found, returns *default*.

    If *pred* is not None, returns the first item for which pred(item)
    is true.

    Taken from:
    https://docs.python.org/3/library/itertools.html#itertools-recipes
    """
    if pred is None:
        pred = bool
    for item in iterable:
        if pred(item):
            return item
    return default
def delta_list_to_deltas_and_indices(delta_list, tolerance=0.001):
    """
    Given a list of vectors, eg. [(0,0,0), (1,0,0), (0,0,0)], return a
    list of indices and vectors, filtering out (near-)zero vectors:
    [1], [(1,0,0)].

    >>> delta_list_to_deltas_and_indices([(1,0,0), (0,0,0.000001), (0,1,0)])
    ([0, 2], [(1, 0, 0), (0, 1, 0)])
    """
    indices = []
    deltas = []
    for idx, delta in enumerate(delta_list):
        # Keep a vector if any component exceeds the tolerance.
        if any(abs(component) > tolerance for component in delta):
            indices.append(idx)
            deltas.append(delta)
    return indices, deltas
def assemblyRotationAlgorithmStringNone(_cs, name, value):
    """Map a literal "None" string to "" for this setting.

    Some users experienced a string None being introduced into this
    setting and we want to provide a smooth fix.  See T1345.
    """
    return {name: "" if value == "None" else value}
def exactly_one(iterable):
    """Obtain exactly one item from the iterable or raise ValueError."""
    it = iter(iterable)
    try:
        only = next(it)
    except StopIteration:
        raise ValueError("Too few items. Expected exactly one.")
    # A second item means the iterable was too long.
    sentinel = object()
    if next(it, sentinel) is not sentinel:
        raise ValueError("Too many items. Expected exactly one.")
    return only
def medical_covered_by_1(responses, derived):
    """Return whether the children are covered under Claimant 1's plan."""
    if responses.get('medical_coverage_available', 'NO') != 'YES':
        return False
    return 'My plan' in responses.get('whose_plan_is_coverage_under', '')
def _parse_control_depends(fields): """ Parse the package names of the 'Depends:' directive in a debian control file. References: https://www.debian.org/doc/debian-policy/ch-controlfields.html#s-controlsyntax https://www.debian.org/doc/debian-policy/ch-relationships.html#declaring-relationships-between-packages Args: fields: a parsed map of field strings from a debian control file. Returns: A list of package names from the control file Depends directive. """ depends = [] for entry in [fields.get("Depends", None), fields.get("Pre-Depends", None)]: # Skip the fields if they were empty. if not entry: continue # Remove newlines, these are 'folded' fields so newlines are ignored. line = entry.replace("\n", "").replace("\r", "") # Move through each section extracting the packages names. for section in entry.split(","): for alternative in section.split("|"): depend = alternative.strip().split(" ", 1)[0] if depend not in depends: depends.append(depend) return depends
def valid_vlan_id(vlan_id, extended=True):
    """Validates a VLAN ID.

    Args:
        vlan_id (integer): VLAN ID to validate. If passed as ``str``,
            it will be cast to ``int``.
        extended (bool): If the VLAN ID range should be considered
            extended for Virtual Fabrics (upper bound 8191 vs 4095).

    Returns:
        bool: ``True`` if it is a valid VLAN ID, ``False`` if not.

    Raises:
        None

    Examples:
        >>> valid_vlan_id('565')
        True
        >>> valid_vlan_id('6789', extended=False)
        False
        >>> valid_vlan_id('6789')
        True
    """
    upper_bound = 8191 if extended else 4095
    return 1 <= int(vlan_id) <= upper_bound
def is_sum_of_powers_of_their_digits(digit: int, power: int):
    """
    Verify whether the number equals the sum of its digits each raised
    to ``power`` (e.g. 153 == 1**3 + 5**3 + 3**3).

    :param digit: number to test
    :param power: exponent applied to each digit
    :return: True if the property holds
    """
    # FIX: the original built an expression string and ran eval() on it;
    # a plain sum() is safer, clearer and faster.
    return digit == sum(int(d) ** power for d in str(digit))
def _match_node_property(node, key, value): """Recursively finds a particular key-value pair in node.""" retval = None for child in node.get('IORegistryEntryChildren', []): if node.get(key) == value: retval = node break retval = _match_node_property(child, key, value) if retval is not None: break return retval
def permutations(n, k):
    """nPk: number of ordered arrangements of k items chosen from n.

    >>> permutations(52, 2)
    2652
    """
    from math import factorial
    return factorial(n) // factorial(n - k)
def getTotalAcceptedAnswers(answer, totalAcceptedAnswers, totalScoreOfTotalAcceptedAnswers):
    """
    If the answer is accepted, increase the total accepted-answer count
    by 1 and the total score by the answer's score; otherwise return the
    running totals unchanged.

    :param answer: an answer dictionary (with 'is_accepted' and 'score')
    :type answer: dict
    :param totalAcceptedAnswers: running count of accepted answers
    :type totalAcceptedAnswers: int
    :param totalScoreOfTotalAcceptedAnswers: running score sum
    :type totalScoreOfTotalAcceptedAnswers: int
    :return: updated (count, score) pair
    :rtype: int, int
    """
    if not answer['is_accepted']:
        return totalAcceptedAnswers, totalScoreOfTotalAcceptedAnswers
    return totalAcceptedAnswers + 1, totalScoreOfTotalAcceptedAnswers + answer['score']
def vowel_equiv(y, y_hat):
    """Check whether two cv labels share the same vowel class (labels mod 3)."""
    return (y - y_hat) % 3 == 0
def compute_actual_possible(results):
    """
    Takes a results dict output by compute metrics and populates the
    'actual' and 'possible' keys.

    possible = number of gold-standard annotations contributing to the
               final score (correct + incorrect + partial + missed)
    actual   = number of annotations produced by the NER system
               (correct + incorrect + partial + spurious)
    """
    shared = results['correct'] + results['incorrect'] + results['partial']
    results['possible'] = shared + results['missed']
    results['actual'] = shared + results['spurious']
    return results
def get_attr(name):
    """Resolve a dotted path like 'pkg.module.attr' to the named class
    or function object."""
    module_path, _, attr_name = name.rpartition('.')
    mod = __import__(module_path, fromlist=[attr_name])
    return getattr(mod, attr_name)
def cmyk_to_rgb(c, m, y, k):
    """Convert CMYK percentages (0-100) to 8-bit RGB values.

    NOTE(review): this uses the *additive* approximation
    ``255 * (1 - (channel + k)/100)``; when channel + k > 100 the output
    goes negative, and the common conversion is multiplicative — confirm
    this formula is intended before relying on it.
    """
    def channel(component):
        return int(255 * (1.0 - (component + k) / 100.))
    return channel(c), channel(m), channel(y)
def _check_continuity(bool_list): """Check if all matches are adjoint""" matched_indices = [idx for idx, is_match in enumerate(bool_list) if is_match] return all(a + 1 == b for a, b in zip(matched_indices[:-1], matched_indices[1:])), matched_indices
def compare_datums(in_datum_def: list, out_datum_def: list):
    """
    Compare two lists describing datums and remove, from both, the
    entries that agree position-by-position from the front.  Both input
    lists are modified in place.

    Parameters
    ----------
    in_datum_def
        The datum definition as described in the datum definition database.
    out_datum_def
        The datum definition as described in the datum definition database.

    Returns
    -------
    list
        ``[in_datum_def, out_datum_def]`` with the matching layers removed.
    """
    # Collect the layers that match at the same position in both lists.
    matching_layers = [
        first for first, second in zip(in_datum_def, out_datum_def)
        if first == second
    ]
    # remove() drops the first occurrence from each list, as before.
    for layer in matching_layers:
        in_datum_def.remove(layer)
        out_datum_def.remove(layer)
    return [in_datum_def, out_datum_def]
def RPL_MOTDSTART(sender, receipient, message):
    """Reply Code 375: format the MOTD-start line as '<sender>: message'.

    Note: ``receipient`` is accepted for interface uniformity but unused.
    """
    return ''.join(['<', sender, '>: ', message])
def populate_posts_and_comments_merge_results(results):
    """
    Merge batch results of backpopulate_posts_and_comments.

    Args:
        results (iterable of dict): iterable of batch task results

    Returns:
        dict: merged result data for all batches
    """
    merged = {"posts": 0, "comments": 0, "failures": []}
    for batch in results:
        merged["posts"] += batch["posts"]
        merged["comments"] += batch["comments"]
        merged["failures"] += batch["failures"]
    return merged
def DropWikiSuffix(wiki_filename):
    """Removes the .wiki suffix (if any) from the given filename."""
    suffix = '.wiki'
    if wiki_filename.endswith(suffix):
        return wiki_filename[:-len(suffix)]
    return wiki_filename
def is_in_rule_list(elem, rulelist):
    """Return True when ``elem`` is contained in any rule of ``rulelist``."""
    return any(elem in rule for rule in rulelist)
def speed_convert(size):
    """Convert a raw bytes-per-second count into a human-readable rate
    string (Anda tidak bisa membaca byte? / "Can't you read bytes?")."""
    step = 2 ** 10
    units = {0: "", 1: "Kb/s", 2: "Mb/s", 3: "Gb/s", 4: "Tb/s"}
    level = 0
    # Divide down by 1024 until the value fits the current unit.
    while size > step:
        size /= step
        level += 1
    return f"{round(size, 2)} {units[level]}"
def find_eyes(landmarks):
    """
    Split landmark entries into left-eye and right-eye dicts by key prefix.

    The dictionary should have string keys (non-string keys that are not
    left-eye entries will raise on the right-eye prefix check, as before).

    :param landmarks: dict with keys referencing left and right eyes
    :return: tuple of two dicts with the 'left_eye*' and 'right_eye*' entries
    """
    left_eye, right_eye = {}, {}
    for key, value in landmarks.items():
        if str(key).startswith("left_eye"):
            left_eye[key] = value
        elif key.startswith("right_eye"):
            right_eye[key] = value
    return left_eye, right_eye
def get_non_digit_prefix(characters):
    """
    Get the non-digit prefix from a given list of characters.

    :param characters: a list of characters; NOTE: the leading non-digit
        characters are consumed (popped) from this list in place.
    :returns: a list of leading non-digit characters (may be empty).

    Used by :func:`compare_strings()` as part of the implementation of
    :func:`~deb_pkg_tools.version.compare_versions_native()`.
    """
    prefix = []
    while characters:
        if characters[0].isdigit():
            break
        prefix.append(characters.pop(0))
    return prefix
def monomial_max(*monoms):
    """Returns the maximal degree for each variable in a set of monomials.

    Consider monomials x**3*y**4*z**5, y**5*z and x**6*y**3*z**9.  We
    wish to find out the maximal degree for each of x, y, z:

    >>> monomial_max((3,4,5), (0,5,1), (6,3,9))
    (6, 5, 9)
    """
    return tuple(map(max, zip(*monoms)))
def none_to_default(field, default):
    """
    Convert a None value into a default value.

    :param field: the original value that may be None.
    :param default: the new, default, value.
    :return: the default if field is None, the original value otherwise.
    :rtype: any
    """
    return default if field is None else field
def paths_to_j(path_dict):
    """
    Convert a candidate paths dictionary to a JSON-serializable form.

    Parameters
    ----------
    path_dict : dictionary
        Indexed by a node pair; each value is a list of paths, where a
        path is a list of nodes.

    Returns
    -------
    list
        A list of dicts ready for the json standard library routines.
    """
    return [
        {"source": pair[0], "target": pair[1], "paths": paths}
        for pair, paths in path_dict.items()
    ]
def parse_processors(install_profile, libraries, data, root):
    """Returns the list of processors to run from the install profile.

    Transforms {data} and [maven] references into paths, including maven
    references in the 'jar' and 'classpath' sections.  The 'outputs'
    section of each processor is intentionally ignored.
    """
    def resolve_arg(arg):
        """Resolve '[maven]' -> library path, '{data}' -> data value, else literal."""
        if arg[0] == '[':
            return libraries[arg[1:-1]]
        if arg[0] == '{':
            return data[arg[1:-1]]
        return arg

    processors = []
    for profile_entry in install_profile['processors']:
        processors.append({
            'jar': libraries[profile_entry['jar']],
            'classpath': [libraries[item] for item in profile_entry['classpath']],
            'args': [resolve_arg(a) for a in profile_entry['args']],
        })
    return processors
def starts_with(left: str, right: str) -> str:
    """Build the query fragment checking that ``left`` starts with ``right``."""
    return f"{left} STARTS WITH {right}"
def in_order(root):
    """In-order traversal of a binary tree; returns the visited data as a
    list.  Change the location of the visit to get other traversals."""
    if not root:
        return []
    return in_order(root.left) + [root.data] + in_order(root.right)
def zord_from_pb_file(path: str) -> str:
    """
    :param path: path to a .pb file
    :return: the name of the zord — the basename (after the last '/')
        with the 3-character extension removed
    """
    stem = path[:-3]
    # Walk backwards until the last path separator is found.
    offset = 1
    while stem[-offset] != "/":
        offset += 1
    return stem[-offset + 1:]
def poly2(x, b0, b1, b2):
    """
    Second-order Taylor polynomial for fitting.

    b1 = GD
    b2 = GDD / 2
    """
    linear = b1 * x
    quadratic = b2 * x ** 2
    # Same left-to-right summation order as b0 + b1*x + b2*x**2.
    return b0 + linear + quadratic
def count(iterator):
    """
    Count the length of an iterator by consuming it.

    Args:
        iterator: an iterable collection

    Returns:
        How many elements the iterator yields.
    """
    return sum(1 for _ in iterator)
def hex2rgb(hex):
    """Convert an RGB ('RRGGBB') or RGBA ('RRGGBBAA') hex string to a tuple.

    A leading '#' is ignored.  A 6-character value returns ``(r, g, b)``
    (unchanged behavior); an 8-character value returns ``(r, g, b, a)``
    — the RGBA case the docstring always promised but the old code
    rejected.

    :raises ValueError: for any other length.
    """
    hex = hex.strip('#')
    if len(hex) not in (6, 8):
        raise ValueError('Must pass in a 6 or 8 character hex value!')
    # Parse each 2-character channel as a base-16 integer.
    return tuple(int(hex[i:i + 2], 16) for i in range(0, len(hex), 2))
def complete_digit(digit, length=2):
    """Left-pad an integer with '0' to reach the desired length.

    Returns a string.  Values whose string form is already *length*
    characters or longer are returned unchanged.  Note: any sign
    character counts toward the width and is padded in front of,
    matching the original loop-based behaviour.
    """
    # rjust is a no-op when the string is already long enough.
    return str(int(digit)).rjust(int(length), '0')
def metric_wind_dict_to_beaufort(d):
    """Convert every wind value in *d* from meters/sec to a Beaufort level.

    The Beaufort scale maps wind-speed ranges to levels 0-12 (see:
    https://en.wikipedia.org/wiki/Beaufort_scale).  Conversion table:
    https://www.windfinder.com/wind/windspeed.htm

    :param d: the dictionary containing metric values
    :type d: dict
    :returns: a dict with the same keys and values converted to Beaufort
        level; the 'deg' entry (wind direction) is passed through untouched
    """
    # Inclusive upper bound of each Beaufort level 0..11; anything above
    # the last bound is level 12.
    bounds = (0.2, 1.5, 3.3, 5.4, 7.9, 10.7, 13.8, 17.1, 20.7, 24.4, 28.4, 32.6)
    converted = {}
    for key, value in d.items():
        if key == 'deg':  # do not convert wind degree
            converted[key] = value
        else:
            # The level equals the number of upper bounds the speed exceeds.
            converted[key] = sum(value > bound for bound in bounds)
    return converted
def default_formatter(item):
    """Default formatter: stringify *item* and append a newline.

    :param item: The item to save to file
    :return: The item's string form with a newline appended

    The single-element tuple wrapper guards against *item* itself being
    a tuple, which ``'%s\\n' % item`` would otherwise try to unpack into
    the format and raise TypeError.
    """
    return '%s\n' % (item,)
def average(l):
    """Compute the column-wise average of a 2-D list.

    :param l: list of equal-length rows of numbers.
    :return: list with one mean per column; empty list for empty input.
    """
    rows = len(l)
    # zip(*l) transposes rows into columns; an empty input yields no
    # columns, so the division is never reached.
    return [column_total / float(rows) for column_total in map(sum, zip(*l))]
def all_not_null(*args):
    """Return True if every positional argument is not None.

    >>> all_not_null('a', None)
    False
    >>> all_not_null('a', 'b')
    True

    With no arguments this returns True (vacuous truth), matching the
    original loop's behaviour.
    """
    # *args is always a tuple, so the old "args is not None" guard and the
    # trailing "return False" were dead code; all() is the whole loop.
    return all(arg is not None for arg in args)
def format_keep_alive_packages(keep_alive_packages):
    """Render the ssh ServerAliveCountMax option.

    Returns '' when the count is falsy (None, 0, ...).
    """
    if not keep_alive_packages:
        return ''
    return '-o ServerAliveCountMax={}'.format(keep_alive_packages)
def waber(lumin, lambdaValue):
    """Weber's-law style compression, inspired by physiology experiments.

    Raises the luminance to the power *lambdaValue* (0.6 is the usual
    choice, per the original author's note).
    """
    return lumin ** lambdaValue
def filter_duplicate_concessions(concessions):
    """Keep only the highest-priced concession per (bnf_code, date).

    Because concessions are made at the product-pack level but we only
    have prescribing data at the product level, we sometimes end up with
    multiple concessions matching a single product on the same date; in
    that case only the highest-priced concession is used.  The original
    relative order of the surviving entries is preserved.
    """
    # Sort by (key, cost, index); the dict comprehension then keeps the
    # LAST index seen per key, i.e. the highest-cost entry (cost ties
    # broken by the later index).
    ranked = sorted(
        ("{}:{}".format(c["bnf_code"], c["date"]), c["additional_cost"], i)
        for i, c in enumerate(concessions)
    )
    winners = set({key: i for key, _cost, i in ranked}.values())
    return [c for i, c in enumerate(concessions) if i in winners]
def str2hex(raw):
    """Given raw binary data, return a string with all octets in
    colon-separated hexadecimal notation.

    Accepts both ``bytes`` (Python 3, where iteration yields ints — the
    old ``ord(c)`` call crashed on these) and ``str`` input.

    :param raw: bytes or str to render.
    :return: e.g. ``'DE:AD:BE:EF'``.
    """
    return ':'.join(
        "%02X" % (c if isinstance(c, int) else ord(c)) for c in raw
    )
def full_label(l):
    """Convert a label to full format, e.g. //a/b/c -> //a/b/c:c.

    Labels already containing a ':' are returned unchanged; otherwise
    the last path component is appended as the target name.

    Args:
        l: The label to convert to full format.

    Returns:
        The label in full format, or the original input if it was
        already in full format.
    """
    if ":" in l:
        return l
    return "{}:{}".format(l, l.rpartition("/")[-1])
def build_query_part(verb_and_vars, subject_term, lines):
    """Build a SPARQL query fragment.

    :param verb_and_vars: SPARQL verb and variables.
    :param subject_term: Common subject term substituted for ``{0}``.
    :param lines: Lines to insert into the WHERE block.
    :return: A SPARQL query fragment, or '' when *lines* is empty.
    """
    if not lines:
        return ""
    # str.format() would choke on other brace groups in the data, so the
    # {0} placeholder is substituted with a plain replace instead.
    fragment = f'{verb_and_vars} {{ \n{lines} }} \n'
    return fragment.replace(u"{0}", subject_term)
def reverse_list_recursive(head):
    """Reverse a singly linked list recursively.

    :param head: head node of the given linked list
    :type head: ListNode
    :return: head node of the reversed linked list
    :rtype: ListNode
    """
    # Base cases: empty list or single node are already reversed.
    if head is None:
        return None
    if head.next is None:
        return head
    tail = head.next
    reversed_head = reverse_list_recursive(tail)
    # tail is now the last node of the reversed suffix; hook head after it.
    tail.next = head
    head.next = None
    return reversed_head
def facility_name(hutch):
    """Return the facility name ('<HUTCH>_Instrument') for an instrument."""
    return hutch.upper() + '_Instrument'
def _ComplexSentenceAsFlatList(complex_sentence, prefix=""):
    """Expand complex_sentence into a flat list of strings.

    e.g. "[NUMBERED] [IDS, INES] [SOLUTION]" expands to
    ['NUMBERED IDS SOLUTION', 'NUMBERED INES SOLUTION'].

    Each '[...]' group is a comma-separated choice set; one output string
    is produced per combination of choices.  NOTE(review): the index
    arithmetic below assumes exactly ONE separator character (e.g. a
    single space) between groups — confirm against callers.
    """
    results = []
    # Index of the NEXT '[' relative to the slice starting at position 1,
    # i.e. (absolute index - 1).  Searching from position 1 skips the '['
    # that may open the current group.
    next_start = complex_sentence[1:].find('[')
    if next_start == -1:
        # Base case: no further group; strip surrounding brackets if present.
        if complex_sentence[0] == '[':
            results.append("%s" % (prefix + complex_sentence[1:-1]))
        else:
            results.append("%s" % (prefix + complex_sentence))
        return results
    # next_start is one LESS than the absolute '[' position, so this slice
    # ends just before the single separator character between the groups...
    choice_set = complex_sentence[:next_start]
    # ...and this slice starts exactly at the next '['.
    rest = complex_sentence[next_start+1:]
    for choice in choice_set[1:-1].split(", "):
        results.extend(_ComplexSentenceAsFlatList(rest, prefix+choice+" "))
    return results
def get_loop_range(start, end, step):
    """Return the inclusive range a loop will operate over.

    The end bound is shifted one unit in the direction of *step* so that
    *end* itself is included in the range.
    """
    start, end, step = int(start), int(end), int(step)
    stop = end + 1 if step > 0 else end - 1
    return range(start, stop, step)
def is_non_zero_arabic_numeral(string):
    """Return True if *string* is a non-zero natural Arabic numeral no
    greater than 3899 (the maximum Roman numeral), False otherwise.

    PARAMETERS:
    string : str

    RETURNS:
    bool
    """
    # isdigit() rules out signs, floats and empty strings, so the value
    # is a non-negative integer; the chained comparison does the rest.
    return string.isdigit() and 0 < int(string) <= 3899
def cookies(self):
    """Return a canned cookie dict for mocking.

    :param self: unused; present so this can stand in for an instance
        method on the mocked object.
    :return: dict with a single fake JSESSIONID entry.
    """
    return {"JSESSIONID": "test-value"}
def is_relevant_syst_for_shape_corr(flavor_btv, syst):
    """Return True if a flavor/syst combination is relevant."""
    # Systematics relevant for each BTV flavor code.
    relevant = {
        0: ("central", "up_jes", "down_jes", "up_lf", "down_lf",
            "up_hfstats1", "down_hfstats1", "up_hfstats2", "down_hfstats2"),
        1: ("central", "up_cferr1", "down_cferr1", "up_cferr2", "down_cferr2"),
        2: ("central", "up_jes", "down_jes", "up_hf", "down_hf",
            "up_lfstats1", "down_lfstats1", "up_lfstats2", "down_lfstats2"),
    }
    if flavor_btv not in relevant:
        raise ValueError("ERROR: Undefined flavor = %i!!" % flavor_btv)
    return syst in relevant[flavor_btv]
def ns_interp(text, ns=None):
    """Expand a prefixed term (e.g. ``foo:Bar``) into an angle-bracketed IRI.

    SPARQL queries can't handle the subject of a triple being
    ``d1resolve:doi:10.6073/AA/knb-lter-pie.77.3`` but can handle
    ``<https://cn.dataone.org/cn/v1/resolve/doi:10.6073/AA/...>``, so
    the prefix is substituted using the *ns* mapping.

    :param text: term to interpolate.
    :param ns: mapping of namespace prefix -> base IRI, or None.
    :return: String, either expanded or returned unchanged.
    """
    if ns is None:
        return text
    sep = text.find(":")
    # Nothing to expand when no characters follow the (first) colon.
    if len(text) <= sep + 1:
        return text
    prefix = text[:sep]
    local = text[sep + 1:]
    if prefix not in ns:
        return text
    return "<{}{}>".format(ns[prefix], local)
def find_first_visited_twice(visited_order):
    """Return the first location that repeats in *visited_order*.

    Returns None when every location is unique.
    """
    seen = set()
    for spot in visited_order:
        if spot in seen:
            return spot
        seen.add(spot)
    return None
def _get_span_length_key(span): """Sorts span by decreasing length first and incresing first index second.""" return span[1] - span[0], -span[0]
def compute_stats(parameter_list):
    """Return (average, maximum, minimum) of a numeric sequence.

    :param parameter_list: non-empty sequence of numbers.
    :return: tuple (average, max, min).
    :raises ValueError: if the sequence is empty — previously this
        surfaced as an opaque ZeroDivisionError from the average.
    """
    if not parameter_list:
        raise ValueError("parameter_list must not be empty")
    return (
        sum(parameter_list) / len(parameter_list),
        max(parameter_list),
        min(parameter_list),
    )
def val_noncoding_ht_path(build):
    """Path of the noncoding-variants HT used to validate callset data type.

    HT written using hail-elasticsearch-pipelines/
    download_and_create_reference_datasets/v02/hail_scripts/
    write_dataset_validation_ht.py

    :param build: genome build identifier (e.g. 37 or 38).
    :return: gs:// path string for that build.
    """
    template = ("gs://seqr-reference-data/GRCh{build}/validate_ht/"
                "common_noncoding_variants.grch{build}.ht")
    return template.format(build=build)
def inner_apertures(ep, apertures):
    """Compute aperture centre positions inside a pad.

    ep: (width, height) of pad
    apertures: (width, height, w_gap, h_gap) of apertures, where w_gap is
        the x-axis spacing between apertures (h_gap likewise for y).

    Fits as many apertures inside the pad as possible and returns a list
    of (x, y) aperture centres, symmetric about the pad centre.
    """
    pad_w, pad_h = ep
    ap_w, ap_h, gap_w, gap_h = apertures
    pitch_w = ap_w + gap_w
    pitch_h = ap_h + gap_h
    # How many apertures fit along each axis.
    cols = int((pad_w - ap_w) // pitch_w) + 1
    rows = int((pad_h - ap_h) // pitch_h) + 1
    centres = []
    # Start at the leftmost/bottommost centre so the grid is centred on (0, 0);
    # accumulate by pitch exactly as the original did.
    x = -((cols - 1) * pitch_w / 2.0)
    for _ in range(cols):
        y = -((rows - 1) * pitch_h / 2.0)
        for _ in range(rows):
            centres.append((x, y))
            y += pitch_h
        x += pitch_w
    return centres