content
stringlengths
42
6.51k
def highlight(colour, text):
    """Highlight text in a shell with an ANSI background colour.

    :param colour: one of black/red/green/yellow/blue/magenta/cyan/gray
    :param text: value to wrap; passed through str()
    :return: text wrapped in the escape codes, or plain str(text) when
        the colour is unknown.
    """
    # Dispatch table replaces the original eight-way if-chain.
    backgrounds = {
        "black": "40", "red": "41", "green": "42", "yellow": "43",
        "blue": "44", "magenta": "45", "cyan": "46", "gray": "47",
    }
    code = backgrounds.get(colour)
    if code is None:
        return str(text)
    return "\033[1;" + code + "m" + str(text) + "\033[1;m"
def seconds_readable(seconds):
    """Convert a second count into an (hours, minutes, seconds) tuple.

    :param seconds: number of seconds to convert.
    :return: (h, m, s) as ints.
    """
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return (int(hours), int(minutes), int(secs))
def includes(collection, target, from_index=0):
    """Check whether ``target`` occurs in ``collection``.

    For a dict the values are searched; for a sequence the search starts
    at ``from_index`` (negative values offset from the end).

    >>> includes([1, 2, 3, 4], 2)
    True
    >>> includes([1, 2, 3, 4], 2, from_index=2)
    False
    >>> includes({'a': 1, 'b': 2, 'c': 3, 'd': 4}, 2)
    True
    """
    if isinstance(collection, dict):
        haystack = collection.values()
    else:
        # Slicing only makes sense for sequences, not dicts.
        haystack = collection[from_index:]
    return target in haystack
def fatorial(a, show=False):
    """Compute the factorial of ``a``.

    :param a: value whose factorial is computed
    :param show: when True, print the expanded product as it is built
    :return: a!
    """
    result = 1
    for factor in range(a, 0, -1):
        result *= factor
        if show:
            separator = ' = ' if factor == 1 else ' x '
            print(f'{factor}{separator}', end='')
    return result
def fermat(num):
    """Run one simplified iteration of Fermat's primality test (base 2).

    True when 2**num ≡ 2 (mod num); primes always pass, but so do
    base-2 pseudoprimes such as 341.
    """
    # pow(2, num, num) does modular exponentiation without materialising
    # the huge 2**num integer; the boolean is returned directly instead
    # of the original `return True / return False` branches.
    return (pow(2, num, num) - 2) % num == 0
def binary_search(arr, element, low=0, high=None):
    """Return the index of ``element`` in sorted ``arr`` via recursive
    binary search, or -1 when absent.

    :param low: inclusive lower bound of the search window
    :param high: inclusive upper bound; defaults to the last index
    """
    if high is None:  # fixed: identity comparison to None, not `== None`
        high = len(arr) - 1
    if high < low:
        return -1
    mid = (high + low) // 2
    if arr[mid] == element:
        return mid
    if arr[mid] > element:
        return binary_search(arr, element, low, mid - 1)
    return binary_search(arr, element, mid + 1, high)
def clean_snippet(snippet_value):
    """Only return the snippet value if it looks like there is a match
    in the snippet; otherwise return None.
    """
    # Snippets are always returned from the search API whether or not they
    # match the query, so the only signal of a match is the "<b>" tag that
    # wraps matched terms. Rudimentary, and may not survive API updates.
    if "<b>" not in snippet_value:  # fixed: `X not in` over `not X in`
        return None
    # When the text has been snippeted and doesn't end in an ellipsis,
    # the API appends a superfluous '.' — strip it.
    if snippet_value.endswith(".") and not snippet_value.endswith("..."):
        snippet_value = snippet_value[:-1]
    return snippet_value
def add_ending(new_txt, txt):
    """Re-attach the trailing line ending of ``txt`` to ``new_txt``.

    Only '\\n' and '\\r' terminators are considered; at most one of the
    checks can match since they test the same final character.
    """
    for terminator in ('\n', '\r'):
        if txt.endswith(terminator):
            new_txt = "%s%s" % (new_txt, terminator)
    return new_txt
def get_instance_keys(class_instance):
    """Return the names of an instance's public attributes.

    Attributes whose name begins with '_' are excluded.
    """
    public = []
    for attr in vars(class_instance):
        if attr[:1] != "_":
            public.append(attr)
    return public
def extract_key(obj, target):
    """Recursively fetch values from nested JSON.

    Args:
        obj ([dict]): an arbitrarily nested dictionary with environment variables
        target (str): The target key we want to extract

    Returns:
        (str | dict): value stored under ``target``, which may itself be
        a string or another dictionary.

    Raises:
        KeyError: if the target is nowhere in the dictionary tree.
    """
    if isinstance(obj, dict):
        for key, value in obj.items():
            if key == target:
                return value
            if isinstance(value, dict):
                # fixed: the original did `return extract_key(v, target)` on
                # the FIRST nested dict, so a miss there raised KeyError even
                # when a later sibling held the target. Now we keep searching.
                try:
                    return extract_key(value, target)
                except KeyError:
                    continue
    raise KeyError("Key: '{}' not found".format(target))
def oxfordize_list(strings):
    """Join a list of strings using Oxford-comma rules.

    oxfordize_list(['A'])           => 'A'
    oxfordize_list(['A', 'B'])      => 'A and B'
    oxfordize_list(['A', 'B', 'C']) => 'A, B, and C'
    """
    count = len(strings)
    if count == 0:
        return ''
    if count == 1:
        return strings[0]
    if count == 2:
        return '%s and %s' % (strings[0], strings[1])
    return '%s, and %s' % (', '.join(strings[:-1]), strings[-1])
def get_export_initialize_vars(database, port, connection_string):
    """ Gets the shell script that persists the Forseti env variables.

    :param database: Forseti database name (becomes FORSETI_DB_NAME)
    :param port: Cloud SQL port (becomes SQL_PORT)
    :param connection_string: instance connection string
        (becomes SQL_INSTANCE_CONN_STRING)
    :return: shell snippet exporting the three variables
    """
    # NOTE(review): the template consumes (port, connection_string,
    # database) — a different order than the function signature; verify
    # call sites pass arguments in signature order.
    template = """
export SQL_PORT={}\n
export SQL_INSTANCE_CONN_STRING="{}"\n
export FORSETI_DB_NAME="{}"\n
"""
    return template.format(port, connection_string, database)
def _permutation_parity(perm): """computes parity of permutation in O(n log n) time using cycle decomposition""" n = len(perm) not_visited = set(range(n)) i = perm[0] c = 1 while len(not_visited) > 0: if i in not_visited: not_visited.remove(i) else: c += 1 i = not_visited.pop() i = perm[i] return (-1) ** (n - c)
def get_match_files(git_pattern: str) -> bool:
    """Whether a gitignore pattern can match files.

    A trailing separator restricts a pattern to directories (and their
    contents); without it the pattern may match files as well.

    >>> assert get_match_files('')
    >>> assert get_match_files('/something')
    >>> assert get_match_files('/some/thing')
    >>> assert not get_match_files('/some/thing/')
    """
    return git_pattern[-1:] != "/"
def get_att_index(poly, att):
    """Return the index of the attribute in ``poly[2]`` whose first
    element equals ``att[0]``, or None when absent (or when the
    attribute list itself is None).
    """
    if poly[2] is None:  # fixed: `is None` over isinstance(..., type(None))
        return None
    # enumerate replaces the original manually incremented counter.
    for index, existing in enumerate(poly[2]):
        if existing[0] == att[0]:
            return index
    return None
def isNum(s) -> bool:
    """Return True when ``s`` parses as a number, allowing at most one
    '/' (a fraction) with one extra '.' removed alongside it.

    >>> isNum("1")
    True
    >>> isNum("-1.2")
    True
    >>> isNum("1/2")
    True
    >>> isNum("3.9/2")
    True
    >>> isNum("3.9/2.8")
    True
    >>> isNum("jazz hands///...")
    False
    """
    if "/" in str(s):
        # Drop one slash and one dot so e.g. "3.9/2.8" reduces to "392.8".
        s = s.replace("/", "", 1)
        s = s.replace(".", "", 1)
    try:
        complex(s)
    except ValueError:
        return False
    return True
def biggest_bbox(bbox1: list, bbox2: list) -> list:
    """Merge two [min_x, min_y, max_x, max_y] boxes into one that
    encompasses both; if either input has fewer than 4 entries
    (e.g. an empty list), the other box is returned unchanged.
    """
    if len(bbox1) < 4:
        return bbox2
    if len(bbox2) < 4:
        return bbox1
    return [min(bbox1[0], bbox2[0]),
            min(bbox1[1], bbox2[1]),
            max(bbox1[2], bbox2[2]),
            max(bbox1[3], bbox2[3])]
def cround(val):
    """Round the real and imaginary parts of a complex number.

    :param val: complex value to round
    :return: complex with both components rounded to the nearest int
    """
    real_part = round(val.real)
    imag_part = round(val.imag)
    return complex(real_part, imag_part)
def dl_ia_utils_check_folder(path_folder):
    """Ensure a folder exists, creating it when missing.

    :param path_folder: string with the path
    :return: error code (0: good, 1: bad)
    """
    import os
    status = 0
    try:
        if not os.path.isdir(path_folder):
            print('Creating folder: {} '.format(path_folder))
            os.mkdir(path_folder)
    except Exception as exception_msg:
        # Best effort: report the problem and signal failure via the code.
        print('(!) Error in dl_ia_utils_check_folder: ' + str(exception_msg))
        status = 1
    return status
def scale_cpu_usage(cpu_usage_ratio, host_cpu_count, cpu_share_limit):
    """Rescale a host-wide CPU ratio against a cpu-shares limit.

    The ratio was computed against total host capacity; multiplying the
    host CPU count by 1024 yields the total available cpu shares, which
    is then divided by the task/container share limit.
    """
    total_shares_used = cpu_usage_ratio * host_cpu_count * 1024
    return total_shares_used / cpu_share_limit
def when_is_vocation(month: str) -> str:
    """Return the month name with only its first letter capitalized.

    Args:
        month (str): a month name in any letter case.

    Returns:
        str: the month, first letter upper-cased, the rest lower-cased.
    """
    # str.capitalize() already lower-cases the remaining characters, so the
    # original's preliminary .lower() call was redundant.
    return month.capitalize()
def string_set_intersection(set_a, set_b, ignore_case=True, sep=","):
    """Intersect two separator-delimited string sets.

    :type set_a: str
    :type set_b: str
    :type ignore_case: bool
    :type sep: str
    :rtype: set  (empty when either input is None)
    """
    if set_a is None or set_b is None:
        return set()
    if ignore_case:
        set_a, set_b = set_a.lower(), set_b.lower()
    return set(set_a.split(sep)) & set(set_b.split(sep))
def collect_query_fields(node, fragments):
    """Recursively collects fields from the AST

    Args:
        node (dict): A node in the AST (raw dict or graphql AST object)
        fragments (dict): Fragment definitions

    Returns:
        A dict mapping each field found, along with their sub fields.

        {'name': {},
         'image': {'id': {}, 'name': {}, 'description': {}},
         'slug': {}}
    """
    field = {}
    # fixed: isinstance replaces `type(node) == dict`, which broke for
    # dict subclasses.
    if isinstance(node, dict):
        selection_set = node.get('selection_set')
    else:
        selection_set = node.selection_set
    if selection_set:
        for leaf in selection_set.selections:
            if leaf.kind == 'field':
                field[leaf.name.value] = collect_query_fields(leaf, fragments)
            elif leaf.kind == 'fragment_spread':
                # NOTE(review): subscripting `leaf['name']['value']` while the
                # other branches use attribute access looks inconsistent —
                # confirm fragment-spread nodes support item access.
                field.update(collect_query_fields(fragments[leaf['name']['value']],
                                                  fragments))
            elif leaf.kind == 'inline_fragment':
                field[leaf.type_condition.name.value] = collect_query_fields(leaf, fragments)
    return field
def eat_char(text_string, char_set):
    """Consume leading characters of ``text_string`` that belong to
    ``char_set``.

    :param text_string: string to scan
    :param char_set: characters to eat (any membership-testable container)
    :return: index of the first character NOT in ``char_set`` (which is
        len(text_string) when every character matches).
    """
    # enumerate + early return replaces the manual index counter.
    for index, character in enumerate(text_string):
        if character not in char_set:
            return index
    return len(text_string)
def capitalize_first_letter(text):
    """Strip surrounding whitespace and upper-case the first letter.

    :param text: input string
    :return: stripped text with the first character upper-cased, or ''
        when the stripped text is empty.
    """
    stripped = text.strip()
    # fixed: the original indexed chars[0] unconditionally, raising
    # IndexError on empty or whitespace-only input.
    if not stripped:
        return ""
    return stripped[0].upper() + stripped[1:]
def reset_model(model: dict) -> dict:
    """Reset a detectron2 checkpoint dict to a clean slate.

    Deletes the scheduler and optimizer states and zeroes the iteration
    counter (most important), so the weights can be reused for a fresh
    training run. The dict is modified in place and also returned.
    """
    assert isinstance(model, dict)
    assert 'model' in model
    # dict.pop with a default replaces the membership-test-then-delete
    # pattern; a missing key is simply a no-op.
    model.pop('scheduler', None)
    model.pop('optimizer', None)
    if 'iteration' in model:
        model['iteration'] = 0
    return model
def is_tuple(a):
    """Return True when the argument is a tuple (subclasses included)."""
    return isinstance(a, tuple)
def payment(rate=0.07, num_periods=72.0, present_value=0.0):
    """Constant-period loan payment (standard annuity formula).

    :param rate: per-period interest rate
    :param num_periods: number of payment periods
    :param present_value: loan principal
    :return: payment per period (float)
    """
    # fixed: rate == 0 made the denominator zero; the zero-interest
    # payment is just an even split of the principal.
    if rate == 0:
        return present_value / num_periods
    # Hoist the repeated (1 + rate) ** num_periods computation.
    growth = (1.0 + rate) ** num_periods
    return present_value * (rate * growth) / (growth - 1.0)
def mean(a):
    """Arithmetic mean of a list; 0.0 for an empty list.

    Raises TypeError when the argument is not a list.
    """
    if not isinstance(a, list):
        raise TypeError('Function mean() takes a list, not a %s' % a.__class__)
    if not a:
        return 0.0
    return float(sum(a) / len(a))
def write_vrt_from_csv(infile, x, y, z):
    """Write a GDAL VRT wrapper for a CSV point dataset.

    :param infile: path of the CSV file (must end in '.csv')
    :param x: column name holding the X coordinate
    :param y: column name holding the Y coordinate
    :param z: column name holding the Z coordinate
    :return: path of the .vrt file written next to ``infile``
    """
    assert infile.endswith('.csv')
    # fixed: infile.replace('csv', 'vrt') replaced EVERY 'csv' occurrence in
    # the path (e.g. '/data/csv_files/pts.csv'); likewise split('.')[0] broke
    # the layer name for paths containing dots. Strip only the extension.
    base = infile[:-len('.csv')]
    s = "<OGRVRTDataSource>\n"
    s += ' <OGRVRTLayer name="{}">\n'.format(base)
    s += ' <SrcDataSource>'
    s += infile
    s += '</SrcDataSource>\n'
    s += ' <GeometryType>wkbPoint</GeometryType>\n'
    s += ' <GeometryField separator="," encoding="PointFromColumns" x="{}" y="{}" z="{}"/>\n'.format(x, y, z)
    s += ' </OGRVRTLayer>\n'
    s += ' </OGRVRTDataSource>'
    vrt_path = base + '.vrt'
    with open(vrt_path, 'w') as f:
        f.write(s)
    return vrt_path
def _get_image_root_type(image_type): """Determines which root an image type is located in. Args: image_type (str): Type of image. Returns: str: Either "input" or "output" for the corresponding root. """ inputs = { "background_color", "background_color_levels", "background_disp", "background_disp_levels", "background_disp_upsample", "color", "color_levels", "foreground_masks", "foreground_masks_levels", } outputs = { "bin", "confidence", "cost", "disparity", "disparity_upsample", "disparity_levels", "disparity_time_filtered", "disparity_time_filtered_levels", "fused", "mismatches", "exports", "exports_cubecolor", "exports_cubedisp", "exports_eqrcolor", "exports_eqrdisp", "exports_lr180", "exports_tb3dof", "exports_tbstereo", } root_map = dict.fromkeys(inputs, "input") root_map.update(dict.fromkeys(outputs, "output")) return root_map[image_type]
def bh_decode(s):
    """Replace code strings from .SET files with human readable label strings."""
    # Applied strictly in order — e.g. 'SP_' must be stripped first and
    # 'TAC_G'/'TAC_R' must be handled before the generic '_T*' suffixes.
    replacements = (
        ('SP_', ''),
        ('_ZC', ' ZC Thresh.'),
        ('_LL', ' Limit Low'),
        ('_LH', ' Limit High'),
        ('_FD', ' Freq. Div.'),
        ('_OF', ' Offset'),
        ('_HF', ' Holdoff'),
        ('TAC_G', 'TAC Gain'),
        ('TAC_R', 'TAC Range'),
        ('_TC', ' Time/Chan'),
        ('_TD', ' Time/Div'),
        ('_FQ', ' Threshold'),
    )
    for code, label in replacements:
        s = s.replace(code, label)
    return s
def has_path(coll, path):
    """Return True when the sequence of keys/indexes in ``path``
    resolves inside the nested collection ``coll``.
    """
    node = coll
    for step in path:
        try:
            node = node[step]
        except (KeyError, IndexError):
            return False
    return True
def maxHeightTry1(d1, d2, d3):
    """
    A method that calculates largest possible tower height of given boxes (bad approach 1/2).
    Problem description: https://practice.geeksforgeeks.org/problems/box-stacking/1
    time complexity:	O(n*max(max_d1, max_d2, max_d3)^2)
    space complexity:	O(n*max(max_d1, max_d2, max_d3)^2)

    Parameters
    ----------
    d1 : int[]
        a list of int values representing the 1st dimension of a / multiple 3d-box(es)
    d2 : int[]
        a list of int values representing the 2nd dimension of a / multiple 3d-box(es)
    d3 : int[]
        a list of int values representing the 3rd dimension of a / multiple 3d-box(es)

    Returns
    -------
    x : int
        the largest possible tower height
    """
    assert(len(d1) >= 1)
    assert(len(d1) == len(d2) == len(d3))
    # Largest edge length over all boxes bounds the DP's base-area grid.
    max_dimension = max([ max(d1), max(d2), max(d3) ])
    # Normalise each box so its three edges are in ascending order.
    boxes = zip(d1, d2, d3)
    boxes = map(sorted, boxes)
    # Sort boxes lexicographically (last key applied wins: e[0], e[1], e[2]).
    boxes = sorted(boxes, key=lambda e: e[2])
    boxes = sorted(boxes, key=lambda e: e[1])
    boxes = sorted(boxes, key=lambda e: e[0])
    # Add one rotated copy of every box, then re-sort the doubled list.
    boxes = boxes + list(map(lambda e: [e[1], e[2], e[0]], boxes))
    boxes = sorted(boxes, key=lambda e: e[2])
    boxes = sorted(boxes, key=lambda e: e[1])
    boxes = sorted(boxes, key=lambda e: e[0])
    n = len(boxes)
    # DP table max_height[i][x][y]: best tower height using boxes[0..i]
    # on a base footprint of at most x by y.
    # dimension 1: i.th box
    # dimension 2: x-coordinate left
    # dimension 3: y-coordinate left
    max_height = {i: [[0 for _ in range(max_dimension + 1)]
                      for _ in range(max_dimension + 1)]
                  for i in range(-1, n)}
    for i in range(n):
        box_x, box_y, box_z = boxes[i][0], boxes[i][1], boxes[i][2]
        for x in range(max_dimension + 1):
            for y in range(max_dimension + 1):
                # Option: skip box i entirely.
                max_tmp = max_height[i-1][x][y]
                # Each orientation that fits inside (x, y) may sit on top of
                # a tower whose footprint is strictly smaller (box-1 in each
                # direction), contributing its remaining edge as height.
                if box_x <= x and box_y <= y:
                    max_tmp = max(
                        max_tmp, max_height[i-1][box_x-1][box_y-1] + box_z)
                if box_x <= x and box_z <= y:
                    max_tmp = max(
                        max_tmp, max_height[i-1][box_x-1][box_z-1] + box_y)
                if box_y <= x and box_z <= y:
                    max_tmp = max(
                        max_tmp, max_height[i-1][box_y-1][box_z-1] + box_x)
                max_height[i][x][y] = max_tmp
    # Answer: all boxes considered, unrestricted footprint.
    return max_height[n-1][max_dimension][max_dimension]
def decompressCmd(path, default="cat"):
    """Return the command that decompresses ``path`` to stdout, or
    ``default`` if the file is not compressed (defaulting to `cat` so the
    contents are just written through).
    """
    # str.endswith accepts a tuple of suffixes — one call per family.
    if path.endswith((".Z", ".gz")):
        return "zcat"
    if path.endswith(".bz2"):
        return "bzcat"
    return default
def find_github_item_url(github_json, name):
    """Get url of a blob/tree from a github json response, or None."""
    return next(
        (item['url'] for item in github_json['tree'] if item['path'] == name),
        None,
    )
def convert_to_hierarchical(contours):
    """Convert a list of contours into a hierarchical structure:
    slice > frame > heartpart -- Contour.

    :param contours: list of Contour objects
    :return: nested dict {slice: {frame: {part: Contour}}}
    """
    hierarchical_contours = {}
    for contour in contours:
        # setdefault replaces the original `not(key in d.keys())`
        # membership checks followed by manual inserts.
        frames = hierarchical_contours.setdefault(contour.slice, {})
        parts = frames.setdefault(contour.frame, {})
        parts[contour.part] = contour
    return hierarchical_contours
def compress(dates, values):
    """Collapse parallel date/value lists to one entry per date.

    For each distinct date the LARGEST associated value is kept; the
    result is ordered by date (lexicographic comparison of the date
    values as given).

    :param dates: list of dates (parallel to ``values``)
    :param values: list of values
    :return: (compressed_dates, compressed_values)
    :raises ValueError: when the lists differ in length
    """
    if len(dates) != len(values):
        # ValueError is a subclass of the original bare Exception, so
        # existing `except Exception` handlers still work.
        raise ValueError("Length of dates and values must match.")
    # fixed: the original sorted dates and values INDEPENDENTLY (in place,
    # mutating the caller's lists), which scrambled the date/value pairing.
    # Sorting the pairs keeps each value with its date; since ties sort by
    # value, the dict retains the largest value per date.
    last_per_date = {}
    for date, value in sorted(zip(dates, values)):
        last_per_date[date] = value
    return list(last_per_date), list(last_per_date.values())
def get_url_chat_id(chat_id: int) -> int:
    """Convert an internal Telegram chat id into the numeric id used in
    https://t.me/c/{chat_id}/{msg_id} links.

    Supergroup ids are negative and offset by -1_000_000_000_000; adding
    the compensator back and taking the absolute value yields the id the
    t.me links expect, without relying on a hardcoded chat username.

    :param chat_id: chat_id to apply the magic number to
    :return: chat_id for t.me links
    """
    id_compensator = 1_000_000_000_000
    return abs(chat_id + id_compensator)
def _sanitizeName(name): """Convert formula base_name to something safer.""" result = '' for c in name: if c >= 'a' and c <= 'z': result += c elif c >= 'A' and c <= 'Z': result += c elif c >= '0' and c <= '9': result += c else: result += '_' return result
def get_winner(game_state):
    """Returns the winner if any. Else it returns an empty string.

    game_state is a 3x3 grid (list of three rows); " " marks an empty
    cell. Rows and columns are scanned first, then the two diagonals;
    the first complete non-blank line decides the winner.
    """
    for i in range(len(game_state)):
        row = game_state[i]
        column = [game_state[0][i], game_state[1][i], game_state[2][i]]
        # A line wins when all three cells match and are not blank.
        if row[0] == row[1] and row[0] == row[2] and row[0] != " ":
            return row[0]
        if (
            column[0] == column[1]
            and column[0] == column[2]
            and column[0] != " "
        ):
            return column[0]
    # Diagonals: top-left to bottom-right and bottom-left to top-right.
    left_diag = [game_state[0][0], game_state[1][1], game_state[2][2]]
    right_diag = [game_state[2][0], game_state[1][1], game_state[0][2]]
    if (
        left_diag[0] == left_diag[1]
        and left_diag[0] == left_diag[2]
        and left_diag[0] != " "
    ):
        return left_diag[0]
    if (
        right_diag[0] == right_diag[1]
        and right_diag[0] == right_diag[2]
        and right_diag[0] != " "
    ):
        return right_diag[0]
    return ""
def min_w_nan(array):
    """Return the minimum, skipping NaN values.

    Relies on NaN != NaN: only values equal to themselves survive the
    filter. Raises ValueError when nothing but NaNs is present.
    """
    return min(value for value in array if value == value)
def label_to_mem(value, mem_predict):
    """Turn a label into a memory prediction.

    Returns the first prediction whose position is >= ``value``, or
    None when ``value`` exceeds every index.
    """
    return next(
        (predict for index, predict in enumerate(mem_predict) if value <= index),
        None,
    )
def job_id(conf):
    # type: (dict) -> str
    """Extract the job id from a job specification.

    :param dict conf: job configuration object
    :rtype: str
    :return: job id stored under the 'id' key
    """
    return conf['id']
def student_ranking(student_scores, student_names):
    """Build ranking lines for students already sorted by score.

    :param student_scores: scores in descending order.
    :param student_names: names in descending order by exam score.
    :return: list of strings "<rank>. <student name>: <score>".
    """
    return [
        f"{position + 1}. {name}: {student_scores[position]}"
        for position, name in enumerate(student_names)
    ]
def altair_fix(html: str, i: int):
    """Suffix altair/vega JavaScript identifiers with an index so several
    plots can be embedded on one page without clashing, and prepend the
    matching <div> anchor.
    """
    text = str(html)  # may arrive as a BeautifulSoup tag
    suffix = str(i)
    for keyword in ("vis", "spec", "embed_opt", "const el"):
        text = text.replace(keyword, keyword + suffix)
    return f'<div id="vis{i}"></div>\n' + text
def associate(first_list, second_list, offset, max_difference):
    """Associate two dictionaries of (stamp, data).

    As the time stamps never match exactly, we aim to find the closest
    match for every input tuple (greedy, smallest time difference first).

    Input:
        first_list -- first dictionary of (stamp, data) tuples
        second_list -- second dictionary of (stamp, data) tuples
        offset -- time offset between both dictionaries
        max_difference -- search radius for candidate generation

    Output:
        matches -- sorted list of matched stamp pairs (stamp1, stamp2)
    """
    # Sets give O(1) membership tests and removals; the original used
    # lists, making the greedy pass O(n^2) in list scans alone.
    first_keys = set(first_list.keys())
    second_keys = set(second_list.keys())
    potential_matches = [(abs(a - (b + offset)), a, b)
                         for a in first_keys
                         for b in second_keys
                         if abs(a - (b + offset)) < max_difference]
    potential_matches.sort()
    matches = []
    for diff, a, b in potential_matches:
        if a in first_keys and b in second_keys:
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
    matches.sort()
    return matches
def strfcount(name_count):
    """Render an {event_name: count} dict as aligned backtick lines.

    Counts are right-aligned to the width of the largest count; events
    are listed alphabetically. Returns '' for an empty dict.
    """
    if not name_count:
        return ''
    count_width = len(str(max(name_count.values())))
    rows = [
        '`x {:{width}d}` {}'.format(count, name, width=count_width)
        for name, count in sorted(name_count.items())
    ]
    return '\n'.join(rows)
def in_all_repository_dependencies(repository_key, repository_dependency, all_repository_dependencies):
    """Return True if { repository_key : repository_dependency } is in
    all_repository_dependencies.
    """
    # Dict keys are unique, so the original scan over items() comparing
    # each key is just a direct lookup; a missing key means False.
    return repository_dependency in all_repository_dependencies.get(repository_key, ())
def filter_by_limit(list, limit):
    """Return at most the first ``limit`` items, per the --limit arg.

    Note: the parameter name ``list`` shadows the builtin; it is kept so
    keyword callers (``filter_by_limit(list=..., limit=...)``) still work.
    """
    return list[0:limit]
def _format_num_reads(reads): """Format number of reads as k, M, or G""" fmt_reads = [] for x in reads: if int(x) < 1e6: fmt_reads.append("{:.1f}k".format(int(x)/1e3)) elif int(x) > 1e9: fmt_reads.append("{:.1f}G".format(int(x)/1e9)) else: fmt_reads.append("{:.1f}M".format(int(x)/1e6)) return fmt_reads
def createLDAPObjDict(objs):
    """Build a dictionary from a list of LDAP objects.

    Keys are each object's cn; values are the objects themselves.
    """
    by_cn = {}
    for entry in objs:
        by_cn[entry.cn] = entry
    return by_cn
def nodes_merge_unwind(labels, merge_properties, property_parameter=None):
    """Generate a Cypher ``UNWIND ... MERGE`` query keyed on the given
    properties::

        UNWIND $props AS properties
        MERGE (n:Node { sid: properties.sid } )
        ON CREATE SET n = properties
        ON MATCH SET n += properties
        RETURN count(n) as cnt

    ``ON MATCH SET n += properties`` updates only the supplied properties
    and leaves others untouched. Pass the node property dicts as the
    query parameter (named 'props' unless ``property_parameter`` is set),
    e.g. ``graph.run(query, props=[{'node_id': 1}, ...])``.

    :param labels: Labels for the query.
    :type labels: list[str]
    :param merge_properties: Unique properties for the node.
    :type merge_properties: list[str]
    :param property_parameter: Optional parameter name, default 'props'.
    :type property_parameter: str
    :return: Query string.
    """
    param = property_parameter or 'props'
    label_string = ':'.join(labels)
    merge_string = ', '.join('{0}: properties.{0}'.format(prop)
                             for prop in merge_properties)
    return (
        f"UNWIND ${param} AS properties\n"
        f"MERGE (n:{label_string} {{ {merge_string} }} )\n"
        "ON CREATE SET n = properties\n"
        "ON MATCH SET n += properties\n"
        "RETURN count(n) as cnt"
    )
def add_pattern_exemptions(line, codes):
    """Append a flake8 ``# noqa`` exemption for *codes* to a source line."""
    # Comment lines are returned untouched (no trailing newline added).
    if line.startswith('#'):
        return line
    stripped = line.rstrip('\n')
    # A blanket "# noqa" already silences everything.
    if stripped.endswith('# noqa'):
        return stripped + '\n'
    original_length = len(stripped)
    exemptions = ','.join(sorted(set(codes)))
    if '# noqa: ' in stripped:
        # Extend an existing exemption list.
        stripped += ',{0}'.format(exemptions)
    elif stripped:
        # Empty lines never get a noqa comment.
        stripped += ' # noqa: {0}'.format(exemptions)
    # If adding the comment itself pushed the line past 79 columns,
    # exempt the line-length rule too.
    if len(stripped) > 79 and original_length <= 79:
        stripped += ',E501'
    return stripped + '\n'
def isinstance_all(ins, *tar):
    """Return True when every element of ``tar`` is an instance of ``ins``.

    :param ins: type (or tuple of types) to check against
    :param tar: values to test
    :return: True if all pass, False as soon as one fails
    """
    return all(isinstance(element, ins) for element in tar)
def get_unique_r_numbers(g2r):
    """Return the set of R numbers associated to at least one gene.

    :param g2r: dict mapping gene names to iterables of R numbers
    :return: frozenset of all R numbers appearing in g2r
    """
    return frozenset(reaction
                     for reactions in g2r.values()
                     for reaction in reactions)
def if_match(page, other_page):
    """Slide-similarity test: True when ``page`` begins with
    ``other_page``.
    """
    return page.startswith(other_page)
def convert_to_float(value_string, parser_info=None):
    """Try to make a float out of a string, logging a warning on failure.

    :param value_string: value to convert
    :param parser_info: optional dict with a 'parser_warnings' list that
        failure messages are appended to. A fresh dict is created per
        call when omitted — the original used a mutable default argument,
        so warnings silently accumulated across unrelated calls.
    :return: (float, True) on success, (value_string, False) on failure
    """
    if parser_info is None:
        parser_info = {'parser_warnings': []}
    try:
        value = float(value_string)
    except TypeError:
        parser_info['parser_warnings'].append('Could not convert: "{}" to float, TypeError'.format(value_string))
        return value_string, False
    except ValueError:
        parser_info['parser_warnings'].append('Could not convert: "{}" to float, ValueError'.format(value_string))
        return value_string, False
    return value, True
def search_binary(xs, target):
    """Find and return the index of ``target`` in the sorted sequence
    ``xs`` via binary search; return -1 when absent.
    """
    # Rewritten iteratively without the per-probe debug print the
    # original emitted to stdout on every iteration.
    lb, ub = 0, len(xs)
    while lb < ub:  # region of interest [lb, ub) non-empty
        mid_index = (lb + ub) // 2
        item_at_mid = xs[mid_index]
        if item_at_mid == target:
            return mid_index
        if item_at_mid < target:
            lb = mid_index + 1  # use upper half of ROI next time
        else:
            ub = mid_index      # use lower half of ROI next time
    return -1
def checkorder(positionIn):
    """Construct the transpose tuples mapping data to the standard
    (t, z, y, x) dimension order and back again.

    Usage: newOrder, inverseOrder = checkorder(positionIn)

    :param positionIn: array with the locations of longitude, latitude,
        level and time (None for missing axes) in the python shape sense
    :return: (newOrder, inverseOrder) transpose tuples
    """
    # Drop the None placeholders and reverse to get (t, z, y, x) order.
    new_order = tuple(pos for pos in reversed(positionIn) if pos is not None)
    # Invert the permutation: sort (axis, slot) pairs by axis, then read
    # off the original slots.
    pairs = sorted([axis, slot] for slot, axis in enumerate(new_order))
    inverse_order = tuple(slot for _, slot in pairs)
    return new_order, inverse_order
def bps_to_human(bps):
    """Convert a bits-per-second value to a human readable string."""
    if bps >= 1000000:
        return "%f Mbps" % (float(bps) / 1000000)
    if bps >= 100000:
        # NOTE(review): values in 1_000..99_999 fall through and print as
        # raw bps; the 100000 threshold looks like it may have been meant
        # to be 1000 — confirm before changing.
        return "%f Kbps" % (float(bps) / 1000)
    return "%u bps" % bps
def get_from_tree(tree, key_path):
    """Walk down a nested dict along ``key_path`` and return the value.

    Args:
        tree: a nested dict.
        key_path: iterable of keys to follow; a plain string is treated
            as a single key (not iterated character by character).
    """
    if isinstance(key_path, str):
        return tree[key_path]
    node = tree
    for key in key_path:
        node = node[key]
    return node
def remove_trailing_N_characters(sequence):
    """Strip the leading and trailing runs of 'N' characters that
    typically pad nucleotide sequences.

    :param sequence: sequence-like object whose str() form is inspected;
        the slice is taken from the original object, not its str() copy.
    :return: (trimmed_sequence, offset) where offset is the index of the
        first retained character.
    """
    text = str(sequence)
    start = len(text) - len(text.lstrip("N"))
    end = len(text.rstrip("N"))
    return (sequence[start:end], start)
def _numericalize_coefficients(raw_symbols, coefficients): """Replaces the symbols of coefficients in the expression string with values. If there is coefficient symbol in raw_symbols which is not in coefficients dict, it will remain symbolic in the expression string. Args: raw_symbols: List of context-free grammar symbols or strings. coefficients: Dict of coefficients values in expression string. {coefficient_symbol: value}. If not None, the values of the coefficients will replace the symbols of coefficients in the expression string. Returns: Expression string. """ if coefficients is None: coefficients = {} symbols = [] for symbol in map(str, raw_symbols): if symbol in coefficients: symbols.append(str(coefficients[symbol])) else: symbols.append(symbol) return ' '.join(symbols)
def matches(card1, card2):
    """True when the cards share a rank (index 0) or a suit (index 1)."""
    if card1[0] == card2[0]:
        return True
    return card1[1] == card2[1]
def parse_record(raw_record):
    """Parse whitespace-separated 'key:value' tokens into a dict."""
    fields = {}
    for token in raw_record.split():
        key, value = token.split(":")
        fields[key] = value
    return fields
def _GetResidentPagesJSON(pages_list): """Transforms the pages list to JSON object. Args: pages_list: (list) As returned by ParseResidentPages() Returns: (JSON object) Pages JSON object. """ json_data = [] for i in range(len(pages_list)): json_data.append({'page_num': i, 'resident': pages_list[i]}) return json_data
def gaussian_gradients(x, y, a, mu, sigma, eta):
    """KL loss gradients of neurons with tanh activation (~ Normal(mu, sigma)).

    Returns (delta_a, delta_b) — the gain and bias updates scaled by the
    learning rate eta. Expression structure is kept as-is so the floating
    point results are bit-identical.
    """
    variance = sigma**2
    delta_b = -eta * (-(mu / variance) + (y / variance) * (2 * variance + 1 - y**2 + mu * y))
    delta_a = (eta / a) + delta_b * x
    return delta_a, delta_b
def is_primitive(value):
    """Check whether ``value`` is exactly one of the python primitives
    int, float, bool, or str.

    Uses type() rather than isinstance() on purpose: subclasses (and the
    bool-is-int relationship) are judged by their exact type.
    """
    return type(value) in {int, float, bool, str}
def allpairs(x):
    """Return all possible pairs in sequence x.

    Each pair is (later_element, earlier_element). Condensed by Alex
    Martelli from this thread on comp.lang.python::

        http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
    """
    pairs = []
    for position, first in enumerate(x):
        for second in x[position + 1:]:
            pairs.append((second, first))
    return pairs
def do_simple_math(number1, number2, operator):
    """Apply a simple arithmetic operator to two numbers.

    :param number1: the first number
    :param number2: the second number
    :param operator: one of "*", "/", "+", "-", "^", "%" (string)
    :return: the result, or 0 for an unknown operator
    """
    # fixed: the original compared strings with `is`, which only works for
    # interned literals — a computed operator string (e.g. input()) could
    # silently fall through and return 0.
    operations = {
        "*": lambda a, b: a * b,
        "/": lambda a, b: a / b,
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "^": lambda a, b: a ** b,
        "%": lambda a, b: a % b,
    }
    operation = operations.get(operator)
    return operation(number1, number2) if operation else 0
def get_indices(num_indices, props, scan_idx=0, cycle_idx=0, beam_idx=0,
                IF_idx=0, record=0, trimmed=False):
    """Get the row and construct a tuple for a FITS data array index.

    The maximum is (beam, record, IF, 0, 0), which returns a spectrum or
    whatever is in the first column of the multi-dimensional array. FITS
    lists start with 1; for Python subtract 1 when the list is continuous.
    Used by class DSNFITSexaminer.

    :param num_indices: number of indices in the SPECTRUM array (3..6)
    :param props: table properties dict (needs "num cycles")
    :param scan_idx: index in list of SCAN numbers
    :param cycle_idx: index in list of CYCLE numbers
    :param beam_idx: index in list of BEAM axis values
    :param IF_idx: index in a list of IF numbers
    :param record: index in list of TIME axis values
    :param trimmed: drop the trailing 'RA' and 'dec' indices (always 0)
    :raises RuntimeError: for num_indices outside 3..6
    """
    row = props["num cycles"] * scan_idx + cycle_idx
    if num_indices < 3:
        raise RuntimeError("minimum data array index is 'channel, RA, dec'")
    if num_indices == 3:
        index_tuple = (row, 0, 0)                               # row, dec, RA
    elif num_indices == 4:
        index_tuple = (row, IF_idx, 0, 0)                       # row, IF, dec, RA
    elif num_indices == 5:
        index_tuple = (row, record, IF_idx, 0, 0)
    elif num_indices == 6:
        index_tuple = (row, beam_idx, record, IF_idx, 0, 0)
    else:
        raise RuntimeError("cannot have more that 6 axes in SPECTRUM array")
    # fixed: a stray `index_tuple = (0, 0)` after the chain clobbered every
    # computed tuple, making the function always return (0, 0) (or ()).
    if trimmed:
        return index_tuple[:-2]
    return index_tuple
def check_result(results, locations, box_size):
    """Verify that every true aneurysm lies inside a proposed box.

    For testing purposes.

    Parameters
    ----------
    results : list
        Centre coordinates of the proposed boxes of interest:
        [[x, y, z], [x, y, z], ...].
    locations : list
        Flat list of aneurysm coordinates, 3 values per aneurysm, so
        len(locations) % 3 == 0.  Empty means "no aneurysms".
    box_size : int
        Side length of the box each aneurysm must fall inside.

    Returns
    -------
    bool
        True when every aneurysm is inside some proposed box.
    int
        Number of aneurysms found.
    int
        Number of aneurysms missed.
    """
    if len(locations) == 0:
        return True, 0, 0
    half = box_size / 2
    hits = []
    for aneurysm in range(len(locations) // 3):
        base = aneurysm * 3
        target = locations[base:base + 3]
        # Found when some box centre is within half a side of the
        # aneurysm along every axis.
        hit = any(
            all(abs(centre[axis] - target[axis]) < half for axis in range(3))
            for centre in results
        )
        hits.append(hit)
    if all(hits):
        return True, len(hits), 0
    return False, sum(hits), hits.count(False)
def _tuples_to_dict(meta, header, tagged_parts): """ Convert a dictionnary and list of tuples into dictionnary """ structured_message = {} structured_message["meta"] = meta structured_message["structured_text"] = {} structured_message["structured_text"]["header"] = header structured_text = [] for part, tag in tagged_parts: dict_message = {} dict_message["part"] = part dict_message["tags"] = tag structured_text.append(dict_message) structured_message["structured_text"]["text"] = structured_text return structured_message
def factorial(n):
    """Return n! for a non-negative integer n.

    :param n: non-negative integer
    :return: the factorial of n (1 for n == 0)
    :raises ValueError: if n is negative (the original recursed forever)
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    # Iterative form avoids hitting the recursion limit for large n.
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def copy_list(original: list) -> list:
    """Return a deep copy of an arbitrarily nested list.

    Non-list elements are shared, nested lists are copied recursively.
    Slower than list slicing, faster than copy.deepcopy.
    """
    return [copy_list(item) if isinstance(item, list) else item
            for item in original]
def pretty_rotvec_op(ops, join_str='*', format_str='(Rx:{:.1f}Ry:{:.1f}Rz:{:.1f})'):
    """Render a list of rotation ops as a joined string.

    ops: list whose items are either strings (kept verbatim) or
    (Rx, Ry, Rz) triples formatted with ``format_str``.
    """
    rendered = []
    for op in ops:
        if isinstance(op, str):
            rendered.append(op)
        else:
            rendered.append(format_str.format(*op))
    return join_str.join(rendered)
def _tef_P(P): """Define the boundary between Region 3e-3f, T=f(P) Parameters ---------- P : float Pressure [MPa] Returns ------- T : float Temperature [K] References ---------- IAPWS, Revised Supplementary Release on Backward Equations for Specific Volume as a Function of Pressure and Temperature v(p,T) for Region 3 of the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam, http://www.iapws.org/relguide/Supp-VPT3-2016.pdf, Eq. 3 Examples -------- >>> _tef_P(40) 713.9593992 """ return 3.727888004*(P-22.064)+647.096
def list_diff(old_one, new_one, key):
    """Diff two lists of config dicts, pairing items on an index key.

    Items from ``old_one`` and ``new_one`` whose ``item[key]`` values match
    are classified as 'modified' (contents differ) or 'unchanged' (equal);
    unmatched new items become 'created' and unmatched old items 'deleted'.

    NOTE(review): assumes ``key`` values are unique within each list — a
    duplicated key would cause a second ``.remove()`` of the same item and
    raise ValueError; confirm with callers.

    :param old_one: previous config items (each must support ``item[key]``)
    :param new_one: new config items
    :param key: index key used to pair old and new items
    :return: dict with 'created', 'deleted', 'modified', 'unchanged' lists
    """
    result = {'created': [], 'deleted': [], 'modified': [], 'unchanged': []}
    # Work on copies so matched items can be removed; whatever remains in
    # new_list was created and whatever remains in old_list was deleted.
    old_list = list(old_one)
    new_list = list(new_one)
    for old_item in old_one:
        for new_item in new_one:
            if old_item[key] == new_item[key]:
                if old_item != new_item:
                    # Same key, different content -> modified (keep both sides).
                    modified_item = {'new': new_item, 'old': old_item}
                    result['modified'].append(modified_item)
                else:
                    result['unchanged'].append(new_item)
                old_list.remove(old_item)
                new_list.remove(new_item)
    result['created'] = new_list
    result['deleted'] = old_list
    return result
def keymap_replace_substrings(target_str, mappings=None):
    """Replace parts of a string based on a dictionary.

    This function takes a dictionary of replacement mappings. For example,
    if I supplied the string "Hello world.", and the mappings
    {"H": "J", ".": "!"}, it would return "Jello world!".

    Warning: replacements are made iteratively, meaning multiple
    replacements could occur.

    :param target_str: (str) The string to replace characters in.
    :param mappings: (dict) Replacement mappings; defaults to the bracket /
        newline name map below.
    :return: (str) String with values replaced
    """
    # BUG FIX: the default was a mutable dict in the signature (shared
    # across calls and mutable by callers); use a None sentinel instead.
    if mappings is None:
        mappings = {"<": "left_angle_bracket",
                    ">": "right_angle_bracket",
                    "[": "left_square_bracket",
                    "]": "right_square_bracket",
                    "{": "left_curly_bracket",
                    "}": "right_curly_bracket",
                    "\n": "newline"}
    for substring, replacement in mappings.items():
        target_str = target_str.replace(substring, replacement)
    return target_str
def get_total_blkio(stat):
    """ Gets the total blkio out of the docker stat

    :param stat: The docker stat (dict with
        stat['blkio_stats']['io_service_bytes_recursive'])
    :return: The 'Total' blkio value, or 0 when no 'Total' entry exists
    """
    io_list = stat['blkio_stats']['io_service_bytes_recursive']
    # Scan for the first 'Total' entry (matches the original filter).
    for entry in io_list:
        if entry['op'] == 'Total':
            return entry['value']
    # BUG FIX: the original implicitly returned None when io_list was
    # empty; always fall back to 0.
    return 0
def wall_dimensions(walls):
    """ Given a list of walls, returns a tuple of (width, height).

    :param walls: iterable of (x, y) wall coordinates
    :return: (width, height) covering every wall
    """
    # BUG FIX: the original used max(walls)[0]/[1], i.e. the
    # lexicographically largest wall, so height was the y of the max-x
    # wall rather than the overall maximum y.  That is only correct when
    # the bottom-right corner is itself a wall.  Take each axis's max.
    width = max(x for x, _ in walls) + 1
    height = max(y for _, y in walls) + 1
    return (width, height)
def is_pal(x):
    """Assumes x is a list
    Returns True if the list is a palindrome; False otherwise"""
    # BUG FIX: the original did `temp = x` (an alias, not a copy) and
    # `temp.reverse` without calling it, so it always returned True.
    # Compare against a reversed copy instead; x is never mutated.
    return x == x[::-1]
def clean_operations(operations):
    """Ensure that all parameters with "in" equal to "path" are also
    required as required by the OpenAPI specification, as well as
    normalizing any references to global parameters.  See
    https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#parameterObject.

    :param dict operations: Dict mapping status codes to operations
    """
    def _normalize(param):
        # Strings are names of global parameters; turn them into $refs.
        if isinstance(param, dict):
            return param
        return {'$ref': '#/parameters/' + param}

    for op in (operations or {}).values():
        if 'parameters' not in op:
            continue
        params = op['parameters']
        for param in params:
            # Path parameters must always be marked required.
            if isinstance(param, dict) and param.get('in') == 'path':
                param['required'] = True
        op['parameters'] = [_normalize(param) for param in params]
def perdict(x=-9, y=None, guess=10):
    """
    task 0.5.4
    pass x and y into equation and guess the value

    :param x: selects the branch: x + 10 < 0 uses 2**(y + 1/2),
        otherwise 2**(y - 1/2)
    :param y: exponent base value; defaults to 1/2 when None
    :param guess: value to compare the result against
    :return: True if the computed value equals guess, else False
    """
    half = float(float(1) / float(2))
    if y is None:
        y = half
    # BUG FIX: the original wrote
    #   `if 2**(y + half) if x + 10 < 0 else 2**(y - half) == guess:`
    # which Python parses as `A if cond else (B == guess)`, so the
    # comparison was only applied to one branch.  Compute the value
    # first, then compare it to guess.
    value = 2 ** (y + half) if x + 10 < 0 else 2 ** (y - half)
    return value == guess
def _get_wind_direction(degrees): """ Determines the direction the wind is blowing from based off the degree passed from the API 0 degrees is true north """ wind_direction_text = "N" if 5 <= degrees < 40: wind_direction_text = "NNE" elif 40 <= degrees < 50: wind_direction_text = "NE" elif 50 <= degrees < 85: wind_direction_text = "ENE" elif 85 <= degrees < 95: wind_direction_text = "E" elif 95 <= degrees < 130: wind_direction_text = "ESE" elif 130 <= degrees < 140: wind_direction_text = "SE" elif 140 <= degrees < 175: wind_direction_text = "SSE" elif 175 <= degrees < 185: wind_direction_text = "S" elif 185 <= degrees < 220: wind_direction_text = "SSW" elif 220 <= degrees < 230: wind_direction_text = "SW" elif 230 <= degrees < 265: wind_direction_text = "WSW" elif 265 <= degrees < 275: wind_direction_text = "W" elif 275 <= degrees < 310: wind_direction_text = "WNW" elif 310 <= degrees < 320: wind_direction_text = "NW" elif 320 <= degrees < 355: wind_direction_text = "NNW" return wind_direction_text
def mapped_included(inc: dict) -> dict:
    """Makes included more easily searchable by creating a dict keyed on
    the (type, id) pair of each entry.  The values can be used directly
    for relationships nodes.

    ie. {
        ('people', '12'): {
            'id': '12, 'type': 'people',
            'attributes': { 'name': 'John' }
        },
        ('tag', '11'): { ... }
    }
    """
    if not inc:
        return {}
    return {(entry.get('type'), entry.get('id')): entry for entry in inc}
def check_two_shapes_need_broadcast(shape_x, shape_y):
    """Check two shapes need broadcast.

    Returns False when the shapes are already equal, True when shape_y can
    be broadcast to shape_x, and raises ValueError when it cannot.
    """
    err = ValueError(f"For 'tensor setitem with tensor', the value tensor shape "
                     f"{shape_y} could not broadcast the required updates shape {shape_x}.")
    if len(shape_y) > len(shape_x):
        raise err
    # Align trailing dimensions; each y dim must equal the x dim or be 1.
    for dim_y, dim_x in zip(reversed(shape_y), reversed(shape_x)):
        if dim_y > dim_x:
            raise err
        if dim_y < dim_x and dim_y != 1:
            raise err
    return shape_y != shape_x
def G1DListMergeEdges(eda, edb):
    """ Get the merge between the two individual edges

    :param eda: the edges of the first G1DList genome
    :param edb: the edges of the second G1DList genome
    :rtype: dict mapping each node to the adjacencies present in both
    """
    merged = {}
    for node, neighbors in eda.items():
        if node not in edb:
            continue
        # Keep only adjacencies that both edge maps agree on.
        common = [adj for adj in neighbors if adj in edb[node]]
        if common:
            merged[node] = common
    return merged
def gcd(a: int, b: int) -> int:
    """
    Return the GCD of a and b using Euclid's algorithm.

    :param a: First integer.
    :param b: Second integer.
    :return: The Greatest Common Divisor between two given numbers.
    """
    remainder, result = a, b
    # Euclid: repeatedly replace the pair with (b mod a, a) until a is 0.
    while remainder != 0:
        remainder, result = result % remainder, remainder
    return result
def cross_prod(a, b):
    """Return the cross product of two 3-component vectors a and b
    as a list [x, y, z].
    """
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return [ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx]
def destination_name(path, delimiter="__"):
    """
    Returns a cli option destination name from attribute path

    **Arguments**

    - path (`list`): attribute path
    - delimiter (`str`): delimiter for nested attributes

    **Returns**

    cli destination name (`str`)
    """
    # The f-string wrapper in the original was redundant; join suffices.
    return delimiter.join(path)
def get_smallest_compound_id(compounds_identifiers):
    """
    Return the smallest KEGG compound identifier from a list.

    KEGG identifiers may map to compounds, drugs or glycans prefixed
    respectively with "C", "D", and "G" followed by at least 5 digits. We
    choose the lowest KEGG identifier with the assumption that several
    identifiers are due to chirality and that the lower one represents the
    more common form.

    Parameters
    ----------
    compounds_identifiers : list
        A list of mixed KEGG identifiers.

    Returns
    -------
    str
        The KEGG compound identifier with the smallest number.

    Raises
    ------
    ValueError
        When compound_identifiers contains no KEGG compound identifiers.
    """
    compounds = [ident for ident in compounds_identifiers
                 if ident.startswith("C")]
    # min on an empty list raises ValueError, as documented above.
    return min(compounds, key=lambda ident: int(ident[1:]))
def getMedianpoints(lst):
    """Return the index (or pair of indexes) of the median of a list.

    :param lst: list of numerics whose median index/indexes are wanted \t
    :type lst: [numerics] \n
    :returns: (median_index, -1) for odd lengths — note the single-element
        case returns (1, -1), matching the original behaviour — or the
        two middle indexes (left, right) for even lengths. \t
    :rtype: : (int, int) \n
    """
    n = len(lst)
    if n == 1:
        return (1, -1)
    if n % 2:
        # Odd length: one middle index, second slot unused.
        return ((n - 1) // 2, -1)
    # Even length: the two middle indexes.
    return (n // 2 - 1, n // 2)
def rgb(red, green, blue):
    """
    Calculate the palette index of a color in the 6x6x6 color cube.

    The red, green and blue arguments may range from 0 to 5.
    """
    # The cube starts at palette index 16 (after the 16 system colors).
    CUBE_OFFSET = 16
    return CUBE_OFFSET + 36 * red + 6 * green + blue
def _create_response(message, response_type, sub_response_type=None): """ :param message: :param response_type: string :param sub_response_type: string :return: """ r = {"message": message, "type": response_type} if sub_response_type: r["sub_type"] = sub_response_type return r
def memory_reallocation_v2(s): """memory reallocation.""" cycles = 0 banks = s[:] ll = len(banks) seen = {} uk = ''.join(list(map(str, banks))) while uk not in seen: seen[uk] = cycles key_chosen = banks.index(max(banks)) value_chosen = banks[key_chosen] banks[key_chosen] = 0 while value_chosen > 0: value_chosen -= 1 key_chosen += 1 if key_chosen >= ll: key_chosen = 0 banks[key_chosen] += 1 uk = ''.join(list(map(str, banks))) cycles += 1 return cycles - seen[uk]
def jaccard_index(nw, src, dst):
    """
    Counts Jaccard index between src and dst ports.

    Args:
        nw: Network instance (must provide a ``neighbors(node)`` method)
        src: Source Port
        dst: Destination Port

    Returns:
        Jaccard Index, or 0 when it is undefined (empty union) or a
        neighbor lookup fails.
    """
    try:
        # Compute each neighbor set once (the original called
        # nw.neighbors() four times).
        src_neighbors = set(nw.neighbors(src))
        dst_neighbors = set(nw.neighbors(dst))
    except Exception:
        # BUG FIX: narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt); keep the best-effort fallback for
        # lookup failures such as a node missing from the network.
        return 0
    union = src_neighbors | dst_neighbors
    if not union:
        # Both nodes isolated: index undefined, report 0 explicitly
        # instead of relying on catching ZeroDivisionError.
        return 0
    return len(src_neighbors & dst_neighbors) / len(union)
def _pcapname_to_guid(pcap_name): """Converts a Winpcap/Npcap pcpaname to its guid counterpart. e.g. \\DEVICE\\NPF_{...} => {...} """ if "{" in pcap_name: return "{" + pcap_name.split("{")[1] return pcap_name
def get_apiorder(ndim, latitude_dim, longitude_dim):
    """
    Get the dimension ordering for a transpose to the required API
    dimension ordering: latitude first, longitude second, all remaining
    dimensions following in their original order.

    **Arguments:**

    *ndim*
        Total number of dimensions to consider.

    *latitude_dim*
        Index of the latitude dimension.

    *longitude_dim*
        Index of the longitude dimension.

    **Returns:**

    *apiorder*
        A list of indices corresponding to the order required to conform
        to the specified API order.

    *reorder*
        The inverse indices corresponding to *apiorder*.
    """
    remaining = [dim for dim in range(ndim)
                 if dim not in (latitude_dim, longitude_dim)]
    apiorder = [latitude_dim, longitude_dim] + remaining
    # Inverse permutation: where each original axis ends up.
    reorder = [apiorder.index(dim) for dim in range(ndim)]
    return apiorder, reorder