content
stringlengths
42
6.51k
def CreatePattern(week_cap, days):
    """Smooth the weekly capacity over the selected number of days.

    Evenly spread the capacity across the week, with any excess added in
    increments of one starting at the first day of the week.
    """
    base = int(week_cap / days)
    pattern = [base] * days
    # Hand out the remainder one unit at a time, front of the week first.
    for slot in range(week_cap % days):
        pattern[slot] += 1
    return pattern
def GetElapsedMs(start_time, end_time):
    """Return milliseconds elapsed between |start_time| and |end_time|.

    Args:
      start_time: seconds as a float (or string representation of float).
      end_time: seconds as a float (or string representation of float).

    Return:
      milliseconds elapsed as integer.
    """
    delta_seconds = float(end_time) - float(start_time)
    return int(delta_seconds * 1000)
def generate_url(date: str) -> str:
    """Build the chart download URL for a date string in format YYYY-mm-dd."""
    base_url = 'https://www.rollingstone.com/charts/albums/'
    return f'{base_url}{date}/'
def json_api_patch(original, patch, recurse_on=frozenset()):
    """Patch a dictionary using the JSON API semantics.

    Patches are applied using the following algorithm:
    - patch is a dictionary representing a JSON object. JSON `null` values
      are represented by None.
    - For fields that are not in `recurse_on`:
      - If patch contains {field: None} the field is erased from `original`.
      - Otherwise `patch[field]` replaces `original[field]`.
    - For fields that are in `recurse_on`:
      - If patch contains {field: None} the field is erased from `original`.
      - If patch contains {field: {}} the field is left untouched in
        `original`; if the field does not exist it is not created.
      - Otherwise patch[field] is treated as a patch and applied to
        `original[field]`, potentially creating the new field.

    :param original: dict, the dictionary to patch
    :param patch: dict, the patch to apply. Elements pointing to None are
        removed, other elements are replaced.
    :param recurse_on: set of strings, the names of fields for which the
        patch is applied recursively.
    :return: the updated dictionary
    :rtype: dict
    """
    patched = original.copy()
    for key, value in patch.items():
        if value is None:
            # JSON null erases the field (no error if absent).
            patched.pop(key, None)
        elif key not in recurse_on:
            patched[key] = value
        elif len(value) != 0:
            # Bug fix: propagate recurse_on into the recursion; the original
            # dropped it, so nested recursive fields were replaced wholesale
            # instead of being patched recursively as documented.
            patched[key] = json_api_patch(original.get(key, {}), value, recurse_on)
    return patched
def breadcrumb(*args):
    """Render a breadcrumb trail.

    Args:
        args (list): list of urls and url name followed by the final name

    Example: url1, name1, url2, name2, name3
    """
    head = args[:-1]
    # Pair alternating (url, name) entries; an odd trailing item is dropped,
    # matching zip's shortest-input behaviour.
    urls = list(zip(head[::2], head[1::2]))
    return {'urls': urls, 'page': args[-1]}
def label_breakdown(data):
    """Find the label breakdown in data.

    Rows are kept only if column 3 is truthy; the label in column 7 is
    stored per id (column 1), last occurrence winning.  Returns the counts
    as a "neg / neu / pos" string.
    """
    labels = {row[1]: row[7] for row in data if row[3]}
    neg = neu = pos = 0
    for label in labels.values():
        if label == -1:
            neg += 1
        elif label == 0:
            neu += 1
        else:
            pos += 1
    return "{} / {} / {}".format(neg, neu, pos)
def encode(data):
    """Encodes a netstring.

    Returns the data encoded as a netstring: b"<len>:<data>,".

    data -- A bytes-like value you want to encode as a netstring.
    """
    if not isinstance(data, (bytes, bytearray)):
        raise ValueError("data should be of type 'bytes'")
    length_prefix = str(len(data)).encode('utf8')
    return length_prefix + b':' + data + b','
def match_in_dat(query, target):
    """Check if given query is matched in target object data.

    :param query: query to match.
    :param target: object to match query.
    :return: bool (True if every query item appears in target, else False)
    """
    shared_items = set(query.items()) & set(target.items())
    return dict(shared_items) == query
def merge_list_of_lists(list_of_lists):
    """Merge a list of lists into one flat list.

    list_of_lists: a list that contains many lists as items
    Return: the merged (flattened) list
    """
    # Performance fix: sum(list_of_lists, []) re-copies the accumulator for
    # every inner list, making it O(n^2); a flattening comprehension is O(n).
    return [item for sublist in list_of_lists for item in sublist]
def convert_feature_to_vector(feature_list):
    """Wrap a feature list as a single 1-indexed {index: value} dict in a list.

    :param feature_list: iterable of feature values
    :return: one-element list holding the {1-based index: feature} mapping
    """
    mapping = {position: feature
               for position, feature in enumerate(feature_list, start=1)}
    return [mapping]
def _get_short_satellite_code(platform_code):
    """
    Get shortened form of satellite, as used in GA Dataset IDs. (eg. 'LS7')

    :param platform_code:
    :return:

    >>> _get_short_satellite_code('LANDSAT_8')
    'LS8'
    >>> _get_short_satellite_code('LANDSAT_5')
    'LS5'
    >>> _get_short_satellite_code('LANDSAT_7')
    'LS7'
    >>> _get_short_satellite_code('AQUA')
    'AQUA'
    >>> _get_short_satellite_code('TERRA')
    'TERRA'
    >>> _get_short_satellite_code('Invalid')
    Traceback (most recent call last):
    ...
    ValueError: Unknown platform code 'Invalid'
    """
    if platform_code.startswith('LANDSAT_'):
        # Keep whatever follows the last underscore (the satellite number).
        return 'LS' + platform_code.rsplit('_', 1)[-1]
    if platform_code in {'AQUA', 'TERRA', 'NPP'}:
        return platform_code
    raise ValueError('Unknown platform code %r' % platform_code)
def max_sub_array(nums):
    """Return the maximum contiguous-subarray sum of |nums| (Kadane).

    Returns 0 if nums is None or an empty list.  If every element is
    negative, returns the single largest element.

    Time Complexity: O(n)
    Space Complexity: O(1)
    """
    if not nums:
        # Covers both None and the empty list.
        return 0
    best = 0
    running = 0
    for value in nums:
        # Bug fix: reset the running sum when it drops below zero.  The
        # original kept accumulating across a negative prefix, so e.g.
        # [-2, 5] wrongly returned 3 instead of 5.
        running = max(running + value, 0)
        best = max(best, running)
    if best <= 0:
        # All elements negative: the best subarray is the single largest one.
        return max(nums)
    return best
def seperate(aln_dict, leaf_dict):
    """Separate the query sequences from the reference sequences.

    Parameters
    ----------
    aln_dict : sequence dictionary with taxon label keys
    leaf_dict : sequence dictionary with leaf label keys
        (queries are not in the backbone tree)

    Returns
    -------
    (ref, query) : dicts of reference and query sequences, taxon label keys
    """
    ref = {label: seq for label, seq in aln_dict.items() if label in leaf_dict}
    query = {label: seq for label, seq in aln_dict.items() if label not in leaf_dict}
    return ref, query
def is_iterable(obj):
    """Test whether an object is iterable (i.e. ``iter(obj)`` succeeds).

    Returns:
        Boolean
    """
    try:
        iter(obj)
    except TypeError:
        # Bug fix: iter() raises TypeError for non-iterables; the original
        # bare `except` also swallowed KeyboardInterrupt/SystemExit.
        return False
    return True
def factorial(x):
    """
    Input:
        x: some integer number
    Output:
        factorial of input number.
    """
    result = 1
    multiplier = 2
    while multiplier <= x:
        result *= multiplier
        multiplier += 1
    return result
def to_field_name(value):
    """Join space-separated words into one camelCase word.

    The first word is lower-cased and each following word has its first
    letter capitalized, e.g. 'text classificationInfo' becomes
    'textClassificationInfo'.  Used in templates to build the correct
    knowledge base link for fields or components.
    """
    words = value.split(' ')
    if not words:
        # Defensive: str.split(' ') never returns [], but keep the guard.
        return ''
    result = words[0].lower()
    for word in words[1:]:
        # Bug fix: consecutive spaces yield empty items; the original
        # crashed with IndexError on word[0] in that case.
        if word:
            result += word[0].upper() + word[1:]
    return result
def anti_vowel(text):
    """takes a string text and returns the text with all of the vowels removed."""
    return "".join(ch for ch in text if ch not in "aeiouAEIOU")
def clean_chars(command: str) -> str:
    """
    replaces character with mongodb characters format

    :param command: discord command to be converted
    :type command: str
    :return: converted command
    :return type: str
    """
    # str.replace is already a no-op when the character is absent, so the
    # original's `in` pre-checks are unnecessary.
    return command.replace(".", "(Dot)").replace("$", "(Dollar_Sign)")
def get_second_of_day(t):
    """Convert a seconds-since-epoch timestamp to the number of seconds
    elapsed on that (UTC) day.

    For example, 1619891033 represents Saturday, May 1, 2021 5:43:53 PM UTC;
    this converts to 63833 of the 86400 seconds in a day.
    """
    seconds_per_day = 24 * 3600
    return t % seconds_per_day
def merge(arr1: list, arr2: list) -> list:
    """Merge two sorted arrays for merge sort.

    Both input lists are emptied as a side effect, matching the original
    behaviour, but the merge now runs in O(n) instead of O(n^2):
    list.pop(0) shifts every remaining element on each call.

    Args:
        arr1 (list): the first array
        arr2 (list): the second array

    Returns:
        list: the merged array
    """
    merged = []
    i = j = 0
    # Compare the current heads of both arrays (stable: ties take arr1 first).
    while i < len(arr1) and j < len(arr2):
        if arr1[i] > arr2[j]:
            merged.append(arr2[j])
            j += 1
        else:
            merged.append(arr1[i])
            i += 1
    # One of the arrays is exhausted; append the remainder of the other.
    merged.extend(arr1[i:])
    merged.extend(arr2[j:])
    # Preserve the original side effect of draining both inputs.
    arr1.clear()
    arr2.clear()
    return merged
def find_bound_tuple(a, b, bounds):
    """If a and b are both included in a bounds tuple, return it.

    Otherwise return None.
    """
    def covers(span, num):
        """True if num lies within span's inclusive [low, high] range."""
        return span[0] <= num <= span[1]

    for span in bounds:
        if covers(span, a) and covers(span, b):
            return span
    return None
def listToSqlStr(values):
    """Converts list items into SQL list sequence.

    :param list values: list of value to be converted into SQL list sequence
    :return: SQL list values representation
    :rtype: str

    >>> listToSqlStr([4, 5, "Ahoj"])
    "(4, 5, 'Ahoj')"
    """
    # Render via repr and swap delimiters; note this also rewrites brackets
    # and double quotes that appear *inside* values, as the original did.
    text = str(values)
    for old, new in (("[", "("), ("]", ")"), ('"', "'")):
        text = text.replace(old, new)
    return text
def parse_clnsig(acc, sig, revstat, transcripts):
    """Get the clnsig information

    Handles two ClinVar formats: the new format (integer accession, '/'- and
    ','-separated fields) and the old format ('|'-separated parallel groups).
    Falls back to per-transcript 'clinsig' annotations when no accession is
    given.

    Args:
        acc(str): The clnsig accession number, raw from vcf
        sig(str): The clnsig significance score, raw from vcf
        revstat(str): The clnsig revstat, raw from vcf
        transcripts(iterable(dict))

    Returns:
        clnsig_accsessions(list): A list with clnsig accessions
    """
    clnsig_accsessions = []
    if acc:
        # New format of clinvar allways have integers as accession numbers
        try:
            acc = int(acc)
        except ValueError:
            # Non-numeric accession: treat as the old '|'-separated format below.
            pass
        # There are sometimes different separators so we need to check which
        # one to use
        if isinstance(acc, int):
            revstat_groups = []
            if revstat:
                # Review-status terms are comma separated; strip leading '_'
                # left over from multi-word terms.
                revstat_groups = [rev.lstrip('_') for rev in revstat.split(',')]
            sig_groups = []
            if sig:
                for significance in sig.split('/'):
                    # Keep only the first two '_'-separated words of each term,
                    # joined with a space (e.g. 'likely_pathogenic' -> 'likely pathogenic').
                    splitted_word = significance.split('_')
                    sig_groups.append(' '.join(splitted_word[:2]))
            for sign_term in sig_groups:
                clnsig_accsessions.append({
                    'value': sign_term,
                    'accession': int(acc),
                    'revstat': ', '.join(revstat_groups),
                })
        else:
            # There are sometimes different separators so we need to check which
            # one to use
            # Old format: parallel '|'-separated groups, each holding
            # comma-separated parallel entries.
            acc_groups = acc.split('|')
            sig_groups = sig.split('|')
            revstat_groups = revstat.split('|')
            for acc_group, sig_group, revstat_group in zip(acc_groups, sig_groups, revstat_groups):
                accessions = acc_group.split(',')
                significances = sig_group.split(',')
                revstats = revstat_group.split(',')
                for accession, significance, revstat in zip(accessions, significances, revstats):
                    # NOTE(review): here 'value' is the *numeric* significance
                    # code while 'accession' stays a string -- the opposite of
                    # the new-format branch above.  Confirm this asymmetry is
                    # intentional.
                    clnsig_accsessions.append({
                        'value': int(significance),
                        'accession': accession,
                        'revstat': revstat,
                    })
    elif transcripts:
        # No accession at all: collect the distinct per-transcript
        # 'clinsig' annotations instead.
        clnsig = set()
        for transcript in transcripts:
            for annotation in transcript.get('clinsig', []):
                clnsig.add(annotation)
        for annotation in clnsig:
            clnsig_accsessions.append({'value': annotation})
    return clnsig_accsessions
def encode(sequences, converter=None):
    """Encode sequences of filenames as integer ids, building the converter as it goes.

    :param sequences: iterable of sequences of filenames
    :param converter: optional list mapping index -> filename; defaults to a
        fresh ``["<None>", "<Unknown>"]`` per call.  A caller-supplied list
        is extended in place, as before.
    :return: (encoded sequences, converter)
    """
    if converter is None:
        # Bug fix: the original used a mutable default list, so filenames
        # appended during one call silently leaked into every later call.
        converter = ["<None>", "<Unknown>"]
    newSeqs = []
    for seq in sequences:
        newSeq = []
        for filename in seq:
            if filename not in converter:
                converter.append(filename)
            newSeq.append(converter.index(filename))
        newSeqs.append(newSeq)
    return newSeqs, converter
def retype(dictobj, dict_type):
    """Recursively rebuild a dictionary as dict_type and return the copy.

    Nested dicts are converted too; non-dict values are shared, not copied.
    Can also be used instead of copy.deepcopy() for plain dictionaries.
    """
    return dict_type(
        (key, retype(val, dict_type) if isinstance(val, dict) else val)
        for key, val in dictobj.items()
    )
def MarineRegionsOrg_LonghurstProvinceFileNum2Province(input, invert=False, rtn_dict=False):
    """Get the Longhurst province

    Parameters
    -------
    input (str): input string to use as key to return dictionary value
    invert (float): reverse the key/pair of the dictionary
    rtn_dict (bool): return the entire dictionary.

    Returns
    -------
    (str)

    Notes
    -----
     - This is listing order of the shape file from
       http://www.marineregions.org/sources.php#longhurst
    """
    # Province codes in shape-file order; index == file number.
    provinces = [
        'BPLR', 'ARCT', 'SARC', 'NADR', 'GFST', 'NASW', 'NATR', 'WTRA',
        'ETRA', 'SATL', 'NECS', 'CNRY', 'GUIN', 'GUIA', 'NWCS', 'MEDI',
        'CARB', 'NASE', 'BRAZ', 'FKLD', 'BENG', 'MONS', 'ISSG', 'EAFR',
        'REDS', 'ARAB', 'INDE', 'INDW', 'AUSW', 'BERS', 'PSAE', 'PSAW',
        'KURO', 'NPPF', 'NPSW', 'TASM', 'SPSG', 'NPTG', 'PNEC', 'PEQD',
        'WARM', 'ARCH', 'ALSK', 'CCAL', 'CAMR', 'CHIL', 'CHIN', 'SUND',
        'AUSE', 'NEWZ', 'SSTC', 'SANT', 'ANTA', 'APLR',
    ]
    num2prov = dict(enumerate(provinces))
    if invert:
        num2prov = {province: num for num, province in num2prov.items()}
    if rtn_dict:
        return num2prov
    return num2prov[input]
def _join_dicts(*dicts):
    """Join dictionaries together, while checking for key conflicts.

    :raises AssertionError: if two of the input dicts share a key.
    """
    key_pool = set()
    for _d in dicts:
        new_keys = set(_d.keys())
        # Raise explicitly instead of `assert False`, which is silently
        # stripped under `python -O`.  AssertionError is kept so existing
        # callers that catch it keep working.  (Also drops the unused local
        # that the original assigned.)
        if key_pool & new_keys:
            raise AssertionError(
                "ERROR: dicts to be joined have overlapping keys. Common hint: Have you specified your scheme dicts properly?")
        key_pool.update(new_keys)
    ret = {}
    for _d in dicts:
        ret.update(_d)
    return ret
def get_closest_dist(pts):
    """Return the Manhattan distance from (0, 0) of the closest point.

    Args:
        pts: a list of (x, y) tuples

    Returns the manhattan distance from (0,0) of the closest pt in pts.
    """
    return min(abs(x) + abs(y) for x, y in pts)
def is_bool(value):
    """Must be of type Boolean"""
    result = isinstance(value, bool)
    return result
def is_leapyear(year):
    """
    determines whether a given year is a leap year

    :param year: year to check (numeric)
    :return: boolean
    """
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
def percent(numerator, denominator):
    """
    :param numerator: float
        Numerator of fraction
    :param denominator: float
        Denominator of fraction
    :return: str
        Fraction as percentage (truncated to an integer); "0%" when the
        denominator is zero
    """
    if denominator == 0:
        return "0%"
    value = int(numerator / float(denominator) * 100)
    return str(value) + "%"
def substitute_nested_terms(raw_query, substitutions):
    """
    This function searches for keywords immediately followed by a dot ('.')
    that is not within double quotes and appends "_nested" to found keywords

    :param raw_query:
    :param substitutions: iterable of keywords to look for
    :return: Substituted raw_query
    """
    subbed_raw_terms = raw_query
    in_quotes = False
    # Scan right-to-left so that insertions never shift positions that are
    # still to be examined.
    cursor = len(raw_query) - 1
    # NOTE(review): the loop stops at cursor == 2, so a dot at index 1
    # (single-character keyword at the very start) is never examined --
    # confirm whether this off-by-one is intentional.
    while cursor > 1:
        if subbed_raw_terms[cursor] == '.' and not in_quotes:
            match = None
            for field in substitutions:
                # Does this keyword end immediately before the dot?
                if subbed_raw_terms[cursor - len(field):cursor] == field:
                    match = field
                    break
            if match is not None:
                # Insert "_nested" between the keyword and the dot.
                subbed_raw_terms = subbed_raw_terms[:cursor] + "_nested" + subbed_raw_terms[cursor:]
        else:
            # Track double-quoted regions; a backslash-escaped quote does
            # not toggle the state.
            if subbed_raw_terms[cursor] == '"' and subbed_raw_terms[cursor - 1] != "\\":
                in_quotes = not in_quotes
        cursor -= 1
    return subbed_raw_terms
def scale_range(value):
    """Scale a value from 0-320 (light range) to 0-9 (NeoPixel range).

    Allows remapping light value to pixel position."""
    fraction = value / 320
    return round(fraction * 9)
def associate_prefix(firstname, lastname):
    """Prepend everything after the first space-delineated word in
    [firstname] to [lastname].

    Returns (first word, prefixed last name).
    """
    if ' ' in firstname:
        name, prefix = firstname.split(' ', 1)  # split on first space
    else:
        name, prefix = firstname, ''
    # Bug fix: the original used `prefix is not ''` -- an identity
    # comparison against a literal, which is a SyntaxWarning on modern
    # Python and only worked by accident of string interning.
    last = prefix + ' ' + lastname if prefix else lastname
    return name, last
def __percent_format(x, pos=0):
    """Tick formatter to render a fraction as a whole percentage string."""
    percentage = 100 * x
    return '%1.0f%%' % percentage
def DemoNode(in_):
    """
    +-----------+
    |  DemoNode |
    |-----------|
    o in_<>     |
    |     out<> o
    +-----------+
    """
    return dict(out=in_)
def calc_primers(volume, primerinitial, primerfinal, samples):
    """primer calculation

    Scales the volume by the final/initial primer concentration ratio for
    the given number of samples, rounded to one decimal place.
    """
    per_sample = (volume / primerinitial) * primerfinal
    return round(per_sample * samples, 1)
def _nest_level(pl: list) -> int:
    """Compute the nesting depth of a list recursively (0 for non-lists)."""
    if not isinstance(pl, list):
        return 0
    return 1 + max((_nest_level(item) for item in pl), default=0)
def changes_dict_to_set_attribute(metakey, changes_dict, end=";"):
    """Convert a dictionary of changes to set_attribute instructions.

    Each entry maps key -> (new value, old value); one instruction line is
    emitted per entry, with `end` appended to the whole result.
    """
    lines = [
        "set_attribute({!r}, {!r}, {!r}, old={!r})".format(metakey, key, value, old)
        for key, (value, old) in changes_dict.items()
    ]
    return "\n".join(lines) + end
def floatlist(val):
    """Turn a string of comma-separated floats into a list of floats."""
    return list(map(float, val.split(',')))
def connection(value):
    """Return a comma-separated string of the 'identifier' fields, or None.

    Expects a list of dicts like:
    [{'id': '5e7b63a0c279e606c645be7d', 'identifier': 'Some String'}]
    """
    if not value:
        return None
    return ", ".join(str(conn["identifier"]) for conn in value)
def closest(values, elements, scope=None, strict=True):
    """Return closest (index, elem) of sorted values

    If 2 elements have same distance to a given value, second elem will be
    return has closest.

    When `scope` is given, a match farther than `scope` (strictly, unless
    strict=False) yields (None, None) instead.

    Example:
    > closest([1, 4], [0, 2, 3])
    [(1, 2), (2, 3)]
    """
    res = []

    def add(val, index, elem):
        """Add elem to res, or (None, None) if it falls outside `scope`."""
        diff = abs(elem - val)
        if scope is None:
            res.append((index, elem))
        elif diff < scope if strict else diff <= scope:
            res.append((index, elem))
        else:
            res.append((None, None))

    # Walk both sorted streams in lockstep with a single pass.
    elem_iter = iter(elements)
    val_iter = iter(values)
    try:
        elem = next(elem_iter)
        val = next(val_iter)
        index = 0
    except StopIteration:
        raise ValueError("Can look for closest is values or elements is empty")
    # last_diff/last_elem track the best candidate seen for the current val.
    last_diff, last_elem = abs(elem - val), elem
    while True:
        diff = abs(elem - val)
        if diff <= last_diff:
            # Still getting closer (or tied -- ties prefer the later elem):
            # advance to the next element.
            last_diff, last_elem = diff, elem
            try:
                elem = next(elem_iter)
            except StopIteration:
                elem = None
                break
            index += 1
        else:
            # Distance started growing: the previous element was the closest
            # for this value; record it and move to the next value.
            add(val, index - 1, last_elem)
            try:
                val = next(val_iter)
            except StopIteration:
                val = None
                break
            last_diff = abs(last_elem - val)
    if elem is None:
        # Elements exhausted: the last element is closest for every
        # remaining value.
        add(val, index, last_elem)
        for val in val_iter:
            add(val, index, last_elem)
    return res
def printd (msg): """ prints the debug messages """ #for debug #print msg return 0 f= open(Config.MONITOR_LOG,'r+') f.seek(0, 2) f.write(str(msg)+"\n") f.close()
def isECBEncrypted(data):
    """We'll consider that the data is ECB-encrypted if we find twice the
    same 16-byte block (ECB encrypts identical plaintext blocks to
    identical ciphertext blocks)."""
    if len(data) % 16 != 0:
        raise Exception('Data length must be a multiple of 16 bytes')
    seen = set()
    for offset in range(0, len(data), 16):
        block = data[offset:offset + 16]
        if block in seen:
            return True
        seen.add(block)
    return False
def e2f(b):
    """empty to float: return 0 for the empty string, float(b) otherwise."""
    if b == "":
        return 0
    return float(b)
def parse_spd_hexdump(filename):
    """Parse data dumped using the `spdread` command in LiteX BIOS

    This will read files in format:

        Memory dump:
        0x00000000  00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f  ................
        0x00000010  10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f  ................
    """
    data = []
    last_addr = -1
    with open(filename) as dump:
        for raw_line in dump:
            # Only address lines carry data; skip headers/ASCII-only lines.
            if not raw_line.startswith("0x"):
                continue
            fields = raw_line.strip().split()
            addr = int(fields[0], 16)
            assert addr > last_addr
            # Up to 16 hex byte values follow the address.
            data.extend(int(byte, 16) for byte in fields[1:17])
            last_addr = addr
    return data
def simple_filter(**kwargs):
    """Return a simple filter that requires all keyword arguments to be
    equal to their specified value."""
    criteria = [
        {'type': 'SIMPLE', 'propertyName': name, 'operator': 'Equals', 'operand': expected}
        for name, expected in kwargs.items()
    ]
    return {'type': 'COMPLEX', 'operator': 'And', 'criteria': criteria}
def grid_points_2d(length, width, div, width_div=None):
    """Returns a regularly spaced grid of points occupying a rectangular
    region of length x width partitioned into div intervals.

    If different spacing is desired in width, then width_div can be
    specified, otherwise it will default to div.  If div < 2 in either x
    or y, then the corresponding coordinate will be set to length or width
    respectively."""
    def axis_coords(extent, divisions):
        # Evenly spaced coordinates centred on 0, or the raw extent when
        # fewer than two divisions are requested.
        if divisions > 1:
            return [-extent / 2.0 + (i / (divisions - 1)) * extent
                    for i in range(divisions)]
        return [extent]

    xs = axis_coords(length, div)
    ys = axis_coords(width, width_div if width_div is not None else div)
    return [(x, y) for x in xs for y in ys]
def get_norm_value(json_entity, prop):
    """\
    Get a normalized value for a property (always as a list of strings).
    """
    raw = json_entity.get(prop, [])
    values = raw if isinstance(raw, list) else [raw]
    try:
        # Strings pass through; anything else must be a dict with an "@id".
        return [v if isinstance(v, str) else v["@id"] for v in values]
    except (TypeError, KeyError):
        raise ValueError(f"Malformed value for {prop!r}: {json_entity.get(prop)!r}")
def get_function_pointer(type_string):
    """Convert a function type string to the corresponding pointer type.

    e.g. "int (int)" -> "int (*)(int)".  The character immediately before
    the '(' (expected to be a space) is dropped, as in the original.
    """
    paren = type_string.find('(')
    return type_string[:paren - 1] + " (*)" + type_string[paren:]
def absolute(n: int) -> int:
    """Gives the absolute value of the passed in number.  Cannot use the
    built in function `abs`.

    Args:
        n - the number to take the absolute value of

    Returns:
        the absolute value of the passed in number
    """
    return -n if n < 0 else n
def build_results_dict(tournament_results):
    """Build per-team statistics from semicolon-separated match lines.

    Each input line looks like "home;away;result" where result is one of
    win/loss/draw from the home team's perspective.  Returns an object of
    the form:
        {team: {matches_played, wins, draws, losses, points}, ...}
    """
    if tournament_results == '':
        return {}
    stats = {}

    def ensure(team):
        # Lazily create a zeroed stats record for a team.
        if team not in stats:
            stats[team] = {'matches_played': 0, 'wins': 0, 'draws': 0,
                           'losses': 0, 'points': 0}

    for line in tournament_results.split('\n'):
        fields = line.split(';')
        home, away = fields[0], fields[1]
        ensure(home)
        ensure(away)
        outcome = fields[2]
        if outcome == "win":
            stats[home]['wins'] += 1
            stats[home]['points'] += 3
            stats[away]['losses'] += 1
        elif outcome == "loss":
            stats[home]['losses'] += 1
            stats[away]['wins'] += 1
            stats[away]['points'] += 3
        elif outcome == "draw":
            stats[home]['draws'] += 1
            stats[away]['draws'] += 1
            stats[home]['points'] += 1
            stats[away]['points'] += 1
        stats[home]['matches_played'] += 1
        stats[away]['matches_played'] += 1
    return stats
def size_scale(values, s_min, s_max):
    """Map values linearly onto scatter-point sizes in [30, 230].

    Parameters
    ----------
    values : ndarray
        values to be displayed using the size of the scatter points
    s_min : float
        minimum value this set of values should be compared to
    s_max : float
        maximum value this set of values should be compared to

    Returns
    -------
    sizes : ndarray
        arbitrary scaling of values which should be appropriate for
        linearly scaled data and be visually distinct
    """
    span = s_max - s_min
    return 30 + 200 * (values - s_min) / span
def expand_scope_by_name(scope, name):
    """Expand a tf scope by the given name.

    Accepts a string scope, a scope object with a ``.name`` attribute, or
    None (returned unchanged).
    """
    if isinstance(scope, str):
        return scope + '/' + name
    if scope is None:
        return None
    return scope.name + '/' + name
def generate_file(size_in_mb: int) -> str:
    """Generate a file of a given size in MB; return the filename."""
    filename = f"{size_in_mb}MB.bin"
    target_size = size_in_mb * 1024 * 1024
    with open(filename, "wb") as out:
        # Seek to the last byte and write one NUL; the OS fills the gap,
        # yielding a (possibly sparse) file of exactly target_size bytes.
        out.seek(target_size - 1)
        out.write(b"\0")
    return filename
def get_recursively(search_dict, field):
    """
    Takes a dict with nested lists and dicts, and searches all dicts for
    a key of the field provided.

    Returns (fields_found, keys): the matching values, and for each match
    the path of keys/indices leading to it.
    """
    fields_found = []
    keys = []
    for key, value in search_dict.items():
        if key == field:
            fields_found.append(value)
            keys.append([key])
        elif isinstance(value, dict):
            # Recurse into nested dicts; prefix each returned key path with
            # the current key.
            results, recurKeys = get_recursively(value, field)
            for result in results:
                fields_found.append(result)
            for recurKey in recurKeys:
                tempKey = [key]
                tempKey += recurKey
                keys.append(tempKey)
        elif isinstance(value, list):
            # Recurse into dict items inside lists.
            # NOTE(review): the recorded path starts with the list *index*
            # and drops the current key, so paths through lists omit the
            # parent key -- confirm this is the intended path format.
            for ind in range(len(value)):
                item = value[ind]
                if isinstance(item, dict):
                    more_results, more_recurKeys = get_recursively(item, field)
                    for another_result in more_results:
                        fields_found.append(another_result)
                    for more_recurkey in more_recurKeys:
                        tempKey = [ind]
                        tempKey += more_recurkey
                        keys.append(tempKey)
    return fields_found, keys
def permute_all_atoms(labels, coords, permutation):
    """
    labels - atom labels
    coords - a set of coordinates
    permutation - a permutation of atoms (destination index per source atom)

    Returns the permuted labels and coordinates.
    """
    new_labels = labels[:]
    new_coords = coords[:]
    # Entry i of the inputs moves to position permutation[i].
    for src, dst in enumerate(permutation):
        new_labels[dst] = labels[src]
        new_coords[dst] = coords[src]
    return new_labels, new_coords
def upper(value, n):
    """Upper-case the whole string, then keep only the first n characters.

    (Upper-casing first matters for characters whose uppercase form is
    longer, e.g. 'ß' -> 'SS'.)
    """
    capitalized = value.upper()
    return capitalized[0:n]
def subtour_calculate(n, edges):
    """Given a list of directed edges on n nodes, find the shortest subtour
    (cycle) among the node-disjoint tours covering all n nodes."""
    seen = [False] * n
    tours = []
    tour_sizes = []
    # Adjacency list of successors per node.
    successors = [[] for _ in range(n)]
    for src, dst in edges:
        successors[src].append(dst)
    while True:
        # Start the next tour at the first unvisited node.
        node = seen.index(False)
        tour = [node]
        while True:
            seen[node] = True
            candidates = [nxt for nxt in successors[node] if not seen[nxt]]
            if not candidates:
                break
            node = candidates[0]
            tour.append(node)
        tours.append(tour)
        tour_sizes.append(len(tour))
        if sum(tour_sizes) == n:
            break
    return tours[tour_sizes.index(min(tour_sizes))]
def fileNameOf(path):
    """Answer the file name part of the path.

    >>> fileNameOf('../../aFile.pdf')
    'aFile.pdf'
    >>> fileNameOf('../../') is None  # No file name
    True
    """
    tail = path.split('/')[-1]
    if tail:
        return tail
    return None
def remove_short_lines(lyrics):
    """Takes the lyrics of a song and removes lines with less than 3 words.

    Normally these lines don't have any meaningful meaning.  Paragraphs are
    separated by blank lines (\\r\\n\\r\\n); surviving lines have their
    whitespace normalised to single spaces.  On any error the original
    lyrics are returned unchanged (best effort).
    """
    try:
        cleaned_paragraphs = []
        for paragraph in lyrics.split("\r\n\r\n"):
            kept = []
            for line in paragraph.split("\r\n"):
                words = line.split()
                if len(words) >= 3:
                    kept.append(" ".join(words))
            cleaned_paragraphs.append("\r\n".join(kept))
        return "\r\n\r\n".join(cleaned_paragraphs)
    except Exception as e:
        print(e)
        return lyrics
def percent_change(d1, d2):
    """Calculate fractional change between two numbers.

    :param d1: Starting number (must be non-zero or ZeroDivisionError is raised)
    :type d1: float
    :param d2: Ending number
    :type d2: float
    :return: Fractional change, e.g. 0.5 for a 50% increase
    :rtype: float
    """
    delta = d2 - d1
    return delta / d1
def _build_arguments(keyword_args):
    """
    Builds a dictionary of function arguments appropriate to the index
    to be computed.

    :param dict keyword_args:
    :return: dictionary of arguments keyed with names expected by the
        corresponding index computation function
    :raises ValueError: for index names other than spi/spei/pnp/palmers/pet
    """
    index = keyword_args["index"]
    args = {"data_start_year": keyword_args["data_start_year"]}
    if index in ("spi", "spei"):
        args["scale"] = keyword_args["scale"]
        args["distribution"] = keyword_args["distribution"]
        args["calibration_year_initial"] = keyword_args["calibration_start_year"]
        args["calibration_year_final"] = keyword_args["calibration_end_year"]
        args["periodicity"] = keyword_args["periodicity"]
    elif index == "pnp":
        args["scale"] = keyword_args["scale"]
        args["calibration_start_year"] = keyword_args["calibration_start_year"]
        args["calibration_end_year"] = keyword_args["calibration_end_year"]
        args["periodicity"] = keyword_args["periodicity"]
    elif index == "palmers":
        args["calibration_start_year"] = keyword_args["calibration_start_year"]
        args["calibration_end_year"] = keyword_args["calibration_end_year"]
    elif index != "pet":
        # "pet" needs only data_start_year; anything else is unsupported.
        raise ValueError(
            "Index {index} not yet supported.".format(index=index)
        )
    return args
def is_annotated_with_loads(prop: property) -> bool:
    """Return True if the property's getter carries a ``_loads_attributes``
    marker (i.e. was decorated with @loads_attributes)."""
    getter = prop.fget
    return hasattr(getter, '_loads_attributes')
def is_array(obj):
    """
    Checks if a given sequence is a numpy Array object.

    Uses duck typing: anything exposing a ``dtype`` attribute counts
    (note this also matches e.g. numpy scalars).

    Parameters
    ----------
    obj : object
        The input argument.

    Returns
    -------
    test result : bool
        The test result of whether seq is a numpy Array or not.

    >>> import numpy as np
    >>> is_array([1, 2, 3, 4, 5])
    False
    >>> is_array(np.array([1, 2, 3, 4, 5]))
    True
    """
    try:
        obj.dtype
    except AttributeError:
        return False
    return True
def partition_refinement(partition_a, partition_b):
    """Check if a refines b, returns Boolean (True/False).

    Trivial example, for a homogeneous network the bottom nodes (nothing
    merged) refines the top node (all merged), but not the other way round:

    >>> partition_refinement([0,1,2,3],[0,0,0,0])
    True
    >>> partition_refinement([0,0,0,0],[0,1,2,3])
    False

    Less trivially,

    >>> partition_refinement([0,1,2,2,1],[0,0,1,1,0])
    True
    >>> partition_refinement([0,0,1,1,0],[0,1,2,2,1])
    False

    Note that a partition is not considered a refinement of itself:

    >>> partition_refinement([0,1,1,2],[0,1,1,2])
    False

    A few more examples, six balanced colourings of a particular graph:

    >>> partitions = [[0,0,0,0,0],
    ...               [0,0,0,0,1],
    ...               [0,1,0,1,1],
    ...               [0,1,0,1,2],
    ...               [0,1,2,3,1],
    ...               [0,1,2,3,4]]
    >>> for a in partitions:
    ...     cyc_a = cyclic_partition(a)
    ...     for b in partitions:
    ...         cyc_b = cyclic_partition(b)
    ...         if partition_refinement(a,b):
    ...             print("%s refines %s" % (cyc_a, cyc_b))
    (1234)(5) refines (12345)
    (13)(245) refines (12345)
    (13)(24)(5) refines (12345)
    (13)(24)(5) refines (1234)(5)
    (13)(24)(5) refines (13)(245)
    (1)(25)(3)(4) refines (12345)
    (1)(25)(3)(4) refines (13)(245)
    (1)(2)(3)(4)(5) refines (12345)
    (1)(2)(3)(4)(5) refines (1234)(5)
    (1)(2)(3)(4)(5) refines (13)(245)
    (1)(2)(3)(4)(5) refines (13)(24)(5)
    (1)(2)(3)(4)(5) refines (1)(25)(3)(4)
    """
    # Commented-out legacy checks, kept for reference:
    # if partition_a == partition_b:
    #     return False
    # assert len(partition_a) == len(partition_b)
    # rank_a = max(partition_a)  # works but assumes partition format
    # rank_b = max(partition_b)
    # The rank is the number of distinct cells in each partition.
    rank_a = len(set(partition_a))
    rank_b = len(set(partition_b))
    if rank_a <= rank_b:
        # A strict refinement must have strictly more cells.
        return False
    # NOTE(review): this loop assumes cell labels are exactly 0..rank_a-1,
    # as the (reconstructed) commented-out assertion below suggests --
    # confirm that callers always use that label format.
    for i in range(rank_a):
        # assert i in partition_a, "Bad partition? %r" % partition_a
        # See where number "i" occurs in partition a,
        positions = [p for p, v in enumerate(partition_a) if v == i]
        # Make sure these all belong to the same partition in b
        if len({partition_b[p] for p in positions}) > 1:
            # Failed - b is not a refinement (sub partition) of a
            return False
    return True
def get_elapsed_time_string(elapsed_time, rounding=3):
    """Format elapsed time into a scaled (value, unit) pair.

    Parameters
    ----------
    elapsed_time : float
        Elapsed time in seconds
    rounding : int
        Number of decimal places to round

    Returns
    -------
    processing_time : float
        Scaled amount of elapsed time
    processing_time_unit : str
        Time unit, either seconds, minutes, or hours
    """
    if elapsed_time < 60:
        scaled, unit = elapsed_time, "seconds"
    elif elapsed_time < 60 ** 2:
        scaled, unit = elapsed_time / 60, "minutes"
    else:
        scaled, unit = elapsed_time / (60 ** 2), "hours"
    return round(scaled, rounding), unit
def mover_torre1(tablero, x_inicial, y_inicial, x_final, y_final):
    """Move a rook ('t') on the board, validating that its path is clear.

    (list of list, int, int, int, int) -> list of list

    :param tablero: list of list representing the board
    :param x_inicial: starting X position
    :param y_inicial: starting Y position
    :param x_final: final X position
    :param y_final: final Y position
    :return: list of list representing the resulting board
    :raises ValueError: if any square between start and end is occupied
    """
    # Bug fix: tablero.copy() was a *shallow* copy, so the writes below
    # mutated the rows of the caller's board.  Copy each row as well.
    tab = [row[:] for row in tablero]
    if (x_inicial == x_final or y_inicial == y_final) and tab[x_inicial][y_inicial].lower() == 't':
        if x_inicial != x_final:
            # Bug fix: range(x_inicial + 1, x_final) was empty when moving
            # towards lower indices, silently skipping the path check.
            low, high = sorted((x_inicial, x_final))
            for x in range(low + 1, high):
                if tab[x][y_inicial] != ' ':
                    raise ValueError('El camino no es valido')
            tab[x_final][y_inicial] = 't'
            tab[x_inicial][y_inicial] = ' '
        if y_inicial != y_final:
            low, high = sorted((y_inicial, y_final))
            for y in range(low + 1, high):
                if tab[x_inicial][y] != ' ':
                    raise ValueError('El camino no es valido')
            tab[x_inicial][y_final] = 't'
            tab[x_inicial][y_inicial] = ' '
    # NOTE(review): the piece is always written back as lowercase 't',
    # matching the original -- confirm whether an uppercase 'T' should be
    # preserved instead.
    return tab
def format_model_name(model_name, specific_params):
    """
    Given the model name and input parameters, return a string ready to
    include as a name field in simulated graphs.
    """
    batch_size = specific_params['batch_size']

    def digits(name):
        # All digit characters in the name, e.g. 'resnet50' -> '50'.
        return ''.join(ch for ch in name if ch.isdigit())

    if 'resnet' in model_name:
        return f'ResNet-{digits(model_name)} ({batch_size})'
    if 'densenet' in model_name:
        return f'DenseNet-{digits(model_name)} ({batch_size})'
    if 'inception' in model_name:
        return f'Inception V{model_name[-1]} ({batch_size})'
    if 'treelstm' in model_name:
        return 'TreeLSTM'
    if model_name == 'unroll_gan':
        return 'Unrolled GAN'
    if model_name == 'lstm':
        return f'LSTM ({batch_size})'
    return model_name
def get_payment_callback_payload(Amount=500, CheckoutRequestID="ws_CO_061020201133231972", MpesaReceiptNumber="LGR7OWQX0R"):
    """Response received from the server as callback after calling the
    stkpush process request API (success case, ResultCode 0)."""
    metadata_items = [
        {"Name": "Amount", "Value": Amount},
        {"Name": "MpesaReceiptNumber", "Value": MpesaReceiptNumber},
        {"Name": "Balance"},
        {"Name": "TransactionDate", "Value": 20170727154800},
        {"Name": "PhoneNumber", "Value": 254721566839},
    ]
    callback = {
        "MerchantRequestID": "19465-780693-1",
        "CheckoutRequestID": CheckoutRequestID,
        "ResultCode": 0,
        "ResultDesc": "The service request is processed successfully.",
        "CallbackMetadata": {"Item": metadata_items},
    }
    return {"Body": {"stkCallback": callback}}
def check_dictionary_values(dict1: dict, dict2: dict, *keywords) -> bool:
    """Return True if both dictionaries agree on every given keyword.

    A keyword missing from both dictionaries counts as a match (both sides
    default to None), so this is unreliable when None is a legitimate value.
    Used primarily for checking against registered command data from Discord.

    Parameters
    ----------
    dict1: :class:`dict`
        First dictionary to compare.
    dict2: :class:`dict`
        Second dictionary to compare.
    keywords: :class:`str`
        Words to compare both dictionaries to.

    Returns
    -------
    :class:`bool`
        True if keyword values in both dictionaries match, False otherwise.
    """
    return all(dict1.get(kw) == dict2.get(kw) for kw in keywords)
def FindOverlapLength(line_value, insertion_text):
    """Return the length of the longest suffix of |line_value| which is a
    prefix of |insertion_text|.

    Implements Neil Fraser's common-overlap algorithm
    (https://neil.fraser.name/news/2010/11/04/): repeatedly locate the
    current suffix of line_value inside insertion_text and jump the
    candidate overlap length forward, remembering the longest confirmed
    match.

    Example:
      line_value:      import com.
      insertion_text:  com.youcompleteme.test
      overlap length:  4 ("com.")
    """
    left_len = len(line_value)
    right_len = len(insertion_text)

    # No overlap is possible with an empty string on either side.
    if left_len == 0 or right_len == 0:
        return 0

    # Trim both strings to the only region that can overlap: the suffix of
    # line_value and the prefix of insertion_text, of equal length.
    if left_len > right_len:
        line_value = line_value[-right_len:]
    elif right_len > left_len:
        insertion_text = insertion_text[:left_len]

    # Full overlap of the trimmed region is the trivially best outcome.
    if insertion_text == line_value:
        return min(left_len, right_len)

    best = 0
    length = 1
    while True:
        # Locate the length-`length` suffix of line_value within
        # insertion_text; if it never appears, no longer overlap exists.
        fragment = line_value[-length:]
        found_at = insertion_text.find(fragment)
        if found_at == -1:
            return best
        # Jump ahead so the found occurrence aligns with position 0, then
        # confirm whether the whole candidate overlap actually matches.
        length += found_at
        if line_value[-length:] == insertion_text[:length]:
            best = length
            length += 1
def normalise_to_zero_one_interval(y, ymin, ymax):
    """Linearly map y from the interval [ymin, ymax] onto [0, 1].

    Raises TypeError if ymin > ymax (arguments swapped).
    """
    if ymin > ymax:
        raise TypeError('min and max values the wrong way round!')
    span = ymax - ymin
    return (y - ymin) / span
def get_main_categories(categories):
    """Return the top-level categories from a category mapping.

    A key is considered top-level when its string form is at most two
    characters long.
    """
    return {key: label
            for key, label in categories.items()
            if len(str(key)) <= 2}
def nextpow2(i):
    """Return the smallest power of 2 that is >= i (1 for i <= 1)."""
    power = 1
    while power < i:
        power += power  # double until we reach or pass i
    return power
def lookup(registered_collection, reg_key):
    """Lookup and return a decorated function or class in the collection.

    A string reg_key is treated as a '/'-separated hierarchical path, e.g.
    reg_key="my_model/my_exp/my_config_0" returns
    registered_collection["my_model"]["my_exp"]["my_config_0"].
    A non-string reg_key is looked up directly.

    Args:
      registered_collection: a dictionary holding registered entries.
      reg_key: key (string path or plain key) to retrieve.

    Returns:
      The registered function or class.

    Raises:
      LookupError: when reg_key cannot be found.
    """
    if not isinstance(reg_key, str):
        # Plain (non-hierarchical) key: single direct lookup.
        if reg_key not in registered_collection:
            raise LookupError("registration key {} never registered.".format(reg_key))
        return registered_collection[reg_key]

    node = registered_collection
    for depth, segment in enumerate(reg_key.split("/")):
        if segment not in node:
            raise LookupError(
                "collection path {} at position {} never registered.".format(
                    segment, depth))
        node = node[segment]
    return node
def parse_list_of_str(val):
    """Split a comma-separated string into its component strings."""
    parts = val.split(',')
    return parts
def dec(bytes: bytes) -> str:
    """Return the given bytes decoded as an ASCII string.

    NOTE: the parameter name shadows the builtin `bytes`; kept unchanged
    for backward compatibility with keyword callers.
    """
    return str(bytes, "ascii")
def least_difference(a, b, c):
    """Return the smallest difference between any two numbers among a, b and c.

    >>> least_difference(1, 5, -5)
    4
    """
    return min(abs(a - b), abs(b - c), abs(a - c))
def precision_k(actuals: list, candidates: list, k: int) -> float:
    """Return precision at k for an ordered candidate list.

    Only the top-k candidates are considered; the denominator is the number
    of candidates actually examined (min(k, len(candidates))), so a short
    candidate list is not penalised.

    Args:
        actuals: relevant items.
        candidates: retrieved items, best first.
        k: cutoff rank.

    Returns:
        Fraction of the examined candidates that are relevant, or 0.0 when
        there is nothing to examine (empty candidates or k <= 0) — the
        previous version raised ZeroDivisionError in that case.
    """
    top_k = candidates[:k]
    if not top_k:
        return 0.0
    # len(top_k) equals min(k, len(candidates)) whenever top_k is non-empty.
    return len(set(actuals).intersection(top_k)) / len(top_k)
def replacenewline(str_to_replace):
    """Escapes newline characters with backslashes.

    Args:
      str_to_replace: the string to be escaped.

    Returns:
      The string with newlines escaped, or None if the input was None.
    """
    if str_to_replace is None:
        return None
    # Order matters: "\r\n" must be collapsed first so it becomes a single
    # escaped newline rather than two.
    for newline in ("\r\n", "\r", "\n"):
        str_to_replace = str_to_replace.replace(newline, "\\n")
    return str_to_replace
def isolate_path_filename(uri):
    """Accept a url and return the isolated filename component.

    For a uri such as http://site/folder/filename.ext, returns "filename":
    everything after the last '/' with the text after the last '.' removed.
    If there is no '.', the trailing component is returned as-is.

    Args:
        uri (:py:class:`str`): The uri from which the filename should be returned

    Returns:
        file_component (:py:class:`str`): The isolated filename
    """
    # Everything after the last slash (or the whole string if none).
    trailing = uri.rsplit('/', 1)[-1]
    # Drop the extension — the text after the last dot — if present.
    if '.' in trailing:
        trailing = trailing.rsplit('.', 1)[0]
    return trailing
def dp_make_weight(egg_weights, target_weight, memo=None):
    """Return the minimum number of eggs needed to make target_weight.

    Assumes an infinite supply of eggs of each weight and that an egg of
    weight 1 is always available (so every target is reachable).

    Bottom-up dynamic programming: the best answer for a weight depends only
    on strictly smaller weights, so we fill the table from 1 upwards.

    Parameters:
    egg_weights - tuple of integers, available egg weights sorted from
                  smallest to largest value (1 = d1 < d2 < ... < dk)
    target_weight - int, amount of weight we want to find eggs to fit
    memo - OPTIONAL dict for memoization; a fresh one is created per call.
           (The previous mutable default `memo={}` was shared across calls,
           leaking state and growing without bound.)

    Returns: int, smallest number of eggs needed to make target weight
    """
    if memo is None:
        memo = {}
    memo[0] = 0  # base case: zero eggs make weight 0
    for weight in range(1, target_weight + 1):
        # One egg of each usable weight plus the best answer for what remains.
        candidates = [
            1 + memo[weight - egg]
            for egg in egg_weights
            if (weight - egg) in memo
        ]
        memo[weight] = min(candidates)
    return memo[target_weight]
def retangulo(lado_a, lado_b):
    """Return the area of a rectangle with sides lado_a and lado_b."""
    return lado_a * lado_b
def has_permission(request):
    """
    Hard code has_permission for admin header
    """
    permitted = False
    if hasattr(request, 'user'):
        permitted = request.user.is_authenticated
    return {'has_permission': permitted}
def try_int(num_string):
    """Best-effort cast to int.

    :param num_string: value to convert (may be None)
    :return: int, or None when the input is None or not parseable
    """
    if num_string is None:
        return None
    try:
        return int(num_string)
    except ValueError:
        return None
def parse_config(config_string):
    """Parse the config string into a flat list of predicate strings.

    Blank lines and lines starting with '#' are ignored; each remaining
    line is split on commas and the pieces are stripped, empty pieces
    being discarded.

    :param config_string: String as read from the config file
    :return: List of general predicate strings to filter
    """
    predicates = []
    for raw_line in config_string.split('\n'):
        stripped = raw_line.strip()
        if not stripped or stripped.startswith('#'):
            continue
        predicates += [tok.strip() for tok in stripped.split(',') if tok.strip()]
    return predicates
def _recursive_flatten(cell, dtype):
    """Flatten nested cell arrays (as unpacked from .mat files) until the
    first element is an instance of `dtype`.

    Returns the flattened sequence, or None when `cell` is empty
    (matching the original implicit return).
    """
    if len(cell) == 0:
        return None
    # Peel off one level of nesting per pass, judged by the first element.
    while not isinstance(cell[0], dtype):
        cell = [inner for outer in cell for inner in outer]
    return cell
def get_tab_indent(n):
    """Return a string made of n tab characters.

    Parameters
    -------------
    n
        Number of indentations

    Returns
    -------------
    str_tab_indent
        Desired number of indentations as string
    """
    return "\t" * n
def new_tuple(nt_type, values):
    """Create a new tuple of the same type as nt_type.

    Handles the constructor difference between plain tuple (takes a
    sequence) and NamedTuples (take positional fields).

    :param nt_type: type of tuple
    :param values: values as sequence
    :return: new (named) tuple
    """
    if nt_type is tuple:
        return tuple(values)
    # NamedTuple: broadcast the sequence into positional fields.
    return nt_type(*values)
def truncate(str, length=60, reverse=False):
    """Truncate a string to the specified length.

    Appends '...' to the string if truncated -- the three periods are
    included in the specified length.  With reverse=True the *end* of the
    string is kept and '...' is prepended instead.

    NOTE: the parameter name `str` shadows the builtin; kept for backward
    compatibility with existing keyword callers.

    :param str: string to truncate, or None (returned unchanged)
    :param length: maximum length of the result (coerced with int())
    :param reverse: keep the tail instead of the head
    :return: the (possibly truncated) string, or None
    """
    if str is None:  # was `str == None`; identity check is the idiom
        return str
    limit = int(length)  # coerce once instead of on every use
    if len(str) > limit:
        if reverse:
            return '...' + str[-(limit - 3):]
        return str[:limit - 3] + '...'
    return str
def get_header_block(content, depth):
    """Generate the five tab-separated column headers, e.g.:

    AS/1	AS/1/LABEL	AS/1/ID	AS/1/NOTE	AS/1/ABBR
    """
    prefix = content + "/" + str(depth)
    suffixes = ("", "/LABEL", "/ID", "/NOTE", "/ABBR")
    return "\t".join(prefix + suffix for suffix in suffixes)
def func_x_a_kwargs(x, a=2, **kwargs):
    """func.

    Parameters
    ----------
    x: float
    a: int
    kwargs: dict

    Returns
    -------
    x: float
    a: int
    kwargs: dict

    Notes
    -----
    The five None placeholders pad the tuple to a fixed 8-slot layout.
    """
    placeholder = None
    return (x, placeholder, a, placeholder, placeholder,
            placeholder, placeholder, kwargs)
def get_binary_category(score):
    """Get an integer binary classification label from a score between 0 and 1.

    Scores below 0.5 map to 0; scores of 0.5 and above map to 1.
    """
    return 0 if score < 0.5 else 1
def has_repeated_n_gram(tokens, disallowed_n=3):
    """Return True iff any n-gram of length `disallowed_n` occurs more than
    once in the token sequence."""
    observed = set()
    for start in range(len(tokens) - disallowed_n + 1):
        gram = tuple(tokens[start:start + disallowed_n])
        if gram in observed:
            return True
        observed.add(gram)
    return False
def strtobool(val):
    """Convert a string representation of truth to true (1) or false (0).

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'.  Raises ValueError if
    'val' is anything else.
    """
    lowered = val.lower()
    truthy = {'y', 'yes', 't', 'true', 'on', '1'}
    falsy = {'n', 'no', 'f', 'false', 'off', '0'}
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise ValueError("invalid truth value %r" % (lowered,))
def invalid_request_body_with_invalid_priority(
        valid_client_model, valid_staff_model
):
    """
    A fixture for creating a request body whose 'priority' (100) is invalid.

    Args:
        valid_client_model (Model): a valid client model created by a fixture.
        valid_staff_model (Model): a valid staff model created by a fixture.
    """
    body = {
        'title': 'Improve customer care services',
        'description': (
            'The current customer care services are reported to '
            'be abysmal with representatives dropping calls on customer or '
            'being rather unpleasant.'
        ),
        'product_area': 'POLICIES',
        'target_date': '2019-10-05T00:00:00Z',
        'priority': 100,
        'staff_id': 1,
        'client_id': 1,
    }
    return body
def discreteTruncate(number, discreteSet):
    """Truncate `number` up to the closest element in the positive discrete set.

    Returns False if the number is negative or larger than the maximum
    value in the set.  NOTE: sorts `discreteSet` in place (as the original
    implementation did).
    """
    if number < 0:
        return False
    discreteSet.sort()
    # First element that is >= number, or False when none qualifies.
    return next((item for item in discreteSet if number <= item), False)
def count_common_tags(tags_list1, tags_list2):
    """
    :param tags_list1: The first list of tags
    :param tags_list2: The second list of tags
    :return: The number of tags in common between these 2 slides
    """
    from collections import Counter
    # Multiset intersection: each shared tag is counted
    # min(count_in_1, count_in_2) times, in O(n + m).
    # The previous nested-loop version was O(n * m) and counted every
    # matching *pair*, so duplicate tags inflated the total
    # (e.g. ['a', 'a'] vs ['a', 'a'] counted 4 instead of 2).
    # Assumes tags are hashable (strings).
    return sum((Counter(tags_list1) & Counter(tags_list2)).values())
def parse_commands(filename):
    """Read the configuration file of model names and features.

    Each line has the form, separated by spaces:
        model_name feat1 feat2 feat3 ...
    Lines with no features (including blank lines) are skipped.

    :return: list of (model_name, [features]) tuples
    """
    models = []
    with open(filename) as config_file:
        for line in config_file.read().split('\n'):
            name, *features = line.split(' ')
            if not features:
                continue
            models.append((name, features))
    return models