content
stringlengths
42
6.51k
def bytestr_to_int(s: bytes) -> int:
    """Convert a byte string to its big-endian unsigned integer value.

    :param s: bytes interpreted as a big-endian unsigned integer
    :return: the integer value; 0 for empty input
    """
    # int.from_bytes performs the same shift/or fold as the original manual
    # loop, but in C and in one call.
    return int.from_bytes(s, "big")
def skyline_notelists(notelist):
    """Performs the skyline algorithm in its original formulation over the
    *notelist* in input.

    *notelist* is in the form returned by misc_tools.load_files: an iterable
    of (pitch, onset, offset, ismelody) tuples.

    Reference paper: A. L. Uitdenbogerd and J. Zobel, "Melodic matching
    techniques for large music databases," in Proceedings of the 7th ACM
    International Conference on Multimedia '99, 1999, pp. 57-66.

    RETURNS : the list of predicted labels, where 1 is for melody note and
    0 is for accompaniment.

    Bug fix: the highest note of the *final* onset group is now labelled as
    melody. The original only committed a group's label when the next onset
    arrived, so the last group was always left at 0.
    """
    # ordering notelist by onset so simultaneous notes are contiguous
    notelist = sorted(notelist, key=lambda note: note[1])
    predicted_label = [0] * len(notelist)
    if not notelist:
        return predicted_label

    previous_onset = notelist[0][1]  # the first note never starts a "new" onset
    highest_pitch = 0
    melody_index = 0
    for i, (pitch, onset, offset, ismelody) in enumerate(notelist):
        if onset > previous_onset:
            # a new onset begins: commit the highest note of the previous group
            predicted_label[melody_index] = 1
            highest_pitch = pitch
            melody_index = i
        elif pitch > highest_pitch:
            # same onset group: keep the highest pitch seen so far
            highest_pitch = pitch
            melody_index = i
        previous_onset = onset
    # commit the melody note of the final onset group
    predicted_label[melody_index] = 1
    return predicted_label
def _create_and_return_new_file_name(result_audio_files): """ Create and append new filename to list of names by adding 1 to the last filename. Return new filename """ last_name = result_audio_files[-1] name_without_extension, extension = last_name.split(".") name_splitted_by_underscore = name_without_extension.split("_") if not name_splitted_by_underscore[-1].isdigit(): new_name = f"{name_without_extension}_1.{extension}" else: new_index = int(name_splitted_by_underscore[-1]) + 1 new_name = ( "_".join(name_splitted_by_underscore[:-1]) + "_" + str(new_index) + "." + extension ) with open(new_name, "w") as f: pass result_audio_files.append(new_name) return new_name
def solution(n: int = 998001) -> int:
    """
    Returns the largest palindrome made from the product of two 3-digit
    numbers which is less than n.

    >>> solution(20000)
    19591
    >>> solution(30000)
    29992
    >>> solution(40000)
    39893
    """
    answer = 0
    for i in range(999, 99, -1):  # 3 digit numbers range from 999 down to 100
        if i * 999 <= answer:
            # every remaining product is at most i * 999 — nothing larger left
            break
        for j in range(999, 99, -1):
            product = i * j
            if product <= answer:
                # j is descending, so all later products are smaller still
                break
            if product < n and str(product) == str(product)[::-1]:
                answer = product
    return answer
def remove_encoding_indicator(func_string):
    """Strip a trailing 'A' (ANSI) or 'W' (unicode) variant suffix from a
    Win32-style function name.

    The character before the suffix must be lower case, which distinguishes
    a variant suffix (CreateFileA) from an all-caps name (CREATEA).

    :param func_string: function name to normalise
    :return: the name without the suffix, or unchanged if no suffix matches

    Robustness fix: strings shorter than two characters are returned
    unchanged — the original indexed ``func_string[-2]`` and raised
    IndexError for them.
    """
    if (
        len(func_string) >= 2
        and func_string[-1] in ("A", "W")
        and func_string[-2].islower()
    ):
        return func_string[:-1]
    return func_string
def class_name(cls):
    """Return the name of the given object's class."""
    klass = cls.__class__
    return klass.__name__
def compare_bits(olds, news):
    """Subtract 2D lists to determine changes to bit state.

    :param olds: 2D list of previous bit values
    :param news: 2D list of new bit values, same shape as ``olds``
    :return: 2D list where each cell is ``news[i][j] - olds[i][j]``

    Robustness fix: an empty matrix now yields [] instead of raising
    IndexError on ``olds[0]``.
    """
    return [
        [new_bit - old_bit for old_bit, new_bit in zip(old_row, new_row)]
        for old_row, new_row in zip(olds, news)
    ]
def reformat(code, is_diag):
    """Put a period in the right place because the MIMIC-3 data files
    exclude them.

    Generally, procedure codes have dots after the first two digits, while
    diagnosis codes have dots after the first three digits (four for
    E-prefixed diagnosis codes).
    """
    if code == '':
        return code
    code = code.replace('.', '')
    if not is_diag:
        dot_at = 2
    elif code.startswith('E'):
        dot_at = 4
    else:
        dot_at = 3
    if len(code) > dot_at:
        code = code[:dot_at] + '.' + code[dot_at:]
    return code
def is_left(p0, p1, p2):
    """
    Input: three points P0, P1, and P2
    Return: > 0 for P2 left of the line through P0 to P1
            = 0 for P2 on the line
            < 0 for P2 right of the line
    """
    (x0, y0), (x1, y1), (x2, y2) = p0, p1, p2
    # 2D cross product of (P1 - P0) and (P2 - P0)
    return (x1 - x0) * (y2 - y0) - (x2 - x0) * (y1 - y0)
def list_to_ccd(weather_list: list, parameter_info: dict, file_path: str) -> bool:
    """Converts a weather list into a Delphin weather file (.ccd).

    :param weather_list: List with hourly weather values
    :param parameter_info: Dict with meta data for the weather file. Should
        contain the following keys: location_name, year, description and intro.
    :param file_path: Full file path for where the .ccd file should be saved.
    :return: True

    Fix: the file handle is now closed deterministically with a ``with``
    block — the original leaked the handle if any write raised.
    """
    with open(file_path, 'w') as file_obj:
        # Write meta data
        file_obj.write(f"# {parameter_info['location_name']}\n")
        file_obj.write(f"# Year {parameter_info['year']}\n")
        file_obj.write(f"# RIBuild - Hourly values, {parameter_info['description']} \n\n")
        file_obj.write(parameter_info["intro"] + "\n\n")

        # Write data: one line per hourly value, rolling over to a new day
        # every 24 values
        day = 0
        hour = 0
        for i, data in enumerate(weather_list):
            if i % 24 == 0 and i != 0:
                hour = 0
                day += 1
            hour_str = str(hour) + ":00:00"
            file_obj.write(f'{day:>{6}}{hour_str:>{9}} {data:.2f}\n')
            hour += 1
    return True
def get_server_tag(deploy_params):
    """Get service deployment configuration
        e.g.:
        server:
            server_tag: "8c16m"
    """
    if not deploy_params:
        return ""
    server = deploy_params.get("server")
    if server is None:
        return ""
    return server.get("server_tag", "")
def set_blast_chunk_overlap(config):
    """Set overlap length for splitting long sequences."""
    settings = config["settings"]
    return settings.get("blast_overlap", 0)
def get_tensor_name(node_name, output_slot):
    """Get tensor name given node name and output slot index.

    Args:
        node_name: Name of the node that outputs the tensor, as a string.
        output_slot: Output slot index of the tensor, as an integer.

    Returns:
        Name of the tensor, as a string.
    """
    return f"{node_name}:{output_slot:d}"
def accuracy_stat(topk):
    """
    topk: list of topk accuracy values. Must be length of epochs
    return: average, maximum
    """
    average = sum(topk) / len(topk)
    best = max(topk)
    return average, best
def calcBayes(priorA, probBifA, probB):
    """priorA: initial estimate of probability of A independent of B
    probBifA: est. of probability of B assuming A is true
    probB: est. of probability of B
    returns probability of A given B (Bayes' rule)"""
    return (priorA * probBifA) / probB
def Indent(Level):
    """
    Return a string of " ~t " (tab) markers, one for each indent level
    passed in the argument. Intended to be used with Say(). For example,
    Indent(3) returns 3 tab markers.

    NOTE: TAB CHARACTERS TRANSLATE TO 3 SPACES, NOT ASCII CODE 9!

    Idiom fix: string multiplication replaces the quadratic += loop
    (negative levels yield "" exactly as before).
    """
    return " ~t " * Level
def makereadoutstr(rospeed):
    """Return a shortened two-letter code for the read out setting."""
    codes = {'FAST': 'FA', 'SLOW': 'SL'}
    return codes.get(rospeed.upper(), '')
def leapdays(y1, y2):
    """
    Return number of leap years in range [y1, y2]
    Assume y1 <= y2 and no funny (non-leap century) years

    Bug fix: uses floor division — the original ``/`` is true division
    under Python 3 and returned a float.
    """
    return (y2 + 3) // 4 - (y1 + 3) // 4
def is_intstring(s):
    """
    Check if input argument parses as an integer

    Parameters
    ----------
    s = Some integer (as a string)
    """
    try:
        int(s)
    except ValueError:
        return False
    return True
def g_coordinates(parameters, actual_pos):
    """returns 3 coordinates from a g-code

    :param parameters: space-separated axis words, e.g. "x1.0 z4.5"
    :param actual_pos: current [x, y, z] used for any axis not mentioned
    :return: [x, y, z] list of floats

    Bug fix: the y and z defaults previously both read actual_pos[0]
    (the x value); they now default to actual_pos[1] and actual_pos[2].
    """
    # default values: keep the current position for unmentioned axes
    x, y, z = actual_pos[0], actual_pos[1], actual_pos[2]
    # parse text
    for param in parameters.split(' '):
        axis = param[0]
        value = float(param[1:])
        if axis == 'x':
            x = value
        elif axis == 'y':
            y = value
        elif axis == 'z':
            z = value
    return [x, y, z]
def convArabicToOrdLetter(arabicNum, lowerCase=0):
    """
    Convert the number to the corresponding letter, indexing the alphabet
    where A is 1. If lowerCase == 1 then the base letter corresponding to 1
    is "a" (lower case alpha).

    Robustness fix: numbers below 1 now also raise ValueError — previously
    only values above 26 were rejected, so 0 and negatives silently produced
    non-letter characters.
    """
    if not 1 <= arabicNum <= 26:
        raise ValueError("Overflow")
    base = ord("a") if lowerCase == 1 else ord("A")
    return chr(base - 1 + arabicNum)
def get_signed_value(obj):
    """
    Returns signed integer from LLDB value.

    :param lldb.SBValue obj: LLDB value object.
    :return: Signed integer from LLDB value.
    :rtype: int | None
    """
    if obj is None:
        return None
    return obj.GetValueAsSigned()
def get_chunks(sequence, chunk_size):
    """Split sequence into chunks.

    :param list sequence:
    :param int chunk_size:
    """
    chunks = []
    for start in range(0, len(sequence), chunk_size):
        chunks.append(sequence[start:start + chunk_size])
    return chunks
def identity(*args):
    """Identity function.

    Accepts any positional arguments, and returns them. Packs into a tuple
    if there is more than one.

    Example::

        assert identity(1, 2, 3) == (1, 2, 3)
        assert identity(42) == 42
        assert identity() is None
    """
    if len(args) == 0:
        return None
    if len(args) == 1:
        return args[0]
    return args
def rot32( w, nLeft ):
    """
    Rotate 32-bit word left by nLeft or right by -nLeft
    without creating a Python long.
    Timing depends on nLeft but not on w.

    NOTE(review): assumes w is a 32-bit *signed* integer (as produced by
    e.g. struct unpacking) — TODO confirm against callers.
    """
    nLeft &= 31  # which makes nLeft >= 0
    if nLeft == 0:
        return w
    # Note: now 1 <= nLeft <= 31.
    #     RRRsLLLLLL   There are nLeft RRR's, (31-nLeft) LLLLLL's,
    # =>  sLLLLLLRRR   and one s which becomes the sign bit.
    # RRR: the top nLeft magnitude bits, shifted down to the low end.
    # The "(w >> 1) & 0x7fffFFFF" step clears the sign bit first so the
    # shift never sign-extends.
    RRR = ( ( ( w >> 1 ) & 0x7fffFFFF ) >> ( 31 - nLeft ) )
    # sLLLLLL: the remaining low bits, with the bit that will become the
    # new sign bit carried as a negative term so the result stays a
    # machine-sized int.
    sLLLLLL = -( (1<<(31-nLeft)) & w ) | (0x7fffFFFF>>nLeft) & w
    return RRR | ( sLLLLLL << nLeft )
def absolute_to_relative_timestamps(profile):
    """Change timestamps from absolute to relative times.

    :param profile: a memory profile dictionary from memory_profiler
    :return: the same dict, with profile['timestamp'] rebased in place so
        the first timestamp becomes 0

    Robustness fix: an empty timestamp list is returned unchanged instead
    of raising IndexError on timestamps[0].
    """
    timestamps = profile['timestamp']
    if not timestamps:
        return profile
    baseline = timestamps[0]
    # slice-assign so the original list object is mutated, as before
    profile['timestamp'][:] = [t - baseline for t in timestamps]
    return profile
def tuple2dict(tup):
    """TUP is an iterable of 2-tuples. Return a dict where the first
    element of each tuple hashes to the second (later pairs win on
    duplicate keys).
    """
    return {pair[0]: pair[1] for pair in tup}
def cpu_used(cpuset):
    """Parse the cpu set string as output by cgroup (e.g. "0-2,5")."""
    used = set()
    for group in cpuset.split(","):
        bounds = group.split("-")
        if len(bounds) == 1:
            # a single cpu id; skip empty fragments
            if bounds[0]:
                used.add(int(bounds[0]))
        else:
            # an inclusive "start-end" range
            low, high = int(bounds[0]), int(bounds[1])
            used.update(range(low, high + 1))
    return list(used)
def prlimit_command(command_list, virtual_memory_limit):
    """
    Prepend memory limiting arguments to a command list to be run with
    subprocess. Uses the `prlimit` program to set the memory limit.

    The `virtual_memory_limit` size is in bytes.

    prlimit arguments:
    -v, --as[=limits]
           Address space limit.
    """
    prefix = ["prlimit", f"-v{virtual_memory_limit}"]
    return prefix + command_list
def get_point_uuid(msg):
    """Returns a point uuid: the string form of the passed message."""
    return str(msg)
def plain_method(apple, pear, banana=9):
    """Return the three arguments packed into a tuple."""
    result = (apple, pear, banana)
    return result
def split_naming(naming_format):
    """Gets the prefix and postfix from the hostname naming format."""
    prefix = naming_format.split('{{')[0]
    postfix = naming_format.split('}}')[-1]
    return {'prefix': prefix, 'postfix': postfix}
def return_int(user_input):
    """Check and return an integer from user input; None (with a message)
    when the input is not a valid integer."""
    try:
        value = int(user_input)
    except ValueError:
        print("Oops! Not a valid integer format.")
        return None
    return value
def _constructRequestURL(django_url_pattern): """Constructs a URL which will start a task binded with the given pattern. Args: django_url_pattern: pattern which the resulted URL will refer to """ return '/' + django_url_pattern[0][1:-1]
def parse_line(line):
    """Parse a line of a Maven dependency-tree file and extract dependency
    information; returns None for unparseable lines and pom packaging."""
    parts = line.rstrip().lstrip(' -+|\\').split(':')
    scope = None
    classifier = None
    field_count = len(parts)
    if field_count == 4:
        gid, aid, pkg, version = parts
    elif field_count == 5:
        gid, aid, pkg, version, scope = parts
    elif field_count == 6:
        gid, aid, pkg, classifier, version, scope = parts
    else:
        return None
    if pkg == "pom":
        return None
    return {
        "group_id": gid,
        "artifact_id": aid,
        "packaging": pkg,
        "classifier": classifier,
        "version": version,
        "scope": scope,
    }
def is_key(sline):
    """
    Check if the given splitted line is an OGS key.

    Parameters
    ----------
    sline : list of str
        given splitted line
    """
    first_char = sline[0][0]
    return first_char == "$" or first_char == "#"
def _extract_and_sort_requests(pages): """Pull entries from har text and sort into chronological order""" entries = pages["log"]["entries"] entries.sort(key=lambda n: n["startedDateTime"]) return entries
def has(prop, object_or_dct):
    """
    Implementation of ramda has.

    :param prop: key (for dicts) or attribute name (for objects) to test
    :param object_or_dct: dict tested with membership, anything else with hasattr
    :return: bool

    Bug fix: the isinstance arguments were reversed
    (``isinstance(dict, object_or_dct)``), which raised TypeError whenever a
    dict instance was passed.
    """
    if isinstance(object_or_dct, dict):
        return prop in object_or_dct
    return hasattr(object_or_dct, prop)
def results_exist(parms_fixed, pool_results):
    """
    Help function to check whether results existed already.

    Parameters
    ===========
    parms_fixed : list, index of parameters that are to fix
    pool_results : dict, contains both index of parameters fixed and the
        corresponding results

    Return
    =======
    skip_cal : False when no stored result exists, otherwise the stored
        measures for the matching parameter set
    """
    if pool_results == {}:
        return False
    known_parms = pool_results['parms']
    if parms_fixed not in known_parms:
        return False
    index_measure = known_parms.index(parms_fixed)
    return pool_results[f'measures_{index_measure}']
def suma(num1, num2):
    """Add two imaginary (complex) numbers given as ordered pairs.

    (list 1D, list 1D) -> tuple (real_sum, imag_sum)
    """
    real = num1[0] + num2[0]
    imag = num1[1] + num2[1]
    return (real, imag)
def id_from_uri(uri):
    """
    extracts the id from an ARCHE-URL like https://whatever.com/123 -> 123

    :param uri: some ARCHE-URL
    :type uri: str
    :return: the actual ID as a string, or "" if the last segment is not
        numeric
    :rtype: str
    """
    # drop a single trailing slash, if present
    if uri.endswith('/'):
        uri = uri[:-1]
    last_segment = uri.split('/')[-1]
    try:
        return str(int(last_segment))
    except ValueError:
        return ""
def check_diagonal_up(board, num_rows, num_cols):
    """Check if any 4 are connected diagonally up (bottom left to top
    right). Returns bool."""
    # only cells with 3 rows above and 3 columns to the right can anchor a
    # 4-long up-diagonal
    for row in range(3, num_rows):
        for col in range(num_cols - 3):
            start = board[row][col]
            if start == " ":
                continue
            if all(board[row - step][col + step] == start for step in range(1, 4)):
                return True
    return False
def record_clock_sync_marker(syncId: str) -> dict:
    """Record a clock sync marker in the trace.

    Parameters
    ----------
    syncId: str
            The ID of this clock sync marker
    """
    params = {"syncId": syncId}
    return {"method": "Tracing.recordClockSyncMarker", "params": params}
def get_channel_properties(port):
    """Retrieves a channel's binding:profile dict"""
    profile = port["binding:profile"]
    return profile
def index(redirects, index_map, k):
    """Find the index of an article name after redirect resolution,
    assigning the next free index for names not yet seen."""
    target = redirects.get(k, k)
    if target not in index_map:
        index_map[target] = len(index_map)
    return index_map[target]
def _generate_result(middle_point, pypts, confi): """Format the result: Define the image recognition result format.""" ret = dict(result=middle_point, rectangle=pypts, confidence=confi) return ret
def traverse_lexemes(expr, fn):
    """
    Apply a function to every expression in a lexeme tree, bottom-up.

    Eg. given [['a', 'and', 'b'], 'or', ['c', 'and', 'd']]
    It applies the function like:
    fn([fn(['a', 'and', 'b']), 'or', fn(['c', 'and', 'd'])])

    This is recursive. We're not worried about stack overflow here.
    """
    if isinstance(expr, str):
        # leaves (plain strings) are returned untouched
        return expr
    transformed_terms = [traverse_lexemes(term, fn) for term in expr]
    return fn(transformed_terms)
def list_to_int(digits):
    """Converts a list of integers corresponding to digits to a number.
    e.g.: list_to_int([4, 9, 2]) == 492.

    Args:
        digits: list of integers, which are the digits of the number."""
    number = 0
    for digit in digits:
        # shift previous digits left one decimal place, then append this one
        number = number * 10 + int(digit)
    return number
def safe_update_ewma(prev_ewma, new_val, alpha):
    """
    Safely updates an exponentially weighted moving average.

    If the previous EWMA is -1 (unknown), then the new EWMA is assumed to
    be the unweighted new value. If the new value is unknown (-1), then the
    EWMA does not change.
    """
    if prev_ewma == -1:
        return new_val
    if new_val == -1:
        return prev_ewma
    return alpha * new_val + (1 - alpha) * prev_ewma
def RealToRelative(filepath, basepath):
    """Returns a relative path from an absolute basepath and filepath."""
    path_parts = filepath.split('/')
    base_parts = basepath.split('/')
    # count the shared leading components
    common = 0
    limit = min(len(path_parts), len(base_parts))
    while common < limit and path_parts[common] == base_parts[common]:
        common += 1
    # climb out of the remaining base components, then descend into the path
    rel_parts = ['..'] * (len(base_parts) - common) + path_parts[common:]
    return '/'.join(rel_parts)
def get_events_and_selectors(kwargs):
    """Extract events (keys starting with "on_") and corresponding Tk-style
    selectors from kwargs; matching keys are popped from kwargs."""
    event_keys = [key for key in kwargs if key.startswith("on_")]
    events = {}
    for key in event_keys:
        events[key] = kwargs.pop(key)
    selectors = {key: f"<{key[3:].upper()}>" for key in events}
    return events, selectors
def diff_score(v1, v2):
    """Return the inverse of the Euclidean distance between both vectors
    (0 when the vectors are identical)."""
    distance = sum((a - b) ** 2 for a, b in zip(v1, v2)) ** 0.5
    return 0 if distance == 0 else 1.0 / distance
def preconvert_flag(flag, name, type_):
    """
    Converts the given `flag` to an acceptable flag by the wrapper.

    Parameters
    ----------
    flag : `int`
        The flag to convert.
    name : `str`
        The name of the flag.
    type_ : `int` subtype
        The type of the output flag.

    Returns
    -------
    flag : `type_`

    Raises
    ------
    TypeError
        If `flag` was not given as `int` instance.
    ValueError
        If `flag` was given as `int` instance, but it's value is less than
        `0` or it's bit length is over `64`.
    """
    flag_type = type(flag)
    if flag_type is not type_:
        if not issubclass(flag_type, int):
            raise TypeError(f'`{name}` can be passed as `{type_.__name__}` instance, got {flag_type.__name__}.')
        flag = type_(flag)
    if flag < 0 or flag > ((1 << 64) - 1):
        raise ValueError(f'`{name}` can be only uint64, got {flag!r}.')
    return flag
def write_iter_in_file(path_to_file_write, iterable, fun_prepline=None, mode="w"):
    """
    Write an iterable as text lines in a file

    >>> mi_iterable = ["line1", "line2", "line3"]
    >>> write_iter_in_file("file.txt", mi_iterable)
    3

    :param path_to_file_write: path file where write
    :param iterable: Iterable where each element is a text line to write in disk
    :param fun_prepline: function with args count and line, to return a line
        to write in file. If None then apply this format each line:
        "{}\\n".format(line.rstrip()). By default: None
    :param mode: mode w or a. By default: w
    :return: number of write lines
    """
    count = 0
    with open(path_to_file_write, mode) as f:
        if fun_prepline is None:
            for count, line in enumerate(iterable, 1):
                f.write("{}\n".format(line.rstrip()))
        else:
            for count, line in enumerate(iterable, 1):
                f.write(fun_prepline(count, line))
    return count
def get_audio_muxings_from_configs(configs, stream_number, manifest_type):
    """
    Returns all audio muxings of a given codec to be used with a given
    manifest type (DASH/HLS).
    """
    muxings = []
    for config in configs:
        for muxing_spec in config['muxing_list']:
            if (muxing_spec['manifest_type'] == manifest_type
                    and muxing_spec['stream_number'] == stream_number):
                muxings.append(muxing_spec)
    return muxings
def _UrlBaseName(url): """Return the last component of the URL.""" return url.rstrip('/').rpartition('/')[-1]
def floating_range(buckets):
    """
    Computes an equally distributed list of bucket thresholds in [0, 1].

    Args:
        buckets (int): Number of buckets (must be >= 2)

    Returns:
        List: List of bucket thresholds of length buckets, from 0.0 to 1.0

    Bug fix: the previous ``int(100 / (buckets - 1))`` step produced an
    extra threshold whenever 100 was not divisible by buckets - 1
    (e.g. buckets=4 returned 5 values).
    """
    steps = buckets - 1
    return [i / steps for i in range(buckets)]
def mark_ref_paired(osm, pid):
    """Marks stops with common ref as paired="ref".

    Mutates the stop dicts in both mappings; pid stops additionally get the
    osm_id of the first matching osm stop. Returns the set of osm refs
    which are not in pid."""
    osm_keys = set(osm)
    pid_keys = set(pid)
    for ref in osm_keys & pid_keys:
        for stop in osm[ref]:
            stop["paired"] = "ref"
        first_osm_id = osm[ref][0]["osm_id"]
        for stop in pid[ref]:
            stop["paired"] = "ref"
            stop["osm_id"] = first_osm_id
    return osm_keys - pid_keys
def num_palindromes(n, leading_zeros=False):
    """Returns the number of all n-digit palindromes.

    leading_zeros: if ``True``, 037...730 is a valid palindrome (for example)
    """
    # a palindrome is fixed by its first ceil(n/2) digits; the first digit
    # has 9 choices (1-9) unless leading zeros are allowed (then 10)
    half_digits = (n - 1) // 2
    first_digit_choices = 9 + leading_zeros
    return first_digit_choices * 10 ** half_digits
def calc_profit(revenue_time_series, opex_time_series, cogs_time_series):
    """
    Profit Formula

    Notes
    ------------
    Profit = revenue from sales - running costs (OpEx and COGS)
    """
    # subtract sequentially (not via opex+cogs) to keep float results
    # bit-identical to the original expression
    profit_time_series = revenue_time_series - opex_time_series
    profit_time_series = profit_time_series - cogs_time_series
    return profit_time_series
def move_consonant_left(letters: list, positions: list) -> list:
    """Given a list of letters, and a list of consonant positions, move the
    consonant positions to the left, merging strings as necessary.

    >>> move_consonant_left(['a', 'b', '', '', 'bra'], [1])
    ['ab', '', '', '', 'bra']"""
    for pos in positions:
        letters[pos - 1] += letters[pos]
        letters[pos] = ""
    return letters
def mask_select(items, bool_mask):
    """Throw out items corresponding to False in bool_mask, and keep others."""
    assert len(items) == len(bool_mask)
    return [item for item, keep in zip(items, bool_mask) if keep]
def keep_only_max_recips(dic_ranks, max_recips=10):
    """
    Keeps only top @max_recips for each mid in dic_ranks
    Input and output dic in same format: {mid: [sender1, sender2, ...], }
    Also converts mid key from str to int if needed!
    """
    return {
        int(mid): list(recipients[:max_recips])
        for mid, recipients in dic_ranks.items()
    }
def set_build_description(state, project, trigger_job):
    """
    Set the build description, based on the state, project name, and job
    that triggered the project.

    :param state: <str> choices are "success", "failure", "error" or "pending"
    :param project: Project name associated with the running CodeBuild job
    :param trigger_job: The name of the CodeBuild project that triggered this build
    :return: <str> Description to be posted to the PR build
    """
    if state == "success":
        return f"{project} succeeded for {trigger_job}."
    if state in ("failure", "error"):
        return f"{project} is in state {state.upper()} for {trigger_job}! Check details to debug."
    if state == "pending":
        return f"{project} is pending for {trigger_job}..."
    return f"Unknown state: {state}"
def autotype_seq(entry):
    """Return value for autotype sequence

    Args: entry - dict
    Return: string ("" when no field named 'autotype' exists)
    """
    for field in entry['fields']:
        if field.get('name') == 'autotype':
            return field.get('value')
    return ""
def get_cells_letter(cell_range):
    """
    supporting function for better sorting dates, returns cell's range
    letter index. if it contains 2 letters, returns 'Z' + letter to release
    sorting
    """
    first_coordinate = str(cell_range).split(':')[0]
    letters = ''.join(ch for ch in first_coordinate if ch.isalpha())
    if len(letters) == 2:
        letters = 'Z' + letters[1]
    return letters
def python_distance(a, b, stopvalue=-1):
    """Calculates the distance for use in similarity calculation. Python version.

    Row-by-row Levenshtein distance with an early-stop: once the smallest
    value in the current row exceeds ``stopvalue``, no better final distance
    is possible and that lower bound is returned instead.

    :param a: first sequence
    :param b: second sequence
    :param stopvalue: distance threshold for the early exit; -1 (default)
        means "no threshold" (it is replaced by len(b), the row minimum's
        upper bound)
    :return: the edit distance, or the row minimum if it exceeded stopvalue
    """
    l1 = len(a)
    l2 = len(b)
    if stopvalue == -1:
        stopvalue = l2
    # first DP row is 0..l1; a range works here because it is only read,
    # never written (the writable rows below are real lists)
    current = range(l1+1)
    for i in range(1, l2+1):
        previous, current = current, [i]+[0]*l1
        least = l2
        for j in range(1, l1 + 1):
            change = previous[j-1]
            if a[j-1] != b[i-1]:
                change = change + 1
            insert = previous[j] + 1
            delete = current[j-1] + 1
            current[j] = min(insert, delete, change)
            if least > current[j]:
                least = current[j]
        #The smallest value in the current array is the best (lowest) value
        #that can be attained in the end if the strings are identical further
        if least > stopvalue:
            return least
    return current[l1]
def calculate_manhattan_dist(idx, value, n):
    """calculate the manhattan distance of a single tile on an n-wide board"""
    goal_row, goal_col = divmod(value, n)  # row/col index at goal
    current_row, current_col = divmod(idx, n)  # current row/col index
    return abs(goal_row - current_row) + abs(goal_col - current_col)
def pil_to_rgb(pil):
    """Convert the color from a PIL-compatible integer to RGB.

    Parameters:
      pil: a PIL compatible color representation (0xBBGGRR)

    Returns:
      The color as an (r, g, b) tuple in the range:
      r: [0...1]
      g: [0...1]
      b: [0...1]

    >>> '(%g, %g, %g)' % pil_to_rgb(0x0080ff)
    '(1, 0.501961, 0)'
    """
    red = pil & 0xff
    green = (pil >> 8) & 0xff
    blue = (pil >> 16) & 0xff
    return (red / 255.0, green / 255.0, blue / 255.0)
def bs_progress_bar(*args, **kwargs):
    """A Standard Bootstrap Progress Bar.

    http://getbootstrap.com/components/#progress

    param args (Array of Numbers: 0-100): Percent of Progress Bars
    param context (String): Adds 'progress-bar-{context} to the class attribute
    param contexts (Array of Strings): Cycles through contexts for stacked bars
    param text (String): True: shows value within the bar, False: uses sr span
    param striped (Boolean): Adds 'progress-bar-striped' to the class attribute
    param animated (Boolean): Adds 'active' to the class attribute if striped
    param min_val (0): Used for the aria-min value
    param max_val (0): Used for the aria-max value
    """
    default_contexts = kwargs.get(
        'contexts', ['', 'success', 'info', 'warning', 'danger']
    )
    bars = []
    for ndx, percent in enumerate(args):
        # a single explicit 'context' overrides the cycling defaults
        context = kwargs.get('context', default_contexts[ndx % len(default_contexts)])
        bars.append({'percent': percent, 'context': context})
    return {
        'bars': bars,
        'text': kwargs.pop('text', False),
        'striped': kwargs.pop('striped', False),
        'animated': kwargs.pop('animated', False),
        'min_val': kwargs.pop('min_val', 0),
        'max_val': kwargs.pop('max_val', 100),
    }
def _match_suffix(reference:str, start:int, comparison: str) -> bool:
    """Skips the first character and compares the rest of comparison.

    Compares comparison[1:] character-by-character against
    reference[start+1:], stopping at whichever runs out first.

    NOTE(review): the comparison length is clamped to
    min(len(comparison), len(reference) - start), so a comparison longer
    than the remaining reference only needs to match as a prefix — and two
    strings that match up to that clamp return True even if comparison
    extends past the reference. Presumably intentional for suffix/overlap
    matching; confirm against callers.

    :param reference: string being scanned
    :param start: index in reference where the candidate match begins
    :param comparison: candidate string whose first character has already
        been matched by the caller
    :return: True when all compared characters are equal
    """
    reference_length = len(reference)
    comparison_length = min(len(comparison), reference_length - start)
    reference_position = start + 1
    comparison_position = 1
    while comparison_position < comparison_length:
        if comparison[comparison_position] != reference[reference_position]:
            return False
        comparison_position += 1
        reference_position += 1
    return True
def nullable_snowflake_tuple_to_string_array(nullable_snowflake_tuple):
    """
    Converts a nullable snowflake tuple to an array representing it.

    Parameters
    ----------
    nullable_snowflake_tuple : `None` or `tuple` of `int`
        The value to convert.

    Returns
    -------
    array : `list` of `str`
    """
    if nullable_snowflake_tuple is None:
        return []
    return [str(snowflake) for snowflake in nullable_snowflake_tuple]
def create_fastq_line(read_id, sequence, q_values):
    """Create a fastq string from the necessary fields. Do not include newlines!

    :param read_id: unique identifier for read (an '@' is prepended if missing)
    :param sequence: sequence of nucleotides
    :param q_values: quality score values associated with the sequence
    :return: the four fastq lines joined with newlines (no trailing newline)

    Fix: corrected typos in the runtime assertion messages
    ("characterss", "must to be").
    """
    # make sure we have not included newline characters
    assert read_id.find("\n") == -1, "Remove newline characters from read_id"
    assert sequence.find("\n") == -1, "Remove newline characters from sequence"
    assert q_values.find("\n") == -1, "Remove newline characters from q_values"
    assert len(sequence) == len(q_values), "sequence and q_values must be the same size"

    if not read_id.startswith("@"):
        read_id = '@' + read_id
    return read_id + "\n" + sequence + "\n" + "+\n" + q_values
def jobStatus(alljobs, verbose=1):
    """ Print status for a list of jobs.

    Returns (runnable_jobs, incomplete_jobs)."""
    print('job status: gathered %d jobs' % len(alljobs))
    incomplete = [job for job in alljobs if not job.complete()]
    runnable = [job for job in incomplete if job.canrun()]
    if verbose >= 2:
        for i, job in enumerate(incomplete):
            print('job %d: %s' % (i, job))
    if verbose:
        print('  %d/%d to run' % (len(runnable), len(incomplete)))
    return runnable, incomplete
def coverage(cov):
    """Function to format coverage.

    Parses a METAR-style cloud coverage string (e.g. "SCT025 BKN100") into a
    dict of up to four layer conditions/heights plus a human-readable
    summary string. Returns the all-'None' default dict when cov == "None".
    """
    # defaults: every layer unknown, no parsed string
    d = {'parsed' : 'None', 'l1_cond' : 'None', 'l1_hgt' : 'None',
         'l2_cond' : 'None', 'l2_hgt' : 'None', 'l3_cond' : 'None',
         'l3_hgt' : 'None', 'l4_cond' : 'None', 'l4_hgt' : 'None',
         'unit' : 'None', 'string' : 'N/A'}
    if cov == "None":
        return d
    d['parsed'] = cov # add in the parsed string
    d['unit'] = 'feet' # set the unit.
    coverage_split = cov.split(" ")
    # enumerate it: see default dictionary cond and hgt vars.
    # abbreviation -> english condition name
    cloud_d = {'CLR' : 'Clear', 'FEW' : 'Few', 'SCT' : 'Scattered', 'BKN' : 'Broken', 'OVC' : 'Overcast'}
    string = ''
    for index, element in enumerate(coverage_split):
        if 'CLR' in element:
            # NOTE(review): this branch writes l{index}_cond, not
            # l{index + 1}_cond like the else branch — with CLR at index 0
            # it creates 'l0_cond'/'l0_hgt' keys that are not in the
            # default dict. Presumably CLR only ever appears alone;
            # confirm against callers.
            d[f'l{index}_cond'] = 'Clear'
            d[f'l{index}_hgt'] = '0000'
            string = 'Clear'
        else:
            if index > 0:
                # adds in the comma appropriately.
                string += ", "
            # extract the conditions, make english-like, same with height.
            conditions = cloud_d[element[:3]]
            # remaining digits are hundreds of feet
            height = str(int(element[3:]) * 100)
            # add into dictionary at appropriate height level.
            # syntax a bit tricky, but iterating through l1_hgt, l2_hgt, etc
            d[f'l{str(index + 1)}_cond'] = conditions
            if height != '0':
                d[f'l{str(index + 1)}_hgt'] = height
                # form the string and append.
                string += conditions + " at " + height + " feet"
            else:
                d[f'l{str(index + 1)}_hgt'] = "0000"
                # form the string and append.
                string += conditions + " at surface"
    string += '.' # append a period to the string.
    d['string'] = string # add in the string.
    return d
def rshift(val, n):
    """
    Arithmetic right shift, preserves sign bit (32-bit semantics).

    https://stackoverflow.com/a/5833119 .
    """
    # reduce to the 32-bit two's-complement representation, then shift
    unsigned32 = val % 0x100000000
    return unsigned32 >> n
def pick_one_in_increment(deck, increment):
    """This reverses a deck shuffled with the "deal with increment N" technique.

    # 012301230123
    # *--*--*--*   => 0321
    Only valid when gcd(len(deck), increment) == 1, otherwise indices repeat.
    """
    size = len(deck)
    result = []
    for position in range(size):
        result.append(deck[position * increment % size])
    return result
def find_pareto_front(population):
    """Finds a subset of nondominated individuals in a given list

    :param population: a list of individuals; each must expose a
        ``fitness`` attribute supporting ``dominates(other)`` and ``==``
        (e.g. a DEAP fitness — TODO confirm)
    :return: a set of indices corresponding to nondominated individuals
    """
    # start with everyone on the front and knock out dominated indices
    pareto_front = set(range(len(population)))
    for i in range(len(population)):
        if i not in pareto_front:
            # already dominated by an earlier individual; skip as pivot
            continue
        ind1 = population[i]
        for j in range(i + 1, len(population)):
            ind2 = population[j]
            # if individuals are equal on all objectives, mark one of them (the first encountered one) as dominated
            # to prevent excessive growth of the Pareto front
            if ind2.fitness.dominates(ind1.fitness) or ind1.fitness == ind2.fitness:
                pareto_front.discard(i)
            if ind1.fitness.dominates(ind2.fitness):
                pareto_front.discard(j)
    return pareto_front
def resolve_locations(ctx, strategy, d):
    """Resolve $(location) references in the values of a dictionary.

    NOTE(review): this is Bazel Starlark (uses ctx.expand_location), not
    plain Python.

    Args:
      ctx: the 'ctx' argument of the rule implementation function
      strategy: a struct with an 'as_path(string) -> string' function
      d: {string: string} dictionary; values may contain $(location)
         references for labels declared in the rule's 'srcs' and
         'tools' attributes

    Returns:
      {string: string} dict, same as 'd' except "$(location)" references
      are resolved.
    """
    location_expressions = []
    # parts[k] holds the text before and after the $(location ...) token
    parts = {}
    was_anything_to_resolve = False
    for k, v in d.items():
        # Look for "$(location ...)" or "$(locations ...)", resolve if found.
        # _validate_attributes already ensured that there's at most one $(location/s ...) in "v".
        if "$(location" in v:
            tokens = v.split("$(location")
            was_anything_to_resolve = True
            closing_paren = tokens[1].find(")")
            location_expressions.append("$(location" + tokens[1][:closing_paren + 1])
            parts[k] = (tokens[0], tokens[1][closing_paren + 1:])
        else:
            # keep a placeholder so indices line up with d's iteration order
            location_expressions.append("")

    resolved = {}
    if was_anything_to_resolve:
        # Resolve all $(location) expressions in one go. Should be faster than resolving them
        # one-by-one.
        all_location_expressions = "<split_here>".join(location_expressions)
        all_resolved_locations = ctx.expand_location(all_location_expressions)
        resolved_locations = strategy.as_path(all_resolved_locations).split("<split_here>")

        i = 0
        # Starlark dictionaries have a deterministic order of iteration, so the element order in
        # "resolved_locations" matches the order in "location_expressions", i.e. the previous
        # iteration order of "d".
        for k, v in d.items():
            if location_expressions[i]:
                head, tail = parts[k]
                resolved[k] = head + resolved_locations[i] + tail
            else:
                resolved[k] = v
            i += 1
    else:
        resolved = d

    return resolved
def valid_alien_token() -> bool:
    """Check if there is a valid AliEn token.

    The function must be robust enough to fetch all possible xrd error
    states which it usually gets from the stdout of the query process.

    Args:
        None.

    Returns:
        True if there is a valid AliEn token, or false otherwise.
    """
    # With JAliEn, this information is no longer available, so this is a
    # no-op that always just returns True.
    return True
def splitem(query):
    """
    Split a query into choices

    >>> splitem('dog, cat')
    ['dog', 'cat']

    Disregards trailing punctuation.

    >>> splitem('dogs, cats???')
    ['dogs', 'cats']

    >>> splitem('cats!!!')
    ['cats']

    Allow or

    >>> splitem('dogs, cats or prarie dogs?')
    ['dogs', 'cats', 'prarie dogs']

    Honors serial commas

    >>> splitem('dogs, cats, or prarie dogs?')
    ['dogs', 'cats', 'prarie dogs']

    Allow choices to be prefixed by some ignored prompt.

    >>> splitem('stuff: a, b, c')
    ['a', 'b', 'c']
    """
    trimmed = query.rstrip('?.!')
    # everything before the last ':' is an ignored prompt
    _, _, remainder = trimmed.rpartition(':')
    choices = remainder.split(',')
    # the final comma-separated chunk may itself be an "x or y" pair
    last = choices.pop()
    choices.extend(last.split(' or '))
    return [choice.strip() for choice in choices if choice.strip()]
def _normalize_path(p: str): """Fix a given path to work with ExeFS filenames.""" if p.startswith('/'): p = p[1:] # while it is technically possible for an ExeFS entry to contain ".bin", # this would not happen in practice. # even so, normalization can be disabled by passing normalize=False to # ExeFSReader.open if p.lower().endswith('.bin'): p = p[:4] return p
def _filter_by_group_name_and_or_metadata_name(gvar, qs): """ Internal function to filter a query set by the specified group name. """ if 'group-name' in gvar['command_args']: for _ix in range(len(qs)-1, -1, -1): if qs[_ix]['group_name'] != gvar['command_args']['group-name']: del(qs[_ix]) if 'metadata-name' in gvar['command_args']: for _ix in range(len(qs)-1, -1, -1): if qs[_ix]['metadata_name'] != gvar['command_args']['metadata-name']: del(qs[_ix]) return qs
def convert_to_opml_fragment(outline, indent = 4, curr_indent = 0):
    """Converts the outline to an OPML fragment

    Strings become self-closing <outline/> elements; (title, children) pairs
    become nested <outline> elements, indented by *indent* per level.
    """
    pad = curr_indent * " "
    rendered = []
    for item in outline:
        if type(item) is str:
            rendered.append('%s<outline text="%s" />' % (pad, item))
        else:
            rendered.append('%s<outline text="%s">' % (pad, item[0]))
            rendered.append(convert_to_opml_fragment(item[1], indent, indent + curr_indent))
            rendered.append('%s</outline>' % pad)
    return '\n'.join(rendered)
def safeFileName(name):
    """Return the URL-safe base64 encoding of *name*.

    Accepts either str or bytes. str input is UTF-8 encoded first and the
    result is returned as str; bytes input returns bytes, as before.

    BUGFIX: the original passed str straight to urlsafe_b64encode, which
    raises TypeError on Python 3 (it requires a bytes-like object).
    """
    import base64
    if isinstance(name, str):
        return base64.urlsafe_b64encode(name.encode('utf-8')).decode('ascii')
    return base64.urlsafe_b64encode(name)
def bin2str(b):
    """Decode a string of binary digits into text, one character per 8 bits."""
    octets = (b[start:start + 8] for start in range(0, len(b), 8))
    return ''.join(chr(int(octet, 2)) for octet in octets)
def join_lists(lists):
    """Concatenate the given lists into a single flat list."""
    joined = []
    for sublist in lists:
        joined.extend(sublist)
    return joined
def detector_tab_is_not_empty(list):
    """
    @param list : a list of result
    @rtype : False if all the items in the list are empty, True otherwise
    """
    # NOTE: the parameter name shadows the builtin `list`; kept unchanged for
    # backward compatibility with keyword-argument callers.
    # BUGFIX: the original fell off the end and returned None (not False) when
    # every item was empty; any() returns a proper bool and is the idiom for
    # "at least one truthy element".
    return any(list)
def try_get_raw_exception_representation(exception):
    """
    Tries to get raw exception representation.

    Parameters
    ----------
    exception : ``BaseException``
        The respective exception instance.

    Returns
    -------
    raw_exception_representation : `str`
    """
    # This function is deliberately paranoid: it is called when repr(exception)
    # itself raised, so every attribute access, repr() and str() below is
    # guarded against raising again.
    raw_exception_representation_parts = [
        '> repr(exception) raised, trying to get raw representation.\n'
    ]

    # Resolve the exception's class name, falling back to '<Exception>' if the
    # name is missing, not str-like, or cannot be converted to str.
    exception_name = getattr(type(exception), '__name__')
    if type(exception_name) is str:
        # Exact str: use as-is.
        pass
    elif isinstance(exception_name, str):
        # str subclass: try to normalise to a plain str.
        try:
            exception_name = str(exception_name)
        except:
            exception_name = '<Exception>'
    else:
        exception_name = '<Exception>'

    raw_exception_representation_parts.append(exception_name)
    raw_exception_representation_parts.append('(')

    # Render the constructor arguments one by one, substituting a positional
    # placeholder for any argument whose repr()/str() raises.
    try:
        args = getattr(exception, 'args', None)
    except:
        pass
    else:
        if (args is not None) and (type(args) is tuple):
            length = len(args)
            if length:
                index = 0
                while True:
                    element = args[index]

                    try:
                        element_representation = repr(element)
                    except:
                        element_representation = f'<parameter_{index}>'
                    else:
                        if type(element_representation) is not str:
                            # repr returned a str subclass; coerce to plain str.
                            try:
                                element_representation = str(element_representation)
                            except:
                                element_representation = f'<parameter_{index}>'

                    raw_exception_representation_parts.append(element_representation)

                    index += 1
                    if index == length:
                        break

                    # Separator between arguments (not after the last one).
                    raw_exception_representation_parts.append(', ')
                    continue

    raw_exception_representation_parts.append(')')
    return ''.join(raw_exception_representation_parts)
def clamp(value, start=0, end=1):
    """
    Clamp a number.

    If the number is inside the interval [start, end] it's returned unchanged.
    Otherwise, the nearest interval limit is returned.
    """
    # Upper bound is checked first, matching the original evaluation order.
    return end if value > end else (start if value < start else value)
def scientific(number):
    """Format *number* in scientific notation with two decimal places."""
    return f"{number:.2e}"
def git_repo(repo):
    """
    Tests if a repo URL is a git repo, then returns the repo url.

    Returns None when the URL is not recognized as a git repository.
    """
    # native git protocol
    if repo.startswith('git://'):
        return repo

    # generic http(s) URL ending in .git
    if repo.startswith(('https://', 'http://')) and repo.endswith('.git'):
        return repo

    # known git hosting services
    known_hosts = ('https://git.tuxfamily.org/',
                   'http://git.pond.sub.org/',
                   'https://gitorious.org/',
                   'https://git.code.sf.net/p/')
    if repo.startswith(known_hosts):
        return repo

    return None
def selectionSort(li):
    """Find the smallest element and put it in the first position, then find
    the second smallest and put it in the second position and repeat until you
    get to the last position.

    Sorts *li* in place and returns it.

    The previous implementation swapped on every smaller element found (a
    bubble-sort-like pass with O(n) swaps per position); this is the actual
    selection sort described above: scan for the minimum, then do one swap.

    >>> selectionSort([1, 2, 3, 4, 5])
    [1, 2, 3, 4, 5]

    >>> selectionSort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]

    >>> selectionSort([3, 2, 6, 1, 4, 2, 3, 1, 1, 5, 6, -2, 2.3])
    [-2, 1, 1, 1, 2, 2, 2.3, 3, 3, 4, 5, 6, 6]
    """
    for i in range(len(li) - 1):
        # Locate the index of the smallest remaining element.
        min_index = i
        for j in range(i + 1, len(li)):
            if li[j] < li[min_index]:
                min_index = j
        # Single swap per position.
        if min_index != i:
            li[i], li[min_index] = li[min_index], li[i]
    return li
def tokenize(phrase):
    """Tokenize to substrings on spaces.

    For each whitespace-separated word, emit every contiguous substring,
    ordered by length (shortest first), then by start position.
    """
    substrings = []
    for word in phrase.split():
        word_len = len(word)
        for size in range(1, word_len + 1):
            for start in range(word_len - size + 1):
                substrings.append(word[start:start + size])
    return substrings
def create_progress_bar(fraction, text):
    """
    Utility method that creates a text-based progress bar

    >>> bar = create_progress_bar(fraction=0.5, text='hello')
    >>> bar
    '[=================================== hello                                    ]'

    Parameters
    ----------
    text : str
        Text string to embed inside progress bar
    fraction : float
        Fraction of 1.0 representing how far progress bar is

    Returns
    -------
    str
        A text-based progress bar string of length 80
    """
    # Pad the label with one space on each side.
    label = ' ' + text + ' '
    # 78 interior columns between the brackets; fill proportionally.
    fill = '=' * min(78, int(round(fraction * 78)))
    cells = list('[{0:<78}]'.format(fill))
    # Center the label over the bar and splice it in.
    start = int((len(cells) - len(label)) / 2)
    cells[start : start + len(label)] = label
    return ''.join(cells)
def _get_bin(value, bins): """Returns the smallest index i of bins so that bin[i][0] <= value < bin[i][1], where bins is a list of tuples, like [(0,20), (20, 40), (40, 60)] """ for i in range(0, len(bins)): if bins[i][0] <= value < bins[i][1]: return i return -1
def iou(box1, box2, denom="min"):
    """Compute the intersection-over-union score of two boxes.

    Boxes are center-format 4-tuples (center0, center1, size0, size1); the
    first/third entries span one axis and the second/fourth the other.
    With denom="min" the intersection is divided by the smaller box area,
    otherwise by the union area.
    """
    # Overlap length along each axis (negative when the boxes are disjoint).
    overlap0 = (min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2])
                - max(box1[0] - 0.5 * box1[2], box2[0] - 0.5 * box2[2]))
    overlap1 = (min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3])
                - max(box1[1] - 0.5 * box1[3], box2[1] - 0.5 * box2[3]))
    intersection = max(0.0, overlap0) * max(0.0, overlap1)

    area1 = box1[2] * box1[3]
    area2 = box2[2] * box2[3]
    if denom == "min":
        control_area = min(area1, area2)
    else:
        control_area = area1 + area2 - intersection
    return intersection / control_area
def log2floor(n):
    """Return the exact value of floor(log2(n)) for a positive integer *n*.

    Uses only integer operations — no floating point is involved, so the
    result is exact even for arbitrarily large integers.
    """
    assert n > 0
    # For n > 0, n.bit_length() == floor(log2(n)) + 1.
    return n.bit_length() - 1
def get_bugzilla_url(bug_id):
    """Return the Mozilla bugzilla URL for the given bug id."""
    template = u'https://bugzilla.mozilla.org/show_bug.cgi?id=%d'
    return template % bug_id
def left_to_right_check(input_line: str, pivot: int) -> bool:
    """
    Check row-wise visibility from left to right.
    Return True if the number of buildings visible from the left-most hint,
    looking to the right, equals the hint; False otherwise.

    input_line - representing board row (hint, buildings, trailing cell).
    pivot - number on the left-most hint of the input_line.

    Fixes two defects in the previous version: it returned None (not False)
    when too few buildings were visible, and it returned True early even when
    MORE than `pivot` buildings were visible.

    >>> left_to_right_check("412453*", 4)
    True
    >>> left_to_right_check("452453*", 5)
    False
    >>> left_to_right_check("132354*", 3)
    False
    """
    # Strip the hint (first char) and the trailing cell (last char).
    buildings = input_line[1:-1]
    tallest = buildings[0]
    visible = 1  # the first building is always visible
    for height in buildings[1:]:
        # A building is visible only if it is taller than everything before it.
        if height > tallest:
            tallest = height
            visible += 1
    # Count ALL visible buildings before comparing against the hint.
    return visible == pivot