def calc_x(f, t):
    """Calc x coordinate by params.

    :param f: the interp params
    :type f: dict
    :param t: the accumulation of steps
    :type t: int
    :return: x coordinate
    :rtype: float
    """
    return f['a_x'] + f['b_x'] * t + f['c_x'] * t * t + f['d_x'] * t * t * t
def search(nums, target):
    """
    Suppose an array sorted in ascending order is rotated at some pivot
    unknown to you beforehand. You are given a target value to search. If
    found in the array return its index, otherwise return -1. Your
    algorithm's runtime complexity must be in the order of O(log n).

    :param nums: list[int]
    :param target: int
    :return: int
    """
    if not nums:
        return -1
    low, high = 0, len(nums) - 1
    while low <= high:
        mid = (low + high) // 2
        if target == nums[mid]:
            return mid
        if nums[low] <= nums[mid]:
            if nums[low] <= target <= nums[mid]:
                high = mid - 1
            else:
                low = mid + 1
        else:
            if nums[mid] <= target <= nums[high]:
                low = mid + 1
            else:
                high = mid - 1
    return -1
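As a quick illustration (not part of the original snippet), the rotated-array search might be exercised like this:

print(search([4, 5, 6, 7, 0, 1, 2], 0))  # expected: 4
print(search([4, 5, 6, 7, 0, 1, 2], 3))  # expected: -1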
def output_type(to_check: str) -> str:
    """Determine whether the output value of a file is a digit or a string.

    Args:
        to_check (str): The string variant of the value

    Returns:
        str: 'dig' if parsing to float succeeded, 'str' otherwise
    """
    try:
        float(to_check)
        return 'dig'
    except (TypeError, ValueError):
        return 'str'
def can_be_split_in_sum(digits, target, base=10):
    """Check if target can be reached by summing part of digits."""
    # Examples:
    # 81 9 -> true (because 8+1)
    # 6724 82 -> true (because 6+72+4)
    # 8281 91 -> true (because 8+2+81)
    # 9801 99 -> true (because 98+0+1)
    # 100 1 -> true (because 1+0)
    if digits < target:
        return False
    if digits == target:
        return True
    power = base
    while power <= digits:
        # power increases, tail increases, head decreases
        head, tail = divmod(digits, power)
        assert head
        if target < tail:
            return False
        if can_be_split_in_sum(head, target - tail, base):
            return True
        power *= base
    assert divmod(digits, power) == (0, digits)
    return False
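A few sanity checks of the examples listed in the comments (illustrative only, not from the original source):

print(can_be_split_in_sum(81, 9))     # True  (8 + 1)
print(can_be_split_in_sum(6724, 82))  # True  (6 + 72 + 4)
print(can_be_split_in_sum(100, 1))    # True  (1 + 0 + 0)
print(can_be_split_in_sum(81, 10))    # False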
def bin_to_num(binstr):
    """
    Convert a big endian byte-ordered binary string to an integer or long.
    """
    return int.from_bytes(binstr, byteorder='big', signed=False)
def updater(old, new):
    """Recursively update a dict."""
    for k, v in new.items():
        if type(v) == dict:
            old[k] = updater(old.get(k, {}), v)
        else:
            old[k] = v
    return old
def checksum(data):
    """Compute and return the checksum as an int.

    data -- list of 20 bytes (as ints), in the order they arrived in.
    """
    # group the data by word, little-endian
    data_list = []
    for t in range(10):
        data_list.append(data[2 * t] + (data[2 * t + 1] << 8))
    # compute the checksum on 32 bits
    chk32 = 0
    for d in data_list:
        chk32 = (chk32 << 1) + d
    # return a value wrapped around on 15 bits, and truncated to still fit into 15 bits
    checksum = (chk32 & 0x7FFF) + (chk32 >> 15)  # wrap around to fit into 15 bits
    checksum = checksum & 0x7FFF  # truncate to 15 bits
    return int(checksum)
def emotionV(frequencyVec, scoreVec):
    """Given the frequency vector and the score vector, compute the happs.

    Doesn't use numpy, but equivalent to `np.dot(freq,happs)/np.sum(freq)`.
    """
    tmpSum = sum(frequencyVec)
    if tmpSum > 0:
        happs = 0.0
        for i in range(len(scoreVec)):
            happs += frequencyVec[i] * float(scoreVec[i])
        happs = float(happs) / float(tmpSum)
        return happs
    else:
        return -1
def get_input_tool_name(step_id, steps):
    """Get the string with the name of the tool that generated an input."""
    inp_provenance = ''
    inp_prov_id = str(step_id)
    if inp_prov_id in steps:
        name = steps[inp_prov_id]['name']
        if 'Input dataset' in name:
            inp_provenance = "(%s)" % name
        else:
            inp_provenance = "(output of **%s** {%% icon tool %%})" % name
    return inp_provenance
def format_time(time_to_go):
    """Returns minutes and seconds string for given time in seconds"""
    if time_to_go < 60:
        return "%ds" % time_to_go
    return "%dm %ds" % (time_to_go / 60, time_to_go % 60)
def canonical_switch_pattern_print(s):
    """Canonically prints a parsed switch pattern.

    Parameters
    ----------
    s : Dict[str, int]
        A parsed switch pattern.

    Returns
    -------
    str
        Canonical print out.
    """
    txt = ""
    for e in sorted(s):
        txt += "%s %d\n" % (e, s[e])
    return txt[:-1]
def is_numeric(number: str):
    """
    Check to see if a value is "numeric"

    :param number str - String to check for numericness
    :return bool - True if it can be cast to float, otherwise false
    """
    # argument.isnumeric()
    # number[0] == "-"
    try:
        float(number)
        return True
    except ValueError:
        # not numeric
        return False
def test_acc_formatter(test_acc: float) -> str:
    """Wrap given test accuracy in a nice text message."""
    return f"Test Accuracy: {test_acc}"
def big_endian_to_int(b: bytes) -> int:
    """
    Big endian representation to integer.

    :param b: big endian representation
    :return: integer
    """
    return int.from_bytes(b, "big")
def update_Q(Qsa, Qsa_next, reward, alpha, gamma):
    """Updates the action-value function estimate using the most recent time step."""
    return Qsa + (alpha * (reward + (gamma * Qsa_next) - Qsa))
def softclip_regions(read_start, cigar):
    """
    The softclip regions function iterates over the cigar string and returns
    the positions and if they are "normal" or a softclipped region.

    :param read_start: An integer representing the start of the read
        (including unmapped bases)
    :param cigar: a list containing tuples representing the cigar string.
    :return regions: a 2d list containing the regions of a read and
        specifying if they are "normal" or softclipped.
    """
    regions = []
    cursor = read_start
    for element in cigar:
        if element[0] == 4:
            regions.append([cursor, cursor + element[1], 'softclip'])
        else:
            regions.append([cursor, cursor + element[1], 'normal'])
        cursor += element[1]
    return regions
def _is_long_optname(n):
    """Determine whether a given option name is "long" form."""
    return n.startswith("--")
def quadruple_quad_function(a, b, c, d):
    """
    [MF 125 @ 25:04]
    """
    p1 = ((a + b + c + d) ** 2 - 2 * (a ** 2 + b ** 2 + c ** 2 + d ** 2)) ** 2
    p2 = 64 * a * b * c * d
    return p1 - p2
def byte_format(n, roundto=2):
    """
    Runtime: O(1)
    """
    units = ["B", "KB", "MB", "GB", "TB", "PB"]  # ...
    idx = 0
    factor = 1024  # or 1000
    while n > factor:
        n = float(n) / factor
        idx += 1
    n = "%.{0}f".format(roundto) % n
    return "{0} {1}".format(n, units[idx])
def bin_centers(x):
    """
    Given a list of bin edges, return a list of bin centers.

    Useful primarily when plotting histograms.
    """
    return [(a + b) / 2.0 for a, b in zip(x[:-1], x[1:])]
def dedup_and_title_case_names(names):
    """Should return a list of names, each name appears only once"""
    names = set(names)
    title_names = [name.title() for name in names]
    return title_names
def declare_temp(var_type, var_name):
    """Create a declaration of the form

        (declare (temporary) <var_type> <var_name>)
    """
    return [['declare', ['temporary'], var_type, var_name]]
def scale_dim(w, h, max_size):
    """Scale a tuple such that it fits within a maximum size.

    Parameters
    ----------
    w : int
        Width
    h : int
        Height
    max_size : int
        The maximum width or height. `w` and `h` will be scaled such that
        neither is larger than `max_size`, maintaining the aspect ratio.

    Returns
    -------
    (int, int)
        The scaled width and height returned as a tuple.
    """
    if w > h:
        ratio = float(max_size) / float(w)
    else:
        ratio = float(max_size) / float(h)
    return (int(ratio * w), int(ratio * h))
def rgb_as_int(rgb):
    """Coerce RGB iterable to a tuple of integers"""
    if any([v >= 1. for v in rgb]):
        _rgb = tuple(rgb)
    else:
        _rgb = tuple((round(255 * c) for c in rgb))
    return _rgb
def bin_mask(mask):
    """binary mask representation (ex: 255.255.255.0)

    input mask = decimal mask --> str
    """
    mask = int(mask)
    decmask = mask * str(1) + (32 - mask) * str(0)
    o1 = str(int(decmask[0:8], 2))
    o2 = str(int(decmask[8:16], 2))
    o3 = str(int(decmask[16:24], 2))
    o4 = str(int(decmask[24:32], 2))
    return o1 + '.' + o2 + '.' + o3 + '.' + o4
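For illustration (assumed typical prefix-length inputs), the mapping to a dotted-quad mask looks like this:

print(bin_mask(24))   # 255.255.255.0
print(bin_mask("8"))  # 255.0.0.0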
def truncate_seq_from_asterisk_onwards(seq):
    """Truncate a protein sequence after we see an asterisk.
    Also remove the asterisk.

    # Arguments:
        seq (string): string representation of a sequence

    # Returns:
        ret (string): string representation of truncated sequence
    """
    idx = seq.find("*")
    ret = seq
    if idx >= 0:
        ret = seq[:idx]
    return ret
def radixSort(arr):
    """Sorts an unordered array using the radix sort method."""
    n = len(str(max(arr)))
    queue = [[] for i in range(10)]
    for loop in range(0, n):
        for curr in arr:
            # use integer division to extract the digit for this pass
            queue[(curr // 10 ** loop) % 10].append(curr)
        del arr[:]
        for bucket in queue:
            arr.extend(bucket)
            del bucket[:]
    return arr
def GetPrincipleQuantumNumber(atNum):
    """Get principal quantum number for atom number"""
    if atNum <= 2:
        return 1
    if atNum <= 10:
        return 2
    if atNum <= 18:
        return 3
    if atNum <= 36:
        return 4
    if atNum <= 54:
        return 5
    if atNum <= 86:
        return 6
    return 7
def next_departure(bus_id: int, min_time: int) -> int:
    """Compute the next departure of ``bus_id`` leaving earliest at ``min_time``."""
    # ERROR (pschanely, 2021-04-19):
    # When min_time is zero we get ZeroDivisionError here.
    wait_time = bus_id % min_time
    return min_time + wait_time
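The ERROR note above is preserved from the source; a possible corrected variant (an assumption, not the author's fix) takes the wait modulo bus_id instead, so min_time == 0 no longer divides by zero:

def next_departure_fixed(bus_id: int, min_time: int) -> int:
    # hypothetical fix: wait until the next multiple of bus_id at or after min_time
    wait_time = (-min_time) % bus_id
    return min_time + wait_time

print(next_departure_fixed(7, 939))  # 945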
def convert_corr_id(corr_id):
    """
    This method converts the correlation id into an integer that is small
    enough to be used with a modulo operation.

    :param corr_id: The correlation id as a String
    """
    # Select the final 4 digits of the string
    reduced_string = corr_id[-4:]
    reduced_int = int(reduced_string, 16)
    return reduced_int
def isnumeral(numeral: str, base_alphabet: str) -> bool:
    """
    Returns `True` if the input str is a numeral in the given base alphabet,
    `False` otherwise.

    Parameters
    ----------
    numeral: str
        The numeral string to check.
    base_alphabet: str
        A string containing the characters to be used as base.

    Returns
    -------
    out: bool
        `True` if `numeral` is a numeral in the given base, `False` otherwise.

    Raises
    ------
    TypeError
        If input `numeral` or `base_alphabet` are not str.
    """
    if not isinstance(numeral, str):
        raise TypeError(
            f"Invalid numeral type '{type(numeral)}'. Expected str."
        )
    if not isinstance(base_alphabet, str):
        raise TypeError(
            f"Invalid base_alphabet type '{type(base_alphabet)}'. Expected str."
        )
    a: set = set(base_alphabet)
    n: set = set(numeral)
    return n.issubset(a)
def mean(a_list):
    """
    Finds mean of list.

    a_list: a list
    Return value: mean
    """
    return float(sum(a_list)) / len(a_list)
def fix_polygon(pol, width, height):
    """
    This fixes polygons so there are no points outside the edges of the image

    Arguments:
        pol: a polygon (list)
        width, height: of the image

    NOTE: this corrects for a problem with Darwin; it may become unnecessary
    in the future

    Returns:
        a polygon (list)

    @author Dinis Gokaydin <d.gokaydin@nationaldrones.com>
    """
    for pt in pol:
        if pt['x'] >= width:
            pt['x'] = width - 1
        if pt['x'] < 0:
            pt['x'] = 0
        if pt['y'] >= height:
            pt['y'] = height - 1
        if pt['y'] < 0:
            pt['y'] = 0
    return pol
def triangle_tangent(triangle):
    """
    Takes section of curve defined by 3 points, finds tangent to the second
    point (this tangent is defined by its 2 points - triangle[1] and
    triangle[2])

    Input:  triangle [[xy], [xy], [xy]]
    Output: tangent [[xy], [xy]]
    """
    assert len(triangle) == 3, (
        "Input must be 3 points (finding tangent impossible). "
        "~ abstract_geometry.offset_bezier[triangle_tangent()]"
    )
    # make a copy of input list to protect it (prevent state change)
    triangle = [xy.copy() for xy in triangle]
    (ac_line, cb_line) = (triangle[:2], triangle[1:])
    ac_x_shift = ac_line[1][0] - ac_line[0][0]
    ac_y_shift = ac_line[1][1] - ac_line[0][1]
    tangent = [
        triangle[1],
        [triangle[2][0] + ac_x_shift, triangle[2][1] + ac_y_shift]
    ]
    return tangent
def number_negatives(seq):
    """Number of negative residues in a protein sequence"""
    # Convert sequence to upper case
    seq = seq.upper()
    # Count E's and D's, since these are the negative residues
    return seq.count('E') + seq.count('D')
def knapsack_0_1_recursive(w, wt, vt, n):
    """
    A naive recursive implementation of the 0-1 Knapsack Problem.

    A simple solution for the 0-1 knapsack problem is to consider all subsets
    of items and calculate the total weight and value of all subsets.
    Consider only the subsets whose total weight is smaller than W. From all
    such subsets, pick the maximum value subset.

    To consider all subsets of items, there can be two cases for every item:
    1. the item is included in the optimal subset.
    2. not included in the optimal set.

    Therefore, the maximum value that can be obtained from n items is the max
    of the following two values:
    1. Maximum value obtained by n-1 items and W weight (excluding nth item).
    2. Value of nth item plus maximum value obtained by n-1 items and W minus
       weight of the nth item (including nth item).

    If the weight of the nth item is greater than W, then the nth item cannot
    be included and case 1 is the only possibility.

    It should be noted that the recursive version computes the same
    sub-problems again and again. Time complexity of this naive recursive
    solution is exponential (2^n).

    :param w: total capacity
    :type w: int
    :param wt: weight of each element
    :type wt: list[int]
    :param vt: value of each element
    :type vt: list[int]
    :param n: number of elements
    :type n: int
    :return: the maximum value that can be put in a knapsack of capacity w
    :rtype: int
    """
    # base case
    if w == 0 or n == 0:
        return 0
    # if weight of the nth item is more than knapsack of capacity
    # w, then this item cannot be included in the optimal solution
    if wt[n - 1] > w:
        return knapsack_0_1_recursive(w, wt, vt, n - 1)
    else:
        # return the maximum of two cases:
        # (1) nth item included
        # (2) not included
        # state transition equation:
        # dp[i][j] = max{dp[i-1][j], dp[i-1][j - w[i]] + v[i]}
        return max(
            vt[n - 1] + knapsack_0_1_recursive(w - wt[n - 1], wt, vt, n - 1),
            knapsack_0_1_recursive(w, wt, vt, n - 1))
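A small usage check with illustrative values (not from the source):

values = [60, 100, 120]
weights = [10, 20, 30]
print(knapsack_0_1_recursive(50, weights, values, len(values)))  # expected: 220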
def parseFilename(filename):
    """
    Parse out filename from any specified extensions.
    Returns rootname and string version of extension name.

    Modified from 'pydrizzle.fileutil' to allow this module to be independent
    of PyDrizzle/MultiDrizzle.
    """
    # Parse out any extension specified in filename
    _indx = filename.find('[')
    if _indx > 0:
        # Read extension name provided
        _fname = filename[:_indx]
        extn = filename[_indx + 1:-1]
        # An extension was provided, so parse it out...
        if repr(extn).find(',') > 1:
            _extns = extn.split(',')
            # Two values given for extension:
            #    for example, 'sci,1' or 'dq,1'
            _extn = [_extns[0], int(_extns[1])]
        elif repr(extn).find('/') > 1:
            # We are working with GEIS group syntax
            _indx = str(extn[:extn.find('/')])
            _extn = [int(_indx)]
        elif isinstance(extn, str):
            # Only one extension value specified...
            if extn.isdigit():
                # We only have an extension number specified as a string...
                _nextn = int(extn)
            else:
                # We only have EXTNAME specified...
                _nextn = extn
            _extn = [_nextn]
        else:
            # Only integer extension number given, or default of 0 is used.
            _extn = [int(extn)]
    else:
        _fname = filename
        _extn = None
    return _fname, _extn
def merge_duplicate_rows(rows, db):
    """
    @param rows: rows to be grouped by
    @param db: database name, string
    """
    rows = list(rows)
    keys = set()
    for row in rows:
        for k in row[db]:
            keys.add(k)
    first_row = rows[0]
    other_rows = rows[1:]
    for row in other_rows:
        for i in keys:
            try:
                aa = first_row[db][i]
            except KeyError:
                try:
                    first_row[db][i] = row[db][i]
                except KeyError:
                    pass
                continue
            if i in row[db]:
                if row[db][i] != first_row[db][i]:
                    if not isinstance(aa, list):
                        aa = [aa]
                    aa.append(row[db][i])
                    first_row[db][i] = aa
                else:
                    continue
    return first_row
def zero_mean_unit_variance(mode='per_sample', axes='xyzc', mean=0, std=1, eps=1e-07):
    """normalize the tensor to have zero mean and unit variance"""
    dict_zero_mean_unit_variance = {
        'name': 'zero_mean_unit_variance',
        'kwargs': {
            'mode': mode,
            'axes': axes,
            'mean': mean,
            'std': std,
            'eps': eps
        }
    }
    return dict_zero_mean_unit_variance
def assemble_api_url(domain, operators, protocol='https'):
    """Assemble the requests api url."""
    return '{}://{}{}'.format(protocol, domain, operators)
def energy_supply_sector_model(hourly):
    """Return sample sector_model"""
    return {
        'name': 'energy_supply',
        'description': "Supply system model",
        'classname': 'EnergySupplyWrapper',
        'path': '../../models/energy_supply/run.py',
        'inputs': [
            {
                'name': 'natural_gas_demand',
                'dims': ['lad', 'hourly'],
                'coords': {'lad': ['a', 'b'], 'hourly': hourly},
                'absolute_range': [0, float('inf')],
                'expected_range': [0, 100],
                'dtype': 'float',
                'unit': 'GWh'
            }
        ],
        'outputs': [],
        'parameters': [],
        'interventions': [],
        'initial_conditions': []
    }
def get_region(filename, codename, version):
    """Get the region of an update"""
    if 'eea_global' in filename or 'eea_global' in codename or 'EU' in version:
        region = 'EEA'
    elif 'id_global' in filename or 'id_global' in codename or 'ID' in version:
        region = 'Indonesia'
    elif 'in_global' in filename or 'in_global' in codename or 'IN' in version:
        region = 'India'
    elif 'ru_global' in filename or 'ru_global' in codename or 'RU' in version:
        region = 'Russia'
    elif 'global' in filename or 'global' in codename or 'MI' in version:
        region = 'Global'
    else:
        region = 'China'
    return region
def binarySearch(array, l, r, x):
    """
    Search function using binary search algorithm. (Recursive)

    array - sorted array
    l - left index (in python 0)
    r - right index (len(array))
    x - value of index search
    """
    if r > l:
        # calculate mid of array
        mid = (l + r) // 2
        if array[mid] == x:
            return mid
        # if mid is bigger then reduce section with mid as right border
        elif array[mid] > x:
            return binarySearch(array, l, mid, x)
        # if mid is smaller then reduce section with mid+1 as left border
        else:
            return binarySearch(array, mid + 1, r, x)
    return -1
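Example calls, assuming the half-open convention documented above (r = len(array)); illustrative only:

data = [1, 3, 5, 7, 9]
print(binarySearch(data, 0, len(data), 7))  # 3
print(binarySearch(data, 0, len(data), 4))  # -1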
def merge(left, right):
    """Merging two sorted lists.

    :param left: Sorted list.
    :param right: Sorted list.
    :return: A merged sorted list
    """
    t = len(left) + len(right)  # total items to merge
    f = []  # place holder for final merged list
    l_i, r_i = 0, 0  # initializing left and right index
    # execute until list is full
    while len(f) != t:
        # if all items of left list have been exhausted
        # extend final list with leftovers from right list
        if l_i > len(left) - 1:
            f.extend(right[r_i:len(right)])
            break
        # if all items of right list have been exhausted
        # extend final list with leftovers from left list
        if r_i > len(right) - 1:
            f.extend(left[l_i:len(left)])
            break
        # if current-left is smaller than current-right - add to final list
        if left[l_i] < right[r_i]:
            f.append(left[l_i])  # adding current left
            l_i += 1  # incrementing left-index
        else:
            f.append(right[r_i])  # adding current right
            r_i += 1  # incrementing right-index
    return f
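Minimal illustration (assumed inputs):

print(merge([1, 4, 9], [2, 3, 10]))  # [1, 2, 3, 4, 9, 10]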
def get_recommendations(commands_fields, app_pending_changes):
    """
    :param commands_fields:
    :param app_pending_changes:
    :return: List of object describing command to run

    >>> cmd_fields = [
    ...     ['cmd1', ['f1', 'f2']],
    ...     ['cmd2', ['prop']],
    ... ]
    >>> app_fields = {
    ...     'f2': {'field': 'f2', 'user': 'api', 'updated': '00:00'}
    ... }
    >>> from pprint import pprint
    >>> pprint(get_recommendations(cmd_fields, app_fields))
    [{'command': 'cmd1', 'field': 'f2', 'updated': '00:00', 'user': 'api'}]
    """
    recommended_cmds = []
    for cmd in commands_fields:
        cmd_name = cmd[0]
        cmd_fields = cmd[1]
        for field in cmd_fields:
            if field in app_pending_changes.keys():
                recommended_cmds.append({
                    'command': cmd_name,
                    'field': field,
                    'user': app_pending_changes[field]['user'],
                    'updated': app_pending_changes[field]['updated'],
                })
                break
    return recommended_cmds
def downleft(i, j, table):
    """Returns the product of the down-left diagonal"""
    product = 1
    for num in range(4):
        if i + num > 19 or j - num < 0:
            product *= 1
        else:
            product *= int(table[i + num][j - num])
    return product
def is_numeric(value):
    """
    Returns True if value is numeric, returns False otherwise

    :param value: a data value returned by a RETS server
    :type value: str
    :rtype: bool
    :return: True for numeric value, False otherwise
    """
    try:
        float(value)  # can include things like '2e100'
    except ValueError:
        # Value was not numeric
        return False
    except TypeError:
        # Value was None
        return False
    return True
def strip_csv_header(csv_str: str, sep: str = '\n') -> str:
    """Remove header line from `csv_str`"""
    if not csv_str:
        return ''
    idx: int = csv_str.find(sep)
    if idx <= 0:
        return ''
    return csv_str[idx + 1:]
def smi_tokenizer(smi):
    """
    Tokenize a SMILES molecule or reaction
    """
    import re
    pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
    regex = re.compile(pattern)
    tokens = [token for token in regex.findall(smi)]
    assert smi == ''.join(tokens)
    return ' '.join(tokens)
def best_fit_slope_and_intercept(x_points, y_points):
    """Line of best fit for a set of points."""
    # https://stackoverflow.com/questions/22239691/code-for-best-fit-straight-line-of-a-scatter-plot-in-python
    x_bar = sum(x_points) / len(x_points)
    y_bar = sum(y_points) / len(y_points)
    nr_of_points = len(x_points)  # or len(y_points)
    num = sum([xi * yi for xi, yi in zip(x_points, y_points)]) - nr_of_points * x_bar * y_bar
    de_num = sum([xi ** 2 for xi in x_points]) - nr_of_points * x_bar ** 2
    gain = num / de_num
    offset = y_bar - gain * x_bar
    # print('best fit line:\ny = {:.2f} + {:.2f}x'.format(offset, gain))
    return gain, offset
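An illustrative check against perfectly linear points (assumed data, not from the source):

gain, offset = best_fit_slope_and_intercept([0, 1, 2, 3], [1, 3, 5, 7])
print(gain, offset)  # 2.0 1.0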
def get_indentation_for_comment(line: str) -> int:
    """Get the number of spaces before a comment"""
    return len(line.split("#")[0])
def replace_mapped_chars(pattern):
    """
    handles special escaped characters when coming from galaxy
    """
    mapped_chars = {"'": "__sq__", "\\": "__backslash__"}
    for key, value in mapped_chars.items():
        pattern = pattern.replace(value, key)
    return pattern
def exact(str1, str2):
    """Do exact comparison of two strings."""
    if (str1 == '') or (str2 == ''):
        return 0.0
    elif (str1 == str2):
        return 1.0
    else:
        return 0.0
def delete_test_mode(prefix='', rconn=None):
    """Delete test mode. Return True on success, False on failure"""
    if rconn is None:
        return False
    try:
        rconn.delete(prefix + 'test_mode')
    except Exception:
        return False
    return True
def from_formatted_lines(lines):
    """
    Return text from a list of `lines` strings using the Debian Description
    rules for handling line prefixes and continuations.
    """
    if not lines:
        return lines
    # first line is always "stripped"
    text = [lines[0].strip()]
    for line in lines[1:]:
        line = line.rstrip()
        if line.startswith('  '):
            # starting with two or more spaces: displayed verbatim.
            text.append(line[1:])
        elif line == ' .':
            # containing a single space followed by a single full stop
            # character: rendered as blank lines.
            text.append('')
        elif line.startswith(' .'):
            # containing a space, a full stop and some more characters: for
            # future expansion.... but we keep them for now
            text.append(line[2:])
        elif line.startswith(' '):
            # starting with a single space. kept stripped
            text.append(line.strip())
        else:
            # this should never happen!!!
            # but we keep it too
            text.append(line.strip())
    return '\n'.join(text).strip()
def get_name_of(symbol):
    """Name consists of symbol [1] and filename [2]"""
    elements = symbol.split(':')[1:3]
    return ':'.join(elements)
def collect_vowels(s):
    """ (str) -> str

    Return the vowels (a, e, i, o, and u) from s.

    >>> collect_vowels('Happy Anniversary!')
    'aAiea'
    >>> collect_vowels('xyz')
    ''
    """
    vowels = ''
    for char in s:
        if char in 'aeiouAEIOU':
            vowels = vowels + char
    return vowels
def is_magic_choice(attr_name: str) -> bool:
    """Determine iff attr_name meets magic choice formatting requirements.

    This is a helper for MagicKind metaclass that determines if an attribute
    name should be treated as a user defined magic-value choice

    Args:
        attr_name: name of an attribute

    Returns:
        True iff all the following are true about attr_name:
        * Is a valid python identifier.
        * Is upper case.
        * Does not begin with an underscore.
    """
    return (
        attr_name.isupper()
        and attr_name.isidentifier()
        and not attr_name.startswith("_")
    )
def format_to_8_bit(n: int) -> str:
    """
    >>> format_to_8_bit(10)
    '00001010'
    """
    bits = (n % 2, (n >> 1) % 2, (n >> 2) % 2, (n >> 3) % 2,
            (n >> 4) % 2, (n >> 5) % 2, (n >> 6) % 2, n >> 7)
    return "{7}{6}{5}{4}{3}{2}{1}{0}".format(*bits)
def _freshly_booted(commands, step_type):
    """Check if the ramdisk has just started.

    On the very first boot we fetch the available steps, hence the only
    command agent executed will be get_XXX_steps. For later reboots the list
    of commands will be empty.
    """
    return (
        not commands
        or (len(commands) == 1
            and commands[0]['command_name'] == 'get_%s_steps' % step_type)
    )
def stats_box(points):
    """
    Returns the extrema and dimensions for a box enclosing the points

    Args:
        points (list): a list of points in [(x1, y1),(x2, y2),(xN, yN)] form

    Returns:
        min_x (float): minimum x coordinate
        min_y (float): minimum y coordinate
        max_x (float): maximum x coordinate
        max_y (float): maximum y coordinate
        width (float): width across x-coordinates
        height (float): width across y-coordinates

    Example Returns::

        min_x, min_y, max_x, max_y, width, height = stats_box(points)
    """
    x_coordinates = []
    y_coordinates = []
    for x_coordinate, y_coordinate in points:
        x_coordinates.append(x_coordinate)
        y_coordinates.append(y_coordinate)
    min_x = min(x_coordinates)
    max_x = max(x_coordinates)
    min_y = min(y_coordinates)
    max_y = max(y_coordinates)
    return min_x, min_y, max_x, max_y, abs(max_x - min_x), abs(max_y - min_y)
def read_example_index(example_index_file):
    """Read example index file and parse into dictionary.

    Parameters
    ----------
    example_index_file : str
        /path/to/file.ext

    Returns
    -------
    dict
        Dictionary of example indices. Returns empty dictionary if no index
        file given.
    """
    example_index = {}
    if example_index_file:
        with open(example_index_file, "r") as f:
            for line in f.readlines():
                seq_id = line.split()[0]
                example_index[seq_id] = 1.0
    return example_index
def is_standard_source(source: str) -> bool:
    """Returns `True` if source is a standard ZenML source.

    Args:
        source: class_source e.g. this.module.Class[@pin].
    """
    if source.split(".")[0] == "zenml":
        return True
    return False
def color(raw_string, colour):
    """
    @returns a bold font
    usage: color("raw string here", 'red')
    """
    # look the colour name up in a mapping instead of eval-ing it
    colours = {
        'black': ('28', '1'),
        'red': ('31', '1'),
        'green': ('32', '1'),
    }
    return '\x1b[%sm%s\x1b[0m' % (';'.join(colours[colour]), raw_string)
def assign_mice_to_holes(mice, holes):
    """
    There are N mice and N holes placed in a straight line. Each hole can
    accommodate only 1 mouse. A mouse can stay at its position, move one step
    right from x to x + 1, or move one step left from x to x - 1. Any of
    these moves consumes 1 minute. Assign mice to holes so that the time when
    the last mouse gets inside a hole is minimized.
    """
    mice = sorted(mice)
    holes = sorted(holes)
    max_time = 0
    for m, h in zip(mice, holes):
        max_time = max(max_time, abs(m - h))
    return max_time
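For example (assumed sample data):

print(assign_mice_to_holes([4, -4, 2], [4, 0, 5]))  # 4  (the mouse at -4 walks to hole 0)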
def _split(start, end, count, min_interval_sec=120):
    """
    Split section from `start` to `end` into `count` pieces, and returns the
    beginning of each piece. The `count` is adjusted so that the length of
    each piece is no smaller than `min_interval_sec`.

    Returns:
    --------
    List of the offset of each block's first chat data.
    """
    if not (isinstance(start, int) or isinstance(start, float)) or \
       not (isinstance(end, int) or isinstance(end, float)):
        raise ValueError("start/end must be int or float")
    if not isinstance(count, int):
        raise ValueError("count must be int")
    if start > end:
        raise ValueError("end must be equal to or greater than start.")
    if count < 1:
        raise ValueError("count must be equal to or greater than 1.")
    if (end - start) / count < min_interval_sec:
        count = int((end - start) / min_interval_sec)
        if count == 0:
            count = 1
    interval = (end - start) / count
    if count == 1:
        return [start]
    return sorted(list(set([int(start + interval * j) for j in range(count)])))
def median_(numList, printed=False):
    """Find the median of a set of numbers."""
    sort_list, list_len = sorted(numList), len(numList)
    if list_len % 2 == 0:
        # The median equals the two middle indexes added then divided by 2.
        median = (sort_list[int((list_len / 2) + 0.5)] +
                  sort_list[int((list_len / 2) - 0.5)]) / 2
    else:
        # The median equals the middle index.
        median = sort_list[int(list_len / 2)]
    if printed:
        print(median)
    else:
        return median
def string_to_bool(value):
    """
    Convert the given unicode string ``value`` to a boolean object.

    If ``value`` is ``'1'``, ``True`` is returned. If ``value`` is ``'0'``,
    ``False`` is returned. Any other value raises a
    :exc:`~exceptions.ValueError`.
    """
    if value not in ('1', '0'):
        raise ValueError('Not a boolean value: {0!r}'.format(value))
    return value == '1'
def get_gt_web(best_matches, query_gt_dict):
    """
    Given best matches and query ground truth, return list of ground truths
    corresponding to best_matches (for deployment use)

    Args:
        best_matches  : list of best matching files
        query_gt_dict : dictionary indicating the positive and negative
                        examples for the query

    Returns:
        list of ground truths corresponding to best_matches
    """
    # Create python list to store preds
    preds = []
    # Iterate through the best matches and find predictions
    for i, pic in enumerate(best_matches):
        img_name = "{}".format(pic.split("/")[-1])
        if img_name in query_gt_dict['positive']:
            preds.append(1)
        elif img_name in query_gt_dict['negative']:
            preds.append(-1)
        else:
            preds.append(0)
    return preds
def clean_java_timecode(java_time_code_string):
    """
    converts millisecond time (string) to an integer normal unix time.
    """
    return int(java_time_code_string[:10])
def merge_sort(lst):
    """
    Takes in an unsorted list and runs the merge sort algorithm to organize
    the values.
    In: Int List
    Out: Int List with values lowest to greatest
    """
    def recurse(lst):
        """A recursive helper function that splits a list into sublists"""
        n = len(lst)
        if n > 1:
            mid = n // 2
            left = lst[:mid]
            right = lst[mid:]
            left = recurse(left)
            right = recurse(right)
            lst = merge(left, right)
        return lst

    def merge(left, right):
        """Merges the 2 sorted lists into one larger sorted list"""
        new_lst = []
        while left and right:
            if left[0] <= right[0]:
                new_lst.append(left.pop(0))
            elif right[0] < left[0]:
                new_lst.append(right.pop(0))
        if left:
            new_lst += left
        if right:
            new_lst += right
        return new_lst

    return recurse(lst)
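Quick check (illustrative only):

print(merge_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]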
def _validate_sources(generated_sources, original_sources):
    """
    Check if all original sources exist in generated sources

    :type generated_sources: list
    :type original_sources: list
    :rtype: bool
    """
    generated_sources = list(set(generated_sources))
    original_sources = list(set(original_sources))
    not_existent_source = []
    for source in original_sources:
        if source not in generated_sources:
            not_existent_source.append(source)
    if not_existent_source:
        print('WARN: Some sources did not exist in generated file')
        print(not_existent_source)
        return False
    return True
def valid_vlan_name(vlan_name):
    """
    create a valid VLAN name (removes certain unwanted characters)

    :param vlan_name:
    :return:
    """
    invalid_chars = list(";,.#+*=?%$()[]")
    clean_string = vlan_name.replace(" ", "_")
    clean_string = clean_string.replace("-", "_")
    clean_string = "".join(e for e in clean_string if e not in invalid_chars)
    return clean_string
def snake2kebab(string: str) -> str:
    """Convert a ``snake_cased`` string to a ``kebab-cased`` one.

    :param string: String to be converted.
    :type string: str
    :returns: The given string converted to kebab case.
    :rtype: str
    """
    return string.replace("_", "-")
def create_user(ssh_fn, name):
    """Create a user on an instance using the ssh_fn and name.

    The ssh_fn is a function that takes a command and runs it on the remote
    system. It must be sudo capable so that a user can be created and the
    remote directory for the user be determined.

    The directory for the user is created in /var/lib/{name}

    :param ssh_fn: a sudo capable ssh_fn that can run commands on the unit.
    :type ssh_fn: Callable[[str], str]
    :param name: the name of the user to create.
    :type name: str
    :returns: the directory of the new user.
    :rtype: str
    """
    dir_ = "/var/lib/{name}".format(name=name)
    cmd = ["sudo", "useradd", "-r", "-s", "/bin/false", "-d", dir_, "-m", name]
    ssh_fn(cmd)
    return dir_.strip()
def has_marker(stacktrace, marker_list):
    """Return true if the stacktrace has at least one marker in the marker list."""
    for marker in marker_list:
        if marker in stacktrace:
            return True
    return False
def convert_dict(my_dict):
    """Convert dictionaries from Netmiko format to NAPALM format."""
    new_dict = {}
    for k, v in my_dict.items():
        new_dict[k] = v
    hostname = new_dict.pop('host')
    new_dict['hostname'] = hostname
    device_type = new_dict.pop('device_type')
    new_device_type = device_type.split('_')[1]
    new_dict['device_type'] = new_device_type
    return new_dict
def non_qualified_code(code):
    """
    Some codes, e.g. ISO 3166-2 subdivision codes, are compound and are
    formatted as "{country_code}-{subdivision_code}". For validation cases we
    often care about extracting the non-qualified subdivision code in such
    cases.
    """
    return code.split("-", 2)[1]
def _make_pixel(val):
    """
    Construct a pixel tuple of (R,G,B,A) from the unsigned integer val.
    Used to explicitly embed font metric data into the png
    """
    return (val & 0x000000ff,
            (val & 0x0000ff00) >> 8,
            (val & 0x00ff0000) >> 16,
            (val & 0xff000000) >> 24)
def has_next(collection, limit):
    """Return whether collection has more items."""
    return len(collection) and len(collection) == limit
def execute(op_data):
    """Builds up an accumulator based on the op data.

    If the pointer goes beyond program input, it terminates and returns True,
    else False.
    """
    accum = 0
    ptr = 0
    visited = set()
    while ptr not in visited:
        visited.add(ptr)
        ins, val = op_data[ptr]
        if ins == "acc":
            accum += val
            ptr += 1
        elif ins == "nop":
            ptr += 1
        elif ins == "jmp":
            ptr += val
        if ptr >= len(op_data):
            return accum, True
    return accum, False
def _format_to_rows(dl):
    """Helper method to take data in DOL (dict of lists) format, and convert
    it to LOD (list of dicts)

    Args:
        dl (dict): Dict of lists to be converted

    Returns:
        list: A list of dicts representing data in row form
    """
    return [dict(zip(dl, t)) for t in zip(*dl.values())]
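A small demonstration of the DOL-to-LOD conversion with hypothetical data:

print(_format_to_rows({"a": [1, 2], "b": [3, 4]}))
# [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]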
def delta_decode(in_array):
    """A function to delta decode an int array.

    :param in_array: the input array of integers
    :return: the decoded array
    """
    if len(in_array) == 0:
        return []
    this_ans = in_array[0]
    out_array = [this_ans]
    for i in range(1, len(in_array)):
        this_ans += in_array[i]
        out_array.append(this_ans)
    return out_array
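Illustrative round trip with a delta-encoded array (assumed input):

print(delta_decode([3, 2, 2, 5]))  # [3, 5, 7, 12]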
def f(x, r):
    """
    the function in question
    """
    return x * r * (1 - x)
def fibonacci(n):
    """
    Deliberately inefficient recursive implementation of the Fibonacci series.

    Parameters
    ----------
    n : int
        Nonnegative integer - the index into the Fibonacci series.

    Returns
    -------
    fib : int
        The value of the Fibonacci series at n.
    """
    return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)
def logistic4(x, A, B, C, D):
    """4-parameter logistic model

    y = D + (A - D) / (1 + ((x / C)**B))

    A is min
    B is Hill coef
    C is inflection
    D is max
    """
    return D + (A - D) / (1 + ((x / C) ** B))
def pancake_sort(arr):
    """Sort Array with Pancake Sort.

    :param arr: Collection containing comparable items
    :return: Collection ordered in ascending order of items

    Examples:
    >>> pancake_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pancake_sort([])
    []
    >>> pancake_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1:len(arr)]
        # Reverse whole list
        arr = arr[cur - 1::-1] + arr[cur:len(arr)]
        cur -= 1
    return arr
def trim_keys(dict_):
    """remove dict keys with leading and trailing whitespace

    >>> trim_keys({' name': 'value'}) == {'name': 'value'}
    True
    """
    return {k.strip(): v for k, v in dict_.items()}
def stream_encoding(stream):
    """Return the appropriate encoding for the given stream."""
    encoding = getattr(stream, 'encoding', None)
    # Windows returns 'cp0' to indicate no encoding
    return encoding if encoding not in (None, 'cp0') else 'utf-8'
def average_precision(gt, pred):
    """
    Computes the average precision.

    This function computes the average precision at k between two lists of
    items.

    Parameters
    ----------
    gt: set
        A set of ground-truth elements (order doesn't matter)
    pred: list
        A list of predicted elements (order does matter)

    Returns
    -------
    score: double
        The average precision over the input lists
    """
    if not gt:
        return 0.0
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(pred):
        if p in gt and p not in pred[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    return score / max(1.0, len(gt))
def finalize_results(results):
    """Inspect results and select the most probable OS

    :param results: dictionary of results with probabilities
    :return: dictionary of IPs with the most probable OS assigned
    """
    out = {}
    for ip, result in results.items():
        final = "Unknown"
        best = 0
        apple = 0
        darwin = 0
        iOS = 0
        Mac = 0
        for OS in result:
            if "Darwin" in OS:
                apple += result[OS]
                darwin += result[OS]
            elif "iOS" in OS:
                iOS += result[OS]
                darwin += result[OS]
            elif "Mac OS X" in OS:
                apple += result[OS]
                Mac += result[OS]
            if result[OS] > best:
                final = OS
                best = result[OS]
        if "Darwin" in final or "iOS" in final or "Mac" in final:
            out[ip] = final
            continue
        if apple > (best * 3):
            if Mac >= darwin and Mac >= iOS:
                out[ip] = "Mac OS X"
                continue
            if iOS >= darwin:
                out[ip] = "iOS"
                continue
            out[ip] = "Darwin"
            continue
        out[ip] = final
    return out
def int_or_ratio(alpha, n):
    """Return an integer for alpha. If float, it's seen as ratio of `n`."""
    if isinstance(alpha, int):
        return alpha
    return int(alpha * n)
def filter_docker_compose_files_list(list, version):
    """Returns a filtered list of known docker-compose files

    version shall be one of "git", "docker".
    """
    assert version in ["git", "docker"]

    _DOCKER_ONLY_YML = [
        "docker-compose.yml",
        "docker-compose.enterprise.yml",
        "docker-compose.auditlogs.yml",
        "docker-compose.connect.yml",
        "other-components-docker.yml",
    ]
    _GIT_ONLY_YML = ["git-versions.yml", "git-versions-enterprise.yml"]

    def _is_known_yml_file(entry):
        return (
            entry.startswith("git-versions")
            and entry.endswith(".yml")
            or entry == "other-components.yml"
            or entry == "other-components-docker.yml"
            or (entry.startswith("docker-compose") and entry.endswith(".yml"))
        )

    return [
        entry
        for entry in list
        if _is_known_yml_file(entry)
        and (
            version == "all"
            or (
                (version == "git" and entry in _GIT_ONLY_YML)
                or (version == "docker" and entry in _DOCKER_ONLY_YML)
                or (entry not in _GIT_ONLY_YML + _DOCKER_ONLY_YML)
            )
        )
    ]
def reply(read=20, write=20):
    """Return the kwargs for creating the Reply table."""
    return {
        'AttributeDefinitions': [
            {'AttributeName': 'Id', 'AttributeType': 'S'},
            {'AttributeName': 'ReplyDateTime', 'AttributeType': 'S'},
            {'AttributeName': 'PostedBy', 'AttributeType': 'S'},
            {'AttributeName': 'Message', 'AttributeType': 'S'},
        ],
        'TableName': 'Reply',
        'KeySchema': [
            {'AttributeName': 'Id', 'KeyType': 'HASH'},
            {'AttributeName': 'ReplyDateTime', 'KeyType': 'RANGE'},
        ],
        'ProvisionedThroughput': {
            'ReadCapacityUnits': read,
            'WriteCapacityUnits': write,
        },
        'GlobalSecondaryIndexes': [
            {
                'IndexName': 'PostedBy-Message-Index',
                'KeySchema': [
                    {'AttributeName': 'PostedBy', 'KeyType': 'HASH'},
                    {'AttributeName': 'Message', 'KeyType': 'RANGE'},
                ],
                'Projection': {'ProjectionType': 'ALL'},
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': read,
                    'WriteCapacityUnits': write,
                },
            },
        ],
    }
def part1(input):
    """
    >>> part1([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
    7
    """
    return sum(1 if input[i] < input[i + 1] else 0 for i in range(len(input) - 1))
def getLength(array):
    """
    Get the length of an array like object.

    INPUT  <= array object
    OUTPUT => integer array length
    """
    count = 0
    for _ in array:
        count += 1
    return count
def split_space_text(text_list):
    """
    This Function splits text to words
    """
    temp_text_list = []
    for i in range(len(text_list)):
        if " " in text_list[i]:
            temp_text_list.extend(text_list[i].split())
            text_list[i] = ""
    print(temp_text_list)
    print(text_list)
    text_list.extend(temp_text_list)
    return text_list
def extract_track_url(search):
    """Get the first Spotify track url from a given search.

    Extended description of function.

    Parameters
    ----------
    search : dict
        Contains information relating to Spotify API track search request.

    Returns
    -------
    url : str
        Spotify URL for the first track received from search query.
    """
    if 'tracks' in search:
        tracks = search['tracks']
        if 'items' in tracks:
            items = tracks['items']
            # take the first url we can find
            for item in items:
                if 'external_urls' in item:
                    external_urls = item['external_urls']
                    if 'spotify' in external_urls:
                        url = external_urls['spotify']
                        return url
def friend_date(a, b):
    """Given two friends, do they have any hobbies in common?

    - a: friend #1, a tuple of (name, age, list-of-hobbies)
    - b: same, for friend #2

    Returns True if they have any hobbies in common, False if not.

    >>> elmo = ('Elmo', 5, ['hugging', 'being nice'])
    >>> sauron = ('Sauron', 5000, ['killing hobbits', 'chess'])
    >>> gandalf = ('Gandalf', 10000, ['waving wands', 'chess'])

    >>> friend_date(elmo, sauron)
    False

    >>> friend_date(sauron, gandalf)
    True
    """
    if set(a[2]) & set(b[2]):
        return True
    else:
        return False

    # can even do by converting to boolean!
    #
    # return bool(set(a[2]) & set(b[2]))
def extract_pure_newick_tree_string(raw_tree_content):
    """Read tree content, parse, and return tree string"""
    tmp_tree_str = ''
    tree_start_flag = False
    lines = raw_tree_content.split('\n')
    for line in lines:
        line = line.strip()
        if line.startswith('('):
            tree_start_flag = True
        if not tree_start_flag:
            continue
        if line.startswith('//') or line.startswith('#'):
            break
        else:
            tmp_tree_str += line
    return tmp_tree_str