content
stringlengths
42
6.51k
def say_again(s, exclaim):
    """Return the string `s` repeated 3 times.

    If `exclaim` is true, '!!!' is appended.
    """
    # `s * 3` is faster than s + s + s: one allocation instead of two
    # intermediate concatenations.
    result = s * 3
    if exclaim:
        result += '!!!'
    return result
def sort_by_tc(events):
    """Sort events in place by record-start timecode, tie-broken by track.

    Returns the same (now sorted) list for convenience.
    """
    def _sort_key(ev):
        return (ev.rec_start_tc.frames, ev.track)

    events.sort(key=_sort_key)
    return events
def inOrderTestTreeNode(root):
    """Return the in-order traversal (left, node, right) of a binary tree.

    Nodes expose `.data`, `.left` and `.right`; an empty tree yields [].
    """
    result = []
    stack, node = [], root
    while stack or node is not None:
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        result.append(node.data)
        node = node.right
    return result
def _comment(string: str) -> str: """Return string as a comment.""" lines = [line.strip() for line in string.splitlines()] sep = "\n" return "# " + f"{sep}# ".join(lines)
def max_sub_array(nums):
    """Return the maximum subarray sum of `nums` (Kadane's algorithm).

    Returns 0 if nums is None or empty.  If every element is negative the
    largest single element is returned.

    Time Complexity: O(n)
    Space Complexity: O(1)
    """
    # `not nums` covers both None and the empty list; the original used
    # `nums == None`, which should never be spelled with `==`.
    if not nums:
        return 0
    max_sum = current_sum = 0
    for num in nums:
        current_sum = max(0, current_sum + num)
        max_sum = max(max_sum, current_sum)
    # An all-negative input never lifts max_sum above 0; fall back to the
    # largest (least negative) single element.
    if max_sum <= 0:
        return max(nums)
    return max_sum
def splitAndTrim(string, delimiter):
    """Split `string` on `delimiter` and drop all empty parts."""
    return [part for part in string.split(delimiter) if part]
def to_xml_name(name):
    """Convert field name to tag-like name as used in QuickBase XML.

    >>> to_xml_name('This is a Field')
    'this_is_a_field'
    >>> to_xml_name('800 Number')
    '_800_number'
    >>> to_xml_name('A & B')
    'a___b'
    >>> to_xml_name('# of Whatevers')
    '___of_whatevers'
    """
    xml_name = ''.join(ch if ch.isalnum() else '_' for ch in name.lower())
    # Guard the empty string (the original raised IndexError on name='');
    # names not starting with a letter get a leading underscore.
    if not xml_name or not xml_name[0].isalpha():
        xml_name = '_' + xml_name
    return xml_name
def unique_days_of_one_stream(input_dict: dict) -> set:
    """Collect the dates of every stream id into a single unique set.

    :param dict input_dict: maps stream ids to lists of dates
    :return: the union of all dates across every stream id
    :rtype: set
    """
    all_dates = set()
    for dates in input_dict.values():
        all_dates.update(dates)
    return all_dates
def make_unscored_output_diac(x_entropies, y_entropies, test_set, vocab_map1, vocab_map2):
    """Attach per-space entropy values to each test item.

    :param x_entropies: dict mapping targets to entropy values (space x)
    :param y_entropies: dict mapping targets to entropy values (space y)
    :param test_set: list of (x, y, label, relation) tuples
    :param vocab_map1: dict mapping row strings to integer ids (space x)
    :param vocab_map2: dict mapping row strings to integer ids (space y)
    :return: list of (x, y, label, relation, x_entropy, y_entropy) tuples;
        out-of-vocabulary pairs receive the sentinel score -999.0
    """
    unscored_output = []
    for x, y, label, relation in test_set:
        if x in vocab_map1 and y in vocab_map2:
            print(x, y, label, relation, x_entropies[x], y_entropies[y])
            row = (x, y, label, relation, x_entropies[x], y_entropies[y])
        else:
            # sentinel score for out-of-vocab pairs
            row = (x, y, label, relation, -999.0, -999.0)
        unscored_output.append(row)
    return unscored_output
def count_bags_dfs(graph, start):
    """Count bags contained within `start` (excluding `start` itself).

    Iterative DFS; `graph` maps node -> {child: quantity} and quantities
    multiply down the containment chain.
    """
    total = 0
    stack = [(start, 1)]
    while stack:
        node, multiplier = stack.pop()
        total += multiplier
        stack.extend((child, multiplier * qty)
                     for child, qty in graph[node].items())
    # subtract the start bag itself
    return total - 1
def get_device_hostnames(device_response):
    """Return a dict mapping device MAC addresses to hostnames."""
    return {entry["mac"]: entry["hostname"]
            for entry in device_response["data"]}
def dfs_imp_list(g, s):
    """Imperative Depth First Search for adjacency list.

    :param g: the graph to visit; g[v] is an iterable of int neighbours.
    :type g: list(list(int))
    :param s: the vertex to start from.
    :type s: int
    :return: list of visited vertices in first-visit order.
    """
    visited_vertices = []
    seen = set()  # O(1) membership test instead of scanning the list each time
    stack = [s]
    while stack:
        v = stack.pop()
        if v not in seen:
            seen.add(v)
            visited_vertices.append(v)
            stack.extend(g[v])
    return visited_vertices
def indent(t, indent=0, sep='\n'):
    # type: (str, int, str) -> str
    """Prepend `indent` spaces to every `sep`-separated segment of `t`."""
    pad = ' ' * indent
    return sep.join(pad + segment for segment in t.split(sep))
def fcn_VR_FMS(r_div_R):
    """Transversal velocity factor of FMS, Eq. (31) in [2]."""
    x = r_div_R
    return 0.25 * (3. * x - x ** 3.0 + 2.0)
def unreliable_test(test_fr, unacceptable_fr, test_runs, min_run):
    """Decide whether a test should be flagged as unreliable.

    A test is unreliable once it has at least `min_run` executions and its
    failure rate `test_fr` exceeds `unacceptable_fr`.
    """
    if test_runs < min_run:
        return False
    return test_fr > unacceptable_fr
def keywords2tag(tag):
    """Strip the trailing run of digits from `tag`, e.g. "span1" -> "span"."""
    return tag.rstrip('0123456789')
def get_direction_sign(here, there):
    """Sign of the step (+1 or -1) needed to move from `here` toward `there`.

    Note: equal positions yield -1, matching the strict `<` comparison.
    """
    return -1 if here >= there else 1
def _get_dbg_fn(code):
    """
    Create a wrapper for the python statements, that encodes the
    debugging logic for the decision tree.

    :param code: iterable of python statement strings, one per line
    :return: source text of a `debug_fn` function wrapping those statements
    """
    spacer_basic = ' '  # indentation prepended to each wrapped statement
    # Create a function.
    wrapper_code = "def debug_fn(): \n"
    # For each line, add the indent in front and a newline at the end.
    upd_code = [spacer_basic + e + "\n" for e in code]
    # Finally, join everything
    wrapper_code += ''.join(upd_code)
    # Finally return the wrapped up code.
    return wrapper_code
def get_parameter_name(name):
    """Return the part of a parameter key after the last '.'.

    Keys without a '.' are returned unchanged.
    """
    return name.rsplit(".", 1)[-1]
def _conf_string_to_bool(conf_string: str): """Normally in Python all non empty strings convert to True. We want it to not be as easy to accidentally turn on a configuration flag, so we only treat the explicit string "true" as True. Casing doesn't matter. """ return conf_string.lower() == 'true'
def float_eq(__x: float, __y: float) -> bool:
    """Return True when the two floats differ by less than 1e-5.

    Examples:
        >>> float_eq(0.1 + 0.1 + 0.1, 0.3)
        True
    """
    tolerance = 1e-5
    return abs(__x - __y) < tolerance
def egcd(a,b): # a > b > 0
    """
    Extended great common divisor, returns x , y and gcd(a,b) so ax + by = gcd(a,b)
    """
    # Trivial case: b divides a, so 0*a + 1*b = b = gcd.
    if a%b==0:
        return (0,1,b)
    # Run the Euclidean algorithm, recording the negated quotients; these
    # are the partial quotients used to unwind the Bezout coefficients.
    q=[]
    while a%b != 0:
        q.append(-1*(a//b))
        (a,b)=(b,a%b)
    # At this point b is gcd(a,b).  Back-substitute through the recorded
    # quotients (continued-fraction style) to recover x and y.
    (a,b,gcd)=(1,q.pop(),b)
    while q:(a,b)=(b,b*q.pop()+a)
    return (a,b,gcd)
def find_carg_coverages(carg_node_list, reference_coverage, coverages):
    """
    Find CARG coverages by searching coverages for the ones with 1 in exactly
    the places carg requires it to be and nowhere else.
    e.g. CARG is in node_id 2, reference coverage is [0,1,2,3],
    CARG coverages are [0,0,1,X0], [0,0,1,0] but not [0,1,1,0] or [0,1,X0,0]
    :param carg_node_list: List of node ids corresponding to a single CARG
    :param reference_coverage: List of node ids in order which coverages are constructed
    :param coverages: List of coverages obtained by running rule application
    :return: List of coverages corresponding to the give CARG
    """
    # Positions the CARG nodes occupy within the reference coverage
    carg_node_positions = [reference_coverage.index(node_id)
                           for node_id in carg_node_list]
    num_required = len(carg_node_positions)
    carg_coverages = []
    for coverage in coverages:
        # Every CARG position must be covered...
        # (generator + plain boolean replaces the `all([True if ... else False])`
        # anti-idiom of the original)
        if not all(coverage[index] == '1' for index in carg_node_positions):
            continue
        # ...and nothing else may be.
        if sum(1 for node in coverage if node == '1') != num_required:
            continue
        carg_coverages.append(coverage)
    return carg_coverages
def validate_dice_seed(dice, min_length):
    """Validate a string of dice rolls: enough digits, each between 1 and 6.

    Prints an error and returns False on the first problem, else True.

    dice: <string> representing list of dice rolls (e.g. "5261435236...")
    """
    if len(dice) < min_length:
        print("Error: You must provide at least {0} dice rolls".format(min_length))
        return False
    for die in dice:
        try:
            value = int(die)
        except ValueError:
            print("Error: Dice rolls must be numbers between 1 and 6")
            return False
        if not 1 <= value <= 6:
            print("Error: Dice rolls must be between 1 and 6.")
            return False
    return True
def bitdepth(name):
    """Convert a numpy dtype name (or bit count) to the NSI bit-depth string.

    Args:
        name (str): one of 'uint8', 'uint16', 'float32', '8', '16', '32'.

    Returns:
        str: The NSI equivalent of the datatype for a volume
        ('UCHAR', 'USHORT' or 'FLOAT').

    Raise:
        TypeError: If the requested type is not supported.
    """
    name = str(name)
    supported_types = {
        'uint8': 'UCHAR',
        'uint16': 'USHORT',
        'float32': 'FLOAT',
        '8': 'UCHAR',
        '16': 'USHORT',
        '32': 'FLOAT',
    }
    try:
        return supported_types[name]
    except KeyError:
        # TypeError is kept for backward compatibility with existing callers,
        # but the message now reports the actual problem: the *value* is
        # unsupported (the old message wrongly claimed it was not a string).
        raise TypeError(f"bitdepth() does not support type '{name}'") from None
def normalizeDuration(periodType, timeToElapse):
    """Convert `timeToElapse` to days: weeks x7, months x30, anything else as-is."""
    days_per_unit = {"weeks": 7, "months": 30}
    return timeToElapse * days_per_unit.get(periodType, 1)
def is_npy(filename):
    """True when `filename` begins with the numpy .npy magic bytes (\\x93NUMPY).

    see: https://github.com/numpy/numpy/blob/master/doc/neps/npy-format.rst
    """
    from numpy.lib.format import MAGIC_PREFIX
    with open(filename, 'rb') as handle:
        header = handle.read(6)
    return header == MAGIC_PREFIX
def standardizeCapitalization(input_string, algorithm):
    """Convert title-case function words (e.g., ' The ') to lowercase (' the ').

    Args:
        input_string (str): the string to be converted.
        algorithm (str): capitalization algorithm to use; currently only
            "en_title" is supported.

    Returns:
        str: The converted string.

    Raises:
        Exception: if `algorithm` is not a recognised algorithm name.

    Examples:
        >>> from triplicator.bibTools import standardizeCapitalization
        >>> standardizeCapitalization("Phantom Of The Opera", "en_title")
        'Phantom of the Opera'
    """
    import re

    # Fixed: the original compared with `algorithm is "en_title"`, which only
    # works by accident of CPython string interning; `==` is correct.
    if algorithm == "en_title":
        # Mid-title function words (must be space-delimited to match).
        # This list is not exhaustive.
        words = ["In", "The", "A", "An", "As", "On", "At", "For", "With",
                 "From", "By", "Of", "Vs", "And", "Be", "Been", "Not", "Is",
                 "Isn't", "Are", "Aren't", "Does", "Doesn't", "Do", "Don't",
                 "Was", "Wasn't", "Were", "Weren't", "Did", "Didn't"]
        formatted_string = input_string
        # One loop replaces the original's thirty-odd duplicated re.sub lines;
        # order and patterns are preserved exactly.
        for word in words:
            formatted_string = re.sub(" %s " % word, " %s " % word.lower(),
                                      formatted_string)
        return formatted_string
    raise Exception('Unknown algorithm parameter: "' + algorithm + '". '
                    'Please enter a valid capitalization algorithm such as '
                    '"en_title".')
def rot32(w, n_left):
    """
    Rotate 32-bit word left by nLeft or right by -nLeft
    without creating a Python long.
    Timing depends on nLeft but not on w.
    """
    n_left &= 31  # which makes nLeft >= 0
    if n_left == 0:
        return w
    # Note: now 1 <= nLeft <= 31.
    # RRRsLLLLLL   There are nLeft RRR's, (31-nLeft) LLLLLL's,
    # => sLLLLLLRRR and one s which becomes the sign bit.
    # The low 31 bits are shifted right to become the wrapped-around part;
    # the sign bit is reconstructed separately so no Python long is formed.
    RRR = (((w >> 1) & 0x7fffFFFF) >> (31 - n_left))
    sLLLLLL = -((1 << (31 - n_left)) & w) | (0x7fffFFFF >> n_left) & w
    return RRR | (sLLLLLL << n_left)
def compress(ls, ind):
    """Select the elements of `ls` at the given indices.

    >>> compress([7,8,9,10],[2,3])
    [9, 10]
    """
    return list(map(ls.__getitem__, ind))
def prefix_mask(lenght):
    """Return the 128-bit mask whose first `lenght` bits are set (IPv6 prefix mask).

    (The parameter keeps its historical spelling for keyword-call compatibility.)
    """
    full_mask = (1 << 128) - 1
    host_bits = (1 << (128 - lenght)) - 1
    return full_mask - host_bits
def remove_docstring_indent(doc_str: str) -> str:
    """Remove the additional indent of a multiline docstring.

    The first (summary) line is kept verbatim; the indent measured on the
    first non-empty following line is stripped from every subsequent line.
    This can be helpful, if docstrings are combined programmatically.
    """
    lines = doc_str.split("\n")
    if len(lines) <= 1:
        return doc_str
    # Default "" avoids StopIteration when every following line is empty.
    first_non_summary_line = next((line for line in lines[1:] if line), "")
    indent = len(first_non_summary_line) - len(first_non_summary_line.lstrip())
    cut_lines = [lines[0]]
    # Bug fix: the original looped over *all* lines here, which duplicated
    # (and truncated) the summary line in the output.
    for line in lines[1:]:
        cut_lines.append(line[indent:])
    return "\n".join(cut_lines)
def data_to_line(data, sep='\t'):
    """Join the stringified items of `data` with `sep` and terminate with a newline."""
    return sep.join(map(str, data)) + '\n'
def calculate_dir(start, target):
    """Unit step (dx, dy) pointing from `start` toward `target`.

    start, target: (x, y) tuples.  Each component of the result is -1, 0 or +1.
    """
    def _sign(delta):
        # cmp-style sign: -1, 0 or +1
        return (delta > 0) - (delta < 0)

    return (_sign(target[0] - start[0]), _sign(target[1] - start[1]))
def merge_config(*dicts):
    """Merge several dicts into one; later arguments win, dicts recurse.

    Parameters
    ----------
    dicts: list
        Dicts to merge into a single dict.

    Returns
    -------
    updated: dict
        Combined result.  For keys present in several inputs, nested dicts
        are merged recursively (dicts take precedence over non-dict values);
        otherwise the last value wins.
    """
    merged = {}
    all_keys = set()
    for mapping in dicts:
        all_keys |= set(mapping)
    for key in all_keys:
        values = [mapping[key] for mapping in dicts if key in mapping]
        dict_values = [v for v in values if isinstance(v, dict)]
        if dict_values:
            merged[key] = merge_config(*dict_values)
        else:
            # later arguments take precedence
            merged[key] = values[-1]
    return merged
def calc_cos_plant_uptake(GEE, LRU, CO2, COS):
    """Plant COS uptake from GPP, leaf relative uptake and ambient concentrations.

    INPUT PARAMETERS:
    GEE: np.ndarray of gross primary production, kg C m-2 s-1
    LRU: leaf relative uptake,
        (umol CO2 m-2 s-1 (ppm CO2)-1) / (pmol COS m-2 s-1 (ppt COS)-1)
    CO2: atmospheric CO2 concentration (ppm)
    COS: atmospheric COS concentration (ppt)

    RETURNS: plant COS flux (mol COS m-2 yr-1)

    NOTES: LRU, CO2, and COS may be scalars or numpy arrays broadcastable
    to GEE's shape, permitting invariant or spatially/temporally varying
    inputs.
    """
    # unit-conversion constants
    g_per_kg = 1e3               # grams per kilogram
    molC_per_gC = 1.0 / 12.011   # moles carbon per gram carbon
    umol_per_mol = 1e6           # micromoles per mole
    mol_per_pmol = 1e-12         # moles per picomole
    # COS plant flux in pmol m-2 s-1 (multiplication order preserved for
    # bitwise-identical float results)
    return (GEE * LRU * (COS / CO2) * g_per_kg * molC_per_gC *
            umol_per_mol * mol_per_pmol)
def is_downloadable(url):
    """True when the url looks like a downloadable resource (pdf/epub/mobi).

    Side effect kept from the original: matching urls are printed.
    """
    for marker in ('pdf', 'epub', 'mobi'):
        if marker in url:
            print(url)
            return True
    return False
def get_true_anomalies(w, dtheta):
    """Compute the initial and final true anomalies.

    Parameters
    ----------
    w: float
        Argument of periapsis.
    dtheta: float
        Transfer angle.

    Returns
    -------
    (nu_1, nu_2): tuple of float
        Initial (-w) and final (-w + dtheta) true anomalies.
    """
    initial = -w
    return initial, initial + dtheta
def get_best(summary, metric, fields=None):
    """Keep, per optimizer group, the summary value minimizing `metric`.

    Groups are tuples of the non-None `fields` taken from each namedtuple
    key's _asdict().  When `fields` is None everything is grouped together
    and the single best value is returned directly.
    """
    best = {}
    for summary_key, value in summary.items():
        if fields is None:
            group = None
        else:
            key_dict = summary_key._asdict()
            group = tuple(key_dict[field] for field in fields
                          if key_dict[field] is not None)
        if group not in best or metric(value) < metric(best[group]):
            best[group] = value
    return best[None] if fields is None else best
def translate_list(dictionary, list_to_be_translated):
    """Map each entry of the list through `dictionary`; return the new list."""
    return [dictionary[entry] for entry in list_to_be_translated]
def generate_block_number(chapter_number, heading_number, subheading_number, paragraph_number):
    """Dotted block number built from the available parts.

    Uses chapter_number when truthy (falling back to heading_number), then
    appends subheading and paragraph numbers; missing (falsy) parts are
    skipped entirely, so e.g. "chapter.subheading" or "heading.paragraph"
    are possible results.
    """
    parts = (chapter_number or heading_number, subheading_number, paragraph_number)
    return ".".join(str(part) for part in parts if part)
def create_errors(error, error_barwidth):
    """Build the plotly `error_y` dict for a barplot.

    A string `error` means "no error bars" and yields an empty dict.
    """
    if isinstance(error, str):
        return {}
    return {
        'type': 'data',
        'array': error,
        'thickness': error_barwidth,
        'width': int((error_barwidth * 2.5) / 2),
        'visible': True,
    }
def restrict_to_vocab(txt, vocab):
    """Drop every character of `txt` that does not belong to `vocab`."""
    return ''.join(filter(vocab.__contains__, txt))
def format_dnb_company_investigation(data):
    """Move `telephone_number` into a nested `dnb_investigation_data` dict.

    Mutates and returns `data`, reshaping it into something
    DNBCompanyInvestigationSerlizer can parse.
    """
    phone = data.pop('telephone_number', None)
    data['dnb_investigation_data'] = {'telephone_number': phone}
    return data
def convert_list_for_sql(my_list):
    """Render a python list as a SQL value list: strings quoted, rest verbatim.

    Primarily used when formatting SQL queries by passing an argument.

    Example:
        1. convert_list_for_sql([1, 2, 3])       returns "1, 2, 3"
        2. convert_list_for_sql(['Simone', 'Dan']) returns "'Simone', 'Dan'"
    """
    def _render(item):
        return "'{}'".format(item) if isinstance(item, str) else str(item)

    return ", ".join(_render(item) for item in my_list)
def _lod_to_dol(lod): """Convert a list-of-dicts into a dict-of-lists. All the dicts in the input list must have the same keys. """ assert isinstance(lod, list) assert len(lod) > 0 keys = lod[0].keys() dol = {k: [] for k in keys} for d in lod: for k in keys: dol[k].append(d[k]) return dol
def parse_dict(d):
    """Flatten a single-layer dict `d` into the string "key1=val1,key2=val2,...".

    d (dict): any dictionary, preferably with no nesting
    returns (str): the representation described above
    """
    return ",".join(f"{key}={value}" for key, value in d.items())
def ip2int(ip_addr):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    result = 0
    for octet in ip_addr.split('.'):
        result = (result << 8) | int(octet)
    return result
def _bit_count(num): """ return number of set bits Counting bits set, Brian Kernighan's way* unsigned int v; // count the number of bits set in v unsigned int c; // c accumulates the total bits set in v for (c = 0; v; c++) { v &= v - 1; } //clear the least significant bit set This method goes through as many iterations as there are set bits. So if we have a 32-bit word with only the high bit set, then it will only go once through the loop. * The C Programming Language 2nd Ed., Kernighan & Ritchie, 1988. This works because each subtraction "borrows" from the lowest 1-bit. For example: loop pass 1 loop pass 2 ----------- ----------- 101000 100000 - 1 - 1 = 100111 = 011111 & 101000 & 100000 = 100000 = 0 It is an excellent technique for Python, since the size of the integer need not be determined beforehand. (from https://wiki.python.org/moin/BitManipulation) """ count = 0 while num: num &= num - 1 count += 1 return count
def natTftp(ctx, mach, nicnum, nat, args):
    """This command shows/alters TFTP settings
    usage nat <vm> <nicnum> tftp [prefix <prefix>|
            bootfile <bootfile>|
            server <server>]
    prefix - alters prefix TFTP settings
    bootfile - alters bootfile TFTP settings
    server - sets booting server
    """
    if len(args) == 1:
        # Show current settings, deriving defaults where unset.
        server = nat.TFTPNextServer
        if server is None:
            server = nat.network
        if server is None:
            server = '10.0.%d/24' % (int(nicnum) + 2)
        # NOTE(review): assumes the address carries a '/mask' suffix -- a
        # pre-existing assumption kept from the original.
        (server, mask) = server.split('/')
        while server.count('.') != 3:
            server += '.0'
        (a, b, c, d) = server.split('.')
        # Bug fix: split() yields strings, so the original '%d' formatting
        # raised TypeError here; %s formats the octets correctly (.4 is the
        # conventional TFTP host on the NAT network).
        server = '%s.%s.%s.4' % (a, b, c)
        prefix = nat.TFTPPrefix
        if prefix is None:
            prefix = '%s/TFTP/' % (ctx['vb'].homeFolder)
        bootfile = nat.TFTPBootFile
        if bootfile is None:
            bootfile = '%s.pxe' % (mach.name)
        msg = 'server:%s, prefix:%s, bootfile:%s' % (server, prefix, bootfile)
        return (0, [msg])
    else:
        cmd = args[1]
        if len(args) != 3:
            print('invalid args:', args)
            print(natTftp.__doc__)
            return (1, None)
        if cmd == 'prefix':
            nat.TFTPPrefix = args[2]
        elif cmd == 'bootfile':
            nat.TFTPBootFile = args[2]
        elif cmd == 'server':
            nat.TFTPNextServer = args[2]
        else:
            print("invalid cmd:", cmd)
            return (1, None)
    return (0, None)
def splitted(_set):
    """Wrap each element of `_set` in its own singleton list, e.g. {0,1,2} -> [[0],[1],[2]]."""
    return [[element] for element in _set]
def removeDuplicates(l):
    """Return `l` with duplicate entries removed, keeping first occurrences.

    Uses list.index (the Python analogue of the JavaScript
    `indexOf`-filter idiom), so elements need not be hashable.
    """
    return [value for position, value in enumerate(l) if l.index(value) == position]
def glob_to_re(glob):
    """Translate a shell glob-like pattern to an anchored regular expression.

    Currently there is no way to quote the glob metacharacters "*" and "?".

    Parameters:
        glob (str): pattern where "*" matches any run of characters and "?"
            matches a single character.

    Returns:
        str: an equivalent regular expression.
    """
    specials = {".", "[", "]", "\\", "^", "$", "+", "{", "}", "|", "(", ")"}
    pieces = ['^']
    for ch in glob:
        if ch == "*":
            pieces.append('.*')
        elif ch == "?":
            pieces.append('.')
        elif ch in specials:
            pieces.append('\\' + ch)
        else:
            pieces.append(ch)
    pieces.append('$')
    return ''.join(pieces)
def ior(a, b):
    """Same as a |= b.

    Note: for mutable operands (e.g. sets) `|=` updates `a` in place and
    the same object is returned; for immutable operands (e.g. ints) a new
    value is returned.
    """
    a |= b
    return a
def preprocess(image):
    """Shift image values from the range [0, 1] to [-1, 1].

    return: range-modified image
    """
    doubled = image * 2
    return doubled - 1
def get_forecast_variable(gt_id):
    """Return the forecast variable name for the given ground truth id.

    Args:
        gt_id: ground truth data string ending in "precip" or "tmp2m"

    Raises:
        ValueError: for any other id.
    """
    if "tmp2m" in gt_id:
        return "tmp2m"
    elif "precip" in gt_id:
        return "prate"
    raise ValueError("Unrecognized gt_id " + gt_id)
def checksum(sin):
    """Compute the check digit of a Canadian Social Insurance Number.

    Modified Luhn algorithm: the existing last digit is zeroed, every second
    digit is doubled (summing the digits of two-digit results), and the
    check digit is whatever makes the grand total divisible by ten.
    https://en.wikipedia.org/wiki/Social_Insurance_Number
    """
    digits = [int(ch) for ch in sin.replace(' ', '')]
    digits[-1] = 0  # the supplied check digit is not part of the sum
    for i in range(1, len(digits), 2):
        doubled = digits[i] * 2
        # two-digit products: add their digits (equivalent to subtracting 9)
        digits[i] = doubled if doubled < 10 else doubled - 9
    remainder = sum(digits) % 10
    return 0 if remainder == 0 else 10 - remainder
def page_not_found(e):
    """Custom 404 handler: returns the body and the HTTP status code."""
    return ('nothing here!', 404)
def prefix_replace(original, old, new):
    """Replace the leading `old` prefix of `original` with `new`.

    Note: only len(old) is used; the prefix is assumed to be present.

    :param original: string
    :param old: string (prefix to drop)
    :param new: string (prefix to add)
    :return: string
    """
    tail = original[len(old):]
    return new + tail
def merge_dicts(dict_a, dict_b, default_key="default"):
    """Recursively merge two nested dicts into a single dict.

    Matching keys are merged by recursion.  A non-dict side is wrapped
    under `default_key` first -- unless the dict side already defines that
    key, in which case the dict side wins outright.
    """
    # empty/missing sides short-circuit to the other side
    if dict_a is None or dict_a == {}:
        return dict_b
    if dict_b is None or dict_b == {}:
        return dict_a
    if type(dict_a) is not dict:
        if default_key in dict_b:
            return dict_b
        dict_a = {default_key: dict_a}
    if type(dict_b) is not dict:
        if default_key in dict_a:
            return dict_a
        dict_b = {default_key: dict_b}
    combined_keys = set(dict_a.keys()) | set(dict_b.keys())
    return {key: merge_dicts(dict_a.get(key), dict_b.get(key), default_key)
            for key in combined_keys}
def convert_taxa(rough_taxa, formatting_keys='%1.2f', hundredx=False):
    """Formats lists of numbers for table generation
    INPUTS:
        rough_taxa -- a list of lists with a descriptor string followed by a
                    list of corresponding values

        formatting_keys -- a string describing the way the value should be
                    formatting using string formats. For example, %1.2f, %2d,
                    %i. A value of 'SKIP' will ignore that value and remove it
                    from the output list.

    OUTPUTS:
        formatted_taxa -- a list of string with formatting for the final table.

    NOTE: mutates `rough_taxa` in place (each inner list loses its leading
    descriptor via pop(0)).
    """
    # Checks the rough_taxa argument is sane
    if not isinstance(rough_taxa, list):
        raise TypeError('rough_taxa must have be a list of at least one '
                        'lists.\nrough_taxa is a %s.' % rough_taxa.__class__)
    elif len(rough_taxa) == 0:
        raise ValueError('rough taxa must have be a list of at least one '
                         'lists.\nrough_taxa does not have any elements.')
    elif not isinstance(rough_taxa[0], list):
        raise TypeError('rough taxa must have be a list of at least one '
                        'lists.\nThe first element in rough taxa is a %s.'
                        % rough_taxa[0].__class__)
    # Every inner list must have the same length as the first.
    num_ent = len(rough_taxa[0])
    for entry in rough_taxa:
        if not isinstance(entry, list):
            raise TypeError('rough_taxa must be a list of lists')
        if not len(entry) == num_ent:
            raise ValueError('list size is inconsistant')
    # Number of numeric values per row (first element is the descriptor).
    num_rough = num_ent-1
    if isinstance(formatting_keys, list):
        num_keys = len(formatting_keys)
    else:
        num_keys = 1
    if isinstance(hundredx, list):
        num_hund = len(hundredx)
    else:
        num_hund = 1
    if not isinstance(formatting_keys, (list, str)):
        raise TypeError('formatting_keys must be a list or string.')
    if not num_rough == num_keys and isinstance(formatting_keys, list):
        raise ValueError('The number of elements in rough_taxa (%i) and the '
                         'number of elements in formatting_keys (%i) must be '
                         'equal.' % (num_rough, num_keys))
    elif not isinstance(hundredx, (list, bool)):
        raise TypeError('hundredx must be a list or bool.')
    elif not num_rough == num_hund and isinstance(hundredx, list):
        raise ValueError('The number of elements in rough_taxa(%i) and the '
                         'number of elements in hundredx(%i) must be equal.'
                         % (num_rough, num_hund))
    # Converts formatting keys and hundredx to lists (scalar values are
    # broadcast across all numeric columns).
    if isinstance(formatting_keys, str):
        formatting_keys = [formatting_keys]*num_rough
    if isinstance(hundredx, bool):
        hundredx = [hundredx]*num_rough
    # Creates formatted list
    formatted_taxa = []
    for element in rough_taxa:
        taxon = element[0]
        # NOTE: pop mutates the caller's inner list.
        element.pop(0)
        new_element = [taxon]
        for idx, item in enumerate(element):
            if formatting_keys[idx] == 'SKIP':
                continue
            if hundredx[idx]:
                item = item * 100
            new_element.append(formatting_keys[idx] % item)
        formatted_taxa.append(new_element)
    return formatted_taxa
def __natoms(natoms, nshells):
    """ Number of atoms for computing coordination

    An explicitly supplied positive `natoms` wins; otherwise the count is
    accumulated over fcc neighbour-shell sizes.
    """
    from numpy import sum
    if natoms > 0:
        return natoms
    # based on fcc: cumulative atom counts of the first `nshells`
    # neighbour shells of an fcc lattice
    result = sum([12, 6, 24, 12, 24, 8, 48, 6, 32][:nshells])
    result = int(result)
    # NOTE(review): the +6 / *8 adjustment below is an unexplained
    # correction -- presumably empirical; confirm against the caller.
    if nshells < 12:
        result += 6
    else:
        result *= 8
    return result
def list2str(list):
    """Render a list as "[a, b, c]" using str() on each element."""
    inner = ", ".join(str(element) for element in list)
    return "[" + inner + "]"
def vals_are_constant(vlist, cval=None):
    """Return 1 if every value in vlist equals cval, else 0.

    If cval is None the first element is used as the reference.  None or
    short (<2 element) lists are trivially constant.  (Returns ints 0/1
    for backward compatibility with existing callers.)
    """
    # `is None` replaces the original's `== None` comparisons.
    if vlist is None or len(vlist) < 2:
        return 1
    if cval is None:
        cval = vlist[0]
    return 1 if all(val == cval for val in vlist) else 0
def price(value):
    """Format `value` as a dollar amount with 5-decimal precision.

    Non-numeric input yields the empty string.
    """
    try:
        number = round(float(value), 5)
    except (ValueError, TypeError, UnicodeEncodeError):
        return ''
    return f'${number:0.5f}'
def name_sorting(nodes, direction, data):
    """Sort nodes by their string representation; direction 'desc' reverses."""
    return sorted(nodes, key=str, reverse=(direction == 'desc'))
def alphabetic(string: str, decimals: bool = True):
    """Keep only the alphabetic characters of `string` (plus decimal digits
    when `decimals` is true).  None yields "".
    """
    if string is None:
        return ""
    if decimals:
        keep = lambda ch: ch.isalpha() or ch.isdecimal()
    else:
        keep = str.isalpha
    return "".join(filter(keep, str(string)))
def identical(a, b):
    """Are arrays a and b identical (same length, equal elements)?"""
    from numpy import all
    if len(a) != len(b):
        return False
    return all(a == b)
def viscosity(T, mu_0=1.83245E-5, T_0=296.15, S=110.4):
    """Viscosity of air at temperature T via Sutherland's law.

    Parameters
    ----------
    T : float
        measurement temperature
    mu_0 : float
        reference viscosity
    T_0 : float
        reference temperature
    S : float
        Sutherland constant of air

    Returns
    -------
    viscosity : float
        viscosity at T
    """
    sutherland_ratio = (T_0 + S) / (T + S)
    temperature_ratio = (T / T_0) ** 1.5
    return mu_0 * sutherland_ratio * temperature_ratio
def kronecker(i, j):
    """Kronecker delta: 1 when i == j, otherwise 0."""
    return int(i == j)
def _split_uppercase(word: str) -> set: """ EverGreen -> Ever, Green """ pos_upper = [pos for pos, letter in enumerate(word) if letter.isupper()] pos_upper.append(len(word)) simple_words = set([]) for left, right in zip(pos_upper[:-1], pos_upper[1:]): simple_words.add(word[left: right]) return simple_words
def hmsToHour(s, h, m, sec):
    """Convert signed RA/HA hours, minutes, seconds to floating point hours."""
    magnitude = h + m / 60.0 + sec / 3600.0
    return s * magnitude
def MultipleReplace(Text, ReplaceDict):
    """Apply every {search: replace} substitution in ReplaceDict to Text.

    Replacement values are stringified before use.
    """
    result = Text
    for search, replacement in ReplaceDict.items():
        result = result.replace(search, str(replacement))
    return result
def run(path: str, timeoutSeconds: int, arguments: dict) -> str:
    """Stub of dbutils.notebook.run: logs the skipped call and returns ''."""
    print(f"skip dbutils.notebook.run({path},{timeoutSeconds},{arguments})")
    return ''
def _get_node(response, *ancestors): """ Traverse tree to node """ document = response for ancestor in ancestors: if ancestor not in document: return {} else: document = document[ancestor] return document
def get_distance(a, b):
    """Return the Manhattan (taxicab) distance between points a and b."""
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    return dx + dy
def get_leaves(node):
    """
    Get the leaf nodes in the passed expression tree
    (the logical expression operands).

    Falsy nodes (including None children) are skipped.
    """
    leaves = []
    pending = [node]
    while pending:
        current = pending.pop()
        if not current:
            continue
        if current.left or current.right:
            # internal node: descend into both children
            pending.append(current.left)
            pending.append(current.right)
        else:
            leaves.append(current)
    return leaves
def line(loc, strg):
    """
    Return the line of text containing index *loc* within *strg*,
    counting newlines as line separators.
    """
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    if end < 0:
        return strg[start:]
    return strg[start:end]
def weekday_to_str(num):
    """Translate a weekday number (0=mon .. 6=sun, int or str) to its
    three-letter name; returns None for unknown values."""
    names = dict(zip(map(str, range(7)),
                     ('mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun')))
    return names.get(str(num))
def url(pattern, handler, kwargs=None, name=None):
    """Bundle routing parameters into a 4-tuple.

    Convenience wrapper so callers can name parameters and skip the
    optional ones.
    """
    return (pattern, handler, kwargs, name)
def member( x, set ): """test for set membership""" try: set.index( x ) return True except ValueError: return False
def pct_diff(x, y):
    """
    Return the percent difference (as a fraction) between base number *x*
    and "new" number *y*; also prints the rounded percentage.
    """
    percent = round(abs(y - x) / x * 100, 2)
    print('{}%'.format(percent))
    return percent / 100
def equivalent_interest(i, c, p):
    """
    Author: Thomas Richmond
    Purpose: Convert a periodically compounded interest rate to an
             equivalent interest rate for a different payment period.
    Parameters:
        i [float]   - Interest being compounded at rate c, as a decimal
        c [integer] - Compounding periods per year
        p [integer] - Payment periods per year
    """
    growth_per_payment = (1 + i) ** (c / p)
    return growth_per_payment - 1
def is_crack(x, y):
    """Determine whether a pair of particles define the crack.

    Returns 1 when the straight line between the two particles crosses
    the vertical centre line x=0.5 within the crack's vertical extent,
    otherwise 0.
    """
    crack_length = 0.3
    # order the pair left-to-right by x-coordinate
    left, right = (y, x) if x[0] > y[0] else (x, y)
    # 1e-6 makes it fall one side of central line of particles
    if not (left[0] < 0.5 + 1e-6 and right[0] > 0.5 + 1e-6):
        return 0
    # straight line through both particles: height = slope*x + intercept
    slope = (right[1] - left[1]) / (right[0] - left[0])
    intercept = left[1] - slope * left[0]
    # height of that line at x = 0.5
    height = slope * 0.5 + intercept
    lower = 0.5 * (1 - crack_length)
    upper = 0.5 * (1 + crack_length)
    return 1 if lower < height < upper else 0
def get_spec_id(spec):
    """Get a quasi identifier for the spec, built from its name, version,
    full hash and spackmon id."""
    return "{0}@{1}/{2}:{3}".format(
        spec["name"], spec["version"], spec["full_hash"], spec["id"]
    )
def _count_dependency_matches(deps1, deps2): """ Checks how many of the deps1 have a matching dependency in deps2 :param deps1: list or set of Dependency objects :param deps2: list or set of Dependency objects :return: integer """ matches = 0 for dep1 in deps1: for dep2 in deps2: if dep1.is_equivalent(dep2): matches += 1 break return matches
def generate_performance_payload(start_time, cpu_util_pct, mem_util_pct, net_info):
    """Build the JSON payload POSTed to the django/mysql backend.

    :param start_time: measurement start timestamp (stringified as-is)
    :param cpu_util_pct: CPU utilisation, formatted to 4 decimal places
    :param mem_util_pct: memory utilisation, formatted to 4 decimal places
    :param net_info: dict with 'in' and 'out' byte totals
    :return: compact JSON string payload
    """
    import json  # stdlib; local import keeps this edit self-contained
    fields = {
        "startTime": str(start_time),
        "cpuUtilPct": '%0.4f' % cpu_util_pct,
        "memUsedPct": '%0.4f' % mem_util_pct,
        "getTotal": str(net_info['in']),
        "sentTotal": str(net_info['out']),
    }
    # Compact separators reproduce the previous hand-concatenated string
    # byte-for-byte for clean inputs, while json.dumps additionally
    # guarantees valid JSON (quotes/backslashes in values are escaped,
    # which the string concatenation silently got wrong).
    return json.dumps(fields, separators=(",", ":"))
def is_use_existing(exists, expected, use_anyway):
    """Determine if the resource is an existing resource that can be used.

    :param exists: Whether we found the resource in target API.
    :param expected: Whether we expected to find the resource in target API.
    :param use_anyway: If we should use it, even if we didn't expect it.
    :return: bool
    """
    if not exists:
        return False
    # an existing resource is usable when expected, or when we accept
    # unexpected resources anyway
    return bool(expected or use_anyway)
def extract_project_name(content):
    """
    Extract the Cargo project name from a parsed Cargo.toml dictionary.

    :param dict content: The Cargo.toml parsed dictionary
    :returns: The project name, or None when the keys are missing
    :rtype: str
    """
    try:
        package_table = content['package']
        return package_table['name']
    except KeyError:
        return None
def list_ep(als,step=1): """ Return a nested list of ends of consecutive runs given an ordered list of numbers. Arguments: - als - an ordered list of numbers - step - step size, to define "consecutive runs" [ 1 ] """ # first check if the input list is sorted in ascending order try: assert als == sorted(als) except AssertionError: print("input list needs to be in ascending order") return # initialize output list out = [] # no. of elements n = len(als) # if there are no elements, terminate if n == 0: return [] # last no. stored, initialize with one number less than the first lns = als[0]-1 # for every number for x in range(n): # first element e1 = als[x] # if it is not greater than the last stored no., # then nothing more to do for this no. if e1 <= lns: continue # initial range end-points for this no. r1 = e1 r2 = e1 # initialize expected no. expn = e1 # compare this no. with all subsequent no.s for y in range(x+1,n): # second element e2 = als[y] # expected no. increment by step size in every loop expn += step # if the next no. matches the expectation if e2 == expn: # update right end-point of the range with the current no. r2 = e2 # when you reach a no. that doesn't else: # update the same end-point with the previous no. r2 = als[y-1] break # store current end-points out.append((r1,r2)) # update the last number stored lns = out[-1][1] return out
def safe_str(maybe_str):
    """Decode *maybe_str* as UTF-8 if it has a ``decode`` method (bytes),
    otherwise return it unchanged.

    Helps bridge python 2 and 3 string handling in tests.
    """
    decode = getattr(maybe_str, 'decode', None)
    if decode is None:
        return maybe_str
    return decode('utf-8')
def diff_to_str(diff: int) -> str:
    """Convert a diff id to a string with leading "D"."""
    return f'D{diff}'
def filter_n(*args):
    """
    Filter any number of parallel lists based on a predicate applied to
    the first list; rows whose key fails the predicate are dropped from
    every list. Shorter trailing lists simply contribute nothing for rows
    beyond their length.

    Returns the filtered lists as a tuple (unpacks as multiple values).
    """
    predicate, *columns = args
    filtered = [[] for _ in columns]
    for row, key in enumerate(columns[0]):
        if not predicate(key):
            continue
        # key passed: keep this row in every column that has it
        for source, kept in zip(columns, filtered):
            if row < len(source):
                kept.append(source[row])
    return tuple(filtered)
def lists2dict(list_of_pipeline_description, url, d):
    """
    Convert a list of split module names into a hierarchic dictionary
    whose leaves are lists containing the url to the module documentation.

    Parameters
    ----------
    list_of_pipeline_description: list of list of str (mandatory)
        the split module names to organize by modules
    url: str (mandatory)
        the url to the module documentation (falsy urls are stored as "")
    d: dict (mandatory)
        the dictionary to fill in place (also returned)

    Returns
    -------
    d: hierarchic dict
        each key is a sub-module; leaves hold a list of documentation urls
    """
    for parts in list_of_pipeline_description:
        head = parts[0]
        if len(parts) == 1:
            # leaf: a pipeline -- record its documentation url
            d.setdefault(head, []).append(url or "")
        elif head in d:
            # existing branch: merge the remainder recursively
            d[head].update(lists2dict([parts[1:]], url, d[head]))
        else:
            # new branch: build the remainder from scratch
            d[head] = lists2dict([parts[1:]], url, {})
    return d
def linearize_subtable(subtable, table_page_title, table_section_title):
    """Linearize the highlighted subtable and return a string of its contents.

    Each cell is emitted as ``<cell> value`` followed by its column and
    row headers, all wrapped in a ``<table>`` block; optional page and
    section titles are prepended when non-empty.
    """
    parts = []
    if table_page_title:
        parts.append("<page_title> " + table_page_title + " </page_title> ")
    if table_section_title:
        parts.append("<section_title> " + table_section_title + " </section_title> ")
    parts.append("<table> ")
    for item in subtable:
        cell_tokens = ["<cell> " + item["cell"]["value"] + " "]
        # all the column headers associated with this cell
        for header in item["col_headers"]:
            cell_tokens.append("<col_header> " + header["value"] + " </col_header> ")
        # all the row headers associated with this cell
        for header in item["row_headers"]:
            cell_tokens.append("<row_header> " + header["value"] + " </row_header> ")
        cell_tokens.append("</cell> ")
        parts.append("".join(cell_tokens))
    parts.append("</table>")
    return "".join(parts)
def postorder_traverse_re(root):
    """Post-order traversal (recursive): left subtree, right subtree, node."""
    if not root:
        return []
    left_vals = postorder_traverse_re(root.left)
    right_vals = postorder_traverse_re(root.right)
    return left_vals + right_vals + [root.val]
def format_str_timestamp(timestamp_str):
    """Ensure a str timestamp ends with the char `Z`.

    For instance '2014-12-10T12:00:00.123123' becomes
    '2014-12-10T12:00:00.123123Z'; already-suffixed strings pass through.

    Args:
        timestamp_str (str): The timestamp of the monitoring metric

    Returns:
        str: The timestamp string with a trailing 'Z'
    """
    if timestamp_str.endswith('Z'):
        return timestamp_str
    return "{}Z".format(timestamp_str)
def encode_keys(mapping, key_encode):
    """Encodes all keys in mapping with ``key_encode`` callable.

    Returns tuple of: key mapping (encoded key => key) and value
    mapping (encoded key => value). ``key_encode`` is called exactly
    once per key.

    >>> mapping = {'k1': 1, 'k2': 2}
    >>> keys, mapping = encode_keys(mapping,
    ...     lambda k: str(base64_encode(k).decode('latin1')))
    >>> sorted(keys.items())
    [('azE=', 'k1'), ('azI=', 'k2')]
    >>> sorted(mapping.items())
    [('azE=', 1), ('azI=', 2)]
    """
    key_mapping = {}
    encoded_mapping = {}
    for key, value in mapping.items():
        encoded = key_encode(key)
        key_mapping[encoded] = key
        encoded_mapping[encoded] = value
    return key_mapping, encoded_mapping
def flatten_dict(multi_level_dict) -> dict:
    """Flatten a nested dictionary into a single level.

    Nested dict values are recursively merged into the result; only
    non-dict leaves are kept. Duplicate leaf keys across levels
    overwrite in iteration order.
    """
    flat = {}
    for key, value in multi_level_dict.items():
        if isinstance(value, dict):
            flat.update(flatten_dict(value))
        else:
            flat[key] = value
    return flat
def is_source_line(source_str, file_ext):
    """Return True if the line appears to contain source code, False otherwise.

    C-family/Java/Rust lines count when they contain a ';' past column 0;
    Python lines count when non-empty; all other extensions yield False.
    """
    if file_ext in ('.c', '.cpp', '.cxx', '.h', '.m', '.java', '.rs'):
        # NOTE(review): '>' (not '>=') means a line whose only ';' sits at
        # column 0 is not counted -- presumably intentional; confirm.
        return source_str.find(';') > 0
    if file_ext == '.py':
        return len(source_str) > 0
    # Fix: previously fell through and returned None implicitly for unknown
    # extensions; make the documented False explicit (still falsy, so
    # truthiness-based callers are unaffected).
    return False