content
stringlengths
42
6.51k
def _sequence2num(sequence): """ Returns the input sequence as a number. """ if isinstance(sequence, str): sequence = int(sequence[2]) return sequence
def compare_solutions(dict_1, dict_2, keys):
    """Compare solution obtained from NEMDE approximation with observed NEMDE solution.

    Keys and values in dict_1 are used as the basis. For each key in *keys*
    the model and actual NEMDE solution are compared; other keys are passed
    through unchanged.

    Parameters
    ----------
    dict_1 : dict
        Model solution
    dict_2 : dict
        NEMDE solution
    keys : list
        Keys on which to compare the solutions.
    """
    out = {}
    for name, model_value in dict_1.items():
        if name not in keys:
            out[name] = model_value
            continue
        actual = float(dict_2[name])
        delta = model_value - actual
        out[name] = {
            'model': model_value,
            'actual': actual,
            'difference': delta,
            'abs_difference': abs(delta),
        }
    return out
def dot(v, u):
    """Compute the dot product of vectors *v* and *u* (indexed by len(v))."""
    return sum(component * u[idx] for idx, component in enumerate(v))
def unique_chars(word):
    """Return a sorted string containing the unique characters of *word*.

    Bug fix: the original sorted the word but never removed duplicate
    characters, contradicting its documented contract.
    """
    return "".join(sorted(set(word)))
def update_config_resgrp(config):
    """Populate resource-group related entries in config["azure_cluster"].

    resource_group and subnet_name keep any pre-existing value; the other
    names are always (re)derived from cluster_name.
    """
    cluster = config["azure_cluster"]
    name = config["cluster_name"]
    cluster["resource_group"] = cluster.get("resource_group", name + "ResGrp")
    cluster["vnet_name"] = name + "-VNet"
    cluster["subnet_name"] = cluster.get("subnet_name", name + "-subnet")
    cluster["storage_account_name"] = name + "storage"
    cluster["nsg_name"] = name + "-nsg"
    # NFS NSG gets a "-nfs" infix only when NFS nodes are configured.
    nfs_suffix = "-nfs" if int(cluster["nfs_node_num"]) > 0 else ""
    cluster["nfs_nsg_name"] = name + nfs_suffix + "-nsg"
    return config
def dic_lowest(dic):
    """Return the key with the lowest value in the dictionary.

    Parameters
    ----------
    dic: The dictionary to get the lowest value out of.

    Returns
    -------
    The key with the lowest value; "error" for an empty dictionary
    (sentinel kept for backward compatibility).

    Bug fix: the original initialised the running minimum to the magic
    constant 100000000, so any dictionary whose values are all >= 1e8
    incorrectly returned "error".
    """
    if not dic:
        return "error"
    # min keeps the first key on ties, matching the original strict-< scan.
    return min(dic, key=dic.get)
def get_user_name(user_json):
    """Derive the export user name from a user profile JSON dict.

    The user name is the part of the login ID before the '@'
    (e.g. "someuser@ourdomain.net" -> "someuser"). Profiles without a
    login_id yield None.
    """
    if 'login_id' not in user_json:
        return None
    return user_json["login_id"].split("@")[0]
def timestamp():
    """Return the current local time as a compact YYYYMMDDTHHMMSS string."""
    from datetime import datetime
    return datetime.now().strftime("%Y%m%dT%H%M%S")
def brightness_to_percentage(byt):
    """Convert an absolute brightness (0..255) to a truncated percentage."""
    return int(byt * 100 / 255)
def check_shots_vs_bounds(shot_dict, mosaic_bounds, max_out_of_bounds=3):
    """Check whether all but *max_out_of_bounds* shots are within mosaic bounds.

    Parameters
    ----------
    shot_dict : dict
        Maps shot name to [x, y] coordinates.
    mosaic_bounds : list
        [min_x, max_x, min_y, max_y] from a .Align file.
    max_out_of_bounds : int, optional
        Maximum number of out-of-bounds shots allowed for a match. Default 3.

    Returns
    -------
    bool
        True when at most *max_out_of_bounds* shots fall outside the bounds.
    """
    min_x, max_x, min_y, max_y = mosaic_bounds
    misses = sum(
        1 for coords in shot_dict.values()
        if not (min_x <= coords[0] <= max_x) or not (min_y <= coords[1] <= max_y)
    )
    return misses <= max_out_of_bounds
def f_to_c(f_temp):
    """Convert Fahrenheit to Celsius; return None for unparseable input."""
    try:
        fahrenheit = float(f_temp)
    except ValueError:
        return None
    return (fahrenheit - 32.0) * 5.0 / 9.0
def mock_render_to_string(template_name, context):
    """Return a string encoding *template_name* and the sorted context items."""
    snapshot = (template_name, sorted(context.items()))
    return str(snapshot)
def union(a, b):
    """Return a set containing the union of the elements of *a* and *b*."""
    return set(a).union(b)
def name_value(obj):
    """Convert a mapping's (key, value) pairs to HAR name/value records."""
    records = []
    for key, value in obj.items():
        records.append({"name": key, "value": value})
    return records
def group(s, n):
    """Repack items of iterable *s* into n-sized tuples, dropping leftovers.

    Uses the classic zip-over-one-iterator trick: all n zip arguments share
    the same iterator, so consecutive items land in the same tuple.
    """
    shared = iter(s)
    return list(zip(*([shared] * n)))
def float_range(start, stop=None, step=None):
    """Return a list containing an arithmetic progression of floats.

    Like range(), but accepts float increments; *stop* is excluded. With a
    single argument, counts from 0.0 up to it.
    """
    if stop is None:
        start, stop = 0.0, float(start)
    if step is None:
        step = 1.0
    result = []
    current = float(start)
    # Accumulate by repeated addition (matches classic float-range behavior).
    while current < stop:
        result.append(current)
        current += step
    return result
def string_to_functional(dict_string):
    """Parse a functional option string into (name, parameter dict).

    Args:
        dict_string: functional name followed by space-separated key/value
            pairs, e.g. "pbe a 1.0 b 2".

    Returns:
        tuple: (functional name, dict of str -> float). For "pbeu" the
        parameters are nested under the "LDAUU" key.
    """
    tokens = dict_string.split(" ")
    functional = tokens[0]
    params = {key: float(val) for key, val in zip(tokens[1::2], tokens[2::2])}
    if functional == "pbeu":
        return functional, {"LDAUU": params}
    return functional, params
def to_psql_array(lst):
    """Render *lst* in the psql array literal format '{a,b,c}'."""
    body = ",".join("{}".format(item) for item in lst)
    return "{" + body + "}"
def check_game_over(boardPlaceTestList):
    """Return True when no piece in *boardPlaceTestList* can still be placed.

    Any entry equal to True means a piece is placeable, so the game is not
    over. Entries equal to False vote for game over; other values are
    ignored (matching the original elif behavior).
    """
    game_over = False
    for placeable in boardPlaceTestList:
        if placeable == True:
            return False
        if placeable == False:
            game_over = True
    return game_over
def merge(less, L, R):
    """Merge two sorted lists using comparison function *less*.

    *less(a, b)* returns True when a orders before b; on ties the element
    from R is taken first (as in the original).
    """
    result = []
    i = k = 0
    # Standard two-pointer merge while both lists have items left.
    while i < len(L) and k < len(R):
        if less(L[i], R[k]):
            result.append(L[i])
            i += 1
        else:
            result.append(R[k])
            k += 1
    # Drain whichever side is unexhausted.
    result.extend(L[i:])
    result.extend(R[k:])
    return result
def two_of_three(x, y, z):
    """Return a*a + b*b where a and b are the two smallest of x, y, z.

    >>> two_of_three(1, 2, 3)
    5
    >>> two_of_three(5, 3, 1)
    10
    >>> two_of_three(10, 2, 8)
    68
    >>> two_of_three(5, 5, 5)
    50
    """
    smallest, second, _ = sorted((x, y, z))
    return smallest * smallest + second * second
def dict_diff(expected: dict, actual: dict) -> dict:
    """Return the entries of *actual* whose values differ from *expected*.

    :param expected: baseline mapping
    :param actual: mapping to diff against the baseline
    :return: dict of key -> actual value for every differing key; keys
        present only in *actual* are always included.

    Bug fix: the original used `is not` (identity), which wrongly reported
    equal-but-distinct objects (e.g. equal lists, large ints) as differences.
    """
    diff = {}
    for key, value in actual.items():
        if expected.get(key) != value:
            diff[key] = value
    return diff
def is_requirement(line):
    """Return True if the requirements-file line is a package requirement.

    Returns:
        bool: False for blank lines, comments, pip option lines
        (-r/-e/-c) and git+ URLs.
    """
    if line == '':
        return False
    return not line.startswith(('-r', '#', '-e', 'git+', '-c'))
def percentage(percent: float) -> str:
    """Format a percentage for stats logging, e.g. 12.5 -> '12.50%'.

    :param percent: percentage value
    :return: formatted string with two decimal places
    """
    return f"{percent:.2f}%"
def _find_file(searched_file_name, rfiles): """Search for a filename in an array for {fname:fcontent} dicts""" for rfile in rfiles: if rfile.has_key(searched_file_name): return rfile return None
def hexagonal(n):
    """Return the n-th hexagonal number, H(n) = 2n^2 - n."""
    return 2 * n * n - n
def datastore_start_cmd(port, assignment_options):
    """Prepare command line arguments for starting a datastore server.

    Args:
        port: int tcp port to start the datastore server on.
        assignment_options: dict of assignment options from ZK.

    Returns:
        list of command line arguments.
    """
    backend = assignment_options.get('backend', 'cassandra')
    cmd = ['appscale-datastore', '--type', backend, '--port', str(port)]
    if assignment_options.get('verbose'):
        cmd += ['--verbose']
    return cmd
def _get_block_sizes_v1(resnet_size): """Retrieve the size of each block_layer in the ResNet model. The number of block layers used for the Resnet model varies according to the size of the model. This helper grabs the layer set we want, throwing an error if a non-standard size has been selected. Args: resnet_size: The number of convolutional layers needed in the model. 6n+2. Returns: A list of block sizes to use in building the model. Raises: KeyError: if invalid resnet_size is received. Remarks: Used in gt_ressize_dependent_params_v1. """ choices = { 8: [1, 1, 1], # 0.09M, (16, 32, 64) 14: [2, 2, 2], # 0.18M 20: [3, 3, 3], # 0.25M 32: [5, 5, 5], # 0.46M 44: [7, 7, 7], # 0.66M 56: [9, 9, 9], # 0.85M 110: [18, 18, 18], # 1.7M 218: [36, 36, 36] # 3.4M } try: return choices[resnet_size] except KeyError: err = ('Could not find layers for selected Resnet v1 size.\n' 'Size received: {}; sizes allowed: {}.'.format( resnet_size, choices.keys())) raise ValueError(err)
def variable_set_up_computer(num):
    """Return a message describing the computer's choice (1=rock, 2=paper, 3=scissors)."""
    messages = {
        1: 'The computer chose rock.',
        2: 'The computer chose paper.',
        3: 'The computer chose scissors.',
    }
    # Unknown inputs fall through to None, matching the original if-chain.
    return messages.get(num)
def skip_odd_numbers(value):
    """Example filter function: True for even values, False for odd ones."""
    remainder = value % 2
    return remainder == 0
def kernel_sigmas(n_kernels, lamb, use_exact):
    """Get the sigma for each Gaussian kernel.

    :param n_kernels: number of kernels (including the exact-match kernel)
    :param lamb: width multiplier applied to the bin size
    :param use_exact: unused; kept for interface compatibility
    :return: l_sigma, a list of sigmas; index 0 is the tiny exact-match sigma

    Bug fix: the original computed bin_size before the n_kernels == 1
    guard, raising ZeroDivisionError for a single kernel.
    """
    l_sigma = [0.00001]  # exact match: small variance -> exact match
    if n_kernels == 1:
        return l_sigma
    bin_size = 2.0 / (n_kernels - 1)
    l_sigma += [bin_size * lamb] * (n_kernels - 1)
    return l_sigma
def fromStr(valstr):
    """Parse *valstr* as bytes, bool, int or float, falling back to str.

    Returns:
        bytes for '' or hex '0x...' input, True/False for the literal
        words 'True'/'False', otherwise an int, float, or the original
        string.

    Args:
        valstr (string): a user-provided string

    Bug fix: the original compared the string against the bool constants
    (`valstr == True`), which can never be true for a str, so boolean
    input was never recognised.
    """
    if len(valstr) == 0:
        # Treat an empty string as empty bytes
        return bytes()
    if valstr.startswith('0x'):
        # Hex digits -> bytes; convert back with val.decode('utf-8') if needed
        return bytes.fromhex(valstr[2:])
    if valstr == 'True':
        return True
    if valstr == 'False':
        return False
    try:
        return int(valstr)
    except ValueError:
        try:
            return float(valstr)
        except ValueError:
            return valstr  # not a float or an int, assume string
def fast_fib(n, memo=None):
    """Return Fibonacci of n (with fib(0) == fib(1) == 1), memoized.

    Assume n is an int >= 0; *memo* is used only by recursive calls.

    Fix: the original used a mutable default argument ({}), so the cache
    was shared across every top-level call to the function. A None
    sentinel gives each top-level call a fresh cache while remaining
    backward compatible for callers that pass their own dict.
    """
    if memo is None:
        memo = {}
    if n == 0 or n == 1:
        return 1
    try:
        return memo[n]
    except KeyError:
        result = fast_fib(n - 1, memo) + fast_fib(n - 2, memo)
        memo[n] = result
        return result
def is_project_in_server(server: dict, github_path: str) -> bool:
    """Check if a project with the given github path is in *server*.

    :param server: server record; its "projects" entry is a list of dicts.
    :param github_path: value to match against each project's "githubPath".
    :return: True when any project matches.

    Improvement: pass a generator to any() so it can short-circuit on the
    first match — the original built a full list first (the smell its own
    pylint disable comment acknowledged).
    """
    projects = server.get("projects", [])
    return any(project.get("githubPath") == github_path for project in projects)
def stringify(val):
    """Accept str or bytes; return a str when UTF-8 decoding succeeds.

    Non-bytes input (AttributeError) and undecodable bytes
    (UnicodeDecodeError) are passed through unchanged.
    """
    try:
        return val.decode('utf-8')
    except (UnicodeDecodeError, AttributeError):
        return val
def markovToLatex(markov, labels, precision=2):
    """Return the transition matrix formatted as a LaTeX bmatrix string.

    (Despite the original name, nothing is printed — the LaTeX source is
    returned.)

    Parameters:
        markov: square 2D matrix of transition probabilities; columns are
            iterated with len(markov), so the matrix is assumed square —
            TODO confirm with callers.
        labels: one label per state, rendered with \\text{...} above the matrix.
        precision: number of decimal places each entry is rounded to.
    """
    lstring = ''
    # print the labels
    lstring += '\\text{' + str(labels[0]) + '}'
    for i in range(1, len(labels)):
        lstring += ' \\text{ ' + str(labels[i]) + '}'
    lstring += '\\\\\n'
    # header
    lstring += '\\begin{bmatrix}\n'
    # print the transition matrix
    for i in range(len(markov)):
        # First column starts the row; remaining columns are '&'-joined.
        rowstring = str(round(markov[i][0], precision))
        for j in range(1, len(markov)):
            value = round(markov[i][j], precision)
            rowstring += ' & {0}'.format(value)
        # add line termination \\ and \n
        lstring += rowstring + '\\\\\n'
    # footer
    lstring += '\\end{bmatrix}\n'
    return lstring
def input_to_list(input_data, capitalize_input=False):
    """Normalize user input (list or comma-separated str) to a list.

    Args:
        input_data (list or str): input from the user to handle.
        capitalize_input (bool): whether to title-case the resulting items.

    Returns:
        list: the original list, or the string split on commas with empty
        pieces dropped.
    """
    if not input_data:
        items = []
    elif isinstance(input_data, list):
        items = input_data
    else:
        items = [piece for piece in input_data.split(',') if piece]
    if capitalize_input:
        return [item.title() for item in items]
    return items
def logout(session):
    """Log the user out.

    If the session holds a 'tid', the session is cleared and success:1 is
    returned; otherwise success:0.
    """
    if 'tid' not in session:
        return {"success": 0, "message": "You do not appear to be logged in."}
    session.clear()
    return {"success": 1, "message": "Successfully logged out."}
def parse_unknown_args(**unk_args):
    """Parse kwargs like dict(foo=False, bar=42) into CLI parameters like
    ['--no-foo', '--bar', '42'].

    Booleans become bare flags ('--key' / '--no-key'); everything else is
    emitted as a '--key value' pair.
    """
    parsed = []
    for key, value in unk_args.items():
        flag = key.replace('_', '-')
        if isinstance(value, bool):
            parsed.append('--' + flag if value else '--no-' + flag)
        else:
            parsed += ['--' + flag, str(value)]
    return parsed
def _flow_tuple_reversed(f_tuple): """Reversed tuple for flow (dst, src, dport, sport, proto)""" return (f_tuple[1], f_tuple[0], f_tuple[3], f_tuple[2], f_tuple[4])
def cmap(i, j, n):
    """Given a pair of feed indices, return the pair index.

    Parameters
    ----------
    i, j : integer
        Feed index (order-insensitive).
    n : integer
        Total number of feeds.

    Returns
    -------
    pi : integer
        Pair index into the upper-triangular pair enumeration.
    """
    # Normalise so i <= j instead of recursing.
    if i > j:
        i, j = j, i
    total_pairs = n * (n + 1) // 2
    pairs_from_i = (n - i) * (n - i + 1) // 2
    return total_pairs - pairs_from_i + (j - i)
def CFMtom3sec(VCFM):
    """Convert a volumetric flow rate from CFM to m3/sec.

    Conversion: 2118.8799727597 CFM = 1 m3/sec.

    :param VCFM: volumetric flow rate [CFM]
    :return: volumetric flow rate [m3/sec]
    """
    return VCFM / 2118.8799727597
def getY(line, x):
    """Return the integer y coordinate of *line* = (slope, intercept) at *x*."""
    slope, intercept = line
    return int(slope * x + intercept)
def job_name(job):
    """Build a friendly name for the job: '_'-joined parts with '=' -> '-'
    and any '--' runs removed."""
    joined = '_'.join(job)
    return joined.replace('=', '-').replace('--', '')
def _cubic_interpolation(x, xtab0, xtab1, ytab0, ytab1, yptab0, yptab1):
    """Cubic (Hermite) interpolation of tabular data.

    Translated from the cubeterp function in seekinterp.c, distributed
    with HEASOFT. Given a tabulated abcissa at two points xtab[] and a
    tabulated ordinate ytab[] (+derivative yptab[]) at the same abcissae,
    estimate the ordinate at requested point "x" (only the ordinate is
    returned, not the derivative).

    Works for numbers or arrays for x. If x is an array, xtab, ytab and
    yptab are arrays of shape (2, x.size).
    """
    dx = x - xtab0
    # Distance between adjoining tabulated abcissae and ordinates
    xs = xtab1 - xtab0
    ys = ytab1 - ytab0
    # Rescale or pull out quantities of interest: work on the unit
    # interval so the Hermite basis coefficients below are exact.
    dx = dx / xs  # Rescale DX
    y0 = ytab0  # No rescaling of Y - start of interval
    yp0 = yptab0 * xs  # Rescale tabulated derivatives - start of interval
    yp1 = yptab1 * xs  # Rescale tabulated derivatives - end of interval
    # Compute polynomial coefficients of a + b*t + c*t^2 + d*t^3
    a = y0
    b = yp0
    c = 3 * ys - 2 * yp0 - yp1
    d = yp0 + yp1 - 2 * ys
    # Perform cubic interpolation (Horner evaluation)
    yint = a + dx * (b + dx * (c + dx * d))
    return yint
def format_data(account):
    """Format an account record into a printable one-line summary."""
    return "{}, a {} from {}".format(
        account["name"], account["description"], account["country"])
def dict_to_title(argmap):
    """Convert a map of the relevant args to a title.

    Keys in *exclude_list* are dropped; remaining pairs are joined as
    "k=v" in the dict's iteration (insertion) order, which is
    deterministic on Python 3.

    Fix: removed a leftover debug print of the whole argmap.
    """
    exclude_list = ["train-file", "classes-file", "val-file", "cmd", "role-file"]
    return "_".join(k + "=" + v for k, v in argmap.items() if k not in exclude_list)
def avg(num_1, num_2):
    """Compute the average of two numbers as a float."""
    total = num_1 + num_2
    return total / 2.0
def select_proxy(scheme, host, port, proxies):
    """Select a proxy for the url, if applicable.

    :param scheme, host, port: components of the url being requested
    :param proxies: dict of schemes or scheme+host keys to proxy URLs
    :return: the most specific matching proxy URL, or None.
    """
    proxies = proxies or {}
    if host is None:
        return proxies.get(scheme, proxies.get('all'))
    # Most specific key first: scheme+host, scheme, all+host, all.
    for key in (scheme + '://' + host, scheme, 'all://' + host, 'all'):
        if key in proxies:
            return proxies[key]
    return None
def get_segment_length(fs, resolution_shift=0):
    """Return a power-of-two segment length giving roughly ~1Hz resolution.

    The sampling rate minus one is rounded up to the next power of two
    (e.g. fs=500 -> 512). *resolution_shift* moves the resolution up or
    down by that many bits.

    :param fs: sampling rate of the signal.
    :param resolution_shift: bit shift applied to the resolution.
    :return: the segment length.
    """
    bits = (fs - 1).bit_length() - int(resolution_shift)
    return 1 << bits
def productExceptSelf(nums):
    """Return a list where out[i] is the product of all nums except nums[i].

    :type nums: List[int]
    :rtype: List[int]

    O(n) two-pass approach: forward prefix products, then a backward
    sweep folding in suffix products.
    """
    out = [1]
    for value in nums[:-1]:
        out.append(value * out[-1])
    suffix = 1
    for idx in range(len(nums) - 1, -1, -1):
        out[idx] *= suffix
        suffix *= nums[idx]
    return out
def manhattan_distance(x, y):
    """Manhattan distance from the origin to the point (x, y)."""
    return abs(x) + abs(y)
def gen_image_name(reference: str) -> str:
    """Generate the image name as a signing input, based on the docker reference.

    Args:
        reference: Docker reference for the signed content,
            e.g. registry.redhat.io/redhat/community-operator-index:v4.9

    The registry host (first path component) and the tag are stripped.

    Bug fix: the tag is now removed only from the final path component,
    so registry hosts that include a port (host:5000/ns/img:tag) are
    handled correctly — the original reference.split(":")[0] truncated
    everything after the port's colon.
    """
    parts = reference.split("/")
    parts[-1] = parts[-1].split(":")[0]
    return "/".join(parts[1:])
def find_unique_name(name, names, inc_format='{name}{count:03}', sanity_count=9999999):
    """Find a name not present in *names* by appending an incrementing count.

    :param name: str, base name to search for
    :param names: list<str>, existing names to avoid
    :param inc_format: str, format used to build candidate names
    :param sanity_count: int, cap on attempts to prevent an infinite loop
    """
    candidate = name
    attempt = 0
    while candidate in names:
        attempt += 1
        candidate = inc_format.format(name=name, count=attempt)
        if sanity_count and attempt > sanity_count:
            raise Exception('Unable to find a unique name in {} tries, try a different format.'.format(sanity_count))
    return candidate
def GetExpandedList(l, s):
    """Return a list where every element of *l* is duplicated *s* times."""
    return [item for item in l for _ in range(s)]
def byte_from_str(size, unit):
    """Convert a size value plus unit string to bytes.

    :param size: numeric value, e.g. 3.6 or "3.6"; a trailing unit
        character (e.g. "3.6T") is tolerated.
    :param unit: 'T'/'TB', 'G'/'GB', 'M'/'MB', 'K'/'KB' (case-insensitive);
        any other unit leaves the value unscaled.
    :return: size in bytes (float).

    Fix: the bare `except` was narrowed to the conversion errors float()
    actually raises, so unrelated failures are no longer swallowed.
    """
    unit = unit.upper()
    try:
        size = float(size)
    except (ValueError, TypeError):
        # e.g. "3.6T": drop the trailing unit character and retry.
        size = float(size[:-1])
    multipliers = {
        'T': 1024 ** 4, 'TB': 1024 ** 4,
        'G': 1024 ** 3, 'GB': 1024 ** 3,
        'M': 1024 ** 2, 'MB': 1024 ** 2,
        'K': 1024, 'KB': 1024,
    }
    return size * multipliers.get(unit, 1)
def valid_refined(candidate):
    """Return True when some digit of *candidate* occurs exactly twice.

    Note: a digit appearing exactly twice qualifies even if other digits
    also repeat (matching the original loop's behavior).
    """
    digits = str(candidate)
    return any(digits.count(ch) == 2 for ch in digits)
def egcd(n: int, m: int) -> dict:
    """Extended Euklidean algorithm.

    Finds greatest common divisor of n and m together with the Bezout
    coefficients.

    Args:
        n -- first number
        m -- second number

    Return:
        dictionary with specified keys:
        reminder -- greatest common divisor (key spelling kept for
                    backward compatibility)
        a, b -- answers to equation an + bm = reminder
    """
    # (a, a_) and (b, b_) hold the Bezout coefficients of the current
    # and previous remainders; they are updated in lock-step with the
    # Euclidean divisions below.
    a, a_ = 0, 1
    b, b_ = 1, 0
    # c, d: previous and current remainder in the Euclidean chain.
    c, d = n, m
    q = c // d
    r = c % d
    while r:
        # Shift the remainder window and fold the quotient q into the
        # coefficient pairs (standard extended-Euclid recurrence).
        c, d = d, r
        a_, a = a, a_ - q * a
        b_, b = b, b_ - q * b
        q = c // d
        r = c % d
    return {"reminder": d, "a": a, "b": b}
def has_common_element(a, b):
    """Return True if iterables *a* and *b* share at least one element."""
    return not set(a).isdisjoint(b)
def clean_principals_output(sql_result, username, shell=False):
    """Transform sql principals into a readable form.

    Falls back to *username* when the query returned nothing; with
    shell=True strings are returned raw instead of split into lists.
    """
    if not sql_result:
        return username if shell else [username]
    return sql_result if shell else sql_result.split(',')
def range_n_inv(n, end=0, step=-1):
    """Return the list [n, n+step, ...] stopping before *end*.

    With the defaults this counts down from n to 1 inclusive.
    """
    return list(range(n, end, step))
def exec_command(command):
    """Run *command* through the shell.

    Returns a (exit_code, stdout, stderr) tuple; the streams are bytes.
    """
    import subprocess
    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, shell=True)
    out, err = proc.communicate()
    return proc.returncode, out, err
def is_valid_to_add(prefix, new_char, available):
    """Return whether prefix + new_char is still a valid block-chain.

    We assume *prefix* is already a valid block-chain, by induction.
    One pass over prefix tracks the smallest character seen so far; any
    later character c with smallest < c < new_char forms an invalid
    triple. Additionally, since every character in *available* must come
    after new_char, once smallest < new_char no available character may
    exceed new_char.

    Bug fix: with an empty prefix `smallest` stayed None and the
    comparison `smallest < new_char` raised TypeError on Python 3; the
    check is now guarded.
    """
    smallest = None
    for ch in prefix:
        if smallest is None or ch < smallest:
            smallest = ch
        elif smallest < ch and ch < new_char:
            # smallest < ch < new_char: invalid block-chain
            return False
    if smallest is not None and smallest < new_char:
        for ch in available:
            if new_char < ch:
                return False
    return True
def _isASCII(mthd: str) -> bool: """ Check if the given method contains only ASCII characters. From https://stackoverflow.com/a/27084708/5768407. :param mthd: the method to verify contains only ASCII characters :returns: returns a boolean representing whether or not the given method contains only ASCII characters """ try: mthd.encode(encoding="utf-8").decode("ascii") except UnicodeDecodeError: return False else: return True
def _split_ports_per_fabric(slot_grouping, fabric_data): """Splits the slots per fabric which are to be placed on the same VIOS. :param slot_grouping: The slots which are to be placed in the same vios Ex: [3, 6] Here the slots 3 and 6 are to be placed on the same vios. :param fabric_data: Dictionary where the key is the fabric name. The value is another dictionary with the slots and the p_port_wwpns. Ex: { 'A': {'slots': [3, 4, 5], p_port_wwpns: [1, 2, 3] }, 'B': {'slots': [6, 7, 8], p_port_wwpns: [4, 5] } } The slot indicates which slots from the client slots align with which fabric. :return resp: The slots which can be placed on the same VIOS alone are returned. {'A': {'slots': [3], p_port_wwpns: [1, 2, 3] }, 'B': {'slots': [6], p_port_wwpns: [4, 5] } } """ resp = {} for fabric in fabric_data: slots = [x for x in fabric_data[fabric]['slots'] if x in slot_grouping] if not slots: continue resp[fabric] = {'slots': slots, 'p_port_wwpns': fabric_data[fabric]['p_port_wwpns']} return resp
def copy(obj, deep_copy=True):
    """Return a copy of *obj*, deep by default.

    :param obj: value to copy.
    :param deep_copy: when True, recursively copy nested containers;
        when False, return the object itself unchanged (original
        pass-through behavior kept for compatibility).
    :return: a copy of the object.

    Bug fix: the original called obj.copy(), which is a *shallow* copy
    for dicts/lists, so nested containers stayed shared with the
    original despite the "deep copy" contract.
    """
    if deep_copy:
        from copy import deepcopy
        return deepcopy(obj)
    return obj
def fibonacci_sequence(n):
    """Sum even-valued Fibonacci terms until the running sum reaches *n*.

    Special cases: returns 0 for n == 0, 1 for n == 1, and prints a
    warning (returning 0) for negative input.

    Bug fix: the original guarded negative input with `if a < 0`, which
    is never true since `a` is initialised to 0 — negative n fell
    through silently. The check now tests `n`; the returned value for
    negative n (0) is unchanged.
    """
    a, b = 0, 1
    total = 0
    if n < 0:
        print("input smaller than zero")
        return total
    if n == 0:
        return a
    if n == 1:
        return b
    while total < n:
        a, b = b, a + b
        if b % 2 == 0:
            total += b
    return total
def set_metadata_language(dir):
    """Fix the language code in <dc:language> metadata across a Calibre library.

    Recursively visits every *.opf file under *dir* and rewrites the
    incorrect Catalan code '<dc:language>cat</dc:language>' to
    '<dc:language>ca</dc:language>' in place, so the calibre "polish
    books" plugin can then be run on Catalan-language books.

    Returns the list of modified file paths.

    Bug fixes vs. the original:
    - `if line.find(s0)` was truthy for -1 (not found) and falsy for a
      match at column 0, inverting the detection;
    - the replacement line was commented out, and the fileinput
      inplace-loop never printed lines back, which emptied every
      visited file.
    """
    from pathlib import Path

    # origin/destination strings:
    s0 = '<dc:language>cat</dc:language>'
    s1 = '<dc:language>ca</dc:language>'

    modified = []
    replacements = 0
    # list all the candidate files in the Calibre library:
    for file in Path(dir).rglob('*.opf'):
        text = file.read_text(encoding='utf-8')
        count = text.count(s0)
        if count:
            file.write_text(text.replace(s0, s1), encoding='utf-8')
            modified.append(str(file))
            replacements += count
    print(f'{replacements} replacements were made!')
    print('The following files were modified:\n', modified)
    return modified
def parse_message(data):
    """Return (command, key, value) parsed from a 'cmd;key;value;type' string.

    The value is cast according to the type field: LIST splits on commas,
    INT parses an int, anything else becomes str; an empty type yields None.
    """
    command, key, value, value_type = data.strip().split(';')
    if not value_type:
        return command, key, None
    if value_type == 'LIST':
        value = value.split(',')
    elif value_type == 'INT':
        value = int(value)
    else:
        value = str(value)
    return command, key, value
def title(message, underline='='):
    """Return *message* formatted as a Markdown title.

    The *underline* character is repeated to the message's length on the
    line below, followed by a blank line.
    """
    rule = underline * len(message)
    return f'{message}\n{rule}\n\n'
def _get_combined_beta_probability(p_beta: float, opinion): """ S -> I :param p_beta: :param opinion: positive opinion reduce the probability of infection :return: combined infected probability """ opinion_rate = 1 if opinion == 1: opinion_rate /= 2 return p_beta * opinion_rate
def is_range_common_era(start, end):
    """Return True when both range endpoints are Common Era dates.

    BCE and CE are not compatible at the moment, so a range containing
    any BCE date yields False.

    :param start: dict with an "is_common_era" entry
    :param end: dict with an "is_common_era" entry
    """
    return bool(start.get("is_common_era")) and bool(end.get("is_common_era"))
def titlecomment(line):
    """Return True when *line* is a '//' comment with non-empty text."""
    if not line.startswith('//'):
        return False
    text = line.lstrip('//').strip()
    return len(text) > 0
def getStrongs(word, strongs_index):
    """Retrieve the strong numbers found for *word*.

    :param word: word to look up
    :param strongs_index: mapping of word -> list of strong numbers
    :return: the list for *word*, or an empty list when absent
    """
    return strongs_index.get(word, [])
def pad_seq(dataset, field, max_len, symbol):
    """Pad each record's *field* list in place to *max_len* with *symbol*."""
    for record in dataset:
        seq = record[field]
        assert isinstance(seq, list)
        # Extend with however many symbols are missing (no-op if already long enough).
        seq.extend([symbol] * (max_len - len(seq)))
    return dataset
def bitDescription(bits, *descriptions):
    """Describe a bit-mask with (bit, yes[, no]) description tuples.

    For each description, *yes* is used when the bit is set, *no* (when
    given) otherwise; falsy labels are skipped. Labels are comma-joined.
    """
    parts = []
    for desc in descriptions:
        bit, yes = desc[0], desc[1]
        no = desc[2] if len(desc) > 2 else None
        label = yes if bits & (1 << bit) else no
        if label:
            parts.append(label)
    return ', '.join(parts)
def concat_code_sequences(val):
    """Post-process a code field into a string.

    Allows the field to be written as a sequence of strings (leaving room
    for comments); list input is stripped and ' // '-joined, everything
    else passes through unchanged.
    """
    if not isinstance(val, list):
        return val
    return ' // '.join(part.strip() for part in val)
def as_icon(icon: str) -> str:
    """Convert an icon string 'type/icon' to Font Awesome HTML classes.

    The 'branding' type maps to the 'fab' family; everything else to 'fas'.
    """
    icon_kind, icon_label = icon.split("/")
    family = "fab" if icon_kind.lower() == "branding" else "fas"
    return f'{family} fa-{icon_label}'
def mk_ref_id(refname: str) -> str:
    """Create gerrit CD/ABCD name, refname must not be empty.

    >>> mk_ref_id("1")
    '01/1'
    >>> mk_ref_id("41242")
    '42/41242'
    """
    if len(refname) > 1:
        shard = refname[-2:]
    else:
        # Single-character names are zero-padded on the left.
        shard = "0" + refname
    return shard + "/" + refname
def camel_farm(identifier):
    """Convert an underscored identifier to camelCase."""
    first, *rest = identifier.split('_')
    return first + ''.join(word.capitalize() for word in rest)
def transform_wire_string_to_tuple(wirestring):
    """Decompose a colon-separated wire string into a tuple of three ints.

    :param wirestring: string configuration as found in output.gate.txt
        and output.inputs.txt files
    :return: tuple of three integers
    """
    parts = wirestring.split(':')
    return int(parts[0]), int(parts[1]), int(parts[2])
def clean_codeblock(text):
    """Strip code fences and blank lines; return the remaining lines.

    A leading 'py'/'python' language tag line is dropped as well.
    """
    lines = text.strip(" `").split("\n")
    if lines[0] in ("py", "python"):
        lines = lines[1:]
    return [line for line in lines if line.strip() != ""]
def count_common_letters(strings):
    """Count the characters common to every string in *strings*.

    :type strings: list[str]
    :rtype: int (0 when fewer than two strings are given)
    """
    if len(strings) < 2:
        return 0
    shared = set.intersection(*map(set, strings))
    return len(shared)
def check_meta(file_content, options):
    """Return True when any element's meta tags intersect options['tags'].

    The sentinel [None] in options['tags'] means "no tag filter" and
    always passes.
    """
    wanted = options['tags']
    if wanted == [None]:
        return True
    for element in file_content:
        if 'meta' not in element:
            continue
        for tag in element['meta'].get('tags', []):
            if tag in wanted:
                return True
    return False
def second_kulczynski_sim(u, v):
    """Compute the second Kulczynski similarity between sets u and v.

    sim = (1/2) * ( a/(a+b) + a/(a+c) )
    where a = |u & v|, b = |u only|, c = |v only|. A tiny epsilon guards
    the divisions against empty sets.

    params:
        u, v: sets to compare
    returns:
        float in [0, 1]; 1 is perfect similarity, 0 none.
    """
    a = len(u & v)
    only_u = len(u) - a
    only_v = len(v) - a
    eps = 1e-10
    return (a / (a + only_u + eps) + a / (a + only_v + eps)) / 2
def get_out_nodes(in_node_dict):
    """Create the output-node dictionary from an input-node dictionary.

    Parameters
    ----------
    in_node_dict : dict of int to list of int
        Maps node index to its closest input ancestors (see get_in_nodes).

    Returns
    -------
    out : dict of int to list of int
        Maps node index to its closest output nodes.
    """
    # Every known node starts with an empty output list, then each
    # (node -> ancestor) edge is inverted into (ancestor -> node).
    out_node_dict = {node: [] for node in in_node_dict}
    for node, ancestors in in_node_dict.items():
        for ancestor in ancestors:
            out_node_dict.setdefault(ancestor, []).append(node)
    return out_node_dict
def minDistance(word1, word2):
    """The minimum edit (Levenshtein) distance between word 1 and 2.

    Uses a single rolling row `tmp` of size len(word2)+1, so memory is
    O(len(word2)).
    """
    # Empty-string short-circuits: the distance is the other word's length
    # (the `or ''` / `or 0` guards also cover None input).
    if not word1:
        return len(word2 or '') or 0
    if not word2:
        return len(word1 or '') or 0
    size1 = len(word1)
    size2 = len(word2)
    # tmp[j]: distance between the processed prefix of word1 and word2[:j];
    # initialised to the distances from the empty prefix.
    tmp = list(range(size2 + 1))
    value = None
    for i in range(size1):
        tmp[0] = i + 1
        # `last` carries the diagonal value (previous row, previous column).
        last = i
        for j in range(size2):
            if word1[i] == word2[j]:
                # Matching characters: no extra cost over the diagonal.
                value = last
            else:
                # 1 + min(substitute, insert, delete)
                value = 1 + min(last, tmp[j], tmp[j + 1])
            last = tmp[j+1]
            tmp[j+1] = value
    return value
def make_flattened_values(dic, key_order, fun):
    """Flatten the per-key lists of *dic* into one list, applying *fun*.

    *dic* maps each key in *key_order* to a list; the lists are visited
    in the order given by *key_order*, and fun(index, item) is applied to
    each item (index is the item's position within its own list).
    """
    return [fun(i, item) for key in key_order for i, item in enumerate(dic[key])]
def get_file_ext(ext, fileName):
    """
    Helper function for making sure the correct file ending is appended.

    Parameters
    -------------
    ext = str, file extension. ex) ".json" or ".txt"
    fileName = str, path to file. Can include custom directory, uses processes/ by default.

    Returns
    -------------
    typeName, fileName = str, file paths including the extension (typeName) and without (fileName).
    """
    # Case-insensitive comparison of the path's tail against the extension.
    if ext != fileName[-len(ext):].lower():
        # Extension absent: append it; fileName is returned unchanged.
        typeName = ''.join([fileName, ext])
    else:
        # Extension present: strip it from fileName, then rebuild typeName.
        # NOTE(review): because only the tail is lower-cased, a mixed-case
        # extension (e.g. ".JSON") is stripped and re-appended in ext's
        # casing — confirm this normalisation is intended.
        fileName = fileName[0:-len(ext)]
        typeName = ''.join([fileName, ext])
    return typeName, fileName
def get_min_max(ints):
    """Return a (min, max) tuple from a list of unsorted integers.

    Args:
        ints(list): list containing zero or more integers.

    Prints a message and returns None for an empty list.
    """
    if not ints:
        print('Cannot get min and max from empty array')
        return None
    return min(ints), max(ints)
def deep_merge_dicts(lhs: dict, rhs: dict) -> dict:
    """Recursively merge *rhs* into *lhs* in place and return *lhs*.

    Dict values are merged key-by-key; any other value from *rhs*
    overwrites the corresponding entry in *lhs*.
    """
    for key, rhs_value in rhs.items():
        if isinstance(rhs_value, dict):
            deep_merge_dicts(lhs.setdefault(key, {}), rhs_value)
        else:
            lhs[key] = rhs_value
    return lhs
def compare(a, b):
    """Compare two strings for equality, disregarding all whitespace."""
    import re
    whitespace = re.compile(r"\s*")
    return whitespace.sub("", a) == whitespace.sub("", b)
def fibonacci_iterative(nth_nmb: int) -> int:
    """An iterative approach to find Fibonacci sequence value.
    YOU MAY NOT MODIFY ANYTHING IN THIS FUNCTION!!"""
    # Zero-based convention: fib(0) == 0, fib(1) == 1.
    old, new = 0, 1
    if nth_nmb in (0, 1):
        return nth_nmb
    # Each step advances the (fib(k-1), fib(k)) pair by one index.
    for __ in range(nth_nmb - 1):
        old, new = new, old + new
    return new
def is_allowed_char(ch):
    """Test if the passed symbol is allowed in an abbreviation.

    @param ch: Symbol to test
    @type ch: str
    @return: bool
    """
    special = "#.>+*:$-_!@"
    return ch.isalnum() or ch in special
def _get_fitting_type(fitting_angle, rounded): """Returns fitting type for expansions and reductions. Parameters: fitting_angle: Fitting angle. Usually is 180 for square fittings. rounded: Rounded fitting. Usually is False for square fittings. """ if fitting_angle != 180 and rounded: return 'ambiguous' elif rounded: return 'rounded' elif fitting_angle != 180: return 'tapered' else: return 'square'
def recursive_gcd(x: int, y: int):
    """Euclid's algorithm: recurse with (y, x % y) until y reaches 0."""
    return x if y == 0 else recursive_gcd(y, x % y)
def get_src(src, pt, tform):
    """Sample *src* at the transformed location of *pt* (dest[pt] = src[tform(pt)]).

    Args:
        src (2D indexable): callable supporting "float indexing", invoked
            with the transformed coordinates in (y, x) order.
        pt: the destination point to fill.
        tform: callable mapping a destination point to source coordinates.
    """
    mapped = tform(pt)
    # Note the axis swap: src is called as src(mapped[1], mapped[0]).
    return src(mapped[1], mapped[0])
def generate_big_rules(L, support_data, min_conf):
    """
    Generate big rules from frequent itemsets.

    Args:
        L: The list of Lk — frequent itemsets grouped by size, smallest
            first (the ordering matters: earlier itemsets become the
            candidate consequents of later, larger ones).
        support_data: A dictionary. The key is frequent itemset and the value is support.
        min_conf: Minimal confidence.

    Returns:
        big_rule_list: A list which contains all big rules. Each big rule is
        represented as a 3-tuple (antecedent, consequent, confidence).
    """
    big_rule_list = []
    # Itemsets seen so far; since L is processed smallest-size first,
    # these are the proper-subset candidates for each later freq_set.
    sub_set_list = []
    for i in range(0, len(L)):
        for freq_set in L[i]:
            for sub_set in sub_set_list:
                if sub_set.issubset(freq_set):
                    # conf(X -> Y) = support(X U Y) / support(X),
                    # with X = freq_set - sub_set and Y = sub_set.
                    conf = support_data[freq_set] / support_data[freq_set - sub_set]
                    big_rule = (freq_set - sub_set, sub_set, conf)
                    if conf >= min_conf and big_rule not in big_rule_list:
                        # print freq_set-sub_set, " => ", sub_set, "conf: ", conf
                        big_rule_list.append(big_rule)
            sub_set_list.append(freq_set)
    return big_rule_list
def new_sleep_summary_detail(
    wakeupduration,
    lightsleepduration,
    deepsleepduration,
    remsleepduration,
    wakeupcount,
    durationtosleep,
    durationtowakeup,
    hr_average,
    hr_min,
    hr_max,
    rr_average,
    rr_min,
    rr_max,
):
    """Create a simple dict that simulates api sleep-summary data."""
    return dict(
        wakeupduration=wakeupduration,
        lightsleepduration=lightsleepduration,
        deepsleepduration=deepsleepduration,
        remsleepduration=remsleepduration,
        wakeupcount=wakeupcount,
        durationtosleep=durationtosleep,
        durationtowakeup=durationtowakeup,
        hr_average=hr_average,
        hr_min=hr_min,
        hr_max=hr_max,
        rr_average=rr_average,
        rr_min=rr_min,
        rr_max=rr_max,
    )
def xoai_abstract(source, *args, **kwargs):
    """Collect per-language abstracts from an XOAI source structure.

    Each language version contributes one entry per "field" (which may
    be a single dict or a list of dicts), paired with its "@name"
    language code.
    """
    value = []
    for lang_vers in source:
        lang = lang_vers["@name"]
        field = lang_vers["element"]["field"]
        # Normalise the single-abstract case to a one-element list.
        entries = field if isinstance(field, list) else [field]
        for entry in entries:
            value.append({"name": entry["#text"], "lang": lang})
    return {"abstract": value}