def get_residue_ranges(numbers):
    """Given a list of integers, creates a list of (start, end) ranges from the numbers in the list; numbers at most 3 apart are merged into the same range.

    Parameters
    ----------
    numbers: list
        A list of integers

    Returns
    -------
    list
        A list of (start, end) tuples for the ranges found in the list
    """
    nums = sorted(set(numbers))
    # A new range starts wherever two successive numbers are more than 3 apart.
    gaps = [[s, e] for s, e in zip(nums, nums[1:]) if s + 3 < e]
    edges = iter(nums[:1] + sum(gaps, []) + nums[-1:])
    return list(zip(edges, edges))
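A minimal usage sketch of the range construction (the input list is illustrative, not from the original source):

ranges = get_residue_ranges([1, 2, 3, 10, 11, 30])
# 1-3 and 10-11 each collapse into a single range; 30 stands alone.
print(ranges)  # [(1, 3), (10, 11), (30, 30)]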
def obter_tamanho(m):
    """Returns the size, in x and in y, of the map given as argument."""
    return (m["dim"][0], m["dim"][1])
def decode_payload(payload: str) -> str:
    """ Decode payload with URL-safe base64url. """
    from base64 import urlsafe_b64decode
    # Re-add the padding stripped from base64url strings; -len % 4 adds nothing when the length is already a multiple of 4.
    result: bytes = urlsafe_b64decode(payload + '=' * (-len(payload) % 4))
    return result.decode()
def get_children_templates(pvc_enabled=False): """ Define a list of all resources that should be created. """ children_templates = { "service": "service.yaml", "ingress": "ingress.yaml", "statefulset": "statefulset.yaml", "configmap": "configmap.yaml", "secret": "secret.yaml", } if pvc_enabled: children_templates["pvc"] = "pvc.yaml" return children_templates
def retrieve_boxes_and_masks(scene, objs): """Retrieve bounding boxes and segmentation masks associated to given objs in given scene This assumes REFCLEVR annotations """ if len(objs) == 0: return [], [], [] boxes = [scene["objects"][o.id]["bbox"] for o, _ in objs] tokens = [tok for _, tok in objs] return boxes, None, tokens # The dataset use Run-Length-Encoding for the segmentation mask, but unfortunately it is transposed. # Here we use coco tools to decode, transpose, re-encode then extract bounding boxes raw_rles = [ {"counts": json.loads("[" + scene["obj_mask"][str(scene["objects"][o.id]["idx"])] + "]"), "size": [480, 320]} for o, _ in objs ] tokens = [tok for _, tok in objs] rles = coco_mask.frPyObjects(raw_rles, 320, 480) masks = coco_mask.decode(rles).transpose(1, 0, 2) rles = coco_mask.encode(np.asfortranarray(masks)) boxes = coco_mask.toBbox(rles) all_seg = [] for mask in masks.transpose(2, 0, 1): contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentation = [] for contour in contours: # Valid polygons have >= 6 coordinates (3 points) if contour.size >= 6: segmentation.append(contour.flatten().tolist()) all_seg.append(segmentation) return boxes, all_seg, tokens
def format_supply_flow(amount, unit, location, activity_type, flow_object, time, determining_flow, counter, mapping_dict={}): """Return a list of RDF triples as Python for a supply ``Flow``. ``amount`` is a float. The following are strings **with URI prefixes already substituted**: * ``unit`` * ``location`` * ``activity_type`` * ``unit`` * ``flow_object`` * ``time`` ``determining_flow`` is a boolean indicating whether the flow is a determining flow. ``counter`` is an instance of ``collections.Counter`` used to count blank nodes. ``mapping_dict`` is a dictionary that we keep to do lookups in the future. It has the form: .. code-block:: python { "flows": {(flow_object, location): flow_uri}, "activities: {(activity_type_uri, location_uri): activity_uri} } Where ``activity_uri`` and ``flow_uri`` are generated by this function. This mapping dictionary is needed for the use functions, as we need the URI references. """ activity_uri = "brdfsuex:{}".format(counter) flow_uri = "brdfsuex:{}".format(counter+1) output = [{ # Activity instance "@id" : activity_uri, "@type" : "bont:Activity", "bont:activityType" : activity_type, "bont:location": location, "bont:temporalExtent": time, }, { # Flow instance "@id": flow_uri, "@type" : "bont:Flow", "bont:outputOf": activity_uri, "om2:hasNumericalValue": amount, "bont:objectType" : flow_object, "om2:hasUnit" : "om2:" + unit }] if determining_flow: output[0]["bont:determiningFlow"] = flow_uri mapping_dict[(activity_type, location)] = activity_uri return output, mapping_dict
def getStringForAndDifferential(a, b, c): """ AND = valid(x,y,out) = (x and out) or (y and out) or (not out) """ command = "(({0} & {2}) | ({1} & {2}) | (~{2}))".format(a, b, c) return command
def _scaling(mean_tau, sd_tau2): """ Returns the chi-2 scaling factor from the mean and variance of the uncertainty model, as reported in equation 5.4 of Al Atik (2015) """ return (sd_tau2 ** 2.) / (2.0 * mean_tau ** 2.)
def string_float(string: str) -> float:
    """Convert a string to a float

    Parameters
    ----------
    string : str
        String to be converted

    Returns
    -------
    number : float
        The parsed number, or 0 if the cleaned string is empty
    """
    cleaned = string.strip().replace(",", "").replace("-", "")
    if cleaned == "":
        return 0
    return float(cleaned)
def pairs(s): """Split string into list of pairs.""" return [s[i:i + 2] for i in range(0, len(s), 2)]
def to_spongebob_case(string: str) -> str: """ Converts a given string to spongebob case (alternating caps) Returns ------- :class:`str` New string in sarcastic (spongebob) case """ return ''.join( letter.upper() if i % 2 else letter.lower() for i, letter in enumerate(string) )
def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None): """ Returns the confusion matrix between rater's ratings """ assert (len(rater_a) == len(rater_b)) if min_rating is None: min_rating = min(rater_a + rater_b) if max_rating is None: max_rating = max(rater_a + rater_b) num_ratings = int(max_rating - min_rating + 1) conf_mat = [[0 for i in range(num_ratings)] for j in range(num_ratings)] for a, b in zip(rater_a, rater_b): conf_mat[a - min_rating][b - min_rating] += 1 return conf_mat
def _NormalizeString(string): """Normalizes a string to account for things like case.""" return string.strip().upper() if string else None
def binexp(x: int, n: int) -> int: """Binary exponentiation Parameters: x (int): Base n (int): Exponent (power) Returns: int: Result """ res = 1 while n > 0: if n & 1 > 0: res *= x x *= x n >>= 1 return res
def get_download_sizes_list(filesize, parts): """ function to get a list of sizes to be downloaded by each thread """ lst = range(filesize) sizes_list = [len(lst[i::parts]) for i in range(parts)] return sizes_list
def split(string: str, delimiter: str = ' ') -> list:
    """
    Splits string based on input delimiter
    :param string: input string/text to be split based on delimiter
    :param delimiter: input delimiter to split the user text
    :return: [list[str]] list of split strings
    """
    if isinstance(string, str) and isinstance(delimiter, str):
        return string.split(delimiter)
    raise ValueError(
        "Expected (string, delimiter) of type (str, str) got ({}, {}). ".format(type(string), type(delimiter)))
def _map_tensor(functions, tensors): """ Apply the composition of all functions to all given tensors. If a tensor is None, it remains as None. :param functions: iterable collection of functions. Each must take a tensor and return a tensor of the same size. The first function is applied first. :param tensors: iterable collection of tensors. :return: tuple of tensors with identical shapes to input. """ new_tensors = [] for tensor in tensors: if tensor is None: new_tensors.append(None) else: for fn in functions: tensor = fn(tensor) new_tensors.append(tensor) return tuple(new_tensors)
def find_related_forms(self, form_name, form_dict, foreign_forms=None):
    """
    Finds the form_name value in the form_dict. If it is found, the function calls itself using form_dict[form_name].
    The form_dict is a dictionary whose keys are form names and whose values are the names of the forms they have a foreign key relation with.
    Ex: form_dict['Microarray 1'] = 'Prior Gen Testing'
    This continues until no more related forms are found, and returns a list of them, in order from highest to deepest form relation
    """
    if foreign_forms is None:
        foreign_forms = []
    if form_name in form_dict and form_name not in foreign_forms:
        foreign_forms.append(form_name)
        # Recurse via self so the call resolves when this is defined as a method;
        # foreign_forms is mutated in place, so the return value does not need to be captured here.
        self.find_related_forms(form_dict[form_name], form_dict, foreign_forms)
    return foreign_forms
def apply_service_association(association, data): """ Apply association of labels to contents Parameters ---------- association: topics: types: :return: """ for service_type, contents in association.items(): for c in contents: if service_type not in data[c]['service_type']: data[c].update(service_type=service_type) return data
def get_calender_ids_from_list(email, calendar_list):
    """
    Parse calendar_list to retrieve calendar ids, separated into active and deleted
    Response:
    {
        "active": ["id1", "id2"],
        "deleted": ["id3", "id4"]
    }
    """
    assert calendar_list['items']
    response = {}
    response['email'] = email
    response['active'] = []
    response['deleted'] = []
    for item in calendar_list['items']:
        if item.get('deleted'):
            response['deleted'].append(item['id'])
        else:
            response['active'].append(item['id'])
    return response
def pass_dummy_scans(algo_dummy_scans, dummy_scans=None): """ Graft manually provided number of dummy scans, if necessary. Parameters ---------- algo_dummy_scans : int number of volumes to skip determined by an algorithm dummy_scans : int or None number of volumes to skip determined by the user Returns ------- skip_vols_num : int number of volumes to skip """ if dummy_scans is None: return algo_dummy_scans return dummy_scans
def line_points_to_grid(l_points: list) -> list: """ Args: l_points: all points occupied by lines in [x1, y1] format Returns: grid where [x, y] value contains the number of lines that occupy that coordinate """ # determine size of grid largest_x = max([i[0] for i in l_points]) largest_y = max([i[1] for i in l_points]) # instantiate empty grid grid = [[0]*(largest_x + 1) for i in range((largest_y + 1))] # iterate through all points and add counts to grid coordinates for point in l_points: grid[point[1]][point[0]] += 1 return grid
def _lcs_length(x, y): """ Computes the length of the longest common subsequence (lcs) between two strings. The implementation below uses a DP programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence >>> _lcs_length('ABCDE', 'CD') 2 >>> _lcs_length('the police killed the gunman'.split(), 'gunman police killed'.split()) 2 :param x: sequence of words :param y: sequence of words :return: Length of LCS between x and y """ n, m = len(x), len(y) len_table = {} for i in range(n + 1): for j in range(m + 1): if i == 0 or j == 0: len_table[i, j] = 0 elif x[i - 1] == y[j - 1]: len_table[i, j] = len_table[i - 1, j - 1] + 1 else: len_table[i, j] = max(len_table[i - 1, j], len_table[i, j - 1]) return len_table[n, m]
def get_primes(max):
    """Returns all prime numbers up to and including [max]

    Args:
        max: highest number to be returned. If [max] is prime, list is inclusive.
    """
    # Guard the small cases so the seed list [2, 3] is never returned for max < 3.
    if max < 2:
        return []
    if max == 2:
        return [2]
    primes = [2, 3]
    c = 2
    while primes[-1] <= max:
        next = primes[-1] + c
        if next > max:
            break
        is_prime = True
        for prime in primes:
            if next % prime == 0:
                is_prime = False
                c += 2
                break
        if is_prime:
            primes.append(next)
            c = 2
    return primes
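A quick sanity check of the candidate-generation loop (illustrative values, including the small-input guard added above):

assert get_primes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert get_primes(2) == [2]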
def trim_lost_U(seq_F, LOSTUSEQS): """ test for lost U at the 3' end of the PCR primer sequence """ keepgoing = 1 for lostuseq in LOSTUSEQS: if keepgoing: if len(seq_F) < len(lostuseq): break if seq_F[:len(lostuseq)] == lostuseq: seq_F = seq_F[len(lostuseq):] #if LOSTUSEQ[0] found, also look for LOSTUSEQ[1] etc. else: keepgoing = 0 return seq_F
def is_not_null(val):
    """Check if a value is not null. This is needed because we are parsing command line arguments and 'None' and 'none' can be used."""
    return val not in [None, 'none', 'None']
def linscale(seq, minval=0.0, maxval=1.0): """Linearly scales all the values in the sequence to lie between the given values. Shifts up to minval and scales by the difference ``maxval-minval`` If all values are identical, then sets them to `minval`.""" m, M = min(seq), max(seq) def sc(s, m=m, M=M): if m==M: return minval return minval + ((s-m)/float(M-m))*(maxval-minval) seq = [sc(s) for s in seq] return seq
def _mean(listvalue): """ The mean value of the list data. """ return sum(listvalue)/len(listvalue)
def quaternion_real(quaternion): """Return real part of quaternion. >>> quaternion_real([3, 0, 1, 2]) 3.0 """ return float(quaternion[0])
def is_leap_year(year):
    """
    if the given year is a leap year, then return true else return false
    :param year: The year to check if it is a leap year
    :returns: True if it is a leap year, False otherwise.
    """
    # Century years are leap years only when divisible by 400 (e.g. 1900 is not, 2000 is).
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
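A few spot checks of the century rule (illustrative years):

assert is_leap_year(2000)       # divisible by 400
assert not is_leap_year(1900)   # divisible by 100 but not by 400
assert is_leap_year(2024) and not is_leap_year(2023)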
def fibUnderN(n):
    """
    This function returns a list of fibonacci numbers under n
    --param n : integer
    --return list : all fibs under n
    """
    fibs = [0, 1]
    while (fibs[-1] + fibs[-2]) < n:
        fibs.append(fibs[-1] + fibs[-2])
    # Drop seed values that are not actually below n (e.g. when n <= 1); the comprehension already returns a new list.
    return [f for f in fibs if f < n]
def complex_pass(count): """Windows complexity requires a password to contain three of the four groups: digits, lowercase letters, uppercase letters, or symbols.""" if count['d'] and count['l'] and count['u']: return True elif count['d'] and count['l'] and count['s']: return True elif count['d'] and count['u'] and count['s']: return True elif count['l'] and count['u'] and count['s']: return True else: return False
def perforationskineffect(s_h=0, s_v=0, s_wb=0): """ Calculate perforation skin effect or s_p given... s_h == horizontal skin effect s_v == vertical pseudoskin s_wb == well bore blockage effect """ s_p = s_h + s_v + s_wb return(s_p)
def clip_box(box, shape): """ Clip box for given image shape. Args: box (array_like[int]): Box for clipping in the next format: [y_min, x_min, y_max, x_max]. shape (tuple[int]): Shape of image. Returns: array_like[int]: Clipped box. """ ymin, xmin, ymax, xmax = box if ymin < 0: ymin = 0 elif ymin >= shape[0]: ymin = shape[0] - 1 box[0] = ymin if xmin < 0: xmin = 0 elif xmin >= shape[1]: xmin = shape[1] - 1 box[1] = xmin if ymax < 0: ymax = 0 elif ymax >= shape[0]: ymax = shape[0] - 1 box[2] = ymax if xmax < 0: xmax = 0 elif xmax >= shape[1]: xmax = shape[1] - 1 box[3] = xmax return box
def from_str(s): """ generate genotype from string """ genotype = eval(s) return genotype
def min_bin (mins): """Bins the minutes of when our followers engage into 00 and 30""" l = [] for _min in mins: if _min < 30: l.append('00') else: l.append('30') return l
def recursive_fibonacci(n): """ Find the Fibonacci number of order n by recursion """ if n < 2: return n else: return recursive_fibonacci(n-1) + recursive_fibonacci(n-2)
def formatIntervalHours(cHours):
    """ Format an hours interval into a nice 1w 2d 1h string. """
    # Simple special cases.
    if cHours < 24:
        return '%sh' % (cHours,)

    # Generic and a bit slower.
    cWeeks = cHours // (7 * 24)
    cHours %= 7 * 24
    cDays = cHours // 24
    cHours %= 24
    sRet = ''
    if cWeeks > 0:
        sRet = '%sw ' % (cWeeks,)
    if cDays > 0:
        sRet += '%sd ' % (cDays,)   # append so the weeks part is not overwritten
    if cHours > 0:
        sRet += '%sh ' % (cHours,)
    assert len(sRet) > 0
    assert sRet[-1] == ' '
    return sRet[:-1]
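A short illustration of the expected output (assuming the integer-division fix above):

print(formatIntervalHours(5))    # '5h'
print(formatIntervalHours(200))  # 200 h = 1 week + 1 day + 8 h -> '1w 1d 8h'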
def strtobool(val: str) -> bool:
    """
    strtobool
    ---------
    PEP 632 https://www.python.org/dev/peps/pep-0632/ is deprecating distutils.
    The following code is somewhat shamelessly copied from the original source.

    Convert a string representation of truth to True or False.

    - True values are `'y', 'yes', 't', 'true', 'on', and '1'`
    - False values are `'n', 'no', 'f', 'false', 'off', and '0'`
    - Raises `ValueError` if `val` is anything else.
    """
    val = val.lower()
    if val in {'y', 'yes', 't', 'true', 'on', '1'}:
        return True
    if val in {'n', 'no', 'f', 'false', 'off', '0'}:
        return False
    raise ValueError(f'invalid truth value for {val}')
def text_content(seconds): """ Returns text content containing game stats to be sent to user Parameters: seconds (int): the amount of time played """ content = "Thank you so much for playing Champions are Coming. In total, you have been playing for " + str(seconds) + " seconds. Play again to beat your time!" return content
def _bibtex_get_publication_type(ins): """Aux Function""" out = 'article' # XXX currently only article supported. return out
def slice(seq, length, type='list'):
    """ Slice a sequence into several sub sequences by length.

    Args:
        seq (list | tuple): The sequence to be sliced.
        length (list[int] | int): The expected length or list of lengths.
        type (str, optional): The type of returned object. Expected values include ``'list'`` and ``'tuple'``. Default: ``'list'``.

    Returns:
        list[list]: The sliced sequences.
    """
    assert type in ('list', 'tuple')
    if isinstance(length, int):
        assert len(seq) % length == 0
        length = [length] * int(len(seq) / length)
    elif not isinstance(length, list):
        raise TypeError("'length' must be an integer or a list of integers")
    elif sum(length) != len(seq):
        raise ValueError('the total length does not match the sequence length')
    out, idx = [], 0
    for i in range(len(length)):
        out.append(seq[idx:idx + length[i]])
        idx += length[i]
    if type == 'tuple':
        out = tuple(out)
    return out
def compare(a, b, *, tol=1e-6):
    """
    if ||a - b|| < tol, return 0
    otherwise return the sign of (a - b) as a float (+1.0 or -1.0)
    """
    if abs(a - b) < tol:
        return 0.0
    elif a > b:
        return 1.0
    else:
        return -1.0
def find_factors(number):
    """Returns the set of all factors (divisors) of a number, not just the prime ones."""
    factors = set()
    for i in range(1, int(number**0.5) + 1):
        if number % i == 0:
            factors.add(i)
            factors.add(number // i)
    return factors
def multiply(value, arg): """ Multiplies the value by the arg. Returns the value if an exception occurs. """ try: output = int(value) * int(arg) except: output = value return output
def is_prime(n):
    """Determine whether the given integer n is prime."""
    if n < 2:
        return False
    # Check divisors from 2 up to sqrt(n); the original loop never tested 2, so every even number passed.
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            # If n is divisible by a smaller integer, then n is not prime.
            return False
    return True
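A small check of the corrected trial-division loop (illustrative values):

assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]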
def sol(n):
    """
    Each round the queue is reduced to half its length, so it takes log n rounds to reduce it to 1. The answer is therefore the largest power of 2 that does not exceed n.
    """
    for i in range(n, 0, -1):
        if i & (i - 1) == 0:
            return i
def pluperfect_number(n: int) -> bool: """Return True if n is a pluperfect number or False if it is not >>> all(armstrong_number(n) for n in PASSING) True >>> any(armstrong_number(n) for n in FAILING) False """ if not isinstance(n, int) or n < 1: return False # Init a "histogram" of the digits digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] digit_total = 0 sum = 0 temp = n while temp > 0: temp, rem = divmod(temp, 10) digit_histogram[rem] += 1 digit_total += 1 for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))): sum += cnt * i ** digit_total return n == sum
def parse_version_commands(v_dict, description): """ Take dict with version commands as values and tool names as keys and parse into list of strings that will print tool name and version in yaml format in log files. """ cmds = ['echo "START METHODS PRINTING"'] cmds.append('echo "description: {}"'.format(description).replace('\n', '')) cmds.append('echo "versions:"') for cv in v_dict: cmds.append('echo " {cv}: "$({version})'.format(cv=cv, version=v_dict[cv])) cmds.append('echo "END METHODS PRINTING"') return cmds
def unprefix_schema_dict(schema_in): """Turn a.b.c into just c for every key in a nested schema dictionary - iterate through all keys and turn each one from foo.bar to just bar, and apply recursively if any of the values are dictionaries themselves.""" schema_out = {} keys = list(schema_in.keys()) for key in keys: stripped_key = key.split('.')[-1] # Turn a.b.c.d.e into e schema_out[stripped_key] = schema_in[key] if type(schema_in[key]) == dict: # Apply recursively. schema_out[stripped_key] = unprefix_schema_dict(schema_in[key]) return schema_out
def cumhist(hist): """Takes a histogram and makes a cumulative histogram out of it""" ret = {} cur = 0 for k in sorted(hist): ret[k] = hist[k]+cur cur = ret[k] return ret
def _swap_edges_in_cycle(cycle, M): """ """ Mprime = dict(M) e = None cycle = cycle + [cycle[0]] for i in range(len(cycle) - 1): if cycle[i] in Mprime: if M[cycle[i]] == cycle[i + 1]: e = (cycle[i], cycle[i + 1]) Mprime[cycle[i]] = cycle[i - 1] Mprime[cycle[i - 1]] = Mprime[cycle[i]] else: e = (cycle[i], cycle[i - 1]) Mprime[cycle[i]] = cycle[i + 1] Mprime[cycle[i + 1]] = Mprime[cycle[i]] return e, Mprime
def model_snowdepthtrans(Sdepth = 0.0, Pns = 100.0): """ - Name: SnowDepthTrans -Version: 1.0, -Time step: 1 - Description: * Title: snow cover depth conversion * Author: STICS * Reference: doi:http://dx.doi.org/10.1016/j.agrformet.2014.05.002 * Institution: INRA * Abstract: snow cover depth in cm - inputs: * name: Sdepth ** description : snow cover depth Calculation ** inputtype : variable ** variablecategory : state ** datatype : DOUBLE ** default : 0.0 ** min : 0.0 ** max : 500.0 ** unit : m ** uri : * name: Pns ** description : density of the new snow ** inputtype : parameter ** parametercategory : constant ** datatype : DOUBLE ** default : 100.0 ** min : ** max : ** unit : cm/m ** uri : - outputs: * name: Sdepth_cm ** description : snow cover depth in cm ** variablecategory : state ** datatype : DOUBLE ** min : 0.0 ** max : 500.0 ** unit : cm ** uri : """ Sdepth_cm = Sdepth * Pns return Sdepth_cm
def get_skill_entry(name, skills_data) -> dict: """ Find a skill entry in the skills_data and returns it. """ for e in skills_data.get('skills', []): if e.get('name') == name: return e return {}
def vpvs2poisson(vpvs_ratio): """ Convert Vp/Vs ratio to Poisson's ratio. Parameters ---------- vpvs_ratio : float Vp/Vs ratio. Returns ------- poisson_ratio : float Poisson's ratio. """ s = vpvs_ratio ** 2 return 0.5 * (s - 2) / (s - 1)
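As a quick plausibility check (values are illustrative, not from the original source): a Poisson solid has Vp/Vs = sqrt(3), which should map to a Poisson's ratio of 0.25.

print(vpvs2poisson(3 ** 0.5))  # ~0.25, up to floating-point error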
def recognize_greeting(statement): """ Recognizes if string statement starts with Hi or Hey or any other greeting. Args: statement (str): a string from the commandline from the user Returns: bool: True if statement is a greeting. False otherwise. >>> recognize_greeting('hi') True """ statement = statement.lower() if statement.startswith('hi') or statement.startswith('hey'): return True return False
def avg(arr):
    """Compute the average of the values in arr."""
    return sum(arr) / float(len(arr))
def calc_crc24q(message: bytes) -> int: """ Perform CRC24Q cyclic redundancy check. If the message includes the appended CRC bytes, the function will return 0 if the message is valid. If the message excludes the appended CRC bytes, the function will return the applicable CRC. :param bytes message: message :return: CRC or 0 :rtype: int """ POLY = 0x1864CFB crc = 0 for octet in message: crc ^= octet << 16 for _ in range(8): crc <<= 1 if crc & 0x1000000: crc ^= POLY return crc & 0xFFFFFF
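A minimal self-consistency sketch based on the property stated in the docstring: appending the computed 3-byte CRC to the message makes the check return 0 (the payload bytes are illustrative):

msg = b"illustrative payload"
crc = calc_crc24q(msg)
assert calc_crc24q(msg + crc.to_bytes(3, "big")) == 0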
def split_sentences(txt, splitchar=".", include_splitchar=False): """Split sentences of a text based on a given EOS char.""" out = [s.split() for s in txt.strip().split(splitchar) if len(s) > 0] return out
def almost_equal(x, y, rel_tol=0, abs_tol=0): """Check if two objects are equal within certain relative and absolute tolerances. The operations abs(x + y) and abs(x - y) need to be well-defined for this function to work. :param x: first object to be compared :param y: second object to be compared :param rel_tol: relative tolerance for the comparison :param abs_tol: absolute tolerance for the comparison :return: True if the elements are equal within tolerance, False otherwise :rtype: bool """ diffxy = abs(x - y) if diffxy <= abs_tol: return True sumxy = abs(x + y) # Rough check that the ratio is smaller than 1 to avoid division by zero if sumxy < diffxy: return False return diffxy / sumxy <= rel_tol
def intersect(list1, list2): """ Compute the intersection of two sorted lists. Returns a new sorted list containing only elements that are in both list1 and list2. This function can be iterative. """ intersection = [] for dummy_idx in list1: if dummy_idx in list2 and dummy_idx not in intersection: intersection.append(dummy_idx) return intersection
def is_json_object(json_data): """Check if the JSON data are an object.""" return isinstance(json_data, dict)
def from_base(num_base: int, dec: int) -> float: """Returns value in e.g. ETH (taking e.g. wei as input).""" return float(num_base / (10 ** dec))
def get_catalog_path(layer): """Get the catalog path for the designated layer if possible. Ensures we can pass map layers to the subprocess. If it's already a string, assume it's a catalog path and return it as is. Args: layer (layer object or string): Layer from which to retrieve the catalog path. Returns: string: Catalog path to the data """ if hasattr(layer, "dataSource"): return layer.dataSource else: return layer
def _separate_dir_and_file(path): """ Helper function for separating file directory and the file """ temp = path.rfind("/") if temp == -1: return ".", path return path[:temp], path[temp + 1:]
def get_all_combinations(node_lists):
    """ Generate a list of all possible combinations of items in the list of lists `node_lists`. Each combination takes one item from each list contained within `node_lists`. The order of items in the returned lists reflects the order of lists in `node_lists`. For example, if `node_lists` was [[A, B, C], [N], [X, Y]], the returned combinations would be [[A, N, X], [A, N, Y], [B, N, X], [B, N, Y], [C, N, X], [C, N, Y]]. """
    items = [[]]
    for node_list in node_lists:
        # Extend existing partial combinations before moving to the next node so the output order matches the docstring example.
        items = [item + [node] for item in items for node in node_list]
    return items
def sum(*args):
    """Calculate sum of args.

    When using numpy the builtin sum doesn't always work!

    @param args: list of floats to sum
    @type args: list of float
    @return: sum of args
    @rtype: float
    """
    r = args[0]
    for x in args[1:]:
        r += x
    return r
def partition(array, start, end):
    """ Perform the partition operation of quicksort on `array` in place.
    Time Complexity: O(n)
    Auxiliary Space: O(1)
    :param array: list of elements to be partitioned
    :param start: left limit of the slice to partition
    :param end: right limit of the slice; array[end] is used as the pivot
    :return: final index of the pivot, used to split the array for recursion.
    """
    i = start - 1
    pivot = array[end]
    for j in range(start, end):
        if array[j] <= pivot:
            i += 1
            array[i], array[j] = array[j], array[i]
    i += 1
    array[i], array[end] = array[end], array[i]
    return i
def equivalent_depth(h0, lambda_soil, alpha_air, delta_snow, lambda_snow): """ :param h0: Depth of the gas pipeline axis, m :param lambda_soil: Heat conductivity coefficient of soil, W/(m*K) :param alpha_air: Soil-air heat transfer coefficient, W/(m2*K) :param delta_snow: Thickness of snow surface, m :param lambda_snow: Heat conductivity coefficient of snow, W/(m*K) :return: equivalent depth, m """ return h0 + lambda_soil * (1/alpha_air + delta_snow/lambda_snow)
def get_chunks(t_start, t_stop, n_chunks): """Group frame indices into given number of 'chunks'. Args: t_start (int): Frame index to start at (inclusive) t_stop (int): Frame index to stop at (exclusive) n_chunks (int): Number of chunks Returns: List of 2-tuples containing (start, stop) for each chunk. """ # Validate input if t_stop <= t_start: raise ValueError('Start frame not before stop frame') if n_chunks <= 0: raise ValueError('Number of chunks not positive int') if n_chunks > (t_stop - t_start): raise ValueError('More chunks than frames') # Determine size of chunks sz = (t_stop - t_start) // n_chunks # First n-1 chunks chunks = [] for k in range(n_chunks - 1): chunks.append((t_start + k * sz, t_start + (k + 1) * sz)) # Final chunk chunks.append((t_start + (n_chunks - 1) * sz, t_stop)) return chunks
def fbpe_key(code): """ input: 'S0102-67202009000300001' output: 'S0102-6720(09)000300001' """ begin = code[0:10] year = code[12:14] end = code[14:] return '%s(%s)%s' % (begin, year, end)
def calculate_discount(initial: int, current: int) -> int: """ Calculates the % difference between initial and current price. Note: when initial is 0 (that is, old price was lower than the new one - very unlikely in Steam), we assume that increase is (new price * 100)%. :param initial: initial price :param current: current price :return integer: representing the discount """ if initial is None or current is None: return 0 if current == 0: return -100 difference = current - initial # Division by 0 is not allowed. 1, however, will not change the price. initial = 1 if initial == 0 else initial percent = (difference / initial) * 100 return int(round(percent, 0))
def default_state_progress_report(n_steps, found_states, all_states, timestep=None): """ Default progress reporter for VisitAllStatesEnsemble. Note that it is assumed that all states have been named. Parameters ---------- n_steps : int number of MD frames generated so far found_states : iterable the set of states that have been found all_states : iterable the set of all states of interest timestep : float or quantity the timestep (optional). If given, the amount of time simulated will be reported along with the number of MD frames. Returns ------- str : formatted string with information about progress so far """ report_str = "Ran {n_steps} frames" if timestep is not None: report_str += " [{}]".format(str(n_steps * timestep)) report_str += (". Found states [{found_states}]. " "Looking for [{missing_states}].\n") found_states_str = ",".join([s.name for s in found_states]) # list comprehension instead of sets (to preseve order) missing_states = [s for s in all_states if s not in found_states] missing_states_str = ",".join([s.name for s in missing_states]) return report_str.format(n_steps=n_steps, found_states=found_states_str, missing_states=missing_states_str)
def make_batch_sizes(num_records, max_batch_size): """Function that generates a sequence of batch sizes from total number of records and batch size. Parameters ---------- num_records : int Overall number of records max_batch_size : int Number of records in a batch Returns ------- tuple of integers Tuple with batch sizes (in terms of number of records) """ if num_records <= max_batch_size: return tuple([num_records]) nb = num_records / max_batch_size mbs = max_batch_size batches = [mbs for _ in range(int(nb))] remainder = int((nb % int(nb)) * mbs) if remainder > 0: batches += [remainder] return tuple(batches)
def dump_deb822(fields): """ Format the given Debian control fields as text. :param fields: The control fields to dump (a dictionary). :returns: A Unicode string containing the formatted control fields. """ lines = [] for key, value in fields.items(): # Check for multi-line values. if "\n" in value: input_lines = value.splitlines() output_lines = [input_lines.pop(0)] for line in input_lines: if line and not line.isspace(): # Make sure continuation lines are indented. output_lines.append(u" " + line) else: # Encode empty continuation lines as a dot (indented). output_lines.append(u" .") value = u"\n".join(output_lines) lines.append(u"%s: %s\n" % (key, value)) return u"".join(lines)
def _parse_values(value_string): """Parse comma-delimited values from a string""" values = list() for raw in value_string.split(','): raw = raw.strip() if len(raw) > 1 and raw[0] == '"' and raw[-1] == '"': raw = raw[1:-1].strip() values.append(raw) return values
def slicer(a, k): """ a = no. of problems / chapter k = no. of problems / page """ sliced = [] for i, step in enumerate(range(0, a, k)): sliced.append([x for x in range(a)][step:k*i+k]) return sliced
def docline(obj): """ Returns the first line of the object's docstring or None if there is no __doc__ on the object. """ if not obj.__doc__: return None lines = list(filter(None, obj.__doc__.split("\n"))) return lines[0].strip()
def quick_sort(arr): """Returns the array arr sorted using the quick sort algorithm >>> import random >>> unordered = [i for i in range(5)] >>> random.shuffle(unordered) >>> quick_sort(unordered) [0, 1, 2, 3, 4] """ less = [] equal = [] greater = [] if len(arr) < 1: return arr pivot = arr[len(arr) // 2] # pivot at mid point for num in arr: if num < pivot: less.append(num) elif num == pivot: equal.append(num) elif num > pivot: greater.append(num) return quick_sort(less) + equal + quick_sort(greater)
def mask_value(mask: str, value: int) -> int: """Mask a value""" value_str = f"{value:b}".zfill(len(mask)) masked_str = "".join([v if m == "X" else m for v, m in zip(value_str, mask)]) return int(masked_str, 2)
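A small worked example of the masking rule, where 'X' keeps the value bit and any other mask character overrides it (mask and value are illustrative):

# value 3 -> '011' after zero-filling to the mask length;
# mask '1X0' forces the first bit to 1 and the last to 0, keeping the middle.
assert mask_value("1X0", 3) == 0b110  # 6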
def create_method_args_string(*args, **kwargs):
    """Returns a string representation of args and keyword args.

    I.e. for args=1,2,3 and kwargs={'a':4, 'b':5} you'd get: "1, 2, 3, a=4, b=5"
    """
    # While %s turns a var into a string, in some rare cases explicit
    # repr() is less likely to raise an exception.
    arg_strs = [repr(arg) for arg in args]
    # Keyword names are not repr()'d, so they render as a=4 rather than 'a'=4, matching the docstring example.
    arg_strs += ['%s=%s' % (key, repr(value)) for (key, value) in kwargs.items()]
    return ', '.join(arg_strs)
def any_match(record, patterns): """Check record for match with any of the patterns :param record: record string to search for patterns :param patterns: list of regexes :return: True of record matched with any of the patterns; False otherwise """ return any(map(lambda x: x.search(record), patterns))
def tail(sequence): """Get all but the first element in a sequence. Parameters ---------- sequence : Sequence[A] The sequence to decapitate. Returns ------- Sequence[A] The decapitated sequence. """ return sequence[1:]
def ALMACombineCals(Cal1, Cal2=None, Cal3=None, Cal4=None):
    """
    Combine a unique list of calibrators

    Drops None sources, returns list of unique names
    * Cal1 = List of calibrators (from ALMACalModel)
    * Cal2 = List of calibrators, ignore if None
    * Cal3 = List of calibrators, ignore if None
    * Cal4 = List of calibrators, ignore if None
    """
    ################################################################
    clist = []   # Calibrator list
    for CalList in (Cal1, Cal2, Cal3, Cal4):
        if not CalList:
            continue
        for Cal in CalList:
            if Cal['Source']:
                name = Cal['Source'].strip()
                # Compare the stripped name so trailing blanks do not create duplicates.
                if name not in clist:
                    clist.append(name)
    return clist
def sorted_stories_list(hackerNewsList): """Sorting the list in decreasing order with respect to votes""" return sorted(hackerNewsList,key=lambda x:x['votes'],reverse=True)
def roll(lst, shift): """Roll elements of a list. This is similar to `np.roll()`""" return lst[-shift:] + lst[:-shift]
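A one-line check that the slicing matches numpy's rolling convention (illustrative input):

assert roll([1, 2, 3, 4, 5], 2) == [4, 5, 1, 2, 3]  # same as list(np.roll([1, 2, 3, 4, 5], 2))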
def as_list(string, sep=",", strip=True): """Convert string to list, splitting on comma by default.""" string = string.strip() items = string.split(sep) if strip: items = [item.strip() for item in items] return items
def vo_from_fqan(fqan): """ Get the VO from a full FQAN Args: fqan: A single fqans (i.e. /dteam/cern/Role=lcgadmin) Returns: The vo + group (i.e. dteam/cern) """ components = fqan.split('/')[1:] groups = [] for c in components: if c.lower().startswith('role='): break groups.append(c) return '/'.join(groups)
def calculatecg(seq):
    """This function calculates the GC content of a user-inputted sequence.

    This is achieved by counting the number of occurrences of the bases A, C, G & T and then applying the following formula: Count(G + C)/Count(A + T + G + C) * 100%.
    """
    # Counts all occurrences of the letters A, C, G & T
    num_a = seq.count('A')
    num_c = seq.count('C')
    num_g = seq.count('G')
    num_t = seq.count('T')
    # Returns the GC content after applying the formula Count(G + C)/Count(A + T + G + C) * 100%
    return str((num_g + num_c) / (num_a + num_c + num_g + num_t) * 100)
def get_next_coin(coin): """Return the next coin. >>> get_next_coin(1) 5 >>> get_next_coin(5) 10 >>> get_next_coin(10) 25 >>> get_next_coin(2) # Other values return None """ if coin == 1: return 5 elif coin == 5: return 10 elif coin == 10: return 25
def _add_thumb(s): """ Modifies a string (filename, URL) containing an image filename, to insert '.thumb' """ parts = s.split(".") parts.insert(-1, "thumb") if parts[-1].lower() not in ['jpeg', 'jpg']: parts[-1] = 'jpg' return ".".join(parts)
def add_tuple(tuple_a=(), tuple_b=()): """Add two tuples.""" if len(tuple_a) < 2: if len(tuple_a) == 0: tuple_a = 0, 0 else: tuple_a = tuple_a[0], 0 if len(tuple_b) < 2: if len(tuple_b) == 0: tuple_b = 0, 0 else: tuple_b = tuple_b[0], 0 return (tuple_a[0] + tuple_b[0], tuple_a[1] + tuple_b[1])
def app_request_type_label(app, request_type) -> str: """Format a label based on the application name and request type.""" return f"{app}\n({request_type})"
def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf): """Compute the endpoints of the interval for row i.""" if i == 0: j1, j2 = -ll - ceilf - 1, ll + ceilf - 1 else: # i + 1 = 2*ip1div2 + ip1mod2 ip1div2, ip1mod2 = divmod(i + 1, 2) if ip1mod2 == 0: # i is odd if ip1div2 == n + 1: j1, j2 = n - ll - ceilf - 1, n + ll + ceilf - 1 else: j1, j2 = ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1 else: j1, j2 = ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1 return max(j1 + 2, 0), min(j2, n)
def to_int(value): """ Parses a string to int value :param value: :return: """ return int(value) if value else 0
def get_coordinates(bounding_box): """Create bounding box coordinates for the map.""" coordinates = [] coordinates.append([bounding_box[1], bounding_box[0]]) coordinates.append([bounding_box[1], bounding_box[2]]) coordinates.append([bounding_box[3], bounding_box[2]]) coordinates.append([bounding_box[3], bounding_box[0]]) coordinates.append([bounding_box[1], bounding_box[0]]) return coordinates
def strip(message, args, pipeline_data): """ Strips characters from the start and end of the data if they exist """ chars = args[0] message['data'] = message['data'].strip(chars) return chars
def _parse_quad_str(s): """Parse a string of the form xxx.x.xx.xxx to a 4-element tuple of integers""" return tuple(int(q) for q in s.split('.'))
def render_data_properties(props):
    """
    :param props: iterable of property strings to render
    :return: concatenated HTML table rows, one per property
    """
    template = """<tr><td>%s</td></tr>"""
    return "".join([template % prop for prop in props])
def first_char_to_upper(string): """Converts first letter to upper case Args: string: A string. Returns: A string whose first letter to upper case. For example: "userGroup" to "UserGroup" """ if len(string) == 0: return string else: return string[0].upper() + string[1:]