content
stringlengths
42
6.51k
def continuous_to_real(continuous: float, lower_bound: float, upper_bound: float) -> float:
    """Map a continuous variable onto the interval [lower_bound, upper_bound].

    The value is linearly scaled into the interval and clamped at both ends.
    """
    scaled = lower_bound + (upper_bound - lower_bound) * continuous
    return min(max(scaled, lower_bound), upper_bound)
def process_fid_to_avg_gate_fid(F_pro: float, d: int):
    """Convert a process fidelity F_pro to the average gate fidelity
    for a system of dimension d: (d * F_pro + 1) / (d + 1)."""
    return (d * F_pro + 1) / (d + 1)
def succeeded(job):
    """Return True if the CR carries a Ready or Succeeded condition whose
    status equals "True"; False when no such condition (or no status) exists."""
    if "status" not in job or "conditions" not in job["status"]:
        return False
    for cond in job["status"]["conditions"]:
        # the first Succeeded/Ready condition decides the answer
        if "Succeeded" in cond["type"] or "Ready" in cond["type"]:
            return cond["status"] == "True"
    return False
def difference(seq1, seq2):
    """Return the items that appear in seq1 but not in seq2."""
    kept = []
    for element in seq1:
        if element not in seq2:
            kept.append(element)
    return kept
def unary_rec_list_op(op, A):
    """Apply a unary operation to every scalar of a (nested) list: op(A).

    Non-list values are passed straight to op; lists are recursed into.
    """
    # `type(...) is list` (not isinstance) is kept deliberately: only plain
    # lists are treated as containers.
    if type(A) is not list:
        return op(A)
    return [unary_rec_list_op(op, element) for element in A]
def _build_summary(top_sentences, all_sents, max_len, sents_to_add=None): """ Auxillary function for summary building. Attempts to fit as many sentences into a summary as possible. Specifically, tries to add each sentence to the summary, starting from the best one and making sure to not go over a tweet's length Arguments: `top_sentences` A list of sentence indices sorted in decreasing order of importance `all_sents` All sentences from the original document `max_len` The maximum length of the summary in characters `sents_to_add` A list of sentence indices already added to the summary Returns a tuple containing: - The list of sentence indices to be contained in the summary - The length of the generated summary in characters """ # Try to add each sentence to the summary, starting from the best one # and making sure to not go over a tweet's length if sents_to_add is None: sents_to_add = set() summary_size = 0 for i in top_sentences: if i not in sents_to_add: full_sent = all_sents[i].text new_size = summary_size + len(full_sent) if summary_size + new_size <= max_len: sents_to_add.add(i) summary_size += len(full_sent) + 1 # +1 because of the space/newline between sentences return sents_to_add, summary_size
def integer_to_roman(num: int) -> str:
    """LeetCode No. 12. Integer to Roman

    Given an integer numeral, convert it to a roman numeral.
    https://en.wikipedia.org/wiki/Roman_numerals

    >>> tests = {3: "III", 154: "CLIV", 3999: "MMMCMXCIX"}
    >>> all(integer_to_roman(key) == value for key, value in tests.items())
    True
    """
    values = (
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    )
    parts = []
    for value, symbol in values:
        count, num = divmod(num, value)
        parts.append(symbol * count)
    return ''.join(parts)
def process_args(args_array):
    """Function: process_args

    Description: Process each of the -kN and -lN pairs (N = 1..5) and parse
    them into a Mongo search criteria dict.

    Arguments:
        (input) args_array -> Array of command line options and values.
        (output) status -> True|False - If an error has occurred.
        (output) qry -> Mongo search query criteria.
    """
    args_array = dict(args_array)
    status = False
    qry = {}

    for num in range(1, 6):
        key_opt = "-k" + str(num)
        val_opt = "-l" + str(num)
        has_key = key_opt in args_array
        has_val = val_opt in args_array

        # -lN without its matching -kN is an error.
        if not has_key and has_val:
            print("WARNING: Missing key for value: %s = '%s'"
                  % (val_opt, args_array[val_opt]))
            status = True
            break

        # No -kN at all: nothing to do for this slot.
        if not has_key:
            continue

        # -kN without its matching -lN is an error.
        if not has_val:
            print("WARNING: Missing value for key: %s = '%s'"
                  % (key_opt, args_array[key_opt]))
            status = True
            break

        qry[args_array[key_opt]] = {"$in": args_array[val_opt]}

    return status, qry
def _get_field_type_probability(type_freq) -> str: """ Determines type of field based on the type of the vast majority of values. """ total = sum(type_freq.values()) type_pct = {x:type_freq[x]/total for x in type_freq} if total < 10: return 'unknown' for key in type_pct: if type_pct[key] >= 0.95: return key else: return 'unknown'
def get_display_name_from_arn(record):
    """Get the display name for a version from its ARN.

    Args:
        record
            An ARN string returned by AWS, e.g.
            "arn:aws:ecs:region:acct:task-definition/family:version".

    Returns:
        "family:version" for the task definition version.
    """
    fields = record.split(":")
    resource, version = fields[5], fields[6]
    # resource looks like "task-definition/family"
    family = resource.split("/")[1]
    return "{}:{}".format(family, version)
def get_handler_name(handler):
    """Name (including owner's `name` attribute if available) of a handler.

    Args:
        handler (function): Function to be named

    Returns:
        string: handler name, as "<owner.name>.<func>" for bound methods
        whose owner exposes a `name` attribute, else just the function name.
    """
    if '__self__' in dir(handler) and 'name' in dir(handler.__self__):
        return '{}.{}'.format(handler.__self__.name, handler.__name__)
    return handler.__name__
def decode(obj, encoding='utf-8'):
    """Decode the bytes of an object using `encoding`.

    Objects without a decode method, or bytes that fail to decode, are
    returned unchanged.
    """
    try:
        decoded = obj.decode(encoding)
    except (AttributeError, UnicodeDecodeError):
        return obj
    return decoded
def diff(orig, new):
    """Calculate the difference between two dictionaries.

    Any key with a child list or dict which is, itself, changed will be
    considered changed, and the ENTIRE new child value is carried into
    the result (not just the changed elements).

    Note: keys present in `orig` but removed from `new` are NOT reported.

    :param orig: the original (unmodified) dictionary
    :param new: the modified dictionary
    :return: a dictionary containing only those keys which were changed.
    """
    updated = {}
    for k, v in new.items():
        if k not in orig:
            # brand-new key: always part of the diff
            updated[k] = v
        elif isinstance(v, list):
            if len(v) != len(orig[k]):
                updated[k] = v
            else:
                # same length: element-wise compare, recursing into dicts
                has_change = False
                for i in range(len(v)):
                    if isinstance(v[i], dict) and diff(orig[k][i], v[i]):
                        has_change = True
                        break
                    elif v[i] != orig[k][i]:
                        has_change = True
                        break
                if has_change:
                    # the update needs to contain the ENTIRE new list, so
                    # Zoho doesn't zap non-updated elements
                    updated[k] = v
        elif isinstance(v, dict):
            # non-empty recursive diff means the nested dict changed
            if diff(v, orig[k]):
                # the update needs to contain the ENTIRE new dict, so
                # Zoho doesn't zap non-updated values
                updated[k] = v
        elif v != orig[k]:
            updated[k] = v
    return updated
def all_prime(no) -> list:
    """Return the list of all prime numbers up to (and including) `no`.

    Uses the sieve of Eratosthenes.
    """
    composite = [False] * (no + 1)
    for candidate in range(2, no + 1):
        if composite[candidate]:
            continue
        # mark every multiple of the prime, starting at its square
        for multiple in range(candidate * candidate, no + 1, candidate):
            composite[multiple] = True
    return [p for p in range(2, no + 1) if not composite[p]]
def gain_ctrl(val=None):
    """Set (when val is given) or get the module-level gain control value."""
    global _gain_ctrl
    if val is None:
        return _gain_ctrl
    _gain_ctrl = val
    return _gain_ctrl
def _curve_color(idx): """Return a unique color for the given idx (if idx < distinct_colors).""" colors = [ (0, .8, .8), # cyan (1, 0, 1), # violet (0, .8, 0), # green (0, 0, .8), # blue (1, .5, 0), # orange (1, 0, 0), # red ] return colors[idx % len(colors)]
def in_bisect(oneword, t):
    """Binary-search the sorted list t for oneword.

    Returns the index of oneword if present, otherwise the insertion point
    (index of the first element greater than oneword).

    Bug fix: the original used Python-2 `/` division, which yields float
    indices under Python 3 and raises TypeError on every call.
    """
    head = 0
    tail = len(t) - 1
    while head <= tail:
        mid = (head + tail) // 2
        if oneword > t[mid]:
            head = mid + 1
        elif oneword < t[mid]:
            tail = mid - 1
        else:
            return mid
    return head
def multicom(adj_matrix, seedset, scoring, cut, explored_ratio=0.8, one_community=True):
    """Algorithm for multiple local community detection from a seed node.

    It implements the algorithm presented by Hollocou, Bonald and Lelarge in
    "Multiple Local Community Detection".

    Note: Modified by Hebatallah Mohamed.

    :param adj_matrix: compressed sparse row matrix or numpy 2D array
        Adjacency matrix of the graph.
    :param seedset: list of int
        Ids of the seed nodes around which we want to detect communities.
    :param scoring: function
        Function (adj_matrix: numpy 2D array, seed_set: list or set of int)
        -> score: numpy 1D array.  Example: approximate_ppr
    :param cut: function
        Function (adj_matrix: numpy 2D array, score: numpy 1D array)
        -> sweep cut: set of int.  Example: conductance_sweep_cut
    :param explored_ratio: float, default 0.8
        Parameter used to control the number of new seeds at each step.
        NOTE(review): unused in this modified version.
    :param one_community: bool, default True
        If True, score the whole seedset at once and cut a single community;
        otherwise detect one community per individual seed.
    :return:
        seedset: the seed ids as passed in.
        communities: list of set — communities detected around the seed(s).
    """
    scores = []
    communities = list()
    #adj_matrix = convert_adj_matrix(adj_matrix)
    if (one_community):
        # one joint community for the entire seed set
        scores = scoring(adj_matrix, seedset)
        community = cut(adj_matrix, scores)
        communities.append(community)
    else:
        # one community per seed node
        for seed in seedset:
            scores = scoring(adj_matrix, [seed])
            community = cut(adj_matrix, scores)
            communities.append(community)
    return seedset, communities
def kolotina(mu_1, delta, theta):
    """Calculate the Kolotilina bound of a graph.

    :param mu_1: largest eigenvalue of the graph
    :param delta: see the bound definition
    :param theta: see the bound definition
    :return: mu_1 / (mu_1 - delta + theta)
    """
    denominator = mu_1 - delta + theta
    return mu_1 / denominator
def is_namedtuple_like(x):
    """Helper which returns `True` if input is `collections.namedtuple`-like.

    "Namedtuple-like" means it has a `_fields` attribute and every listed
    field is readable.
    """
    try:
        field_names = x._fields
        for name in field_names:
            getattr(x, name)
    except AttributeError:
        return False
    return True
def make_success_response(status, content):
    """Build a success-response dict.

    :param status: status value to report
    :param content: payload of the response
    :return: dict with 'status' and 'content' keys
    """
    return {"status": status, "content": content}
def check_not_finished_board(board: list) -> bool:
    """Check if skyscraper board is not finished, i.e., '?' present on the
    game board. Return True if finished, False otherwise.

    >>> check_not_finished_board(['***21**', '4?????*', '4?????*', '*?????5', '*?????*', '*?????*', '*2*1***'])
    False
    >>> check_not_finished_board(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    True
    >>> check_not_finished_board(['***21**', '412453*', '423145*', '*5?3215', '*35214*', '*41532*', '*2*1***'])
    False
    """
    return all('?' not in row for row in board)
def perfect_score(student_info):
    """Create a list with the name and grade of the first student to make a
    perfect score on the exam.

    :param student_info: list - of [<student name>, <score>] lists.
    :return: list - first `[<student name>, 100]` or `[]` if no student
        score of 100 is found.
    """
    for entry in student_info:
        if entry[1] == 100:
            return entry
    return []
def alpha_over_k_rho_func(rho, rhoV, l, lV):
    """Inverse function for alpha/k from J/k.

    Returns [aoK, aoKV]: the alpha/k estimate and its variance, propagated
    from the variances of rho (rhoV) and l (lV) via first-order partials.
    """
    aoK = rho / (l - rho * (l - 1))
    # first-order error propagation
    d_drho = aoK ** 2 * l / rho ** 2
    d_dl = -aoK ** 2 * (1 - rho) / rho
    aoKV = d_drho ** 2 * rhoV + d_dl ** 2 * lV
    return [aoK, aoKV]
def add_one(input):
    """Return the list with 1 added to every element of input.

    Example: add_one([1,2,3,1]) returns [2,3,4,2]

    Parameter input: The data to process
    Precondition: input an iterable, each element an int
    """
    return [element + 1 for element in input]
def BotToPlatform(bot_platform):
    """Takes a bot platform value and returns a platform string.

    Raises ValueError for unrecognized platforms.
    """
    prefix_map = (
        ('win', 'windows'),
        ('linux', 'linux'),
        ('mac', 'mac'),
    )
    for prefix, platform in prefix_map:
        if bot_platform.startswith(prefix):
            return platform
    raise ValueError('Unknown platform %s' % bot_platform)
def bitmask_bool(bitmask, value):
    """Return True or False depending on whether a particular bit is set.

    Microsoft uses bitmasks as a compact way of denoting a number of boolean
    settings; e.g. to derive a User's 'disabled' property:

        bitmask_bool(user.user_account_control, constants.ADS_UF_ACCOUNTDISABLE)

    @param bitmask: a number representing a bitmask
    @param value: the value to be checked (usually a known constant)
    @return: True if the bit has been set, False if it has not.
    """
    return bool(int(bitmask) & int(value))
def rounded_indices(ls, max_length):
    """Linearly remove elements from a list so the result has max_length items.

    For example rounded_indices(range(12), 3) = [0, 4, 8].
    Lists already at or below max_length are returned unchanged.

    :param ls: input sequence
    :param max_length: desired output length
    :return: subsampled list (or ls itself if short enough)
    """
    ratio = len(ls) / max_length
    if ratio <= 1:
        return ls
    picked = []
    step = 0
    pos = 0
    while pos < len(ls):
        picked.append(ls[pos])
        step += 1
        # next index is the rounded multiple of the sampling ratio
        pos = int(round(ratio * step))
    return picked
def standard_exception_view(self, request): """We want the webob standard responses for any webob-based HTTP exception. Applies to subclasses of :class:`webob.HTTPException`. """ # webob HTTPException is a response already return self
def count_genotypes(genotypeList, StateGenPosData, x, y):
    """Counts how many of each genotype are present at each step.

    For every genotype in genotypeList, scans StateGenPosData — an iterable
    of (state, genotype, position) triples — restricted to cell (x, y),
    and accumulates three totals across all genotypes:
      - allMos: entries that are neither 'new' nor 'gestating'
      - nonEggs: adult, 'XX' (female), non-gestating entries
      - Adults: adult entries
    NOTE(review): matching is by substring (`'adult' in item[0]`,
    `gt in item[1]`), so genotype labels that are substrings of one another
    would be counted together — confirm labels are unambiguous.
    """
    allMos = 0
    nonEggs = 0
    Adults = 0
    for i in range(len(genotypeList)):
        gt = genotypeList[i]
        # everything at (x, y) with this genotype, except brand-new or gestating
        b = sum(1 for item in StateGenPosData
                if not 'new' in item[0] and not 'gestating' in item[0]
                and gt in item[1] and item[2] == (x, y))
        # adult females (XX), non-gestating, at (x, y)
        c = sum(1 for item in StateGenPosData
                if 'adult' in item[0] and 'XX' in item[1]
                and not 'gestating' in item[0] and gt in item[1]
                and item[2] == (x, y))
        # all adults with this genotype at (x, y)
        d = sum(1 for item in StateGenPosData
                if 'adult' in item[0] and gt in item[1] and item[2] == (x, y))
##        for item in StateGenPosData:
##            print(item[0],item[1],item[2])
##            if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):
##                d+=1
##                print('yay')
##            if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):
##                c+=1
##            if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):
##                b+=1
        allMos = allMos + b
        nonEggs = nonEggs + c
        Adults = Adults + d
    return allMos, nonEggs, Adults
def unwrap_text(text):
    """Turn wrapped text into flowing paragraphs, ready for rewrapping by
    the console, browser, or textwrap.

    Blank lines delimit paragraphs; lines within a paragraph are joined
    with single spaces, paragraphs with blank lines.
    """
    paragraphs = []
    current = []
    for raw_line in text.splitlines():
        stripped = raw_line.strip()
        if stripped:
            current.append(stripped)
        else:
            # paragraph break: flush what we have (possibly empty)
            paragraphs.append(' '.join(current))
            current = []
    if current:
        paragraphs.append(' '.join(current))
    return '\n\n'.join(paragraphs)
def parcours_sinusoidal(n):
    """Return the list of (row, column) indices of a snake ("sinusoidal")
    traversal of an n x n array: even rows left-to-right, odd rows
    right-to-left.

    Bug fix: the reversed coordinate was hard-coded as `9 - x`, which is
    only correct for n == 10; it is now `n - 1 - x` so any size works.

    NOTE(review): the original docstring's example suggests a column-major
    traversal ((0,0),(1,0),(2,0),...) while the code produces a row-major
    snake — confirm which orientation callers expect.
    """
    return [(y, x if y % 2 == 0 else n - 1 - x)
            for y in range(0, n) for x in range(0, n)]
def poissons_ratio(vp, vs):
    """Calculate Poisson's Ratio based on the definition given in the
    Specfem3D source code.

    :type vp: float or np.array
    :param vp: P-wave velocity
    :type vs: float or np.array
    :param vs: S-wave velocity
    :rtype: float or np.array
    :return: Poisson's ratio
    """
    numerator = vp * vp - 2 * vs * vs
    denominator = vp * vp - vs * vs
    return 0.5 * numerator / denominator
def R2_a(FQmax, MYmax, qF, qM, ra, mu_Tmin):
    """R2 Determining the required minimum clamp load Fkerf (Section 5.4.1).

    The required minimum clamp load is determined for requirement
    a) Friction grip to transmit a transverse load FQ and/or a torque
       about the bolt's axis My.

    Implements equation (R2/1).
    """
    transverse_term = FQmax / (qF * mu_Tmin)
    torque_term = MYmax / (qM * ra * mu_Tmin)
    FKQ = transverse_term + torque_term  # (R2/1)
    return FKQ
def fill_dict(feed_dict, placeholders, data):
    """Feeds a dictionary of data into a dictionary of placeholders.

    For every key of `data`, stores the value under the corresponding
    placeholder in `feed_dict` (mutated in place) and returns it.
    """
    for key, value in data.items():
        feed_dict[placeholders[key]] = value
    return feed_dict
def validateForInteger(val):
    """Return int(val) if val can be converted to type int, otherwise -1.

    Note: -1 is also a legal converted value, so callers cannot distinguish
    the input "-1" from a conversion failure.

    Bug fix: the original used a bare `except:`, which also swallowed
    KeyboardInterrupt/SystemExit; only conversion errors are caught now.
    """
    try:
        return int(val)
    except (TypeError, ValueError):
        return -1
def prefix_filter(prefix):
    """Filter out the variable_scope: drop everything up to and including
    the first '/'. Raises ValueError when no '/' is present."""
    slash = prefix.index('/')
    return prefix[slash + 1:]
def is_metadata_nonempty(metadata):
    """Helper function to acknowledge metadata that are 'empty' by multiple
    measures. While empty metadata is not necessarily incorrect, it could be
    a heavy indicator of those that are 'incomplete' or actually not
    processing.

    :param metadata: (Union[None, set, dict]) -- object containing metadata
    :return: bool: whether the metadata are non-empty by all measures.

    Bug fix: the original compared against [None, {}, dict()] — the last
    two are the same value, and an empty set slipped through as
    "non-empty" even though the docstring admits sets.
    """
    return metadata not in (None, {}, set())
def is_resign_act(board_size, action):
    """Check if the action is resign.

    The resign action is encoded as board_size**2, i.e. one past the last
    board point index.
    """
    resign_code = board_size * board_size
    return action == resign_code
def histbins(bins):
    """Adjust the bins from starts to centers; useful for plotting.

    Each center is the midpoint of consecutive starts; the final center is
    extrapolated by half of the last interval. A single bin is returned
    unchanged.
    """
    if len(bins) == 1:
        return [bins[0]]
    centers = [(left + right) / 2.0 for left, right in zip(bins, bins[1:])]
    # extrapolate the last center past the final start
    centers.append(bins[-1] + (bins[-1] - bins[-2]) / 2.0)
    return centers
def unify_stringlist(L: list):
    """Add asterisks to strings that appear multiple times, so the resulting
    list has only unique strings but still the same length, order, and
    meaning.

    For example:
        unify_stringlist(['a','a','b','a','c']) -> ['a','a*','b','a**','c']
    """
    assert all(isinstance(entry, str) for entry in L)
    occurrences = {}
    unified = []
    for entry in L:
        seen = occurrences.get(entry, 0)
        unified.append(entry + "*" * seen)
        occurrences[entry] = seen + 1
    return unified
def CobbDouglas(constance, factors, alphas_factors, goods, alphas_goods):
    """Evaluate a Cobb-Douglas function.

    result = constance
             * prod(goods[g] ** alphas_goods[g] for g in goods)
             * prod(factors[f] ** alphas_factors[f] for f in factors)

    :param constance: constant multiplier c
    :param factors: dict factor -> quantity
    :param alphas_factors: dict factor -> exponent
    :param goods: dict good -> quantity
    :param alphas_goods: dict good -> exponent
    """
    result = constance
    # goods first, then factors (matches the original evaluation order)
    for good, quantity in goods.items():
        result *= quantity ** alphas_goods[good]
    for factor, quantity in factors.items():
        result *= quantity ** alphas_factors[factor]
    return result
def cut_rod_bottom_up_cost(p, n, c):
    """Bottom-up rod cutting with a fixed cost c per cut.

    Only difference from the book is p[i-1] instead of p[i] due to 0-based
    indexing; the table r has n+1 entries since range excludes its end.

    :param p: prices, p[i-1] is the price of a piece of length i
    :param n: rod length
    :param c: cost charged for each cut made
    :return: maximum obtainable revenue for length n
    """
    r = [0] * (n + 1)
    for length in range(1, n + 1):
        best = -100000
        for first_piece in range(1, length + 1):
            # Only one explicit cut per iteration; the rest are "embedded"
            # in the subproblem value r[length - first_piece]. Selling the
            # whole piece (first_piece == length) needs no cut, so no -c.
            candidate = p[first_piece - 1] + r[length - first_piece]
            if first_piece != length:
                candidate -= c
            best = max(best, candidate)
        r[length] = best
    return r[n]
def remove_the_mean_person(queue, person_name):
    """Remove the mean person from the queue by the provided name.

    The queue is mutated in place; only the first occurrence is removed.

    :param queue: list - names in the queue.
    :param person_name: str - name of mean person.
    :return: list - the same queue with the mean person's name removed.
    """
    del queue[queue.index(person_name)]
    return queue
def is_correct(pred_sentence, pred_sentences, gold_sets):
    """A predicted sentence is correctly identified if it is part of a gold
    rationale AND every other sentence of that gold rationale was also
    predicted.

    The first gold set containing pred_sentence decides the answer.
    """
    for gold_set in gold_sets:
        gold_sents = gold_set["sentences"]
        if pred_sentence not in gold_sents:
            continue
        return all(sent in pred_sentences for sent in gold_sents)
    return False
def sum_node_list(node_list):
    """Custom sum func to avoid creating redundant nodes in Python sum func.

    Python's built-in sum starts from 0, which would create an extra add
    node; reduce folds the list pairwise instead.
    """
    from functools import reduce
    from operator import add
    return reduce(add, node_list)
def f1score(precision_value, recall_value, eps=1e-5):
    """Calculate the F1-score from precision and recall.

    Args:
        precision_value: precision (0-1)
        recall_value: recall (0-1)
        eps: epsilon added to the denominator to avoid division by zero

    Returns:
        F1 score (0-1)
    """
    numerator = 2 * (precision_value * recall_value)
    denominator = precision_value + recall_value + eps
    return numerator / denominator
def bytes_formatter(b):
    """Formatter that formats bytes into kb/mb/gb etc...

    Divides by 1024 until the magnitude fits, appending the matching unit
    letter (a plain space for bytes, 'Y' beyond zettabytes).
    """
    units = ' KMGTPEZ'
    position = 0
    while position < len(units):
        if abs(b) < 1024.0:
            return "%3.1f%s" % (b, units[position])
        b /= 1024.0
        position += 1
    return "%.1f%s" % (b, 'Y')
def clean_up_names(item_names):
    """Cleans up item names.

    Strips image/extension decorations (".png", "_sm", "iron_",
    "enchanted_") and converts the underscore-joined tokens to a
    space-separated string, keeping "fishing_rod" intact as one item.

    :param item_names: String of item names
    :return: String of cleaned up item names
    """
    # Protect "fishing_rod" from being split on its inner underscore.
    item_names = item_names.replace("fishing_rod", "fishingrod")
    for token in (".png", "_sm", "iron_", "enchanted_"):
        item_names = item_names.replace(token, "")
    words = item_names.split("_")
    words = ["fishing_rod" if word == "fishingrod" else word for word in words]
    return " ".join(words)
def find_substring(s: str, marker_1: str, marker_2: str) -> str:
    """Extract the substring strictly between the first occurrence of
    marker_1 and the next occurrence of marker_2 after it.

    Returns '' when either marker is absent.

    Bug fix: the original fed str.find's -1 "not found" sentinel straight
    into slicing, returning garbage (e.g. the tail of the string, or the
    string minus its last character) when a marker was missing.
    """
    start = s.find(marker_1)
    if start == -1:
        return ""
    start += len(marker_1)
    end = s.find(marker_2, start)
    if end == -1:
        return ""
    return s[start:end]
def _format_tags(tags): """Reformats a string of tags. Extracts a list of strings from a string in diamond notation. Args: tags: Tags in diamond notation as one string. Returns: A list of tags as list of strings. Example: ['publications', 'online-publications] """ tags_ = tags.replace('<', '').split('>') return tags_[0:(len(tags_)-1)]
def ReadStampFile(filename):
    """Return the contents of the stamp file, or '' if it doesn't exist."""
    try:
        stamp = open(filename, 'r')
    except IOError:
        return ''
    with stamp:
        return stamp.read()
def escape_quote_character_in_field(field_data: bytes, quotechar: bytes, escapechar: bytes) -> bytes:
    """Prefix every quotechar in field_data with escapechar.

    >>> escape_quote_character_in_field('test'.encode('ISO-8859-1'), quotechar=b'"', escapechar=b'"')
    b'test'
    >>> escape_quote_character_in_field('te"st'.encode('ISO-8859-1'), quotechar=b'"', escapechar=b'"')
    b'te""st'
    """
    if quotechar not in field_data:
        return field_data
    return field_data.replace(quotechar, escapechar + quotechar)
def _legalize_tensor_name(tensor_name: str) -> str: """Converts tensor name from 'name:index' to 'name__index' format.""" return tensor_name.replace(':', '__')
def markdown(s: str) -> str:
    """Wraps a string into a markdown code block (triple backticks)."""
    return "```\n" + s + "\n```"
def match_system_cyberwatch(system):
    """Match the system specified in the file with Cyberwatch syntax.

    Returns a (remote-access class, port) tuple; unrecognized systems fall
    back to Linux/SSH on port 22 with a warning printed.
    """
    known_systems = {
        "windows": ("CbwRam::RemoteAccess::WinRm::WithNegotiate", 5985),
        "linux": ("CbwRam::RemoteAccess::Ssh::WithPassword", 22),
        "network device": ("CbwRam::RemoteAccess::Snmp", 161),
    }
    if system in known_systems:
        return known_systems[system]
    print("System '{}' not recognized, setting default as 'Linux' and port 22".format(system))
    return "CbwRam::RemoteAccess::Ssh::WithPassword", 22
def get_index_in_eph(time, eph_start, eph_stop):
    """Return the index of the ephemeris interval containing `time`.

    Intervals are inclusive on both ends; -1 is returned when no interval
    matches.
    """
    for idx, (start, stop) in enumerate(zip(eph_start, eph_stop)):
        if start <= time <= stop:
            return idx
    return -1
def get_smaller_neighbour(plots, i, j):
    """Find a neighbouring plot with elevation strictly smaller than (i, j).

    Returns the (row, col) of the lowest strictly-smaller 4-neighbour, or
    None when every neighbour is at least as high.

    Bug fix: the original compared with `<=`, so a neighbour at the SAME
    elevation as (i, j) was returned even though the docstring promises a
    strictly smaller one.
    """
    n = len(plots)
    neighbours = []
    if i > 0:
        neighbours.append((i - 1, j))
    if i < n - 1:
        neighbours.append((i + 1, j))
    if j > 0:
        neighbours.append((i, j - 1))
    if j < n - 1:
        neighbours.append((i, j + 1))
    min_elevation = plots[i][j]
    min_elevation_plot = None
    for m in neighbours:
        elevation = plots[m[0]][m[1]]
        if elevation < min_elevation:
            min_elevation = elevation
            min_elevation_plot = m
    return min_elevation_plot
def output_block_template(name, data, padding=None):
    """Given a dict, output a block-adjusted template using keys as parameters.

    To ensure parameter order, supply an OrderedDict instead of a plain dict.
    Entries whose value is None are omitted; entries whose value is an empty
    string are output. Omitted entries still count toward automatic padding.

    @param name: The template name (without "Template:"-prefix)
    @param data: A dict where each key is a parameter name.
    @param padding: Number of characters from | to = on each row; 0 disables,
        None determines the minimum padding automatically.
    @return basestring
    """
    if padding is None:
        if data:
            # widest key, plus the "| " prefix width
            padding = max(len(key) for key in data.keys()) + 2
        else:
            padding = 0
    lines = ['{{{{{:s}\n'.format(name)]
    for param, value in data.items():
        if value is not None:
            lines.append('| {param} = {val}\n'.format(
                param=param.ljust(padding - 2), val=value))
    lines.append('}}')
    return ''.join(lines)
def distance(a: bytes, b: bytes) -> int:
    """Return the XOR distance between two IDs (big-endian byte strings)."""
    left = int.from_bytes(a, 'big')
    right = int.from_bytes(b, 'big')
    return left ^ right
def primality_test_naive(n):
    """Naive primality test for n, trial division up to sqrt(n). O(sqrt(n)).

    Prints its progress and verdict, and returns True/False.

    Bug fixes: the original silently incremented even inputs and tested the
    NEXT odd number instead, and reported n < 2 as prime.
    """
    print("Checking " + str(n))
    if n < 2 or (n % 2 == 0 and n != 2):
        print(str(n) + " is composite")
        return False
    for i in range(3, n, 2):
        if i * i > n:
            break
        if n % i == 0:
            print(str(n) + " is composite")
            return False
    print(str(n) + " is prime")
    return True
def nested_lookup(dictionary, *entry):
    """Get a nested entry from a dictionary.

    nested_lookup(d, 'a', 'b') returns d['a']['b'].  Raises Exception with
    the joined key path when any key is missing.

    Fixes: removed the dead `isinstance(entry, str)` branch (`*entry` always
    binds a tuple), narrowed the bare `except:` to lookup errors, and
    chained the original cause onto the raised Exception.
    """
    v = None
    try:
        for e in entry:
            # after the first step, keep descending into the current value
            v = v[e] if v is not None else dictionary[e]
    except (KeyError, IndexError, TypeError) as exc:
        raise Exception("could not find entry '" +
                        "/".join(list(entry)) + "' in the dictionary") from exc
    return v
def apply_filter(results, threshold):
    """Run one simulation with a given threshold.

    :param dict results: database name -> {pathway name: genes mapped}
    :param int threshold: necessary genes to enrich a pathway
    :rtype: dict -- database name -> list of pathway names passing the filter
    """
    return {
        database_name: [
            pathway_name
            for pathway_name, genes_mapped in pathways.items()
            if genes_mapped >= threshold
        ]
        for database_name, pathways in results.items()
    }
def as_dict_based_diff(di):
    """Convert to dict-based diff format for dicts, for convenience.

    NB! Only one level, not recursive. This step will be unnecessary if we
    change the diff format to work this way always.
    """
    result = {}
    for entry in di:
        result[entry.key] = entry
    return result
def is_html_mime_type(mime_type):
    """Return whether the specified MIME type is valid for HTML."""
    html_mime_types = ("text/html", "application/xhtml+xml")
    return mime_type in html_mime_types
def socket_recvall(socket, length, bufsize=4096):
    """Read up to `length` bytes from a socket.

    Returns exactly `length` bytes, or fewer if the peer closes the
    connection first.

    Bug fixes: the original could overshoot `length` by up to bufsize-1
    bytes (each recv asked for a full bufsize), and it looped forever when
    the peer closed early because recv then returns b'' repeatedly.
    """
    data = b""
    while len(data) < length:
        # never ask for more than what is still missing
        chunk = socket.recv(min(bufsize, length - len(data)))
        if not chunk:
            break  # connection closed before `length` bytes arrived
        data += chunk
    return data
def remove_non_ascii(s):
    """Replace non-ASCII characters in a string with spaces (dirty hack).

    None is passed through unchanged.
    """
    if s is None:
        return None
    sanitized = [ch if ord(ch) < 128 else ' ' for ch in s]
    return ''.join(sanitized)
def istamil_alnum(tchar):
    """Check if the character is alphanumeric, or Tamil.

    Testing isalnum() first saves time over always running the istamil()
    check (tchar must expose an istamil() method).
    """
    if tchar.isalnum():
        return True
    return tchar.istamil()
def offset_hex(hex_string, offset=1):
    """Return the hex string offset by the given amount, zero-padded back to
    the original width.

    Useful for generating keys for which a given node_hash is responsible,
    i.e. offset the node's hash by a negative amount.
    """
    width = len(hex_string)
    shifted = format(int(hex_string, 16) + offset, 'x')
    return shifted.zfill(width)
def _find_largest_common_prefix(values): """ Searches through a list of values to find the longest possible common prefix amongst them. Useful for optimizing more costly searches. Supports lists of strings or tuples. If tuples are used, the first value is assumed to be the value to search on. :param values: List of values (strings or tuples containing a string in the first position) :return: String prefix common to all values """ if isinstance(values[0], tuple): prefix, *_ = values[0] else: prefix = values[0] for value in values: key = value[0] if isinstance(value, tuple) else value while key[:len(prefix)] != prefix and len(prefix) > 0: prefix = prefix[:-1] return prefix
def abs(value):
    """Return the absolute value of a number.

    Note: shadows the built-in abs within this module.

    Args:
        value (int): A number.

    Returns:
        int: The absolute value of the number.
    """
    return -value if value < 0 else value
def strip_yaml(text):
    """Strip leading YAML front matter (between the first '---' line and the
    next '---' line) from text.

    :param text: input document, possibly starting with '---'
    :return: text without the front-matter block; unchanged when the text
        does not start with '---'; '' when no closing '---' exists.

    Bug fix: the original loop incremented its counter on EVERY line and
    never stopped at the closing '---', so any document with front matter
    came back empty.
    """
    if text[:3] != '---':
        return text
    lines = text.split('\n')
    lines.pop(0)  # drop the opening '---' line
    for pos, line in enumerate(lines):
        if line == '---':
            return '\n'.join(lines[pos + 1:])
    return ''  # no closing marker found
def preserve_case(match, replace):
    """Return 'replace' with the letter case of 'match'.

    All-upper / all-lower / caseless matches are handled wholesale; for
    mixed case, 'match' is first padded (repeating its last character) or
    truncated to the length of 'replace', then the case of each character
    run in 'match' is applied to the corresponding slice of 'replace'.
    """
    if match.isupper():
        return replace.upper()
    elif match.islower():
        return replace.lower()
    elif match.lower() == match.upper():
        # caseless text (digits/punctuation only): nothing to mirror
        return replace
    elif len(replace) > len(match):
        # extend the case template by repeating match's final character
        match += match[-1] * (len(replace) - len(match))
    else:
        match = match[:len(replace)]
    out = []
    start = 0
    # `transform` is the callable applied to the current run; type(replace)
    # (i.e. str) acts as the identity transform for caseless characters
    transform = type(replace)
    for pos, c in enumerate(match):
        if c.islower():
            f = type(replace).lower
        elif c.isupper():
            f = type(replace).upper
        else:
            f = transform
        if transform is not f:
            # case run ended: flush the previous run with its transform
            if pos:
                out.append(transform(replace[start:pos]))
            start = pos
            transform = f
    out.append(transform(replace[start:]))
    return ''.join(out)
def preempt_exp_conf(experiment_config, db):
    """Fixture that returns an |experiment_config| where preemptible_runners
    is True. Implicitly depends on the db fixture because most users of this
    fixture need it (the argument itself is unused here)."""
    experiment_config.update(preemptible_runners=True)
    return experiment_config
def parse_version(*args, **kwargs):
    """Parse a version string via pkg_resources.parse_version.

    The import is deferred to call time because pkg_resources is a very
    slow load.
    """
    import pkg_resources
    return pkg_resources.parse_version(*args, **kwargs)
def _diagnoses_to_hccs(icd_mapping, hcc_hierachy, diagnoses, age, sex): """Returns a list of hierarchical condition categories, implied by a set of diagnoses Arguments: diagnoses {[string]} -- A list of ICD-10 codes Returns: [int] -- A list of HCCs, represented as ints """ # Normalize codes by uppercasing and stripping out periods diagnoses = [d.strip().upper().replace(".", "") for d in diagnoses] hccs = set() # get the union of all hccs implied by individual diagnoses for d in diagnoses: # some special case edits based on V22I0ED2.TXT if sex == 2 and d in {"D66", "D67"}: hccs.update({48}) elif age < 18 and d in { "J410", "J411", "J418", "J42", "J430", "J431", "J432", "J438", "J439", "J440", "J441", "J449", "J982", "J983", }: hccs.update({112}) elif age < 6 or age > 18 and d == "F3481": pass else: # If not special case, default to general mapping hccs.update(icd_mapping.get(d, [])) # remove HCCs that are already implied by more specific categories in the # hierarchy for cc in hccs.copy(): hccs.difference_update(hcc_hierachy.get(cc, [])) return hccs
def division(numerator, denominator):
    """Return numerator / denominator, or 0 when the denominator is falsy."""
    if not denominator:
        return 0
    return numerator / denominator
def _divide_runs(runs, processes): """ Splits a certain amount of runs among proncesses as evenly as possible. >>> _divide_runs(1000, 4) [250, 250, 250, 250] >>> _divide_runs(5, 10) [1, 1, 1, 1, 1, 0, 0, 0, 0, 0] >>> _divide_runs(5, 2) [3, 2] """ quotient, remainder = divmod(runs, processes) return [quotient + 1] * remainder + [quotient] * (processes - remainder)
def kernal_mus(n_kernels):
    """Get the mu for each gaussian kernel; mu is the middle of each bin.

    :param n_kernels: number of kernels (including exact match);
        the first one is the exact match (mu = 1)
    :return: l_mu, a list of mu values
    """
    l_mu = [1]
    if n_kernels == 1:
        return l_mu
    # score range is [-1, 1], split into n_kernels - 1 soft bins
    bin_size = 2.0 / (n_kernels - 1)
    mu = 1 - bin_size / 2  # middle of the first bin
    l_mu.append(mu)
    for _ in range(n_kernels - 2):
        mu = mu - bin_size
        l_mu.append(mu)
    return l_mu
def is_supported_value_type(value):
    """Check if the given value type is supported.

    Supported Types:
    - strings, bytes, numbers, booleans, None
    - tuples, lists, sets (all elements must be supported)
    - dicts (all keys and values must be supported)
    """
    if value is None or isinstance(value, (str, bytes, int, float, bool)):
        return True
    if isinstance(value, dict):
        return all(
            is_supported_value_type(k) and is_supported_value_type(v)
            for k, v in value.items()
        )
    if isinstance(value, (tuple, list, set)):
        return all(is_supported_value_type(item) for item in value)
    return False
def split_str_w_esc(str_, delimiter, escape='\\'):
    """Split a string on a delimiter while honoring an escape character.

    The escape character suppresses only a following delimiter or a
    following escape character; before any other character (or at the end
    of the string) the escape is copied through literally.

    NOTE(review): despite the original docstring's claim, comparisons are
    per-character, so only single-character delimiters work.

    Returns the list of split elements.
    """
    pieces = []
    buf = []
    chars = iter(str_)
    for ch in chars:
        if ch == delimiter:
            # split! flush the current element
            pieces.append(''.join(buf))
            buf = []
        elif ch == escape:
            nxt = next(chars, None)
            if nxt is None:
                # trailing escape: keep it verbatim
                buf.append(escape)
            else:
                if nxt not in (delimiter, escape):
                    # escape not in use: copy it through
                    buf.append(escape)
                buf.append(nxt)
        else:
            buf.append(ch)
    pieces.append(''.join(buf))
    return pieces
def printme(o: str):
    """Format `o` fancily.

    Parameters
    ----------
    o : str
        Some input

    Returns
    -------
    str
        The input embedded in a fixed quip.
    """
    return '{} is the right answer to "Who wants to be a Millionaire!"'.format(o)
def extract_all_wiki_links(entry):
    """
    Extract all Wikipedia links associated to an entry.

    :param entry: entry data (mapping)
    :return: the value under the "sitelinks" key, or None when absent
    """
    if "sitelinks" in entry:
        return entry["sitelinks"]
    return None
def generate_graph(width, height):
    """
    Create a rectangular grid graph of the given dimensions.

    Nodes are numbered 0..width*height-1 in row-major order over coordinates
    (i, j) with i in [0, width) and j in [0, height).  Each node is adjacent
    to its in-bounds 4-neighbours and to itself (staying at the same cell for
    multiple rounds is allowed).

    :param width: grid width (int)
    :param height: grid height (int)
    :return: (graph, id_to_coord, coord_to_id) where graph maps node id ->
        set of adjacent node ids, and the two dicts translate between node
        ids and (i, j) coordinates.
    """
    assert isinstance(width, int) and isinstance(height, int)

    def _neighbors(i, j):
        # In-bounds 4-neighbours of (i, j), plus the cell itself.
        cells = []
        if i > 0:
            cells.append((i - 1, j))
        if j > 0:
            cells.append((i, j - 1))
        if i < width - 1:
            cells.append((i + 1, j))
        if j < height - 1:
            cells.append((i, j + 1))
        # Assume we can stay at the same cell for multiple rounds.
        cells.append((i, j))
        return cells

    # Row-major numbering: node_id increases with j fastest, then i.
    coords = [(i, j) for i in range(width) for j in range(height)]
    id_to_coord = dict(enumerate(coords))
    coord_to_id = {coord: node_id for node_id, coord in id_to_coord.items()}
    graph = {
        node_id: {coord_to_id[cell] for cell in _neighbors(*coord)}
        for node_id, coord in id_to_coord.items()
    }
    return graph, id_to_coord, coord_to_id
def replace_with_separator(text, separator, regexs):
    """Replace matched word boundaries in *text* with *separator*.

    Each regex is expected to capture two groups; the match is rewritten as
    group1 + separator + group2.

    Parameters
    ----------
    text : str
        Input text.
    separator : str
        The separator to insert between the two captured groups.
    regexs : list of compiled regex patterns
        Patterns applied in order to the text.

    Returns
    -------
    str
        Text with replaced separators.
    """
    substitution = r"\1" + separator + r"\2"
    out = text
    for pattern in regexs:
        out = pattern.sub(substitution, out)
    return out
def itow(iTOW_in_ms):
    """
    Break a time-of-week value given in milliseconds into calendar parts.

    :param iTOW_in_ms: time of week in milliseconds (non-negative)
    :return: tuple (day, hour, minute, second) as ints
    """
    total_seconds = iTOW_in_ms / 1000  # ms -> seconds
    # Peel off each unit with divmod, largest first.
    days, remainder = divmod(total_seconds, 24 * 60 * 60)
    hours, remainder = divmod(remainder, 60 * 60)
    minutes, seconds = divmod(remainder, 60)
    return int(days), int(hours), int(minutes), int(seconds)
def suffix(key, term):
    """Append *term* to a key.

    For a tuple key only the last element gets the suffix; any other
    non-string key is stringified first.

    >>> suffix('x', '.dtype')
    'x.dtype'
    >>> suffix(('a', 'b', 'c'), '.dtype')
    ('a', 'b', 'c.dtype')
    """
    if isinstance(key, tuple):
        # Suffix only the last element, keep the rest untouched.
        return key[:-1] + (suffix(key[-1], term),)
    if isinstance(key, str):
        return key + term
    # Fall back to the string representation of the key.
    return suffix(str(key), term)
def _segmentrepr(obj): """ >>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], [0.1, 2.2]]]]) '(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))' """ try: it = iter(obj) except TypeError: return "%g" % obj else: return "(%s)" % ", ".join(_segmentrepr(x) for x in it)
def encode_function_data(initializer=None, *args):
    """Encode the function call so we can work with an initializer.

    Args:
        initializer: object exposing ``encode_input`` (e.g. a brownie
            ContractTx), or None.
        args: positional arguments forwarded to the initializer.

    Returns:
        bytes: the encoded call, or b"" when there is no initializer.
    """
    call_args = args if args else b""
    if initializer:
        return initializer.encode_input(*call_args)
    return b""
def prune_tree(tree, cutoff, posteriors):
    """
    Keep only the edges whose posterior is strictly above *cutoff*.

    Edges may be stored in *posteriors* in either orientation; the reversed
    edge is tried when the edge itself is absent (a KeyError propagates if
    neither orientation is present).
    """
    kept = []
    for edge in tree:
        try:
            score = posteriors[edge]
        except KeyError:
            # Undirected edge: try the opposite orientation.
            score = posteriors[edge[::-1]]
        if score > cutoff:
            kept.append(edge)
    return kept
def exists(env):
    """Return the configured `cscope` command, or None when absent.

    :param env: mapping-like construction environment
    """
    if 'CSCOPE' in env:
        return env['CSCOPE']
    return None
def _create_columns_from_format_string(format_string): """ "+++00^^++" -> ((0,3),(5,7),(7,9)) 0 stands for column to ommit """ columns = [] last_change_idx = 0 last_character = -1 for i, c in enumerate(format_string+"x"): # add 'x' to introduce change at the end if last_character != c: if last_change_idx != i and last_character != '0': columns.append((last_change_idx,i)) last_change_idx = i last_character = c return tuple(columns)
def html_formatted_decomposition(decomposition_string, delimiter=";"):
    """Return a formatted html string representation of a query decomposition
    to be displayed on an html HIT layout.

    Parameters
    ----------
    decomposition_string : str
        Decomposition string with delimited steps; a trailing delimiter is
        ignored. An empty string yields an empty result.
    delimiter : str
        Delimiter at the end of decomposition steps.

    Returns
    -------
    str
        Formatted html string: numbered steps, each on a new line.
    """
    # Guard: the original indexing decomposition_string[-1] would raise
    # IndexError on an empty input.
    if not decomposition_string:
        return ""
    # Remove a single trailing delimiter.
    if decomposition_string[-1] == delimiter:
        decomposition_string = decomposition_string[:-1]
    steps = decomposition_string.split(delimiter)
    return "".join(
        '<br>' + str(number) + '. ' + step
        for number, step in enumerate(steps, start=1)
    )
def str_shorten(string, length=10):
    """Return *string* truncated to at most *length* characters (default 10)."""
    if len(string) <= length:
        return string
    return string[:length]
def compare_digest(a, b):
    """
    ** From Django source **

    Constant-time comparison of two strings.

    Returns True only when *a* and *b* have the same length and content.
    The XOR accumulator visits every character pair regardless of where the
    first difference occurs, avoiding timing leaks.
    """
    if len(a) != len(b):
        return False
    mismatch = 0
    for cha, chb in zip(a, b):
        mismatch |= ord(cha) ^ ord(chb)
    return not mismatch
def get_time_tied_leading_trailing(event, previous_score, last_goal_time):
    """
    Classify the previous score state and how long it has lasted.

    :param event: mapping with a 'time' entry (current event time)
    :param previous_score: mapping with 'home' and 'road' goal counts
    :param last_goal_time: time the last goal was scored
    :return: (state, elapsed) where state is 'tied', 'home_leading' or
        'road_leading' and elapsed is event time minus last goal time
    """
    elapsed = event['time'] - last_goal_time
    home = previous_score['home']
    road = previous_score['road']
    if home > road:
        return 'home_leading', elapsed
    if home < road:
        return 'road_leading', elapsed
    return 'tied', elapsed
def awk(s: str, n: int, d: str = ' ') -> str:
    """Act like awk: select the nth field of a string.

    Args:
        s: string to split up
        n: the 1-indexed field to select
        d: the delimiter between fields

    Returns:
        The selected field, or '' when there are fewer than n fields.
    """
    fields = s.strip().split(d)
    # awk numbering starts at 1, hence n - 1.
    return fields[n - 1] if n <= len(fields) else ''
def sort_by_start_date(courses):
    """
    Return the courses sorted for display: courses that have not ended come
    before those that have; within each group, courses are ordered by start
    date ascending (earliest first), with courses lacking a start date last.

    NOTE(review): the previous docstring claimed "latest first", but the sort
    key is used with reverse=False (ascending), so earliest actually comes
    first — the docstring was corrected to match the implementation.
    """
    # Tuple key: has_ended() False sorts before True; (start is None) pushes
    # undated courses after dated ones so None is never compared to a date.
    courses = sorted(
        courses,
        key=lambda course: (course.has_ended(), course.start is None, course.start),
        reverse=False
    )
    return courses
def blockdims_dict_to_tuple(old, new):
    """Return *old* as a tuple with the positions listed in *new* overridden.

    >>> blockdims_dict_to_tuple((4, 5, 6), {1: 10})
    (4, 10, 6)
    """
    updated = list(old)
    for position, dim in new.items():
        updated[position] = dim
    return tuple(updated)
def dict_with_date_buckets(a_dict):
    """Given an Elasticsearch aggregation, return the 'buckets' part.

    A top-level 'buckets' array (a range facet) is sorted in descending
    order of its 'from' value, since Elasticsearch returns range facets
    ascending. Otherwise the first nested aggregation that carries a
    'buckets' array is returned as-is.

    Raises when no aggregation with a 'buckets' array can be found.
    """
    if 'buckets' in a_dict:
        buckets = a_dict['buckets']
        # Flip the range facet to descending order.
        buckets.sort(key=lambda bucket: -bucket['from'])
        return buckets
    for sub_agg in a_dict.values():
        if isinstance(sub_agg, dict) and 'buckets' in sub_agg:
            return sub_agg['buckets']
    raise Exception('Should not happen: aggregations dict from Elasticsearch '
                    'does not have an aggregation with "buckets" array')