def _layer_size_score(size, hole_count, hole_area):
    """ Heuristic used for determining the correct file number interpretation. Lower is better. """
    board_area = size[0] * size[1]
    if board_area == 0:
        return 0
    hole_percentage = hole_area / board_area
    hole_score = (hole_percentage - 0.25) ** 2
    size_score = (board_area - 8) ** 2
    return hole_score * size_score
def GetUniqueFunctions(traces):
    """ Returns the unique set of functions for the input list of traces. """
    functions = set()
    for trace in traces:
        functions = functions | trace.UniqueFunctions()
    return sorted(list(functions))
def hex2str(hex):
    """ Decodes a hex string into a regular string """
    return bytes.fromhex(hex[2:]).decode("utf-8")
def backward_differences(f, h, x):
    """
    Backward Finite Differences.
    Approximates the derivative using the backward finite difference method.

    Parameters:
        f : function to be used
        h : step size to be used
        x : given point

    Returns:
        Approximation of the derivative at x
    """
    return (f(x) - f(x - h)) / h
def construct_change_message(obj, formsets, add, dic_changes={}):
    """
    Construct a JSON structure describing changes from a changed object.
    Translations are deactivated so that strings are stored untranslated.
    Translation happens later on LogEntry access.
    """
    change_message = []
    if add:
        change_message.append({'added': dic_changes})
    elif obj:
        change_message.append({'changed': {'fields': dic_changes}})
    return change_message
def constrain(input: float, low: float, high: float) -> float:
    """
    input: radian float
        a number to be constrained to the range low <-> high
    low: radian float
        minimum value the input can be
    high: radian float
        maximum value the input can be
    """
    return max(min(input, high), low)
def leading_zero(num):
    """ Adds a leading 0 to single digit numbers. Converts numbers to string """
    str_num = str(num)
    if not str_num.isdigit():  # Check if it's a number
        return str_num
    if len(str_num) < 2:
        return '0' + str_num
    return str_num
def expandCigar(cigar):
    """
    Turns the abbreviated cigar into the full array

    0 = M
    1 = I
    2 = D
    """
    ret = []
    for t, s in cigar:
        # remove tails...
        if t not in [0, 1, 2]:
            continue
        ret.extend([t] * s)
    return ret
def net_score(results):
    """ Takes in a results tuple and returns the net score after a subtraction """
    if type(results) != tuple:
        raise TypeError
    return int(results[0]) - int(results[1])
def relay_extra_info_descriptors_query_path(digests):
    """
    Generates a query path to request extra-info descriptors by digests
    from a directory server. For example:

    >>> digests = ["A94A07B201598D847105AE5FCD5BC3AB10124389",
    ...            "B38974987323394795879383ABEF4893BD4895A8"]
    >>> relay_extra_info_descriptors_query_path(digests)  # doctest: +ELLIPSIS
    '/tor/extra/d/A94A07B201598D847105...24389+B3897498732339479587...95A8'

    These query paths are defined in appendix B of [dir-spec]_.

    By convention, these URLs use upper-case hex-encoded SHA-1 digests and so
    this function will ensure that digests are upper-case. Directory server
    implementations should not rely on this behaviour.

    :param list(str) digests: The hex-encoded SHA-1 digests for the descriptors.

    :returns: Query path as a :py:class:`str`.
    """
    digests = [d.upper() for d in digests]
    return "/tor/extra/d/" + "+".join(digests)
def multiple_help_text(item):
    """Return help text for option with multiple=True."""
    return f" Use the option multiple times to specify more than one {item} [multiple]"
def remove_parenthesis(string):
    """Remove parentheses and square brackets, along with the text within them.

    For nested parentheses, removes the whole thing.
    """
    ret = ''
    skip1c = 0
    skip2c = 0
    for i in string:
        if i == '[':
            skip1c += 1
        elif i == '(':
            skip2c += 1
        elif i == ']' and skip1c > 0:
            skip1c -= 1
        elif i == ')' and skip2c > 0:
            skip2c -= 1
        elif skip1c == 0 and skip2c == 0:
            ret += i
    return ret
def merge_dicts(*dicts, **kwargs):
    """ merge_dicts(*dicts, cls=None, deep=False)
    Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of
    the passed dicts and therefore, values of rear objects have precedence in case of field
    collisions.

    The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is
    inferred from the first dict object in *dicts*.

    When *deep* is *True*, dictionary types within the dictionaries to merge are updated
    recursively such that their fields are merged. This is only possible when input dictionaries
    have a similar structure. Example:

    .. code-block:: python

        merge_dicts({"foo": 1, "bar": {"a": 1, "b": 2}}, {"bar": {"c": 3}})
        # -> {"foo": 1, "bar": {"c": 3}}  # fully replaced "bar"

        merge_dicts({"foo": 1, "bar": {"a": 1, "b": 2}}, {"bar": {"c": 3}}, deep=True)
        # -> {"foo": 1, "bar": {"a": 1, "b": 2, "c": 3}}  # inserted entry bar.c

        merge_dicts({"foo": 1, "bar": {"a": 1, "b": 2}}, {"bar": 2}, deep=True)
        # -> {"foo": 1, "bar": 2}  # "bar" has a different type, so this just uses the rear value
    """
    # get or infer the class
    cls = kwargs.get("cls", None)
    if cls is None:
        for d in dicts:
            if isinstance(d, dict):
                cls = d.__class__
                break
        else:
            raise TypeError("cannot infer cls as none of the passed objects is of type dict")

    # start merging
    deep = kwargs.get("deep", False)
    merged_dict = cls()
    for d in dicts:
        if not isinstance(d, dict):
            continue
        if deep:
            for k, v in d.items():
                # just take the value as is when it is not a dict, or the field is either not
                # existing yet or not a dict in the merged dict
                if not isinstance(v, dict) or not isinstance(merged_dict.get(k), dict):
                    merged_dict[k] = v
                else:
                    # merge by recursion
                    merged_dict[k] = merge_dicts(merged_dict[k], v, cls=cls, deep=deep)
        else:
            merged_dict.update(d)
    return merged_dict
def firstdigit(n):
    """ Finding the first non-zero integer digit """
    if n < 0:  # No more negatives
        n = n * (-1)
    if n == 0:  # Return zero if zero - not much can be done here, we'll skip it later.
        return int(0)
    if n < 1:
        # Skip '0.' then iterate
        for i in str(n)[2:]:
            if int(i) > 0:  # Return first non-zero for cases like 0.40
                return int(i)
    else:
        # Just that first digit
        return int(str(n)[0])
def index_power(array: list, n: int) -> int:
    """ Find Nth power of the element with index N. """
    if len(array) <= n:
        ans = -1
    else:
        ans = array[n] ** n
    return ans
def quarterly_compound_interest(p: int, i: float, n: int) -> float:
    """
    Calculates the total return on a standard deposit and interest rate every period.

    Args:
        p: principal, the amount to be deposited every period
        i: interest, expressed in decimal (3% = .03)
        n: periods, the number of periods to calculate for
    """
    value = 0
    total_value = 0
    periods = n
    while periods > 0:
        value = p * (1 + i) ** periods
        total_value = total_value + value
        periods -= 4
        print(value, total_value)
    return total_value
def hamming(str1, str2):
    """Hamming distance between two strings"""
    return sum(a != b and not (a == 'N' or b == 'N') for a, b in zip(str1, str2))
def identify_errors(tokens, dictionary):
    """Compare words in documents to words in dictionary.

    Args:
        tokens (list): List of all tokens in the document.
        dictionary (set): The set of approved words.

    Returns:
        set: Returns the set of tokens in the documents that are not also dictionary words.
    """
    return set(tokens).difference(dictionary)
def sort_hsvs(hsv_list):
    """
    Sort the list of HSV values
    :param hsv_list: List of HSV tuples
    :return: List of indexes, sorted by hue, then saturation, then value
    """
    bars_with_indexes = []
    for index, hsv_val in enumerate(hsv_list):
        bars_with_indexes.append((index, hsv_val[0], hsv_val[1], hsv_val[2]))
    bars_with_indexes.sort(key=lambda elem: (elem[1], elem[2], elem[3]))
    return [item[0] for item in bars_with_indexes]
def single_fscore_PR(prec, rec, beta):
    """ Computes an F-score from a precision and recall score """
    betasq = beta ** 2
    if prec == 0. or rec == 0.:
        return 0.
    return (1 + betasq) / (1. / prec + betasq / rec)
def average(avg_list):
    """Averages all values in the list."""
    if len(avg_list) == 0:
        return 0
    avg = sum(avg_list) / len(avg_list)
    return avg
def maskSequence(sequence, regions, mask_char="N"):
    """mask sequence with regions."""
    nmasked = 0
    for start, end in regions:
        sequence[start:end] = mask_char * (end - start)
        nmasked += end - start
    return sequence, nmasked
def check_uniprot(alias):
    """ Return True if the protein has a UniProt identifier """
    return len(alias) == 1 or alias.split(':')[0] == 'uniprotkb'
def tax2015(income):
    """
    15% on the first $44,701 of taxable income, +
    22% on the next $44,700 of taxable income (on the portion of taxable income over $44,701 up to $89,401), +
    26% on the next $49,185 of taxable income (on the portion of taxable income over $89,401 up to $138,586), +
    29% of taxable income over $138,586.
    """
    amt = income
    tax_amt = 0
    first_amt = min(44701, amt)
    tax_amt = first_amt * 0.15
    amt = amt - first_amt
    second_amt = min(44700, amt)
    tax_amt = tax_amt + (second_amt * 0.22)
    amt = amt - second_amt
    third_amt = min(49185, amt)
    tax_amt = tax_amt + (third_amt * 0.26)
    amt = amt - third_amt
    tax_amt = tax_amt + (amt * 0.29)
    return tax_amt
def format_heading(level, text):
    """Create a heading of <level> [1, 2 or 3 supported]."""
    underlining = ['=', '-', '~', ][level - 1] * len(text)
    return '%s\n%s\n\n' % (text, underlining)
def _get_codon(codon, dna):
    """Get codon."""
    return codon.replace("U", "T") if dna else codon
def is_data(line):
    """
    Function utilized by itertools' groupby method in determining the delimiter
    between our blocks of data.
    """
    return True if line.strip() else False
def build_mount(disk, path, read_only):
    """Build a Mount object for a Pipeline request.

    Args:
        disk (str): Name of disk to mount, as specified in the resources section.
        path (str): Path to mount the disk at inside the container.
        read_only (boolean): If true, disk is mounted read only in the container.

    Returns:
        An object representing a Mount.
    """
    return {
        'disk': disk,
        'path': path,
        'readOnly': read_only,
    }
def get_bottom_strip(chonk):
    """ Compute the bottom horizontal strip of a 2D list. """
    return chonk[-1]
def build_delete(table, where):
    """
    Build a delete request.

    Parameters
    ----------
    table : str
        Table where query will be directed.
    where : iterable
        The list of conditions to constrain the query.

    Returns
    -------
    str
        Built query string.
    """
    return (
        f"DELETE FROM \"{table}\" WHERE "
        + " AND ".join(f"{w} = :{w}" for w in where)
    )
def normalize(ratings_dict, average):  # Done
    """ Returns the normalized ratings of the user. """
    normalized_dict = {}
    for i in ratings_dict:
        normalized_dict.update({i: ratings_dict[i] - average})
    return normalized_dict
def duration_hrm(time):
    """Find the duration of an ECG strip by subtracting the initial time from the final time

    Args:
        time: list of time values read from a CSV file

    Returns:
        duration: the duration of the imported ECG signal (in seconds)
    """
    duration = time[-1] - time[0]
    # print(time[-1])
    # print(time[0])
    print(duration)
    return duration
def is_job(job):
    """Check if a given job representation corresponds to a job and not a folder or a view.

    :param job: Jenkins Job representation as a dictionary
    :type job: dict
    :returns: Whether the job representation actually corresponds to a job
    :rtype: bool
    """
    job_class = job["_class"].lower()
    return not ("view" in job_class or "folder" in job_class or "multibranch" in job_class)
def split_smiles(smiles: str) -> list:
    """Splits a SMILES string into individual tokens.

    This will split a SMILES string into tokens representing individual parts of
    the SMILES string. Each individual token will be one of the following,

    - A single atom (including hydrogens or charge if specified)
    - A ring connection number (single digit or multi-digit)
    - Another SMILES character

    Args:
        smiles: The input SMILES string.

    Returns:
        A list of tokens.
    """
    result = []
    while smiles != '':
        if smiles[:2] == 'Cl' or smiles[:2] == 'Br':
            token_length = 2
        elif smiles[0] == '%':
            # Ring linkage numbers >9 are prefixed by '%'
            token_length = 3
        elif smiles[0] == '[':
            token_length = smiles.index(']') + 1
        else:
            token_length = 1
        result.append(smiles[:token_length])
        smiles = smiles[token_length:]
    return result
def calc_order(wavelength, gap_size):
    """
    Returns the FP interferential order.

    Parameters
    ----------
    wavelength (float):
    gap_size (float):

    Returns
    -------
    order (float)
    """
    return 2 * (gap_size * 1e-6) / (wavelength * 1e-10)
def levenshtein(a, b):
    """Calculates the Levenshtein distance between a and b.

    The code was taken from: http://hetland.org/coding/python/levenshtein.py
    """
    n, m = len(a), len(b)
    if n > m:
        # Make sure n <= m, to use O(min(n,m)) space
        a, b = b, a
        n, m = m, n
    current = list(range(n + 1))
    for i in range(1, m + 1):
        previous, current = current, [i] + [0] * n
        for j in range(1, n + 1):
            add, delete = previous[j] + 1, current[j - 1] + 1
            change = previous[j - 1]
            if a[j - 1] != b[i - 1]:
                change = change + 1
            current[j] = min(add, delete, change)
    return current[n]
def getMiddleOfLengthInsert(length):
    """ Get the position for inserting an item in the middle of the given length.
    INPUT <= integer length
    OUTPUT => integer position
    """
    return (length + 1) // 2
def update_dropped_frames_metric(ml_channel_id, ml_channel_name):
    """Update the metrics of the "Dropped Frames (sum)" dashboard widget"""
    result = []
    entry = ["MediaLive", "DroppedFrames", "ChannelId", ml_channel_id, "Pipeline", "0",
             {"label": ml_channel_name + "-0"}]
    result.append(entry)
    entry = ["MediaLive", "DroppedFrames", "ChannelId", ml_channel_id, "Pipeline", "1",
             {"yAxis": "right", "label": ml_channel_name + "-1"}]
    result.append(entry)
    return result
def order_tweets_by_polarity(tweets, positive_highest=True):
    """Sort the tweets by polarity, receives positive_highest which determines the order.
    Returns a list of ordered tweets."""
    return sorted(tweets, key=lambda x: x.polarity, reverse=positive_highest)
def remove_offsets(thetas, axis_offsets, rot_directions):
    """ """
    thetas_normalized = []

    # Invert the offsets
    axis_offsets_inverted = [-i for i in axis_offsets]

    # To remove an offset, we first flip the rotation direction (if necessary),
    # then subtract the angle offset
    for i, theta in enumerate(thetas):
        thetas_normalized.append(theta)
        if rot_directions[i]:
            thetas_normalized[i] = thetas_normalized[i] * (-1)
        thetas_normalized[i] = thetas_normalized[i] - axis_offsets_inverted[i]

    return thetas_normalized
def make_reverse_dict(in_dict, warn=True):
    """ Build a reverse dictionary from a cluster dictionary

    Parameters
    ----------
    in_dict : dict(int:[int,])
        A dictionary of clusters. Each cluster is a source index and
        the list of other source in the cluster.

    Returns
    -------
    out_dict : dict(int:int)
        A single valued dictionary pointing from source index to cluster key
        for each source in a cluster. Note that the key does not point to itself.
    """
    out_dict = {}
    for k, v in in_dict.items():
        for vv in v:
            if vv in out_dict:
                if warn:
                    print("Dictionary collision %i" % vv)
            out_dict[vv] = k
    return out_dict
def ends_with(strn, suffix):
    """Check to see if the specified string ends with the suffix.

    Checks the specified string to see if it ends with the specified suffix.

    :param str strn: The string to check.
    :param str suffix: The suffix to search the specified string for.
    :returns: True if the string ends with the specified suffix; Otherwise, False.
    :rtype: bool
    """
    return strn.endswith(suffix)
def findSelectedFields(fieldSearchList, fieldNames):
    """
    fieldSearchList is a list of fields, potentially with wild cards.
    fieldNames is the real list of field names.
    Returns a list of all fields that match the SearchList.
    """
    prefixes = []
    exactMatches = []
    for f in fieldSearchList:
        if f.endswith("*"):
            prefixes.append(f.rstrip("*"))
        else:
            exactMatches.append(f)
    fieldsShown = []
    for f in fieldNames:
        if f in exactMatches:
            fieldsShown.append(f)
            continue
        for pf in prefixes:
            if f.startswith(pf):
                fieldsShown.append(f)
                break
    return fieldsShown
def MOSQ_LSB(A):
    """Get the least significant byte."""
    return (A & 0x00FF)
def simply_alphabet(seq):
    """Replace all U amino acids with C.

    Parameters:
        seq: str, peptide sequence
    """
    return seq.replace("U", "C")
def get_unique_smiles(smiles):
    """Given a list of smiles, return a list consisting of unique elements in it.

    Parameters
    ----------
    smiles : list of str
        Molecules in SMILES

    Returns
    -------
    list of str
        Sublist where each SMILES occurs exactly once
    """
    unique_set = set()
    for mol_s in smiles:
        if mol_s not in unique_set:
            unique_set.add(mol_s)
    return list(unique_set)
def indexOfShortestString(listOfStrings):
    """
    return index of shortest string from a non-empty list of strings, False otherwise

    By "shortest", we mean a value that is no longer than any other value in the list.
    There may be more than one string that would qualify.
    For example in the list ["dog","bear","wolf","cat"] both dog and cat are shortest strings.
    In such cases, return the one with the lowest index (in this case, dog).

    return False for empty list, not a list, or a list with at least one non-string item

    >>> indexOfShortestString([])
    False
    >>> indexOfShortestString('foo')
    False
    >>> indexOfShortestString(['foo'])
    0
    >>> indexOfShortestString(['bear','cat','dog','mouse'])
    1
    >>>
    """
    if type(listOfStrings) != list or listOfStrings == []:  # Return False if not a list or is empty
        return False
    currentMinIndex = 0
    for i in range(0, len(listOfStrings)):
        if type(listOfStrings[i]) != str:
            return False  # Return False if not all elements are strings
        if len(listOfStrings[i]) < len(listOfStrings[currentMinIndex]):
            currentMinIndex = i
    return currentMinIndex
def largest_power_of_two(n):
    """Returns the largest j such that 2**j divides n

    Based on
    https://www.geeksforgeeks.org/highest-power-of-two-that-divides-a-given-number/#:~:text=Highest%20power%20of%202%20that%20divides%205%20is%201.
    https://stackoverflow.com/questions/13105875/compute-fast-log-base-2-ceiling-in-python
    """
    return (n & (~(n - 1))).bit_length() - 1
def byte_to_readable(size_bytes):
    """Function to convert bytes to human readable format (MB, GB ...)

    Parameters
    ----------
    size_bytes: float
        size in bytes

    :return: Return size in human readable format
    :rtype: str
    """
    for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if size_bytes < 1024.0:
            return "%3.1f %s" % (size_bytes, x)
        size_bytes /= 1024.0
def total_number_of_clusters(tree) -> int:
    """Get the number of leaves in the tree."""
    if tree is None:
        return 1
    return sum(total_number_of_clusters(subtree) for subtree in tree.subregions)
def binary_search(a, x):
    """Find index of x in a in logarithmic time."""
    low = 0
    high = len(a) - 1
    while low <= high:
        mid = (low + high) // 2
        guess = a[mid]
        if guess == x:
            return mid
        elif guess < x:
            low = mid + 1
        else:
            high = mid - 1
    return None
def recurse_tree(tree, tkns, i, leaf_size):
    """Return a recursively built tree."""
    is_leaf = (i + 1) == len(tkns)
    tkn = tkns[i]
    try:
        tree['children'][tkn]
    except KeyError:
        tree['children'][tkn] = {
            'name': tkn,
            'parent': 'root',
            'size': -1,
            'children': {},
        }
    if i > 0:
        tree['children'][tkn]['parent'] = tkns[i - 1]
    if is_leaf:
        tree['children'][tkn]['size'] = leaf_size
    if is_leaf:
        return tree['children'][tkn]
    return recurse_tree(tree['children'][tkn], tkns, i + 1, leaf_size)
def mat2vec_index(N, i, j):
    """
    Convert a matrix index pair to a vector index that is compatible with the
    matrix to vector rearrangement done by the mat2vec function.
    """
    return i + N * j
def merge_intervals(intervals, srt=True, pad=0):
    """
    >>> merge_intervals([('chr1', 1, 4), ('chr1', 2, 5), ('chr2', 3, 5)])
    [('chr1', 1, 5), ('chr2', 3, 5)]
    """
    if srt:
        sorted_by_lower_bound = sorted(intervals, key=lambda tup: (tup[0], tup[1]))  # by chrom, start, end
    else:
        sorted_by_lower_bound = intervals
    if pad:
        sorted_by_lower_bound = [(c, 0 if i - pad < 0 else i - pad, j + pad)
                                 for c, i, j in sorted_by_lower_bound]
    merged = []
    for higher in sorted_by_lower_bound:
        if not merged:
            merged.append(higher)
            continue
        elif higher[0] != merged[-1][0]:  # Don't merge intervals on different chroms
            merged.append(higher)
        else:
            lower = merged[-1]  # Last item on merged (end of interval)
            # test for intersection between lower and higher:
            # we know via sorting that lower[0] <= higher[0]
            if higher[1] <= lower[2]:
                merged[-1] = (lower[0], lower[1], max(higher[2], lower[2]))
            else:
                merged.append(higher)
    return merged
def jsonify_dict(d):
    """Turns python booleans into strings so hps dict can be written in json.

    Creates a shallow-copied dictionary first, then accomplishes string conversion.

    Args:
        d: hyperparameter dictionary

    Returns:
        hyperparameter dictionary with bool's as strings
    """
    d2 = d.copy()  # shallow copy is fine by assumption of d being shallow

    def jsonify_bool(boolean_value):
        if boolean_value:
            return "true"
        else:
            return "false"

    for key in d2.keys():
        if isinstance(d2[key], bool):
            d2[key] = jsonify_bool(d2[key])
    return d2
def verse(bottle: int):
    """Sing a verse"""
    if bottle > 2:
        verse = '\n'.join([
            f'{bottle} bottles of beer on the wall,',
            f'{bottle} bottles of beer,',
            'Take one down, pass it around,',
            f'{bottle-1} bottles of beer on the wall!',
        ])
    elif bottle == 2:
        verse = '\n'.join([
            f'{bottle} bottles of beer on the wall,',
            f'{bottle} bottles of beer,',
            'Take one down, pass it around,',
            f'{bottle-1} bottle of beer on the wall!',
        ])
    else:  # bottle
        verse = '\n'.join([
            f'{bottle} bottle of beer on the wall,',
            f'{bottle} bottle of beer,',
            'Take one down, pass it around,',
            'No more bottles of beer on the wall!'
        ])
    return verse
def get_headers(data, extra_headers=None):
    """
    Takes the response data as well as any additional headers and returns
    a list of header tuples suitable for passing to start_response()
    """
    response_headers = {
        "Content-Length": str(len(data)),
    }
    if extra_headers:
        response_headers.update(extra_headers)
    return list(response_headers.items())
def dict_is_all_none(dict_item: dict) -> bool:
    """
    Test if all dictionary items are None.

    :param dict_item: A dictionary object to be tested.
    :return bool: True if all keys have None as value, False otherwise
    """
    for _key, _value in dict_item.items():
        if _value is not None:
            return False
    return True
def parse_idx(img_name):
    """
    Simple helper function that takes an image name and return the index position of the image.
    """
    bk = 0
    # Find where the significant digit appears
    prefix = img_name.split('.')[0][3:]
    for idx, alpha in enumerate(prefix):
        if int(alpha) == 0:
            continue
        else:
            bk = idx
            break
    num = int(prefix[bk:]) - 1  # Since image names start from 1
    return num
def remove(bowl_a, bowl_b):
    """Return bowl b without the "ingredients" of bowl_a."""
    for ingredient in bowl_a:
        # If an ingredient is also in bowl_a and bowl_b - remove it.
        if ingredient in bowl_b:
            bowl_b.remove(ingredient)
    return bowl_b
def oct_to_decimal(oct_string: str) -> int:
    """
    Convert an octal value to its decimal equivalent

    >>> oct_to_decimal("12")
    10
    >>> oct_to_decimal(" 12 ")
    10
    >>> oct_to_decimal("-45")
    -37
    >>> oct_to_decimal("2-0Fm")
    Traceback (most recent call last):
    ...
    ValueError: Non-octal value was passed to the function
    >>> oct_to_decimal("")
    Traceback (most recent call last):
    ...
    ValueError: Empty string was passed to the function
    >>> oct_to_decimal("19")
    Traceback (most recent call last):
    ...
    ValueError: Non-octal value was passed to the function
    """
    oct_string = str(oct_string).strip()
    if not oct_string:
        raise ValueError("Empty string was passed to the function")
    is_negative = oct_string[0] == "-"
    if is_negative:
        oct_string = oct_string[1:]
    if not oct_string.isdigit() or not all(0 <= int(char) <= 7 for char in oct_string):
        raise ValueError("Non-octal value was passed to the function")
    decimal_number = 0
    for char in oct_string:
        decimal_number = 8 * decimal_number + int(char)
    if is_negative:
        decimal_number = -decimal_number
    return decimal_number
def gmail_timestamp_to_epoch_seconds(epoch_time_ms: int) -> int:
    """
    Convert GMail `internalDate` into epoch time in seconds.

    Arguments:
        epoch_time_ms {int} -- the GMail `internalDate` epoch time which is in milliseconds.

    Returns:
        int -- epoch time in seconds
    """
    epoch_time_sec = int(int(epoch_time_ms) / 1000)
    return epoch_time_sec
def unique_shortened_url_string(has_url, generate_url, length=5):
    """
    Get a unique shortened URL string.

    Args:
        has_url: a function that takes a string and returns True if that string
            already exists (and can't be used).
        generate_url: a function to generate a URL. Presumably, not referentially transparent.
        length: the length of the string.
    """
    shortened = generate_url(length)
    while has_url(shortened):
        # Try again until we get a unique one.
        shortened = generate_url(length)
    return shortened
def format_time(secs):
    """Format times for Kokytos"""
    m, s = divmod(int(secs), 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    if d:  # Yes, vola is this shit :*(
        return "{}::{:02}:{:02}:{:02}".format(d, h, m, s)
    if h:
        return "{}:{:02}:{:02}".format(h, m, s)
    if m:
        return "{:02}:{:02}".format(m, s)
    return "{}s".format(s)
def to_bool(value: str) -> bool:
    """
    Converts a string argument to the bool representation (or throws a ValueError
    if the value is not one of '[Tt]rue' or '[Ff]alse').
    """
    if value in ["True", "true", True]:
        return True
    if value in ["False", "false", False]:
        return False
    raise ValueError(f"Value {value} cannot be converted into a bool")
def string_to_scopes(scopes):
    """Converts stringified scopes value to a list.

    Args:
        scopes (Union[Sequence, str]): The string of space-separated scopes to convert.

    Returns:
        Sequence(str): The separated scopes.
    """
    if not scopes:
        return []
    return scopes.split(' ')
def _fix_indra_edges(stmt_json_list):
    """Temporary fixes to latest INDRA representation."""
    for stmt in stmt_json_list:
        if stmt.get('type') == 'RasGef':
            stmt['type'] = 'Gef'
        if stmt.get('type') == 'RasGap':
            stmt['type'] = 'Gap'
    return stmt_json_list
def myreplace(old, new, s):
    """ Replace all occurrences of old with new in s. """
    return new.join(s.split(old))
def transpose_if_needed(*args, transpose=False, section=slice(None)):
    """
    This function takes a list of arrays and returns them (or a section of them),
    either untouched, or transposed, according to the parameter.

    Parameters
    ----------
    args : sequence of arrays
        The input arrays.
    transpose : bool
        If True, return transposed versions.
    section : slice object
        Section of output data to return.

    Returns
    -------
    list of arrays
        The input arrays, or their transposed versions.
    """
    return list(None if arg is None else arg.T[section] if transpose else arg[section]
                for arg in args)
def isResponseEmpty(response):
    """ Checks if the response from dynamodb doesn't contain any stops """
    if not response or len(response["Items"]) == 0 or "stops" not in response["Items"][0] or \
            len(response["Items"][0]["stops"]) == 0:
        return True
    return False
def get_changed_files(repo):
    """return a list of all of the changed files based on the last commit"""
    if repo is not None:
        return list(repo.commit(repo.head.object.hexsha).stats.files.keys())
    return []
def gradeHospital(array):
    """Take an array and grade the hospital based on the number of breaches."""
    if type(array) == list:
        numBreaches = len(list(filter(lambda x: x == 'breach', array)))
        if numBreaches == 0:
            return 'Safe'
        elif numBreaches > 3:
            return 'Urgent Inspection'
        else:
            return 'Trouble'
    else:
        print("Error formatting text:")
        print(array)
        exit(1)
def get_month_string(m, numeric=False):
    """ get month as string """
    if numeric:
        return str(m).zfill(2)
    d = {1: 'JAN', 2: 'FEB', 3: 'MAR', 4: 'APR', 5: 'MAY', 6: 'JUN',
         7: 'JUL', 8: 'AUG', 9: 'SEP', 10: 'OCT', 11: 'NOV', 12: 'DEC'}
    return d[m]
def rescale_phase(phase, max_phase=0.2):
    """ Shift phase points if greater than max_phase to negative """
    return [p - 1 if p > 1 - max_phase else p for p in phase]
def _denormalize_(value):
    """ converts value from [-1:+1] range to [0:255] """
    return int(round((value + 1.0) * 127.5))
def TanimotoDist(ex1, ex2, attrs):
    """
    >>> v1 = [0,1,0,1]
    >>> v2 = [1,0,1,0]
    >>> TanimotoDist(v1,v2,range(4))
    1.0
    >>> v2 = [1,0,1,1]
    >>> TanimotoDist(v1,v2,range(4))
    0.75
    >>> TanimotoDist(v2,v2,range(4))
    0.0

    # this tests Issue 122
    >>> v3 = [0,0,0,0]
    >>> TanimotoDist(v3,v3,range(4))
    1.0
    """
    inter = 0.0
    unin = 0.0
    for i in attrs:
        if (ex1[i] or ex2[i]):
            unin += 1
        if (ex1[i] and ex2[i]):
            inter += 1
    if (unin != 0.0):
        return (1 - inter / unin)
    else:
        return 1.0
def quoted(s):
    """ Given a string, return a quoted string as per RFC 3501, section 9."""
    if isinstance(s, str):
        return '"' + s.replace('\\', '\\\\').replace('"', '\\"') + '"'
    else:
        return b'"' + s.replace(b'\\', b'\\\\').replace(b'"', b'\\"') + b'"'
def error_503(error):
    """Maintenance."""
    return 'PixyShip is down for maintenance', 503
def _clean_review_date_string(date_string):
    """
    Cleans week_3.tweet date string.

    Args:
        date_string: A Review.review date string.

    Returns:
        A cleaned week_3.tweet date string in format
    """
    date_list = date_string.split()
    year = date_list[5]
    month = date_list[1]
    day = date_list[2]
    time = date_list[3]
    return year + " " + month + " " + day + " " + time
def inter_over_union(interval_1, interval_2):
    """Intersection over union for two intervals."""
    a, b = interval_1
    c, d = interval_2
    intersection = max(0, min(b, d) - max(a, c))
    if intersection > 0:
        union = max(b, d) - min(a, c)
    else:
        union = (b - a) + (d - c)
    return intersection / union
def type_to_extension(notebook_type):
    """ Return the file extension for a given notebook type """
    if notebook_type == 'jupyter':
        return 'ipynb'
    raise RuntimeError(f"unknown notebook type {notebook_type}")
def normalize_none(field_value):
    """ Helper function to normalize strings that should be None """
    none_strings = ['n/k', 'none known', 'unknown', 'no/unknown', '0x0']
    if not field_value:
        normalized_field_value = None
    elif type(field_value) == str and field_value.lower() in none_strings:
        normalized_field_value = None
    else:
        normalized_field_value = field_value
    return normalized_field_value
def _isredirect(values):
    """Check whether each rewrite value is a redirect."""
    return [v.split().pop() in ('redirect', 'permanent') for v in values]
def format_time(seconds, nanos):
    """ return float of seconds.nanos when nanos set, or seconds when not """
    if nanos:
        return float(seconds) + float(nanos) / 1000000000.0
    return int(seconds)
def type_(type_):
    """Returns the inferred 'real type\_' of the value (int, float, or str)."""
    try:
        type_ = float(type_)
        if type_.is_integer():
            return int
        if not type_.is_integer():
            return float
    except ValueError:
        return str
def is_valid_ohlc(ohlc_row):
    """ ohlc format: (open, high, low, close, volume) """
    _open, _high, _low, _close, _volume = ohlc_row
    in_bounds = lambda v: _low <= v <= _high
    return _high >= _low and in_bounds(_open) and in_bounds(_close) and _volume >= 0
def fib(num):
    """Compute fibonacci number."""
    if num in (0, 1):  # fib(0) == 0, fib(1) == 1
        return num
    return fib(num - 2) + fib(num - 1)
def opposite(k, Nj):
    """
    Computes 1-based index of the generator placed spatially opposite to k.
    k is also a 1-based index.
    """
    z = 1 + (-1 + k + Nj / 2) % Nj
    return int(z)
def calculate_acceleration(c, t):
    """
    Calculates an acceleration given a set of quintic coefficients and a time.

    Args
        c: List of coefficients generated by a quintic polynomial trajectory generator.
        t: Time at which to calculate the acceleration

    Returns
        Acceleration
    """
    return 20 * c[0] * t**3 + 12 * c[1] * t**2 + 6 * c[2] * t + 2 * c[3]
def normalize(rendered):
    """Return the input string without non-functional spaces or newlines."""
    out = ''.join([line.strip() for line in rendered.splitlines() if line.strip()])
    out = out.replace(', ', ',')
    return out
def _percent_diff(expected: float, predicted: float) -> float:
    """
    Calculate the percent difference.

    Args:
        expected: The value that the predicted should reproduce.
        predicted: The value that attempts to match the expected.

    Returns:
        The percent difference between the two values.
    """
    return (predicted - expected) / expected * 100.
def calc_elementary_y_intervals(claims):
    """Calculate the elementary y-intervals for a list of claims.

    Basically, just get an ordered list of all the unique y-coords, then calculate
    the lengths of the intervals between them.

    For the sample data, this should return (1, 2, 2, 2)

    Params:
        claims - Iterable containing Claim objects to process"""
    # Get the union of the sets of top and bottom y-coordinates
    unique_coords = {c.rect.y1 for c in claims} | {c.rect.y2 for c in claims}
    # We already know 0 is the origin so throw it out if it's there
    unique_coords.discard(0)
    # Turn the set into a sorted list
    unique_coords = sorted(list(unique_coords))
    # Subtract the previous item in the list from the current one to get the interval
    # between them.
    intervals, prev = [], 0
    for i in unique_coords:
        intervals.append(i - prev)
        prev = i
    return tuple(intervals)
def get_max_size(image_list):
    """
    Calculate the maximum height and width of all images in image_list.

    Parameters
    ----------
    image_list : list
        List of images
    """
    width_max = 0
    height_max = 0
    for image in image_list:
        image_size = image.size
        width_max = max(image_size[0], width_max)
        height_max = max(image_size[1], height_max)
    return width_max, height_max
def parse_args(args):
    """
    Parses the command line arguments.

    For now, the only arg is `-d`, which allows the user to select which database
    file that they would like to use. More options might be added in the future
    or this option might be changed.
    """
    if args[0] == "-d":
        return ' '.join(args[1:]).strip()
    else:
        return None
def search(list, n):
    """
    Binary search. The idea of binary search is to use the information that the
    array is sorted and reduce the time complexity.
    :param list:
    :param n:
    :return:
    """
    l = 0
    u = len(list) - 1
    while l <= u:
        mid = (l + u) // 2
        if list[mid] == n:
            globals()['pos'] = mid
            return True
        else:
            if list[mid] < n:
                l = mid + 1
            else:
                u = mid - 1
    return False
def make_enumitems(values):
    """Comma-separated names used in enum items.

    APPLE, BANANA
    """
    return ", ".join(values)
def drop_duplicates(object_list, **kwargs):
    """
    Dropping duplicates based on ID. By default id = pandaid.

    :param object_list: list of dicts
    :param kwargs: id: name of id param
    :return: unique_object_list: list of dicts
    """
    id_param = 'pandaid'
    if 'id' in kwargs:
        id_param = kwargs['id']

    object_dict = {}
    unique_object_list = []
    for obj in object_list:
        id = obj[id_param]
        drop_flag = False
        if id in object_dict:
            # This is a duplicate. Drop it.
            drop_flag = True
        else:
            object_dict[id] = 1
        if not drop_flag:
            unique_object_list.append(obj)
    return unique_object_list
def _getURIParts(uri):
    """ return tuple of (bucket, path) for given URI """
    if uri.startswith("s3://"):
        uri = uri[5:]
    if uri.startswith('/'):
        raise ValueError("invalid uri")
    n = uri.find('/')
    if n < 0:
        raise ValueError("invalid uri")
    fields = (uri[:n], uri[n:])
    return fields
def look4stringinline(s, line):
    """
    Look for string <s> in line, only comparing the first 4 characters of each word.
    This is because cloudy does the same. Case should not matter:

    >>> look4stringinline('punch pressure', 'PUNC FINAL PRES')
    True

    And it is OK to have strings with less than 4 characters:

    >>> look4stringinline('PDR', 'save pdr')
    True

    And here is an example that should fail:

    >>> look4stringinline('save whatever', 'save foobar')
    False
    """
    words = s.split()
    for word in words:
        if len(word) > 4:
            word = word[:4]
        if not word.upper() in line.upper():
            return False
    return True
def HighlightAlignedHtml(hyp, ref, err_type):
    """Generate a html element to highlight the difference between hyp and ref.

    Args:
        hyp: Hypothesis string.
        ref: Reference string.
        err_type: one of 'none', 'sub', 'del', 'ins'.

    Returns:
        a html string where disagreements are highlighted.
        Note `hyp` is highlighted in green, and marked with <del> </del>;
        `ref` is highlighted in yellow. If you want html with other styles,
        consider writing your own function.

    Raises:
        ValueError: if err_type is not among ['none', 'sub', 'del', 'ins'],
            or when err_type == 'none' and hyp != ref.
    """
    highlighted_html = ''
    if err_type == 'none':
        if hyp != ref:
            raise ValueError('hyp (%s) does not match ref (%s) for none error' % (hyp, ref))
        highlighted_html += '%s ' % hyp
    elif err_type == 'sub':
        highlighted_html += """<span style="background-color: yellow"> <del>%s</del></span><span style="background-color: yellow"> %s </span> """ % (hyp, ref)
    elif err_type == 'del':
        highlighted_html += """<span style="background-color: red"> %s </span> """ % (ref)
    elif err_type == 'ins':
        highlighted_html += """<span style="background-color: green"> <del>%s</del> </span> """ % (hyp)
    else:
        raise ValueError('unknown err_type ' + err_type)
    return highlighted_html