content
stringlengths
42
6.51k
def validate_collect(context, param, value):
    """Handle the report collection flag.

    Returns the supplied flag value unchanged, defaulting to True when
    the flag was not provided (value is None).
    """
    return True if value is None else value
def n50(lengths):
    """Return the N50 length statistic.

    The N50 is the smallest length at which the running total of the
    ascending-sorted lengths reaches at least half the overall total.

    :param lengths: iterable of sequence lengths
    :return: the N50 value (None for an empty input)
    """
    total = sum(lengths)
    running = 0
    for length in sorted(lengths):
        running += length
        if float(running) / total >= 0.5:
            return length
def _replace_booleans(source): """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise precedence is changed to boolean precedence. """ return source.replace('|', ' or ').replace('&', ' and ')
def get_rating_for_source(list_of_ratings, source):
    """Return the "Value" of the first rating whose "Source" equals *source*.

    Falls back to "N/A" when no rating matches.
    """
    matches = (r["Value"] for r in list_of_ratings if r["Source"] == source)
    return next(matches, "N/A")
def parse_lambda_tags_from_arn(arn):
    """Generate the list of lambda tags based on the data in the arn

    Args:
        arn (str): Lambda ARN.
            ex: arn:aws:lambda:us-east-1:123597598159:function:my-lambda[:optional-version]
    """
    # Keep only the first seven components; anything after is a
    # version/alias qualifier that we drop.
    parts = arn.split(":")[:7]
    _, _, _, region, account_id, _, function_name = parts
    return [
        "region:{}".format(region),
        "account_id:{}".format(account_id),
        "functionname:{}".format(function_name),
    ]
def make_repr_str(obj, fields):
    """Build a constructor-like repr string for *obj*.

    Produces "ClassName(name1=value1, ...)" from the supplied
    (name, value) pairs; only the class name is looked up on *obj*
    itself, everything else comes from *fields*.

    Parameters
    ----------
    obj: object
        The object to be represented.
    fields: list[tuple]
        Tuples of length 2: (field name, field value).

    Returns
    -------
    repr_str: str
        String representing the object.
    """
    parts = ['{}={}'.format(name, value) for name, value in fields]
    return '{}({})'.format(obj.__class__.__name__, ', '.join(parts))
def host_dict(host):
    """Convert a host model object to a result dict (empty dict for falsy input)."""
    return host.state if host else {}
def manhattan(rating1, rating2):
    """Compute the Manhattan distance between two rating dicts.

    Both arguments map item names to scores, e.g.
    {'The Strokes': 3.0, 'Slightly Stoopid': 2.5}.

    Returns -1 when the two users share no rated items.
    """
    shared = [key for key in rating1 if key in rating2]
    if not shared:
        return -1
    return sum(abs(rating1[key] - rating2[key]) for key in shared)
def genQrelStr(queryId, docId, relGrade):
    """Produce a string representing one QREL entry.

    :param queryId: question/query ID
    :param docId: relevant document/answer ID
    :param relGrade: relevance grade

    :return: a string representing one QREL entry
    """
    return ' '.join([f'{queryId}', '0', f'{docId}', f'{relGrade}'])
def _bop_not(obj): """Boolean not.""" return not bool(obj)
def cent(q: int, halfmod: int, logmod: int, val: int) -> int:
    """
    Constant-time centered remainder.

    :param q: Input modulus
    :type q: int
    :param val: Input value
    :type val: int
    :param halfmod: q//2
    :type halfmod: int
    :param logmod: ceil(log2(q))
    :type logmod: int
    :return: Value in the set [-(q//2), ..., q//2] equivalent to val mod q.
    :rtype: int
    """
    residue: int = val % q
    shifted: int = residue - halfmod - 1
    # Arithmetic right shift extracts the sign without branching,
    # keeping the reduction constant-time.
    return residue - ((shifted >> logmod) + 1) * q
def geometric(n, p):
    """
    Probability that the first success (or fail) occurs on trial n
    (geometric distribution).

    Args:
        n (int): the total number of trials
        p (float): the probability of success of 1 trial

    Returns:
        float: the probability of occurrence
    """
    failures = n - 1
    return p * (1 - p) ** failures
def get_threshold_approaching_datapoints(threshold, num_exp_points, num_lin_points):
    """
    Build measurement datapoints that approach *threshold*.

    First `num_exp_points` values grow exponentially from
    threshold / 2 ** num_exp_points up to threshold / 2; then
    `num_lin_points` values step linearly from threshold / 2 toward
    threshold.
    """
    exponential = [int(threshold / 2 ** i) for i in range(num_exp_points, 0, -1)]
    lin_step = int(threshold / (2 * num_lin_points + 1))
    half = int(threshold / 2)
    linear = [half + i * lin_step for i in range(1, num_lin_points + 1)]
    return exponential + linear
def GenerateHostIPs(boot_vm_index, count):
    """Logic must be aligned with large_scale_boot/boot_script.sh."""
    # vm_id is spread over the last two IP octets (256 hosts per /24).
    return [
        '10.0.{octet3}.{octet4}'.format(octet3=vm_id // 256, octet4=vm_id % 256)
        for vm_id in range(boot_vm_index, boot_vm_index + count)
    ]
def get_raw_smn1_cn(full_length_cn, smn1_fraction):
    """
    Return the per-site raw SMN1 CN: full-length SMN CN multiplied by
    each site's SMN1 fraction, rounded to two decimals.
    """
    return [round(full_length_cn * fraction, 2) for fraction in smn1_fraction]
def hsv2rgb_raw(hsv):
    """
    Converts an HSV tuple to RGB. Intended for internal use.
    You should use hsv2rgb_spectrum or hsv2rgb_rainbow instead.
    """
    # The hue wheel is split into 3 sections of 0x40 (64) values each.
    HSV_SECTION_3 = 0x40
    h, s, v = hsv
    # The brightness floor is minimum number that all of
    # R, G, and B will be set to.
    invsat = 255 - s
    brightness_floor = (v * invsat) // 256
    # The color amplitude is the maximum amount of R, G, and B
    # that will be added on top of the brightness_floor to
    # create the specific hue desired.
    color_amplitude = v - brightness_floor
    # figure out which section of the hue wheel we're in,
    # and how far offset we are within that section
    section = h // HSV_SECTION_3  # 0..2
    offset = h % HSV_SECTION_3  # 0..63
    rampup = offset
    rampdown = (HSV_SECTION_3 - 1) - offset
    # compute color-amplitude-scaled-down versions of rampup and rampdown
    rampup_amp_adj = (rampup * color_amplitude) // (256 // 4)
    rampdown_amp_adj = (rampdown * color_amplitude) // (256 // 4)
    # add brightness_floor offset to everything
    rampup_adj_with_floor = rampup_amp_adj + brightness_floor
    rampdown_adj_with_floor = rampdown_amp_adj + brightness_floor
    r, g, b = (0, 0, 0)
    # Each section of the wheel assigns the ramps to a different pair of
    # channels, with the remaining channel pinned to the floor.
    if section:
        if section == 1:
            # section 1: 0x40..0x7F
            r = brightness_floor
            g = rampdown_adj_with_floor
            b = rampup_adj_with_floor
        else:
            # section 2; 0x80..0xBF
            r = rampup_adj_with_floor
            g = brightness_floor
            b = rampdown_adj_with_floor
    else:
        # section 0: 0x00..0x3F
        r = rampdown_adj_with_floor
        g = rampup_adj_with_floor
        b = brightness_floor
    return (r, g, b)
def _matches_expected_response_header(request_headers, response_headers): """ Returns true if the Content-Type value of the response header matches the Accept value of the request header, false otherwise :param request_headers: the headers for a cosmos request :type request_headers: dict[str, str] :param response_headers: the headers for a cosmos response :type response_headers: dict[str, str] :return: true if the Content-Type value of the response header matches the Accept value of the request header, false otherwise :rtype: bool """ return (request_headers.get('Accept') in response_headers.get('Content-Type'))
def date(date):
    """Format a METAR-style date group (e.g. '141953Z') into its parts.

    Returns a dict with keys 'parsed', 'day', 'time', 'unit' and a
    human-readable 'string'; every value is the string 'None' when the
    input is 'None'.
    """
    if date == 'None':
        return {'parsed': 'None', 'day': 'None', 'time': 'None',
                'unit': 'None', 'string': 'None'}
    # e.g. '141953Z' -> day '14', time '1953', trailing unit dropped.
    day, time = date[:2], date[2:-1]
    day_num = int(day)
    # Ordinal suffix: 11th/12th/13th are special-cased, otherwise the
    # final digit decides (1st, 2nd, 3rd, everything else th).
    if day_num in (11, 12, 13):
        postfix = 'th'
    else:
        postfix = {'1': 'st', '2': 'nd', '3': 'rd'}.get(day[-1], 'th')
    string_repr = f"{day_num}{postfix} at {time[:2]}:{time[2:]}z"
    return {'parsed': date,
            'day': day,
            'time': time,
            'unit': 'Z',
            'string': string_repr}
def clean_bit(val: int, bitNo: int) -> int:
    """Return *val* with bit number *bitNo* cleared (set to '0')."""
    mask = 1 << bitNo
    return val & ~mask
def _generate_fake_input_arg_modulesdk(arg_spec): """Generate a fake argument value for inputs of module spec Args: arg_spec (dict) : argument specification from yaml module spec Returns: object: sample fake value Raises: NotImplementedError: if arg type is not implemented """ if arg_spec["type"] == "AzureMLDataset": return "/mnt/fakeinputdatasetpath" if arg_spec["type"] == "AnyDirectory": return "/mnt/fakeinputdirectorypath" if arg_spec["type"] == "AnyFile": return "/mnt/fakeinputfilepath/file.txt" if "default" in arg_spec: return arg_spec["default"] if arg_spec["type"] == "String": return "0" if arg_spec["type"] == "Integer": return "0" if arg_spec["type"] == "Boolean": return False if arg_spec["type"] == "Float": return "0.32" if arg_spec["type"] == "Enum": return arg_spec["options"][0] raise NotImplementedError( "input type {} is not implemented in our test suite yet".format( arg_spec["type"] ) )
def parse_float(arg, reverse=False):
    """Pass in string for forward, float for reverse."""
    if not reverse:
        return float(arg)
    return '%f' % arg
def product(val1, val2, val3):
    """Return the product of the three input values."""
    result = val1 * val2 * val3
    return result
def http_code_2_return_code(code):
    """Transform an HTTP status code into a system return code (1..255)."""
    shifted = code - 301
    return shifted % 255 + 1
def split_location_string(loc):
    """
    Return the parts of a location string (formerly used for a real
    estate unit location).

    The string is capitalized first, then split on commas with any
    following space removed.
    """
    normalized = loc.capitalize().replace(", ", ",")
    return normalized.split(",")
def scraped_item_source_data(json_data):
    """Matches scraped items with sources then returns list of tuples."""
    pairs = []
    for item in json_data['scraped_items']:
        for source in json_data['sources']:
            if item['source'] == source['domain']:
                pairs.append((item, source))
    return pairs
def nth(n):
    """Returns the string representation of the specified nth number.

    The teens are special-cased: 11 -> '11th', 12 -> '12th', 13 -> '13th'
    (the previous version produced '11st', '12nd', '13rd'); otherwise the
    final digit selects the suffix.
    """
    s = str(n)
    if s[-2:] in ('11', '12', '13'):
        return s + 'th'
    if s[-1] == '1':
        return s + 'st'
    if s[-1] == '2':
        return s + 'nd'
    if s[-1] == '3':
        return s + 'rd'
    return s + 'th'
def valid_chrom(arg):
    """Check if a string represents a valid chromosome"""
    arg = arg.lower()
    # Plain chromosome names: 'chr' prefix, bare numbers, sex/mito names.
    if arg.startswith('chr') or arg.isdigit() or arg in ('x', 'y', 'mt'):
        return True
    # Assembly scaffolds, GL contigs, and pseudo-autosomal regions.
    return arg.startswith(('scaffold', 'gl', 'par'))
def to_symbol(i):
    """Convert ids to text: 0 -> '', 11 -> '+', 12 -> '*', else str(i-1)."""
    special = {0: "", 11: "+", 12: "*"}
    if i in special:
        return special[i]
    return str(i - 1)
def validate(data):
    """Validate the result data.

    Returns True when the payload contains any features or public
    records, False for a None payload or one with neither.

    (The unused read of the "errors" key in the previous version has
    been removed, and the docstring typo "Valdiate" fixed.)
    """
    if data is None:
        return False
    features = data.get("features", dict())
    public_records = data.get("public_records", dict())
    return len(features) > 0 or len(public_records) > 0
def parse_gb_alleles_col(_bytes):
    """
    Parse 'alleles' column of table snp146 in UCSC genome browser
    """
    if _bytes == b'':
        return []
    # The column is comma-terminated ("A,C," splits to ['A', 'C', '']),
    # so the trailing empty element is dropped after splitting.
    parts = _bytes.decode("utf-8").split(',')
    return parts[:-1]
def is_prime(num):
    """
    Checks if the given number is a prime number or not.

    Trial division only needs to test divisors up to sqrt(num); the
    previous version scanned every value below num (O(n) per call).
    """
    if num <= 1:
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True
def greet_welcome(language):
    """
    Greets a given language in their language otherwise uses Welcome.

    :param language: string determining language to greet.
    :return: A greeting - if you have it in your database. Defaults to
        English if the language is not in the database, or in the event
        of an invalid input.
    """
    database = {'english': 'Welcome', 'czech': 'Vitejte', 'danish': 'Velkomst',
                'dutch': 'Welkom', 'estonian': 'Tere tulemast',
                'finnish': 'Tervetuloa', 'flemish': 'Welgekomen',
                'french': 'Bienvenue', 'german': 'Willkommen',
                'irish': 'Failte', 'italian': 'Benvenuto',
                'latvian': 'Gaidits', 'lithuanian': 'Laukiamas',
                'polish': 'Witamy', 'spanish': 'Bienvenido',
                'swedish': 'Valkommen', 'welsh': 'Croeso'}
    return database.get(language, "Welcome")
def color_to_hex(color):
    """ Converts a (R, G, B) tuple into a hex color string. """
    return "#{:02X}{:02X}{:02X}".format(*color)
def find_indices(lst, condition):
    """
    Find indices of lst satisfying the specified condition.

    Input: lst - list
    Input: condition - lambda function
    Output: list of indices - list
    """
    hits = []
    for idx, item in enumerate(lst):
        if condition(item):
            hits.append(idx)
    return hits
def sign(value):
    """Returns the sign of the given value as either 1 or -1.

    Raises ZeroDivisionError for 0 (the sign is undefined), matching the
    previous division-based implementation; unlike it, the result is now
    an exact int as documented rather than a float 1.0/-1.0.
    """
    if value == 0:
        raise ZeroDivisionError("sign is undefined for 0")
    return 1 if value > 0 else -1
def recv_exactly(sock, n):
    """Read exactly *n* bytes from *sock*.

    https://eli.thegreenplace.net/2011/08/02/length-prefix-framing-for-protocol-buffers
    Raise RuntimeError if the connection closed before n bytes were read.
    """
    chunks = []
    remaining = n
    while remaining > 0:
        data = sock.recv(remaining)
        if data == b'':
            raise RuntimeError('unexpected connection close')
        chunks.append(data)
        remaining -= len(data)
    return b"".join(chunks)
def joinAsStrs(objlist, sep=""):
    """Join a list of objects in their string representational form.

    Uses str.join instead of repeated concatenation, which is the
    idiomatic form and avoids quadratic string building; the behavior
    (including the empty-list result '') is unchanged.
    """
    return sep.join(str(o) for o in objlist)
def flatten(list_of_lists):
    """
    Flattens a list of lists:

        >>> flatten([[1, 2], [3, 4]])
        [1, 2, 3, 4]

    (The doctest previously read ``[[1, 2] [3, 4]]`` — a missing comma
    that would raise a TypeError if actually run.)

    I wish Python had this in the standard lib :(
    """
    return [x for y in list_of_lists for x in y]
def _get_account_idx_for_deploy(accounts: int, deploy_idx: int) -> int: """Returns account index to use for a particular transfer. :param accounts: Number of accounts within batch. :param deploy_idx: Index of deploy within batch. :returns: Ordinal index of account used to dispatch deploy. """ return deploy_idx if accounts == 0 else \ deploy_idx % accounts or accounts
def encodeNodeList(nodeList):
    """Converts a nodeList into data bytes.

    nodeList -- a list of nodes in the Kaolin nodeList format

    Returns a list of byte values representing the encoding.
    """
    encodedList = []
    for node in nodeList:
        # Each node packs into 24 bits:
        #   2 bits routing | 2 bits connection | 2 bits orientation |
        #   9 bits Y position | 9 bits X position
        encodedValue = 0
        #Routing (top two bits)
        encodedValue += {'endpoint': 0x000000,  #0b00000000 00000000 00000000
                         'midpoint': 0x400000,  #0b01000000 00000000 00000000
                         'tee': 0x800000,       #0b10000000 00000000 00000000
                         'isolated': 0xC00000,  #0b11000000 00000000 00000000
                         }[node['routing']]
        #Connection (next two bits)
        encodedValue += {'none': 0x000000,  #0b00000000 00000000 00000000
                         'pad': 0x100000,   #0b00010000 00000000 00000000
                         'smd': 0x200000,   #0b00100000 00000000 00000000
                         'hole': 0x300000,  #0b00110000 00000000 00000000
                         }[node['connection']]
        #Orientation (next two bits)
        encodedValue += {'vertical': 0x000000,    #0b00000000 00000000 00000000
                         'horizontal': 0x040000,  #0b00000100 00000000 00000000
                         'compressed': 0x080000,  #0b00001000 00000000 00000000
                         'none': 0x0C0000,        #0b00001100 00000000 00000000
                         }[node['orientation']]
        #Y Position
        encodedValue += (int(node["yPosition"]*100))<<9  # 9 bits, 0.01" resolution
        #X Position
        encodedValue += (int(node["xPosition"]*100))  # lower nine bits, 0.01" resolution
        # Split the 24-bit value into three bytes, big endian.
        encodedList += [(encodedValue & 0xFF0000)>>16,
                        (encodedValue & 0x00FF00)>>8,
                        (encodedValue & 0x0000FF)]
    return encodedList
def mscore(score_u, score_t):
    """ Return 2 for win, 1 for tie, 0 for loss, -1 for Forfeit """
    # A negative score on either side marks a forfeit.
    if min(score_u, score_t) < 0:
        return -1
    if score_u == score_t:
        return 1
    return 2 if score_u > score_t else 0
def twos_comp_combine(msb, lsb):
    """Combine two bytes into a signed 16-bit two's-complement value.

    Reproduced from post by davek,
    http://forum.pololu.com/viewtopic.php?f=32&t=9370
    """
    raw = 256 * msb + lsb
    # Values at or above 2^15 represent negatives in two's complement.
    return raw - 65536 if raw >= 32768 else raw
def is_associative(value):
    """
    Checks if `value` is an associative object meaning that it can be
    accessed via an index or key.

    Args:
        value (mixed): Value to check.

    Returns:
        bool: Whether `value` is associative.

    Example:

        >>> is_associative([])
        True
        >>> is_associative({})
        True
        >>> is_associative(1)
        False
        >>> is_associative(True)
        False

    .. versionadded:: 2.0.0
    """
    # Anything supporting subscription exposes __getitem__.
    return hasattr(value, "__getitem__")
def number_of_stutters(s):
    """
    Returns the number of times a letter is repeated twice-in-a-row in
    the given string s.  Side effects: None.

    Examples:
      -- number_of_stutters('xhhbrrs') returns 2
      -- number_of_stutters('xxxx') returns 3
      -- number_of_stutters('xaxaxa') returns 0
      -- number_of_stutters('xxx yyy xxxx') returns 7
      -- number_of_stutters('') returns 0

    Type hints:
      :type s: str
    """
    # Pair each character with its predecessor and count equal pairs.
    return sum(1 for prev, curr in zip(s, s[1:]) if prev == curr)
def clean_hanging_newline(t):
    """
    Many editors will silently add a newline to the final line of a
    document (I'm looking at you, Vim). This function fixes this common
    problem at the risk of removing a hanging newline in the rare cases
    where the user actually intends it.
    """
    ends_with_newline = bool(t) and t[-1] == "\n"
    return t[:-1] if ends_with_newline else t
def exp_string(expansion):
    """Return the string to be expanded (a bare string, or the first
    element of a sequence)."""
    return expansion if isinstance(expansion, str) else expansion[0]
def get_cmap_ks_score(genes_signature, genes_profile): """ genes_signature: ordered list of genes in the signature, aka tags (based on correlation or differential expression) genes_profile: ordered list of (up and down) genes in the instance/perturbation profile """ genes_signature = set(genes_signature) t = float(len(set(genes_profile) & genes_signature)) n = float(len(genes_profile)) j = 0 values_a = [] values_b = [] for i, gene in enumerate(genes_profile): if gene in genes_signature: j += 1 v_j = i + 1 a = (j / t) - (v_j / n) b = (v_j / n) - ((j-1) / t) values_a.append(a) values_b.append(b) if len(values_a) == 0 or len(values_b) == 0: return 0.0 a = max(values_a) b = max(values_b) if a >= b: ks = a else: ks = -b return ks
def __assignColorToMat(matID: int, mat_to_col: dict, matcolor: list): """ PRIVATE FUNCTION. Used to assign different colors for each material model assign to the fiber section. @param matID (int): ID of the material model. @param mat_to_col (dict): Dictionary to check with material model has which color. @param matcolor (list): List of colors. @returns dict: Updated dictionary. """ if not matID in mat_to_col: if len(mat_to_col) >= len(matcolor): print("Warning: not enough colors defined for fiber section plot (white used)") mat_to_col[matID] = 'w' else: mat_to_col[matID] = matcolor[len(mat_to_col)] return mat_to_col
def insert_shift_array_sorted(arr, val):
    """Insert *val* into sorted list *arr* at its sorted position,
    returning a new list (the input is not mutated).

    Fixes two defects in the previous version: the search used element
    values as indices (``arr[x-1]``) instead of comparing elements, and
    a value >= every element was silently dropped instead of appended.
    """
    for i, item in enumerate(arr):
        if item > val:
            return arr[:i] + [val] + arr[i:]
    # val is >= every existing element: goes at the end.
    return arr + [val]
def convert_from_str(strval):
    """Returns param as int, float, None, or string, whichever is best."""
    for caster in (int, float):
        try:
            return caster(strval)
        except ValueError:
            pass
    # Not numeric: map the literal 'None' to None, keep everything else.
    return None if strval == 'None' else strval
def flatten_dict(data, parent_name="", sep=".", key_converter=None, skip_key_check=None):
    """
    Flatten a nested dictionary to a single layer with child keys joined
    by *sep*.

    Example:
        input:
            parent_name = "root"
            data = {"parent_obj": {"child_obj": {"grand_child_obj": "my value"}},
                    "foo": "bar"}
        output:
            {"root.parent_obj.child_obj.grand_child_obj": "my value",
             "root.foo": "bar"}

    key_converter, if given, rewrites each leaf key; skip_key_check, if
    given, returns True for keys whose dict values should be kept
    whole rather than descended into.
    """
    if not skip_key_check:
        skip_key_check = lambda *_: False
    result = {}
    for key, value in data.items():
        full_key = f"{parent_name}{sep}{key}" if parent_name else key
        if isinstance(value, dict) and not skip_key_check(full_key):
            nested = flatten_dict(
                value,
                parent_name=full_key,
                sep=sep,
                key_converter=key_converter,
                skip_key_check=skip_key_check,
            )
            result.update(nested)
        else:
            leaf_key = key_converter(full_key) if key_converter else full_key
            result[leaf_key] = value
    return result
def save_value_to_dict(dictionary, key, value):
    """
    Prevents saving empty values (such as None, [], {}) into dictionary.
    Lists containing any None element are also rejected.

    :param dictionary: dictionary, where value will be saved
    :param key: key of dictionary
    :param value: value of dictionary
    :return: True if value was saved, False otherwise
    """
    if not value:
        return False
    if isinstance(value, list) and any(item is None for item in value):
        return False
    dictionary[key] = value
    return True
def get_named_parameters(models):
    """Get all model parameters recursively.

    *models* may be a single model, a list of models, or a dict of
    models; nested containers are flattened into one parameter list.
    """
    if isinstance(models, list):
        children = models
    elif isinstance(models, dict):
        children = list(models.values())
    else:
        # A single pytorch model: collect its own named parameters.
        return list(models.named_parameters())
    collected = []
    for child in children:
        collected.extend(get_named_parameters(child))
    return collected
def pkginfo_unicode(pkg_info, field):
    """Hack to coax Unicode out of an email Message() - Python 3.3+"""
    text = pkg_info[field]
    field = field.lower()
    if not isinstance(text, str):
        if not hasattr(pkg_info, 'raw_items'):  # Python 3.2
            return str(text)
        for item in pkg_info.raw_items():
            if item[0].lower() == field:
                # The header's raw bytes were smuggled through as ASCII
                # surrogates; re-encode with surrogateescape to recover
                # the original bytes, then decode them as UTF-8.
                text = item[1].encode('ascii', 'surrogateescape') \
                    .decode('utf-8')
                break
    return text
def cal_shipping_fee(subtotal, total_count):
    """
    Shipping fee calculation simplified as much as possible:
    free when the subtotal exceeds 10000, otherwise 500 per item.
    """
    return 0 if subtotal > 10000 else total_count * 500
def get_closest_ns_latency_from_config_file(config_file):
    """Returns closest ns after parsing pl_config file.

    Each usable line has 'yes' in column 1 and a latency in column 4;
    -1.0 latencies are placeholders and are skipped.  Returns -1.0 when
    no usable latency is found.

    The file is now closed via a context manager (it previously leaked),
    and only parsing errors (missing columns / non-numeric latency) are
    swallowed instead of a bare ``except``.
    """
    least_latency = -1.0
    with open(config_file) as f:
        for line in f:
            tokens = line.split()
            try:
                if tokens[1] != 'yes':
                    continue
                latency = float(tokens[4])
            except (IndexError, ValueError):
                continue
            if latency == -1.0:
                continue
            if least_latency == -1.0 or least_latency > latency:
                least_latency = latency
    return least_latency
def create_node(kind=None, args=None):
    """Create a kind, args Celery Script node."""
    return {'kind': kind, 'args': args}
def implicit_euler(xs, h, y0, f, **derivatives):
    """Implicit Euler.

    NOTE(review): this is actually an explicit predictor-corrector step
    (the next value is approximated with one forward-Euler prediction
    rather than solved implicitly), and **derivatives is accepted but
    never used -- confirm intent with callers.
    """
    ys = [y0]
    for x_curr, x_next in zip(xs, xs[1:]):
        y_curr = ys[-1]
        # Forward-Euler prediction, then evaluate f at the next node.
        predicted = y_curr + f(x_curr, y_curr) * h
        ys.append(y_curr + f(x_next, predicted) * h)
    return ys
def minCostClimbingStairsA(cost):
    """
    Minimum total cost to reach the top of the stairs, taking 1 or 2
    steps at a time and paying each step's cost on departure.

    :type cost: List[int]
    :rtype: int
    """
    # Rolling DP over the cost list from the end: `ahead` is the best
    # cost from the next step, `behind` from the one after.
    ahead = behind = 0
    for step_cost in reversed(cost):
        ahead, behind = step_cost + min(ahead, behind), ahead
    return min(ahead, behind)
def null2none(s):
    """Null string (including all blank) -> None."""
    return s if s.strip() else None
def find_proteins(seqs, peptide):
    """Find the peptide in the sequences db and return the corresponding
    protein names.

    :param seqs: mapping of sequence string -> list of protein names
    :param peptide: substring searched for in each sequence
    """
    return [protein
            for sequence, proteins in seqs.items()
            if peptide in sequence
            for protein in proteins]
def hash_id(*args) -> str:
    """A simple hashing function to generate `id`s given any number of
    string or number inputs.

    Note: the previous annotation ``*args: str or int`` evaluated to just
    ``str`` at runtime (``or`` on two classes returns the first truthy
    operand), so it was misleading and has been removed; arguments may be
    strings or numbers.
    """
    return ":".join(str(val) for val in args)
def aggregate_metrics(metric_list):
    """
    Make aggregation for list of metrics.

    :param metric_list: List of latest metrics
    :return: Aggregated dict of metrics for CloudWatch
        (maximum, minimum, samplecount, sum, average)
    """
    count = len(metric_list)
    total = sum(metric_list)
    return {
        'maximum': max(metric_list),
        'minimum': min(metric_list),
        'samplecount': count,
        'sum': round(float(total), 2),
        'average': round(float(total / count), 2),
    }
def reverseString(s):
    """
    Reverse the list of characters in place (two-pointer swap).

    :type s: List[str]
    :rtype: None
    Do not return anything, modify s in-place instead.
    (The list is also returned for convenience.)
    """
    left, right = 0, len(s) - 1
    while left <= right:
        s[left], s[right] = s[right], s[left]
        left += 1
        right -= 1
    return s
def voxel_content_1(batch_idx: int, mesh_idx: int) -> int:
    """Sets the voxel content to 1 (both indices are intentionally unused)."""
    del batch_idx, mesh_idx
    return 1
def get_sentiment(score):
    """
    Map a compound score to a sentiment:
    1 positive (>= 0.05), -1 negative (<= -0.05), 0 neutral otherwise.
    """
    if score >= 0.05:
        return 1
    if score <= -0.05:
        return -1
    return 0
def beacon_link(variant_obj, build=None):
    """Compose link to Beacon Network.

    :param variant_obj: mapping with 'position', 'chromosome',
        'alternative' and 'reference' keys used to fill the query string
    :param build: genome build number; defaults to 37, and GRCh37 is
        always emitted because Beacon does not support build 38 yet
    """
    build = build or 37
    url_template = ("https://beacon-network.org/#/search?pos={this[position]}&"
                    "chrom={this[chromosome]}&allele={this[alternative]}&"
                    "ref={this[reference]}&rs=GRCh37")
    # beacon does not support build 38 at the moment
    # if build == '38':
    #     url_template = ("https://beacon-network.org/#/search?pos={this[position]}&"
    #                     "chrom={this[chromosome]}&allele={this[alternative]}&"
    #                     "ref={this[reference]}&rs=GRCh38")
    return url_template.format(this=variant_obj)
def pad_sentences(sentences, padding_word=""):
    """
    Pads all sentences to be the length of the longest sentence.
    Returns padded sentences.
    """
    target_length = max(len(sentence) for sentence in sentences)
    return [sentence + [padding_word] * (target_length - len(sentence))
            for sentence in sentences]
def compute_area_signed(pr) -> float:
    """Return the signed area enclosed by a ring using the linear time
    algorithm at http://www.cgafaq.info/wiki/Polygon_Area. A value >= 0
    indicates a counter-clockwise oriented ring."""
    # NOTE(review): the sum starts at index 1 and wraps via the element
    # appended below (xs[1]/ys[1], not [0]), which assumes the ring is
    # closed (pr[0] == pr[-1]) -- confirm with callers.
    xs, ys = map(list, zip(*pr))
    xs.append(xs[1])
    ys.append(ys[1])
    return sum(xs[i] * (ys[i + 1] - ys[i - 1]) for i in range(1, len(pr))) / 2.0
def calc_pages(limit, count):
    """
    Calculate number of pages required for full results set.

    This is ceiling division of count by limit.  The previous remainder
    test used ``limit % count`` (operands swapped), so e.g.
    calc_pages(10, 5) returned 0 and calc_pages(10, 20) returned 3.
    """
    # Negated floor division is ceiling division for positive limits.
    return -(-count // limit)
def filter_by_list(data, key_func, term_list, include=True):
    """
    Filter *data* by substring matches against *term_list*.

    inputs:
        data: iterable to be filtered
        key_func: function that takes a member of data and returns the
            string to compare with term_list
        term_list: strings tested to see if they are substrings of
            key_func's return value
        include: if True, matches are included in the output, else
            matches are excluded
    """
    kept = []
    for item in data:
        matched = any(term in key_func(item) for term in term_list)
        if matched == bool(include):
            kept.append(item)
    return kept
def time_in_range(start, end, x):
    """Return true if x is in the half-open range [start, end).

    NOTE(review): *end* is exclusive (the comparison is ``x < end``),
    despite what earlier documentation implied.  When start > end the
    range is treated as wrapping around (e.g. across midnight):
    x >= start or x < end.
    """
    if start <= end:
        return start <= x < end
    else:
        return start <= x or x < end
def build_nodestring(nodelist):
    """Convert get_nodelist output to string.

    Args:
        nodelist (list): List of hostnames available (see get_nodelist())

    Returns:
        str: Comma-separated string representation of nodelist

    Uses ','.join instead of concatenating "name," fragments and slicing
    off the trailing comma; the result (including '' for an empty list)
    is unchanged.
    """
    return ','.join(nodelist)
def evalt(t):
    """
    Evaluate tuple if unevaluated

    >>> from logpy.util import evalt
    >>> add = lambda x, y: x + y
    >>> evalt((add, 2, 3))
    5
    >>> evalt(add(2, 3))
    5
    """
    is_unevaluated = isinstance(t, tuple) and len(t) >= 1 and callable(t[0])
    if not is_unevaluated:
        return t
    func, *call_args = t
    return func(*call_args)
def binary_to_int(bin_num):
    """
    Convert a binary-number string to int.

    Parameters
    ----------
    bin_num : str
        Binary number. E.g. bin(100) = '0b1100100'
        (with or without the '0b' prefix)

    Returns
    -------
    int
        Integer (converted from binary)
    """
    base = 2
    return int(bin_num, base)
def NormalizePath(path):
    """Returns a path normalized to how we write DEPS rules and compare
    paths: lowercase, with backslashes replaced by forward slashes.
    """
    lowered = path.lower()
    return lowered.replace('\\', '/')
def _trapz_simps_overlap2(dy, dx): """Correction term in the squared error when combining trapezoidal and Simpson's rule. Only exact for equal spacing *dx* left and right of *dy*. err^2 = (h/6)^2 ((3 Df0)^2 + ((3+2)Df1)^2 + (8Df2)^2 + (4Df3)^2 + ...) |-- trapz ---| |--------- Simpson --------------- (3+2)^2 = 3^2 + 2^2 + 12 <--- 12 is the "overlap" correction """ return (dx/6)**2 * 12 * dy**2
def sum_terms_arithmetic_progression(start: int, stop: int, step: int) -> int:
    """Sum of terms of an arithmetic progression.

    An efficient way to get the sum of all multiples of a number up to a
    certain stop.

    Sum of all multiples of 3 up to 10 (3 + 6 + 9):
    >>> sum_terms_arithmetic_progression(3, 10, 3)
    18
    """
    try:
        last = stop - stop % step
    except ZeroDivisionError:
        # step == 0: degenerate progression, just the start term.
        return start
    # When stop itself is a multiple, the progression stops one step short.
    if stop == last:
        last -= step
    num_of_terms = (last - start) // step + 1
    # Gauss pairing: (first + last) * n / 2.
    return (start + last) * num_of_terms // 2
def pad(seq, target_length, padding=None):
    """Extend the sequence *seq* with *padding* (default: None) if the
    length of *seq* is less than *target_length*.  Modifies *seq* in
    place.

    >>> pad([], 5, 1)
    [1, 1, 1, 1, 1]
    >>> pad([1, 2, 3], 7)
    [1, 2, 3, None, None, None, None]
    >>> pad([1, 2, 3], 2)
    [1, 2, 3]

    :param list seq: list to pad
    :param int target_length: length to pad *seq* to
    :param padding: value to pad *seq* with
    :return: *seq* with appropriate padding
    """
    missing = target_length - len(seq)
    if missing > 0:
        seq.extend([padding] * missing)
    return seq
def dot_appended_param(param_key, reverse=False):
    """Returns ``param_key`` string, ensuring that it ends with ``'.'``.

    Set ``reverse`` to ``True`` (default ``False``) to reverse this
    behavior, ensuring that ``param_key`` *does not* end with ``'.'``.
    """
    dotted = param_key if param_key.endswith(".") else param_key + "."
    # dotted is guaranteed to end with '.', so reversing just drops it.
    return dotted[:-1] if reverse else dotted
def calculate_frequencies(tokens: list) -> dict:
    """
    Calculates frequencies of given tokens

    :param tokens: a list of tokens without stop words
    :return: a dictionary with frequencies
    e.g. tokens = ['weather', 'sunny', 'man', 'happy']
    --> {'weather': 1, 'sunny': 1, 'man': 1, 'happy': 1}
    """
    if not isinstance(tokens, list):
        return {}
    if len(tokens) > 0 and not isinstance(tokens[0], str):
        return {}
    # Counter makes one pass over the list instead of calling
    # tokens.count once per distinct word (which was O(n^2)).
    from collections import Counter
    return dict(Counter(tokens))
def percentage_scores(hits_list, p_list, nr_of_groups):
    """Calculates the percent score which is the cumulative number of
    hits below a given percentage.

    The function counts the number of hits in the hits_list contains
    below a percentage of the maximum hit score (nr_of_groups). It then
    subtracts the expected value from that accumulated count value,
    such that random input should give scores around zero.
    If p_list = "all" then, instead of percentages, all classes are
    enumerated and hits counted.

    Parameters
    ----------
    hits_list : list
        long list of hits that correspond to index of the replicate in
        the list of neighbors
    p_list : list or 'all'
        list of percentages to score. Percentages are given as integers,
        ie 50 is 50%.
    nr_of_groups : int
        number of groups that add_hit_rank was applied to.

    Returns
    -------
    d : dict
        dictionary with percentages and scores or a full list of indexes
        and scores
    """
    # get the number of compounds in this dataset
    d = {}
    total_hits = len(hits_list)
    if p_list == "all":
        # diff is the accumulated difference between the amount of hits
        # and the expected hit number per bin
        diff = 0
        # for a random distribution, we expect each bin to have an equal
        # number of hits
        average_bin = total_hits / nr_of_groups
        for n in range(nr_of_groups):
            # count the number of hits that had the index n
            hits_n = hits_list.count(n)
            diff += hits_n - average_bin
            d[n] = diff
    else:
        # calculate the accumulated hit score at different percentages;
        # the difference to the branch above is that we now calculate the
        # score only for certain percentage points
        for p in p_list:
            # calculate the hits that are expected for a random distribution
            expected_hits = int(p * total_hits / 100)
            # calculate the value referring to the percentage of the full
            # range of classes -- in other words: how many hits are in the
            # top X% of closest neighbors
            p_value = p * nr_of_groups / 100
            # calculate how many hits are below the p_value
            accumulated_hits_n = len([i for i in hits_list if i <= p_value])
            d[p] = accumulated_hits_n - expected_hits
            # sanity check: at 100% every hit is counted, so the score
            # should cancel to exactly zero
            if p == 100 and d[p] != 0:
                print(
                    "The percent score at 100% is {}, it should be 0 tho. Check your groupby_columns".format(d[p])
                )
    return d
def _typetransform(data, name): """ Transform the given data to either a list of string or a list of floats. :param data: list of unknown type to be converted :param name: name of the property related to this data, to be used in error messages """ assert isinstance(data, list) and len(data) > 0 if isinstance(data[0], str): return list(map(str, data)) elif isinstance(data[0], bytes): return list(map(lambda u: u.decode("utf8"), data)) else: try: return [float(value) for value in data] except Exception: raise Exception( f"unsupported type in property '{name}' values: " + "should be string or number" )
def collapse_ranges (ranges):
    """
    Collapse a set of ranges (pairs of floats ``[start, end]`` with
    ``start <= end``) into the smallest possible set of ranges which cover
    the same, but not more nor less, of the domain.

    The ranges are sorted by their starting point; each subsequent range
    either overlaps the current merged range (``start_2 <= end_1``) and
    extends it to ``[start_1, max(end_1, end_2)]``, or is disjoint — and
    since every later range starts even further right, the current merged
    range is then final and a new one begins.

    :param ranges: iterable of ``[start, end]`` pairs with start <= end
    :returns: list of merged ``[start, end]`` pairs, sorted by start;
        empty list for empty input. The input and its elements are left
        unmodified.
    """
    merged = list()

    # return empty list if given an empty list
    if not ranges:
        return merged

    START = 0
    END   = 1

    # sort ranges into a copy list
    ordered = sorted (ranges, key=lambda x: x[START])

    # BUGFIX: work on a copy of each range -- the original implementation
    # wrote `base[END] = ...` directly into the caller's range objects,
    # mutating the input as a side effect.
    current = list(ordered[0])           # smallest range

    for candidate in ordered[1:]:
        if candidate[START] <= current[END]:
            # ranges overlap -- extend the merged range
            current[END] = max(current[END], candidate[END])
        else:
            # ranges don't overlap -- current is final, candidate becomes
            # the new merged range
            merged.append(current)
            current = list(candidate)

    # termination: push last merged range to the result
    merged.append(current)

    return merged
def is_int(value):
    """Invalid type. Expected `int`"""
    # NOTE(review): the docstring reads like a validation error message
    # (some validator frameworks surface __doc__ on failure), so it is
    # deliberately left untouched -- confirm against the caller.
    # Caveat: isinstance(True, int) is True, so bools also pass this check.
    return isinstance(value, int)
def wildcard_to_regexp(instring):
    """
    Converts a player-supplied string that may have wildcards in it to a
    regular-expression pattern string. This is useful for name matching.

    ``*`` matches any run of characters (captured as a group) and ``?``
    matches exactly one character. A leading/trailing ``*`` suppresses the
    ``^``/``$`` anchor on that side.

    NOTE(review): other regex metacharacters (``.``, ``+``, ``[``, ...)
    are NOT escaped, so e.g. "a.b" also matches "axb" -- confirm whether
    that is intended before tightening with re.escape.

    instring: (string) A string that may potentially contain wildcards
        (* or ?).
    Returns the pattern string; an empty input yields "^$", which matches
    only the empty string.
    """
    # BUGFIX: guard against the empty string -- the original indexed
    # instring[0] / instring[-1] unconditionally and raised IndexError.
    if not instring:
        return "^$"

    regexp_string = ""

    # If the string starts with an asterisk, we can't impose the beginning of
    # string (^) limiter.
    if instring[0] != "*":
        regexp_string += "^"

    # Replace any occurrences of * or ? with the appropriate groups.
    regexp_string += instring.replace("*", "(.*)").replace("?", "(.{1})")

    # If there's an asterisk at the end of the string, we can't impose the
    # end of string ($) limiter.
    if instring[-1] != "*":
        regexp_string += "$"

    return regexp_string
def site_author(request, registry, settings):
    """Expose the ``websauna.site_author`` config variable to templates.

    Used in the footer to display the site owner. The ``request`` and
    ``registry`` arguments are part of the template-variable signature and
    are not consulted here.
    """
    author = settings["websauna.site_author"]
    return author
def is_operator(token):
    """Return True when *token* is one of the boolean operators ``&`` or ``|``."""
    return token in ("&", "|")
def get_membind(strict_memory_containment):
    """Return the memory-binding policy name to use.

    "local" when strict memory containment is requested, "none" otherwise.
    """
    return "local" if strict_memory_containment else "none"
def readonly_url_field_as_twoline_table_row(field_label, field_value):
    """See readonly_field_as_table_row().

    Packs the label/value pair into the context dict expected by the
    two-line table-row template.
    """
    return dict(field_label=field_label, field_value=field_value)
def Decode(x):
    """ Return *x* decoded from UTF-8 bytes if possible, otherwise unchanged """
    try:
        return x.decode('utf-8')
    except AttributeError:
        # Not a bytes-like object (no .decode) -- pass it through untouched.
        return x
def dict_hex_finder(single_hex_dict: dict):
    """Pull the hex key, its file list, and the file count out of the dict.

    - Args:
        - single_hex_dict (dict): dict with one hex, plus various other keys
    - Returns:
        - 0 [str]: hex
        - 1 [list]: files
        - 2 [int]: number of files
    """
    hex_val, files, num_files = '', '', 0
    bookkeeping = ('action', 'sub-action')
    for key, value in single_hex_dict.items():
        if key in bookkeeping:
            # Metadata keys carry no hex/file information.
            continue
        if key == 'num_files':
            num_files = value
        else:
            # Any remaining key is the hex itself, mapped to its file list.
            hex_val, files = key, value
    return hex_val, files, num_files
def sum_htcl_branches(nested_dict, adj_frac, sum_val):
    """Sum all leaf node values under a given nested dict level.

    Walks the dict tree in sorted key order, skipping everything under a
    'stock' key, and accumulates each leaf whose key already appears in
    sum_val, scaled by the matching adjustment fraction.

    Args:
        nested_dict (dict): The nested dict with values to sum.
        adj_frac (dict): Adjustment fraction to apply to values.
        sum_val (dict): Summed values (updated in place).

    Returns:
        Summed total values, each adjusted by the input fraction.
    """
    for key in sorted(nested_dict):
        value = nested_dict[key]
        # Everything under a 'stock' key is excluded from the summation.
        if key == "stock":
            continue
        if isinstance(value, dict):
            # Descend one level; sum_val is shared across the recursion.
            sum_htcl_branches(value, adj_frac, sum_val)
        elif key in sum_val:
            sum_val[key] += value * adj_frac[key]
    return sum_val
def string_to_type(string):
    """Convert user input to types.

    Useful for passing user input to ParseKey.
    Accept str, int, float, or bool (case-insensitive aliases).
    """
    # Dispatch table of accepted aliases (all compared lowercase).
    aliases = {
        "string": str, "str": str,
        "float": float, "number": float, "decimal": float,
        "integer": int, "int": int,
        "boolean": bool, "bool": bool,
    }
    key = string.lower()
    if key not in aliases:
        raise RuntimeError("Invalid input. Enter a type str, int, float, or bool.")
    return aliases[key]
def predict(hypothesis, example):
    """Predicts the outcome of the hypothesis (True/False)"""
    # Every position must be a wildcard ('?') or equal the example's
    # attribute; a '0' anywhere rejects every example.
    return all(
        h != '0' and (h == '?' or h == a)
        for h, a in zip(hypothesis, example)
    )
def tree_reduce(f, tr) :
    """
    f is a function of the form f(rootData, subTreeOutputsList) -> treeOutput
    and this applies it recursively to the tree.

    A tree is a pair (rootData, children) where children is an iterable of
    sub-trees of the same shape.
    """
    # Reduce each child first, then fold the root over the child outputs.
    reduced_children = tuple(tree_reduce(f, child) for child in tr[1])
    return f(tr[0], reduced_children)
def mul(mean1, var1, mean2, var2):
    """
    Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
    results as a tuple (mean, var).

    Strictly speaking the product of two Gaussian PDFs is a Gaussian
    function, not Gaussian PDF. It is, however, proportional to a
    Gaussian PDF, so it is safe to treat the output as a PDF for any
    filter using Bayes equation, which normalizes the result anyway.

    Parameters
    ----------
    mean1 : scalar
        mean of first Gaussian
    var1 : scalar
        variance of first Gaussian
    mean2 : scalar
        mean of second Gaussian
    var2 : scalar
        variance of second Gaussian

    Returns
    -------
    mean : scalar
        mean of product
    var : scalar
        variance of product

    Examples
    --------
    >>> mul(1, 2, 3, 4)
    (1.6666666666666667, 1.3333333333333333)

    References
    ----------
    Bromily. "Products and Convolutions of Gaussian Probability
    Functions", Tina Memo No. 2003-003.
    http://www.tina-vision.net/docs/memos/2003-003.pdf
    """
    # Precision-weighted average of the means, and harmonic combination of
    # the variances; both expressions kept in the original floating-point
    # form so results are bit-identical.
    product_mean = (var1*mean2 + var2*mean1) / (var1 + var2)
    product_var = 1 / (1/var1 + 1/var2)
    return (product_mean, product_var)
def qmake_quote(path):
    """ Return a path quoted for qmake if it contains spaces.  path is the
    path.
    """
    # Paths without spaces need no quoting.
    if ' ' not in path:
        return path
    return '$$quote(%s)' % path
def dig_deeper(entry, field, res, depth=10):
    """A helper function for :func:`get_wiktionary_field_strings`.

    It recursively locates the target field.

    Args:
        entry (dict or list): the entity to investigate
        field (str): the field to look up
        res (list): the list of found entities to update
        depth (integer): maximum recursion depth (otherwise this does blow
            up memory for some entries like "cat"); note the budget is
            spent per visited list element, not per nesting level

    Returns:
        (list): the updated list of found entities
    """
    # Depth budget exhausted: stop descending (guard clause for the
    # original's `if depth > 0: ... else: return res` shape).
    if depth <= 0:
        return res
    if isinstance(entry, dict):
        for key, val in entry.items():
            if field == key:
                # Only truthy values are collected; the first hit in a dict
                # short-circuits the remaining keys (original behavior).
                if entry[key]:
                    res.append(entry[key])
                    return res
            elif isinstance(val, list):
                for item in val:
                    depth -= 1
                    # BUGFIX: recurse into each element. The original passed
                    # the whole list `val` here (loop variable unused),
                    # re-scanning the list once per element and appending
                    # duplicate matches.
                    res = dig_deeper(item, field, res, depth)
            # NOTE(review): dict-valued entries are not descended into in
            # this branch -- confirm against the wiktionary entry schema
            # whether that is intentional.
    elif isinstance(entry, list):
        for item in entry:
            depth -= 1
            res = dig_deeper(item, field, res, depth)
    return res
def lookup_claimReview_url(url, claimReview_db):
    """Looks up a URL for a ClaimReview in our DB

    :param url: str URL value for a ClaimReview
    :param claimReview_db: a ClaimReview database
    :returns: the matching ClaimReview document, or None when the URL is
        not in the index
    :rtype: dict or None
    """
    assert type(claimReview_db) is dict, '%s' % (type(claimReview_db))
    assert claimReview_db.get('@type') == 'InMemoryClaimReviewDB'
    idx = claimReview_db.get('url2doc_index', {}).get(url, None)
    # BUGFIX: compare against None explicitly -- the old truthiness check
    # (`if idx:`) wrongly returned None for a document stored at index 0.
    if idx is None:
        return None
    return claimReview_db['docs'][idx]