content
stringlengths
42
6.51k
def __row_helper(title, value, unit=None, seperator=None):
    """Format a title/value pair as one standardized report row.

    Args:
        title (str): Row label, left aligned.
        value (any): The value to display.
        unit (str, optional): Unit appended after the value.
        seperator (str, optional): Separator between value and unit
            (defaults to a single space).

    Returns:
        String: One newline-terminated row — title left aligned, value
        (with unit) right aligned so the row is 79 characters wide.
    """
    title = str(title)
    if seperator is None:
        seperator = " "
    value = "{}{}{}".format(value, seperator, unit) if unit else str(value)
    width = 78 - len(title)
    if width - len(value) < 1:
        # Not enough room for a balanced row; fall back to fixed columns.
        return "{:30} {:>48}\n".format(title, value)
    return "{:{}} {:>{}s}\n".format(title, len(title), value, width)
def get_class_qualname(cls):
    """Return a fully qualified dotted name for a class (or function ref).

    Example: ``'combtest.action.Action'``.

    Note:
        Python 3 provides ``__qualname__``; this home-grown version keeps
        the format under our control and remains Python 2 compatible.
    """
    return "{}.{}".format(cls.__module__, cls.__name__)
def SUCCESS(obj):
    """Format an object as a green (success) string for console output.

    Args:
        obj: the object to be formatted; converted with ``str``.

    Returns:
        str: the object's string form wrapped in ANSI green escape codes.
    """
    return "\x1b[1;32m{}\x1b[0m".format(obj)
def _number_of_set_bits(x):
    """Return the population count (number of 1 bits) of a 32-bit int."""
    # SWAR popcount, taken from http://stackoverflow.com/a/4912729. Many thanks!
    x -= (x >> 1) & 0x55555555                        # pairwise sums
    x = (x & 0x33333333) + ((x >> 2) & 0x33333333)    # nibble sums
    x = (x + (x >> 4)) & 0x0f0f0f0f                   # byte sums
    x += x >> 8
    x += x >> 16
    return x & 0x0000003f
def _findall(s, item):
    """Return the index of every element of ``s`` equal to ``item``."""
    return [idx for idx, elem in enumerate(s) if elem == item]
def format_vote_average(pick_vote_average):
    """Coerce a raw movie-rating input to a float.

    Args:
        pick_vote_average: raw vote-average value (typically user input).

    Returns:
        float: the rating, or ``None`` when the input is falsy
        (empty string, ``None``, 0, ...).
    """
    # Plain truthiness instead of the unidiomatic ``bool(x) == True``.
    if pick_vote_average:
        return float(pick_vote_average)
    return None
def port_str_to_int(port):
    """Parse and validate a registered-range port number.

    :param port: A string representing a port
    :return: The port as an int, or None if it was not valid.
    """
    try:
        port = int(port)
        # Registered (user) port range per IANA is 1024-49151.
        if port < 1024 or port > 49151:
            raise ValueError
        return port
    except ValueError:
        # Message fixed to match the actual accepted range (it previously
        # claimed 1025-49150 while the check allowed 1024-49151).
        print('"' + str(port) + '" is not a valid port. '
              'Must be an integer between 1024 and 49151.')
        return None
def _determinant(v1, v2, v3) -> float:
    """Return the determinant of the 3x3 matrix whose rows are v1, v2, v3."""
    a, b, c = v1
    d, e, f = v2
    g, h, i = v3
    # Rule of Sarrus: positive diagonals minus negative diagonals.
    return (a * e * i + b * f * g + c * d * h) - (c * e * g + a * f * h + b * d * i)
def pluralize(singular, using, pluralized=None):
    """Pluralize a word for output.

    >>> pluralize("job", 1)
    'job'
    >>> pluralize("job", 2)
    'jobs'
    >>> pluralize("datum", 2, "data")
    'data'
    """
    if using == 1:
        return singular
    if pluralized is not None:
        return pluralized
    return f"{singular}s"
def has_method(o, name):
    """Return True if ``o`` exposes a callable attribute ``name``."""
    attr = getattr(o, name, None)
    # callable(None) is already False; the explicit check keeps intent clear.
    return attr is not None and callable(attr)
def getByte(arg: bytes, index: int) -> bytes:
    """Return the byte at ``index`` as a length-1 ``bytes`` slice.

    Slicing (rather than indexing) keeps the result a ``bytes`` object
    instead of an ``int``.
    """
    return arg[index:index + 1]
def number_of_occurrences(element, sample):
    """Count how many times ``element`` appears in ``sample``.

    :param element: an integer value.
    :param sample: an array of integers.
    :return: the number of occurrences of ``element`` in ``sample``.
    """
    return sum(1 for candidate in sample if candidate == element)
def sorted_degree_map(degree_map):
    """Sort hashtags by degree, highest first; ties broken by key.

    Args:
        degree_map: mapping of hashtag -> degree.

    Returns:
        list of (hashtag, degree) pairs sorted by descending degree and
        ascending hashtag.
    """
    return sorted(degree_map.items(), key=lambda pair: (-pair[1], pair[0]))
def _update_crystals(data, v):
    """Update rotation angle from the parameters value.

    Args:
        data: list of beamline elements from get_beamline() function.
        v: object containing all variables.

    Returns:
        data: updated list.
    """
    # NOTE(review): mutates the element dicts in ``data`` in place and
    # also returns the same list.
    for i in range(len(data)):
        if data[i]['type'] == 'crystal':
            try:  # get crystal #
                crystal_id = int(data[i]['title'].replace('Crystal', ''))
            except Exception:
                # Title has no parseable number; assume the first crystal.
                crystal_id = 1
            try:  # update rotation angle
                data[i]['rotationAngle'] = getattr(v, 'op_DCM_ac{}'.format(crystal_id))
            except Exception:
                # Variable not present for this crystal; leave angle as-is.
                pass
            if not data[i]['energy']:
                try:  # update energy if an old srwlib.py is used
                    data[i]['energy'] = v.op_DCM_e
                except Exception:
                    data[i]['energy'] = v.w_e
    return data
def get_int_in_range(first, last):
    """ (int, int) -> int

    Prompt user for an integer within the specified range.

    <first> is either a min or max acceptable value.
    <last> is the corresponding other end of the range,
    either a min or max acceptable value.

    Returns an acceptable value from the user.

    NOTE(review): on non-numeric input the ValueError *object* is
    returned (not raised), and non-int arguments yield an error string —
    callers must check the return type.
    """
    if isinstance(first, int) and isinstance(last, int):
        if first > last:  # If larger no. is provided 1st
            first, last = last, first  # Switch the parameters
        # Insist on value in the range <first>...<last>
        try:
            in_value = int(input('Enter value in the range {0} .... {1} : '\
                .format(first, last)))
            # Keep re-prompting until the value falls inside the range.
            while in_value < first or in_value > last:
                print('{0} IS NOT in the range {1} .... {2}'.format(in_value, first, last))
                in_value = int(input('Try again: '))
            return in_value
        except ValueError as err:
            return err
    else:
        return 'Expected an integers. int_in_range({0}, {1}) not surpported' \
            .format(type(first), type(last))
def eval_kt2(va, va0):
    """Evaluate the coupling factor kt^2 from phase velocities.

    Args:
        va: phase velocity with piezoelectric effect.
        va0: phase velocity without piezoelectric effect.

    Returns:
        kt2 derived from K^2 = (va/va0)^2 - 1 as K^2 / (1 + K^2).
    """
    k_squared = (va / va0) ** 2 - 1
    return k_squared / (1 + k_squared)
def sort_graded(task):
    """Sort key for graded tasks: orders by ``task['score']`` so the
    highest-scoring tasks land last in an ascending sort."""
    return task['score']
def ot2bio_absa(absa_tag_sequence):
    """Convert an OT-style ABSA tag sequence to BIO encoding.

    A subjective tag ``T-<sentiment>`` becomes ``I-<sentiment>`` when it
    continues a run of subjective tags and ``B-<sentiment>`` when it
    starts one; ``O`` tags pass through unchanged.
    """
    bio_tags = []
    prev_pos = '$$$'  # sentinel: no previous position yet
    for raw_tag in absa_tag_sequence:
        if raw_tag == 'O':
            bio_tags.append('O')
            cur_pos = 'O'
        else:
            # Subjective tag: position is 'T', sentiment follows the dash.
            cur_pos, sentiment = raw_tag.split('-')
            prefix = 'I' if cur_pos == prev_pos else 'B'
            bio_tags.append('%s-%s' % (prefix, sentiment))
        prev_pos = cur_pos
    return bio_tags
def quote_str(value):
    """Return a string in double quotes.

    Parameters
    ----------
    value : str
        Some string.

    Returns
    -------
    quoted_value : str
        The original value surrounded by double-quote marks.

    Example
    -------
    >>> quote_str('some value')
    '"some value"'
    """
    return '"{}"'.format(value)
def diffa(dist, alpha, r):
    """Compute the derivative of the local-local BDeu score.

    Args:
        dist: iterable of counts.
        alpha: equivalent sample size term.
        r: arity of the variable.

    Returns:
        float: derivative value.
    """
    result = 0.0
    # Positive terms: one harmonic-like sum per count in the distribution.
    for count in dist:
        for step in range(count):
            result += 1.0 / (step * r + alpha)
    # Negative terms over the distribution total.
    for step in range(sum(dist)):
        result -= 1.0 / (step + alpha)
    return result
def find_integer(f):
    """Find a (hopefully large) integer n such that f(n) is True and
    f(n + 1) is False.

    f(0) is assumed to be True and is never checked.
    """
    # Linear scan over the small numbers first: when the answer is tiny,
    # anything cleverer costs more calls than it saves.
    for candidate in range(1, 5):
        if not f(candidate):
            return candidate - 1
    # f(4) holds. Probe upwards exponentially, maintaining the invariants
    # that f(lo) is True and (after the loop) f(hi) is False.
    lo, hi = 4, 5
    while f(hi):
        lo, hi = hi, hi * 2
    # Binary search down to adjacent lo/hi: f(lo) and not f(lo + 1).
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if f(mid):
            lo = mid
        else:
            hi = mid
    return lo
def histogram_as_list_of_lists_(source_text_as_list):
    """Return a word histogram as a list of lists: [['one', 1], ['fish', 4]]...

    Uses ``collections.Counter`` instead of a hand-rolled dict; Counter
    is a dict subclass, so first-appearance order is preserved exactly
    like the manual version.
    """
    from collections import Counter
    counts = Counter(source_text_as_list)
    return [[word, count] for word, count in counts.items()]
def batch_matmul_alter_layout(attrs, inputs, tinfos, out_type):
    """Change batch_matmul layout.

    Returning ``None`` signals that the layout should not be changed —
    the default behavior.
    """
    return None
def bytes2int(bytes):
    """PKCS#1 bytes to integer conversion, as used by RSA.

    Accepts an iterable of ints (Python 3 ``bytes``) or of one-character
    strings (Python 2 style); big-endian interpretation.
    """
    integer = 0
    for byte in bytes:
        # Python 2 compat: a str element needs ord() first.
        value = ord(byte) if isinstance(byte, str) else byte
        integer = integer * 256 + value
    return integer
def hammingDistance(str1, str2):
    """Count positions where ``str1`` and ``str2`` differ.

    Only the overlapping prefix (the length of the shorter string) is
    compared; extra characters in the longer string are ignored.

    Args
    ---
    `str1 : string`
        The first string
    `str2 : string`
        The second string

    Returns
    ---
    `differences : int`
        The differences between `str1` and `str2`
    """
    # zip() stops at the shorter input, matching the manual min-length loop.
    return sum(1 for left, right in zip(str1, str2) if left != right)
def dict_to_string(d, order=None, exclude=None, entry_sep="_", kv_sep=""):
    """Turn a dictionary into a string by concatenating keys and values,
    separated by the specified delimiters.

    d: a dictionary. Keys and values are expected to be simple literals
        (strings, floats, integers), not structured objects.
    order: a list of keys; keys with low indices appear first.
    exclude: a list of keys to leave out.
    entry_sep: separator between key-value entries (a string).
    kv_sep: separator between a key and its value.
    """
    # Mutable default arguments ([]) replaced with None-safe handling.
    order = order or []
    exclude = exclude or []
    keys = set(d.keys()) - set(exclude)
    list_out = []
    # Emit explicitly-ordered keys first...
    for k in order:
        if k in keys:
            list_out.append(k + kv_sep + str(d[k]))
            keys.discard(k)
    # ...then the rest of the keys in sorted order for determinism.
    for k in sorted(keys):
        list_out.append(k + kv_sep + str(d[k]))
    return entry_sep.join(list_out)
def post_process_weird(system_mentions):
    """Remove all mentions which are "mm", "hmm", "ahem", "um", "US" or
    "U.S.".

    Args:
        system_mentions (list(Mention)): A list of system mentions.

    Returns:
        list(Mention): the filtered (and sorted) list of mentions.
    """
    def _keep(mention):
        text = " ".join(mention.attributes["tokens"])
        return (text.lower() not in ["mm", "hmm", "ahem", "um"]
                and text != "US"
                and text != "U.S.")

    return sorted(mention for mention in system_mentions if _keep(mention))
def get_ip_parameters(ip_layer):
    """Extract OS-detection-relevant parameters from an IP layer.

    :param ip_layer: an IP packet (scapy.layers.inet.IP), or None.
    :return: (df, ttl) tuple, or None when no layer is given. ``df`` is
        the middle bit of the 3-bit flags field (the Don't Fragment bit).
    """
    if ip_layer is None:
        return None
    # Render the 3-bit flags field as binary; index 1 is the DF bit.
    df_bit = int(format(ip_layer.flags.value, "03b")[1])
    return df_bit, ip_layer.ttl
def build_mapping(triples, entity_path, relation_path):
    """Build id mappings for entities and relations from triples and
    write them to disk.

    Args:
        triples: iterable of (head, relation, tail) triples.
        entity_path: output file for the entity mapping (count on the
            first line, then "<entity> <id>" per line).
        relation_path: output file for the relation mapping, same format.

    Returns:
        (entity2id, id2entity, relation2id, id2relation) dictionaries.
        Ids are assigned in first-appearance order.
    """
    entity2id = {}
    relation2id = {}
    for triple in triples:
        head, rel, tail = triple[0], triple[1], triple[2]
        # Membership test directly on the dict (no ``.keys()``).
        if head not in entity2id:
            entity2id[head] = len(entity2id)
        if tail not in entity2id:
            entity2id[tail] = len(entity2id)
        if rel not in relation2id:
            relation2id[rel] = len(relation2id)

    def _dump(mapping, path):
        # One count line, then "<name> <id>" per entry.
        with open(path, 'w') as fh:
            fh.write(str(len(mapping)) + "\n")
            for name, idx in mapping.items():
                fh.write(name + " " + str(idx))
                fh.write('\n')

    _dump(entity2id, entity_path)
    _dump(relation2id, relation_path)

    id2entity = {v: k for k, v in entity2id.items()}
    id2relation = {v: k for k, v in relation2id.items()}
    return entity2id, id2entity, relation2id, id2relation
def nframes(dur, hop_size=3072, win_len=4096) -> float:
    """Compute the number of frames for a total duration, hop size and
    window length.

    The result is in the same unit of measure as the inputs (samples,
    seconds, ...).

    N.B. This returns a float!
    """
    return 1 + (dur - win_len) / hop_size
def kmerize(ip_string, kmer_size):
    """Return the list of all overlapping k-mers of ``ip_string``."""
    last_start = len(ip_string) - kmer_size
    return [ip_string[start:start + kmer_size] for start in range(last_start + 1)]
def nested_set(dictionary: dict, keys: list, value):
    """Set ``value`` in ``dictionary`` at the nested path given by ``keys``.

    >>> nested_set({'key': {'nested_key': None}}, keys=['key', 'nested_key'], value=123)
    {'key': {'nested_key': 123}}
    """
    cursor = dictionary
    # Walk down to the dict that holds the final key.
    for key in keys[:-1]:
        cursor = cursor[key]
    cursor[keys[-1]] = value
    return dictionary
def is_name_dict_tuple(transform):
    """Check whether ``transform`` looks like ``('normalize_adj', dict(rate=1.0))``:
    a length-2 pair of (str name, dict of keyword arguments)."""
    if len(transform) != 2:
        return False
    name, kwargs = transform
    return isinstance(name, str) and isinstance(kwargs, dict)
def upperLeftOrigin(largeSize, smallSize):
    """Return the upper-left coordinate (tuple) at which a small
    rectangle must be placed to be centered inside a larger rectangle."""
    return tuple(int((large - small) / 2)
                 for large, small in zip(largeSize, smallSize))
def identity_index(obj, val):
    """Like ``obj.index(val)`` but matches by identity (``is``), not equality.

    >>> a = "1"
    >>> identity_index([a], a)
    0

    Unlike ``obj.index``, an equal-but-distinct object is not a match:
    only the position of ``val`` itself is returned.

    Raises:
        ValueError: when no element of ``obj`` *is* ``val``.
    """
    for position, element in enumerate(obj):
        if element is val:
            return position
    raise ValueError("%s does not contain the item %s" % (repr(obj), repr(val)))
def _compute_new_shape(length, height, width, trunc=True):
    """Compute the target (height, width) whose *shortest* side equals
    ``length``, preserving the aspect ratio.

    Parameters
    ----------
    length : float
        Length of the resulting shortest side.
    height : float
        Image height.
    width : float
        Image width.
    trunc : bool, optional
        When True (default), truncate the results to ints.

    Returns
    -------
    int or float
        New height.
    int or float
        New width.
    """
    if height < width:
        new_height, new_width = length, width * length / height
    elif width < height:
        new_height, new_width = height * length / width, length
    else:
        # Square input: both sides become ``length``.
        new_height = new_width = length
    if trunc:
        new_height, new_width = int(new_height), int(new_width)
    return new_height, new_width
def real_client_ip(xforwardedfor):
    """Return the client IP from an X-Forwarded-For header value.

    Only the last-most entry listed is where the client connection to us
    came from, so that's the only one we can trust in any way.
    """
    _, _, last_entry = xforwardedfor.rpartition(',')
    return last_entry.strip()
def kth_node_in_BST(root, k):
    """Find the k-th smallest node in a binary search tree.

    :param root: Binary Search Tree root (nodes expose .left and .right)
    :param k: kth smallest (1-based)
    :return: kth smallest node, or None if the tree has fewer than k nodes
    """
    def recursion(root, k):
        # In-order traversal that counts k down; once ``target`` is set
        # the remaining calls short-circuit via the ``if not target`` guards.
        nonlocal target
        if not root:
            return k
        k = recursion(root.left, k)
        if not target:
            if k == 1:
                # This node is the k-th visited in in-order sequence.
                target = root
            else:
                k -= 1
        if not target:
            k = recursion(root.right, k)
        return k
    target = None
    recursion(root, k)
    return target
def parseinput(inp):  # with name
    """Return the times as a list of int tuples: (start, end) or, when
    the first line carries extra text, (start, end, name).

    The format of the *first* line decides the format assumed for all
    lines.
    """
    lines = inp.splitlines()
    if len(lines[0].split()) > 2:
        result = []
        for line in lines:
            fields = line.split()
            # Everything after the two numbers (split on single spaces,
            # max twice) is the name.
            name = ''.join(line.split(" ", 2)[2:])
            result.append((int(fields[0]), int(fields[1]), name))
        return result
    return [(int(line.split()[0]), int(line.split()[1])) for line in lines]
def torch_unravel_index(index, shape):
    """Convert a flat index into per-dimension coordinates for ``shape``.

    Equivalent of numpy's ``unravel_index`` (see
    https://discuss.pytorch.org/t/how-to-do-a-unravel-index-in-pytorch-just-like-in-numpy/12987/2):

        x = torch.arange(30).view(10, 3)
        for i in range(x.numel()):
            assert i == x[unravel_index(i, x.shape)]

    Parameters
    ----------
    index : int
        Flat index.
    shape : sequence of int
        Target shape.

    Returns
    -------
    tuple
        Per-dimension coordinates.
    """
    coords = []
    # Peel dimensions from the innermost (last) outwards.
    for dim in reversed(shape):
        index, remainder = divmod(index, dim)
        coords.append(remainder)
    return tuple(reversed(coords))
def binary_sample_generator(meta, seqlen, n_bytes):
    """This function takes in the filesystem metadata generated by the main
    ``Consumer`` class, ``seqlen``, ``n_bytes`` and returns all the samples
    in the dataset. Each sample looks like this:

    .. code-block::python

        sample = {
            "data": [
                (filepath_0, start_byte, end_byte),
                (filepath_0, start_byte, end_byte),
                # in some case there are more than one files in a sample
            ],
            "class": "string"
        }

    The logic of the code is as follows: For each class data go over the
    files. For each file check the total number of bytes in the file. Keep
    adding the above tuple while the total number of bytes < seqlen.

    NOTE(review): the original doc mentioned a +1 increment per file for an
    "<EOF>" tag, but no such increment appears in the code below — confirm
    whether that behavior was dropped intentionally.
    """
    all_samples = []
    # Each sample must cover exactly this many bytes.
    req_size = seqlen * n_bytes
    for _c, _meta in meta.items():
        # ----- for each label
        sizes = _meta["st_size"]
        filepath = _meta["filepath"]
        _f_idx = 0          # index of the file currently being consumed
        _curr_size = sizes[_f_idx]
        _curr_seek = 0      # byte offset already consumed in that file
        while 1:
            sample = []
            total_bytes = 0
            # Accumulate (path, start, end) spans until the sample is full.
            while total_bytes < req_size:
                this_filepath = filepath[_f_idx]
                bytes_remaining_in_this_file = _curr_size - _curr_seek
                bytes_required = req_size - total_bytes
                if bytes_remaining_in_this_file > bytes_required:
                    # in this case the data in this file is more than required
                    # and so this is a simple addition
                    sample.append((this_filepath, _curr_seek, _curr_seek + bytes_required))
                    _curr_seek = _curr_seek + bytes_required
                    total_bytes += bytes_required
                else:
                    # in this case read till what is available in the file and
                    # the increment the file counter
                    sample.append((this_filepath, _curr_seek, _curr_seek + bytes_remaining_in_this_file))
                    _curr_seek = 0
                    _f_idx += 1
                    total_bytes += bytes_remaining_in_this_file
                    if _f_idx == len(filepath):
                        # there are no more files in this class
                        break
                    # done here to avoid the index error
                    _curr_size = sizes[_f_idx]
            all_samples.append({
                "data": sample,
                "class": _c,
            })
            if _f_idx == len(filepath):
                break
    return all_samples
def sequences_match(seq1, seq2, max_mismatches=0):
    """Determine whether two sequences match with specified tolerance.

    Returns True if sequences 'seq1' and 'seq2' are considered to match,
    and False if not.

    By default sequences only match if they are identical. This condition
    can be loosened by specifying a maximum number of mismatched bases
    that are allowed. An 'N' in either (or both) sequences is
    automatically counted as a mismatched position — except under the
    default exact-match comparison.
    """
    if max_mismatches == 0:
        # Fast path: strict identity (here an 'N' does match an 'N').
        return seq1 == seq2
    mismatches = 0
    for base1, base2 in zip(seq1, seq2):
        if base1 != base2 or 'N' in (base1, base2):
            mismatches += 1
            if mismatches > max_mismatches:
                return False
    return True
def int_to_tag_hex(value: int) -> str:
    """Convert the *int* representation of a tag to a *hex* string,
    zero-padded to at least four digits.

    Parameters
    ----------
    value : int
        Raw tag representation as integer.

    Returns
    -------
    str
        Hexadecimal string representation (lowercase, no prefix).
    """
    return "{:x}".format(value).zfill(4)
def _getTagsWith(s, cont, toClosure=False, maxRes=None):
    """Return the html tags in the 's' string containing the 'cont'
    string; if toClosure is True, everything between the opening tag and
    the closing tag is returned.

    NOTE(review): naive string scanning, not a real HTML parser — nested
    same-name tags and attributes containing '>' are not handled.
    """
    lres = []
    bi = s.find(cont)
    if bi != -1:
        # Back up to the '<' that opens the tag containing the match.
        btag = s[:bi].rfind('<')
        if btag != -1:
            if not toClosure:
                # Opening tag only: cut at the next '>' after the match.
                etag = s[bi+1:].find('>')
                if etag != -1:
                    endidx = bi+2+etag
                    lres.append(s[btag:endidx])
                    if maxRes is not None and len(lres) >= maxRes:
                        return lres
                    # Recurse on the remainder for further matches.
                    lres += _getTagsWith(s[endidx:], cont, toClosure=toClosure)
            else:
                # Whole element: derive the closing tag from the tag name.
                spaceidx = s[btag:].find(' ')
                if spaceidx != -1:
                    ctag = '</%s>' % s[btag+1:btag+spaceidx]
                    closeidx = s[bi:].find(ctag)
                    if closeidx != -1:
                        endidx = bi+closeidx+len(ctag)
                        lres.append(s[btag:endidx])
                        if maxRes is not None and len(lres) >= maxRes:
                            return lres
                        lres += _getTagsWith(s[endidx:], cont, toClosure=toClosure)
    return lres
def _compute_yield_pf_q30(metrics):
    """Compute the number of bases passing filter + Q30 from a populated
    metrics dictionary generated by get_illumina_sequencing_metrics().

    Returns None when the passing-filter cluster count is missing/zero.
    """
    num_clusters_pf = metrics.get('num_clusters_pf')
    if not num_clusters_pf:
        return None
    total = 0
    for read_type in ('read1', 'read2', 'index1', 'index2'):
        q30_fraction = metrics.get('%s_q30_fraction' % read_type)
        if q30_fraction:
            # fraction * number of cycles * clusters = Q30 bases for this read.
            num_cycles = len(metrics['%s_q30_fraction_by_cycle' % read_type])
            total += q30_fraction * num_cycles * num_clusters_pf
    return int(round(total))
def is_local_link_information_valid(local_link_information_list):
    """Verify that a local link information list is valid.

    A local link information list is valid if:
    1 - the list has exactly one local link information entry
    2 - that entry has switch_info defined
    3 - the switch_info has a server_hardware_id
    4 - the switch_info has information about being bootable
    5 - the switch_info's bootable value is a boolean
    """
    if len(local_link_information_list) != 1:
        return False
    switch_info = local_link_information_list[0].get('switch_info')
    if not switch_info:
        return False
    if not switch_info.get('server_hardware_id'):
        return False
    return isinstance(switch_info.get('bootable'), bool)
def get(
    coin,
    in_coin,
    try_conversion=None,
    exchange=None,
    aggregate=None,
    limit=None,
    all_data=None,
    to_timestamp=None,
    extra_params=None,
    sign=None,
):
    """Build the query parameters for CryptoCompare daily historical OHLCV.

    Values are based on 00:00 GMT time; BTC conversion is used when the
    coin does not trade directly in the requested currency.

    :param coin: the coin to get the price of (max 10 chars).
    :param in_coin: the coin/currency to get the price in (max 10 chars).
    :param try_conversion: if False, only direct trading values are used.
    :param exchange: exchange to obtain data from (CCCAGG by default).
    :param aggregate: time period (days) to aggregate the data over.
    :param limit: number of data points to return (always more than 1).
    :param all_data: return all data (histo day only); ignores ``limit``.
    :param to_timestamp: last unix timestamp to return data for.
    :param extra_params: the name of your application (max 2000 chars).
    :param sign: if True, the server signs the requests (useful for
        smart contracts).
    :return: dict of request parameters for the historical-data endpoint.
    """
    # The API treats ``limit`` as the last index rather than a count
    # (despite its documentation), so shift by one.
    adjusted_limit = None if limit is None else limit - 1
    return {
        'tryConversion': try_conversion,
        'fsym': coin,
        'tsym': in_coin,
        'e': exchange,
        'aggregate': aggregate,
        'limit': adjusted_limit,
        'allData': all_data,
        'toTs': to_timestamp,
        'extraParams': extra_params,
        'sign': sign,
    }
def comment(text, prefix):
    """Return commented-out text.

    Each line of text is prefixed by ``prefix`` and a space character;
    any trailing whitespace is trimmed.
    """
    return '\n'.join('{} {}'.format(prefix, line).rstrip()
                     for line in text.split('\n'))
def split_in_groups(item, group_size):
    """Split a sliceable iterable into consecutive groups of ``group_size``.

    ('id1', 'val1', 'id2', 'val2') with group_size 2 gives
    [('id1', 'val1'), ('id2', 'val2')]; the string 'abcdef' with
    group_size 2 gives ['ab', 'cd', 'ef']. A trailing partial group is
    kept as-is.
    """
    starts = range(0, len(item), group_size)
    return [item[start:start + group_size] for start in starts]
def get_schema(is_ipv6, octet):
    """Get the template with word slots.

    Builds the sentence scaffold (IPv6 or IPv4 variant) out of ``octet``
    word slots and literal tokens, then inserts a space before every word
    except line-initial ones and punctuation.
    """
    new_line = '\n'
    period = '.'
    space = ' '
    non_words = [new_line, period, space]
    if is_ipv6:
        schema = [octet, octet, 'and', octet, octet, new_line,
                  octet, octet, octet, octet, octet, octet, octet, period, new_line,
                  octet, octet, octet, octet, octet, period, new_line]
    else:
        schema = ['The', octet, octet, octet, new_line,
                  octet, 'in the', octet, octet, period, new_line,
                  octet, octet, period, new_line]
    space_num = 0
    # Add spaces before words except the first word.
    for i in range(1, len(schema)):
        # Shift the index by the number of spaces inserted so far, since
        # each insert pushes the remaining entries to the right.
        i = i + space_num
        insert_space = True
        # If the current entry is a non_word, don't add a space.
        if schema[i] in non_words:
            insert_space = False
        # If the previous entry is a new_line, don't add a space.
        if schema[i-1] == new_line:
            insert_space = False
        if insert_space:
            schema.insert(i, space)
            space_num = space_num + 1
    return schema
def calc_U_slip_quasisteady(eps, E, x, mu):
    """Slip velocity in the quasi-steady limit.

    Args:
        eps: permittivity.
        E: electric field magnitude.
        x: position.
        mu: dynamic viscosity.

    Returns:
        Quasi-steady slip velocity, -eps * E^2 * x / (2 * mu).
    """
    return -(eps * E ** 2 * x) / (2 * mu)
def get_prep_pobj_text(preps):
    """Concatenate each preposition with its first right child (pobj).

    Parameters
    ----------
    preps : a list of spacy Tokens

    Returns
    -------
    info : str
        "<prep> <pobj> " fragments joined together; empty string for an
        empty input. A prep without any right child is reported to
        stdout and skipped.
    """
    info = ""
    for prep in preps:
        try:
            first_child = list(prep.rights)[0]
        except IndexError as e:
            print("Somehow this prep doesn't have any child", str(e))
            continue
        info += prep.text + " " + first_child.text + " "
    return info
def mutate_dict(inValue, keyFn=lambda k: k, valueFn=lambda v: v, keyTypes=None, valueTypes=None, **kwargs):
    """
    Takes an input dict or list-of-dicts and applies ``keyFn`` to all of
    the keys in both the top-level and any nested dicts or lists, and
    ``valueFn`` to all values.

    If the input value is not of type `dict` or `list`, the value will be
    returned as-is.

    Args:
        inValue (any): The dict to mutate.
        keyFn (lambda): The function to apply to keys.
        valueFn (lambda): The function to apply to values.
        keyTypes (tuple, optional): If set, only keys of these types will
            be mutated with ``keyFn``.
        valueTypes (tuple, optional): If set, only values of these types
            will be mutated with ``valueFn``.

    Returns:
        A recursively mutated dict, list of dicts, or the value as-is
        (described above).

    NOTE(review): ``**kwargs`` is forwarded to ``keyFn`` but *not* to
    ``valueFn`` — confirm whether that asymmetry is intentional.
    """
    # this is here as a way of making sure that the various places where
    # recursion is done always performs the same call, preserving all
    # arguments except for value (which is what changes between nested calls).
    def recurse(value):
        return mutate_dict(value, keyFn=keyFn, valueFn=valueFn, keyTypes=keyTypes, valueTypes=valueTypes, **kwargs)

    # handle dicts
    if isinstance(inValue, dict):
        # create the output dict
        outputDict = dict()
        # for each dict item...
        for k, v in inValue.items():
            # apply the keyFn to some or all of the keys we encounter
            if keyTypes is None or (isinstance(keyTypes, tuple) and isinstance(k, keyTypes)):
                # prepare the new key
                k = keyFn(k, **kwargs)
            # apply the valueFn to some or all of the values we encounter
            if valueTypes is None or (isinstance(valueTypes, tuple) and isinstance(v, valueTypes)):
                v = valueFn(v)
            # recurse depending on the value's type
            if isinstance(v, dict):
                # recursively call mutate_dict() for nested dicts
                outputDict[k] = recurse(v)
            elif isinstance(v, list):
                # recursively call mutate_dict() for each element in a list
                outputDict[k] = [recurse(i) for i in v]
            else:
                # set the value straight up
                outputDict[k] = v
        # return the now-populated output dict
        return outputDict
    # handle lists-of-dicts
    elif isinstance(inValue, list) and len(inValue) > 0:
        return [recurse(i) for i in inValue]
    else:
        # passthrough non-dict value as-is
        return inValue
def reduced_mass(a, b):
    """Calculate the reduced mass of masses ``a`` and ``b``.

    Parameters
    ----------
    a : Float
        Mass value.
    b : Float
        Mass value.

    Returns
    -------
    red_m : Float
        Reduced mass a*b / (a + b).
    """
    return (a * b) / (a + b)
def word2vec(voca_list, input_set):
    """Build a bag-of-words indicator vector over ``voca_list``.

    Each position of the result is 1 when the corresponding vocabulary
    word occurs in ``input_set``, else 0. Unknown words are reported to
    stdout and ignored.
    """
    vec = [0] * len(voca_list)
    for word in input_set:
        try:
            vec[voca_list.index(word)] = 1
        except ValueError:
            print('the word: %s is not in my vocabulary!' % word)
    return vec
def coin_sum(coins, target):
    """Return the number of combinations of currency denominations.

    This function can be used to solve problems like how many different
    ways the value `target` can be made using any number of values within
    `coins`.

    Parameters
    ----------
    coins : array_like
        All possible values that can be used to make up the `target`
        value. These values should be integers, all expressed in the
        same (minimum) unit.
    target : int
        The resulting total value, in the same unit as `coins`.

    Returns
    -------
    int
        The number of possible combinations to make up `target` using
        values in `coins`.

    Examples
    --------
    The number of different ways to make up 2 pounds using 8 possible
    coins (in pence):

    >>> coin_sum([1, 2, 5, 10, 20, 50, 100, 200], 200)
    73682
    """
    # Classic unbounded counting DP. The original seeded the table with
    # all ones and skipped coins[0], which silently assumed
    # coins[0] == 1; seeding with [1, 0, ...] and using every coin gives
    # identical results in that case and correct results otherwise.
    ways = [1] + [0] * target
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return int(ways[target])
def motif_factors(m, M):
    """Generate the set of non-redundant motif sizes to check for the
    division rule.

    A size is redundant when an already-kept (larger) size is an exact
    multiple of it.

    Parameters
    ----------
    m : <int> minimum motif size
    M : <int> maximum motif size

    Returns
    -------
    factors : <list> sorted (descending) list of non-redundant motifs.
    """
    factors = []
    for size in range(M, m - 1, -1):
        if not any(kept % size == 0 for kept in factors):
            factors.append(size)
    return factors
def reverse_dictionary(a):
    """Inverts a dictionary mapping.

    A value that is iterable (e.g. a list) produces one entry per item; a
    non-iterable value becomes a key directly. Output values are lists of
    the original keys, in insertion order.

    NOTE: string values are iterable, so they get expanded
    character-by-character (unchanged from the original behavior).

    Args:
        a: input dictionary.

    Returns:
        b: output reversed dictionary.
    """
    b = {}
    for key, value in a.items():
        try:
            # Fan out each element of an iterable value.
            for item in value:
                b.setdefault(item, []).append(key)
        except TypeError:
            # Bare ``except`` narrowed: only non-iterable values (the
            # TypeError from ``for``) fall through to direct mapping.
            b.setdefault(value, []).append(key)
    return b
def get_delta_angle(a1: float, a2: float, ) -> float:
    """Get the difference between two (dihedral or regular) angles.

    Examples::

        3 - 1 = 2
        1 - 3 = 2
        1 - 359 = 2

    Args:
        a1 (float): Angle 1 in degrees.
        a2 (float): Angle 2 in degrees.

    Returns:
        float: The difference between the angles in degrees (0-180).
    """
    # Normalize both angles into [0, 360), then take the shorter way
    # around the circle.
    a1, a2 = a1 % 360, a2 % 360
    diff = abs(a1 - a2)
    return min(diff, abs(diff - 360))
def GetTargetAndroidVersionNumber(lines):
    """Return the Android major version number from the build fingerprint.

    Args:
        lines: Lines read from the tombstone file, before preprocessing.

    Returns:
        5, 6, etc, or None if not determinable (developer build?)
    """
    # Example: "Build fingerprint: 'Android/aosp_flo/flo:5.1.1/...'" -> 5.
    for line in lines:
        if not line.startswith('Build fingerprint: '):
            continue
        fingerprint = line.split()[2]
        major = fingerprint.split('/')[2].split(':')[1].split('.')[0]
        try:
            return int(major)
        except ValueError:
            return None
    return None
def remove_header(data):
    """Drop the first two bytes of ``data`` to fix the characters."""
    return data[2:]
def hexify(x):
    """Return ``x`` as a hex string, zero-padded to two digits for
    single-hex-digit values (intended for byte values)."""
    digits = hex(x).split('x')[1]
    return digits if x > 15 else '0' + digits
def is_using_reverse_process(input_shape):
    """Tell whether the attention output carries both A_in and A_out.

    A rank-4 shape (batch, 2, N, N) means two attention matrices (one
    for A_in, one for A_out); rank 3 (batch, N, N) means a single one.

    Arguments:
        input_shape {tuple} -- shape of the attention mechanism output.

    Raises:
        ValueError: for any other rank.
    """
    rank = len(input_shape)
    if rank == 4:
        return True
    if rank == 3:
        return False
    raise ValueError(f"Invalid attention shape {input_shape}")
def list_deb(archive, compression, cmd, verbosity, interactive):
    """List a DEB archive.

    Only ``cmd`` and ``archive`` are used; the remaining parameters are
    part of the common archive-handler signature.
    """
    return [cmd, '--contents', '--', archive]
def move_down_left(rows, columns, t):
    """Return the coordinates of the neighbour below and to the left of
    the bomb at ``t`` in a ``rows`` x ``columns`` matrix, or None when
    that neighbour does not exist.

    NOTE(review): the lower boundary test is ``x == rows`` — if rows are
    0-indexed the last valid row would be ``rows - 1``; confirm the
    indexing convention used by callers.
    """
    x, y = t
    if x == rows or y == 0:
        return None
    return (x + 1, y - 1)
def absolute_value(num):
    """Return the absolute value of ``num``."""
    return num if num >= 0 else -num
def FixDiffLineEnding(diff):
    """Fix patch files generated on windows and applied on mac/linux.

    For files with svn:eol-style=crlf, svn diff puts CRLF in the diff
    hunk header. patch on linux and mac barfs on those hunks. As usual,
    blame svn. Header lines get any existing CRLF stripped and rewritten
    with a bare LF; hunk content is left untouched.
    """
    header_prefixes = ('---', '+++', '@@ ', '\\ No')
    fixed = []
    for line in diff.splitlines(True):
        if line.startswith(header_prefixes):
            # Strip any existing CRLF on the header lines.
            fixed.append(line.rstrip() + '\n')
        else:
            fixed.append(line)
    return ''.join(fixed)
def string_has_vlans(option):
    """Return True iff *option* is a valid comma-separated list of VLANs.

    Each comma-separated item is either a single number or a 'low-high'
    range; every number must be in 1..4096.
    """
    for item in option.split(","):
        bounds = item.strip().split("-")
        if not all(b.isdigit() and 0 < int(b) <= 4096 for b in bounds):
            return False
    return True
def _parse_wms(**kw): """ Parse leaflet TileLayer.WMS options. http://leafletjs.com/reference-1.2.0.html#tilelayer-wms """ return { 'layers': kw.pop('layers', ''), 'styles': kw.pop('styles', ''), 'format': kw.pop('fmt', 'image/jpeg'), 'transparent': kw.pop('transparent', False), 'version': kw.pop('version', '1.1.1'), 'crs': kw.pop('crs', None), 'uppercase': kw.pop('uppercase', False), }
def GetLinkType(messages, link_type_arg):
    """Convert the link type flag into its message enum.

    Args:
        messages: The API messages holder.
        link_type_arg: The link type flag value.

    Returns:
        A LinkTypeValueValuesEnum for the flag value, or None if absent.
    """
    if link_type_arg is None:
        return None
    return messages.Interconnect.LinkTypeValueValuesEnum(link_type_arg)
def parse_biases(m, bias_model, bias_params):
    """Pull per-bias parameter values out of a fitted model vector.

    inputs:
        m: model vector
        bias_model: the bias model; must hold 'bias_ID_dict' (from
            assign_bias_ID) and may hold 'slope_bias_dict'
        bias_params: names of the bias parameters to report
    output:
        b_dict: dict mapping each parameter name (plus 'val') to a list,
            one entry per bias ID
        slope_bias_dict: per-key {'slope_x', 'slope_y'} values, empty when
            the model has no slope biases
    """
    b_dict = {param: [] for param in bias_params + ['val']}
    for entry in bias_model['bias_ID_dict'].values():
        # 'col' indexes this bias's value within the model vector.
        b_dict['val'].append(m[entry['col']])
        for param in bias_params:
            b_dict[param].append(entry[param])
    slope_bias_dict = {}
    if 'slope_bias_dict' in bias_model:
        for key, cols in bias_model['slope_bias_dict'].items():
            slope_bias_dict[key] = {'slope_x': m[cols[0]],
                                    'slope_y': m[cols[1]]}
    return b_dict, slope_bias_dict
def resource_name_for_asset_type(asset_type):
    """Return the resource name (last dotted component) of an asset type.

    Args:
        asset_type: the asset type like 'google.compute.Instance'
    Returns:
        a resource name like 'Instance'
    """
    return asset_type.rsplit('.', 1)[-1]
def convert_coord(x_center, y_center, radius):
    """Translate a ball's centre coordinates to its surface's top-left point.

    :param x_center: x coordinate of the center
    :param y_center: y coordinate of the center
    :param radius: the radius of the ball
    :return: (x, y) of the top-left point of the surface
    """
    return x_center - radius, y_center - radius
def get_from_config(name, config):
    """Get a value from any level of a (nested) config dict.

    Parameters
    ----------
    name : str
        Name (key) of the value.
    config : dict
        Config dictionary; values may be nested dicts or lists of dicts.

    Returns
    -------
    Requested value, or None when absent.

    Raises
    ------
    ValueError
        If *config* is not a dict, or if more than one nested entry carries
        the requested name.
    """
    if not isinstance(config, dict):
        raise ValueError(
            "Expected 'config' of type 'dict' (got '{}').".format(type(config).__name__)
        )
    # A direct hit at this level wins outright and skips the nested search.
    try:
        return config[name]
    except KeyError:
        pass
    # Collect every nested dict: dict values plus dicts inside list values.
    sub_configs = []
    for value in config.values():
        if isinstance(value, dict):
            sub_configs.append(value)
        elif isinstance(value, list):
            sub_configs.extend(entry for entry in value if isinstance(entry, dict))
    result = None
    for sub in sub_configs:
        found = get_from_config(name, sub)
        if found is None:
            continue
        if result is not None:
            # The name must be unique across all nested levels.
            raise ValueError(
                "Config contained at least 2 entries named {}: {} and {}.".format(
                    name, found, result
                )
            )
        result = found
    return result
def isdir(path, **kwargs):
    """Return True if *path* refers to an existing directory."""
    from os.path import isdir as _isdir
    return _isdir(path, **kwargs)
def single_pairwise(arr, arg, taken):
    """Find the first index pair (i, j), i < j, with arr[i] + arr[j] == arg.

    Only indices whose flag in *taken* equals False may participate.
    Returns (-1, -1) when no such pair exists.

    Arguments:
        arr: an array of numbers
        arg: the target sum to look for
        taken: an array as long as arr designating if a particular index
            can be in a new index pair.
    """
    n = len(arr)
    for left in range(n):
        for right in range(left + 1, n):
            both_free = taken[left] == taken[right] == False
            if both_free and arr[left] + arr[right] == arg:
                return (left, right)
    # No pair of free values sums to arg.
    return (-1, -1)
def merge(header_predictions, predictions, citations, dois, binder_links, long_title):
    """Combine header extraction, classifier output and regex-based findings.

    Parameters
    ----------
    header_predictions: common headers and their extracted contents
    predictions: classifier predictions (description, installation
        instructions, invocation, citation)
    citations: bibtex citations found in the readme
    dois: identifiers found in the readme (Zenodo DOIs)
    binder_links: executable notebook badges found in the readme
    long_title: long repository title, if detected

    Returns
    -------
    Combined predictions and results of the extraction process
    """
    print("Merge prediction using header information, classifier and bibtex and doi parsers")

    def regex_entry(text):
        # All regex-derived findings carry the same confidence/technique tag.
        return {'excerpt': text, 'confidence': [1.0], 'technique': 'Regular expression'}

    if long_title:
        predictions['long_title'] = regex_entry(long_title)
    if citations:
        predictions.setdefault('citation', [])
        for cite in citations:
            # insert(0, ...) keeps regex findings ahead of classifier ones.
            predictions['citation'].insert(0, regex_entry(cite))
    if dois:
        predictions['identifier'] = []
        for badge in dois:
            # The identifier is in position 1. Position 0 is the badge id,
            # which we don't want to export.
            predictions['identifier'].insert(0, regex_entry(badge[1]))
    if binder_links:
        predictions['executable_example'] = []
        for badge in binder_links:
            predictions['executable_example'].insert(0, regex_entry(badge[1]))
    for header, contents in header_predictions.items():
        if header not in predictions:
            predictions[header] = contents
        else:
            for entry in contents:
                predictions[header].insert(0, entry)
    print("Merging successful. \n")
    return predictions
def _row_name(index): """ Converts a row index to a row name. >>> _row_name(0) '1' >>> _row_name(10) '11' """ return '%d' % (index + 1)
def lump_discount(text, sessions=8):
    """Multiply one event's price by (sessions - 1), formatted to 2 decimals."""
    total = float(text) * (int(sessions) - 1)
    return f"{total:.2f}"
def has_attribute(t, key: str) -> bool:
    """Report whether the callable *t* exposes an attribute named *key*.

    :param t: the callable
    :param key: the attribute's name
    :return: True if the attribute exists, otherwise False
    """
    return hasattr(t, key)
def enum_name(cls):
    """Return the uppercase enum identifier for the given class name.

    @param cls The class name
    """
    return cls.upper()
def sort_tuples_by_idx(list_of_tuples, tuple_idx=1, reverse_flag=False):
    """Sort a list of (pam, score) tuples by one tuple position.

    Args:
        list_of_tuples: list of tuples like [(str, float), ..., (str, float)]
        tuple_idx: [default: 1] tuple index which defines sorting
        reverse_flag: [default: False] if True, sort descending

    Returns:
        sorted data in the same format (ascending by default).
    """
    def sort_key(entry):
        return entry[tuple_idx]

    return sorted(list_of_tuples, key=sort_key, reverse=reverse_flag)
def _conn_str_sort_key(key): """Provide pseudo-consistent ordering of components of a connection string. This is of no value except to aid in debugging. :param key: key :return: numeric `key` value usable by e.g. `sorted` """ if key == 'host': return ' 1' if key == 'port': return ' 2' if key == 'dbname': return ' 3' if key == 'user': return ' 4' if key == 'password': return ' 5' return key.lower()
def MessageCrossRefLabel(msg_name):
    """Build the cross-reference label for a message."""
    return f'envoy_api_msg_{msg_name}'
def add_coefficient(c):
    """Render coefficient *c* as a signed string: '+ 2', '- 3', or '' for zero."""
    if c == 0:
        return ''
    sign = '-' if c < 0 else '+'
    return f'{sign} {abs(c)}'
def _parse_port_list(data, port_list=None): """return a list of port strings""" # 1,2,3,4,5,6,7,8,9,10,9,30,80:90,8080:8090 # overlapping and repeated port numbers are allowed if port_list is None: port_list = [] data = data.split(',') data_list = [p.strip() for p in data if ':' not in p and 'any' not in p] port_list.extend(data_list) return port_list
def frame_idx(fname):
    """Frame index from a filename: `name0001.asc` returns 1.

    Assumes the last four characters before the 4-char extension are digits.
    """
    digit_run = fname[-8:-4]
    return int(digit_run)
def rstrip(val: str) -> str:
    """Return *val* with trailing whitespace removed."""
    return val.rstrip()
def strip_simple_quotes(s):
    """Strip one matching layer of quoting from *s*, if present.

    Handles single quotes, double quotes, single triple quotes, and single
    triple-double quotes, front and back of the string. Otherwise does
    nothing and returns *s* unchanged.
    """
    for quote in ("'", '"'):
        if not (s.startswith(quote) and s.endswith(quote)):
            continue
        triple = quote * 3
        if s.startswith(triple) and s.endswith(triple) and len(s) >= 6:
            return s[3:-3]
        if len(s) >= 2:
            return s[1:-1]
        # A single quote character matches itself front and back.
        return s
    return s
def remove_none_from_dict(dictionary):
    """Recursively drop None-valued keys from a dict.

    :param dictionary: the dictionary to clean
    :return: a copy with None values removed; non-dict inputs pass through
        unchanged
    """
    if isinstance(dictionary, dict):
        return {
            key: remove_none_from_dict(value)
            for key, value in dictionary.items()
            if value is not None
        }
    return dictionary
def create_gps_markers(coords):
    """Build GeoJSON Point features from latitude/longitude points.

    Args:
        coords: iterable of objects exposing ``name``, ``latitude`` and
            ``longitude`` attributes.

    Returns:
        List of GeoJSON Feature dicts, one per input point.

    Note:
        Fixed: GeoJSON (RFC 7946, section 3.1.1) orders Point coordinates as
        [longitude, latitude]; the previous version emitted
        [latitude, longitude], placing markers at transposed positions.
    """
    return [
        {
            "type": "Feature",
            "properties": {"name": point.name},
            # RFC 7946: position order is longitude first, then latitude.
            "geometry": {"type": "Point",
                         "coordinates": [point.longitude, point.latitude]},
        }
        for point in coords
    ]
def count_trees(map_input):
    """Count the trees ('#') hit on a right-3/down-1 toboggan run.

    Args:
        map_input: Map data read from file — a list of equal-length rows
            whose pattern repeats horizontally.

    Returns:
        total number of trees hit
    """
    trees_hit = 0
    column = 0
    width = len(map_input[0])
    for row in map_input:
        # Check for a tree before moving.
        if row[column] == '#':
            trees_hit += 1
        column += 3
        # The pattern repeats to the right; wrap the column back into range.
        if column >= width:
            column -= width
    return trees_hit
def result_passes_filter(summary, param_name, param_min, param_max):
    """Check that every named parameter in *summary* lies within its bounds.

    Args:
        summary: dict of parameter name -> value.
        param_name: parameter names to test.
        param_min: per-parameter lower bounds (inclusive).
        param_max: per-parameter upper bounds (inclusive).

    Returns:
        True when each parameter present in *summary* is between its bounds;
        parameters absent from *summary* are ignored.

    Note:
        Fixed: the previous version accumulated the checks with ``*=`` on
        booleans, returning 0/1 ints and never short-circuiting; this uses
        real boolean logic with an early exit (bool remains compatible with
        the old int truthiness).
    """
    for name, lower, upper in zip(param_name, param_min, param_max):
        if name in summary and not (lower <= summary[name] <= upper):
            return False
    return True
def chat_participants(participants_dict):
    """Extract participant names.

    *participants_dict* is a list of {"name": ...} dicts, e.g.::

        "participants": [
            {"name": "Akanksha Priyadarshini"},
            {"name": "Saurav Verma"}
        ]
    """
    names = []
    for participant in participants_dict:
        # .get() yields None when a participant entry lacks a name.
        names.append(participant.get("name"))
    return names
def is_cat(label):
    """Return True iff *label* is a category (non-terminal).

    Categories are marked with an initial '$'.
    """
    return label.startswith('$')
def octets_from_int(i: int, bytesize: int) -> bytes:
    """Return the big-endian octet sequence of *i*, *bytesize* bytes long.

    Follows SEC 1 v.2, section 2.3.7.
    """
    return i.to_bytes(bytesize, byteorder='big')
def get_first_author_last_name(author_string):
    """Return the first author's last name from a normalised author string.

    Authors are ';'-separated, each formatted 'Last, First'. Returns None
    for an empty or falsy input.
    """
    if not author_string:
        return None
    first_author = author_string.split(';')[0]
    return first_author.split(",")[0]
def _get_storage_account_name(storage_endpoint): """ Determines storage account name from endpoint url string. e.g. 'https://mystorage.blob.core.windows.net' -> 'mystorage' """ # url parse package has different names in Python 2 and 3. 'six' package works cross-version. from six.moves.urllib.parse import urlparse # pylint: disable=import-error return urlparse(storage_endpoint).netloc.split('.')[0]
def remove_revoked_deprecated(stix_objects):
    """Drop revoked or deprecated objects from data-source query results.

    Both properties may be absent from the JSON data; .get() defaults them
    to False. The `is False` comparison is preserved deliberately: only an
    exact False (or absence) keeps an object.
    """
    kept = []
    for obj in stix_objects:
        if obj.get("x_mitre_deprecated", False) is False and obj.get("revoked", False) is False:
            kept.append(obj)
    return kept
def findXYOffset(coords):
    """Old implementation of cell-to-cm conversion.

    Args:
        coords: (col, row) cell coordinates as a 2-tuple. (Renamed from
            'tuple', which shadowed the builtin; positional calls are
            unaffected.)

    Returns:
        (x_offset, y_offset) tuple in cm.
    """
    home_x, home_y = -4.5, 4.5
    base_x, base_y = -9, 9
    # Scales measure the displacement from the home cell at (2, 2).
    x_scale = 2 - coords[0]
    y_scale = 2 - coords[1]
    x_offset = home_x + base_x * x_scale
    y_offset = home_y + base_y * y_scale
    unit_scale = 1  # use this for changing units from cm
    return (x_offset * unit_scale, y_offset * unit_scale)