content
stringlengths
42
6.51k
def get_wordnet_cascade(ling_relations):
    """Return the highest-priority linguistic relation from a list.

    Priority order: copy > inflection > derivation > synonym > antonym >
    hypernym > similar > hyponym > sister* > cousin* > None.  (The original
    docstring omitted 'similar', which the code has always handled.)  For
    the 'sister'/'cousin' prefix families, the first matching entry in the
    list's original order is returned.

    :param ling_relations: list of linguistic relation strings
    :return: the selected relation string, or None if nothing matches
    """
    # Exact-match relations, checked in priority order.
    for relation in ('copy', 'inflection', 'derivation', 'synonym',
                     'antonym', 'hypernym', 'similar', 'hyponym'):
        if relation in ling_relations:
            return relation
    # Prefix families: first matching entry wins, preserving input order.
    for prefix in ('sister', 'cousin'):
        for candidate in ling_relations:
            if candidate.startswith(prefix):
                return candidate
    return None
def vaporHeatCapacity(T, hCP):
    """Evaluate the vapor heat-capacity correlation at temperature T.

    vaporHeatCapacity (J/mol) = A + B*T + C*T^2 + D*T^3 + E*T^4

    Parameters
        T, temperature in K
        hCP, regression coefficients A=hCP[0] ... E=hCP[4]
    Returns
        heat capacity in J/mol at T
    """
    coefficients = (hCP[0], hCP[1], hCP[2], hCP[3], hCP[4])
    return sum(coeff * T ** power for power, coeff in enumerate(coefficients))
def sumarSensores(readings):
    """Determine the weight of the user on the board in hundredths of a
    kilogram by summing the four corner-sensor readings."""
    corners = ('right_top', 'right_bottom', 'left_top', 'left_bottom')
    return sum(readings[corner] for corner in corners)
def descending_score(results):
    """Return *results* sorted from highest to lowest second element."""
    def score_of(entry):
        return entry[1]
    return sorted(results, key=score_of, reverse=True)
def reverse_dots(name):
    """Reverse dotted IP addresses or domain names.

    Example:
    >>> reverse_dots('192.168.0.145')
    '145.0.168.192'
    >>> reverse_dots('email.example.com')
    'com.example.email'
    """
    return '.'.join(reversed(name.split('.')))
def insertion_sort(numbers):
    """Return a new list with the values of *numbers* sorted min-first.

    The original used IndexError exceptions as control flow inside nested
    loops and rescanned the growing list from index 0 after every insert;
    rewritten as a plain insertion sort.  Also generalized: any iterable
    works (the original required a sized sequence for len()).

    :param numbers: iterable of mutually comparable values
    :return: a new ascending-sorted list; the input is not modified
    """
    ordered = []
    for value in numbers:
        position = 0
        # Find the first element >= value; inserting there keeps equal
        # values in their original relative order (stable, like before).
        while position < len(ordered) and ordered[position] < value:
            position += 1
        ordered.insert(position, value)
    return ordered
def htmlentities(text):
    """Escape chars in the text for HTML presentation.

    Args:
        text (str): subject to replace

    Returns:
        str: result of replacement
    """
    # '&' must be handled first so entities produced later are not
    # double-escaped.
    replacements = (
        ("&", "&amp;"),
        (">", "&gt;"),
        ("<", "&lt;"),
        ("'", "&#39;"),
        ('"', "&quot;"),
    )
    escaped = text
    for raw, entity in replacements:
        escaped = escaped.replace(raw, entity)
    return escaped
def getChargeIndex(chargeFlags):
    """Return the 0-based indexes of the charge states selected by the user.

    Called from writeToCsv().  chargeFlags is a list of bools where the
    bool at index i denotes whether charge state i+1 is considered, so
    [True, False, False, False, True] (charges +1 and +5) yields [0, 4].

    :param chargeFlags: list of bools.
    :return: list of selected indexes (charge state minus 1).
    """
    selected = []
    for index, flag in enumerate(chargeFlags):
        if flag:
            selected.append(index)
    return selected
def get_entry_by_id(id_key, id_key_value, dict):
    """Return the first entry of *dict* whose value maps id_key to id_key_value.

    NOTE: the parameter name ``dict`` shadows the builtin; it is kept
    unchanged for backward compatibility with keyword callers.

    Raises:
        KeyError: if no entry matches.
    """
    for entry in dict.values():
        if entry[id_key] == id_key_value:
            return entry
    raise KeyError(id_key + " of " + id_key_value + " not found")
def naive_fib(iterations):
    """Naive exponential-time Fibonacci; example of a long-running function."""
    # Base case: fib(0) = 0, fib(1) = 1 (the argument itself).
    if 0 <= iterations <= 1:
        return iterations
    return naive_fib(iterations - 1) + naive_fib(iterations - 2)
def sum_weighted_losses(losses, weights):
    """Sum a dict of scalar losses, optionally weighting each term.

    Args:
        losses: dict mapping name -> scalar loss (tensor or number).
        weights: dict mapping the same names -> scalar weight, or None
            for a plain unweighted sum.

    Returns:
        The (weighted) sum of all losses; 0 for an empty dict.
    """
    total = 0
    # The None check is loop-invariant, so it is hoisted out of the loop
    # (the original re-tested `weights is None` on every iteration).
    if weights is None:
        for value in losses.values():
            total = total + value
    else:
        for key, value in losses.items():
            total = total + weights[key] * value
    return total
def get_key(rpc):
    """Return the key of the RPC: its first double-quoted argument."""
    start = rpc.find('"') + 1
    end = rpc.find('"', start)
    return rpc[start:end]
def ticket_matuation_to_dict(mutation: str, meta_data: dict):
    """Parse one comma-separated IPFS batch mutation line into a dict.

    Used in run_mutations.  Resulting keys:
        statehash_tx / previous_statehash: receipts of state n / n-1
        transition_type: type of mutation ('w' or 'f')
        transition_id: id of the mutation
        input_message: always the literal "null" here
        block_height / block_timestamp / transaction_hash / IPFS_hash:
            anchoring data copied verbatim from *meta_data*
    """
    fields = mutation.split(",")
    return {
        'statehash_tx': fields[0],
        'previous_statehash': fields[1],
        'transition_type': fields[2],
        'transition_id': fields[3],
        'input_message': "null",
        'block_height': meta_data["block_height"],
        'block_timestamp': meta_data["block_timestamp"],
        'transaction_hash': meta_data["transaction_hash"],
        'IPFS_hash': meta_data["IPFS_hash"],
    }
def filter_out(bad_chars, byte_array):
    """Remove every occurrence of each bad character from *byte_array*.

    The original removed only the FIRST occurrence of each value
    (``list.remove`` semantics); fixed to purge all occurrences, which is
    what "filter out bad characters" requires.

    Args:
        bad_chars: iterable of hex strings (e.g. ['0x00', '0x0a']).
        byte_array: mutable sequence of int byte values; modified in place.

    Returns:
        The same *byte_array*, with all bad byte values removed.
    """
    for token in bad_chars:
        value = int(token, 16)
        while value in byte_array:
            byte_array.remove(value)
    return byte_array
def application_error(e):
    """Return a custom 500 error: (message body, status code) pair."""
    return format(e), 500
def calc_fuel(mass):
    """Fuel required for a module of the given mass: floor(mass / 3) - 2.

    >>> calc_fuel(12)
    2
    >>> calc_fuel(14)
    2
    >>> calc_fuel(1969)
    654
    >>> calc_fuel(100756)
    33583
    """
    thirds = mass // 3
    return thirds - 2
def get_zone_from_bucket_location(bucket_location):
    """Get zone from bucket location (appends the '-c' zone suffix).

    Args:
        bucket_location (str): Bucket location

    Returns:
        str: Zone for that given bucket location
    """
    return f'{bucket_location}-c'
def generate_symbol_definitions_direct(symbols, prefix):
    """Generate a newline-joined listing of definitions that point to
    real symbols (one generate_rename_direct() line per symbol)."""
    return "\n".join(sym.generate_rename_direct(prefix) for sym in symbols)
def look_index(offset, name_index, names):
    """Return the previous/next 'name|value' pair in *names* using *offset*.

    :return: (name, int value) at name_index + offset, or (None, None)
        when that position is out of range.
    """
    target = name_index + offset
    if not (0 <= target < len(names)):
        return None, None
    label, raw_value = names[target].split('|')
    return label, int(raw_value)
def common_text(stringlist, kind='prefix'):
    """
    For a list of strings find common prefix or suffix, returns None if no
    common substring of kind is not 'prefix' or 'suffix'

    :param stringlist: a list of strings to test
    :param kind: string, either 'prefix' or 'suffix'
    :return: the common prefix/suffix string; None when the common part
        equals the whole first entry (nothing distinguishing), and also
        the unshrunk first entry if *kind* is neither 'prefix' nor
        'suffix' falls through to the final comparison
    """
    # Candidate starts as the whole first string and is only ever shortened.
    substring = stringlist[0]
    if kind == 'prefix':
        # loop around strings in list (except first)
        for _str in stringlist[1:]:
            # while substring is not equal in first and Nth shorten
            # (drop one character from the END each time)
            while _str[:len(substring)] != substring and len(substring) != 0:
                substring = substring[:len(substring)-1]
            # test for blank string
            if len(substring) == 0:
                break
    elif kind == 'suffix':
        # loop around strings in list (except first)
        for _str in stringlist[1:]:
            # while substring is not equal in first and Nth shorten
            # (drop one character from the FRONT each time)
            while _str[-len(substring):] != substring and len(substring) != 0:
                substring = substring[1:]
            # test for blank string
            if len(substring) == 0:
                break
    # if prefix or suffix is the same as all in list return None - there
    # is no prefix
    if substring == stringlist[0]:
        return None
    # else return the substring
    else:
        return substring
def get_overtime(row):
    """Whether or not the game was decided in overtime (either score
    column contains the 'OT' marker)."""
    return any('OT' in row[column] for column in ('Home Score', 'Away Score'))
def _check_and_coerce_cfg_value_type(value_a, value_b, full_key):
    """Checks that `value_a`, which is intended to replace `value_b` is of
    the right type. The type is correct if it matches exactly or is one of
    a few cases in which the type can be easily coerced.

    :param value_a: replacement value
    :param value_b: original config value whose type is authoritative
    :param full_key: dotted config key, used only in the error message
    :return: value_a, possibly coerced to value_b's type
    :raises ValueError: on an uncoercible type mismatch
    """
    # A None original imposes no type constraint.
    if value_b is None:
        return value_a
    # The types must match (with some exceptions)
    type_b = type(value_b)
    type_a = type(value_a)
    if type_a is type_b:
        return value_a
    # Exceptions: numpy arrays, strings, tuple<->list
    if isinstance(value_b, str):
        value_a = str(value_a)
    elif isinstance(value_a, tuple) and isinstance(value_b, list):
        value_a = list(value_a)
    elif isinstance(value_a, list) and isinstance(value_b, tuple):
        value_a = tuple(value_a)
    elif isinstance(value_a, str) and isinstance(value_b, list):
        # "[a,b,c]"-style strings become a list of raw comma-separated
        # substrings (no stripping or element-type conversion).
        if value_a.startswith('[') and value_a.endswith(']'):
            value_a = value_a[1:-1]
            value_a = value_a.split(',')
    else:
        # NOTE(review): in the collapsed source the attachment of this
        # else is ambiguous; reconstructed so any other mismatch raises
        # (yacs-style) -- confirm against the original file.
        raise ValueError(
            'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '
            'key: {}'.format(type_b, type_a, value_b, value_a, full_key)
        )
    return value_a
def count_substring(string, sub_string):
    """Count the (possibly overlapping) occurrences of sub_string in string.

    The original returned ``string.find(sub_string)`` -- the *index* of the
    first match, not a count -- and had a placeholder docstring; fixed to
    return the number of matches, scanning from every position so
    overlapping matches are counted.

    :param string: text to search in
    :param sub_string: pattern to count
    :return: int number of occurrences (0 if absent)
    """
    count = 0
    position = string.find(sub_string)
    while position != -1:
        count += 1
        position = string.find(sub_string, position + 1)
    return count
def read_file(file):
    """Reads (file), returns text as a string. Returns -1 on failure.

    The -1 failure sentinel is kept for backward compatibility, but the
    bare ``except`` is narrowed to the errors open()/read() actually
    raise, so programming errors are no longer silently swallowed.  The
    redundant explicit f.close() inside the with-block and the str()
    wrapper around read() (which already returns str) are removed.
    """
    try:
        with open(file, 'r') as handle:
            return handle.read()
    except (OSError, UnicodeDecodeError):
        return -1
def update_docstring_references(obj, ref="ref"):
    """
    Updates docstring reference names to strings including the function name.
    Decorator will return the same function with a modified docstring.
    Sphinx likes unique names - specifically for citations, not so much for
    footnotes.

    Parameters
    -----------
    obj : :class:`func` | :class:`class`
        Class or function for which to update documentation references.
    ref : :class:`str`
        String to replace with the object name.

    Returns
    -------
    :class:`func` | :class:`class`
        Object with modified docstring.
    """
    qualified = obj.__name__
    if hasattr(obj, "__module__"):
        qualified = "{}.{}".format(obj.__module__, qualified)
    obj.__doc__ = str(obj.__doc__).replace(ref, qualified)
    return obj
def envelope_to_scale(env):
    """Get scale info ('global' or 'regional') from an envelope's bbox size.

    An absolute bbox area >= 22500 (previously 32400) is classed 'global'.
    """
    # check bbox size
    width = env[2][0] - env[1][0]
    height = env[0][1] - env[1][1]
    area = abs(width * height)
    return "global" if area >= 22500 else "regional"
def pad_dataset(dataset, bs):
    """Pad *dataset* (by repeating its head) to a multiple of batch size.

    Fixes the already-divisible case: the original computed
    ``bs - n % bs``, which adds a FULL extra batch of *bs* records when
    len(dataset) is already a multiple of *bs*; ``(-n) % bs`` is 0 there.

    :param dataset: sequence of records
    :param bs: batch size (positive int)
    :return: a new list whose length is the next multiple of *bs*
    """
    n_records = len(dataset)
    n_padded = (-n_records) % bs
    padded = list(dataset)
    padded.extend(dataset[:n_padded])
    return padded
def _convert_to_a1(row_index=None, col_index=None): """ Convert a row and column index pair to A1 notation. At least 1 of row or column index is required. :type row_index: int :param row_index: 0-based row index :type col_index: int :param col_index: 0-based column index :rtype: str :return: A1 notation of row and column index pair """ if row_index is None and col_index is None: raise ValueError('Both row and column indexes are None') if (row_index is not None and row_index) < 0 or (col_index is not None and col_index < 0): raise ValueError('Negative index') if col_index is not None and col_index > 255: raise ValueError('Column index {} exceeds limits') # Row row_component = str(row_index + 1) if row_index is not None else '' # Column col_component = '' if col_index is not None: column_index_mapping = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] num_letters = len(column_index_mapping) first_letter_index = col_index // num_letters - 1 second_letter_index = col_index % num_letters if first_letter_index >= 0: col_component = column_index_mapping[first_letter_index] + column_index_mapping[second_letter_index] else: col_component = column_index_mapping[second_letter_index] return '{}{}'.format(col_component, row_component)
def _parse_leaf_node_line(line): """ Return the leaf value given the string representation of a leaf node. """ return float(line.split('=')[1])
def generate_tmpcontext_for_search(q, placeholder):
    """Build the template context dict for the search view."""
    return {
        'searchPlaceholder': placeholder,
        'q': q,
    }
def calculate_previous_version(version: str, is_point: bool) -> str:
    """Calculate the previous version to compare to.

    For an -rc-to-final comparison the previous version is the prior
    feature release (19.3.0 for 20.0.0, for example -- a new major rolls
    back to minor '3'); for point releases it is the same value with any
    pre-release suffix stripped.
    """
    release = version.split('-')[0] if '-' in version else version
    if is_point:
        return release
    parts = release.split('.')
    if parts[1] == '0':
        # Major boundary: previous major, last quarterly minor (.3).
        parts[0] = str(int(parts[0]) - 1)
        parts[1] = '3'
    else:
        parts[1] = str(int(parts[1]) - 1)
    return '.'.join(parts)
def next_key(basekey, keys):
    """Returns the next unused key for basekey in the supplied array.

    The first try is `basekey`, followed by `basekey-2`, `basekey-3`,
    etc until a free one is found.
    """
    if basekey not in keys:
        return basekey
    suffix = 2
    candidate = '{}-{}'.format(basekey, suffix)
    while candidate in keys:
        suffix += 1
        candidate = '{}-{}'.format(basekey, suffix)
    return candidate
def __newobj__(cls, *args):
    """
    Unpickles new-style objects.

    Creates an instance via ``cls.__new__`` (bypassing ``__init__``);
    conventionally referenced by ``__reduce__`` implementations so that
    pickled instances can be reconstructed -- TODO confirm the callers.
    """
    return cls.__new__(cls, *args)
def get_last_t(t_input):
    """Get the index of the fictitious dummy goal (last vertex + 1).

    Accepts an int (returns t_input + 1) or a non-empty list (returns its
    last element + 1); prints a warning and returns None otherwise.
    """
    if isinstance(t_input, int):
        return t_input + 1
    if isinstance(t_input, list):
        return t_input[-1] + 1
    print('Wrong type of input, accepts integer or list')
    return None
def infer_title(uri_string):
    """From a DBpedia resource URI (http://dbpedia.org/resource/Color_difference)
    return only the last bit, with underscores turned into spaces."""
    without_base = uri_string.replace("http://dbpedia.org/resource/", "")
    return without_base.replace("_", " ")
def find_possible_words(word: str, dictionary: list) -> list:
    """Return all dictionary words that could be formed from *word*:
    same first and last letter, and every letter drawn from *word*."""
    first, last = word[0], word[len(word) - 1]
    matches = []
    for candidate in dictionary:
        if not (candidate.startswith(first) and candidate.endswith(last)):
            continue
        if all(letter in word for letter in candidate):
            matches.append(candidate)
    return matches
def get_ipv6_range(numeric_netaddr, prefix_len):
    """
    Return the smallest and biggest possible IPv6 address of the specified
    network address (in numeric representation) and prefix length.
    """
    host_bits = 128 - prefix_len
    # Netmask: prefix_len one-bits followed by host_bits zero-bits.
    all_ones = 0xffffffffffffffffffffffffffffffff
    netmask = (all_ones >> host_bits) << host_bits
    first_addr = numeric_netaddr & netmask
    last_addr = first_addr | ((1 << host_bits) - 1)
    return first_addr, last_addr
def apply_f(f, *iters):
    """Apply *f* to matching 'bottom level' objects across mirrored nested
    list structures, returning the result in the same nested shape.

    All of *iters* must share an identical nesting structure.

    Raises:
        ValueError: if the nesting structure is inconsistent at any level.
    """
    nesting = [isinstance(item, list) for item in iters]
    if all(nesting):
        # Descend every structure in lock-step.
        return [apply_f(f, *group) for group in zip(*iters)]
    if any(nesting):
        raise ValueError("Inconsistency in nested list structure of arguments detected! Nested structures must be identical in order to apply functions over them")
    return f(*iters)
def get_linked_constellations(point_to_constellation, points_in_range):
    """Get the set of constellations connected to points_in_range
    (points without a mapped constellation are ignored)."""
    lookups = (point_to_constellation.get(p) for p in points_in_range)
    return {constellation for constellation in lookups if constellation is not None}
def uris_from_clique_dict(clique_dict):
    """Return list of all uris in a clique dictionary.

    Args:
        clique_dict (dict): dictionary of clique names (keys) each
            pointing to a list of uris

    Returns:
        list: list of all uris in the dictionary
    """
    uris = []
    for uri_list in clique_dict.values():
        uris.extend(uri_list)
    return uris
def run_intcode(program):
    """Run an Intcode program and return the resulting memory list.

    Instructions are read in groups of four: (opcode, src1, src2, dst).
    Opcode 1 stores data[src1] + data[src2] at dst, opcode 2 stores the
    product, opcode 99 halts; any other opcode prints an error and stops.
    The input list is copied, not modified.
    """
    data = list(program)
    pointer = 0
    while pointer < len(data):
        opcode = data[pointer]
        if opcode == 99:
            break
        if opcode == 1:
            data[data[pointer + 3]] = data[data[pointer + 1]] + data[data[pointer + 2]]
        elif opcode == 2:
            data[data[pointer + 3]] = data[data[pointer + 1]] * data[data[pointer + 2]]
        else:
            print("Problem with the Program")
            break
        pointer += 4
    return data
def map_bound(value, in_low, in_high, out_low, out_high):
    """Linearly map *value* from [in_low, in_high] to [out_low, out_high],
    clamping to the output bounds outside the input range."""
    if value <= in_low:
        return out_low
    if value >= in_high:
        return out_high
    # http://stackoverflow.com/a/5650012/574981
    return out_low + (
        (out_high - out_low) * (value - in_low) / (in_high - in_low)
    )
def concatenate_list_data(char_list):
    """Concatenate a translated character list into a single word.

    INPUT: Translated character list
    OUTPUT: A single string element that represents a word

    Uses str.join instead of repeated ``+=`` concatenation, which is
    quadratic in the worst case.
    """
    return ''.join(str(element) for element in char_list)
def cleanup_community(community):
    """Return the nested "community" entry of the given community mapping.

    NOTE(review): despite the original wording ("return a new dictionary"),
    this does not build anything -- it returns the existing value stored
    under the "community" key, ready for output as JSON.
    """
    return community["community"]
def devilry_groupstatus(group):
    """
    Get the status for the given AssignmentGroup.

    The output is one of the following texts wrapped in some ``<span>``
    elements with appropriate css classes:

    - ``"waiting for feedback"``
    - ``"waiting for deliveries"``
    - ``"corrected"``

    .. note::
        We normally do not show the corrected status, but instead show the
        grade using :func:`.devilry_grade_full`, at least in listings and
        other places where space is premium.

    Assumes the AssignmentGroup queryset is annotated with:

    - :meth:`~devilry.apps.core.models.AssignmentGroupQuerySet.annotate_with_is_waiting_for_feedback`
    - :meth:`~devilry.apps.core.models.AssignmentGroupQuerySet.annotate_with_is_waiting_for_deliveries`
    - :meth:`~devilry.apps.core.models.AssignmentGroupQuerySet.annotate_with_is_corrected`

    Args:
        group: An :class:`devilry.apps.core.models.AssignmentGroup` object.
    """
    # This only builds the template context; presumably an inclusion-tag
    # template renders the actual status markup -- confirm the templatetag
    # registration in the enclosing module.
    return {
        'group': group
    }
def check_hyphen(name=""):
    """
    Replace every hyphen in *name* with a colon.

    The original did ``':'.join(name.split(':'))`` -- splitting and
    rejoining on ':' is a no-op that never touched the hyphens it was
    documented to replace; fixed to actually substitute '-' with ':'.

    :param name: input string.
    :return: <str> name with hyphens replaced by colons.
    """
    if '-' in name:
        return name.replace('-', ':')
    return name
def setdefault2(d, key, value):
    """
    Thin wrapper over ``dict.setdefault``: store *value* under *key* only
    if the key is absent, and return whatever ends up stored there.

    NOTE(review): the ``Unhashable``/``Hashable`` doctest helpers are not
    defined in this block -- presumably they live elsewhere in the original
    module; confirm before running these doctests.

    >>> d = {}
    >>> setdefault2(d, 1, 2)
    2
    >>> len(d)
    1
    >>> setdefault2(d, 1, 2)
    2
    >>> len(d)
    1
    >>> l = setdefault2(d, 2, [])
    >>> len(d)
    2
    >>> l.append(1)
    >>> setdefault2(d, 2, [])
    [1]
    >>> len(d)
    2
    >>> setdefault2(d, Unhashable(), 1)
    Traceback (most recent call last):
    TypeError: I am not hashable
    >>> h1 = setdefault2(d, Hashable(), 55)
    >>> len(d)
    3
    >>> h2 = setdefault2(d, Hashable(), 66)
    >>> len(d)
    3
    >>> d[Hashable()]
    55
    """
    return d.setdefault(key, value)
def is_number(value):
    """By duck typing, test if a value contains something recognisable as
    a number.

    Args:
        value: the value (string, int, float, etc) to test

    Returns:
        bool: True if usable as a number (via normalise_number())
    """
    try:
        float(value)
    except (TypeError, ValueError):
        # Narrowed from a bare except: these are the errors float()
        # raises for non-numeric input; anything else is a real bug and
        # should propagate.
        return False
    return True
def split_complex(list_complex):
    """
    Returns the real and imaginary parts in two separate lists.

    [list_re, list_im] = split_complex(list_complex)
    """
    reals = [z.real for z in list_complex]
    imags = [z.imag for z in list_complex]
    return (reals, imags)
def is_raid(device):
    """Detect if a device name comes from a RAID array (mdraid /dev/md*)."""
    raid_prefix = "/dev/md"
    return device.startswith(raid_prefix)
def normalize_scheme(scheme):
    """Normalize the scheme component by lower-casing it
    (URL schemes are case-insensitive per RFC 3986)."""
    return scheme.lower()
def get_interval_pps(complement_interval_pps, timestamp):
    """
    Gets the packet rate (pps) for a specific time interval.

    :param complement_interval_pps: an array of tuples (the last timestamp
        in the interval, the packet rate in the corresponding interval) --
        assumed ordered by timestamp, given the first-match scan below.
    :param timestamp: the timestamp at which the packet rate is required.
    :return: the corresponding packet rate (pps).
    """
    for entry in complement_interval_pps:
        interval_end, rate = entry[0], entry[1]
        if timestamp <= interval_end:
            return rate
    # Past the final interval: fall back to the last known rate.
    return complement_interval_pps[-1][1]
def enddate(acquired):
    """Returns the enddate from an acquired date string.

    Args:
        acquired (str): / separated date range in iso8601 format

    Returns:
        str: End date
    """
    parts = acquired.split('/')
    return parts[1]
def parser(list):
    """Convert a comma separated string into a list of stripped items.

    NOTE: the parameter name shadows the builtin ``list``; kept unchanged
    for backward compatibility with keyword callers.
    """
    return [item.strip() for item in list.split(',')]
def categories(categories):
    """Extract categories from stated categories of respondent.

    :param categories: string of categories separated by the exact
        two-character separator ", " (comma + space)
    :return: set of category strings
    """
    return set(categories.split(", "))
def extract_coordinates(user_information):
    """Extracts the turkers' coordinates: (latitude, longitude) pairs.

    Args:
        user_information: dict with the user information of the turkers
            (returned from extract_information_per_turker); each value
            must carry 'latitude' and 'longitude' entries.

    Returns:
        list of (latitude, longitude) tuples, in dict iteration order.

    Iterates .values() directly instead of keys plus repeated lookups.
    """
    return [(info['latitude'], info['longitude'])
            for info in user_information.values()]
def ListSimulationDirectories(bin_dir):
    """
    We count the number of directories that end in \\d{5}.BQ. This gives us
    the number of simulations that we ran, and also their names.

    :param bin_dir: directory to scan for simulation output folders
    :return: case-insensitively sorted folder names, each with a
        "/NNNNN.BQ/" subpath appended
    """
    import os
    import re
    # Keep only entries whose name contains five digits followed by ".BQ"
    # ('.' is an unescaped regex wildcard here, so "12345xBQ" also matches).
    dirList = [f for f in os.listdir(bin_dir) if re.search(r'(.*\d{5}.BQ)', f)]
    sortedList = sorted(dirList, key=str.lower)
    # Append a subpath numbered by the SORTED POSITION (i+1), not by the
    # five-digit number embedded in the directory name itself --
    # NOTE(review): these can disagree if numbering has gaps; confirm
    # this is intentional.
    for i in range(len(sortedList)):
        sortedList[i] += "/{:05g}.BQ/".format(i+1)
    return sortedList
def exclude(users, excluded_user):
    """
    Return a filtered list excluding the passed value.
    Used to render other Chat users that are not the current user.
    """
    return list(filter(lambda user: user != excluded_user, users))
def unlist_list(listoflist):
    """Flatten one level of nesting.

    e.g. [["James", "Jones"], ["Hackman", "Talisman", "Daboi"]] becomes
    ["James", "Jones", "Hackman", "Talisman", "Daboi"].

    :param listoflist: a list of lists of items
    :return: new flat list preserving order

    The nested for/append loops are replaced by the equivalent
    comprehension.
    """
    return [item for sublist in listoflist for item in sublist]
def clean_sentence(sentence, freq_words, tagset, join_sign):
    """
    Cleans sentence from rare words and words lacking a specific POS.

    :param sentence: list of tuples with form (word, lemma, pos, index)
    :param freq_words: collection of lemma+join_sign+POS-initial targets
        with sufficient frequency
    :param tagset: list of POS-tag prefixes to include
    :param join_sign: sign to join lemma + first char of POS
    :return: new list containing only the kept tuples

    Replaces the ``not x in y`` / ``True in [bool, ...]`` anti-idioms with
    ``in`` / ``any`` and inverts the condition to drop the empty branch.
    """
    cleaned = []
    for word, lemma, pos, index in sentence:
        target = lemma + join_sign + pos[0]  # lemma + first char of POS
        if target in freq_words and any(pos.startswith(tag) for tag in tagset):
            cleaned.append((word, lemma, pos, index))
    return cleaned
def _get_attrdict(obj, basename, excluded=None): """ get a dictionary of (basename-prefixed) attribute names/values, excluding the excluded names, internal stuff and callables. :param obj: the object to inspect :param basename: the prefix for the names in the result dictionary :param excluded: excluded attribute names, do not even touch [set or list] :return: dict names: values """ if excluded is None: excluded = set() d = {} names = set(dir(obj)) - set(excluded) for name in names: if not name.startswith('_'): try: attr = getattr(obj, name) if not callable(attr): d[basename + name] = attr except AttributeError: pass return d
def can_deploy(y, x, board, ship_length, orientation, valid_fields=None):
    """
    Variant of deploy_ship() that only checks whether a ship could be
    deployed somewhere, without placing it on the board.

    :param y: an integer, the row index.
    :param x: an integer, the column index.
    :param board: a 2D array of string cells representing the board.
    :param ship_length: length of the ship.
    :param orientation: "V" for vertical; anything else is horizontal.
    :param valid_fields: values a ship may be placed over (defaults to
        [''], i.e. only empty cells); any falsy value triggers the default.
    :return: True if placement is possible, False otherwise.
    """
    if not valid_fields:
        valid_fields = ['']
    if orientation == "V":
        # Vertical: the stern must stay within the row count...
        if y + ship_length - 1 >= len(board):
            return False
        # ...and every covered cell must hold an allowed value.
        for section in range(ship_length):
            if board[y + section][x] not in valid_fields:
                return False
    else:
        # Horizontal: same checks against the column count.
        if x + ship_length - 1 >= len(board[0]):
            return False
        for section in range(ship_length):
            if board[y][x + section] not in valid_fields:
                return False
    return True
def to_bool(v):
    """Utility to translate string values to bool.

    Accepts yes/true/t/y/1 and no/false/f/n/0 (case-insensitive);
    bools pass through unchanged.

    Raises:
        TypeError: for any unrecognised value (kept for backward
            compatibility with existing callers).
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise TypeError('Boolean value expected.')
def init_parameters(**kwargs):
    """Initialize parameters for smoothing in workers.

    Returns:
        dict with kwargs containing processing parameters for worker

    The manual key-by-key copy loop is replaced by the equivalent
    ``dict()`` copy.
    """
    return dict(kwargs)
def _bisect_points(a, x, lo=0, hi=None):
    """
    Locate the insertion point for x-coordinate *x* in *a*, a list of
    points sorted by their first element.

    Behaves like ``bisect.bisect_left`` keyed on ``item[0]``: returns the
    leftmost index where a point with first element *x* could be inserted
    while keeping *a* sorted.  (The previous docstring described an insert
    operation with parameters `p`/`no_repeat` that do not exist -- this
    function only searches; it never modifies *a*.)

    Optional args `lo` (default `0`) and `hi` (default `len(a)`) bound the
    slice of `a` to be searched.

    Source: https://github.com/python/cpython/blob/master/Lib/bisect.py
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    a_len = len(a)
    if hi is None:
        hi = a_len
    while lo < hi:
        mid = (lo + hi) // 2
        if a[mid][0] < x:
            lo = mid + 1
        else:
            hi = mid
    return lo
def sampleapi(text):
    """
    Stub for converting a spoken string to LaTeX via the Wolfram API.

    Currently a placeholder: returns the input prefixed with
    "sampleapi: " instead of real LaTeX.  A real implementation would
    return LaTeX code, with errors reported as \\text{Error}.

    parameters:
        text: the spoken string to be converted into latex code
    returns:
        latex code (placeholder string for now)
    """
    return "sampleapi: " + text
def dencrypt(s: str, n: int = 13) -> str:
    """
    ROT-n letter substitution cipher; with the default n=13 applying it
    twice restores the original text.
    https://en.wikipedia.org/wiki/ROT13

    >>> msg = "My secret bank account number is 173-52946 so don't tell anyone!!"
    >>> s = dencrypt(msg)
    >>> s
    "Zl frperg onax nppbhag ahzore vf 173-52946 fb qba'g gryy nalbar!!"
    >>> dencrypt(s) == msg
    True
    """
    rotated = []
    for ch in s:
        if "A" <= ch <= "Z":
            rotated.append(chr(ord("A") + (ord(ch) - ord("A") + n) % 26))
        elif "a" <= ch <= "z":
            rotated.append(chr(ord("a") + (ord(ch) - ord("a") + n) % 26))
        else:
            rotated.append(ch)
    return "".join(rotated)
def normalize_line(line, newline):
    """Return *line* with its CR/LF ending replaced by *newline*, if it had
    one; lines without a trailing line ending are returned unchanged."""
    stripped = line.rstrip('\n\r')
    return line if stripped == line else stripped + newline
def GetPlatformRestrictions(campaign_feed):
    """Get the Platform Restrictions for a given Campaign Feed.

    Args:
        campaign_feed: the Campaign Feed we are retrieving Platform
            Restrictions for.

    Returns:
        The Platform Restrictions for the given feed, or None when the
        feed's matching function encodes no device-platform condition.
    """
    platform_restrictions = None
    # Only AND-composed matching functions can carry a platform condition.
    if campaign_feed['matchingFunction']['operator'] == 'AND':
        for argument in campaign_feed['matchingFunction']['lhsOperand']:
            # Check if matchingFunction is EQUALS(CONTEXT.DEVICE, 'Mobile')
            if argument['value']['operator'] == 'EQUALS':
                request_context_operand = argument['value']['lhsOperand'][0]
                if (request_context_operand
                        and request_context_operand == 'DEVICE_PLATFORM'):
                    # This needs to be capitalized for
                    # ExtensionSettingPlatform.
                    platform_restrictions = argument['value']['rhsOperand'][0].upper()
    return platform_restrictions
def maak_eerste_letter_hoofdletter(s):
    """Upper-case the first letter of *s*, leaving the rest untouched
    (Dutch: "maak van de eerste letter een hoofdletter")."""
    first, rest = s[0], s[1:]
    return first.upper() + rest
def missing_digits(n):
    """Given a number a that is in sorted, increasing order, return the
    number of missing digits in n. A missing digit is a number between the
    first and last digit of a that is not in n.

    >>> missing_digits(1248) # 3, 5, 6, 7
    4
    >>> missing_digits(19) # 2, 3, 4, 5, 6, 7, 8
    7
    >>> missing_digits(1122) # No missing numbers
    0
    >>> missing_digits(123456) # No missing numbers
    0
    >>> missing_digits(3558) # 4, 6, 7
    3
    >>> missing_digits(35578) # 4, 6
    2
    >>> missing_digits(12456) # 3
    1
    >>> missing_digits(16789) # 2, 3, 4, 5
    4
    >>> missing_digits(4) # No missing numbers between 4 and 4
    0
    >>> from construct_check import check
    >>> # ban while or for loops
    >>> check(HW_SOURCE_FILE, 'missing_digits', ['While', 'For'])
    True
    """
    "*** YOUR CODE HERE ***"
    # Recursion is required: the assignment bans while/for loops (see the
    # construct_check doctest above).  help(x, last) walks the digits of x
    # right-to-left, where *last* is the digit immediately to the right of
    # x's last digit; each unequal adjacent pair contributes the count of
    # integers strictly between them (last - digit - 1).
    def help(x, last):
        if x == 0:
            return 0
        if x % 10 == last:
            return help(x // 10, x % 10)
        return help(x // 10, x % 10) + last - x % 10 - 1
    return help(n // 10, n % 10)
def get_user_from_context(context):
    """
    Get the user instance from the template context, if possible.

    Looks for a 'user' entry first, then falls back to request.user;
    returns None when the context has neither a `request` nor a `user`.
    """
    try:
        return context['user']
    except KeyError:
        pass
    try:
        request = context['request']
        return request.user
    except (KeyError, AttributeError):
        return None
def hash_table_size(item, tablesize):
    """
    A hashing technique that:
    1. Converts the characters in a string to their ordinal values
    2. Sums them
    3. Takes the remainder modulo tablesize

    item - string
    tablesize - number of slots in the table
    """
    return sum(ord(character) for character in item) % tablesize
def parse_date(date: str, date_format: str = "%Y-%m-%dT%H:%M:%S%z"):
    """
    Parse a date string and return the corresponding datetime object.

    :param date: String representing the date
    :param date_format: The format in which the date is written
        (https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior)
    :return: Corresponding datetime object
    """
    from datetime import datetime  # local import kept from the original
    parsed = datetime.strptime(date, date_format)
    return parsed
def translate_relaxation(quantity: str) -> str:
    """Convert names of dynamic quantities to their relaxations.

    Args:
        quantity: The name of the quantity to convert the name of.

    Returns:
        The translated name (unknown names pass through unchanged).
    """
    relaxation_names = {
        "alpha": "max_alpha_time",
        "gamma": "max_gamma_time",
        "com_struct": "tau_F",
        "msd": "diffusion_constant",
        "rot1": "tau_R1",
        "rot2": "tau_R2",
        "struct": "tau_S",
    }
    return relaxation_names.get(quantity, quantity)
def get_vector12():
    """Return the (hard-coded) 3-component vector with ID 12."""
    return [
        0.3805575,
        0.5000000,
        0.6194425,
    ]
def doc2features(doc, features):
    """
    Extract the features from the document.

    Each feature is either just the name or a tuple of (name, flag, type)
    where flag indicates if the feature should get selected.  Values are
    returned in their original representation.

    :param doc: mapping to extract the features from (via .get)
    :param features: list of feature names or 3-tuples
    :return: list of values for the selected features, in order
    """
    values = []
    for feature in features:
        if isinstance(feature, str):
            values.append(doc.get(feature))
        else:
            name, selected, _ftype = feature
            if selected:
                values.append(doc.get(name))
    return values
def _sample_names(files, kwargs): """ Make sample (or other) names. Parameters: ----------- files : list of string Typically a list of file paths although could be any list of strings that you want to make names for. If neither names nor define_sample_name are provided, then files is returned as is. kwargs : dict kwargs from another function. Can include the following keys with appropriate arguments. names : list of strings Names to use. Overrides define_sample_name if provided. define_sample_name : function that takes string as input Function mapping string to name. For instance, you may have a sample name in a file path and use a regex to extract it. """ if 'define_sample_name' not in kwargs.keys(): define_sample_name = lambda x: x else: define_sample_name = kwargs['define_sample_name'] if 'names' in kwargs.keys(): names = kwargs['names'] else: names = [define_sample_name(f) for f in files] assert len(names) == len(files) return names
def is_secure_scheme(scheme: str) -> bool:
    """Check if the given scheme is secure.

    Args:
        scheme: Scheme to check

    Returns:
        Whether the scheme is secure.
    """
    secure_schemes = frozenset({"tcps", "tcp4s", "tcp6s", "rss"})
    return scheme in secure_schemes
def restriction_filter(document, newIndexes, j=None):
    """
    The function performs the union of all the values of the different
    attributes into a single string, returning the final string.

    Args:
        document(line): The line of the rdd to parse.
            NOTE(review): mutated in place -- the joined string is written
            back into document[newIndexes[0]].
        newIndexes(list): List of indexes of the elements to union into a
            single string.
        j(int): Index of the dependent element that should be returned as
            the value of the key-value pair.
    """
    # Comma-join every listed field into the first listed field.
    for h in range(1, len(newIndexes)):
        document[newIndexes[0]] = str(
            document[newIndexes[0]]) + "," + str(document[newIndexes[h]])
    if j is not None:
        # Key-value pair: (joined string, dependent field j).
        return (document[newIndexes[0]], document[j])
    # Plain value (the parentheses do NOT make this a tuple).
    return (document[newIndexes[0]])
def GET_DATA(tag: str) -> dict:
    """GET_DATA: generate APDU for GET DATA command"""
    apdu = {'header': '80CA' + tag}
    apdu['Le'] = '00'
    return apdu
def expanded_form(num):
    """Expand a number into the sum of its digits' place values.

    For example, 70304 -> "70000 + 300 + 4". Zero digits are omitted.

    :param num: Integer
    :return: String representation of the number broken down into its
        place values
    :rtype: str
    """
    digits = str(num)
    n = len(digits)
    # Compute each digit's place value from its position directly instead
    # of decrementing a shared counter, which is easy to get wrong when
    # zero digits are skipped.
    parts = [
        str(int(d) * 10 ** (n - 1 - i))
        for i, d in enumerate(digits)
        if d != "0"
    ]
    return " + ".join(parts)
def get_iorient(orient):
    """Convert an orientation label to an integer index.

    Maps x, y, z, u (and a, b, c, u) to 0, 1, 2, 3; ``None`` also maps
    to 3. Any other value is returned unchanged (assumed to already be
    an index). This is a helper function for bound methods in class
    diffusivitiy and for determining thickness from raypath for
    wholeblocks.
    """
    if orient in ('x', 'a'):
        return 0
    if orient in ('y', 'b'):
        return 1
    if orient in ('z', 'c'):
        return 2
    # Use `is None` rather than `== None` (PEP 8 identity comparison).
    if orient == 'u' or orient is None:
        return 3
    return orient
def parse_id2_from_nir_sample_id(sampleId, harvestYear):
    """Extract the embedded ID2 value from the given sampleId.

    Expects sampleId to be in format similar to CW516GP2019WWGr, split by
    harvestYear (e.g. 2019). Returns ``None`` for non-string input or for
    harvest years before 2017.

    :rtype: int
    """
    if not isinstance(sampleId, str):
        return None
    if harvestYear < 2017:
        return None
    prefix = sampleId.upper().split(str(harvestYear))[0]
    for marker in ("GP", "CE", "CW"):
        prefix = prefix.replace(marker, "")
    return int(prefix)
def convert_v4_address_string_to_bits(ip_address):
    """Convert a dotted IPv4 address string to a 32-bit binary string.

    for example: '255.255.0.0' -> '11111111111111110000000000000000'
    """
    octets = ip_address.split('.')
    # format(..., '08b') zero-pads each octet to exactly 8 bits.
    return ''.join(format(int(octet), '08b') for octet in octets)
def _frequency_and_multiplier(freq_type): """Converts frequency type to MONTH and returns the computes multiplier.""" multiplier = 1 if freq_type == 5: freq_type = 3 return freq_type, multiplier
def _get_quant_var_name(var_name): """ get quantized var name """ return var_name + '.int8'
def complement_strand(sequence):
    """Return the complementary DNA strand for the given sequence.

    Ts complement As, and Cs complement Gs. If the sequence contains any
    character outside A/C/G/T, the function returns "Sequencing Error".

    :param sequence: A DNA sequence
    :return: the complement string for the DNA sequence
    """
    pairs = {"A": "T", "C": "G", "T": "A", "G": "C"}
    complemented = []
    for base in sequence:
        partner = pairs.get(base)
        if partner is None:
            return "Sequencing Error"
        complemented.append(partner)
    return "".join(complemented)
def indent_except_sections(text):
    """Indent text except if restructured text section definition lines."""
    ret_text = []
    # One line of lookahead: each line is buffered in previous_line so the
    # next iteration can decide whether it was a section title.
    previous_line = None
    for line in text.split('\n'):
        # An RST section adornment line consists of one repeated character
        # (len(set(line)) == 1), is at least as long as the title line it
        # underlines, and that title is more than one character long.
        if len(set(line)) == 1 \
                and previous_line is not None \
                and len(line) >= len(previous_line) \
                and len(previous_line) > 1:
            ret_text.append(previous_line)  # un-indented section title
            ret_text.append(line)  # un-indented section adornment
            # Clear the buffer: the adornment line must not become a title.
            previous_line = None
        else:
            if previous_line is not None:
                ret_text.append('    ' + previous_line)  # indentation
            previous_line = line
    # Flush the last buffered line; it cannot be a section title because no
    # adornment line follows it, so it is indented like ordinary text.
    if previous_line is not None:
        ret_text.append('    ' + previous_line)
    return '\n'.join(ret_text)
def is_scalar(obj):
    """Check if an object is a scalar (number), i.e., an int, a float or
    a complex.

    ``bool`` is explicitly excluded even though it subclasses ``int``.

    Parameters
    ----------
    obj
        Any python object.

    Returns
    -------
    bool
        True if argument is a scalar, False otherwise.
    """
    if isinstance(obj, bool):
        return False
    return isinstance(obj, (int, float, complex))
def txt_to_json(file):
    """Convert a newline-separated word list in a .txt file to JSON.

    Reads the file, drops empty lines, and returns a JSON document of
    the form ``{"words": [...]}``.

    :param file: path to a ``.txt`` file with one word per line
    :return: a valid JSON string with the words in an array
    :raises Exception: if the path does not end in ``.txt``
    """
    import json  # local import; module-level imports are outside this view

    # accept only files that end in .txt
    if not file.endswith('.txt'):
        raise Exception('File must be a .txt')
    # Context manager closes the handle (the original leaked it), and
    # json.dumps escapes quotes/backslashes so the output is always
    # valid JSON (hand-built concatenation was not).
    with open(file, 'r') as fh:
        words = [word for word in fh.read().split('\n') if word]
    return json.dumps({"words": words})
def flatten_list(x):
    """Flattens a nested list

    Parameters
    ----------
    x : list
        nested list of lists to flatten

    Returns
    -------
    x : list
        flattened input (a non-list input is wrapped in a one-element list)
    """
    if not isinstance(x, list):
        return [x]
    flat = []
    for item in x:
        flat.extend(flatten_list(item))
    return flat
def value_tuple(binding, value_var, graph_var, label_var):
    """Build the basic (value, uri, graph) element used in the templates.

    Follows the "label first" principle, i.e., everything should have a
    label: when a label binding is present, its value replaces the raw
    value. If uri is set, it is a URI. The graph indicates the dataset
    where this value is stated for the given property and the main
    (fixed) subject/object of this page.
    """
    raw = binding[value_var]
    uri = raw['value'] if raw['type'] == 'uri' else None
    if label_var in binding:
        display = binding[label_var]['value']
    else:
        display = raw['value']
    graph = binding[graph_var]['value'] if graph_var in binding else None
    return (display, uri, graph)
def _totuple(a): """ Converts a numpy array into nested tuples, so that each row of the array is a different tuple. E.g. a = [ [0 1 2 3] [4 5 6 7] ] out = ((0, 1, 2, 3), (4, 5, 6, 7)) """ try: return tuple(_totuple(i) for i in a) except TypeError: return a
def find_between(s, first, last):
    """Find between strings from a given string and a start and end string.

    Returns a single space when either delimiter is missing.
    """
    try:
        lo = s.index(first) + len(first)
        hi = s.index(last, lo)
    except ValueError:
        return " "
    return s[lo:hi]
def format_size(size):
    """Format an arbitrary amount of bytes as a printable string.

    For example 1024 will be converted to 1.0 kB.

    :param size: an amount of bytes
    :return: a string that's ready to be printed representing
        approximately the amount of bytes in a human readable way.
    """
    scales = ["bytes", "kB", "MB", "GB", "TB", "PB"]
    count = 0
    # Bound the loop by the scale list so absurdly large inputs cap at PB
    # instead of raising IndexError (the original `while 1 == 1` did not).
    while size > 1024.0 and count < len(scales) - 1:
        size /= 1024.0
        count += 1
    return str(round(size, 1)) + " " + scales[count]
def c_ref(condition_name):
    """The intrinsic function Condition used to reference a named condition

    When you refer to a condition in another condition or associate the
    condition with a resource, you use the Condition: key. For the Fn::If
    function, you only need to specify the condition name.

    Args:
        condition_name: The name of the condition you want to reference.

    Returns:
        * A reference to the named condition
    """
    return dict(Condition=condition_name)
def config_lower_get(tgt_dict, query, default=None):
    """Case insensitive dict get.

    :param tgt_dict: Dictionary
    :param query: String to look for key from
    :param default: Default value to return if not found.
    :return: Value in dict if found, otherwise default
    """
    lowered = {key.lower(): val for key, val in tgt_dict.items()}
    value = lowered.get(query.lower(), default)
    # Preserve the original quirk: a stored None also falls back to default.
    return default if value is None else value
def count_bots_on_grid(bots, x, y, z, grid_size):
    """Count bots within the grid_size.

    A bot counts when its distance to (x, y, z), minus its radius,
    floor-divided by grid_size is <= 0.
    """
    return sum(
        1
        for bot in bots
        if (bot.distance_to(x, y, z) - bot.radius) // grid_size <= 0
    )
def gera_chave1(letras):
    """Function name: gera_chave1

    From a given tuple, generates a key that can be used to
    encrypt/decrypt strings, based on the Polybius square.

    Input:
        letras: tuple of 25 characters
    Output:
        chave: tuple of 5 elements, each a tuple of 5 characters
    Functions (external to Python) used: none
    """
    chave = ()
    for row in range(5):
        # Each row of the Polybius square is the next run of 5 characters.
        chave += (letras[row * 5:row * 5 + 5],)
    return chave