content
stringlengths
42
6.51k
def get_rotor_dirn(kk):
    """Return spin directions for the 8 rotors of a 4-4 tilt-wing layout.

    kk selects one of 7 predefined layouts (kk=3 is the best known one).
    Any other value falls back to layout 3 with a warning.

    :param kk: layout selector, integer 1..7
    :return: list of 8 ints, each +1 (CCW) or -1 (CW)
    """
    layouts = {
        1: [ 1,  1, -1, -1, -1, -1,  1,  1],  # Option 1
        2: [ 1, -1,  1, -1, -1,  1, -1,  1],  # Option 2
        3: [-1,  1, -1,  1,  1, -1,  1, -1],  # Option 3 (best)
        4: [-1,  1, -1,  1, -1,  1, -1,  1],  # Option 2 (mirrored)
        5: [-1,  1,  1, -1, -1,  1,  1, -1],  # Option 5
        6: [-1,  1,  1, -1,  1, -1, -1,  1],  # Option 6
        7: [-1, -1,  1,  1,  1,  1, -1, -1],  # Option 4
    }
    if kk not in layouts:
        print('unknown value of kk: defaulting to best known layout')
        return layouts[3]
    return layouts[kk]
def read_hhmmss(field: str) -> int:
    """Parse a "HH:MM:SS" field into microseconds since midnight.

    An empty string yields 0.
    """
    if field == "":
        return 0
    hours = int(field[0:2])
    minutes = int(field[3:5])
    seconds = int(field[6:8])
    # Seconds since midnight, scaled to microseconds.
    return (hours * 3600 + minutes * 60 + seconds) * 1000000
def validate_file(file):
    """Validate that the file exists and is a proper puzzle file.

    Preemptively performs the checks done in the input loop of
    sudoku_solver.py: the file must contain exactly 81 puzzle
    characters (digits or '.') and at least 17 clues.

    :param file: name of file to validate
    :return: True if the file passes all checks, False if it fails
    """
    try:
        # 'with' guarantees the handle is closed; the original leaked it.
        with open(file) as open_file:
            file_contents = open_file.read()
    except OSError:
        print('File {} not found.'.format(file))
        return False
    puzzle_string = ''.join(
        char for char in file_contents if char.isdigit() or char == '.')
    if len(puzzle_string) != 81:
        print('{} in incorrect format.\nSee README.md for accepted puzzle formats.'.format(file))
        return False
    # '.' and '0' both denote empty cells; everything else is a clue.
    num_clues = sum(1 for char in puzzle_string if char not in '.0')
    if num_clues < 17:
        print('{} is an unsolvable puzzle. It has {} clues.\n'
              'There are no valid sudoku puzzles with fewer than 17 clues.'.format(file, num_clues))
        return False
    return True
def _role_selector(role_arn, roles): """ Select a role base on pre-configured role_arn and Idp roles list """ chosen = [r for r in roles if r['RoleArn'] == role_arn] return chosen[0] if chosen else None
def env_step(action):
    """Advance the random-walk environment by one step.

    Arguments
    ---------
    action : int
        The action taken by the agent in the current state.  Here the
        action value is used directly as the next position (see the
        assignment below) — TODO confirm this is the intended contract,
        since the commented-out code suggests a displacement model.

    Returns
    -------
    result : dict
        Keys {reward, state, isTerminal}: reward is +1 when the walk
        reaches position >= 1001, -1 when it reaches position <= 1,
        otherwise 0; state is the updated position.
    """
    global world, current_position
    is_terminal = False
    reward = 0
    # The position is replaced wholesale by the action value.
    current_position = action  # update
    if current_position >= 1001:  # finish on right with +1 reward
        reward = 1
        is_terminal = True
    elif current_position <= 1:  # finish on left with -1 reward
        reward = -1
        is_terminal = True
    result = {"reward": reward, "state": current_position, "isTerminal": is_terminal}
    return result
def groupLevel(level):
    """Map a level string to its (CEFR group, group index) pair.

    Parameters:
        level: string, the level to convert ('1'..'15'; anything else
               falls through to the last group)

    Returns:
        tuple (group name, group index), e.g. ('A1', 0)
    """
    mapping = [
        (('1', '2', '3'), ('A1', 0)),
        (('4', '5', '6'), ('A2', 1)),
        (('7', '8', '9'), ('B1', 2)),
        (('10', '11', '12'), ('B2', 3)),
        (('13', '14', '15'), ('C1', 4)),
    ]
    for levels, group in mapping:
        if level in levels:
            return group
    return 'C2', 5
def maplookup(l, lookup):
    """Look up every element of sequence *l* in the mapping *lookup*.

    Raises KeyError if an element is missing from *lookup*.
    """
    values = []
    for item in l:
        values.append(lookup[item])
    return values
def every_nth(L, n=1):
    """ (list, int) -> list

    Precondition: 0 <= n < len(L)

    Return a list containing every nth item of L, starting at index 0.

    >>> every_nth([1, 2, 3, 4, 5, 6], n=2)
    [1, 3, 5]
    >>> every_nth([1, 2, 3, 4, 5, 6], 3)
    [1, 4]
    >>> every_nth([1, 2, 3, 4, 5, 6])
    [1, 2, 3, 4, 5, 6]
    """
    # Extended slicing expresses "every nth item from index 0" directly;
    # list() keeps the return type a list even for non-list sequences.
    return list(L[::n])
def human_readable(kb):
    """Render a size given in KiB as a human readable string.

    Picks the smallest unit (KiB/MiB/GiB/TiB) whose magnitude keeps the
    value below 1024, e.g. 2048 -> '2.00 MiB'.

    :param kb: size in KiB (may be negative)
    :return: formatted string like '3.50 GiB'
    """
    # Renamed from 'format' — the original shadowed the builtin.
    fmt = "%3.2f %s"
    for unit in ('KiB', 'MiB', 'GiB'):
        # abs() is equivalent to the original two-sided comparison.
        if abs(kb) < 1024.0:
            return fmt % (kb, unit)
        kb /= 1024.0
    return fmt % (kb, 'TiB')
def join_ints(*ints):
    """Join the given ints into an underscore-separated string.

    >>> join_ints(1, 2, 3)
    '1_2_3'
    """
    return '_'.join(str(i) for i in ints)
def getLineMatch(search, text):
    """Return the index of the first line of *text* containing *search*.

    Returns 0 when no line matches (note: indistinguishable from a match
    on the first line — preserved from the original contract).
    """
    for lineno, line in enumerate(text.splitlines()):
        if search in line:
            return lineno
    return 0
def normalize_none_to_empty(text):
    """Return *text* unchanged, or '' when it is None."""
    if text is None:
        return ""
    return text
def get_update_url(d_included, base_url):
    """Extract the post URL from an "included" dict.

    :param d_included: dict as returned by res.json().get("included", {})
    :param base_url: site URL
    :return: the post url; '' when the urn key is missing, the string
             'None' when d_included is not subscriptable (preserved
             quirk of the original contract)
    """
    try:
        urn = d_included["updateMetadata"]["urn"]
    except KeyError:
        return ""
    except TypeError:
        # NOTE(review): returns the literal string 'None', not None —
        # callers appear to depend on this; confirm before changing.
        return "None"
    return f"{base_url}/feed/update/{urn}"
def secs_to_readable(secs: int):
    """Format a duration in seconds as 'XhYmZs', omitting leading
    zero units (0 seconds yields the empty string).

    >>> secs_to_readable(100)
    '1m40s'
    """
    secs = round(secs)
    hours, remainder = divmod(secs, 3600)
    minutes, seconds = divmod(remainder, 60)
    parts = []
    if hours:
        parts.append(str(hours) + "h")
    if hours or minutes:
        parts.append(str(minutes) + "m")
    if hours or minutes or seconds:
        parts.append(str(seconds) + "s")
    return "".join(parts)
def tweet_id_helper(tweet_id):
    """Strip surrounding single quotes from a tweet id string.

    Some ids arrive as "'<18 digits>'" (length 20); those are unquoted,
    anything else is returned unchanged.

    :param tweet_id: tweet id in string format
    """
    if len(tweet_id) != 20:
        return tweet_id
    return tweet_id.split("'")[1]
def combine_sequences(vsequences, jsequences):
    """Pairwise-combine V and J sequences into putative germline sequences.

    Keys of the result are '<species>_<v allele>_<j allele>' with spaces
    replaced by underscores; values are the concatenated sequences.
    """
    combined = {}
    for vkey, vseq in vsequences.items():
        vspecies, vallele = vkey
        for jkey, jseq in jsequences.items():
            _, jallele = jkey
            label = ("%s_%s_%s" % (vspecies, vallele, jallele)).replace(" ", "_")
            combined[label] = vseq + jseq
    return combined
def YilmIndexVector(i, l, m):
    """Index into a 1D array of spherical harmonic coefficients.

    Parameters
    ----------
    i : integer
        1 for the cosine coefficient cilm[0,:,:], 2 for the sine
        coefficient cilm[1,:,:].
    l : integer
        Spherical harmonic degree.
    m : integer
        Angular order.

    Returns
    -------
    index : integer
        l**2 + (i - 1) * l + m.
    """
    offset = (i - 1) * l
    return l ** 2 + offset + m
def get_bandwidth(width, threshold):
    """Approximate the rows-per-band minimizing the banding error for a
    target similarity threshold.

    :param width: bands * rows (elements in the signature)
    :param threshold: target threshold, (1/bands) ** (1/rows)
    :returns: number of rows per band
    :rtype: int
    """
    best_rows = width
    smallest_err = float("inf")
    for rows in range(1, width + 1):
        try:
            bands = 1. / (threshold ** rows)
        except ZeroDivisionError:
            # threshold == 0 makes the model degenerate; keep best so far.
            return best_rows
        error = abs(width - bands * rows)
        if error < smallest_err:
            best_rows = rows
            smallest_err = error
    return best_rows
def temperature(cell):
    """Return the temperature (degrees Celsius) for integer index *cell*.

    Raises KeyError for unknown cells.
    """
    lookup = {
        0: 21.0,
    }
    return lookup[cell]
def parse_gtest_cases(out):
    """Return the flattened, sorted list of test cases in the executable.

    Expected input format (fixture line, then 2-space-indented cases):

        TestFixture1
          TestCase1
          TestCase2

    Parsing stops at the first empty line or a 'YOU HAVE ...' banner.
    Sorting makes the result independent of link order.
    """
    tests = []
    fixture = None
    for line in out.splitlines():
        if not line:
            break
        if line.startswith(' '):
            case = line[2:]
            if case.startswith('YOU HAVE'):
                # 'YOU HAVE foo bar' banner means we're done.
                break
            assert ' ' not in case
            tests.append(fixture + case)
        else:
            fixture = line
    return sorted(tests)
def get_okta_group_name(prefix, account_id, role_name):
    """Build the Okta group name for an account.

    Format: '<prefix>_<account_id>_<role_name>'.  Also logs the result.
    """
    group_name = "_".join([prefix, account_id, role_name])
    print(f"get_okta_group_name() result is {group_name}")
    return group_name
def DESCRIBE(parent, r):
    """Return the functions/parameters description of a module.

    :param parent: owner exposing a .robot with a describe() method
    :param r: request args; must contain exactly one module name
    :return: description string, or '' when r is not a single module
    """
    if len(r) != 1:
        return ''
    return parent.robot.describe(r[0])
def xgcd(a, b):
    """Extended Euclidean algorithm.

    Returns (g, x, y) with g = gcd(a, b).  A negative x or y coefficient
    is shifted up by the original a (mod-phi style fix-up, so the Bezout
    identity may no longer hold after the shift — preserved behavior).
    """
    original_a = a
    x, y = 0, 1
    u, v = 1, 0
    while a != 0:
        q, r = divmod(b, a)
        b, a = a, r
        # Rotate the coefficient pairs exactly as the classic algorithm.
        x, y, u, v = u, v, x - u * q, y - v * q
    g = b
    if x < 0:  # shift a negative factor up by the original a
        x += original_a
    if y < 0:
        y += original_a
    return (g, x, y)
def cut(text, length=40, trailing=" (...)"):
    """Truncate *text* to *length* characters, appending *trailing* only
    when something was actually cut off.  Falsy input is returned as-is.
    """
    if not text:
        return text
    if len(text) <= length:
        return text
    return text[:length] + trailing
def remove_repetition_from_changelog(
        current_release_version_number, previous_release_version, changelog_lines):
    """Drop the current-release section from changelog lines (in place).

    Deletes everything from the last line mentioning
    'v<current_release_version_number>' up to (excluding) the last line
    mentioning 'v<previous_release_version>'.

    Args:
        current_release_version_number: str. The current release version.
        previous_release_version: str. The previous release version.
        changelog_lines: list(str). The lines of the changelog file.

    Returns:
        list(str). The (mutated) changelog lines.
    """
    start = 0
    end = 0
    for index, line in enumerate(changelog_lines):
        if 'v%s' % current_release_version_number in line:
            start = index
        if 'v%s' % previous_release_version in line:
            end = index
    changelog_lines[start:end] = []
    return changelog_lines
def indent(lines_str, indent_size=2):
    """Prefix every line of a multi-line string with a common indent.

    :param lines_str: multi-line string
    :param indent_size: number of spaces to indent by (default 2)
    :return: new string with the extra indent
    """
    # Local renamed (was 'indent', shadowing this function's own name).
    pad = " " * indent_size
    indented = ["{indent}{line}".format(indent=pad, line=ln)
                for ln in lines_str.splitlines()]
    return "\n".join(indented)
def get_primes(n):
    """Return the prime factorisation of integer *n* (with multiplicity).

    :param n: integer to factor
    :return: list of prime factors in non-decreasing order
    """
    factors = []
    candidate = 2
    # Trial division up to sqrt(n); any leftover > 1 is itself prime.
    while candidate * candidate <= n:
        while n % candidate == 0:
            factors.append(candidate)
            n //= candidate
        candidate += 1
    if n > 1:
        factors.append(n)
    return factors
def _get_username(user): """ Return user's username. ``user`` can be standard Django User instance, a custom user model or just an username (as string). """ value = None # custom user, django 1.5+ get_username = getattr(user, 'get_username', None) if get_username is not None: value = get_username() if value is None: # standard User username = getattr(user, 'username', None) if username is not None: value = username else: # assume user is just an username value = user return value
def sift(datum, sieve):
    """Recursively filter *datum* using the Boolean *sieve* dict.

    Only keys present in the sieve survive; nested dicts and lists are
    sifted recursively, and scalar values are kept only when they
    compare equal to True.

    :param datum: the dict to filter
    :param sieve: dict indicating which fields to keep
    :return: the filtered dict
    """
    kept = {}
    for field, value in datum.items():
        if field not in sieve:
            # Not mentioned in the sieve at all — drop it.
            continue
        if isinstance(value, dict):
            kept[field] = sift(value, sieve[field])
        elif isinstance(value, list):
            kept[field] = [sift(entry, sieve[field]) for entry in value]
        elif value == True:  # deliberately equality (matches True and 1), as before
            kept[field] = value
    return kept
def strip_leading_option_delim(args):
    """Drop a leading '--' argument if present.

    The '--' end-of-options marker bypasses docopt's option parsing.
    """
    if len(args) > 1 and args[0] == '--':
        return args[1:]
    return args
def add_suffix_to_parameter_set(parameters, suffix, divider='__'):
    """Return a dict of MyTardis parameters with '<divider><suffix>'
    appended to each name (e.g. to avoid clashes with identical
    parameters at the Run Experiment level).

    :param parameters: iterable of dicts with 'name' and 'value' keys
    :param suffix: suffix appended to each name
    :param divider: separator between name and suffix (default '__')
    """
    return {u'%s%s%s' % (p['name'], divider, suffix): p['value']
            for p in parameters}
def clean_string(string):
    """Collapse all whitespace runs (newlines included) into single
    spaces; None becomes the empty string.

    Used mainly to compact objectMasks written with IDE-friendly
    whitespace.

    :param string: the string to clean
    :returns: the string without extra whitespace
    """
    if string is None:
        return ''
    return " ".join(string.split())
def _parse_record(record): """Return a list of all top-level importable names from a distribution's RECORD""" python_names = set() for rec in record.splitlines(): # every entry is made up of three parts name = rec.rsplit(',', 2)[0] # RECORD paths are not required to be in the style of native paths name = name.split('/', 1)[0].split('\\', 1)[0] if not (name.startswith('..') # relative paths are typically data and anyway not findable or '-' in name # skip the .dist-info paths # there are other characters that would invalidate lookup ): if name.endswith(".py"): name = name[:-3] python_names.add(name) return python_names
def _get_containing_blocks(size, point): """Get 2x2 blocks containing point in open maze of size `size`. Unless point is on the boundary of the size x size square, there will be 4 containing 2x2 blocks. Args: size: Int. point: Tuple of ints (i, j). Must satisfy 0 <= i, j < size. Returns: block_inds: List of tuples. If (k, l) is in block_inds, then point (i, j) is in {(k, l), (k + 1, l), (k, l + 1), (k + 1, l + 1)}. """ i, j = point block_inds = [] if i > 0: if j > 0: block_inds.append((i - 1, j - 1)) if j < size - 1: block_inds.append((i - 1, j)) if i < size - 1: if j > 0: block_inds.append((i, j - 1)) if j < size - 1: block_inds.append((i, j)) return block_inds
def reconcile_countries_by_name(plot_countries, gdp_countries):
    """
    Inputs:
      plot_countries - dict mapping plot library country codes to names
      gdp_countries  - dict keyed by country names used in the GDP data

    Output:
      Tuple (dict, set): the dict maps codes from plot_countries to
      names present in gdp_countries; the set holds the codes whose
      names were not found in gdp_countries.
    """
    matched = {}
    missing_codes = set()
    for code, country_name in plot_countries.items():
        if country_name in gdp_countries:
            matched[code] = country_name
        else:
            missing_codes.add(code)
    return (matched, missing_codes)
def _get_total_page(total_size, page_size): """ Get total page """ quotient, remainder = total_size / page_size, total_size % page_size total_page = quotient + min(1, remainder) return total_page
def Question_f(text):
    """Wrap *text* in a LaTeX beamer 'Question' block.

    :param text: the "text" of this Question
    """
    pieces = ('\\begin{block}{Question}\n', text, '\n\\end{block}\n')
    return ''.join(pieces)
def git2pep440(ver_str):
    """Convert a git describe string to a PEP 440 conforming version.

    :param ver_str: git version description (tag[-commits-sha1][-dirty])
    :return: PEP 440 version string
    :raises RuntimeError: if the description has more than 3 dashes
    """
    parts = ver_str.split('-')
    count = len(parts)
    if count == 1:
        # Plain tag.
        return ver_str
    if count == 2:
        # tag-dirty
        return parts[0] + "+dirty"
    if count == 3:
        tag, commits, sha1 = parts
        return "{}.post0.dev{}+{}".format(tag, commits, sha1)
    if count == 4:
        tag, commits, sha1, _ = parts
        return "{}.post0.dev{}+{}.dirty".format(tag, commits, sha1)
    raise RuntimeError("Invalid version string")
def extension_from_parameters(params, framework):
    """Build the model-save filename annotation from hyperparameters.

    :param params: dict with keys activation, batch_size, drop, epochs,
        feature_subsample, dense (list of layer widths), scaling
    :param framework: framework name used as the prefix
    """
    pieces = [framework]
    pieces.append('.A={}'.format(params['activation']))
    pieces.append('.B={}'.format(params['batch_size']))
    pieces.append('.D={}'.format(params['drop']))
    pieces.append('.E={}'.format(params['epochs']))
    if params['feature_subsample']:
        pieces.append('.F={}'.format(params['feature_subsample']))
    for i, n in enumerate(params['dense']):
        if n:  # zero-width layers are omitted
            pieces.append('.D{}={}'.format(i + 1, n))
    pieces.append('.S={}'.format(params['scaling']))
    return ''.join(pieces)
def _setup_classnames(enabled, classname): """Converts an alias (as enabled in a ferenda.ini file) to a fully qualified class name. If the special alias "all" is used, return the class names of all enabled repositories. Note: a list is always returned, even when the classname ``'all'`` is not used. If a fully qualified classname is provided, a list with the same string is returned. :param enabled: The currently enabled repo classes, as returned by :py:func:`~ferenda.Manager._enabled_classes` :type enabled: dict :param classname: A classname (eg ``'ferenda.DocumentRepository'``) or alias (eg ``'base'``). The special value ``'all'`` expands to all enabled classes. :returns: Class names (as strings) :rtype: list """ # "w3c" => ['ferenda.sources.tech.W3Standards'] # "all" => ['ferenda.sources.tech.W3Standards', 'ferenda.sources.tech.RFC'] if classname == "all": # wonder why we filtered out ferenda.Devel -- does it cause problems with "./ferenda-build.py all [action]" ? # return [v for v in enabled.values() if v != 'ferenda.Devel'] return enabled.values() else: if classname in enabled: classname = enabled[classname] return [classname]
def eval_if_symbolic(obj, context, **options):
    """Evaluate *obj* if it is a symbolic expression, else return it.

    Args:
        obj: either a symbolic expression (anything with an `_eval`
            method), or any other value (no-op).
        context: passed to `obj._eval` when obj is symbolic.
        **options: passed to `obj._eval` when obj is symbolic.

    Returns:
        The evaluated result, or *obj* unchanged.
    """
    if hasattr(obj, '_eval'):
        return obj._eval(context, **options)
    return obj
def is_valid_scc_to_loop_mapping(data, max_loops_per_condensation):
    """Check that every SCC condensation maps to at most N loops.

    Prints a diagnostic for each offender but keeps checking them all.

    Args:
        data: parsed JSON dict with a 'condensations' list.
        max_loops_per_condensation: max loops per SCC condensation.

    Returns:
        True when all condensations are within the limit.
    """
    ok = True
    for entry in data['condensations']:
        loop_count = len(entry['loops'])
        if loop_count > max_loops_per_condensation:
            ok = False
            print(
                'condensation: {} mapping to {} loops exceeds allowed maximum'.
                format(entry['condensation'], loop_count))
    return ok
def _bop_and(obj1, obj2): """Boolean and.""" return bool(obj1) and bool(obj2)
def get_base_command_name(command_name: str) -> str:
    """Return the root command name from a compound field name.

    GPS/OBD result fields look like 'NMEA_GNGNS-lat', where
    'NMEA_GNGNS' is the root command and 'lat' the field; names without
    a '-' are returned unchanged.
    """
    # partition() yields the part before the first '-' (or the whole
    # string when '-' is absent) — same result as the original split.
    return command_name.partition("-")[0]
def clean_up_agents(agents):
    """Shut down every agent; return the list of shutdown() results."""
    results = []
    for agent in agents:
        results.append(agent.shutdown())
    return results
def dict_subtract(d1, d2):
    """Subtract one dictionary from another, value-wise.

    Args:
        d1 (dict): first dictionary.
        d2 (dict): second dictionary (must have the same keys).

    Returns:
        dict: `d1 - d2` per key.

    Raises:
        ValueError: if the key sets differ.
    """
    # dict key views compare set-wise, so this matches set(d1) != set(d2).
    if d1.keys() != d2.keys():
        raise ValueError("Dictionaries have different keys.")
    return {key: d1[key] - d2[key] for key in d1}
def prime_factors(number):
    """Return the prime factors of *number* (with multiplicity).

    :param number: positive integer
    :return: list of prime factors in non-decreasing order
    """
    result = []
    divisor = 2
    while divisor <= number:
        if number % divisor == 0:
            # BUG FIX: the original used '/=' (true division), turning
            # number into a float and risking precision loss for large
            # integers; floor division keeps exact int arithmetic.
            number //= divisor
            result.append(divisor)
        else:
            divisor += 1
    return result
def inverse(m: dict) -> dict:
    """Return the inverse mapping of *m* (values become keys).

    Duplicate values collapse: the last key wins.
    """
    inverted = {}
    for key, value in m.items():
        inverted[value] = key
    return inverted
def lobid_qs(row, q_field='surname', add_fields=None,
             base_url="https://lobid.org/gnd/search?q="):
    """Create a lobid query string from the passed-in row fields.

    :param row: mapping of field name -> value
    :param q_field: key in row used as the main query term
    :param add_fields: optional list of extra row keys to AND onto the
        query (falsy entries are skipped)
    :param base_url: lobid search endpoint prefix
    :return: the assembled query URL string
    """
    # BUG FIX: add_fields defaulted to a mutable list ([]); use None.
    search_url = base_url + row[q_field] + "&filter=type:Person"
    if add_fields:
        filters = [row[x] for x in add_fields if x]
        # NOTE(review): '"AND ".join' renders multiple filters as
        # 'xAND y' (no space before AND) — preserved; confirm intended.
        search_url = "{} AND {}".format(search_url, "AND ".join(filters))
    return search_url
def bayeselo_to_probability(elo, draw_elo):
    """Convert a BayesELO rating (relative to draw_elo) into outcome
    probabilities.

    :return: dict with keys 'win', 'loss', 'draw' summing to 1.
    """
    win = 1.0 / (1.0 + pow(10.0, (-elo + draw_elo) / 400.0))
    loss = 1.0 / (1.0 + pow(10.0, (elo + draw_elo) / 400.0))
    return {'win': win, 'loss': loss, 'draw': 1.0 - win - loss}
def combinatorial(numbers):
    """Return a sorted list of all product combinations of *numbers*
    (may contain duplicates when inputs repeat)."""
    if not numbers:
        return []
    head, rest = numbers[0], numbers[1:]
    products = [head]
    # Combine the head with every product of the tail (skipping values
    # equal to the head, as the original did).
    for value in combinatorial(rest):
        if value != head:
            products.append(value)
            products.append(head * value)
    return sorted(products)
def replace_sequence_chars(sequence_name):
    """Replace special characters in the sequence name by underscores
    (mirroring the corresponding folder names in ADNI).

    Args:
        sequence_name: sequence to process

    Returns:
        the new string
    """
    import re
    # Characters replaced: space / ; * ( ) < > :
    return re.compile("[ /;*()<>:]").sub("_", sequence_name)
def list_from_attr(vals, attr, **kwargs):
    """Collect the value of key *attr* from every mapping in *vals*.

    If a 'default' keyword is supplied, missing keys yield that default;
    otherwise a missing key raises KeyError.
    """
    if 'default' in kwargs:
        fallback = kwargs['default']
        return [item.get(attr, fallback) for item in vals]
    return [item[attr] for item in vals]
def name(version, prefix='schema'):
    """Return the canonical, zero-padded name for a schema version,
    e.g. name(7) -> 'schema_007'."""
    canonical = '%s_%03i' % (prefix, version)
    return canonical
def DeepMergeDict(dict_x, dict_y, path=None):
    """Recursively merge dict_y into dict_x (in place); returns dict_x.

    Nested dicts are merged; on a leaf conflict, dict_y's value wins.
    Equal leaves keep dict_x's original object.
    """
    if path is None:
        path = []
    for key, y_value in dict_y.items():
        if key not in dict_x:
            dict_x[key] = y_value
        elif isinstance(dict_x[key], dict) and isinstance(y_value, dict):
            DeepMergeDict(dict_x[key], y_value, path + [str(key)])
        elif dict_x[key] == y_value:
            pass  # same leaf value — keep the existing object
        else:
            dict_x[key] = y_value
    return dict_x
def fix_UTC_offset(date_string):
    """Strip the colon from a UTC offset for Python <= 3.6 parsing.

    Python 3.6 and lower cannot parse '%z' offsets written as '-04:00';
    they need '-0400'.  (3.7+ accepts both.)  See
    https://stackoverflow.com/questions/30999230/how-to-parse-timezone-with-colon

    :param date_string: date string "%Y-%m-%dT%H:%M:%S%z"
    :return: the date string with the offset colon removed, if present
    """
    if date_string[-3:-2] == ":":
        return date_string[:-3] + date_string[-2:]
    return date_string
def allocz_fixed(size):
    """Allocate a list of *size* zeros (via sequence repetition)."""
    zeros = [0]
    return zeros * size
def _unigrams(words): """ Input: a list of words, e.g., ["I", "am", "Denny"] Output: a list of unigram """ assert type(words) == list return words
def strict_range(value, values):
    """Validate that *value* lies within the range spanned by *values*.

    :param value: a value to test
    :param values: a range of values (range, list, etc.)
    :return: the value, when min(values) <= value <= max(values)
    :raises ValueError: if the value is out of the range
    """
    # Hoisted: the original recomputed min()/max() up to twice each
    # (once for the check, again for the error message).
    lo, hi = min(values), max(values)
    if lo <= value <= hi:
        return value
    raise ValueError('Value of {:g} is not in range [{:g},{:g}]'.format(
        value, lo, hi
    ))
def showValue(s, valueRepr=str):
    """Printable representation of the elements of sequence *s*.

    Empty sequences render as 'e'; multi-character element reprs are
    bracketed to keep the output unambiguous.
    """
    if len(s) == 0:
        return "e"

    def bracket(token):
        # Single characters pass through; longer reprs get brackets.
        return token if len(token) == 1 else "[" + token + "]"

    return "".join(bracket(valueRepr(x)) for x in s)
def scale(val, src, dst):
    """Linearly map *val* from range src=(lo, hi) to range dst=(lo, hi)."""
    ratio = (val - src[0]) / (src[1] - src[0])
    return ratio * (dst[1] - dst[0]) + dst[0]
def legislature_to_number(leg):
    """Split a full session id down to the FormatDocument.asp values.

    '49th-1st-regular' -> '49Leg/1r' (first two chars of the
    legislature, first char of the session ordinal, first char of the
    session type).

    NOTE(review): the original docstring's example claimed '49Leg/1s'
    for this input, which the code has never produced — confirm the
    intended short code against the consumer.
    """
    parts = leg.lower().split('-')
    return '%sLeg/%s%s' % (parts[0][0:2], parts[1][0], parts[2][0])
def rotateToHome(x, y):
    """Rotate local coordinates into the home coordinate frame.

    The home frame starts at (0,0) at the start of the runway and ends
    at (0, 2982) at its end, so home-x is crosstrack error and home-y
    is downtrack position.

    Args:
        x: x-value in local coordinate frame
        y: y-value in local coordinate frame
    """
    # Fixed rotation (cos/sin of the runway heading).
    cos_t = 0.583055934597441
    sin_t = 0.8124320138514389
    rotx = cos_t * x + sin_t * y
    roty = -sin_t * x + cos_t * y
    return rotx, roty
def slice_arrays(arrays, start=None, stop=None): """Slices an array or list of arrays. """ if arrays is None: return [None] elif isinstance(arrays, list): return [None if x is None else x[start:stop] for x in arrays] else: return arrays[start:stop]
def merge_dicts(*dict_args):
    """Shallow-merge any number of dicts into a new dict.

    Later dicts take precedence on key collisions; None entries are
    skipped.
    """
    merged = {}
    for mapping in (d for d in dict_args if d is not None):
        merged.update(mapping)
    return merged
def check_and_invert(categories):
    """Invert a list of categories to aggregate into the list of
    categories to preserve (usable directly by groupby).

    Raises ValueError when a supplied category is not one of the known
    column names (via list.remove), or when PROJECTED_YEAR_NAME is
    requested.
    """
    keep = ['C_AGE', 'GENDER', 'GEOGRAPHY_CODE', 'PROJECTED_YEAR_NAME']
    # Accept a bare string as a one-element list.
    if isinstance(categories, str):
        categories = [categories]
    if "PROJECTED_YEAR_NAME" in categories:
        raise ValueError("It makes no sense to aggregate data over PROJECTED_YEAR_NAME")
    for category in categories:
        keep.remove(category)
    return keep
def combine_cells(*fragments):
    """Concatenate several cell-fragment tuples into one tuple.

    Fragments normally get a space between them; use this to join them
    without one.
    """
    combined = ()
    for fragment in fragments:
        combined += fragment
    return combined
def format_watershed_title(watershed, subbasin):
    """Format a 'watershed (subbasin)' title for navigation, keeping it
    within a 30-character budget.

    A too-long watershed is truncated with '...'; otherwise the subbasin
    is truncated within the remaining budget.
    """
    budget = 30
    watershed = watershed.strip()
    subbasin = subbasin.strip()
    if len(watershed) > budget:
        return watershed[:budget - 1].strip() + "..."
    # Remaining budget for the parenthesized subbasin.
    budget -= len(watershed)
    if len(subbasin) > budget:
        return (watershed + " (" + subbasin[:budget - 3].strip() + " ...)")
    return (watershed + " (" + subbasin + ")")
def encrypt_symmetric_modulo(k: int, plaintext: str) -> str:
    """Encrypt *plaintext* by permuting character positions modulo its
    length: the character at index i moves to index (i * k) % n.

    Preconditions:
        - math.gcd(k, len(plaintext)) == 1

    >>> encrypt_symmetric_modulo(2, 'David is cool')
    'Dsa vciodo li'
    """
    n = len(plaintext)
    cipher = [''] * n
    for index, char in enumerate(plaintext):
        cipher[(index * k) % n] = char
    return ''.join(cipher)
def _CheckRevisions(rev_a, rev_b): """Checks to ensure the revisions are valid.""" rev_a = int(rev_a) rev_b = int(rev_b) if rev_b < rev_a: rev_a, rev_b = rev_b, rev_a return rev_a, rev_b
def is_asn_bogus(asn):
    """Return True if the ASN is in the private-use or reserved ranges.

    References:
      IANA: http://www.iana.org/assignments/as-numbers/as-numbers.xhtml
      RFCs: rfc1930, rfc6996, rfc7300, rfc5398
      Cymru: http://www.team-cymru.org/Services/Bogons/
      CIDR-Report: http://www.cidr-report.org/as2.0/reserved-ases.html

    The full bogon list is long and changes; this uses the basics.
    """
    # reserved & private-use-AS block
    if 64198 <= asn <= 131071:
        return True
    # way above the last currently allocated block (2014-11-02); this
    # also covers the >= 4200000000 private-use range.
    return asn >= 1000000
def strip_trailing_fields_csv(names):
    """Strip surrounding spaces from CSV field names (#456).

    :param names: iterable of field-name strings
    :return: list of stripped names
    """
    # Comprehension replaces the manual append loop (same behavior).
    return [name.strip() for name in names]
def convertRawLongitudeLatitude(rawLongitude, rawLatitude, bitFactor):
    """Convert raw native coordinates to standard longitude/latitude.

    Values are wrapped into (-180, 180] / (-90, 90] and truncated to 6
    decimal places (approximating typical GPS precision).

    Args:
        rawLongitude (int): longitude directly from data.
        rawLatitude (int): latitude directly from data.
        bitFactor (float): conversion factor for the raw bit length
            (GEO_xx_BITS for xx-bit raw data).

    Returns:
        tuple: (longitude, latitude)
    """
    longitude = rawLongitude * bitFactor
    if longitude > 180:
        longitude = longitude - 360.0
    latitude = rawLatitude * bitFactor
    if latitude > 90:
        latitude = latitude - 180.0
    # Keep only 6 places after the decimal (akin to GPS precision).
    return (float(round(longitude, 6)), float(round(latitude, 6)))
def is_valid_trajectory(ext):
    """Return True when the trajectory file extension is compatible
    with GROMACS.

    :param ext: file extension (without leading dot)
    """
    # Set literal gives O(1) membership (the original scanned a list).
    return ext in {'xtc', 'trr', 'cpt', 'gro', 'g96', 'pdb', 'tng'}
def isHexDigit(s):
    """
    isHexDigit :: str -> bool

    True iff *s* is exactly one ASCII hexadecimal digit,
    i.e. '0'..'9', 'a'..'f', 'A'..'F'.
    """
    # BUG FIX: the original used bare substring membership, which
    # wrongly accepted '' and multi-char substrings such as 'ab'.
    return len(s) == 1 and s in "0123456789abcdefABCDEF"
def double_sort(pathways_dictionary):
    """
    Return the keys of the dictionary sorted with the highest values
    first; runs of keys sharing the same value are ordered
    alphabetically by key.
    """
    sorted_keys = []
    # Last value seen; used to detect runs of equal values.
    prior_value = ""
    # Buffer of keys in the current equal-value run (alphabetized on flush).
    store = []
    for pathway in sorted(pathways_dictionary, key=pathways_dictionary.get, reverse=True):
        if prior_value == pathways_dictionary[pathway]:
            if not store:
                # Pull the run's first key back off the result so the
                # whole run can be re-sorted alphabetically together.
                store.append(sorted_keys.pop())
            store.append(pathway)
        else:
            if store:
                # Value changed: flush the buffered run in alpha order.
                sorted_keys += sorted(store)
                store = []
            prior_value = pathways_dictionary[pathway]
            sorted_keys.append(pathway)
    if store:
        # Flush a run that extends to the end of the iteration.
        sorted_keys += sorted(store)
    return sorted_keys
def normalize(value):
    """Return the string with decimal separators normalized
    (every comma becomes a dot)."""
    # Split on commas and rejoin with dots — same as replace(',', '.').
    return '.'.join(value.split(','))
def locate_tifs(file_list) -> list:
    """Identify the .tif files in a list of files.

    NOTE(review): matching is by case-insensitive substring, so names
    containing '.tif' anywhere (including '.tiff') also match — confirm
    whether a strict extension check was intended.

    Parameters
    ----------
    file_list : list
        list of files to parse

    Returns
    -------
    list
        files whose lowercased name contains '.tif'
    """
    tifs = []
    for fname in file_list:
        if '.tif' in fname.lower():
            tifs.append(fname)
    return tifs
def get_rare_elements_number(d, n):
    """Count the dictionary values below the rarity threshold.

    :param d: dictionary to use (values are compared against n)
    :param n: the threshold for rarity
    :return: the number of rare elements, as a string
    """
    # Generator-sum replaces the manual counter loop (same behavior).
    return str(sum(1 for v in d.values() if v < n))
def decode_http_header(hdr_obj):
    """Decode raw HTTP response headers into a dict.

    The status line is split into 'Version', 'Code' and 'Status'; every
    'Key: value' line becomes an entry, and a 'File-Type' key
    ('text'/'binary') is derived from Content-Type.

    :param hdr_obj: header bytes to be decoded
    :return: dict of HTTP header key-value pairs
    """
    text = hdr_obj.decode()
    http_hdr = {}
    for lineno, line in enumerate(text.splitlines()):
        if lineno == 0:
            # Status line: "HTTP/1.1 200 OK ...".
            status = line.split(' ')
            http_hdr['Version'] = status[0]
            http_hdr['Code'] = status[1]
            http_hdr['Status'] = ' '.join(status[2:])
            continue
        try:
            hkey, hval = line.split(': ')
        except ValueError:
            # Malformed line (or multiple ': ' occurrences) — skip it.
            continue
        http_hdr[hkey] = hval
        if hkey == 'Content-Type':
            if hval in ('text/plain', 'text/html',
                        'text/html; charset=iso-8859-1'):
                http_hdr['File-Type'] = 'text'
            else:
                http_hdr['File-Type'] = 'binary'
    return http_hdr
def create_statistics(results_list, number_of_vulnerabilities, number_of_subnets):
    """
    Create a one-line, human-readable statistics summary for the module.

    :param results_list: list with results from shodan api; only its
        length (the number of "cases"/security events) is used
    :param number_of_vulnerabilities: number of reviewed vulnerabilities
    :param number_of_subnets: number of reviewed subnets
    :return: summary string starting with "Statistics: "
    """
    result = "Statistics: "
    number_of_cases = 0
    number_of_cases += len(results_list)
    result += str(number_of_subnets)
    # Pluralise the subnet phrase.
    # NOTE(review): a negative number_of_subnets falls through both
    # branches and gets no " subnet(s) ..." suffix - confirm inputs are
    # always >= 0.
    if number_of_subnets > 1:
        result += " subnets were tested, "
    elif number_of_subnets == 1 or number_of_subnets == 0:
        result += " subnet was tested, "
    if number_of_subnets:
        if number_of_vulnerabilities > 1:
            result += str(number_of_vulnerabilities)
            result += " types of vulnerabilities were reviewed, "
        else:
            # NOTE(review): with exactly 1 reviewed vulnerability this
            # early-returns "0 cases discovered" even when results_list is
            # non-empty; the condition probably was meant to be `< 1`.
            # Confirm intended behaviour before changing it.
            return result + " 0 cases discovered, nothing to do"
    # Append the event count gathered from the Shodan results.
    if number_of_cases > 0:
        result += str(number_of_cases)
        result += " security events created."
    else:
        result += "nothing was created."
    return result
def calc_tcv(signal, threshold=2.0):
    """
    Calculate the count of threshold crossings for positive and negative
    threshold.

    :param signal: input signal (iterable of numbers)
    :param threshold: threshold used for counting; the negative threshold
        is ``-threshold``
    :return: number of times a threshold was crossed, as a float
    """
    tcv = 0
    # Value at the last *counted* crossing; seeded with 0.0 (between the
    # two thresholds for the default threshold=2.0).
    # NOTE(review): `prev` is only updated when a crossing is recorded, so
    # between crossings it holds the value at the previous crossing rather
    # than the previous sample.  This looks like deliberate hysteresis /
    # de-bouncing - confirm against the intended semantics before changing.
    prev = 0.0
    tm = -threshold  # mirrored (negative) threshold
    for x in signal:
        # Crossing of the positive threshold, in either direction.
        # NOTE(review): the comparisons are asymmetric (`prev > threshold`
        # in one clause vs `prev <= threshold` in the other), so a sample
        # exactly at the threshold counts as "below" - verify intended.
        if (x < threshold and prev > threshold) or (
            x > threshold and prev <= threshold
        ):
            prev = x
            tcv += 1
        # Crossing of the negative threshold, in either direction.
        elif (x < tm and prev > tm) or (x > tm and prev <= tm):
            prev = x
            tcv += 1
    return float(tcv)
def all(p, xs):
    """
    all :: (a -> Bool) -> [a] -> Bool

    Test whether every element of the list satisfies the predicate.

    The list must be finite for a True result; a False result can come
    from any element at a finite index of a finite or infinite list, since
    the membership test stops at the first False predicate value.
    """
    # `not in` consumes the lazily-mapped predicate results and
    # short-circuits as soon as a False turns up.
    return False not in map(p, xs)
def make_list(dico):
    """
    Flatten a chromosome-keyed dictionary into a single list.

    Entries are concatenated in sorted key order, so the per-chromosome
    grouping is lost but the traversal order is deterministic.
    """
    return [entry for chrom in sorted(dico) for entry in dico[chrom]]
def is_alive(character: dict) -> bool:
    """
    Determine if character is alive.

    :param character: a dictionary
    :precondition: character must be a dictionary
    :precondition: character must be a valid character created by character_creation function
    :postcondition: returns True if character["Current wounds"] > 0, else return False
    :return: True if character is alive, otherwise False

    >>> is_alive({"Current wounds": 5})
    True
    >>> is_alive({"Current wounds": 0})
    False
    >>> is_alive({"Current wounds": -1})
    False
    """
    remaining_wounds = character["Current wounds"]
    return remaining_wounds > 0
def get_sequence(n):
    """
    Return the Fibonacci sequence from F(0) up to F(n) as a list.

    Iterative implementation: the previous memoised-recursion version
    recursed n levels deep and raised RecursionError for n near Python's
    recursion limit (~1000).

    :param n: index of the last Fibonacci number to include
    :return: [F(0), F(1), ..., F(n)]; empty list when n < 0
    """
    if n < 0:
        # Mirrors the old behaviour: range(n + 1) was empty for n < 0.
        return []
    sequence = [0, 1]
    while len(sequence) <= n:
        sequence.append(sequence[-1] + sequence[-2])
    # Slice handles n == 0, where the [0, 1] seed is one element too long.
    return sequence[:n + 1]
def dataToHex(d):
    """
    Convert the raw data in *d* to a hex string with a space every 4 bytes.

    :param d: raw data - either a ``str`` of byte-sized characters or a
        ``bytes``/``bytearray`` object
    :return: lowercase hex string, e.g. ``'01020304 05'``
    """
    hex_bytes = []  # renamed: the old name shadowed the builtin `bytes`
    for i, c in enumerate(d):
        # Iterating bytes yields ints in Python 3, on which ord() raises
        # TypeError; only apply ord() to str characters.
        value = c if isinstance(c, int) else ord(c)
        # format() zero-pads directly, replacing the manual
        # hex(byte)[2:] + leading-'0' logic.
        hex_byte = format(value, '02x')
        if i % 4 == 3:
            hex_byte += ' '  # group separator after every 4th byte
        hex_bytes.append(hex_byte)
    return ''.join(hex_bytes).strip()
def justify_content(keyword):
    """``justify-content`` property validation."""
    valid_keywords = {
        'flex-start', 'flex-end', 'center', 'space-between',
        'space-around', 'space-evenly', 'stretch',
    }
    return keyword in valid_keywords
def is_palindrome(string: str) -> bool:
    """Check whether *string* reads the same forwards and backwards."""
    mirrored = "".join(reversed(string))
    return string == mirrored
def remove_brackets(s: str) -> str:
    """
    Remove text in the square brackets.

    :param s: the string from which you need to remove the brackets
    :return: string without brackets, stripped of surrounding spaces
    """
    if s.startswith('['):
        # Leading "[tag] text": keep what follows the first closing bracket.
        return s.split(']')[1].strip(' ')
    if s.endswith(']'):
        # Trailing "text [tag]": keep what precedes the first opening bracket.
        return s.split('[')[0].strip(' ')
    # No bracket at either end: keep only the first line.
    return s.split('\n')[0].strip(' ')
def _get_delta(index, camera_angle): """ This operation takes an index into the three-value discrete space (constant, increase, decrease) and converts that into a continuous camera value. This is done quite simply, by returning `camera_angle` for an index of 1, and -1*camera_angle for an index of 2 """ if index == 0: return 0 elif index == 1: return camera_angle elif index == 2: return -1*camera_angle else: raise ValueError(f"Unsupported value {index}")
def hyperparam_to_path(hyperparameters):
    """Return a string of all hyperparameters usable as a path extension."""
    segments = [f"{name}_{value}" for name, value in hyperparameters.items()]
    return "/".join(segments)
def _increment_last_byte(byte_string): """ Increment a byte string by 1 - this is used for etcd prefix gets/watches. FIXME: This function is doing it wrong when the last octet equals 0xFF. FIXME: This function is doing it wrong when the byte_string is of length 0 """ s = bytearray(byte_string) s[-1] = s[-1] + 1 return bytes(s)
def _count_key_occurence_list_of_tuples(list_of_tuples, key): """Return the number of times `key` occurs as a key in `list_of_tuples`.""" return sum(1 for i, _ in list_of_tuples if i == key)
def cTok(c, nu = 5.0, xF = 0.2, xS = 0.8, xG = 0.0 ):
    """
    Convert a c-eigenvalue to a k-eigenvalue, given the cross sections
    for the problem.

    :param c: c-eigenvalue to convert
    :param nu: average neutrons per fission
    :param xF: fission cross section
    :param xS: scattering cross section
    :param xG: capture cross section
    :return: the corresponding k-eigenvalue
    """
    absorption = xG + xF          # xA
    total = absorption + xS       # xT
    return nu * xF / (total * c - xS)
def make_text(text, i18n_texts=None):
    """
    make text.

    reference
    - `Common Message Property <https://developers.worksmobile.com/jp/document/100500801?lang=en>`_

    :return: text content.
    """
    message = {"type": "text", "text": text}
    if i18n_texts is not None:
        message["i18nTexts"] = i18n_texts
    return message
def _str_to_bytes(value: str) -> bytes: """Convert ``str`` to bytes""" return value.encode("utf-8")
def slice_dim(slc, axis):
    """Return a slice tuple that indexes along an arbitrary axis.

    Args:
        slc: the object used for slicing e.g. a slice object, an int or a
            sequence
        axis: the axis to slice; negative values count from the end
    """
    full = slice(None)
    if axis >= 0:
        # Skip `axis` leading dimensions, then slice; Ellipsis absorbs the rest.
        return (full,) * axis + (slc, Ellipsis)
    # Negative axis: Ellipsis absorbs the leading dims, then pad with
    # full slices so `slc` lands -axis dimensions from the end.
    return (Ellipsis, slc) + (full,) * (-axis - 1)
def has_subclass(iterable, class_or_tuple):
    """returns True if iterable contains an subclass of cls"""
    return any(issubclass(candidate, class_or_tuple) for candidate in iterable)
def kronecker(x, y):
    """
    Returns 1 if x==y, and 0 otherwise.

    Note that this should really only be used for integer expressions.
    """
    return 1 if x == y else 0