content
stringlengths
42
6.51k
def group_count(elements, group=None):
    """Count occurrences of strings in a (possibly nested) list.

    :param elements: list of str, possibly containing nested lists of str
    :param group: accumulator dict used for recursion
    :return: dict mapping each element (key) to its count (value)
    """
    if group is None:
        group = {}
    for item in elements:
        if isinstance(item, list):
            group = group_count(item, group)
        else:
            group[item] = group.get(item, 0) + 1
    return group
def _use_cache(outputs, use_cache): """During generation, decide whether to pass the `past` variable to the next forward pass.""" if len(outputs) <= 1 or use_cache is False: return False return True
def elementarDensityError(N, R, current, livetime):
    """Elemental density error computation (stub).

    NOTE(review): not implemented; all parameters are currently ignored
    and a placeholder string is returned.
    """
    return 'not implemented yet'
def recursive_thue_morse(n):
    """Return the n-th term of the Thue-Morse sequence (recursive definition).

    The first few terms are: 0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 ...

    :param n: non-negative index into the sequence
    :return: 0 or 1
    """
    if n == 0:
        return 0
    # BUG FIX: use floor division; the original `n / 2` produced floats,
    # which lose precision for large n and made the recursion argument a float.
    if n % 2 == 0:
        return recursive_thue_morse(n // 2)
    return 1 - recursive_thue_morse((n - 1) // 2)
def mult_vector(vector, coeff):
    """Scale a 2D vector: multiply every component by *coeff*."""
    scaled = []
    for component in vector:
        scaled.append(coeff * component)
    return scaled
def _bool_from_str(s: str) -> bool: """ Converts *s* to a boolean value based on common truthy keywords. """ if s.lower() in ('yes', 'true', 'on', 'enabled'): return True if s.lower() in ('no', 'false', 'off', 'disabled'): return True raise ValueError(f'not a truthy keyword: {s!r}')
def calc_no_within_ts(levels, lvl, varname):
    """Calculate the position of a (level, variable) pair inside the current timestep.

    Args:
        levels: list of level dicts, each with 'level' and 'vars' keys
        lvl: the level we are interested in
        varname: the variable we are interested in

    Returns:
        number of steps preceding the variable
    """
    steps = 0
    for entry in levels:
        if entry['level'] == lvl:
            names = [v[0] for v in entry['vars']]
            steps += names.index(varname)
            break
        steps += len(entry['vars'])
    return steps
def ellipsize(s, max_length=60):
    """Truncate *s* to *max_length* characters, appending '...' when shortened.

    >>> print(ellipsize(u'lorem ipsum dolor sit amet', 40))
    lorem ipsum dolor sit amet
    >>> print(ellipsize(u'lorem ipsum dolor sit amet', 20))
    lorem ipsum dolor...
    """
    suffix = u'...'
    if len(s) <= max_length:
        return s
    return s[:max_length - len(suffix)] + suffix
def clean_single_quotes(text: str) -> str:
    """Escape every single quote (') in *text* as a doubled single quote ('')."""
    return "''".join(text.split("'"))
def CMTIntParser(value: str) -> int:
    """Parse a VISA return string (possibly in exponential notation) into an int.

    Args:
        value: the VISA return string, e.g. '1.0E+3'
    """
    as_float = float(value)
    return int(as_float)
def string_split(input_string):
    """Split a comma-separated "time,voltage" line into its two fields.

    Each line of the data file is read in as a single string of the form
    "0.#####,1.#####"; splitting on the comma yields the time as the first
    list element and the voltage as the second.

    Parameters
    ----------
    input_string : string
        Contains the time and voltage combined as a single string

    Returns
    -------
    list
        List containing two strings
    """
    return input_string.split(",")
def simple_delete(a_dictionary, key=""):
    """Return a shallow copy of *a_dictionary* with *key* removed (if present).

    :param a_dictionary: source dict (mutated in place before copying,
        matching the original behavior)
    :param key: key to delete; an empty key is ignored
    :return: shallow copy of the (possibly reduced) dict
    """
    # BUG FIX: `key is not ""` compared string identity, which is
    # implementation-dependent (and a SyntaxWarning on 3.8+); use equality.
    if key != "" and key in a_dictionary:
        a_dictionary.pop(key)
    return (a_dictionary.copy())
def pluralize(number, word):
    """Return "<number> <word>" with an 's' when plural, or '' when number is falsy."""
    if not number:
        return ''
    suffix = '' if number == 1 else 's'
    return '%s %s%s' % (number, word, suffix)
def convertCountsToCumulativeDistribution(att_to_count):
    """Convert a dict of counts into a cumulative probability distribution for sampling.

    Args:
        att_to_count: dict mapping attribute -> count.

    Returns:
        dict mapping cumulative probability in (0, 1] -> attribute;
        empty when the total count is zero.
    """
    total = sum(att_to_count.values())
    if total == 0:
        return {}
    dist = {}
    running = 0
    for attribute, count in att_to_count.items():
        if count > 0:
            running += count / total
            dist[running] = attribute
    return dist
def is_xmfa_blank_or_comment(x):
    """True when *x* is empty, whitespace-only, or an XMFA '=' comment line."""
    if not x:
        return True
    return x.startswith('=') or x.isspace()
def pad(contents: str, size: int) -> str:
    """Right-pad *contents* with spaces to *size*, shortening if necessary.

    :param contents: the original content to pad
    :param size: the target size
    :return: exactly *size* characters: either contents padded with
        trailing spaces, or contents truncated with a trailing "... "
    """
    # BUG FIX: strings of exactly `size` characters were needlessly
    # truncated; only shorten when the content actually overflows.
    if len(contents) <= size:
        return f"{contents}{' ' * (size - len(contents))}"
    return f"{contents[:size-4]}... "
def p16b(x):
    """Pack an integer's low 16 bits into a 2-byte big-endian string."""
    import struct
    masked = x & 0xffff
    return struct.pack('>H', masked)
def isSupsetTo(d: dict, what: dict):
    """Check whether the `d` dict is a superset of (or equal to) the `what` dict.

    All items from `what` must be present and equal in `d`.

    Args:
        d, what (dict): Dicts to compare.

    Returns:
        bool: True if d >= what, False otherwise.
    """
    _missing = object()  # sentinel so stored None/falsy values compare correctly
    for key, value in what.items():
        # BUG FIX: a key absent from `d` used to raise KeyError;
        # a non-superset should simply report False.
        if d.get(key, _missing) != value:
            return False
    return True
def divides(a: int, b: int) -> bool:
    """Return `True` iff `a` is a multiple of `b` (i.e. b divides a evenly)."""
    remainder = a % b
    return remainder == 0
def make_str_from_row(board, row_index):
    """ (list of list of str, int) -> str

    Return the characters from the row of the board with index row_index
    as a single string.

    >>> make_str_from_row([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 0)
    'ANTT'
    """
    return ''.join(board[row_index])
def get_tuple(from_var):
    """Normalize tuples, strings and None into a tuple.

    :raises TypeError: for any other input type
    """
    if from_var is None:
        return ()
    if type(from_var) is tuple:
        return from_var
    if type(from_var) is str:
        return (from_var,)
    raise TypeError('Value should be either tuple, string or None, '
                    + 'received: ' + type(from_var).__name__)
def compile_word(word):
    """Compile a word of uppercase letters as numeric digits.

    E.g., compile_word('YOU') => '(1*U+10*O+100*Y)'
    Non-uppercase words unchanged: compile_word('+') => '+'
    """
    if not word.isupper():
        return word
    terms = []
    for power, letter in enumerate(reversed(word)):
        terms.append('%s*%s' % (10 ** power, letter))
    return '(' + '+'.join(terms) + ')'
def attributes2schema(attributes):
    """Transform the list of Open311 service attributes into a valid
    Cerberus schema (see: http://wiki.open311.org/GeoReport_v2#Response_2)."""
    schema = {}
    string_types = ('text', 'singlevaluelist', 'multivaluelist')
    for attr in attributes:
        if not attr['variable']:
            continue
        datatype = attr['datatype']
        # These Open311 types are all represented as plain strings.
        if datatype in string_types:
            datatype = 'string'
        spec = {'type': datatype, 'required': attr['required']}
        # If the attribute has a list of values, their keys become the
        # allowed values for this attribute.
        if 'values' in attr:
            spec['allowed'] = [v['key'] for v in attr['values']]
        # If the attribute has a data relation, enforce the foreign-key
        # constraint on it.
        if 'relation' in attr:
            spec['data_relation'] = attr['relation']
        schema[attr['code']] = spec
    return schema
def get_boundary_polyhedra(polyhedra, boundary_x=0, boundary_width=0.5, verbose=True, z_lim=[0, 100]):
    """
    get indices of polyhedra at boundary (assumed to be parallel to x-axis)

    Parameter
    ---------
    polyhedra: dict
        dictionary of all polyhedra
    boundary_x: float
        position of boundary in Angstrom
    boundary_width: float
        width of boundary where center of polyhedra are considered in Angstrom
    verbose: boolean
        optional
    z_lim: list
        upper and lower limit of polyhedra to plot

    Returns
    -------
    boundary_polyhedra: list
        list of polyhedra at boundary
    """
    boundary_polyhedra = []
    for key, polyhedron in polyhedra.items():
        center = polyhedron['vertices'].mean(axis=0)
        # BUG FIX: the tolerance was hard-coded to 0.5, silently ignoring
        # the `boundary_width` parameter.
        if abs(center[0] - boundary_x) < boundary_width and (z_lim[0] < center[2] < z_lim[1]):
            boundary_polyhedra.append(key)
            if verbose:
                print(key, polyhedron['length'], center)
    return boundary_polyhedra
def is_unknown(val):
    """Check if value is one of the known "unknown" strings.

    Non-strings have no .lower(); the resulting AttributeError is caught
    and False is returned.
    """
    try:
        lowered = val.lower()
    except AttributeError:  # not a str
        return False
    return lowered in ('unknown', 'n/a')
def is_palindrome(text):
    """Return True iff *text* reads the same forwards and backwards.

    :param text: string to test; empty and 1-char strings are palindromes
    :return: bool
    """
    # BUG FIX: the recursive result used to be discarded (so any string
    # whose outer characters matched — e.g. "abca" — reported True), and
    # the empty string fell through all branches returning None.
    if len(text) <= 1:
        return True
    if text[0] != text[-1]:
        return False
    return is_palindrome(text[1:-1])
def append(listA, listB):
    """ Concatenate two linked lists into one.

    If both are None, return None. If either is None, return the other.
    Otherwise listB is attached after the last node of listA and the head
    of listA is returned.

    :param listA: Node, head of linked list
    :param listB: Node, head of linked list
    :return: Node or None, head of combined list or None if both lists are None
    """
    # covers both-None (returns None) and A-only-None
    if listA is None:
        return listB
    if listB is None:
        return listA
    # walk to the tail of listA and hook listB on
    tail = listA
    while tail.next is not None:
        tail = tail.next
    tail.next = listB
    return listA
def index_roster(roster_table):
    """Convert roster table to dictionaries for by-student lookup.

    key_dict resolves either a last name or the canonical key itself to the
    canonical key "last:first"; ta_info_by_ta maps the canonical key to the
    full record, e.g.:

        student = ta_info_by_ta[key_dict["Fasano"]]
        student = ta_info_by_ta[key_dict["Fasano:Patrick"]]

    Arguments:
        roster_table (list of dict) : TA records in input order

    Returns:
        (tuple) : (ta_keys, key_dict, ta_info_by_ta)
            ta_keys (list of str) : ta keys in input order
            key_dict (dict) : accepted identifier (str) -> canonical key (str)
            ta_info_by_ta (dict) : canonical key (str) -> ta record (dict)
    """
    ta_keys = [record["key"] for record in roster_table]
    key_dict = {}
    ta_info_by_ta = {}
    for record in roster_table:
        canonical = record["key"]
        key_dict[record["last"]] = canonical
        key_dict[canonical] = canonical
        ta_info_by_ta[canonical] = record
    return (ta_keys, key_dict, ta_info_by_ta)
def loc_to_latlon_with_hashing(location, geocode, address_dict):
    """Resolve a location name to (lat, lon, country), with caching.

    :param location: Name of location
    :param geocode: The geocode finder from geopy
    :param address_dict: dict of addresses already discovered (the cache)
    :return: lat, lon and country (all None when geocoding fails)
    """
    try:
        cached = address_dict[location]
        return cached['lat'], cached['lon'], cached['country']
    except KeyError:
        # address encountered for the first time: geocode it and remember it
        located = geocode(location)
        if located is None:
            return None, None, None
        lat, lon = located.latitude, located.longitude
        try:
            country = located.raw["address"]["country"]
        except KeyError:
            country = "&*COUNTRY_KEY_ERROR*&"
        address_dict[location] = {'lat': lat, 'lon': lon, 'country': country}
        return lat, lon, country
def _validate_attribute_id(this_attributes, this_id, xml_ids, enforce_consistency, name): """ Validate attribute id. """ # the given id is None and we don't have setup attributes # -> increase current max id for the attribute by 1 if this_id is None and this_attributes is None: this_id = max(xml_ids) + 1 # the given id is None and we do have setup attributes # set id to the id present in the setup elif this_id is None and this_attributes is not None: this_id = this_attributes[name] # the given id is not None and we do have setup attributes # -> check that the ids match (unless we are in over-write mode) elif this_id is not None and this_attributes is not None: if (this_id != this_attributes[name]) and enforce_consistency: raise ValueError("Expect id %i for attribute %s, got %i" % (this_attributes[name], name, this_id)) return this_id
def parent(i: int):
    """ Return the parent element's index of the requested element

    Not used for sorting.

    Keyword Arguments
    i:int: index of requested element

    Return
    int: parent's index of requested element
    """
    # BUG FIX: `int(i / 1)` returned the element's own index. The parent
    # of node i in a 1-based binary heap is floor(i / 2).
    return i // 2
def CoilTime(d, v1, v2):
    """Duration to traverse the distance d between the Nth and Nth+1 coil,
    assuming the average of the two coil velocities v1 and v2."""
    average_velocity = (v1 + v2) / 2
    return d / average_velocity
def sort_order(data, dictParam):
    """Extract the router (system IP) column, which the data is always
    ordered and sorted by.

    Args:
        data: DataFrame-like table of routers
        dictParam (dict): config with 'dataGroupColumn', 'strictOrder',
            'useHeader' and 'data' (source file name, for error reporting)

    Returns:
        tuple: (routers list — with duplicates under strict order,
        unique otherwise; original data)
    """
    ipCol = dictParam['dataGroupColumn']
    strict = dictParam['strictOrder'] == 'yes'
    if dictParam['useHeader'] == 'yes':
        try:
            series = data[ipCol]
            routers = list(series) if strict else list(series.unique())
        except Exception as e:
            print("No column header " + str(e) + " in file " + dictParam['data'] + ". Quitting...\n")
            quit()
    else:
        routers = list(data[0]) if strict else list(data[0].unique())
    return routers, data
def linecol_to_pos(text, line, col):
    """Return the offset of this line and column in text.

    Lines are one-based, columns zero-based. This is how Jedi wants it.

    :raises ValueError: when line/column lie outside the text
    """
    line_start = 0
    for _ in range(line - 1):
        next_newline = text.find("\n", line_start)
        if next_newline < 0:
            raise ValueError("Text does not have {0} lines."
                             .format(line))
        line_start = next_newline + 1
    offset = line_start + col
    if offset > len(text):
        raise ValueError("Line {0} column {1} is not within the text"
                         .format(line, col))
    return offset
def break_words(stuff):
    """This function will break up words for us."""
    # Split on single spaces, matching the original behavior exactly.
    return stuff.split(' ')
def calc_hilo(min_val, max_val, df, cols_to_test):
    """ Return lowest and highest values from min_val and max_val if present,
    or calculate from df.

    The y-axis range must be the same for all four axes, hence the shared
    bounds across all columns in cols_to_test.
    """
    have_rows = len(df.index) > 0
    if max_val is None and have_rows:
        highest_possible_score = max(max(df[col]) for col in cols_to_test)
    else:
        highest_possible_score = max_val
    if min_val is None and have_rows:
        lowest_possible_score = min(min(df[col]) for col in cols_to_test)
    else:
        lowest_possible_score = min_val
    return lowest_possible_score, highest_possible_score
def unicode_filter(intext):
    """Remove en-dash characters (U+2013) and strip surrounding whitespace.

    :param intext: Text to filter.
    :type intext: str
    """
    cleaned = intext.replace("\u2013", "")
    return cleaned.strip()
def relative_f_to_c_degree(fahrenheit_degree):
    """ Convert relative degrees Fahrenheit to Celsius

    For example, raising a thermostat by 3 degrees F corresponds to about
    1.66668 degrees C. Relative change, so no 32-degree offset applies.

    :param fahrenheit_degree: relative number of degrees F
    :return: relative number of degrees C
    """
    scale = 0.555556  # ~5/9, kept at the original precision
    return float(fahrenheit_degree) * scale
def place(cards, pos, card):
    """ Replaces the card at a given position in a list, without mutating
    the input.

    Example:
    > place([1,4,7,10,12], 2, 9)
    [1,4,9,10,12]
    """
    copied = list(cards)
    copied[pos] = card
    return copied
def get_width(img):
    """ Returns the number of columns in the image (length of row 0) """
    first_row = img[0]
    return len(first_row)
def _get_tag_flags(tags, exclude_tags): """ tags: List exclude_tags: List """ return ["-t %s" % tag for tag in tags] + ["-T %s" % tag for tag in exclude_tags]
def circle(circWidth, x, y, dotRadius, idField):
    """Render an SVG circle element as a string."""
    template = ('<circle stroke="Black" stroke-width="{}" fill="black"\n'
                ' cx="{}" cy="{}" r="{}"'
                ' id="{}" />\n')
    return template.format(circWidth, x, y, dotRadius, idField)
def compare_dicts(before, after):
    """ Diff two lists of k8s-style resources into [added, removed].

    Resources are identified by metadata.generateName, falling back to
    metadata.name.

    Args:
        before (list): resources before execution
        after (list): resources after execution

    Returns:
        list: [added, removed] — each element a list of resources
    """
    def uid(resource):
        # prefer generateName, fall back to name
        meta = resource.get('metadata')
        return meta.get('generateName', meta.get('name'))

    uids_before = [uid(r) for r in before]
    uids_after = [uid(r) for r in after]
    added_uids = [u for u in uids_after if u not in uids_before]
    removed_uids = [u for u in uids_before if u not in uids_after]
    added = [r for r in after if uid(r) in added_uids] if added_uids else []
    removed = [r for r in before if uid(r) in removed_uids] if removed_uids else []
    return [added, removed]
def validate_parent_field(instance, value):
    """A parent is valid when the instance exists and is not its own parent."""
    if instance is None:
        return False
    return value != instance
def convert_incremental_metrics_to_dict(incremental_metrics):
    """Flatten nested incremental-metric objects into plain dicts for the DB.

    Required because of the nested nature of the incremental-metrics JSON:
    each metric's `latency` and `memory_info` sub-objects are converted via
    their __dict__.
    """
    return [
        {
            'time': metric.time,
            'throughput': metric.throughput,
            'latency': metric.latency.__dict__,
            'memory_info': metric.memory_info.__dict__,
        }
        for metric in incremental_metrics
    ]
def key_prefix_replace(d, prefix, new_prefix=""):
    """ Replace each listed prefix (first occurrence only) in the keys of a
    flattened dict.

    :param d: the flattened dict
    :param prefix: a list of prefixes that are replaced with a new prefix.
        Typically this will be ""
    :type prefix: list of str
    :param new_prefix: the new prefix, "" by default
    :return: a new dict with the keys rewritten
    """
    result = {}
    for key, value in d.items():
        for p in prefix:
            key = key.replace(p, new_prefix, 1)
        result[key] = value
    return result
def generate_header_string(text, symbol="-"):
    """Generates a 2-line header string with underlined text.

    >>> header = generate_header_string("header string", symbol="*")
    >>> print(header)
    header string
    *************
    """
    underline = symbol * len(text)
    return "\n".join((text, underline))
def construct_url(ip_address: str) -> str:
    """Normalize an IP/host into a URL: ensure a scheme, strip a trailing slash."""
    if "http://" not in ip_address and "https://" not in ip_address:
        ip_address = "http://" + ip_address
    if ip_address.endswith("/"):
        ip_address = ip_address[:-1]
    return ip_address
def build_slice_name(experiment_name, variable_name, time_index, xy_slice_index):
    """ Builds a unique name for a slice based on the experiment, variable,
    and location within the dataset.

    Takes 4 arguments:

      experiment_name - String specifying the experiment that generated the slice.
      variable_name   - String specifying the variable associated with the slice.
      time_index      - Non-negative index specifying the time step.
      xy_slice_index  - Non-negative index specifying the XY slice.

    Returns 1 value:

      slice_name - String containing the constructed name.
    """
    return f"{experiment_name:s}-{variable_name:s}-z={xy_slice_index:03d}-Nt={time_index:03d}"
def flip_exchange(exc):
    """Negate the amount (and formula, if any) of an exchange, in place."""
    exc['amount'] *= -1
    if 'formula' in exc:
        exc['formula'] = '-1 * ({})'.format(exc['formula'])
    return exc
def translate_DNA(dnaStrand):
    """ Return the translated protein from the DNA strand

    Stop codons (TAA, TAG, TGA) map to the empty string; any trailing
    partial codon is ignored.

    >>> translate_DNA("ATGTATGATGCGACCGCGAGCACCCGCTGCACCCGCGAAAGCTGA")
    MYDATASTRCTRES
    """
    dna_table = {
        "TTT": "F", "TTC": "F", "TTA": "L", "TTG": "L",
        "CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
        "ATT": "I", "ATC": "I", "ATA": "I", "ATG": "M",
        "GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
        "TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S",
        "CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
        "ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
        "GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
        "TAT": "Y", "TAC": "Y", "TAA": "", "TAG": "",
        "CAT": "H", "CAC": "H", "CAA": "Q", "CAG": "Q",
        "AAT": "N", "AAC": "N", "AAA": "K", "AAG": "K",
        "GAT": "D", "GAC": "D", "GAA": "E", "GAG": "E",
        "TGT": "C", "TGC": "C", "TGA": "", "TGG": "W",
        "CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R",
        "AGT": "S", "AGC": "S", "AGA": "R", "AGG": "R",
        "GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
    }
    codons = (dnaStrand[i:i + 3] for i in range(0, len(dnaStrand) - 2, 3))
    return "".join(dna_table[codon] for codon in codons)
def apply_polynomial(coeff, x):
    """Given coefficients [a0, a1, ..., an] and x, compute
    f(x) = a0 + a1*x + ... + ai*x**i + ... + an*x**n.

    Args:
        coeff (list(int)): Coefficients [a0, a1, ..., an].
        x (int): Point at which to evaluate the polynomial.

    Returns:
        int: f(x).
    """
    total = 0
    power = 1  # x**i, updated incrementally
    for coefficient in coeff:
        total += coefficient * power
        power *= x
    return total
def check_diagonal_winner(input_list, size):
    """ Check the winner number in diagonal direction.

    Scans diagonals running down-right starting at column 0 of each row.

    Arguments:
        input_list -- a two dimensional list for checking.
        size -- the run length needed to win.

    Returns:
        winner -- the winning player's mark, or None if there is no winner.
    """
    for start_row in range(len(input_list)):
        try:
            diagonal = [input_list[start_row + i][i] for i in range(size)]
        except IndexError:
            continue  # diagonal runs off the board
        first = diagonal[0]
        if first != ' ' and diagonal.count(first) == size:
            return first
def get_supported_platforms(entries):
    """Count platform occurrences across entries.

    :param entries: iterable of dicts, each optionally carrying a
        'Platform' list (missing -> ['N/A'])
    :return: list of (platform, count), sorted by occurrence descending,
        then case-insensitively by name
    """
    all_platforms = []
    for entry in entries:
        all_platforms.extend(entry.get('Platform', ['N/A']))
    stats = [(name, all_platforms.count(name)) for name in set(all_platforms)]
    # secondary key first, then primary: both sorts are stable
    stats.sort(key=lambda item: str.casefold(item[0]))
    stats.sort(key=lambda item: -item[1])
    return stats
def iskfunc(cls_or_object):
    """ Tests if the given class or instance has been wrapped as a kfunc.

    Detection relies on the `_is_kfunc` marker attribute set by the wrapper.
    """
    marker = '_is_kfunc'
    return hasattr(cls_or_object, marker)
def count_strata(matches, strata):
    """Stratify taxa in a map and count occurrences.

    Parameters
    ----------
    matches : dict of str or dict
        Query-to-taxon(a) map.
    strata : dict
        Read-to-feature map for stratification.

    Returns
    -------
    dict of tuple of (str, str): int
        Stratified (feature, taxon): count map.
    """
    counts = {}
    for query, taxa in matches.items():
        if query not in strata:
            continue
        feature = strata[query]
        if isinstance(taxa, dict):
            # fractional assignment: each taxon gets its share of one unit
            share = 1 / sum(taxa.values())
            for taxon, n in taxa.items():
                key = (feature, taxon)
                counts[key] = counts.get(key, 0) + n * share
        else:
            key = (feature, taxa)
            counts[key] = counts.get(key, 0) + 1
    return counts
def pole_minimizer_two(Emin, ChemPot, kT, p):
    """ Produce initial pole-placement parameters for minimizing

        N = N_AB + N_BC + N_CD + N_DE + N_EF,
        where N_AB = N_BC = N_CD = N_DE = N_EF.

    muRe1/muIm1 are tied to muRe2/muIm2 so the relative tolerance is not
    violated, reducing the free-parameter count. The returned values are
    initial fillers; the real answer is computed elsewhere.

    Returns:
        (kTRe1, kTIm1, muRe1, muIm1, kTRe2, kTIm2, muRe2, muIm2)
    """
    # all four temperatures start from the physical kT
    kTRe1 = kTIm1 = kTRe2 = kTIm2 = kT
    muRe2 = Emin - p * kTRe2
    muRe1 = 0.5 * (muRe2 + ChemPot)  # initial guess halfway to the chemical potential
    muIm1 = p * kTIm1
    muIm2 = p * kTIm2
    return kTRe1, kTIm1, muRe1, muIm1, kTRe2, kTIm2, muRe2, muIm2
def first_val(dictionary):
    """Get the first value of the dict, or "" when it is empty."""
    for value in dictionary.values():
        return value
    return ""
def T(a, p):
    """ the number of dots in tri-angle down:
    sum of floor(a*i/p) for i in [0, ceil(p/2)) """
    half = (p + 1) >> 1
    return sum(a * i // p for i in range(half))
def matlab_bbs_from_py(y_top, x_top, target_height, target_width):
    """ Convert a box given by its upper-left corner, height and width into a
    MATLAB-style (1-based, inclusive) bounding box (x1, y1, x2, y2).

    :param y_top: 0-based row of the upper-left corner
    :param x_top: 0-based column of the upper-left corner
    :param target_height: box height
    :param target_width: box width
    :return: (x1, y1, x2, y2) in MATLAB convention
    """
    # BUG FIX: y2 previously added target_width instead of target_height
    # (target_height was unused), producing wrong boxes for non-square targets.
    return (x_top + 1, y_top + 1, x_top + target_width, y_top + target_height)
def esc_kw(kw):
    """ Take a keyword and escape all the Solr parts we want to escape! """
    # escape backslashes first, as the loop below injects new ones
    kw = kw.replace('\\', '\\\\')
    for special in '()+-:/][*?{}~':
        kw = kw.replace(special, '\\' + special)
    return kw
def format_days(days):
    """ Converts a list of tuples with a date into a list of tuples with a
    string representation of the date

    :param days: List of tuples from database with datetime in second position
    :return: List of (first element, 'YYYY-MM-DD') tuples
    """
    return [(day[0], day[1].strftime('%Y-%m-%d')) for day in days]
def get_phonopy_options(postprocess_parameters):
    """Return phonopy command option strings as (mesh_opts, fc_opts)."""
    mesh_opts = []
    if "mesh" in postprocess_parameters:
        mesh = postprocess_parameters["mesh"]
        try:
            # a single number means a mesh length
            mesh_opts.append("--mesh=%f" % float(mesh))
        except TypeError:
            # otherwise a 3-tuple of grid points
            mesh_opts.append('--mesh="%d %d %d"' % tuple(mesh))
        mesh_opts.append("--nowritemesh")
    fc_opts = []
    if "fc_calculator" in postprocess_parameters:
        if postprocess_parameters["fc_calculator"].lower().strip() == "alm":
            fc_opts.append("--alm")
    return mesh_opts, fc_opts
def section_to_program(section):
    """Transform a section number into a program name.

    Args:
        section (int): The number of the section

    Returns:
        str: "Full-Time", "Evening", "Weekend", "EMBA", "PhD", or "NA"
    """
    programs = (
        (range(1, 10), "Full-Time"),
        (range(81, 85), "Evening"),
        (range(85, 87), "Weekend"),
        (range(87, 94), "EMBA"),
        ((50, 60), "PhD"),
    )
    for sections, name in programs:
        if section in sections:
            return name
    return "NA"
def add_growth_rate_to_agents(agents):
    """ Calculate individual agent growth rates and add them to the data.

    agents: {time: {agent_id: state}} where state['boundary']['mass'] exists.
    The first timepoint gets growth_rate 0; later timepoints get
    (mass1 - mass0) / (t1 - t0) relative to the previous timepoint.
    Mutates and returns `agents`.
    """
    times = list(agents.keys())
    # growth rate is undefined at the first timepoint -> 0
    for state in agents[times[0]].values():
        state['boundary']['growth_rate'] = 0
    for previous_t, current_t in zip(times[:-1], times[1:]):
        previous_agents = agents[previous_t]
        for agent_id, state in agents[current_t].items():
            if agent_id not in previous_agents:
                continue  # newly created agent: no rate for its first step
            mass_before = previous_agents[agent_id]['boundary']['mass']
            mass_now = state['boundary']['mass']
            state['boundary']['growth_rate'] = (mass_now - mass_before) / (current_t - previous_t)
    return agents
def common_token_adder(bits2tokens, common_tokens):
    """Append the common tokens to every block's token list.

    Args:
        bits2tokens: dictionary {bits_block: [tokens]}
        common_tokens: a list of the most frequently used tokens

    Return:
        a new dictionary with common_tokens appended to each block's tokens
    """
    return {bits: tokens + common_tokens for bits, tokens in bits2tokens.items()}
def verify_changed(source, result):
    """ Verify that *result* differs from *source* only in whitespace.

    Returns True when any line changed (whitespace-only), False when
    identical.

    :raises IndentationError: when a non-whitespace change is found
    """
    result_lines = result.split("\n")
    changed = False
    for index, source_line in enumerate(source.split("\n")):
        result_line = result_lines[index]
        if source_line != result_line:
            changed = True
            if source_line.strip() != result_line.strip():
                raise IndentationError("Non-whitespace changes detected. Core dumped.")
    return changed
def conv_out_size(input_size, kernel_size, stride=1, padding=0):
    """Calculate the output size of a convolution along one dimension.

    Parameters
    ----------
    input_size : int
        Row/Column size of the input tensor.
    kernel_size : int
        Kernel size of the convolution filter.
    stride : int, default 1
        Stride of the convolution filter.
    padding : int, default 0
        The amount of padding added to the input's given dimension.

    Returns
    -------
    int
        Output size of the convolution along the given dimension.

    Notes
    -----
    .. math:: n_{out} = \\lfloor\\frac{n_{in} + 2 * p - k}{s}\\rfloor + 1

    Examples
    --------
    >>> conv_out_size(5, kernel_size=2, stride=2, padding=1)
    3
    """
    padded = input_size + 2 * padding
    return (padded - kernel_size) // stride + 1
def _get_amino_acid_exchange( original_AA_sequence, altered_AA_sequences, ref_nucleotides, altered_nucleotides ): """FF: Not sure what this does exactly. I think it turns the differences between orignal_AA and altered_aa it into a 'edit' representation """ ret = "" for altered_AA_sequence in altered_AA_sequences.split(","): if len(ref_nucleotides) == 1 and len(altered_nucleotides) == 1: # SNP for num, aminoacid in enumerate(original_AA_sequence): if aminoacid != altered_AA_sequence[num]: ret = aminoacid + str(num + 1) + altered_AA_sequence[num] break elif len(ref_nucleotides) < len(altered_nucleotides): # insertion if (len(altered_nucleotides) - len(ref_nucleotides)) % 3 == 0: # inframe # inserted_len = (len(altered_nucleotides) - len(ref_nucleotides)) / 3 for num, aminoacid in enumerate(original_AA_sequence): if aminoacid != altered_AA_sequence[num]: ret = ( aminoacid + str(num + 1) + "ins" + original_AA_sequence[num + 1] ) break else: # frameshift ret = "fs" elif len(ref_nucleotides) > len(altered_nucleotides): # deletion if ( len(altered_AA_sequences) != 0 and (len(ref_nucleotides) - len(altered_nucleotides)) % 3 == 0 ): # inframe deleted_aa = (len(ref_nucleotides) - len(altered_nucleotides)) / 3 for num, aminoacid in enumerate(altered_AA_sequence): if aminoacid != original_AA_sequence[num]: ret = ( original_AA_sequence[num] + str(num + 1) + "del" + original_AA_sequence[num + 1 + deleted_aa] ) break else: ret = "fs" return ret
def ord_time(time):
    """Compute the ordinal number of a text milestone

    :param time: string ("day", "week", "month", "season" or "year")
    :rtype: integer (unknown milestones map to 5)
    """
    ordinals = {"day": 1, "week": 2, "month": 3, "season": 4, "year": 5}
    return ordinals.get(time, 5)
def comp(a, b):
    """ Custom comparator: lexicographic over the shared prefix.

    Returns -1/1 on the first differing element; -1 when the shared prefix
    is equal (this also handles empty elements).
    """
    for left, right in zip(a, b):
        if left < right:
            return -1
        if left > right:
            return 1
    return -1  # This to handle empty element
def get_wikipedia_single_pattern(lang, date):
    """Return a regex pattern matching wiki .bz2 dump files to be extracted."""
    return rf'({lang}wiki-{date}-pages-articles+.xml.*bz2$)'
def linear_regression(x_list, y_list):
    """ Calculates the slope using least-squares linear regression.

    This implementation is not suited for large datasets.

    Arguments:
        x_list, y_list -- equal-length sequences of x and y values

    Output:
        slope (float), or NaN when the x values have zero variance.
        (The intercept was dropped because it was unused.)
    """
    assert len(x_list) == len(y_list), "expected input shapes to match"
    count = len(x_list)
    sum_x = sum_y = sum_xy = sum_x_squared = sum_y_squared = 0.0
    for x, y in zip(x_list, y_list):
        sum_x += x
        sum_y += y
        sum_xy += x * y
        sum_x_squared += x * x
        sum_y_squared += y * y
    try:
        return (((count * sum_xy) - (sum_x * sum_y))
                / ((count * sum_x_squared) - (sum_x * sum_x)))
    except ZeroDivisionError:
        return float('NaN')
def sn_rate(t):
    """ CCSNE rate, SN / Gyr per solar mass of star formation.

    Piecewise-constant in age t (Gyr); zero outside (agemin, agemax].
    Output changed to /Gyr to keep the same units as the input t.
    The SNIa-only term beyond agemax is intentionally disabled (zero).
    """
    agemin = 0.003401  # Gyr
    agebrk = 0.010370  # Gyr
    agemax = 0.03753   # Gyr
    if t <= agemin:
        rate = 0.0
    elif t <= agebrk:
        rate = 5.408E-4
    elif t <= agemax:
        rate = 2.516E-4
    else:
        # SNIa term removed; CCSNE rate is zero past agemax
        rate = 0.0
    return rate * 1000.0
def compute_checksum(data):
    """Compute the 7-bit checksum from a list of byte values.

    This is appended to the data of the sysex message.
    """
    low7 = sum(data) & 0x7F
    return (128 - low7) & 0x7F
def convert_int(string_value):
    """Converts a string to an integer (see CONVERTERS). There is a
    converter function for each column type.

    :param string_value: The string to convert
    :raises: ValueError if the string cannot be represented by an int
    """
    stripped = string_value.strip()
    return int(stripped)
def late_contract_cps(changepoints, thresh=60):
    """
    Count changepoints closer than `thresh` (optional argument) days to the
    END of the contract.  (The original docstring said "start", but the
    code measures distance from the last element, i.e. the contract end;
    the final element itself, at distance 0, is not counted.)

    CPs should be given as obtained by the model.predict() method from the
    ruptures package, i.e. a list/numpy array of positive integers
    representing days since the start of the contract, with the last
    element being the final day of the contract.
    """
    contract_end = changepoints[-1]
    # Count CPs with 0 < (end - cp) <= thresh in one pass.
    return sum(1 for cp in changepoints if 0 < contract_end - cp <= thresh)
def cross_product(vector1, vector2):
    """get cross-product for 3D vectors"""
    (ax, ay, az), (bx, by, bz) = vector1, vector2
    return (
        ay * bz - az * by,
        az * bx - ax * bz,
        ax * by - ay * bx,
    )
def fixedcase_word(w, truelist=None, falselist=None, allcaps=False):
    """Returns True if w should be fixed-case, False if not, None if unsure."""
    # A capital after the first character is strong evidence of fixed case —
    # unless the whole context is all-caps, where case carries no signal.
    if not allcaps:
        for ch in w[1:]:
            if ch.isupper():
                return True
    if truelist is not None and w in truelist:
        return True
    if falselist is not None and w in falselist:
        return False
    return None
def filter_providers(providers, names):
    """
    Filters the list of :providers: given one or more :names:.

    `names` may be None/empty (no filtering), a single string, or a list
    of strings; matching is case-insensitive on `provider_name`.
    """
    if names is None or len(names) == 0:
        return providers
    wanted = [names] if isinstance(names, str) else names
    wanted = {n.lower() for n in wanted}
    return [p for p in providers if p.provider_name.lower() in wanted]
def state_dict_cpu_copy(chkpt):
    """save cpu copy of model state, so it can be reloaded by any device

    NOTE: mutates `chkpt` in place (each tensor is replaced by its .cpu()
    copy) and returns the same object, as before.
    """
    # If chkpt wraps the state dict, unwrap it; otherwise chkpt IS the dict.
    state_dict = chkpt['state_dict'] if 'state_dict' in chkpt else chkpt
    for name in state_dict:
        state_dict[name] = state_dict[name].cpu()
    return chkpt
def split(windows, num):
    """Split an iterable of windows into `num` sublists of (roughly) equal
    length, preserving order. Return a list of these sublists.

    BUGFIX: the original used fixed-size chunks of ``len(windows) // num``,
    which silently dropped up to ``num - 1`` trailing elements whenever the
    length was not divisible by `num`.  Now the first ``len % num``
    sublists simply absorb one extra element each, so every input element
    appears in exactly one sublist.
    """
    if num == 1:
        # Preserved quirk: the input is returned as-is (not listified).
        return [windows]
    windows = windows if isinstance(windows, list) else list(windows)
    base, extra = divmod(len(windows), num)
    chunks = []
    start = 0
    for i in range(num):
        end = start + base + (1 if i < extra else 0)
        chunks.append(windows[start:end])
        start = end
    return chunks
def wrapped_string(text, screen_width, prefix=0):
    """This function will take a string and make sure it can fit within the
    given screen_width.  If the string is too long to fit, it will be broken
    on word boundaries (specifically the ' ' character) if it can or the word
    will be split with a '-' character and the second half moved to the next
    line.  If a prefix is given, the line(s) will be prefixed with that many
    ' ' characters, including any wrapped lines.

    If the given string includes embeded newline characters, then each line
    will be evaluated according to the rules above including breaking on word
    boundaries and injecting a prefix.

    :param text: the string to wrap (may contain '\\n')
    :param screen_width: maximum line width in characters (int)
    :param prefix: number of leading spaces to inject on every line (int)
    :return: the wrapped string (no trailing whitespace)
    """
    if not text:
        return ''
    new_text = ''
    # if we have multiple paragraphs, then wrap each one as if it were a single line
    lines = text.split('\n')
    if len(lines) > 1:
        # Recurse per paragraph; the prefix is injected here for every
        # paragraph after the first (the caller handles the first line).
        for index, line in enumerate(lines):
            if index > 0:
                new_text += ' ' * prefix
            new_text += wrapped_string(line, screen_width, prefix=prefix) + '\n'
        return new_text.rstrip()
    # Single-line fast path: fits within the width, nothing to do.
    if len(text) + prefix < screen_width:
        return text
    words = text.split(' ')
    current_line = ''
    for word in words:
        if prefix + len(current_line) + len(word) + 1 < screen_width:
            # if word fits on line, just add it
            current_line += word + ' '
        else:
            # Remaining room on the current line before the margin.
            space_left = screen_width - (prefix + len(current_line))
            if space_left < 3 or len(word) - space_left < 3:
                # if not much room, move whole word to the next line
                new_text += '%s\n' % current_line.rstrip()
                current_line = '%s%s ' % (' ' * prefix, word)
            else:
                # split the word across lines with a hyphen
                current_line += word[:space_left - 1] + '-'
                new_text += current_line.rstrip() + "\n"
                current_line = ' ' * prefix + word[space_left - 1:] + ' '
    # Flush whatever remains on the last (partial) line.
    new_text += current_line.rstrip()
    return new_text
def is_attempt_dir(dir_i):
    """Classify a directory name of the form '<N>_attempt'.

    :param dir_i: directory name (str)
    :return: dict with keys
        is_attempt_dir: True iff the name is '<digits>_attempt'
        att_num: the integer attempt number when the prefix is numeric,
            else None (set even when the suffix is not 'attempt', as before)
    """
    att_num = None
    is_match = False
    parts = dir_i.split("_") if "_" in dir_i else []
    if len(parts) == 2 and parts[0].isnumeric():
        att_num = int(parts[0])
        is_match = parts[1] == "attempt"
    return {"is_attempt_dir": is_match, "att_num": att_num}
def _create_table_query(table_name: str) -> str: """Build SQL query to create metrics table.""" return ( f"CREATE TABLE IF NOT EXISTS {table_name}(" f" id SERIAL PRIMARY KEY," f" page_url TEXT," f" http_code SMALLINT," f" response_time INT," f" timestamp TIMESTAMPTZ" f")" )
def get_int_from_rgb(rgb):
    """Convert an RBG color to its TRNSYS Studio compatible int color.

    Values are used ranging from 0 to 255 for each of the components.

    Important: Unlike Java, the TRNSYS Studio will want an integer where
    bits 0-7 are the red value, 8-15 the green, and 16-23 the blue — i.e.
    blue occupies the high byte, as the shifts below and the worked example
    show.  (The original docstring stated the reverse, contradicting both
    the code and the example value 9534163.)

    Examples:
        Get the rgb int from an rgb 3-tuple

        >>> get_int_from_rgb((211, 122, 145))
        9534163

    Args:
        rgb (tuple): The red, green and blue values. All values assumed to
            be in range [0, 255].

    Returns:
        (int): the rgb int.
    """
    red, green, blue = map(int, rgb)
    # blue -> bits 16-23, green -> bits 8-15, red -> bits 0-7
    rgb_int = (blue << 16) + (green << 8) + red
    return rgb_int
def string_to_interactions(string):
    """
    Converts a compact string representation of an interaction to an
    interaction:

    'CDCDDD' -> [('C', 'D'), ('C', 'D'), ('D', 'D')]

    :param string: even-length string of alternating player-1/player-2 actions
    :return: list of (p1action, p2action) tuples
    :raises IndexError: when the string has odd length (same as before)
    """
    # Pair consecutive characters by stride-2 indexing: O(n), versus the
    # original pop(0) loop which was O(n^2) on the front of a list.
    return [(string[i], string[i + 1]) for i in range(0, len(string), 2)]
def dirCosToGnomonic(alpha, beta, gamma):
    """Convert direction cosines to gnomonic tangent plane projection.

    Parameters
    ----------
    alpha, beta, gamma : float
        Direction cosines (unit vector projected onto x, y, z in order)

    Returns
    -------
    u, v : float
        Gnomonic tangent plane coordinates in radians.

    Notes
    -----
    The tangent plane reference is at (u,v) = (0,0), which corresponds to
    (alpha, beta, gamma) = (0, 0, -1) (a ray coming directly from above).
    The orientation is such that vx (vy) is positive when u (v) is positive.
    """
    # Divide through by the z-cosine; the sign flip keeps u (v) positive
    # when alpha (beta) is positive for gamma < 0.
    return -alpha / gamma, -beta / gamma
def count_to_dict(lst):
    """loop on the list of id's to create a dictionary of
    key = product id / value = count how many common categories

    Uses collections.Counter (one O(n) pass) instead of calling
    ``lst.count`` once per element, which was O(n^2).
    """
    from collections import Counter
    # dict() keeps the declared plain-dict return type.
    return dict(Counter(lst))
def as_cmd(selector: str) -> str:
    """
    selector:str -> Sets the command's executor to target entity, without
    changing execution position, rotation, dimension, or anchor
    """
    # Non-string selectors yield an empty command, as before.
    if isinstance(selector, str):
        return f"as {selector}"
    return ""
def url_encode(to_sanitize: str) -> str:
    """Percent-encode a handful of special characters for URL purposes.

    :param to_sanitize: raw string to encode
    :return: string with space, comma, apostrophe and parentheses encoded
    :see: https://www.degraeve.com/reference/urlencoding.php
    """
    return to_sanitize.replace(
        ' ', '%20'
    ).replace(
        # BUGFIX: ',' percent-encodes to %2C; the original emitted %B4,
        # which is the encoding of the acute accent, not the comma.
        ',', '%2C'
    ).replace(
        '\'', '%27'
    ).replace(
        '(', '%28'
    ).replace(
        ')', '%29'
    )
def class_counts(rows, column_number):
    """counts the occurance of each value in column `column_number` of the
    dataset; returns a dict of value -> count

    :param rows: iterable of indexable rows
    :param column_number: index of the column to tally
    """
    counts = {}  # a dictionary of label -> count
    for row in rows:
        label = row[column_number]
        # dict.get folds the membership test and increment into one step.
        counts[label] = counts.get(label, 0) + 1
    return counts
def _correct_xml_vs_js_structure_mismatch(py_obj): """ Some of the object substructure in XML EPCIS is just not present in JSON or unsystematically renamed """ # bisTransaction, source and destination have a nested id in JSON but not in XML if py_obj[0] == "bizTransaction" or py_obj[0] == "source" or py_obj[0] == "destination": for element in [x for x in py_obj[2] if x[0] == py_obj[0]]: py_obj[2].remove(element) return py_obj[0], element[1], py_obj[2] # inconsistent child names / omissions if py_obj[0] == "inputEPC" or py_obj[0] == "outputEPC": return "epc", py_obj[1], py_obj[2] # quantity can be a quantityList child which should be called quantityElement (omitted in JSON) or the quantity # property of such an element if py_obj[0] == "inputQuantity" or py_obj[0] == "outputQuantity" or py_obj[0] == "childQuantity" or ( py_obj[0] == "quantity" and len(py_obj[2]) > 0): return "quantityElement", py_obj[1], py_obj[2] if py_obj[0] == "inputQuantity" or py_obj[0] == "outputQuantity": return "quantityElement", py_obj[1], py_obj[2] return py_obj
def calculate_chart_size_from_elements(header_size, element_size, number_elements):
    """
    Calculates the total size for a chart based on the desired size for the
    header (title, axis, etc.), and the size and number of elements (lines,
    columns).
    """
    elements_total = element_size * number_elements
    return header_size + elements_total
def tmake_remove(all_list, removed_list):
    """Return `all_list` without any element present in `removed_list`.

    :param all_list: list of paths; None is treated as empty
    :param removed_list: list (or single item) of paths to drop; None
        removes nothing
    :return: a list.  BUGFIX: the original returned a lazy ``filter``
        object on the main path under Python 3, inconsistent with the
        other return values (``[]`` and the original list).
    """
    if all_list is None:
        return []
    if removed_list is None:
        return all_list
    if not isinstance(removed_list, list):
        removed_list = [removed_list]
    removed = set(removed_list)  # O(1) membership per element
    return [item for item in all_list if item not in removed]
def validate_nb_token(token):
    """
    Validate NetBox API token environment variable
    """
    # Reject only a missing (None) or empty-string token; other falsy
    # values (e.g. 0) are deliberately left valid, as before.
    if token is None or token == "":
        print("Missing API token")
        return False
    return True
def get_class_namespace(cls):
    """
    Resolve the full qualified namespace of a class.  This supports
    multiple inheritance using Python's method resolution order.

    >>> from omtk.rigs.rigArm import Arm
    >>> get_class_namespace(Arm)
    'Module.Limb.Arm'

    :param cls: A class definition to inspect.
    :return: A str instance representing the full qualified class namespace.
    """
    if not hasattr(cls, '__mro__'):
        raise NotImplementedError(
            "Class {0} is a Python old-style class and is unsupported.".format(cls)
        )
    # Walk the MRO base-first, dropping the implicit `object` root.
    names = [base.__name__ for base in cls.__mro__ if base != object]
    names.reverse()
    return '.'.join(names)
def ascii_to_walls(char_matrix):
    """
    Parse a square ASCII grid into wall and empty cell coordinates.

    '#' marks a wall, ' ' an empty cell; any other character raises
    ValueError.

    :param char_matrix: Matrix of characters (must be square).
    :return: (wall_locs, empty) — two lists of (row, col) tuples.
    """
    grid_size = len(char_matrix[0])
    assert len(char_matrix) == grid_size, 'Mismatch in the columns.'
    for row in char_matrix:
        assert len(row) == grid_size, 'Mismatch in the rows.'

    wall_locs, empty = [], []
    for r, row in enumerate(char_matrix):
        for c, char in enumerate(row):
            if char == '#':
                wall_locs.append((r, c))
            elif char == ' ':
                empty.append((r, c))
            else:
                raise ValueError('Unknown character {} in grid.'.format(char))
    return wall_locs, empty
def validate_label(label: str) -> str:
    """Ensure that the given label does not start with the reserved '$'
    character.

    Returns the given label if valid.  Raises a ValueError if an invalid
    label is given.

    Parameters
    ----------
    label: string
        Label for serialization of an archive component.

    Returns
    -------
    string

    Raises
    ------
    ValueError
    """
    if not label.startswith('$'):
        return label
    raise ValueError("invalid label '{}'".format(label))
def convert_bytes_to_str(message):
    """Convert bytes to a str phrase, silently dropping undecodable bytes
    (UTF-8 with errors='ignore')."""
    decoded = message.decode('utf8', 'ignore')
    return decoded