content
stringlengths
42
6.51k
def create_individual_tests(test_set):
    """Build individual test definitions (dicts) from a test set.

    Assumes ``expected_outputs`` is a list of ``(key, value)`` tuples of
    expected output pairs, e.g. ``[('schema', [SCHEMA-DICT]), ...]``.
    Pairs with a falsy value are skipped.
    """
    shared_inputs = {key: val for key, val in test_set.items()
                     if key != "expected_outputs"}
    expected_pairs = test_set.get("expected_outputs", [(None, None)])
    tests = []
    for out_key, out_val in expected_pairs:
        if out_val:
            tests.append({out_key: out_val, **shared_inputs})
    return tests
def get_corr_hex(num):
    """Get the hexadecimal digit string corresponding to a number.

    Parameters
    -------------
    num
        Number in [0, 16); values below 10 are returned as decimal digits
        (truncated to int), 10-15 map to "A".."F".

    Returns
    -------------
    hex_string
        Hexadecimal digit string, or None when num >= 16 (the original
        if/elif ladder fell off the end and returned None implicitly).
    """
    if num < 10:
        return str(int(num))
    if num < 16:
        # Lookup table replaces the six-way elif chain.
        return "ABCDEF"[int(num) - 10]
    return None
def addContent(old_html, raw_html):
    """Concatenate two HTML fragments and return the combined markup."""
    return old_html + raw_html
def left_beats_right(left: str, right: str) -> bool:
    """Return True when *left* beats *right* in rock-paper-scissors."""
    loses_to = {
        "rock": "scissors",
        "paper": "rock",
        "scissors": "paper"
    }
    return loses_to[left] == right
def parse_field(es_article, hit, es_key, api_key):
    """Copy ``hit[api_key]`` into ``es_article[es_key]`` when present.

    :param es_article: the record data which will be inserted into ElasticSearch
    :param hit: the input data
    :param es_key: the key in the elastic search record
    :param api_key: the key to search the input data
    :return: the (possibly updated) record; unchanged when api_key is absent
    """
    try:
        es_article[es_key] = hit[api_key]
    except KeyError:
        pass
    return es_article
def ctof(at: float) -> float:
    """Convert a Celsius temperature to Fahrenheit."""
    fahrenheit = at * (9 / 5) + 32
    return float(fahrenheit)
def is_abbreviation(sentence):
    """Heuristically decide whether *sentence* ends with an abbreviation.

    True when the final space-separated word is short (<= 3 chars) and
    contains an uppercase letter, and the sentence has more than one word.
    """
    words = sentence.split(" ")
    if len(words) == 1:
        return False
    last_word = words[-1]
    return len(last_word) <= 3 and any(ch.isupper() for ch in last_word)
def is_power(a, b):
    """Check whether ``a`` is a power of ``b``.

    That is, whether ``a == b**n`` holds for some integer n >= 0.
    Recursive implementation.
    """
    if a == 1 or a == b:
        return True
    if a == 0 and b != 0:
        return False
    if a != 1 and b == 1:
        return False
    # Floor division keeps the recursion in exact integer arithmetic; the
    # original used a / b, whose float result loses precision for large ints.
    if a % b == 0 and is_power(a // b, b):
        return True
    return False
def unicode_path(utf8path):
    """Return *utf8path* as a unicode string, decoding bytes as UTF-8."""
    return utf8path.decode("utf-8") if isinstance(utf8path, bytes) else utf8path
def clean_whitespace(text):
    """Collapse all runs of whitespace in *text* into single spaces.

    Parameters
    ----------
    text : str
        The text to fix (non-strings are converted via str()).

    Returns
    -------
    str
        Fixed text with exactly one space between tokens.
    """
    tokens = str(text).split()
    return " ".join(tokens)
def get_last_pair(dictionary, key):
    """Return dictionary[key], or its last element when the value is a list.

    Raises KeyError if *key* is not in *dictionary*.
    """
    value = dictionary[key]
    return value[-1] if isinstance(value, list) else value
def create_id(fname: str, lname: str) -> str:
    """Create a player ID from a first name and last name.

    String format: <first 5 characters of last name><first 2 characters of
    first name><01>.  The last two integer digits allow for the prevention
    of ID conflicts.  To increment by an integer n, use add_n(player_id, n).

    NOTE: spaces, periods, and apostrophes are omitted.
    """
    def _clean(name: str) -> str:
        # lowercase and drop spaces, periods, and apostrophes
        return name.lower().replace(" ", "").replace(".", "").replace("'", "")

    # Bug fix: the original only cleaned lname; fname kept its punctuation
    # and spaces despite the documented contract.
    fname = _clean(fname)
    lname = _clean(lname)
    return "{}{}01".format(lname[:5], fname[:2])
def recursive_s(l, x, low=None, high=None, mid=None):
    """Binary-search sorted list *l* for *x*; return its index or None.

    Assumes that l only contains distinct values.  If l contains duplicate
    x values then an arbitrary matching index will be returned.
    Recursive implementation.
    """
    if not l:
        return None
    if len(l) == 1:
        return 0 if l[0] == x else None
    if low is None:
        low = 0
    if high is None:
        high = len(l) - 1
    if mid is None:
        # Bug fix: the midpoint must be offset from `low`; the original
        # default `(high - low) // 2` was only correct when low == 0.
        mid = low + (high - low) // 2
    if low > high:
        return None
    if x > l[mid]:
        # Recurse into the upper half; mid is recomputed from the new bounds.
        return recursive_s(l, x, mid + 1, high, None)
    if x < l[mid]:
        # Recurse into the lower half.
        return recursive_s(l, x, low, mid - 1, None)
    return mid
def transposed(table):
    """Return the transposition of *table* (rows become columns).

    Returns None when *table* is None.  Rows may be ragged; shorter rows
    simply contribute fewer entries to the later columns.
    """
    if table is None:  # identity check, not `== None`
        return None
    # Width of the output = length of the longest row.
    width = max((len(row) for row in table), default=0)
    t_table = [[] for _ in range(width)]
    for row in table:
        for i, cell in enumerate(row):
            t_table[i].append(cell)
    return t_table
def add_array_type(property_schema):
    """Return a copy of the schema whose type additionally allows 'array'.

    :param dict property_schema: schema to add array type to
    :returns: a new dict schema
    """
    widened = dict(property_schema)
    widened['type'] = [property_schema['type'], 'array']
    return widened
def is_even(val):
    """Predicate: True when *val* is evenly divisible by two."""
    return not val % 2
def points_intermediates(p1, p2, nb_points):
    """Return nb_points equally spaced points strictly between p1 and p2,
    with p1 and p2 themselves appended at the end of the list.

    A falsy *nb_points* (0 or None) defaults to 3 intermediate points.
    """
    if not nb_points:
        # Bug fix: the original assigned an unused local (`nb = 3`), so
        # nb_points=None crashed in the division and nb_points=0 silently
        # produced no intermediate points.
        nb_points = 3
    x_spacing = (p2[0] - p1[0]) / (nb_points + 1)
    y_spacing = (p2[1] - p1[1]) / (nb_points + 1)
    points = [
        [p1[0] + i * x_spacing, p1[1] + i * y_spacing]
        for i in range(1, nb_points + 1)
    ]
    return points + [p1, p2]
def _tree_to_paths(tree): """Build a list of paths made by walking from the root of the tree to each leaf.""" if len(tree) == 0: return [""] return [ c + path for c, subtree in sorted(tree.items()) for path in _tree_to_paths(subtree) ]
def find_loop_size(public_key, subject=7):
    """Return the loop size that transforms *subject* into *public_key*.

    To transform a subject number, start with the value 1, then repeatedly
    multiply by the subject number and reduce modulo 20201227.  The loop
    size is the number of iterations after which the value equals the
    public key.
    """
    value, loops = 1, 0
    while value != public_key:
        value = (value * subject) % 20201227
        loops += 1
    return loops
def thresh_hold_binarization(feature_vector, thresh_hold):
    """Binarize a vector: values >= threshold become 1, the rest become 0.

    :param feature_vector: List of integer/float/double..
    :param thresh_hold: Thresh hold value for binarization
    :return: Processed and binarized list of data.
    """
    return [int(value >= thresh_hold) for value in feature_vector]
def _encapsulate_admin(cmd): """Encapsulate a command with an Administrator flag""" # To get admin access, we start a new powershell instance with admin # rights, which will execute the command return "Start-Process PowerShell -windowstyle hidden -Wait -Verb RunAs -ArgumentList '-command &{%s}'" % cmd
def is_superincreasing(seq):
    """Return True when every element of *seq* exceeds the sum of all
    elements that precede it (a superincreasing sequence)."""
    running_total = 0
    for value in seq:
        if value <= running_total:
            return False
        running_total += value
    return True
def format_run_status(run, default='-'):
    """Common formatter for the success boolean field: return
    ``run.success``, or *default* when *run* is falsy."""
    return run.success if run else default
def title_case(string: str) -> str:
    """Capitalize the first character and every character after a space.

    Unlike str.title(), characters following punctuation are left alone.

    :param string: the string to be title-cased
    :return: the string in title case
    """
    out = []
    for i, ch in enumerate(string):
        starts_word = i == 0 or string[i - 1] == ' '
        out.append(ch.upper() if starts_word else ch)
    return "".join(out)
def combine(d1, d2):
    """Merge two dictionaries into a new one; d2's values win on clashes."""
    merged = dict(d1)
    merged.update(d2)
    return merged
def is_iterable(x):
    """Return True if *x* can produce an iterator."""
    try:
        iter(x)
        return True
    except TypeError:
        return False
def tuple2str(tuple_in):
    """Converts a tuple into a string.

    :param tuple_in: tuple to convert
    :type tuple_in: tuple
    :returns: concatenated string version of the tuple
    :rtype: str
    """
    # str.join is linear; the original repeated `+=` concatenation is
    # potentially quadratic.
    return ''.join(str(item) for item in tuple_in)
def koma_sepp(n):
    """Take input integer n and return a comma-separated string,
    separating 1000s.

    >>> koma_sepp(131032047)
    '131,032,047'
    >>> koma_sepp(18781)
    '18,781'
    >>> koma_sepp(666)
    '666'
    """
    return f'{n:,}'
def generate_hashtag(s):
    """Generate a hashtag: every word capitalized, prefixed with '#'.

    The input string must be 1-140 characters long, otherwise False is
    returned.

    :param s: a string value.
    :return: the string in hashtag form, otherwise False.
    """
    if not 1 <= len(s) <= 140:
        return False
    capitalized = (word.capitalize() for word in s.split())
    return "#" + "".join(capitalized)
def add_backticks(s):
    """Wrap *s* in double backticks for mono-spaced rst output.

    e.g.: add_backticks("zone_helper") -> "``zone_helper``"
    """
    return f"``{s}``"
def alfa_(w):
    """Return True if the string *w* contains only alphabetic or
    underscore characters.

    Note: implemented in a hacky way to increase speed -- underscores are
    stripped and a trailing "a" is appended so that the empty string (and
    an all-underscore string) still tests non-empty before isalpha().
    """
    stripped = (w + "a").replace('_', '')
    return stripped.isalpha()
def recurrence_abc(n, alpha, beta):
    """See A&S online - https://dlmf.nist.gov/18.9 .

    Pn = (an-1 x + bn-1) Pn-1 - cn-1 * Pn-2

    This function makes a, b, c for the given n, i.e. to get a(n-1), do
    recurrence_abc(n-1).

    NOTE(review): the (alpha, beta) parameterization appears to follow the
    Jacobi-polynomial recurrence of DLMF 18.9 -- confirm against callers.

    Returns
    -------
    (A, B, C) : tuple
        Recurrence coefficients for the given n.
    """
    aplusb = alpha+beta
    # The general formulas below are indeterminate (0/0 denominators) when
    # n == 0 and alpha + beta is 0 or -1, so those cases use closed forms.
    if n == 0 and (aplusb == 0 or aplusb == -1):
        A = 1/2 * (alpha + beta) + 1
        B = 1/2 * (alpha - beta)
        C = 1
    else:
        Anum = (2 * n + alpha + beta + 1) * (2 * n + alpha + beta + 2)
        Aden = 2 * (n + 1) * (n + alpha + beta + 1)
        A = Anum/Aden
        Bnum = (alpha**2 - beta**2) * (2 * n + alpha + beta + 1)
        Bden = 2 * (n+1) * (n + alpha + beta + 1) * (2 * n + alpha + beta)
        B = Bnum / Bden
        Cnum = (n + alpha) * (n + beta) * (2 * n + alpha + beta + 2)
        Cden = (n + 1) * (n + alpha + beta + 1) * (2 * n + alpha + beta)
        C = Cnum / Cden
    return A, B, C
def add_marker(x, y, z):
    """Build a plotly scatter3d marker dict for a single (x, y, z) point."""
    marker = {
        "x": [x],
        "y": [y],
        "z": [z],
        "mode": "markers",
        "marker": {"size": 25, "line": {"width": 3}},
        "name": "Marker",
        "type": "scatter3d",
        "text": ["Click point to remove annotation"],
    }
    return marker
def TestResourceName(name):
    """Return a resource name for a resource under the test data directory."""
    return '{}:data/{}'.format(__name__, name)
def monet_escape(data):
    """Return *data* as a single-quoted string with backslashes and
    single quotes escaped."""
    escaped = str(data).replace("\\", "\\\\").replace("'", "\\'")
    return "'%s'" % escaped
def running_sum(x):
    """Return a list of the running (prefix) sums of *x*."""
    # itertools.accumulate performs the prefix-sum loop in C; the original
    # hand-rolled the same loop with an index and a mutable accumulator.
    from itertools import accumulate
    return list(accumulate(x))
def transpose_loggraph(loggraph_dict):
    """Transpose a CCP4-parsed-loggraph dict into per-column value lists.

    Raw loggraph column labels are not always unique, so each label is
    prefixed with its 1-based column number to guarantee unique keys.
    """
    data = loggraph_dict["data"]
    new_columns = ['%d_%s' % (idx, label)
                   for idx, label in enumerate(loggraph_dict["columns"], start=1)]
    results = {col: [] for col in new_columns}
    for record in data:
        for j, col in enumerate(new_columns):
            results[col].append(record[j])
    return results
def fn_Z_L_1(omega, L_1):
    """Readout inductor impedance Z = j * omega * L_1 as a function of
    angular frequency omega and inductance L_1."""
    return complex(0, 1) * omega * L_1
def is_fmt(obj):
    """Return true iff *obj* is a formatter instance: callable and
    carrying a ``width`` attribute."""
    return hasattr(obj, "width") and callable(obj)
def get_hashable_value_tuple_from_dict(d):
    """Hashable tuple of values with sorted keys.

    >>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, })
    (45, 5.0)
    >>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, "resolutions": [(740, 480), (1920, 1080), ]})
    (45, 5.0, ((740, 480), (1920, 1080)))
    """
    values = []
    for key in sorted(d):
        value = d[key]
        # Lists are converted to tuples so the result stays hashable.
        values.append(tuple(value) if isinstance(value, list) else value)
    return tuple(values)
def _rectangles_overlap(bottomleft_1, topright_1, bottomleft_2, topright_2): """Compare two rectangles and return True if they are overlapping. Parameters ---------- bottomleft_1 : listlike, float x, y coordinate of bottom left corner of rectangle 1. topright_1 : listlike, float x, y coordinate of top right corner of rectangle 1. bottomleft_2 : listlike, float x, y coordinate of bottom left corner of rectangle 2. topright_2 : listlike, float x, y coordinate of top right corner of rectangle 2. Returns ------- boolean True if rectangles are overlapping, False if they do not overlap. """ # check if bottom_left_1 is above top_right_2 if bottomleft_1[1] > topright_2[1]: return False # check if bottom_left_2 is above top_right_1 elif bottomleft_2[1] > topright_1[1]: return False # check if top_right_1 is to the left of bottom_left_2 elif topright_1[0] < bottomleft_2[0]: return False # check if top_right_2 is to the left of bottom_left_1 elif topright_2[0] < bottomleft_1[0]: return False # else, rectangles are overlapping else: return True
def min_equals_max(min, max):
    """Return True if the minimum value equals the maximum value.

    Return False if not, or if either value is not defined (None).
    """
    if min is None or max is None:
        return False
    return min == max
def ms2knots(ms: float) -> float:
    """Convert meters per second to knots.

    :param float ms: m/s (non-numeric input yields 0)
    :return: speed in knots
    :rtype: float
    """
    if isinstance(ms, (float, int)):
        return ms * 1.94384395
    return 0
def residual_imag(im, fit_re, fit_im):
    """Relative residual of the imaginary part, per Boukamp's definition.

    Ref.:
        - Boukamp, B.A. J. Electrochem. SoC., 142, 6, 1885-1894

    Kristian B. Knudsen (kknu@berkeley.edu || kristianbknudsen@gmail.com)
    """
    modulus_fit = (fit_re ** 2 + fit_im ** 2) ** 0.5
    return (im - fit_im) / modulus_fit
def cadence(a, b, required_gap, start):
    """For the pair of numbers determine when they first repeat with the
    required gap, and the period with which it repeats, starting at the
    given value.

    >>> cadence(67, 7, 1, 0)
    (335, 469)
    >>> cadence(67, 7, 2, 0)
    (201, 469)
    >>> cadence(1789, 37, 1, 0)
    (30413, 66193)
    >>> cadence(17, 13, 21, 0)
    (187, 221)
    """
    hits = []
    value = start
    # Collect the first two multiples-of-a offsets satisfying the gap
    # condition; their difference is the repeat period.
    while len(hits) < 2:
        if (value + required_gap) % b == 0:
            hits.append(value)
        value += a
    return hits[0], hits[1] - hits[0]
def check_datasets_compatible(dataset1, dataset2):
    """Check two cross-corpus datasets can be combined.

    Verifies both datasets share the same class names and (when present)
    the same original shape, ignoring the first shape entry since the
    number of samples does not matter.

    Returns: class_names, original_shape
    Raises: Exception if datasets are not compatible
    """
    class_names1 = dataset1['class_names']
    class_names2 = dataset2['class_names']
    msg = f'Class names are not equal: {class_names1}, {class_names2}'
    mismatch = Exception(f'Datasets are not compatible!\n{msg}')
    if len(class_names1) != len(class_names2):
        raise mismatch
    if any(a != b for a, b in zip(class_names1, class_names2)):
        raise mismatch

    def _has_shape(ds):
        # A dataset declares its shape via ds['specs']['original_shape'].
        return 'specs' in ds and 'original_shape' in ds['specs']

    has_shape1 = _has_shape(dataset1)
    has_shape2 = _has_shape(dataset2)
    if has_shape1 and not has_shape2:
        raise Exception(
            'Dataset 1 has a original_shape but dataset 2 does not!')
    if has_shape2 and not has_shape1:
        raise Exception(
            'Dataset 2 has a original_shape but dataset 1 does not!')

    original_shape = None
    if has_shape1 and has_shape2:
        shape1 = dataset1['specs']['original_shape']
        shape2 = dataset2['specs']['original_shape']
        msg = f'Shapes are not equal: {shape1}, {shape2}'
        mismatch = Exception(f'Datasets are not compatible!\n{msg}')
        if len(shape1) != len(shape2):
            raise mismatch
        # Entry 0 (sample count) is deliberately skipped.
        if any(shape1[i] != shape2[i] for i in range(1, len(shape1))):
            raise mismatch
        original_shape = shape1
    return class_names1, original_shape
def _pelt_tau(half_life, window): """ Compute the time constant of an equivalent continuous-time system as defined by: ``tau = period * (alpha / (1-alpha))`` https://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter """ # Alpha as defined in https://en.wikipedia.org/wiki/Moving_average decay = (1 / 2)**(1 / half_life) alpha = 1 - decay tau = window * ((1 - alpha) / alpha) return tau
def find_already_present_insp_keys(replacements, bib_dbs):
    """Filter replacements to those whose INSPIRE key appears in bibs.

    Parameters
    ----------
    replacements: array of dict
        Each dict has keys "ads_key", "insp_key", and "bib_str".
    bib_dbs: array of `bibtexparser.bibdatabase.BibDatabase`

    Returns
    -------
    already_present_insp_keys: set of string
        INSPIRE keys appearing in `bib_dbs`.
    """
    return {
        rep["insp_key"]
        for rep in replacements
        for bib_db in bib_dbs
        if rep["insp_key"] in bib_db.entries_dict
    }
def log_new_fit(new_fit, log_gplus, mode='residual'):
    """Log a successful refit of a spectrum.

    Parameters
    ----------
    new_fit : bool
        If True, the spectrum was successfully refit.
    log_gplus : list
        Log of all previous successful refits of the spectrum.
    mode : str ('positive_residual_peak', 'negative_residual_peak',
        'broad', 'blended')
        The feature that was refit or used for a new successful refit.

    Returns
    -------
    log_gplus : list
        Updated log of successful refits of the spectrum.
    """
    codes = {'positive_residual_peak': 1, 'negative_residual_peak': 2,
             'broad': 3, 'blended': 4}
    if new_fit:
        log_gplus.append(codes[mode])
    return log_gplus
def simple_1arg_default(hello: str = "default"):
    """Return a greeting for *hello*.

    Args:
        hello: Your name.
    """
    greeting = "Hello {}".format(hello)
    return greeting
def getchapter(chapter):
    """Zero-pad a chapter number to three characters for saving."""
    text = str(chapter)
    number = int(text)
    if number < 10:
        return '00' + text
    if number < 100:
        return '0' + text
    return text
def strip_trailing_nl(s):
    """If s ends with a newline, drop it; else return s intact."""
    if s.endswith('\n'):
        return s[:-1]
    return s
def extract_data(dat):
    """Split raw API rows into separate match and player datasets.

    A match is kept only when every one of its players has a known
    profile_id and a known result ("won").  Each player dict gains its
    match_id (note: players are tagged with match_id even when the match
    is discarded, matching the original behavior).
    """
    matches = []
    players = []
    for row in dat:
        match_id = row["match_id"]
        row_players = []
        valid = True
        for player in row["players"]:
            if player["profile_id"] is None or player["won"] is None:
                valid = False
            player["match_id"] = match_id
            row_players.append(player)
        if valid:
            players.extend(row_players)
            matches.append(row)
    return {"matches": matches, "players": players}
def transObj(object, disp):
    """Translate a 2-tuple point horizontally by *disp*."""
    x, y = object[0], object[1]
    return (x + disp, y)
def summarize_node_support(snp_clade_info):
    """
    Converts SNP data into a clade-indexed datastructure

    :param snp_clade_info: [dict] Dictionary of clades and SNPs supporting
        them, nested as chrom -> position -> base -> SNP data dict
    :return: [dict] Summary of clade SNP support, keyed by clade_id
    """
    clades = {}
    # Walk every chrom/position/base record and tally it under its clade.
    for chrom in snp_clade_info:
        for position in snp_clade_info[chrom]:
            for base in snp_clade_info[chrom][position]:
                data = snp_clade_info[chrom][position][base]
                clade_id = data['clade_id']
                if clade_id not in clades:
                    # First sighting of this clade: initialise its counters.
                    clades[clade_id] = {'count_canonical':0,'count_homoplasy':0,'clade_total_members':data['clade_total_members'],'canonical_snps':{}}
                if data['is_canonical']:
                    clades[clade_id]['count_canonical']+= 1
                    # Record the canonical SNP under chrom/position.
                    if not chrom in clades[clade_id]['canonical_snps']:
                        clades[clade_id]['canonical_snps'][chrom] = {}
                    clades[clade_id]['canonical_snps'][chrom][position] = {'in_base':base,'out_bases':data['out_bases']}
                else:
                    # Non-canonical SNPs are counted as homoplasy.
                    clades[clade_id]['count_homoplasy'] += 1
    return clades
def IoU(bbox0, bbox1):
    """Intersection over union of two axis-aligned bounding boxes.

    Boxes are flat lists [min_1..min_d, max_1..max_d] of any dimension d.

    :param bbox0: bbox1 list
    :param bbox1: bbox2 list
    :return: IoU value; 0 when the union has zero volume
    """
    dim = len(bbox0) // 2  # floor division: no float round-trip via int(len/2)
    intersection = 1
    area0 = 1
    area1 = 1
    # Single pass accumulates overlap and both volumes per axis.
    for i in range(dim):
        overlap = max(0, min(bbox0[i + dim], bbox1[i + dim]) - max(bbox0[i], bbox1[i]))
        intersection *= overlap
        area0 *= bbox0[i + dim] - bbox0[i]
        area1 *= bbox1[i + dim] - bbox1[i]
    union = area0 + area1 - intersection
    if union == 0:
        return 0
    return intersection / union
def _default_combiner(tokens): """Default token combiner which assumes each token is a line.""" return '\n'.join(tokens)
def set_param(param, default, row, issues, pre, overrides=None, verbose=False):
    """Set a parameter value.

    Given a parameter name, that parameter's default value, a data table
    row, and a JSON dictionary which may have an entry for the current row
    that will override the parameter, return the parameter value that
    should be used.

    Parameters
    ----------
    param : str
        The parameter to check and return
    default : object
        The default value for that parameter
    row : abscal.common.exposure_data_table.AbscalDataTable
        A single-row table containing the data of interest.
    issues : dict
        A dictionary containing a set of parameters (one of which may be
        param), along with information to identify files whose parameters
        should be adjusted.
    pre : str
        Prefix used in the informational message when a value is changed.
    overrides : dict, optional
        Parameters whose value is being overridden by the user.
    verbose : bool
        Whether or not informational output should be printed.

    Returns
    -------
    value : object
        The appropriate value for the parameter given
    """
    # Bug fix: `overrides={}` was a shared mutable default argument.
    if overrides is None:
        overrides = {}
    value = default
    for item in issues.get(param, []):
        val_len = len(item["value"])
        # An issue applies when the row's column starts with the issue value.
        if row[item["column"]][:val_len] == item["value"]:
            value = item["param_value"]
            if verbose:
                msg = "{}: changed {} to {} because {} from {}"
                print(msg.format(pre, param, value, item["reason"], item["source"]))
    # User-supplied overrides always win.
    if param in overrides:
        value = overrides[param]
    return value
def normalize_knot_vector(knot_vector):
    """Returns a normalized knot vector within the [0, 1] domain.

    Parameters
    ----------
    knot_vector : list of float
        A knot vector.

    Returns
    -------
    list of float
        The normalized knot vector.
    """
    start = knot_vector[0]
    shifted = [knot - start for knot in knot_vector]
    span = float(shifted[-1])
    return [knot / span for knot in shifted]
def get_short_from_little_endian_bytearray(array, offset):
    """Get a short from a byte array, using little-endian representation,
    starting at the given offset.

    :param array: The byte array to get the short from
    :type array: bytearray
    :param offset: The offset at which to start looking
    :type offset: int
    :return: The decoded short
    :rtype: int
    """
    low_byte = array[offset]
    high_byte = array[offset + 1]
    return (high_byte << 8) | low_byte
def cfs_to_mmday(cfs, SA_sq_ft):
    """Convert a flow over a surface area to mm/day.

    cfs: (float) flow rate in cubic feet per second
    SA_sq_ft: (float) surface area in square feet
    """
    depth_rate_ft_per_sec = cfs / SA_sq_ft
    # seconds/day then feet -> millimetres (304.8 mm per foot)
    return depth_rate_ft_per_sec * 24 * 60 * 60 * 304.8
def _get_embedded(inspected_interfaces): """Gets embedded interfaces from inspected interfaces.""" embedded_interfaces = [] for interface in inspected_interfaces: _biosdevname = interface['predictable_names'].get('biosdevname', '') if _biosdevname: if 'em' in _biosdevname: embedded_interfaces.append(interface) return embedded_interfaces
def intersectionRect(r1, r2, shift1 = (0,0), shift2 = (0,0), extraSize = 3 ):
    """
    gets two 4-tuples of integers representing a rectangle in min,max coord-s

    optional params.
    @shifts can be used to move boxes on a larger canvas (2d plane)
    @extraSize, forces the rectangles to stay away from each other by the
    given size (number of pixels)

    returns True if the rectangles intersect
    """
    # X-axis separation test: no intersection when one expanded, shifted
    # x-interval lies entirely beyond the other.
    # NOTE(review): extraSize is subtracted from index 0 and added to index 2
    # BEFORE min/max, so the expansion assumes r[0] <= r[2] -- confirm
    # callers always pass well-formed (min, max) rectangles.
    if ((min(r1[0] - extraSize + shift1[0] , r1[2] + extraSize + shift1[0] ) > max( r2[0] - extraSize + shift2[0], r2[2] + extraSize + shift2[0] ) ) or ( max(r1[0] - extraSize + shift1[0], r1[2] + extraSize + shift1[0] ) < min( r2[0] - extraSize + shift2[0], r2[2] + extraSize + shift2[0] ) ) ):
        return False
    # Y-axis separation test, symmetric to the x test above (indices 1 and 3).
    if ((min(r1[1] - extraSize + shift1[1] , r1[3] + extraSize + shift1[1] ) > max( r2[1] - extraSize + shift2[1], r2[3] + extraSize + shift2[1]) ) or ( max(r1[1] - extraSize + shift1[1], r1[3] + extraSize + shift1[1] ) < min( r2[1] - extraSize + shift2[1], r2[3]+ extraSize + shift2[1] ) ) ):
        return False
    # Overlapping on both axes -> intersecting.
    return True
def decode_http_header(raw):
    """Decode a raw HTTP header into a unicode string.

    RFC 2616 specifies that they should be latin1-encoded (a.k.a.
    iso-8859-1).  If the passed-in value is None, return an empty unicode
    string.

    :param raw: Raw HTTP header string.
    :type raw: string (non-unicode)
    :returns: Decoded HTTP header.
    :rtype: unicode string
    """
    if not raw:
        return u''
    return raw.decode('iso-8859-1', 'replace')
def resultCombine(type, old, new):
    """Experimental-ish result-combiner thing

    Merges the payload for result key *type* from a continued MediaWiki
    action=query response (*new*) into the accumulated response (*old*).

    If the result isn't something from action=query, this will just explode,
    but that shouldn't happen hopefully?
    """
    # NOTE(review): the parameter name `type` shadows the builtin; renaming
    # it would break keyword calls, so it is left as-is.
    ret = old
    if type in new['query']:
        # Basic list, easy
        ret['query'][type].extend(new['query'][type])
    else:
        # Else its some sort of prop=thing and/or a generator query
        for key in new['query']['pages'].keys():
            # Go through each page
            if not key in old['query']['pages']:
                # if it only exists in the new one, add it to the list
                ret['query']['pages'][key] = new['query']['pages'][key]
            else:
                if not type in new['query']['pages'][key]:
                    # new response has nothing for this page/type
                    continue
                elif type in new['query']['pages'][key] and not type in ret['query']['pages'][key]:
                    # if only the new one does, just add it to the return
                    ret['query']['pages'][key][type] = new['query']['pages'][key][type]
                    continue
                else:
                    # Need to check for possible duplicates for some, this is
                    # faster than just iterating over new and checking for
                    # dups in ret.
                    # NOTE(review): tuple(entry.items()) requires every entry
                    # to be a flat dict of hashable values; the set round-trip
                    # also discards entry ordering -- confirm acceptable.
                    retset = set([tuple(entry.items()) for entry in ret['query']['pages'][key][type]])
                    newset = set([tuple(entry.items()) for entry in new['query']['pages'][key][type]])
                    retset.update(newset)
                    ret['query']['pages'][key][type] = [dict(entry) for entry in retset]
    return ret
def get_str_arg(param: dict, key: str, required: bool = False, default: str = "") -> str:
    """Get a key from a command arg and convert it into a stripped str."""
    raw = param.get(key, default)
    if not isinstance(raw, str):
        raise ValueError(f"Please provide a valid string value for the parameter {key!r}")
    stripped = raw.strip()
    if required and not stripped:
        raise ValueError(f"No value supplied for parameter {key!r}")
    return stripped
def _splitnport(host, defport=-1): """Split host and port, returning numeric port. Return given default port if no ':' found; defaults to -1. Return numerical port if a valid number are found after ':'. Return None if ':' but not a valid number.""" host, delim, port = host.rpartition(':') if not delim: host = port elif port: try: nport = int(port) except ValueError: nport = None return host, nport return host, defport
def v(t):
    """Model for the speed v at time t under constant gravitational
    acceleration g and initial speed v_0 = 10."""
    gravity = -9.81
    initial_speed = 10
    return initial_speed + gravity * t
def merge_items(base, new_items):
    """Merge two lists, eliminating duplicates from *new_items*.

    The original list is not modified; order is preserved and the first
    occurrence wins.

    :type base: list
    :type new_items: list
    :rtype: list
    """
    # Copy once and append in place; the original `base = base + [item]`
    # rebuilt the entire list for every added item (quadratic).
    merged = list(base)
    for item in new_items:
        if item not in merged:
            merged.append(item)
    return merged
def safe_key_extractor(dico, key, default_val):
    """Get the value for *key* from a dictionary, or *default_val* when
    the key is absent.

    :param dico: dict
    :param key: key present or not in the dictionary
    :param default_val: default value to return
    :return: value or default_val
    """
    if key in dico:
        return dico[key]
    return default_val
def parse_fragment(fragment_string):
    """Take a fragment string and return a dict of its components.

    Raises ValueError for a malformed fragment (a component with no '=').
    """
    fragment_string = fragment_string.lstrip('#')
    try:
        return dict(
            # Split on the first '=' only, so values may themselves
            # contain '=' characters (legal in URL fragment values).
            key_value_string.split('=', 1)
            for key_value_string in fragment_string.split('&')
        )
    except ValueError:
        raise ValueError(
            'Invalid fragment string {fragment_string}'.format(
                fragment_string=fragment_string
            )
        )
def get_credentials_from_event(event):
    """Return (username, password, refresh_token) extracted from *event*."""
    return (
        event.get('username'),
        event.get('password'),
        event.get('refresh_token'),
    )
def byte_align(size, alignment):
    """Return the smallest int >= ``size`` aligned to ``alignment`` bytes
    (alignment must be a power of two)."""
    mask = alignment - 1
    if size & mask:
        # Round up to the next multiple.
        return (size | mask) + 1
    return size
def _setup_smoothing_sigmas(scale: int=1): """Setup the smoothing sigmas array for registration""" smoothing_sigmas = [0] if scale > 1: for idx in range(1, scale, 1): smoothing_sigmas.insert(0, 2**(idx - 1)) print('No smoothing sigmas given, setting to {}'.format(smoothing_sigmas)) return smoothing_sigmas
def standard_deviation(list1):
    """Calculate the (population) standard deviation.

    :param list1: list of values
    :return: standard deviation value
    """
    # mean
    mean = sum(list1, 0.0) / len(list1)
    # population variance
    squared_diffs = [(value - mean) ** 2 for value in list1]
    variance = sum(squared_diffs, 0.0) / len(squared_diffs)
    # standard deviation
    return variance ** 0.5
def split_magics(buffer):
    """
    Split the cell by lines and decide if it contains magic or bot input
    @return (tuple): a pair \\c stripped_lines,is_magic
    """
    # Split by lines, strip whitespace & remove comments. Keep empty lines
    buffer_lines = [ls for ls in (l.strip() for l in buffer.split('\n'))
                    if not ls or ls[0] != '#']
    # Find the first non-empty line; everything before it is dropped.
    first_content = next((i for i, line in enumerate(buffer_lines) if line), None)
    if first_content is None:
        # Bug fix: a buffer of only blank lines used to fall through and
        # raise IndexError on buffer_lines[0][0].
        return None, None
    buffer_lines = buffer_lines[first_content:]
    # Decide if magic or not & return
    if buffer_lines[0][0] == '%':
        return buffer_lines, True
    return u'\n'.join(buffer_lines), False
def status_request(token):
    """Create ACME "statusRequest" message.

    :param unicode token: Token provided in ACME "defer" message.
    :returns: ACME "statusRequest" message.
    :rtype: dict
    """
    message = {"type": "statusRequest"}
    message["token"] = token
    return message
def find_called_name_offset(source, orig_offset):
    """Return the offset of a calling function.

    Scans backwards from *orig_offset* looking for the '(' that opens the
    enclosing call, skipping over balanced parenthesis groups on the way.

    This only approximates movement.
    """
    # Clamp so scanning never starts past the end of the source text.
    offset = min(orig_offset, len(source) - 1)
    # Depth of ')'-opened groups we are currently inside while moving left.
    paren_count = 0
    while True:
        if offset <= 1:
            # Reached the start without finding the opening paren: fall
            # back to the caller-supplied position.
            return orig_offset
        elif source[offset] == '(':
            if paren_count == 0:
                # This '(' opens the call we're inside; the callee name
                # ends just before it.
                return offset - 1
            else:
                paren_count -= 1
        elif source[offset] == ')':
            paren_count += 1
        offset -= 1
def getChunkPartition(chunk_id):
    """ return partition (if any) for the given chunk id.
    Parition is encoded in digits after the initial 'c' character.
    E.g. for: c56-12345678-1234-1234-1234-1234567890ab_6_4, the partition
    would be 56.  For c-12345678-1234-1234-1234-1234567890ab_6_4, the
    partition would be None.
    """
    if not chunk_id or chunk_id[0] != 'c':
        raise ValueError("unexpected chunk id")
    n = chunk_id.find('-')  # index of the first hyphen (comment fixed: the original said "underscore")
    if n == 1:
        # '-' immediately follows 'c': no partition digits present
        return None
    partition = int(chunk_id[1:n])
    return partition
def util_key_index(keys, key):
    """Return the index of *key* in the list *keys*, or -1 when absent.

    Note: the original loop had no break, so it scanned the whole list and
    returned the LAST matching index; for the intended unique-key lists
    this is identical, and for duplicates the first index is now returned.
    """
    try:
        return keys.index(key)
    except ValueError:
        return -1
def contains(list, item):
    """Return 1 if item is in list, 0 otherwise."""
    # Membership test replaces list.index wrapped in a bare `except:`,
    # which silently swallowed even KeyboardInterrupt/SystemExit.
    return 1 if item in list else 0
def dequote(s):
    """
    If a string has single or double quotes around it, remove them.
    Make sure the pair of quotes match.
    If a matching pair of quotes is not found, return the string unchanged.
    """
    has_matching_pair = (
        len(s) >= 2 and s[0] == s[-1] and s[0] in ("'", '"')
    )
    return s[1:-1] if has_matching_pair else s
def _get_edge_length_in_direction(curr_i: int, curr_j: int, dir_i: int, dir_j: int, i_rows: int, i_cols: int, edge_pixels: set) -> int:
    """
    find the maximum length of a move in the given direction along the
    perimeter of the image

    NOTE: visited pixels are removed from *edge_pixels* (side effect).

    :param curr_i: current row index
    :param curr_j: current col index
    :param dir_i: direction of change in row index
    :param dir_j: direction of change in col index
    :param i_rows: number of rows of containing array
    :param i_cols: number of cols of containing array
    :param edge_pixels: set of remaining edge pixels to visit
    :return: the length of the edge in the given direction, 0 if none
        exists; if direction is a diagonal, length will always be <= 1
    """
    length = 0
    # Walk while the next pixel is inside the array bounds and is a
    # not-yet-visited edge pixel.
    while 0 <= curr_i + dir_i < i_rows and 0 <= curr_j + dir_j < i_cols and \
            (curr_i + dir_i, curr_j + dir_j) in edge_pixels:
        # update seen edge pixels
        edge_pixels.remove((curr_i + dir_i, curr_j + dir_j))
        length += 1
        curr_i += dir_i
        curr_j += dir_j
        # only allow length 1 diagonal moves
        if dir_i != 0 and dir_j != 0:
            break
    return length
def dp_key(relations):
    """Generate a unique key for the dptable dictionary.

    :param relations: set of relations (objects with a ``name`` attribute)
    :return: str -- sorted relation names joined by '-'
    """
    names = sorted(r.name for r in relations)
    return '-'.join(names)
def first_part(txt):
    """First logical part for a password: the first character, uppercased."""
    first_char = txt[0]
    return first_char.upper()
def neat_data(data):
    """Format raw shipment rows into human-readable strings.

    data should not include blob
    !!!ESPECIALLY DONE FOR GET RECENT FUNCTION!!!

    :param data: iterable of rows where index 0 is the docket number,
        1 the customer, 2 the shipment date and 3 the delivery address
    :return: [list of formatted strings, list of docket numbers]
    """
    template = "Docket# : {} \n Customer : {} \n Date Shipment {} \n Delivery Address : \n {}"
    formatted = []
    docket_ids = []
    for row in data:
        formatted.append(template.format(row[0], row[1], row[2], row[3]))
        docket_ids.append(row[0])
    return [formatted, docket_ids]
def escape(text):
    """Escape slack control characters (&, <, >).

    '&' is substituted first so the entities produced for '<' and '>'
    are not themselves double-escaped.
    """
    for raw, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        text = text.replace(raw, entity)
    return text
def get_filename_from_url(url, accession):
    """Return the filename extracted from the given URL.

    For a ``.pdf`` URL (case-insensitive) the final path component is
    returned; any other non-empty URL is returned unchanged.

    :param url: url to parse (may be None or empty)
    :param accession: accession number (kept for interface compatibility;
        only referenced by the commented-out diagnostic)
    :return: file name, the original url for non-pdf links, or "" for an
        empty/missing url
    """
    # Simplified guard: the old ``(not url) or (url and len(url) == 0)``
    # was redundant -- an empty string is already falsy.
    if not url:
        # print(f"{accession} url is empty")
        return ""
    if url.lower().endswith(".pdf"):
        return url.split("/")[-1]
    return url
def hello(who):
    """Say hello to *who*."""
    return f"Hello {who}!"
def query(*args): """ Execute a database query """ # Table query actions (dict, string, func, args*) if (isinstance(args[0], dict) and isinstance(args[1], str) and callable(args[2])): query_args = args[3:] def table_loop(i, db): if i > 0: i = i - 1 return table_loop(i, args[2](args[0], args[1], query_args[i])) return db return table_loop(len(query_args), args[0]) # Table column (dict, string, string, func, args*) if (isinstance(args[0], dict) and isinstance(args[1], str) and isinstance(args[2], str) and callable(args[3])): query_args = args[4:] def column_loop(i, db): if i > 0: i = i - 1 return column_loop( i, args[3](args[0], args[1], args[2], query_args[i]) ) return db return column_loop(len(query_args), args[0]) return False
def toggleBit(int_type: int, offset: int) -> int:
    """Return an integer equal to *int_type* with the bit at *offset*
    inverted: 0 -> 1 and 1 -> 0."""
    return int_type ^ (1 << offset)
def _get_module_name(function): """Extracts module signature of a function.""" full_name = function.__module__ return full_name.split(".")[0]
def get_len_single_middle_vertex(len_leading, len_middle, len_trailing):
    """Get the length of the resulting sequence if only one middle vertex
    is used.

    Args:
        len_leading: The length of the leading input n-grams.
        len_middle: The length of the middle input n-grams.
        len_trailing: The length of the trailing input n-grams.

    Returns:
        The length of a one middle vertex sequence.
    """
    front = max(len_leading, len_middle) + 1
    back = max(len_trailing, len_middle) + 1
    # the middle segment is shared between both halves, so subtract it once
    return front + back - len_middle
def to_none_int_or_checkpoint(value):
    """Coerce-ish a value that is None, int, or "checkpoint".

    ``None`` and the literal string ``"checkpoint"`` pass through
    untouched; anything else is converted with ``int``.
    """
    passthrough = value is None or value == "checkpoint"
    return value if passthrough else int(value)
def _preferential_attachment(set_one: list, set_two: list) -> int: """ Calculate Preferential attachment score for input lists :param set_one: A list of graph nodes -> part one :param set_two: A list of graph nodes -> part two :return: Preferential attachment score """ return len(set(set_one)) * len(set(set_two))
def extract_var_id(fpath):
    """Extract the main variable of the file given by the file path.

    The variable id is the token preceding the first underscore in the
    file name (the last '/'-separated path component).

    :param fpath: The file path of the file to extract the var_id from
    :return: The variable id of the main variable in the given file
    """
    file_name = fpath.rpartition("/")[-1]
    return file_name.partition("_")[0]
def kappa_analysis_altman(kappa):
    """Analyse a kappa value against the Altman benchmark.

    :param kappa: kappa number
    :type kappa: float
    :return: strength of agreement as str ("None" when kappa exceeds 1
        or the input cannot be compared)
    """
    try:
        if kappa < 0.2:
            return "Poor"
        elif kappa < 0.4:
            return "Fair"
        elif kappa < 0.6:
            return "Moderate"
        elif kappa < 0.8:
            return "Good"
        elif kappa <= 1:
            return "Very Good"
        # kappa > 1 falls outside the benchmark's defined range
        return "None"
    except Exception:  # pragma: no cover
        # best-effort: non-comparable input yields no category
        return "None"
def pad_extra_whitespace(string, pad):
    """Given a multiline string, prefix every line with *pad* spaces."""
    prefix = ' ' * pad
    padded_lines = [prefix + line for line in string.split('\n')]
    return '\n'.join(padded_lines)
def recipe_has_step_processor(recipe, processor):
    """Does the recipe object contain at least one step with the named
    Processor?"""
    if "Process" not in recipe:
        return False
    return any(step.get("Processor") == processor
               for step in recipe["Process"])
def _check_option(parameter, value, allowed_values, extra=''): """Check the value of a parameter against a list of valid options. Return the value if it is valid, otherwise raise a ValueError with a readable error message. Parameters ---------- parameter : str The name of the parameter to check. This is used in the error message. value : any type The value of the parameter to check. allowed_values : list The list of allowed values for the parameter. extra : str Extra string to append to the invalid value sentence, e.g. "when using ico mode". Raises ------ ValueError When the value of the parameter is not one of the valid options. Returns ------- value : any type The value if it is valid. """ if value in allowed_values: return value # Prepare a nice error message for the user extra = ' ' + extra if extra else extra msg = ("Invalid value for the '{parameter}' parameter{extra}. " '{options}, but got {value!r} instead.') allowed_values = list(allowed_values) # e.g., if a dict was given if len(allowed_values) == 1: options = f'The only allowed value is {repr(allowed_values[0])}' else: options = 'Allowed values are ' options += ', '.join([f'{repr(v)}' for v in allowed_values[:-1]]) options += f', and {repr(allowed_values[-1])}' raise ValueError(msg.format(parameter=parameter, options=options, value=value, extra=extra))