content
stringlengths
42
6.51k
def GetEvalExpression(value):
    """Generate an ArnoldC evaluation formula for *value*.

    In ArnoldC, 0 means True and other numbers mean False, so the
    generated Python expression passes booleans through unchanged and
    maps numbers via ``> 0``.

    The original had three consecutive bare string literals; only the
    first was a docstring — they are merged into one here (no behavior
    change, the extras were no-op statements).

    :param value: name of the variable to test.
    :return: the evaluation expression as a string.
    """
    return "(%s if type(%s) == type(bool()) else %s > 0)" % tuple([value] * 3)
def make_dag_id(namespace: str, organisation_name: str) -> str:
    """Build a DAG id from a namespace and an organisation name.

    :param namespace: namespace prefix for the DAG id.
    :param organisation_name: organisation name; trimmed, lower-cased,
        and with spaces replaced by underscores.
    :return: the DAG id.
    """
    org_slug = organisation_name.strip().lower().replace(" ", "_")
    return f"{namespace}_{org_slug}"
def _mpi_command(nprocs, mpi_exec): """ Generate a string for shell execution of MPI. If the number of processors is 1 or the executable path is empty, the returned value is simply an empty string. Args: nprocs (int): Number of processors to use. mpi_exec (str): Path to MPI executable. Returns: (*str*) -- Shell-compatible code for executing MPI. """ if nprocs == 1 or mpi_exec == '': return '' else: return mpi_exec + ' -np ' + str(nprocs) + ' '
def set_name_line(hole_lines, name):
    """Label every line of the hole as ``"<name>_<index>"``.

    Uses enumerate instead of the original ``range(len(...))`` index loop.

    Parameters
    ----------
    hole_lines: list
        Line objects of the slot; each gets its ``label`` attribute set.
    name: str
        Base name to give to the lines.

    Returns
    -------
    hole_lines: list
        The same list, with labels assigned in place.
    """
    for index, line in enumerate(hole_lines):
        line.label = name + "_" + str(index)
    return hole_lines
def _get_output_filename(dataset_dir, split_name): """Creates the output filename. Args: dataset_dir: The directory where the temporary files are stored. split_name: The name of the train/test split. Returns: An absolute file path. """ return '%s/mnist_mix_%s.tfrecord' % (dataset_dir, split_name)
def convert_string_to_value(value: str):
    """Convert a string to a value of the most appropriate type.

    Recognises "true"/"false" (case-insensitive) and "null"; other
    strings are parsed as Python literals (numbers, lists, dicts, ...).
    Unparseable strings are returned unchanged.

    Security: the original used ``eval`` on arbitrary input with a bare
    ``except``; this version uses ``ast.literal_eval``, which only
    evaluates literals and cannot execute code.
    """
    import ast

    if value.lower() == "true":
        return True
    if value.lower() == "false":
        return False
    if value == "null":
        return None
    try:
        return ast.literal_eval(value)
    except (ValueError, SyntaxError, TypeError, MemoryError, RecursionError):
        # not a Python literal — keep the raw string
        return value
def convert_type(t, v):
    """Convert value *v* to the type named *t*.

    Args:
        t: One of 'int', 'float', 'long', 'complex', 'str'.
        v: Value to convert.

    Returns:
        *v* converted to the requested type ('long' maps to int, since
        Python 3 has no ``long`` — the original ``eval("long()")`` would
        have raised an uncaught NameError).

    Raises:
        RuntimeError: if *t* is not a supported type name.
        ValueError: if the conversion fails.
    """
    # explicit map replaces eval(); insertion order preserves the
    # original error-message wording
    _type_map = {'int': int, 'float': float, 'long': int,
                 'complex': complex, 'str': str}
    if t not in _type_map:
        raise RuntimeError('[-] unsupported type: must be one of: '
                           + ",".join(_type_map))
    try:
        return _type_map[t](v)
    except ValueError:
        raise ValueError('type={}, value="{}"'.format(t, v))
def merge_with(f, *dicts):
    """Merge *dicts* left-to-right, combining duplicate keys with *f*.

    When a key appears in more than one dict, the stored value becomes
    ``f(existing_value, new_value)``. With no dicts, returns {}.
    """
    merged = dict()
    for mapping in dicts:
        for key in mapping:
            if key in merged:
                merged[key] = f(merged[key], mapping[key])
            else:
                merged[key] = mapping[key]
    return merged
def get_lines(note: str, max_length: int) -> list:
    """Split *note* into chunks of at most *max_length* characters.

    Each line fits the Algorand note-field size constraint (e.g. 947 of
    the 1024-byte limit).

    Fixes over the original: no spurious empty trailing chunks (the old
    while-loop appended the final remainder twice, leaving '' entries),
    and a guard against the infinite loop on ``max_length <= 0``.

    :param note: the main feed (base64, ISO-8859-1 encoded)
    :param max_length: the intended line length
    :return: list of chunks covering the whole note, in order
    """
    if max_length <= 0:
        raise ValueError("max_length must be positive")
    chunks = [note[i:i + max_length] for i in range(0, len(note), max_length)]
    # an empty note still yields one (empty) line, as the original did
    return chunks if chunks else [""]
def get_nes_from_sentence(sentence_data, default_ne_tag, include_tag=False):
    """
    Extract all named entities from a named-entity-tagged sentence.

    Consecutive words sharing the same non-default tag are joined into a
    single entity string.

    NOTE(review): an entity still open when the sentence ends is never
    flushed into the result — confirm whether trailing entities should be
    returned.

    :param sentence_data: Tagged sentence data as (word, tag) pairs.
    :type sentence_data: list
    :param default_ne_tag: Default tag for words that are not Named Entities.
    :type default_ne_tag: str
    :param include_tag: Whether to include the tag with each entity.
    :type include_tag: bool
    :return: Extracted Named Entities (strings, or (string, tag) tuples).
    :rtype: list
    """
    def add_ne(nes_, current_ne_, current_ne_tag_, include_tag_):
        # finalize the accumulated words into one entity entry
        ne = " ".join(current_ne_)
        to_add = ne if not include_tag_ else (ne, current_ne_tag_)
        nes_.append(to_add)
        return nes_
    nes = []
    current_ne = []
    current_ne_tag = ""
    for word, ne_tag in sentence_data:
        # Start of named entity
        if ne_tag != default_ne_tag and not current_ne:
            current_ne.append(word)
            current_ne_tag = ne_tag
        # End of named entity
        elif ne_tag == default_ne_tag and current_ne:
            nes = add_ne(nes, current_ne, current_ne_tag, include_tag)
            current_ne = []
            current_ne_tag = ""
        # Decide if named entity is ongoing or not
        elif ne_tag != default_ne_tag and current_ne:
            # Same tag: the current named entity continues
            if current_ne_tag == ne_tag:
                current_ne.append(word)
            # Different tag: close the current entity, start a new one
            else:
                nes = add_ne(nes, current_ne, current_ne_tag, include_tag)
                current_ne = [word]
                current_ne_tag = ne_tag
    return nes
def page_not_found(e):
    """Render a custom 404 error response (body, status-code pair)."""
    body, status = 'Not found', 404
    return body, status
def check_success(responses: list) -> bool:
    """Return True when every entry in *responses* is truthy.

    Uses ``all()``, which also guarantees a real ``bool`` return value —
    the original and-folding could leak the last response object out,
    contradicting the ``-> bool`` annotation. An empty list counts as
    success, matching the original's ``True`` accumulator seed.
    """
    return all(responses)
def calc_precision(true_pos, false_pos):
    """Compute precision = TP / (TP + FP), rounded to 3 decimal places.

    Args:
        true_pos: Number of true positives.
        false_pos: Number of false positives.

    Returns:
        The rounded precision, or None when there are no positive
        predictions (zero denominator). The original caught
        ``BaseException`` — far too broad; only division by zero is the
        expected failure here.
    """
    try:
        return round(true_pos / float(true_pos + false_pos), 3)
    except ZeroDivisionError:
        return None
def add_star(string):
    """Append a '*' to *string*.

    >>> assert add_star('fred') == 'fred*'
    """
    return string + "*"
def get_us_gpa(average) -> float:
    """Return the US-style GPA for a percentage average.

    Walks a descending threshold table instead of the original cascade
    of eleven ``if`` statements; averages below 60 map to 0.
    """
    thresholds = (
        (93, 4), (90, 3.7), (87, 3.3), (83, 3), (80, 2.7),
        (77, 2.3), (73, 2), (70, 1.7), (67, 1.3), (63, 1), (60, 0.7),
    )
    for cutoff, gpa in thresholds:
        if average >= cutoff:
            return round(gpa, 2)
    return round(0, 2)
def get_wf_task_id(task_name):
    """Extract the WorkFlowy ID that follows the "$wf:" marker.

    Args:
        task_name: Task name, possibly containing "$wf:<id>".

    Returns:
        The WorkFlowy ID, or "__no_wf_id__" when no marker is present.
    """
    marker_split = task_name.split("$wf:")
    if len(marker_split) == 1:
        # no "$wf:" marker anywhere in the name
        return "__no_wf_id__"
    return marker_split[-1]
def stretch_prob(probability: float) -> float:
    """Linearly stretch a probability from [0.75, 1] onto [0, 1].

    The result is later passed to tanh(2*x) for a more realistic change
    in mood and affection. (The original docstring claimed a [-1, 1]
    range; ``4 * p - 3`` maps 0.75 -> 0 and 1 -> 1.)

    Args:
        probability: The probability (expected in [0.75, 1]) with which
            the bot determined either mood or affection.

    Returns:
        A value in [0, 1] for inputs in [0.75, 1].
    """
    return 4 * probability - 3
def plural(num, singStr, plStr):
    """Pick between singular and plural wording based on *num*.

    A minor convenience for formatting messages.
    """
    return singStr if num == 1 else plStr
def split_string(s, skip):
    """
    Splits a string with \\n roughly every *skip* characters, never
    breaking inside a word.

    NOTE(review): the running length counts only the words themselves —
    the single spaces inserted by the final join are not counted, so a
    line can slightly exceed *skip*; confirm this tolerance is intended.

    Parameters:
        s (String): The string to split.
        skip (Integer): How often to split, in characters.

    Returns:
        String: The split string (newlines are attached to the word that
        starts each new line, then everything is space-joined).
    """
    words = s.split()
    current_len = 0
    result = []
    for word in words:
        if current_len + len(word) > skip:
            # word would overflow the line: start a new one with it
            result.append(f'\n{word}')
            current_len = len(word)
        else:
            result.append(f'{word}')
            current_len += len(word)
    return " ".join(result)
def is_eop(ch):
    """Return True when *ch* is an end-of-packet byte (LF or CR)."""
    return ch == b'\n' or ch == b'\r'
def swap_bits(num, pos1, pos2):
    """Swap the bits of *num* at bit positions *pos1* and *pos2*.

    Used by the quantum_state.measure_multiple method. When the two bits
    are equal the number is returned untouched; otherwise both positions
    are flipped with a single XOR mask.
    """
    bit_a = (num >> pos1) & 1
    bit_b = (num >> pos2) & 1
    if bit_a == bit_b:
        return num
    flip_mask = (1 << pos1) | (1 << pos2)
    return num ^ flip_mask
def find_combos(length: int) -> int:
    """
    Count the valid adapter combinations for a run of *length* numbers.

    In the data there are only gaps of one or three between sorted
    numbers. The first and last numbers of a run are fixed, interior
    ones may be skipped as long as no gap exceeds three, so a run of n
    has combos equal to the sum for runs of n-1, n-2, n-3
    (tribonacci-style):

        n1 = 1, n2 = 1, n3 = 2, n4 = 4, n5 = 7, n6 = 13

    The original recursed without memoization (exponential time); this
    computes the same values iteratively in O(length).
    """
    if length <= 0:
        # matches the original: negative/zero lengths resolve to 0
        return 0
    if length <= 2:
        return 1
    a, b, c = 0, 1, 1  # combo counts for run lengths 0, 1, 2
    for _ in range(3, length + 1):
        a, b, c = b, c, a + b + c
    return c
def _autobitmask(val, total_size, index, size, keymap): """ Generate a bitmask and apply it to 'val' bits given the 'total_size', 'index', and 'size' of the BitField """ _bitmask = eval( "0b{}".format("0" * (total_size - (index + size)) + (size * "1") + "0" * index) ) key = (val & _bitmask) >> index key_str = str(key) mapped_val = keymap[key_str] return mapped_val
def cleanup_empty_lines(content):
    """Allow at most two consecutive empty lines
    >>> content = 'abc\\n\\n\\n\\nbc\\n\\nc\\n\\n\\n\\nd\\n\\n'
    >>> cleanup_empty_lines(content)
    'abc\\n\\n\\nbc\\n\\nc\\n\\n\\nd\\n'
    >>> content = '''- static {
    ...     // initialize resource bundle
    ...     NLS.initializeMessages(BUNDLE_NAME, Messages.class);
    ...   }
    ... }'''
    >>> content == cleanup_empty_lines(content)
    True
    >>> content + "\\n" == cleanup_empty_lines(content + "\\n")
    True
    """
    lines = content.split("\n")
    nlines = len(lines)
    newlines = []
    prev, prevprev = None, None

    def all_empty(*args):
        # None means "no line yet" and deliberately does NOT count as
        # empty, so the window only triggers on real blank lines
        empty = [(l is not None and l.strip() == "") for l in args]
        return not False in empty

    for idx, line in enumerate(lines):
        # keep the line unless (a) it is the third blank in a row, or
        # (b) it is the final line and would leave two trailing blanks
        if (not all_empty(prevprev, prev, line)
                and (idx != nlines - 1 or not all_empty(prev, line))):
            newlines.append(line)
        # the sliding window advances over every input line, including
        # the ones that were dropped
        prevprev = prev
        prev = line
    return "\n".join(newlines)
def build_param_dict(stnm, year, month, day_hour):
    """Bundle atmospheric-sounding request parameters into a dict.

    Parameters
    ----------
    stnm : string
        Station identifier, e.g. '70261' for PAFA, Fairbanks Int'l Airport.
    year : string
        Year, e.g. '2021'.
    month : string
        Month, e.g. '01'.
    day_hour : string
        Combined day and hour, e.g. '0100' (day '01', hour '00' UTC);
        used for both the FROM and TO bounds.

    Returns
    -------
    dict
        Station number, year, month, and day/hour of the request.
    """
    return {
        'STNM': stnm,
        'YEAR': year,
        'MONTH': month,
        'FROM': day_hour,
        'TO': day_hour,
    }
def is_in(val, lvals):
    """Membership test using identity ('is') instead of equality.

    Mandatory when the element to check overloads operator '=='.

    Args:
        val: Value to check.
        lvals: Candidate values.

    Returns:
        True if *val* is (identity-wise) one of *lvals*.
    """
    for candidate in lvals:
        if candidate is val:
            return True
    return False
def value_or_from_dict(obj, key, default=None):
    """Resolve a value that may be given directly or per-key in a dict.

    Many Polygraphy APIs accept ``Union[obj, Dict[str, obj]]``: either a
    global value or per-key (input/output/...) values. In a dict, the
    ``""`` key indicates a fallback for keys not otherwise found, e.g.::

        atol = 1.0                       # value directly
        atol = {"out1": 1.0, "out2": 2.0}  # per-output values
        atol = {"out1": 1.0, "": 2.0}      # per-output with default

    Args:
        obj (Union[obj, Dict[str, obj]]): The value, or per-key values.
        key (str): Key to use when per-key values are provided.
        default (obj): Fallback when the key is absent from the dict.

    Returns:
        obj: The resolved value.
    """
    if not isinstance(obj, dict):
        return obj
    # exact key wins, then the "" catch-all, then the caller's default
    for candidate in (key, ""):
        if candidate in obj:
            return obj[candidate]
    return default
def wrap(sequence, limit=80):
    """Hard-wrap *sequence* into lines of at most *limit* characters.

    Parameters:
        sequence (str): Sequence to be wrapped.
        limit (int): Maximum characters per line.

    Returns:
        (str): Newline-joined chunks of at most *limit* characters.
    """
    chunks = [sequence[start:start + limit]
              for start in range(0, len(sequence), limit)]
    return "\n".join(chunks)
def bytes_to_int(bytes):
    """Interpret an iterable of byte values as a big-endian integer.

    NOTE(review): the parameter shadows the builtin ``bytes``; kept as-is
    for interface compatibility with existing keyword callers.
    """
    result = 0
    for byte in bytes:
        result = result * 256 + int(byte)
    return result
def distribute_amounts(available: int, categories: dict, distributed_by_categories: dict):
    """
    Distribute *available* money across categories proportionally to
    their prices, capped at each category's remaining balance.

    Mutates *distributed_by_categories* in place and also returns it.
    The proportions are re-normalised after every category (both
    ``total_price`` and ``available`` shrink as the loop advances), so
    rounding drift is absorbed by later categories.

    :param available: amount of available money from a payment
    :param categories: dict of categories with their prices
    :param distributed_by_categories: dict of amounts already distributed
        per category (updated in place)
    :return: (list of {'category', 'net_amount'} rows, the updated
        distributed_by_categories dict)
    """
    data = []
    total_price = sum(categories.values())
    for category, price in categories.items():
        # proportional share of what is still available
        distributed_amount = round(price / total_price * available)
        # cap so the lifetime total for the category never exceeds its price
        if distributed_by_categories[category] + distributed_amount >= price:
            distributed_amount = price - distributed_by_categories[category]
        distributed_by_categories[category] += distributed_amount
        # renormalise for the remaining categories
        total_price -= price
        available -= distributed_amount
        data.append({
            'category': category,
            'net_amount': distributed_amount
        })
    return data, distributed_by_categories
def tag(pages, tag):
    """Filter *pages* to those carrying *tag*; a falsy tag returns all."""
    if tag:
        return [page for page in pages if tag in page.tags]
    return pages
def _keytify_test_cases(test_cases): """Traverse the test cases list and return a dictionnary which associate test case name to its duration. This is used for fast access to tests cases duration. """ res = {} for tc in test_cases: key = "%s/%s" % (tc.get('classname'), tc.get('name')) if tc.get('time') is None or float(tc.get('time')) == 0.0: continue res[key] = float(tc.get('time')) return res
def gef_pystring(x):
    """Decode the bytes *x* as UTF-8 and escape control characters.

    Newlines, carriage returns, tabs, vertical tabs and backspaces are
    replaced by their backslash-escaped spellings so the result is safe
    to print on one line.
    """
    decoded = str(x, encoding="utf-8")
    escapes = (("\n", "\\n"), ("\r", "\\r"), ("\t", "\\t"),
               ("\v", "\\v"), ("\b", "\\b"))
    for raw, escaped in escapes:
        decoded = decoded.replace(raw, escaped)
    return decoded
def normalize_to_11(x, in_min, in_max):
    """Linearly map *x* from [in_min, in_max] onto [-1, 1]."""
    span = in_max - in_min
    return (x - in_min) * 2 / span - 1
def get_port_from_usb(first_usb_index, second_usb_index):
    """Translate the last two USB location indices into an acroname port.

    Raises KeyError for index pairs outside the known map.
    """
    port_by_location = {
        (4, 4): 0, (4, 3): 1, (4, 2): 2, (4, 1): 3,
        (3, 4): 4, (3, 3): 5, (3, 2): 6, (3, 1): 7,
    }
    return port_by_location[(first_usb_index, second_usb_index)]
def error_j(Dj, Pap, Pec, QBERI, exp_loss_jt):
    """
    Calculates the conditional probability for a pulse of intensity mu_j
    to cause an error, after sifting, in the time slot t. Defined as e_k
    in Sec. IV of [1].

    Parameters
    ----------
    Dj : float, array
        Expected detection rate.
    Pap : float
        Probability of an afterpulse event.
    Pec : float
        Extraneous count probability.
    QBERI : float
        Intrinsic Quantum Bit Error Rate.
    exp_loss_jt : float, array
        Loss, per intensity per time slot, decay function.

    Returns
    -------
    float, array
        Error rate per intensity per time slot: extraneous counts, plus
        half the afterpulse contribution, plus the intrinsic QBER scaled
        by the detection probability (1 - exp_loss_jt).
    """
    return Pec + (0.5*Pap*Dj) + QBERI*(1 - exp_loss_jt)
def _merge_windows(feature_dict): """ Merge sliding windows to calculate total feature importance across the entire sequence. """ total = 0 out_dict = {} for k in feature_dict.keys(): # Merge flips and _pos sliding windows key = k.split("_pos") if len(key) == 1: if "_flip" in key[0]: key = key[0].split("_flip")[0] else: key = k else: key = key[0] try: out_dict[key] += feature_dict[k] except KeyError: out_dict[key] = feature_dict[k] total += feature_dict[k] for k in out_dict.keys(): out_dict[k] = out_dict[k]/total return out_dict
def _nearest_mult_of_8(number, up=True): """ Find the nearest multiple of 8, rounding up or down """ if up: return ((number + 7) // 8) * 8 else: return (number // 8) * 8
def fix_request(req):
    """Restore CRLF line endings in a raw HTTP request.

    Requests copied from the developer console or BurpSuite have their
    "\\r\\n" collapsed to "\\n"; if no CRLF is present anywhere, every
    LF is promoted back to CRLF (a heuristic, not a strict parser).
    """
    if "\r\n" in req:
        return req
    return req.replace("\n", "\r\n")
def complementary_dict(D):
    """Return a new dict mapping each key of *D* to ``1 - value``.

    Rewritten from a manual keys()-loop into a dict comprehension over
    ``items()`` (one lookup per entry instead of two).
    """
    return {key: 1 - value for key, value in D.items()}
def trim(text: str, limit: int) -> str:
    """Limit *text* to *limit* characters, appending "..." when cut.

    Three characters are reserved for the ellipsis, and trailing
    whitespace before it is stripped.
    """
    if len(text) <= limit:
        return text
    return text[: limit - 3].strip() + "..."
def hex_int(value) -> int:
    """Parse a base-16 string (with or without an "0x" prefix) into an int."""
    return int(value, 16)
def count_aa(seq):
    """
    Count occurrences of every amino-acid code in *seq* (case-insensitive).

    Counts all standard one-letter codes plus '*' (stop) and 'U'
    (selenocysteine); letters absent from the sequence count as 0.

    Raises:
        ValueError: if the sequence contains unknown characters. (The
        original used ``assert`` for this — stripped under ``-O`` — and
        its error message checked against an inconsistent character set.)
    """
    valid = "IVLFCMAGTWSYPHEQDNKR*U"
    seq = seq.upper()
    unknown = [s for s in seq if s not in valid]
    if unknown:
        raise ValueError(
            "Error, unknown amino acids %s in sequence: %s" % (str(unknown), seq)
        )
    # dict preserves the order of `valid`, matching the original key order
    return {aa: seq.count(aa) for aa in valid}
def problem_02_fibonacci_sum(limit):
    """
    Problem 2: sum of all even Fibonacci terms strictly below *limit*.

    Walks the sequence with a rolling (a, b) pair instead of storing the
    whole list; the seed terms 0 and 1 contribute nothing to the sum.

    Args:
        limit (int): Exclusive upper bound on summed terms.
    """
    total = 0
    a, b = 0, 1
    while True:
        nxt = a + b
        if nxt >= limit:
            break
        if nxt % 2 == 0:
            total += nxt
        a, b = b, nxt
    return total
def get_index(usage_key, children):
    """Return the index of the child whose str() equals *usage_key*.

    Raises ValueError (from list.index) when no child matches.
    """
    stringified = list(map(str, children))
    return stringified.index(usage_key)
def zero_matrix(matrix):
    """Zero out, in place, every row and column that contains a zero.

    Runs in O(n*m) since every element must be inspected. Returns True
    (including for empty matrices).
    """
    if not matrix:
        return True
    width = len(matrix[0])
    if width == 0:
        return True
    rows_to_zero = {i for i, row in enumerate(matrix) if 0 in row}
    cols_to_zero = {j for j in range(width)
                    if any(row[j] == 0 for row in matrix)}
    for i in rows_to_zero:
        for j in range(width):
            matrix[i][j] = 0
    for row in matrix:
        for j in cols_to_zero:
            row[j] = 0
    return True
def get_binary_url(version: str, arch: str) -> str:
    """Build the atx-agent release tarball URL for a version and arch."""
    return (
        "https://github.com/openatx/atx-agent/releases/download/"
        f"{version}/atx-agent_{version}_linux_{arch}.tar.gz"
    )
def _transform_opt(opt_val): """Transform a config option value to a string. If already a string, do nothing. If an iterable, then combine into a string by joining on ",". Args: opt_val (Union[str, list]): A config option's value. Returns: str: The option value converted to a string. """ if isinstance(opt_val, (list, tuple)): return ','.join(opt_val) else: return opt_val
def strip_text(text):
    """Collapse an indented multi-line string into one clean line.

    >>> example_noisy_text = '''
    ...        hey guys
    ...     it looks like
    ...                    I am all over the place'''
    >>> strip_text(example_noisy_text)
    u'hey guys it looks like I am all over the place'
    """
    if not text:
        # nothing to strip (probably None or empty string)
        return text
    stripped = (line.strip() for line in text.splitlines())
    return u' '.join(line for line in stripped if line)
def fizzbuzz_list(n):
    """FizzBuzz for 1..n built from three parallel `map`/`lambda` streams,
    demonstrating `enumerate`, `isinstance`, and `zip`.

    The word streams mark multiples of 3, 5 and 15; for each position
    the most specific word (last string in the triple) wins, otherwise
    the number itself is emitted. Uses ``isinstance`` instead of the
    original ``type(i) == str`` comparison.
    """
    numbers = range(1, n + 1)
    fizz = map(lambda i: i if i % 3 > 0 else "Fizz", numbers)
    buzz = map(lambda i: i if i % 5 > 0 else "Buzz", numbers)
    fizzbuzz = map(lambda i: i if i % 15 > 0 else "FizzBuzz", numbers)
    result = []
    for idx, triple in enumerate(zip(fizz, buzz, fizzbuzz)):
        words = [item for item in triple if isinstance(item, str)]
        # "FizzBuzz" sits last in the triple, so pop() prefers it
        result.append(words.pop() if words else str(idx + 1))
    return result
def get_player_color(maximize):
    """Return the color for a player: the maximizing player plays red
    ("R"), the minimizing player plays yellow ("Y")."""
    if maximize:
        return "R"
    return "Y"
def get_radio_horizontalization_html(radio_label):
    """Restyle a normal radio widget to be horizontal and bigger.

    Emits a zero-sized iframe whose embedded page does the restyling,
    targeted by the radio's label text.

    NOTE(review): *radio_label* is interpolated into the query string
    without URL-encoding — confirm callers only pass URL-safe labels.

    :param radio_label: label text of the radio to restyle.
    :return: iframe HTML snippet as a string.
    """
    html = f"""<iframe src="resources/horizontalize-radio.html?radioText={radio_label}" style="height:0px;width:0px;"></iframe>"""
    return html
def _list_find(lst, predicate): """ Find the first element in a list that satisfies the given predicate Arguments: - lst: List to search through - predicate: Predicate that determines what to return Returns: The first element that satisfies the predicate otherwise None """ for v in lst: if predicate(v): return v return None
def flatten(lst):
    """Flatten a list of lists into a single list (one level only).

    Parameters
    ----------
    lst : list of list
        A list of embedded lists.

    Returns
    -------
    list
        The flattened list.
    """
    flat = []
    for sublist in lst:
        flat.extend(sublist)
    return flat
def subsetindex(full, subset):
    """Indices in *full* where elements of *subset* occur.

    A bare string *subset* is promoted to a one-element list. Matches
    are grouped per subset element, in subset order.
    """
    if isinstance(subset, str):
        subset = [subset]
    matches = []
    for target in subset:
        matches.extend(i for i, item in enumerate(full) if item == target)
    return matches
def classify_output(doc):
    """Classify an argument's output type from its docstring text.

    Uses substring membership — the original ``doc.find(...) > 0`` test
    silently missed keywords at position 0 (``find`` returns 0 there).

    Returns 'tif' for raster/colour outputs, 'html' for HTML reports,
    'lidar' for LiDAR outputs, and None when nothing matches.
    """
    if 'raster' in doc or 'colour' in doc:
        return 'tif'
    if 'HTML' in doc:
        return 'html'
    if 'LiDAR' in doc:
        return 'lidar'
    return None
def _parse_search_results(json_result): """Search results are divided into 'statuses' and 'search_metadata'. The former contains the tweets themselves, and the latter contains the max_id to use to retrieve the next batch of tweets""" statuses = json_result.get('statuses') metadata = json_result.get('search_metadata') next_results = metadata.get('next_results') return statuses, next_results
def merge_lists(la, lb, key=lambda x: x):
    """
    Merge two sorted lists into one sorted list.

    Assumes both inputs are already sorted by *key*. On ties, elements
    from *la* are emitted before the tying element of *lb* (the inner
    comparison is ``<=``), so the merge is stable with *la* first.

    @la: first list
    @lb: second list
    @key: comparison key
    """
    merged = []
    lena, lenb = len(la), len(lb)
    lb_ind, la_ind = 0, 0
    while lb_ind < lenb:
        bval = key(lb[lb_ind])
        # drain everything from la that sorts at or before the next b
        while la_ind < lena and key(la[la_ind]) <= bval:
            merged.append(la[la_ind])
            la_ind += 1
        merged.append(lb[lb_ind])
        lb_ind += 1
    # if some left in a
    merged.extend(la[la_ind:])
    return merged
def avg(values):
    """Return the arithmetic mean of *values*.

    Raises ZeroDivisionError for an empty sequence.
    """
    return sum(values) / len(values)
def read_fakeroot_state(statefile):
    """
    Read a fakeroot state file and key each entry dict by inode.

    The file has one comma-separated entry per line::

        dev=fd03,ino=136887,mode=100600,uid=0,gid=0,nlink=1,rdev=0

    Fixes over the original: the file handle is closed via a with-block
    (it leaked before), and line endings are removed with rstrip("\\n")
    instead of ``line[:-1]``, which corrupted a final line that lacked a
    trailing newline.
    """
    entry_by_inode = {}
    with open(statefile, "r") as fh:
        for line in fh:
            entry = {}
            for item in line.rstrip("\n").split(","):
                k, v = item.split("=")
                entry[k] = v
            entry_by_inode[int(entry["ino"])] = entry
    return entry_by_inode
def metamict_score(alpha_damage_score: float) -> float:
    """
    Map an alpha-damage score to a metamictization score.

    Thresholds follow: Murakami et al., 1991, Alpha-decay event damage
    in zircon, American Mineralogist, v.76, p.1510-1532.

    Returns 1 for scores below 3, 0.5 for scores below 8, and 0
    otherwise. NOTE(review): the original docstring claimed stages
    "1, 2 or 3" — the code actually returns 1 / 0.5 / 0; confirm which
    scale callers expect.
    """
    if alpha_damage_score < 3:
        return 1
    if alpha_damage_score < 8:
        return 0.5
    return 0
def data_not_corrupted(data, reference):
    """
    Sanity-check that received *data* matches the format of *reference*.

    Checks, in order: total length within +/-3 characters of the
    reference, same number of comma-separated values, and the same
    number of decimal points per value.

    Bug fixed: the original length guard
    ``len(reference)-3 > len(data) > len(reference)+3`` was
    unsatisfiable (a number cannot be both below ref-3 and above ref+3),
    so length was never actually checked.
    """
    # Reject lengths outside the +/-3 tolerance band
    if not (len(reference) - 3 <= len(data) <= len(reference) + 3):
        return False
    # Check equal number of values
    data_vals = data.split(",")
    ref_vals = reference.split(",")
    if len(data_vals) != len(ref_vals):
        return False
    # Check equal value format (decimal-point count) for each value
    for data_val, ref_val in zip(data_vals, ref_vals):
        if data_val.count(".") != ref_val.count("."):
            return False
    return True
def default(base, deft):
    """Return the deft value if base is not set. Otherwise, return base.

    A numeric zero counts as "set" and is returned as-is; other falsy
    values (None, '', [], ...) fall through to *deft*.

    NOTE(review): ``base == 0.0`` is also true for ``False`` and ``0``,
    so ``False`` is returned rather than replaced by *deft* — confirm
    this is intended for boolean callers.
    """
    if base == 0.0:
        return base
    return base or deft
def setActiveTab(session_a_tab):
    """Determine which UI tab should be open initially.

    Exactly one tab is True; 'orders' is the fallback for unknown or
    missing session values. Note the session value 'supplier'
    (singular) maps to the 'suppliers' tab key, as in the original.

    Replaces five near-identical hand-written dict literals with a
    session-value -> tab-key table.
    """
    session_to_tab = {
        'supplier': 'suppliers',
        'p_agg': 'p_agg',
        'invoices': 'invoices',
        'hold': 'hold',
    }
    active = session_to_tab.get(session_a_tab, 'orders')
    return {tab: tab == active
            for tab in ('orders', 'p_agg', 'suppliers', 'invoices', 'hold')}
def ADD_TO_SET(expression):
    """Build a MongoDB $addToSet aggregation operator.

    $addToSet collects the unique results of applying *expression* to
    each document sharing the same group-by key. See
    https://docs.mongodb.com/manual/reference/operator/aggregation/addToSet/
    for more details.

    :param expression: expression
    :return: Aggregation operator dict
    """
    operator = dict()
    operator['$addToSet'] = expression
    return operator
def encode_token_auth(token, **kwargs):
    """Encode *token* as a Bearer Authorization header.

    The token is ASCII-encoded because only ASCII characters are
    allowed in HTTP headers.
    """
    header_value = b"Bearer " + token.encode("ascii")
    return {b"Authorization": header_value}
def calculate_percent(yesterday_closing, day_before_closing):
    """Fractional change in BTCUSD between two closing prices."""
    delta = yesterday_closing - day_before_closing
    return delta / day_before_closing
def sanitize_string(string: str) -> str:
    """
    Sanitize a filename for use in the file system.

    ### Arguments
    - string: the string to sanitize

    ### Returns
    - the sanitized string (Windows-disallowed characters removed;
      double quotes and colons swapped for close equivalents)
    """
    disallowed = "/?\\*|<>"
    cleaned = "".join(ch for ch in string if ch not in disallowed)
    # double quotes (") and colons (:) are also disallowed, but they
    # have usable equivalents, so they are replaced rather than dropped
    return cleaned.replace('"', "'").replace(":", "-")
def enquote_string(s):
    """Enquote a value per DND protocol rules: interior quotation marks
    are doubled and the result is wrapped in double quotes."""
    escaped = s.replace('"', '""')
    return '"' + escaped + '"'
def _format_optname(value): """Format the name of an option in the configuration file to a more readable option in the command-line.""" return value.replace('_', '-').replace(' ', '-')
def get_recursively(source_dict, search_keys):
    """
    Recursively collect values for any of *search_keys* found anywhere
    in a nested structure of dicts and lists of dicts.

    When the same key appears at several depths, the deepest/latest
    occurrence wins (later ``update`` calls overwrite), matching the
    original's copy-back loops. Idiom fixes: ``key in search_keys``
    instead of ``in search_keys.keys()``, and ``dict.update`` instead of
    manual key-by-key copying.
    """
    found = {}
    for key, value in source_dict.items():
        if key in search_keys:
            found[key] = value
        elif isinstance(value, dict):
            found.update(get_recursively(value, search_keys))
        elif isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    found.update(get_recursively(item, search_keys))
    return found
def matrix(rows, columns, val):
    """
    Build a rows x columns matrix with *val* in every cell.

    NOTE: negative sizes are not validated (range() simply yields
    nothing). Every cell references the same *val* object, exactly as
    the original accumulation loop did.

    Parameters
    ----------
    rows(int) : The number of rows of the matrix
    columns(int) : The number of columns of the matrix
    val(int) : The value in every cell of the matrix

    Returns
    -------
    list : The matrix
    """
    return [[val for _ in range(columns)] for _ in range(rows)]
def get_gem_group(sample_def):
    """Get the GEM group from a sample def.

    Falsy values (None, and also 0) default to 1, matching the
    original's ``or 1`` fallback.

    Args:
        sample_def (dict): Sample def.

    Returns:
        int: GEM group.
    """
    raw = sample_def['gem_group']
    if not raw:
        raw = 1
    return int(raw)
def extract_numbers(list_of_things):
    """
    Keep only the ints and floats from *list_of_things*.

    Rewritten from a loop-and-append into a comprehension. The exact
    ``type(...) in (int, float)`` check is kept deliberately: it
    excludes ``bool`` (and other int subclasses) exactly as the
    original ``type(element) == int`` comparison did.
    """
    return [element for element in list_of_things
            if type(element) in (int, float)]
def pack(arg):
    """Pack a variable into a list.

    Parameters
    ----------
    arg : object
        Lists are returned as-is, tuples are cast to lists, and any
        other value is wrapped in a singleton list.

    Returns
    -------
    list
        List containing the arguments.
    """
    return list(arg) if isinstance(arg, (list, tuple)) else [arg]
def is_whitespace_only(line):
    """
    Check whether *line* contains only whitespace (or nothing at all).

    :param line: The line to check.
    :return: True if only whitespace, False otherwise.
    """
    return not line.strip()
def max_rw_index(prices, start, end):
    """
    Return the index of the maximum price inside the window [start, end]
    (both inclusive). Ties resolve to the first occurrence, since the
    comparison is strict. (The original docstring said "min" — the
    ``<`` comparison below keeps the index of the larger price.)

    :param prices: prices in list format
    :param start: window start index (inclusive)
    :param end: window end index (inclusive)
    :return: index of the first maximum within the window
    """
    matching_index = start
    for i in range(start, end + 1):
        if prices[matching_index] < prices[i]:
            matching_index = i
    return matching_index
def root_start(root, refs):
    """Compute the subset of *refs* that start with *root*, as a frozenset."""
    matching = [ref for ref in refs if ref.startswith(root)]
    return frozenset(matching)
def get_split_expr(which_entries):
    """Build an alternation group "(a|b|...)" for re.split.

    NOTE(review): entries are not re.escape()d — this assumes they
    contain no regex metacharacters; confirm with callers.
    """
    alternatives = '|'.join(str(entry) for entry in which_entries)
    return '(' + alternatives + ')'
def _format_ssl_thumbprint(number):
    """
    Format an ssl cert number as a colon-separated thumbprint of
    character pairs (e.g. "abcd" -> "ab:cd").

    NOTE(review): zip() truncates at the shorter stream, so an
    odd-length string silently drops its final character — confirm
    inputs are always even-length hex strings.

    :param number: number to be formatted into an ssl thumbprint.
    """
    string = str(number)
    return ":".join(a + b for a, b in zip(string[::2], string[1::2]))
def to_bool(boolean: bool):
    """
    Coerce an argument to bool.

    - bool arguments are returned unchanged
    - 'true'/'false' strings (any case) convert to True/False
    - anything else raises TypeError

    :param boolean: bool or str argument to be converted to bool
    :return: the original bool, or the converted string value
    """
    if isinstance(boolean, bool):
        return boolean
    if isinstance(boolean, str):
        lowered = boolean.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
    raise TypeError('value {} is not a bool'.format(boolean))
def gen_function_terminate(function_name, malloc_vars):
    """Generate a C terminate function that frees malloc'd variables.

    The terminate function deallocates memory after completion.

    Args:
        function_name (str): name of the main function
        malloc_vars (dict): variables to deallocate

    Returns:
        signature (str): declaration of the terminate function
        function (str): definition of the terminate function
    """
    params = ','.join('float* ' + name for name in malloc_vars)
    term_sig = 'void ' + function_name + '_terminate(' + params + ')'
    frees = ''.join("free(" + name + "); \n" for name in malloc_vars)
    term_fun = term_sig + ' { \n\n' + frees + "} \n\n"
    return term_sig, term_fun
def module_loaded(module):
    """
    Check whether the specified kernel module has been loaded, by
    prefix-matching lines of /proc/modules (so "usb" also matches
    "usbcore", as the original's startswith check did).

    The file handle is now closed via a with-block — the original
    leaked it.

    :param module: Name of the module to check
    :return: True if the module is loaded, False if not.
    """
    with open("/proc/modules") as modules:
        return any(line.startswith(module) for line in modules)
def process_tag_list(tags):
    """Convert a tag list of (type, value) pairs into the dict format
    accepted by the auto_tag functions."""
    return [{'type': entry[0], 'value': entry[1]} for entry in tags]
def provisioned_throughput_validator(throughput):
    """
    Property: FileSystem.ProvisionedThroughputInMibps

    Rejects negative throughput values; the ``< 0.0`` comparison is kept
    as-is so non-comparable inputs (e.g. NaN) behave exactly as before.
    """
    if throughput < 0.0:
        raise ValueError(
            "ProvisionedThroughputInMibps must be greater than or equal to 0.0"
        )
    return throughput
def window(tokens, size: int = 3):
    """
    Generate (context, target) samples for a sliding window.

    Example:
    ```python
    >>> window(['a', 'b', 'c', 'd'], size=2)
    [(['a', 'b'], 'c'), (['b', 'c'], 'd')]
    ```

    Args:
        tokens: List of tokens
        size: Window size

    Returns:
        List of windowed samples; empty when fewer than size+1 tokens.
    """
    samples = []
    for start in range(len(tokens) - size):
        context = tokens[start:start + size]
        target = tokens[start + size]
        samples.append((context, target))
    return samples
def get_value(path, source, default_value=None):
    """
    Gets the value in source based on the provided dotted path, or
    'default_value' if not exists (default: None).

    :param path: dot-separated key path, e.g. ``'a.b.c'``
    :param source: nested dict-like object to look the path up in
    :param default_value: value returned when any segment of the path
        is missing
    :return: the resolved value, or *default_value* on a miss.  A key
        named ``'ip'`` whose value is a list resolves to its first element.
    """
    split_path = path.split('.')
    if split_path[0] in source:
        if len(split_path) > 1:
            # Bug fix: propagate default_value into the recursion; the
            # original dropped it, so a miss deeper in the path always
            # returned None regardless of the caller's default.
            return get_value('.'.join(split_path[1:]),
                             source[split_path[0]],
                             default_value)
        if split_path[0] == 'ip':
            if isinstance(source[split_path[0]], list):
                return source[split_path[0]][0]
        return source[split_path[0]]
    return default_value
def say(number):
    """ print out a number as words in North American English using short scale terms """
    # NOTE(review): despite the docstring, the words are *returned* as a
    # string, not printed.  The inserted "and" (see say_hundreds) reads as
    # British usage; confirm which dialect is actually intended.
    number = int(number)
    # Supported range: 0 .. 999,999,999,999 — "billion" is the largest
    # short-scale term handled below.
    if number < 0 or number >= 1e12:
        raise ValueError
    if number == 0:
        return "zero"

    def quotient_and_remainder(number, divisor):
        """ return the integer quotient and remainder of dividing number by divisor """
        # divisor arrives as a float literal (1e9, 1e6, 1e3) from the main
        # body; coercing it keeps all arithmetic in integers.
        divisor = int(divisor)
        remainder = number % divisor
        quotient = (number - remainder) // divisor
        return quotient, remainder

    def say_term(which, terms):
        """ return a term from a tuple of strings as a list of one element """
        # Slicing (not indexing) yields [] for an out-of-range index and a
        # one-element list otherwise, so callers can concatenate the result.
        return terms[which : which + 1]

    def say_tens(number):
        """ return a string representing a number less than 100 in English """
        terms = []
        quotient, remainder = quotient_and_remainder(number, 10)
        if quotient == 1:
            # 10-19 are irregular; index directly by the ones digit.
            terms += say_term(remainder, ("ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"))
        else:
            if quotient:
                # "units" and "teens" are unreachable placeholders: this
                # branch only runs with quotient of 0 or 2-9.
                terms += say_term(quotient, ("units", "teens", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"))
            if remainder:
                terms += say_term(remainder, ("zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"))
        # Hyphenate compounds, e.g. "forty-two".
        return '-'.join(terms)

    def say_hundreds(number, final=False):
        """ return a string representing a number less than 1000 in English """
        terms = []
        quotient, remainder = quotient_and_remainder(number, 100)
        if quotient:
            terms += [say_tens(quotient), "hundred"]
        if remainder:
            # "and" is emitted after "hundred" ("one hundred and five") or,
            # when `final` is set, before a trailing group that follows a
            # larger one ("one thousand and five").
            if quotient or final:
                terms += ["and"]
            terms += [say_tens(remainder)]
        return terms

    # now finally convert a number less than a million million
    terms = []
    quotient, remainder = quotient_and_remainder(number, 1e9)
    if quotient:
        terms += say_hundreds(quotient) + ["billion"]
    quotient, remainder = quotient_and_remainder(remainder, 1e6)
    if quotient:
        terms += say_hundreds(quotient) + ["million"]
    quotient, remainder = quotient_and_remainder(remainder, 1e3)
    if quotient:
        terms += say_hundreds(quotient) + ["thousand"]
    if remainder:
        # `terms != []` means a larger group was already emitted, which
        # forces the leading "and" via say_hundreds' `final` flag.
        terms += say_hundreds(remainder, terms != [])
    return ' '.join(terms)
def past_days(next_day_to_be_planned):
    """Return an iterable of the day indices (starting at 1) strictly
    before *next_day_to_be_planned*."""
    return range(1, next_day_to_be_planned)
def detect_encoding(data, default_encoding='UTF-8'):
    """Detects the encoding used by |data| from the Byte-Order-Mark if present.

    Generalized to accept either ``str`` (latin-1-style, one char per byte,
    as before) or raw ``bytes`` — in Python 3 a BOM read from a binary file
    is ``bytes`` and would never match the original str-only prefixes.

    Args:
      data: string or bytes whose encoding needs to be detected
      default_encoding: encoding returned if no BOM is found.

    Returns:
      The encoding determined from the BOM if present or |default_encoding|
      if no BOM was found.
    """
    if isinstance(data, bytes):
        boms = ((b'\xFE\xFF', 'UTF-16BE'),
                (b'\xFF\xFE', 'UTF-16LE'),
                (b'\xEF\xBB\xBF', 'UTF-8'))
    else:
        boms = (('\xFE\xFF', 'UTF-16BE'),
                ('\xFF\xFE', 'UTF-16LE'),
                ('\xEF\xBB\xBF', 'UTF-8'))
    # Check UTF-16 BOMs first, matching the original precedence.
    for bom, encoding in boms:
        if data.startswith(bom):
            return encoding
    return default_encoding
def get_allbefore_in_array(lst: list, obj: object, include_value=False):
    """Return the elements of *lst* that precede *obj*.

    Raises ValueError (from ``list.index``) if *obj* is not present.

    Example:
        >>> mylst = ['exit', 'quit', 're', 'sys', 'teststring']\n
        >>> get_allbefore_in_array(mylst, 're')\n
        ['exit', 'quit']
        >>> get_allbefore_in_array(mylst, 're', include_value=True)\n
        ['exit', 'quit', 're']
    """
    stop = lst.index(obj)
    if include_value:
        stop += 1
    return list(lst[:stop])
def to_decimal_degrees(degrees_n, degrees_d, minutes_n, minutes_d, seconds_n, seconds_d):
    """Convert degrees, minutes and seconds — each given as a
    numerator/denominator rational pair — into decimal degrees."""
    whole_degrees = degrees_n / degrees_d
    whole_minutes = minutes_n / minutes_d
    whole_seconds = seconds_n / seconds_d
    # 60 minutes per degree, 3600 seconds per degree.
    return whole_degrees + (whole_minutes / 60) + (whole_seconds / 3600)
def ellipse_equation(bd, el_constant1, el_constant2, bd_max, x_coord, y_coord):
    """
    Equation for an ellipse in the form from Burton & Liszt (1978).

    Evaluates to 0 when (x_coord, y_coord) lies on the ellipse, so it can
    be used as the target function in scipy.optimize.brenth to solve for bd.

    Parameters
    ----------
    bd: 'number'
        semi-minor axis of ellipse
    el_constant1: 'number'
        First parameter for defining ellipse
    el_constant2: 'number'
        second parameter for defining ellipse
    bd_max: 'number'
        Maximum semi-minor axis allowed within defined elliptical disk
    x_coord: 'number, ndarray'
        x-coordinate in ellipse
    y_coord: 'number, ndarray'
        y-coordinate in ellipse
    """
    # Semi-major axis as a quadratic function of the semi-minor axis.
    semi_major = bd * el_constant1 + el_constant2 * bd ** 2 / bd_max
    return x_coord ** 2 / semi_major ** 2 + y_coord ** 2 / bd ** 2 - 1.
def commands_not_installed(commands, completer_content):
    """
    Checking for installed commands in exists completer file

    :param commands: List of commands found in completions directory
    :param completer_content: Content of current installed complete file
    :return: List of not installed commands
    """
    missing = []
    for command in commands:
        # Substring containment check against the completer file content.
        if command not in completer_content:
            missing.append(command)
    return missing
def is_valid_int(num):
    """Check if input can be converted to an integer via ``int()``.

    :param num: value to test (string, number, or any other object)
    :return: True if ``int(num)`` succeeds, False otherwise
    """
    try:
        int(num)
    except (ValueError, TypeError):
        # Bug fix: int(None), int([]) etc. raise TypeError, which the
        # original did not catch — the function crashed instead of
        # returning False for non-numeric objects.
        return False
    return True
def singer_map(pop, rate):
    """
    Define the equation for the singer map.

    Arguments
    ---------
    pop: float
        current population value at time t
    rate: float
        growth rate parameter values

    Returns
    -------
    float
        scalar result of singer map at time t+1
    """
    # Quartic polynomial of the current population; the term order matches
    # the published form so floating-point results are bit-identical.
    quartic = 7.86 * pop - 23.31 * pop ** 2 + 28.75 * pop ** 3 - 13.3 * pop ** 4
    return rate * quartic
def str_before_last(src, sub):
    """Return the portion of *src* before the last occurrence of *sub*,
    or the empty string when *sub* does not occur."""
    last = src.rfind(sub)
    if last < 0:
        return ""
    return src[:last]
def _read_event_log(event_log_text): """Return OpenDSS event log information. Parameters ---------- event_log_text : str Text of event log Returns ------- list list of dictionaries (one dict for each row in the file) """ data = [] if not event_log_text: return data for line in event_log_text.split("\n"): if line == "": continue tokens = [x.strip() for x in line.split(",")] row = {} for token in tokens: name_and_value = [x.strip() for x in token.split("=")] name = name_and_value[0] value = name_and_value[1] row[name] = value data.append(row) return data
def entity_seqs_equal(expected, predicted):
    """
    Returns true if the expected entities and predicted entities all match,
    returns false otherwise. Note that for entity comparison, we compare that
    the span, text, and type of all the entities match.

    Args:
        expected (list of core.Entity): A list of the expected entities for some query
        predicted (list of core.Entity): A list of the predicted entities for some query
    """
    if len(expected) != len(predicted):
        return False
    # Every aligned pair must agree on type, span, and text.
    return all(
        exp.entity.type == pred.entity.type
        and exp.span == pred.span
        and exp.text == pred.text
        for exp, pred in zip(expected, predicted)
    )
def data_is_valid(data):
    """Check to see if data is valid for this class.

    Valid data is exactly an empty dict.

    Returns a tuple of (bool, string) indicating valididty and any error
    message.
    """
    # An empty dict is falsy, so `not data` equals the length-zero test.
    if isinstance(data, dict) and not data:
        return True, "OK"
    return False, "Data is not an object or not empty."