def find_frame(buffer):
    """
    Finds the next MP3 frame header.

    @param bytearray buffer Bytes from an MP3 file.
    @return int The index in the buffer where the frame was found,
                or -1 if not found.
    """
    try:
        synchs = [buffer.find(b'\xFF\xFA'), buffer.find(b'\xFF\xFB')]
        return min(x for x in synchs if x > -1)
    except ValueError:
        return -1
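# Usage sketch for find_frame (the file path is a hypothetical placeholder):
# scan the first 64 KiB of an MP3 for an MPEG sync word (0xFFFA / 0xFFFB).
with open('song.mp3', 'rb') as f:  # 'song.mp3' is a placeholder path
    idx = find_frame(f.read(65536))
print('frame header at byte', idx)  # -1 when no sync word is present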
def mac_str_to_tuple(mac):
    """
    Convert 'xx:xx:xx:xx:xx:xx' MAC address string to a tuple of integers.

    Example: mac_str_to_tuple('00:01:02:03:04:05') == (0, 1, 2, 3, 4, 5)
    """
    return tuple(int(d, 16) for d in mac.split(':'))
def sub_base(k, base):
    """
    If base is a list of sorted integers [i_1,...,i_R] then sub_base returns
    a list with the k^th element removed. Note that k=0 removes the first
    element. There is no test to see if k is in the range of the list.
    """
    n = len(base)
    if n == 1:
        return []
    if n == 2:
        # k is an index, so k == 0 removes the first element; the original
        # compared k against the value base[0] here, contradicting the docstring.
        if k == 0:
            return [base[1]]
        else:
            return [base[0]]
    return base[:k] + base[k + 1:]
def logical_xor(a, b, unsafe=False):
    """logical xor without the bloat of numpy.logical_xor
    could be substituted with numpy.logical_xor

    !important: if unsafe is set to True, a and b are not checked.
    This improves speed but is risky.
    expects integers [0,1] or bools
    """
    if not unsafe:
        # Check each argument individually; summing them (as originally done)
        # would let invalid pairs such as (2, 0) slip through.
        if a not in (0, 1) or b not in (0, 1):
            raise Exception(
                "The parameters for logical_xor have to be booleans or "
                "integers in range [0,1]. got a: " + str(a) + ", b: " + str(b))
    return a ^ b
def bytes2human(n, format="%(value)i%(symbol)s"):
    """
    >>> bytes2human(10000)
    '9K'
    >>> bytes2human(100001221)
    '95M'
    """
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols[1:]):
        prefix[s] = 1 << (i + 1) * 10
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            return format % locals()
    return format % dict(symbol=symbols[0], value=n)
def simple_chewrec_func(data, rec, arg):
    """ Callback for record chewing. """
    if rec is None:
        return 1
    return 0
def area_triangle(base, height):
    """
    .. math:: area = \\frac{base * height}{2.0}

    Parameters
    ----------
    base: float
        length of the base of the triangle
    height: float
        height of the triangle

    Returns
    -------
    area: float
        area of the triangle
    """
    return (base * height) / 2.0
def find_word_in_turn(dialogue, turn_id, value, start_idx, end_idx):
    """ find non-cat slot value in turn.
    return List[(turn_id, frame_id, key)]
    """
    assert isinstance(value, str)
    frames = dialogue["turns"][turn_id]["frames"]
    res = []
    for frame_id, frame in enumerate(frames):
        for slot in frame["slots"]:
            if start_idx == slot["start"] and end_idx == slot["exclusive_end"]:
                res.append((turn_id, frame_id, slot["slot"]))
    return res
def fibonacci(n):
    """Return the `n`-th Fibonacci number."""
    # The original docstring promised a list, but the recursion returns a
    # single number; the base cases give the standard 1, 1, 2, 3, 5, ... sequence.
    if n == 1:
        return 1
    elif n == 2:
        return 1
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)
def separa_palavras(frase):
    """Receives a sentence and returns a list of the words within it.

    Arguments:
        frase {str} -- A sentence

    Returns:
        list -- A list of the words within the received sentence
    """
    return frase.split()
def get_model_var_scope(module_scope, model_name):
    """ Assemble the module scope name and model scope name. """
    model_var_scope = module_scope + "_" + model_name
    return model_var_scope
def name_from_url(url):
    """ Parses a URL in REST format where the last component is the object name """
    return url.split('/')[-1]
def dictToHashTuple(baseDict):
    """super inefficient and explicit for now"""
    return tuple(
        (k, str(baseDict[k]), str(id(baseDict[k])))
        for k in sorted(baseDict.keys()))
    # return ", ".join(tuple(k + "=" + str(baseDict[k]) for k in sorted(baseDict.keys())))
def thankyou(contributor):
    """End"""
    #form = ReusableForm(request.form)
    #print(form.errors)
    #if request.method == 'POST':
    #    expertiseLevel = request.form['expertiseLevel']
    #if form.validate():
    #    return redirect(url_for('description', contributor=name))
    return ("THANK YOU, %s!! We really appreciate your feedback. "
            "If you have any questions or further comments, please feel free "
            "to contact bartlett@isi.edu" % contributor)
def g_speed(parameters, actual_speed):
    """returns a speed from a g-code"""
    # default values
    speed = actual_speed
    # parse text
    params = parameters.split(' ')
    for param in params:
        coordinate = param[0]
        value = float(param[1:])
        if coordinate == 'f':
            speed = value
    return speed
def clean(string_value):
    """Standardizes string values for lookup by removing case and special
    characters and spaces.

    Args:
        string_value (str): The lookup key to be transformed.

    Returns:
        str: The original value, but lowercase and without spaces,
        underscores or dashes.
    """
    return string_value.lower().strip().replace("_", "").replace("-", "").replace(" ", "")
def reached_minimum_node_size(data, min_node_size):
    """
    Purpose: Determine if the node contains at most the minimum number of data points
    Input  : Data array and minimum node size
    Output : True if minimum size has been reached, else False
    """
    return len(data) <= min_node_size
def ParseKey(algorithm, key_length, key_type, messages):
    """Generate a keyspec from the given (unparsed) command line arguments.

    Args:
      algorithm: (str) String mnemonic for the DNSSEC algorithm to be
          specified in the keyspec; must be a value from
          AlgorithmValueValuesEnum.
      key_length: (int) The key length value to include in the keyspec.
      key_type: ('KEY_SIGNING'|'ZONE_SIGNING') Whether to create a keyspec
          for a KSK or a ZSK.
      messages: (module) Module (generally auto-generated by the API build
          rules) containing the API client's message classes.

    Returns:
      A messages.DnsKeySpec instance created from the given arguments.
    """
    key_spec = None
    if algorithm is not None or key_length is not None:
        spec_args = {}
        spec_args['keyType'] = messages.DnsKeySpec.KeyTypeValueValuesEnum(
            key_type)
        if algorithm is not None:
            spec_args['algorithm'] = messages.DnsKeySpec.AlgorithmValueValuesEnum(
                algorithm)
        if key_length is not None:
            spec_args['keyLength'] = key_length
        if spec_args:
            key_spec = messages.DnsKeySpec(**spec_args)
    return key_spec
def limpiar_extremos(texto):
    """Removes the whitespace at the beginning and at the end of a text string.

    :param texto: (str) Input text string.
    :return: (str) Text string without leading or trailing whitespace.
    """
    # Equivalent to the original texto[::-1].rstrip()[::-1].rstrip()
    return texto.strip()
def partition(pred, iterable):
    """ Returns tuple of allocated and unallocated systems

    :param pred: status predicate
    :type pred: function
    :param iterable: machine data
    :type iterable: list
    :returns: ([allocated], [unallocated])
    :rtype: tuple

    .. code::

        def is_allocated(d):
            allocated_states = ['started', 'pending', 'down']
            return 'charms' in d or d['agent_state'] in allocated_states
        allocated, unallocated = utils.partition(is_allocated,
                                                 [{state: 'pending'}])
    """
    yes, no = [], []
    for i in iterable:
        (yes if pred(i) else no).append(i)
    return (yes, no)
def has_loops(path):
    """Returns True if this path has a loop in it, i.e. if it visits a node
    more than once. Returns False otherwise."""
    visited = set()
    for node in path:
        if node in visited:
            return True
        visited.add(node)
    return False
def eps(i, N):
    """Dispersion."""
    i = i + 1  # THIS IS SO THE DISPERSION IS THE SAME AS IN mathematica
    return -1 + (2 / (N + 1) * i)
def resolve_multi_input_change_ranges(input_change_ranges_list):
    """For AGGREGATE_LAYERS such as Add, the different inputs have different
    change ranges. For the change ranges, take the largest range over all
    input ranges, e.g.:

        [ [(1,3), (4,6)],    <- input1
          [(2,4), (4,5)] ]   <- input2
        -> [(1,4), (3,6)]

    :param input_change_ranges_list: list of list of tuples. Inner lists must
        have same length, where each ith tuple corresponds to ith mutation in
        the input (ith input change range).
    :type input_change_ranges_list: list[list[tuple]]
    :return: Resolved input change ranges. All ranges must have the same width.
    :rtype: list[tuple]
    """
    # change range lists should have the same length
    assert len(set(len(x) for x in input_change_ranges_list)) == 1
    # get maximal interval
    # [ [(1,3), (4,6)], [(2,4), (4,5)] ] -> [(1,4), (4,6)]
    input_change_ranges = [(min(x[0] for x in ranges), max(x[1] for x in ranges))
                           for ranges in zip(*input_change_ranges_list)]
    # adjust intervals to have same width
    # [(1,4), (4,6)] -> [(1,4), (3,6)]
    max_end = max(y for _, y in input_change_ranges)
    max_width = max(y - x for x, y in input_change_ranges)
    input_change_ranges = [(x, x + max_width) if x + max_width <= max_end
                           else (max_end - max_width, max_end)
                           for x, y in input_change_ranges]
    return input_change_ranges
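# Usage sketch reproducing the docstring example: two inputs with two
# mutations each; both resolved ranges end up with the same width of 3.
print(resolve_multi_input_change_ranges([[(1, 3), (4, 6)], [(2, 4), (4, 5)]]))
# [(1, 4), (3, 6)]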
def col_to_str(agg, col, tab, table_names, N=1):
    """
    Transform an Agg Column Table to str.

    Args:
        agg(str):
        col(str):
        tab(str):
        table_names(dict):
        N(int): Default is 1

    Returns:
        str

    Raises:
        None
    """
    _col = col.replace(' ', '_')
    tab = ''.join(tab)
    if agg == 'none':
        if tab not in table_names:
            table_names[tab] = 'Table' + str(len(table_names) + N)
        table_alias = table_names[tab]
        if col == '*':
            return '*'
        return '%s.%s' % (table_alias, _col)
    if col == '*':
        if tab is not None and tab not in table_names:
            table_names[tab] = 'Table' + str(len(table_names) + N)
        return '%s(%s)' % (agg, _col)
    else:
        if tab not in table_names:
            table_names[tab] = 'Table' + str(len(table_names) + N)
        table_alias = table_names[tab]
        return '%s(%s.%s)' % (agg, table_alias, _col)
def get_encrypted_payment_id_from_tx_extra_nonce(extra_nonce):
    """
    Extracts encrypted payment id from extra

    :param extra_nonce:
    :return:
    """
    if 9 != len(extra_nonce):
        raise ValueError("Nonce size mismatch")
    if 0x1 != extra_nonce[0]:
        raise ValueError("Nonce payment type invalid")
    return extra_nonce[1:]
def function_merge_two_dicts(x, y):
    """Given two dicts, merge them into a new dict as a shallow copy."""
    z = x.copy()
    z.update(y)
    return z
def check_puppet_class(rec):
    """
    Checks if the given file is a puppet class

    :param rec: file path
    :return: check result
    """
    return rec.lower().endswith(".pp")
def pad_locs(book, loc_length):
    """ Pad location keys as necessary """
    book_new = {}
    for key, value in book.items():
        pad = loc_length - len(key)  # how much we need to pad
        newkey = key
        while pad > 0:
            # insert a "0" after the leading character
            newkey = newkey[0] + "0" + newkey[1:]
            pad -= 1
        book_new[newkey] = value
    return book_new
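# Usage sketch (hypothetical location keys): zeros are inserted after the
# leading character until every key reaches loc_length characters.
print(pad_locs({'A7': 'x', 'A123': 'y'}, 4))  # {'A007': 'x', 'A123': 'y'}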
def remove_quantopian_imports(code: str) -> str:
    """
    we implement the algorithm api inside pylivetrader.
    the quantopian api is removed.
    :param code:
    :return:
    """
    result = []
    skip_next_line = False
    for line in code.splitlines():
        # A backslash-continued import statement spans multiple lines;
        # skip the whole statement, continuation lines included.
        if "import" in line and "\\" in line:
            skip_next_line = True
        if skip_next_line:
            if "\\" not in line:
                skip_next_line = False
            continue
        # Drop single-line quantopian imports.
        if "import" in line and "quantopian" in line:
            continue
        result.append(line)
    return "\r\n".join(result)
def filt_by_filetype(filetypes, value):
    """ Filter URLs by file type: True if `value` ends with any of `filetypes`. """
    for ftype in filetypes:
        if value.endswith(ftype):
            return True
    return False
def get_bytes(count, ignore):
    """
    unittest patch for get_rnd_bytes (ID2TLib.Utility.py)

    :param count: count of requested bytes
    :param ignore: <not used>
    :return: a count of As
    """
    return b'A' * count
def key_exists(dictionary, key):
    """Tests if a key exists in a dictionary

    Arguments:
        dictionary {dict} -- The dictionary to test
        key {str} -- The key to test

    Returns:
        bool -- `True` if the key exists in the dictionary
    """
    # `in` also reports keys whose value is None, which the original
    # dictionary.get(key, None) check would miss.
    return key in dictionary
def unbox_usecase2(x):
    """ Expect a list of tuples """
    res = 0
    for v in x:
        res += len(v)
    return res
def recall(cm):
    """The ratio of correct positive predictions to the total positive
    examples. This is also called the true positive rate."""
    # cm[1][1] = true positives, cm[1][0] = false negatives
    return cm[1][1] / (cm[1][1] + cm[1][0])
def get_lr(lr, total_epochs, steps_per_epoch, lr_step, gamma):
    """get_lr"""
    lr_each_step = []
    total_steps = steps_per_epoch * total_epochs
    lr_step = [i * steps_per_epoch for i in lr_step]
    for i in range(total_steps):
        if i < lr_step[0]:
            lr_each_step.append(lr)
        elif i < lr_step[1]:
            lr_each_step.append(lr * gamma)
        else:
            lr_each_step.append(lr * gamma * gamma)
    return lr_each_step
def result(result):
    """ Returns a formatted result marker """
    if result == "conditional":
        return "[ " + "(!) WARNING (!)" + " ]"
    else:
        if result:
            return "[ " + "(#) PASS (#)" + " ]"
        else:
            return "[ " + "<!!> FAILURE <!!>" + " ]"
def get_api_name(intfspec):
    """Given an interface specification return an API name for it"""
    if len(intfspec) > 3:
        name = intfspec[3]
    else:
        name = intfspec[0].split('.')[-1]
    return name
def map_reputation_to_score(reputation: str) -> int:
    """Map reputation as string to its score as integer representation

    :param reputation: The reputation as str
    :type reputation: ``str``
    :return: the score integer value
    :rtype: ``int``
    """
    reputation_map = {
        'unknown': 0,
        'none': 0,
        'good': 1,
        'suspicious': 2,
        'bad': 3
    }
    return reputation_map.get(reputation.lower(), 0)
def getGuids(fileList):
    """ extracts the guids from the file list """
    guids = []
    # loop over all files
    for thisfile in fileList:
        guids.append(str(thisfile.getAttribute("ID")))
    return guids
def list2map(listoffilenames, delimiter):
    """
    convert a list to a map

    :param listoffilenames: list of filenames
    :param delimiter: common separator used in filenames
    :return: map/dictionary of list with key of filename before delimiter
             and value of complete filename
    """
    return dict(map(lambda x: [x.split(delimiter)[0], x], listoffilenames))
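# Usage sketch (hypothetical filenames): keys are the prefix before the
# first occurrence of the delimiter.
files = ['sample1_R1.fastq', 'sample2_R1.fastq']
print(list2map(files, '_'))
# {'sample1': 'sample1_R1.fastq', 'sample2': 'sample2_R1.fastq'}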
def asciify(string):
    """Returns the string encoded as ASCII bytes"""
    return string.encode('ascii')
def time_periods_in_epoch(epoch):
    """
    Args:
        epoch (int): the Unix-time epoch to extract time periods from.

    Returns:
        tuple: A tuple of ints (days, hours, mins) in the epoch.
    """
    epoch = epoch // 60
    mins = epoch % 60
    epoch = epoch // 60
    hours = epoch % 24
    epoch = epoch // 24
    days = epoch
    return days, hours, mins
def find_letter_grade(grade: float) -> str:
    """
    Grading Scale -
        89.50 - 100.00 = A
        88.50 -  89.49 = B+
        79.50 -  88.49 = B
        78.50 -  79.49 = C+
        69.50 -  78.49 = C
        68.50 -  69.49 = D+
        59.50 -  68.49 = D
        00.00 -  59.49 = F

    @type grade: float
    @param grade: float
    @return: str
    """
    # Check to make sure that the incoming grade is a float and if it isn't
    # check to see if it is an int. If it is an int, reformat it as a float;
    # if it isn't either an int or a float, raise an error.
    if type(grade) != float:
        if type(grade) == int:
            grade_to_convert = float(grade)
        else:
            raise TypeError(
                "Input was: " + str(type(grade)) +
                " --> input must be either <class 'float'> OR <class 'int'>!")
    else:
        grade_to_convert = grade
    # The elif chain makes each lower bound sufficient; the original chained
    # comparisons like `>= 88.5 < 89.5` were misleading no-ops.
    if grade_to_convert >= 89.5:
        return "A"
    elif grade_to_convert >= 88.5:
        return "B+"
    elif grade_to_convert >= 79.5:
        return "B"
    elif grade_to_convert >= 78.5:
        return "C+"
    elif grade_to_convert >= 69.5:
        return "C"
    elif grade_to_convert >= 68.5:
        return "D+"
    elif grade_to_convert >= 59.5:
        return "D"
    else:
        return "F"
def calculate_uncertainty(terms, rho=0.):
    """
    Generically calculates the uncertainty of a quantity that depends on
    multiple *terms*. Each term is expected to be a 2-tuple containing the
    derivative and the uncertainty of the term. Correlations can be defined
    via *rho*. When *rho* is a number, all correlations are set to this value.
    It can also be a mapping of a 2-tuple, the two indices of the terms to
    describe, to their correlation coefficient. In case the indices of two
    terms are not included in this mapping, they are assumed to be
    uncorrelated. Example:

    .. code-block:: python

        calculate_uncertainty([(3, 0.5), (4, 0.5)])  # uncorrelated
        # -> 2.5

        calculate_uncertainty([(3, 0.5), (4, 0.5)], rho=1)  # fully correlated
        # -> 3.5

        calculate_uncertainty([(3, 0.5), (4, 0.5)], rho={(0, 1): 1})  # fully correlated
        # -> 3.5

        calculate_uncertainty([(3, 0.5), (4, 0.5)], rho={(1, 2): 1})
        # no rho value defined for pair (0, 1), assumes zero correlation
        # -> 2.5
    """
    # sum over squares of all single terms
    variance = sum((derivative * uncertainty)**2.
                   for derivative, uncertainty in terms)

    # add second order terms of all pairs
    for i in range(len(terms) - 1):
        for j in range(i + 1, len(terms)):
            _rho = rho.get((i, j), 0.) if isinstance(rho, dict) else rho
            variance += 2. * terms[i][0] * terms[j][0] * _rho * terms[i][1] * terms[j][1]

    return variance**0.5
def ema(values, n):
    """Calculates actual value of exponential moving average of given series
    with certain length.

    :param values: list of floats
    :param n: int ema length
    :return: float actual ema value
    """
    ema_list = []
    sma = sum(values) / n
    multiplier = 2 / (n + 1)
    # EMA(current) = Val(now) * multiplier + EMA(prev) * (1 - multiplier)
    if values:
        ema_list.append(values[0] * multiplier + sma * (1 - multiplier))
        # range runs to len(values) so the last value is included; the
        # original stopped at len(values) - 1 and dropped the final price.
        for i in range(1, len(values)):
            val = values[i] * multiplier + ema_list[-1] * (1 - multiplier)
            ema_list.append(val)
    else:
        return None
    return ema_list[-1]
def courses_to_take(input):
    """
    Time complexity: O(n) (we process each course only once)
    Space complexity: O(n) (array to store the result)
    """
    # Normalize the dependencies, using a set to track the
    # dependencies more efficiently
    course_with_deps = {}
    to_take = []
    for course, deps in input.items():
        if not deps:
            # Course with no dependencies:
            # candidate to start the search
            to_take.append(course)
        else:
            course_with_deps[course] = set(deps)
    result = []
    while to_take:
        course = to_take.pop()
        # Add course to journey
        result.append(course)
        # Iterate through courses and remove this course from dependencies.
        # Iterate over a copy, since entries are deleted inside the loop
        # (deleting while iterating the dict raises RuntimeError in Python 3).
        for prereq_course, prereq_deps in list(course_with_deps.items()):
            if course in prereq_deps:
                prereq_deps.remove(course)
                if not prereq_deps:
                    # Course has all the dependencies solved:
                    # add to the "to_take" queue
                    to_take.append(prereq_course)
                    del course_with_deps[prereq_course]
    return result if len(result) == len(input) else None
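# Usage sketch (hypothetical course map): returns one valid order, or None
# when the dependency graph contains a cycle.
plan = {'algebra': [], 'calculus': ['algebra'], 'physics': ['calculus']}
print(courses_to_take(plan))  # ['algebra', 'calculus', 'physics']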
def is_payload_list_pages(payload: bytes) -> bool:
    """Checks if payload is a list of UsmPage, which has a signature of
    '@UTF' at the beginning."""
    if len(payload) < 4:
        return False
    return payload[:4] == bytes("@UTF", "UTF-8")
def exchangeRows(M, r1, r2):
    """Swap rows r1 and r2 of M"""
    M[r1], M[r2] = M[r2], M[r1]
    return M
def suggested_filter(sensor: str) -> str:
    """Return the suggested filter for a sensor."""
    filt = {
        "serial": "last",
        "overall_state": "last",
        "batter_soc": "last",
        "total_load": "step",
    }.get(sensor)
    if filt:
        return filt
    if sensor.startswith("total_"):
        return "last"
    if sensor.startswith("temp_"):
        return "avg"
    return "step"
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely
    composed of letters in the hand. Otherwise, returns False.
    Does not mutate hand or wordList.

    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    if word not in wordList:
        return False
    for ltr in word:
        # the hand must hold at least as many copies of ltr as word uses
        if hand.get(ltr, 0) - word.count(ltr) < 0:
            return False
    return True
def crawl_dictionary(dictionary, parent, parameter, inserted=False):
    """
    Recursively look for a given parent within a potential dictionary of
    dictionaries. If the parent is found, insert the new parameter and update
    the 'inserted' flag. Return both the updated dictionary and inserted flag.

    :param dictionary: A dict object containing CAOM parameters. Nested
                       dictionaries are possible in this object.
    :type dictionary: dict

    :param parent: The parent to search dictionary keys for. May not be
                   currently present.
    :type parent: str

    :param parameter: parameter is a single-key single-value dictionary, to be
                      inserted to the existing dictionary under the parent key.
    :type parameter: dict

    :param inserted: A flag to keep track of whether or not parent has been
                     found and the parameter inserted.
    :type inserted: bool
    """
    # Assign current dictionary items to tuples
    current_keys = tuple(dictionary.keys())
    current_values = tuple(dictionary.values())

    # If the requested parent already exists, either assign it the parameter
    # value if it is empty or update the current value. Set the inserted flag.
    if parent in current_keys:
        if dictionary[parent] == "":
            dictionary[parent] = parameter
        else:
            dictionary[parent].update(parameter)
        inserted = True

    # If the requested parent cannot be found, recursively call
    # crawl_dictionary on any subdictionaries found within the current one.
    else:
        # enumerate avoids .index(), which returned the wrong position when
        # two keys shared an equal value
        for ind, v in enumerate(current_values):
            if isinstance(v, dict):
                sub_dictionary, inserted = crawl_dictionary(v, parent,
                                                            parameter,
                                                            inserted)
                dictionary[current_keys[ind]] = sub_dictionary

    # Return both the dictionary and the inserted flag.
    return (dictionary, inserted)
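# Usage sketch (hypothetical CAOM-style metadata, 'WFC3' is a made-up value):
# insert a parameter under a nested parent key one level down.
tree = {'metadata': {'instrument': ''}}
tree, ok = crawl_dictionary(tree, 'instrument', {'name': 'WFC3'})
print(tree, ok)  # {'metadata': {'instrument': {'name': 'WFC3'}}} True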
def _word_badness(word):
    """
    Assign a heuristic to possible outputs from Morphy. Minimizing this
    heuristic avoids incorrect stems.
    """
    if word.endswith('e'):
        return len(word) - 2
    elif word.endswith('ess'):
        return len(word) - 10
    elif word.endswith('ss'):
        return len(word) - 4
    else:
        return len(word)
def check_set_number(value, typ, default=None, minimum=None, maximum=None):
    """
    Checks if a value is an instance of typ and lies within the permissive
    range if given.
    """
    if value is None:
        return default
    if not isinstance(value, typ):
        try:
            value = typ(value)
        except Exception:
            raise TypeError("Incompatible type: Expected {0}, got {1}.".format(typ, type(value)))
    if minimum is not None:
        if value < minimum:
            raise ValueError("Value must be larger than {}.".format(minimum))
    if maximum is not None:
        if value > maximum:
            raise ValueError("Value must be smaller than {}.".format(maximum))
    return value
def get_pathname_from_url(url):
    """Get the pathname from a URL.

    Args:
        url (str): URL.

    Returns:
        str: Pathname of the URL (everything up to and including the
        last '/').
    """
    return url[:url.rfind('/') + 1]
def sizify(value):
    """
    Simple kb/mb/gb size snippet for templates:

    {{ product.file.size|sizify }}
    """
    if value < 512000:
        value = value / 1024.0
        ext = 'kb'
    elif value < 4194304000:
        value = value / 1048576.0
        ext = 'mb'
    else:
        value = value / 1073741824.0
        ext = 'gb'
    return '%s %s' % (str(round(value, 2)), ext)
def corrected_components(x):
    """
    :return: the components x, negated if both components are negative
    """
    if x[0] < 0 and x[1] < 0:
        return -x
    return x
def select_keys(d, keys):
    """
    >>> d = {'foo': 52, 'bar': 12, 'baz': 98}
    >>> select_keys(d, ['foo'])
    {'foo': 52}
    """
    return {k: d[k] for k in keys}
def subnet4(ip_addresses, max_address_bits=32):
    """
    Takes an iterable sequence of positive integers. Returns a tuple
    consisting of the minimum subnet address and netmask. The subnet is
    calculated based on the received set of IP addresses.

    Examples of usage:

    # 10.0.0.0/21
    >>> subnet4(range(167772672, 167773695, 64))
    (167772160, 21)

    An empty set does not throw an exception, but returns a tuple (0, -1):
    (perhaps raising an exception should be added in the future)
    >>> subnet4([])
    (0, -1)

    >>> subnet4([0])
    (0, 31)
    >>> subnet4([1])
    (0, 31)
    >>> subnet4([2])
    (2, 31)
    >>> subnet4([2**32-1])
    (4294967294, 31)
    >>> subnet4([2, 3])
    (2, 31)
    >>> subnet4([1, 2])
    (0, 30)
    >>> subnet4([2, 3, 4])
    (0, 29)
    >>> subnet4([1, 2, 3, 4, 5])
    (0, 29)

    # 10.0.0.0/24
    >>> subnet4([167772160, 167772294, 167772192, 167772167, 167772324])
    (167772160, 24)

    # 64.0.0.0/2
    >>> subnet4([1742463364, 1311235649, 1182087098])
    (1073741824, 2)

    # 0/0
    >>> subnet4([3257689175, 1742463364, 2311235649, 3182087098, 3806496640])
    (0, 0)
    """
    assert hasattr(ip_addresses, '__iter__') is True
    assert type(max_address_bits) is int and max_address_bits > 0
    max_address = 2 ** max_address_bits - 1
    addition, product = 0, max_address
    for ip_address in ip_addresses:
        assert hasattr(ip_address, '__and__') and hasattr(ip_address, '__or__')
        addition |= ip_address
        product &= ip_address
    ip_address_bits = len(bin(addition - product)) - 2
    net_address = addition & (max_address ^ (2 ** ip_address_bits - 1))
    return net_address, (max_address_bits - ip_address_bits)
def mean(l):
    """ Returns the mean value of the given list """
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for x in l:
        total = total + x
    return total / float(len(l))
def get_version(diff_file, ix=True):
    """Determine the product version from the diff file name.

    param ix denotes if the diff file was generated by APIx or CLIx
    """
    split_ver = diff_file.split("/")[-1].split("-")
    if "-comp.yaml" in diff_file:
        return split_ver[0]
    else:
        return f"{split_ver[0]}-to{split_ver[2]}"
def get_row_sql(row):
    """Function to get SQL to create column from row in PROC CONTENTS."""
    postgres_type = row['postgres_type']
    if postgres_type == 'timestamp':
        postgres_type = 'text'
    return row['name'].lower() + ' ' + postgres_type
def equal(*vals):
    """Returns True if all arguments are equal"""
    if len(vals) < 2:
        return True
    a = vals[0]
    for b in vals[1:]:
        if a != b:
            return False
    return True
def alpha_adj(color, alpha=0.25):
    """ Adjust alpha of color. """
    return [color[0], color[1], color[2], alpha]
def discounted_columns_pairs(cashflow_columns, prefix, suffix):
    """
    Computes a dictionary with the undiscounted version of columns as keys
    and the discounted version as values.

    :param cashflow_columns: list of undiscounted cashflow columns
    :param prefix: str prefix used to mark discounted columns
    :param suffix: str suffix used to mark discounted columns
    :return: a dictionary with the undiscounted version of columns as keys
             and the discounted version as values
    """
    return {
        undiscounted_column: prefix + undiscounted_column + suffix
        for undiscounted_column in cashflow_columns
    }
def rescale_to_max_value(max_value, input_list):
    """
    Rescale each value inside input_list into a target range of 0 to max_value
    """
    scale_factor = max_value / float(max(input_list))
    # Multiply each item by the scale_factor
    input_list_rescaled = [int(x * scale_factor) for x in input_list]
    return input_list_rescaled
def binary_search(items, target):
    """O(log n)."""
    low = 0
    high = len(items) - 1
    while low <= high:
        mid = (low + high) // 2
        if items[mid] == target:
            return mid
        elif items[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return None
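# Usage sketch: the list must already be sorted in ascending order.
data = [2, 5, 8, 12, 16, 23, 38]
print(binary_search(data, 23))  # 5
print(binary_search(data, 7))   # None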
def auto_id(index: int, total: int) -> str:
    """Generate ID property for sentence

    Arguments:
        index {int} -- sentence index in content
        total {int} -- total number of sentences in content

    Returns:
        str -- sentence id
    """
    pad = len(str(total))
    template = '{{0:{0}d}} of {{1}}'.format(pad)
    return template.format(index + 1, total)
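# Usage sketch: indices are zero-based and padded to the width of the total.
print(auto_id(0, 12))   # ' 1 of 12'
print(auto_id(11, 12))  # '12 of 12'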
def trailing_stop_loss(last, higher, percentage=3):
    """
    Trailing stop loss function.
    Receives structure with:
    - Last price.
    - Entry point x.
    - Exit percentage [0.1-99.9]
    Returns true when triggered.
    """
    return last <= higher * (1 - (percentage * 0.01))
def match(v1, v2, nomatch=-1, incomparables=None, start=0):
    """
    Return a vector of the positions of (first) matches of its first
    argument in its second.

    Parameters
    ----------
    v1: array_like
        Values to be matched
    v2: array_like
        Values to be matched against
    nomatch: int
        Value to be returned in the case when no match is found.
    incomparables: array_like
        Values that cannot be matched. Any value in ``v1`` matching a value
        in this list is assigned the nomatch value.
    start: int
        Type of indexing to use. Most likely 0 or 1
    """
    v2_indices = {}
    for i, x in enumerate(v2):
        if x not in v2_indices:
            v2_indices[x] = i

    v1_to_v2_map = [nomatch] * len(v1)
    skip = set(incomparables) if incomparables else set()
    for i, x in enumerate(v1):
        if x in skip:
            continue
        try:
            v1_to_v2_map[i] = v2_indices[x] + start
        except KeyError:
            pass
    return v1_to_v2_map
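# Usage sketch (R-style match): first-match positions of v1's values in v2;
# unmatched values keep the nomatch sentinel.
print(match(['b', 'd', 'a'], ['a', 'b', 'c']))           # [1, -1, 0]
print(match(['b', 'd', 'a'], ['a', 'b', 'c'], start=1))  # [2, -1, 1]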
def reverse_only_alnum(s):
    """reverse only alnums, leaving special characters in place"""
    # The original recursion reversed the whole string, contradicting the
    # name and docstring; this emits the alphanumerics in reverse order
    # while keeping every non-alphanumeric character at its original position.
    alnums = [c for c in s if c.isalnum()]
    return ''.join(alnums.pop() if c.isalnum() else c for c in s)
def _im_func(f):
    """Wrapper to get at the underlying function belonging to a method.

    Python 2 is slightly different because classes have "unbound methods"
    which wrap the underlying function, whereas on Python 3 they're just
    functions. (Methods work the same way on both versions.)
    """
    # "im_func" is the old Python 2 name for __func__
    if hasattr(f, '__func__'):
        return f.__func__
    else:
        return f
def pvr(green, red):
    """Normalized Difference 550/650 Photosynthetic vigour ratio
    boosted with Numba

    See: https://www.indexdatabase.de/db/i-single.php?id=484
    """
    return (green - red) / (green + red)
def _class_hasattr(instance, attr):
    """
    Helper function for checking if `instance.__class__` has an attribute

    Parameters
    ----------
    instance : obj
        instance to check
    attr : str
        attribute name

    Returns
    -------
    bool
    """
    return hasattr(instance.__class__, attr)
def readable_mem(mem):
    """
    :param mem: An integer number of bytes to convert to human-readable form.
    :return: A human-readable string representation of the number.
    """
    for suffix in ["", "K", "M", "G", "T"]:
        if mem < 10000:
            return "{}{}".format(int(mem), suffix)
        mem /= 1024
    return "{}P".format(int(mem))
def cipher(text, shift, encrypt=True):
    """
    This function applies the Caesar cipher, which is a simple and well-known
    encryption technique, on text input. Each letter in the text is replaced
    by a letter some fixed number of positions down the alphabet.

    Parameters
    ----------
    text: This is any Python string input. This is the text that is to be
        enciphered.
    shift: This is any Python integer input. This specifies the number of
        positions down the alphabet for the letters in the text to be
        replaced by.
    encrypt: This is set to a default value of True, which indicates that the
        shift is to be added to the position of each letter in the input text
        to obtain the new position of each letter in the enciphered text. If
        this parameter is set to False, the shift will be subtracted from the
        position of each letter in the input text instead.

    Returns
    -------
    This is the enciphered text after the Caesar cipher has been applied to
    the input text.

    Examples
    --------
    >>> from cipher_ln2444 import cipher
    >>> cipher('apple', 1)
    'bqqmf'
    >>> cipher('bqqmf', 1, False)
    'apple'
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    new_text = ''
    for c in text:
        index = alphabet.find(c)
        if index == -1:
            # characters outside the alphabet pass through unchanged
            new_text += c
        else:
            new_index = index + shift if encrypt == True else index - shift
            new_index %= len(alphabet)
            new_text += alphabet[new_index]
    return new_text
def sequential_search(a, value):
    """
    Searches a value in a list. Return the index if found,
    otherwise returns <None>.
    """
    for i in range(len(a)):
        # If found the value
        if a[i] == value:
            return i
    return None
def type_to_str(type_object) -> str:
    """convert a type object to class path in str format

    Args:
        type_object: type

    Returns:
        class path
    """
    cls_name = str(type_object)
    assert cls_name.startswith("<class '"), 'illegal input'
    cls_name = cls_name[len("<class '"):]
    assert cls_name.endswith("'>"), 'illegal input'
    cls_name = cls_name[:-len("'>")]
    return cls_name
def sse_pack(event_id: int, event: str, data: int, retry: str = "2000") -> str:
    """Pack data in Server-Sent Events (SSE) format"""
    return f"retry: {retry}\nid: {event_id}\nevent: {event}\ndata: {data}\n\n"
def _list_frame(value):
    """
    Returns:
        list: Value converted to a list.
    """
    try:
        return [value] if isinstance(value, (str, bytes)) else list(value)
    except TypeError:
        return [value]
def trapezint(f, a, b, n):
    """
    Just for testing - uses trapezoidal approximation of f
    from a to b with n trapezoids
    """
    output = 0.0
    for i in range(int(n)):
        f_output_lower = f(a + i * (b - a) / n)
        f_output_upper = f(a + (i + 1) * (b - a) / n)
        output += (f_output_lower + f_output_upper) * ((b - a) / n) / 2
    return output
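# Usage sketch: integrate x^2 over [0, 1]; the exact value is 1/3, and the
# trapezoid rule overshoots slightly for this convex integrand.
print(trapezint(lambda x: x ** 2, 0.0, 1.0, 1000))  # ~0.3333335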
def _to_time(integ, frac, n=32):
    """Return a timestamp from an integral and fractional part.

    Parameters:
    integ -- integral part
    frac  -- fractional part
    n     -- number of bits of the fractional part

    Returns:
    timestamp
    """
    return integ + float(frac) / 2 ** n
def _find_header(md):
    """Find header markers"""
    mark = '<!-- end header -->'
    lines = md.splitlines()
    for n, line in enumerate(lines):
        if mark == line:
            return n
    return None
def percent_change(starting_point, current_point):
    """
    Computes the percentage difference between two points

    :return: The percentage change between starting_point and current_point
    """
    default_change = 0.00001
    try:
        change = ((float(current_point) - starting_point) / abs(starting_point)) * 100.00
        if change == 0.0:
            return default_change
        else:
            return change
    except Exception:
        return default_change
def get_value(json_dict, key, default=""):
    """Try to fetch optional parameters from input json dict."""
    try:
        return json_dict[key]
    except KeyError:
        return default
def _reg_str_comp(str1, str2):
    """Compare the float values in str1 and str2 and determine if they are
    equal. Returns True if they are the "same", False if different"""
    aux1 = str1.split()
    aux2 = str2.split()
    if not aux1[0] == aux2[0] == "@value":
        # This line does not need to be compared
        return True

    # Extract required tolerances and values
    rel_tol = float(aux1[2])
    abs_tol = float(aux1[3])
    val1 = float(aux1[1])
    val2 = float(aux2[1])

    if val2 != 0:
        rel_err = abs((val1 - val2) / val2)
    else:
        rel_err = abs((val1 - val2) / (val2 + 1e-16))
    abs_err = abs(val1 - val2)

    if abs_err < abs_tol or rel_err < rel_tol:
        return True
    else:
        return False
def af_rotation(xfm90, xf0, xf90, xf180):
    """
    Takes the fiducial centers measured in the x direction at -90, 0, 90,
    and 180 degrees and returns the offset in x and z from the center of
    rotation, as well as the unrotated x position of the fiducial marker.

    The x offset is not expected to vary between loads, and has been
    measured to be 1.88, while the z offset varies as the bar flexes in
    this direction, and will be used to map the surface locations of
    other samples between the fiducials.
    """
    x0 = xf0
    xoff = (xf180 + x0) / 2
    zoff = (xfm90 - xf90) / 2
    return (x0, zoff, xoff)
def normalize(val, minval, maxval):
    """Scale a value between 0 and 1."""
    if val >= maxval:
        return 1
    elif val <= minval:
        return 0
    normed = float(val - minval) / float(maxval - minval)
    return normed
def get_supported_os(scheduler):
    """
    Return a tuple of the os supported by parallelcluster for the specific
    scheduler.

    :param scheduler: the scheduler for which we want to know the supported os
    :return: a tuple of strings of the supported os
    """
    # awsbatch only supports Amazon Linux; the original conditional compared
    # against "alinux" in both branches, which made it a no-op.
    if scheduler == "awsbatch":
        return ("alinux",)
    return ("alinux", "centos6", "centos7", "ubuntu1604", "ubuntu1804")
def build_dict(key_list, value):
    """
    Build a hierarchical dictionary with a single element from the list of
    keys and a value.

    :param key_list: List of dictionary element keys.
    :param value: Key value.
    :return: dictionary
    """
    temp_dict = {}
    if key_list:
        # pop() takes keys from the end of the list, so the last key becomes
        # the outermost level; note that key_list is consumed in the process.
        key = key_list.pop()
        temp_dict[key] = build_dict(key_list, value)  # recursion
        return temp_dict
    else:
        return value
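# Usage sketch: keys are popped from the end, so the last key is outermost.
print(build_dict(['a', 'b', 'c'], 1))  # {'c': {'b': {'a': 1}}}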
def map_items_to_parent(items, parents):
    """Groups all items into a list based on their respective parent

    ex: pages have request, runs have pages
    """
    item_lists = {}
    if len(parents) > 0:
        # next(iter(...)) replaces the Python 2 idiom parents.values()[0]
        pk = next(iter(parents.values())).pk
        for p_id in parents:
            item_lists[p_id] = []
        for item_id, item in items.items():  # .items() replaces .iteritems()
            p_id = item.data[pk]
            if p_id in item_lists:
                item_lists[p_id].append(item)
    return item_lists
def scaleMatrix(x, y, z):
    """Generate scale matrix

    x,y,z -- scale vector
    """
    S = [
        [x,  0., 0., 0.],
        [0., y,  0., 0.],
        [0., 0., z,  0.],
        [0., 0., 0., 1.]
    ]
    return S
def flatten(l, ltypes=(list, tuple)):
    """
    Flattens an arbitrarily large list or tuple
    Apparently very fast
    Pulled from online >> https://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
    Author: Mike C. Fletcher, Distribution: BasicTypes library.
    """
    ltype = type(l)
    l = list(l)
    i = 0
    while i < len(l):
        while isinstance(l[i], ltypes):
            if not l[i]:
                l.pop(i)
                i -= 1
                break
            else:
                l[i:i + 1] = l[i]
        i += 1
    return ltype(l)
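# Usage sketch: nesting of lists and tuples collapses to a single level;
# the result takes the type of the outer container.
print(flatten([1, [2, [3, 4], []], (5,)]))  # [1, 2, 3, 4, 5]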
def _annotate_bookmark(label, bookmark=None):
    """
    Annotate a bookmark indicator onto the type indicator of a bucket
    or prefix
    """
    if not bookmark:
        return label
    return '\x1b[33m${}\x1b[0m {}'.format(bookmark, label)
def _transform(command, *args):
    """Apply command's transformation function (if any) to given arguments.

    Arguments:
    command -- the command description dict
    *args -- command arguments
    """
    if "value_transform" in command:
        return command["value_transform"](*args)
    return args if len(args) > 1 else args[0]
def path(y):
    """Equation: x = a(y-h)^2 + k"""
    # vertex parameters h and k are both zero here
    a = 110.0 / 160.0 ** 2
    x = a * y ** 2 + 0.0
    return x, y
def array_index_to_idx(i: int, idxpos0: int):
    """
    Converts a nucleotide index to a 0-included array index

    :param i: The array index
    :param idxpos0: The start index
    :return: The index of the element in the array

    >>> array_index_to_idx(5, 5)
    10
    >>> array_index_to_idx(209, -200)
    10
    >>> array_index_to_idx(1, -1)
    1
    """
    return idxpos0 + i + (1 if (idxpos0 < 0 < i or i < 0 < idxpos0) else 0)
def lowercase_first_string_letter(v: str):
    """Transform a string to lowercase first letter.

    Args:
        v (str): String value to convert

    Returns:
        str: v with its first letter lowercased
    """
    # v[:1] avoids an IndexError on the empty string
    return v[:1].lower() + v[1:]
def whitespace_tokenize(text):
    """Basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    return text.split() if text else []
def serialize_mc(analysis, type):
    """Serializer function for the Monte Carlo Analysis"""
    return {
        'id': None,
        'type': type,
        'attributes': {
            'window': analysis.get('window', None),
            'mc_number': analysis.get('mc_number', None),
            'bin_number': analysis.get('bin_number', None),
            'description': analysis.get('description', None),
            'anomaly': analysis.get('anomaly', None),
            'anomaly_uncertainty': analysis.get('anomaly_uncertainty', None),
            'upper_p': analysis.get('upper_p', None),
            'p': analysis.get('p', None),
            'lower_p': analysis.get('lower_p', None)
        }
    }
def get_docker_networks(data, state, labels=None):
    """Get list of docker networks."""
    network_list = []
    network_names = []
    if not labels:
        labels = {}
    for platform in data:
        if "docker_networks" in platform:
            for docker_network in platform["docker_networks"]:
                if "labels" not in docker_network:
                    docker_network["labels"] = {}
                for key in labels:
                    docker_network["labels"][key] = labels[key]
                docker_network["state"] = state
                if "name" in docker_network:
                    network_list.append(docker_network)
                    network_names.append(docker_network["name"])
        # If a network name is defined for a platform but is not defined in
        # docker_networks, add it to the network list.
        if "networks" in platform:
            for network in platform["networks"]:
                if "name" in network:
                    name = network["name"]
                    if name not in network_names:
                        network_list.append(
                            {"name": name, "labels": labels, "state": state}
                        )
    return network_list