content
stringlengths
42
6.51k
def _sort_key_max_confidence_sd(sample): """Samples sort key by the max. confidence_sd.""" max_confidence_sd = float("-inf") for inference in sample["inferences"]: confidence_sd = inference.get("confidence_sd", float("-inf")) if confidence_sd > max_confidence_sd: max_confidence_sd = confidence_sd return max_confidence_sd
def route(*ends) -> str:
    """Build an analyticord URL from path segments.

    route("api", "botLogin") -> "https://analyticord.solutions/api/botLogin"
    """
    base = "https://analyticord.solutions"
    segments = [base]
    segments.extend(ends)
    return "/".join(segments)
def stringToPercentage(s):
    """Convert a percentage-like string to a float in [0.0, 1.0].

    Accepts forms such as "60.2%", "60.2", or "0.602" — all treated the
    same.  Without the percent sign, a value greater than 1 is still
    divided by 100 (gracefully handling user misinterpretation of the
    input specification).  Unparseable or empty input yields 0.

    Keyword arguments:
    s - the string to convert.
    """
    if not s:
        return 0
    scale_down = False
    if s.endswith("%"):
        s = s[:-1].strip()
        if not s:
            return 0
        scale_down = True
    try:
        value = float(s)
    except ValueError:
        return 0
    if value > 1:
        scale_down = True
    return value / 100 if scale_down else value
def swap_values(x: int, y: int) -> tuple:
    """Return the two arguments in swapped order."""
    return y, x
def as_int(n):
    """Convert the argument to a builtin integer.

    The return value is guaranteed to be equal to the input; a
    ``ValueError`` is raised when the input has a non-integral value
    (or cannot be converted at all).

    Examples
    ========

    >>> as_int(3.0)
    3
    """
    try:
        converted = int(n)
        if converted != n:
            # Lossy conversion (e.g. 3.5) — treat like an un-convertible input.
            raise TypeError
    except TypeError as exc:
        raise ValueError(f'{n} is not an integer') from exc
    return converted
def set_max_call_stack_size_to_capture(size: int) -> dict:
    """Build the ``Runtime.setMaxCallStackSizeToCapture`` CDP command.

    Parameters
    ----------
    size: int
        **Experimental**
    """
    params = {"size": size}
    return {"method": "Runtime.setMaxCallStackSizeToCapture", "params": params}
def create_marker_and_content(genome_property_flat_file_line):
    """Split a genome property flat-file line into a (marker, content) pair.

    The marker is the first double-space-delimited column; the remaining
    columns are concatenated to form the content.

    :param genome_property_flat_file_line: A line from a genome property flat file.
    :return: A tuple containing a marker, content pair.
    """
    marker_column, *content_columns = genome_property_flat_file_line.split('  ')
    return marker_column.strip(), ''.join(content_columns).rstrip()
def merged_codepoints(cps):
    """Collapse codepoint objects into inclusive (start, end) runs.

    Input objects must expose a ``.codepoint`` attribute; consecutive
    codepoints are merged into a single range.
    """
    if not cps:
        return []
    ordered = sorted(cps, key=lambda cp: cp.codepoint)
    ranges = [(ordered[0], ordered[0])]
    for cp in ordered[1:]:
        run_start, run_end = ranges[-1]
        if cp.codepoint == run_end.codepoint + 1:
            ranges[-1] = (run_start, cp)
        else:
            ranges.append((cp, cp))
    return ranges
def generate_freq_vector(index_vector, max_freq, number_of_freq_estimations):
    """Scale an index vector into a frequency vector.

    Each index maps to ``index * max_freq / number_of_freq_estimations``;
    works elementwise for array-like inputs.
    """
    scaled = index_vector * max_freq
    return scaled / number_of_freq_estimations
def parse_keypair_lines(content, delim='|', kv_sep='='):
    """Parse entities, one per line, each a delimited set of key-value pairs.

    Each entity becomes a dict in the returned list; falsy lines are skipped.

    Fixed: pairs were split on every occurrence of ``kv_sep``, so a value
    containing the separator (e.g. ``token=a=b``) raised ValueError; split
    with ``maxsplit=1`` keeps such values intact.

    :param content: iterable of lines
    :param delim: separator between key-value pairs on a line
    :param kv_sep: separator between a key and its value
    :return: list of dicts, one per non-empty line
    """
    parsed = []
    if content:
        for row in content:
            if not row:
                continue
            item_dict = {}
            for item in row.split(delim):
                # maxsplit=1 so values containing kv_sep survive unpacking
                key, value = [part.strip("'\"").strip()
                              for part in item.strip().split(kv_sep, 1)]
                item_dict[key] = value
            parsed.append(item_dict)
    return parsed
def hpo_genes_from_dynamic_gene_list(case_obj, is_clinical, clinical_symbols):
    """Collect gene symbols from a case's dynamic (HPO) gene list.

    Used when dynamic_panel_phenotypes is empty, e.g. when the user added
    custom genes to the HPO panel.

    Args:
        case_obj(dict): models.Case
        is_clinical(bool): if True, keep only genes among clinical_symbols
        clinical_symbols(set): set of clinical symbols

    Returns:
        set: unique gene symbols (hgnc_symbol, or hgnc_id as string)
    """
    unique_genes = {
        gene.get("hgnc_symbol") or str(gene["hgnc_id"])
        for gene in case_obj["dynamic_gene_list"]
    }
    if is_clinical:
        return unique_genes & set(clinical_symbols)
    return unique_genes
def tf(seconds):
    """Format a duration in seconds as days, hours, minutes, and seconds.

    Parameters
    ----------
    seconds : float
        The time in seconds.

    Returns
    -------
    str
        The formatted time; zero-valued leading units are omitted and
        seconds are rounded to two decimals.
    """
    days, seconds = divmod(seconds, 60 * 60 * 24)
    hours, seconds = divmod(seconds, 60 * 60)
    minutes, seconds = divmod(seconds, 60)
    parts = []
    if days > 0:
        parts.append("%s days" % int(days))
    if hours > 0:
        parts.append("%s hours" % int(hours))
    if minutes > 0:
        parts.append("%s minutes" % int(minutes))
    parts.append("%s seconds" % round(seconds, 2))
    return ", ".join(parts)
def squote(text):
    """Return *text* surrounded by single quotes."""
    return f"'{text}'"
def decision_boundary(prob):
    """Map a probability to a class: 1 when prob >= 0.5, otherwise 0."""
    if prob >= .5:
        return 1
    return 0
def define_body_for_add_ip_command(list_target, mask, ip_address, duration, note, tags):
    """Build the body of a POST request that adds an IP to an F5 Silverline list.

    API docs: https://portal.f5silverline.com/docs/api/v1/ip_objects.md (POST section)
    """
    attributes = {"mask": mask, "ip": ip_address, "duration": duration}
    meta = {"note": note, "tags": tags}
    data = {"id": "", "type": "ip_objects", "attributes": attributes, "meta": meta}
    return {"list_target": list_target, "data": data}
def hours_difference(time_1, time_2):
    """(number, number) -> float

    Return how many hours later time_2 (in seconds) is than time_1 (in
    seconds); negative when time_2 is earlier.

    >>> hours_difference(1800.0, 3600.0)
    0.5
    >>> hours_difference(3600.0, 1800.0)
    -0.5
    """
    seconds_apart = time_2 - time_1
    return seconds_apart / 3600
def get_acaddress(core_data):
    """ returns address for aircraft oem if it exists.

    :param core_data: indexable collection of parsed markup nodes (element
        at index 1 has ``.children``) — presumably BeautifulSoup elements,
        TODO confirm; or the sentinel string 'error' from a failed fetch.
    :return: the address text, 'N/A' on error/sentinel, or implicitly None
        when no child text matched (NOTE(review): confirm callers handle
        the None case).
    """
    if core_data != 'error':
        try:
            core_data = core_data[1]
            children = list(core_data.children)
            for i, child in enumerate(children):
                for t in child:
                    # major assumption -- if there is a comma or 'USA' in text then it's an address
                    # success of this assumption gives >90% accuracy given observed site structure, some errors are imminent
                    if ',' in t or 'USA' in t:
                        address = child.strip()
                        return address
        except:
            # NOTE(review): bare except reports every failure as 'N/A',
            # hiding the underlying error.
            return 'N/A'
    else:
        return 'N/A'
def masked_by_quotechar(data, quotechar, escapechar, test_char):
    """Test if a character is always masked by quote characters

    This function tests if a given character is always within quoted
    segments (defined by the quote character). Double quoting and escaping
    is supported.

    Parameters
    ----------
    data: str
        The data of the file as a string

    quotechar: str
        The quote character

    escapechar: str
        The escape character

    test_char: str
        The character to test

    Returns
    -------
    masked: bool
        Returns True if the test character is never outside quoted
        segements, False otherwise.
    """
    if test_char == "":
        return False
    escape_next = False
    in_quotes = False
    i = 0
    while i < len(data):
        s = data[i]
        if s == quotechar:
            # An escaped quote does not toggle the quoting state.
            if escape_next:
                i += 1
                continue
            if not in_quotes:
                in_quotes = True
            else:
                # A doubled quotechar inside quotes is a literal quote.
                if i + 1 < len(data) and data[i + 1] == quotechar:
                    i += 1
                else:
                    in_quotes = False
        elif s == test_char and not in_quotes:
            return False
        elif s == escapechar:
            escape_next = True
        # NOTE(review): escape_next is set here but never reset back to
        # False after the following character is consumed (only the
        # quotechar branch acts on it) — confirm this is intentional.
        i += 1
    return True
def make_ks(reverse=False):
    """Truth-flag triples for three variables: the bits of the ints 0..7.

    0 means the denial of a var (0 for var X means notX); 1 means the var
    is true.  With ``reverse=True`` every bit is complemented.
    """
    bit_order = (1, 0) if reverse else (0, 1)
    return [(i, j, k)
            for i in bit_order
            for j in bit_order
            for k in bit_order]
def filter_matching_fields(fields, other_fields):
    """Return entries of *fields* also present in *other_fields*, ignoring case."""
    wanted = {field.lower() for field in other_fields}
    return [field for field in fields if field.lower() in wanted]
def _as_windows_path(s): """Returns the input path as a Windows path (replaces all of "/" with "\").""" return s.replace("/", "\\")
def derivs1(t, x, v):
    """Derivatives for the simple harmonic oscillator ("sliding block").

    Returns (dx/dt, dv/dt) = (v, -x); *t* is unused but kept for the
    standard ODE-integrator signature.
    """
    return (v, -x)
def stimulus(t):
    """External current: step up to 10 uA/cm^2 at t>100, down to 0 at t>200,
    up to 35 at t>300, down to 0 at t>400.

    | :param t: time
    | :return: stimulus amplitude at time t (bool arithmetic keeps this
      usable elementwise on array-like t)
    """
    steps = ((100, 10), (200, -10), (300, 35), (400, -35))
    return sum(amplitude * (t > threshold) for threshold, amplitude in steps)
def preprocess_sents(sentences_list):
    """Clean up sentences predicted by TRAM.

    Newlines inside a sentence become spaces, the double spaces that can
    produce are collapsed, and strings shorter than 3 characters (empty
    strings, stray punctuation) are dropped.
    """
    cleaned = []
    for sentence in sentences_list:
        sentence = sentence.replace('\n', ' ').replace('  ', ' ')
        if len(sentence) >= 3:
            cleaned.append(sentence)
    return cleaned
def get_max_digits(numbers):
    """Return the digit-string length of the widest number in *numbers*."""
    return max(len(str(number)) for number in numbers)
def string_search(lst, stringy):
    """Return True if any string in *lst* occurs as a substring of *stringy*.

    Args:
        lst: list of strings to check
        stringy: string to check

    Returns:
        bool
    """
    for candidate in lst:
        if candidate in stringy:
            return True
    return False
def apriori_gen(f_items):
    """Candidate generation: join frequent k-itemsets into (k+1)-itemsets.

    Two same-size itemsets that differ in exactly one element are unioned
    into a candidate frequent (k+1)-itemset.
    """
    candidates = []
    for idx, first in enumerate(f_items):
        for second in f_items[idx + 1:]:
            # Same-size sets differing by one element can be joined.
            if len(first['itemset'] - second['itemset']) == 1:
                joined = first['itemset'] | second['itemset']
                candidates.append({'itemset': joined})
    return candidates
def reverse(t):
    """Return a tuple with the elements of *t* in reversed order.

    Fixed: the original ``range(len(t)-1, 0, -1)`` stopped before index 0,
    silently dropping the first element (reverse((1, 2, 3)) gave (3, 2)).
    """
    return tuple(reversed(t))
def hamming(num):
    """Return the *num*-th Hamming number (1-indexed; hamming(1) == 1).

    Classic three-pointer merge: each of the 2-, 3- and 5-multiples
    streams advances independently; duplicates are skipped because all
    matching streams advance on a tie.
    """
    results = [1] * num
    next2, next3, next5 = 2, 3, 5
    i2 = i3 = i5 = 0
    for idx in range(1, num):
        smallest = min(next2, next3, next5)
        results[idx] = smallest
        if next2 == smallest:
            i2 += 1
            next2 = 2 * results[i2]
        if next3 == smallest:
            i3 += 1
            next3 = 3 * results[i3]
        if next5 == smallest:
            i5 += 1
            next5 = 5 * results[i5]
    return results[-1]
def get_drug_targets_from_chembl(cursor, unification_table, user_entity_ids):
    """
    Get the drug targets of a list of drug user entity IDs from ChEMBL

    :param cursor: open database cursor (executed once per drug user entity)
    :param unification_table: name of the unification table interpolated
        into the SQL (NOTE(review): interpolated via .format(), not
        parameterized — safe only if the name is not user-controlled)
    :param user_entity_ids: iterable of drug user entity IDs to look up
    :return: tuple of (set of (drug_ueid, target_ueid) pairs,
        set of target ueids, dict mapping drug ueid -> set of target ueids)
    """
    # Joins drug and protein entities through shared relations, restricted
    # to ChEMBL protein records; the drug ueid is bound as a parameter.
    query_chembl_targets = ("""SELECT U2.userEntityID, E2.type, DB.databaseName FROM {} U1, externalEntityRelationParticipant R1, externalEntityRelationParticipant R2, {} U2, externalEntity E2, externalDatabase DB WHERE U1.externalEntityID = R1.externalEntityID AND R1.externalEntityRelationID = R2.externalEntityRelationID AND R1.externalEntityID != R2.externalEntityID AND R2.externalEntityID = U2.externalEntityID AND R2.externalEntityID = E2.externalEntityID AND E2.externalDatabaseID = DB.externalDatabaseID AND DB.databaseName = "chembl" AND E2.type = "protein" AND U1.userEntityID = %s """.format(unification_table, unification_table))
    print('\nRETRIEVING DRUG TARGETS FROM CHEMBL ASSOCIATED TO USER ENTITY IDS...\n')
    chembl_drug_target_interactions = set()
    chembl_drug_to_targets = {}
    chembl_targets = set()
    for ueid1 in user_entity_ids:
        cursor.execute(query_chembl_targets, (ueid1,))
        for row in cursor:
            ueid2, ee_type, database = row
            #print(ueid1, ueid2, source)
            chembl_targets.add(ueid2)
            interaction = (ueid1, ueid2)
            chembl_drug_target_interactions.add(interaction)
            # Accumulate the per-drug target set lazily.
            chembl_drug_to_targets.setdefault(ueid1, set()).add(ueid2)
    print('NUMBER OF DRUG TARGET INTERACTIONS RETRIEVED FROM CHEMBL: {}'.format(len(chembl_drug_target_interactions)))
    print('NUMBER OF DRUG TARGETS RETRIEVED FROM CHEMBL: {}'.format(len(chembl_targets)))
    return chembl_drug_target_interactions, chembl_targets, chembl_drug_to_targets
def is_substring_divisible(num):
    """Return True if the 10-digit numeral has substrings divisible by
    sequential prime numbers.

    The 3-digit substrings starting at positions 1..7 must be divisible by
    2, 3, 5, 7, 11, 13, 17 respectively (Project Euler 43 property).
    """
    digits = str(num)
    primes = (2, 3, 5, 7, 11, 13, 17)
    for offset, prime in enumerate(primes, start=1):
        if int(digits[offset:offset + 3]) % prime != 0:
            return False
    return True
def hasmethod(obj, m):
    """Return ``True`` if object *obj* contains the callable attribute *m*."""
    attribute = getattr(obj, m, None)
    return attribute is not None and callable(attribute)
def blocks(text):
    """Split *text* into blocks delimited by a blank line."""
    separator = "\n\n"
    return text.split(separator)
def common_elements(set_1, set_2):
    """Return the set of elements present in both inputs."""
    return {element for element in set_1 if element in set_2}
def run(action, *args, **kwargs):
    """
    :doc: run
    :name: renpy.run
    :args: (action)

    Run an action or list of actions.  A single action is invoked with the
    given arguments, a list of actions is run in order via this function,
    and None is ignored.

    Returns the result of the last action in a list to return a non-None
    value (or the single action's result).
    """
    if action is None:
        return None
    if not isinstance(action, (list, tuple)):
        return action(*args, **kwargs)
    rv = None
    for item in action:
        result = run(item, *args, **kwargs)
        if result is not None:
            rv = result
    return rv
def calc_error_by_batches(model, data_size, batch_size):
    """Average the model's error over [beg, end) batches.

    Each batch error is weighted by the batch length, so the result is the
    mean error over the whole data set.
    """
    weighted_total = 0.0
    for beg in range(0, data_size, batch_size):
        end = min(beg + batch_size, data_size)
        weighted_total += model(beg, end) * (end - beg)
    return weighted_total / data_size
def info(v, row, row_n, i_s, i_d, header_s, header_d, scratch, errors, accumulator):
    """Print debug information about a column value and return it unchanged.

    Prints the row number, destination header, python type, and the value
    truncated to 40 characters.

    :param v: The current value of the column
    :param row: A RowProxy object for the whole row
    :param row_n: The current row number
    :param i_s: The numeric index of the source column
    :param i_d: The numeric index for the destination column
    :param header_s: The name of the source column
    :param header_d: The name of the destination column
    :param scratch: A dict for storing values; persists between rows
    :param errors: A dict for error messages; persists per row
    :param accumulator: A dict for accumulating values (e.g. aggregates)
    :return: The final value to be supplied for the column
    """
    truncated = str(v)[:40]
    print("{}:{} {} {}".format(row_n, header_d, type(v), truncated))
    return v
def to_bytes_string(value):
    # type: (...) -> bytes
    """Encode *value* as UTF-8 bytes when it is text; pass through otherwise."""
    if isinstance(value, str):
        return value.encode("utf8")
    return value
def is_subdomain(domain, reference):
    """Test if a hostname is a subdomain of a reference hostname.

    e.g. www.domain.com is a subdomain of reference domain.com; the
    reference itself is not considered its own subdomain.

    Fixed: the original substring search accepted lookalike hosts such as
    "evildomain.com" for reference "domain.com"; require a dot boundary.

    @param domain: Domain to test if it is a subdomain
    @param reference: Reference "parent" domain
    """
    return domain.endswith("." + reference)
def is_unsigned_number(number):
    """Return True when *number* parses as a float that is >= 0.

    Fixed: on a non-numeric string the original fell through to the
    ``>= 0`` comparison on the unconverted value and raised TypeError;
    now any unparseable input simply returns False.

    :rtype: boolean
    :param number: value to test (string or numeric)
    :return: whether the value is a non-negative number
    """
    try:
        value = float(number)
    except (TypeError, ValueError):
        return False
    return value >= 0
def toKelvin(temp):
    """Convert a Celsius temperature (int or float) to Kelvin."""
    return temp + 273.15
def get_array_names(symbols):
    """Given a set of symbols, return (source array names, destination
    array names): the 's_'/'d_'-prefixed symbols, excluding the index
    symbols 's_idx' and 'd_idx'.
    """
    src_arrays = {sym for sym in symbols if sym.startswith('s_') and sym != 's_idx'}
    dest_arrays = {sym for sym in symbols if sym.startswith('d_') and sym != 'd_idx'}
    return src_arrays, dest_arrays
def reduceQtyVars(nb_min_var:int, dict_values:dict, list_models_var):
    """
    return a list of model_var that the quantities of each variable are upper than the np_min_ar

    NOTE(review): mutates ``list_models_var`` in place (elements are
    popped) and returns the same list object.

    :param nb_min_var: quantity of the minimum variables that you want to save
    :param dict_values: dictionary with the frequency variables
    :param list_models_var: list of all the model_var objects
    :type nb_min_var: integer - required
    :type dict_values: dict{string:int} - required
    :type list_models_var: list[model_var] - required
    :return: list with all the model_Var saved
    :rtype: list[model_var]
    """
    dict2 = dict_values.copy()
    # Keep the variables whose frequency is below the threshold; these are
    # the variables to remove.
    dict2 = {k: v for k, v in dict2.items() if v < nb_min_var}
    list_var_remove = list(dict2.keys())
    list_index_remove = []
    index_value = 0
    # Record the index of every model that mentions a removed variable.
    for model_var in list_models_var:
        var_in_models = list(model_var.dict_freq_var.keys())
        exists_var = any(x in var_in_models for x in list_var_remove)
        if exists_var == True:
            list_index_remove.append(index_value)
        index_value =index_value +1
    # Pop from the end so earlier indices stay valid.
    list_index_remove= reversed(list_index_remove)
    for element in list_index_remove:
        list_models_var.pop(element)
    return list_models_var
def div(a, b):
    """Return a divided by b; raise ValueError when b is zero (or falsy)."""
    if b:
        return a / b
    raise ValueError("Cannot divide by zero!")
def undunder_keys(_dict):
    """Returns dict with the dunder keys converted back to nested dicts

    eg::

        >>> undunder_keys({'a': 'hello', 'b__c': 'world'})
        {'a': 'hello', 'b': {'c': 'world'}}

    Fixed: keys with more than two segments ('a__b__c') previously dropped
    every segment after the second; splitting one delimiter at a time and
    recursing on the remainder nests them fully.

    :param _dict : (dict) flat dict
    :rtype : (dict) nested dict
    """
    def nest(key, value):
        # Peel off one '__' level at a time and recurse on the rest.
        head, sep, rest = key.partition('__')
        return {head: nest(rest, value) if sep else value}

    result = {}
    for piece in (nest(k, v) for k, v in _dict.items()):
        top = next(iter(piece))
        if top in result:
            # Shallow-merge second-level keys that share a top-level key.
            result[top].update(piece[top])
        else:
            result.update(piece)
    return result
def merge_results(old_results, new_results):
    """Copy audit info from an old baseline into a new baseline.

    Secrets that appear only in the old baseline are ignored (they likely
    moved).  When a secret exists in both baselines and only the old one
    carries an ``is_secret`` audit flag, that flag is copied over.

    :type old_results: dict
    :param old_results: results of status quo
    :type new_results: dict
    :param new_results: results to replace status quo
    :rtype: dict
    """
    for filename, old_secrets in old_results.items():
        if filename not in new_results:
            continue
        by_hash = {secret['hashed_secret']: secret for secret in old_secrets}
        for new_secret in new_results[filename]:
            match = by_hash.get(new_secret['hashed_secret'])
            if match is None:
                # The new scan did not find this old secret — it probably
                # moved, so don't join the two sets.
                continue
            # Only propagate 'is_secret' if it's not already there.
            if 'is_secret' in match and 'is_secret' not in new_secret:
                new_secret['is_secret'] = match['is_secret']
    return new_results
def get_positions(start_idx, end_idx, length):
    """Get subj/obj position sequence relative to the [start_idx, end_idx]
    span: negative offsets before it, zeros inside it, positive after.
    """
    before = list(range(-start_idx, 0))
    inside = [0] * (end_idx - start_idx + 1)
    after = list(range(1, length - end_idx))
    return before + inside + after
def fire_print(requested_print, completed_print):
    """Execute print jobs for requested models.

    Pops jobs off the end of *requested_print* (emptying it), announces
    each, and appends them to *completed_print*, which is returned.
    """
    while requested_print:
        current_print = requested_print.pop()
        print(f"\n Currently printing {current_print}")
        completed_print.append(current_print)
    return completed_print
def sort_orbitals(element_pdos):
    """Sort the orbitals of an element's projected density of states.

    Orbitals are ordered canonically (s < p < d < f, lm-decomposed
    orbitals included), which is useful for plotting/saving.

    Args:
        element_pdos (dict): An element's pdos, formatted as
            ``{orbital: dos}``, e.g. ``{'s': dos, 'px': dos}``.

    Returns:
        list: The sorted orbital keys.
    """
    canonical_order = ("s", "p", "py", "pz", "px",
                       "d", "dxy", "dyz", "dz2", "dxz", "dx2",
                       "f", "f_3", "f_2", "f_1", "f0", "f1", "f2", "f3")
    present = element_pdos.keys()
    return [orbital for orbital in canonical_order if orbital in present]
def height(square: float, side3: float):
    """Find the height of a triangle from its area ("square") and base side.

    >>> print(height(48, 12))
    8.0
    """
    doubled_area = 2 * square
    return doubled_area / side3
def max_activities(start: list, end: list) -> int:
    """Greedy count of non-overlapping activities.

    Since the activities are sorted by finish time, the problem is solved
    in O(n): always keep the next activity that starts no earlier than the
    previous kept one finishes.

    Fixed: an empty activity list previously returned 1; it now returns 0.
    """
    if not start:
        return 0
    count: int = 1
    prev_index: int = 0
    for index in range(1, len(start)):
        if start[index] >= end[prev_index]:
            count += 1
            prev_index = index
    return count
def _compute_regularization(alpha, l1_ratio, regularization): """Compute L1 and L2 regularization coefficients for W and H""" alpha_H = 0. alpha_W = 0. if regularization in ('both', 'components'): alpha_H = float(alpha) if regularization in ('both', 'transformation'): alpha_W = float(alpha) l1_reg_W = alpha_W * l1_ratio l1_reg_H = alpha_H * l1_ratio l2_reg_W = alpha_W * (1. - l1_ratio) l2_reg_H = alpha_H * (1. - l1_ratio) return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
def quadratic_func(x, a):
    """Evaluate the quadratic y = 2x^2 + a - 1.

    :param x: input value
    :param a: constant offset
    :return: 2*x**2 + a - 1
    """
    return 2 * x ** 2 + a - 1
def quick_sort_median(aList, startIndex=0, endIndex=None, comparisons=False):
    """Sort a list from least to greatest using quicksort

    Returns a sorted list (the same list object, sorted in place).
    If 'comparisons' is set to True, it returns the sorted list and the
    number of comparisons.

    It chooses the median of the first, middle and last element in the
    list as the pivot.
    """
    if endIndex is None:
        endIndex = len(aList)
    # Base Case: a 0- or 1-element slice is already sorted.
    if endIndex - startIndex <= 1:
        if comparisons:
            return aList, 0
        else:
            return aList
    ## DEBUG
    #import ipdb; ipdb.set_trace()
    #print(aList[startIndex:endIndex])
    # Find the median of the first, middle and last elements
    first = aList[startIndex]
    if (endIndex - startIndex) % 2 == 0:
        middle = aList[startIndex + int((endIndex-startIndex)/2)-1]
    else:
        middle = aList[startIndex + int((endIndex-startIndex)/2)]
    last = aList[endIndex-1]
    # Is the first element the median of the three?  (Already in place.)
    if middle < first < last or last < first < middle:
        pivot = first
    # Is the middle element the median of the three?
    elif first < middle < last or last < middle < first:
        pivot = middle
        # Swap the middle with the first
        if (endIndex - startIndex) % 2 == 0:
            aList[startIndex + int((endIndex-startIndex)/2)-1] = aList[startIndex]
        else:
            aList[startIndex + int((endIndex-startIndex)/2)] = aList[startIndex]
        aList[startIndex] = pivot
    # The last element must be the median of the three...
    else:
        pivot = last
        # Switch the last element with the first
        aList[endIndex-1] = aList[startIndex]
        aList[startIndex] = pivot
    # Partition the list between elements greater than and less than
    # the pivot element
    p = startIndex + 1  # Partition index
    i = startIndex + 1  # Element index
    for elem in aList[startIndex+1:endIndex]:
        # Is this element less than our pivot?
        if elem < pivot:
            # Swap this element with the lowest item in the upper
            # partition. But only do that if we've created an upper
            # partition.
            if i != p:
                aList[i] = aList[p]
                aList[p] = elem
            # Move the partition index up to make room for the new
            # value.
            p += 1
        # Track the index of the next list element
        i += 1
    # Move the pivot element between the partitions
    aList[startIndex] = aList[p-1]
    aList[p-1] = pivot
    ## DEBUG
    #import ipdb; ipdb.set_trace()
    if comparisons:
        # One comparison per non-pivot element in this slice.
        compares = len(aList[startIndex:endIndex]) - 1
        # Rescursively call quick_sort on the upper and lower partitions
        aList, lowerCompares = quick_sort_median(aList, startIndex, p-1, True)
        aList, upperCompares = quick_sort_median(aList, p, endIndex, True)
        totalCompares = compares + lowerCompares + upperCompares
        return aList, totalCompares
    else:
        # Rescursively call quick_sort on the upper and lower partitions
        aList = quick_sort_median(aList, startIndex, p-1)
        aList = quick_sort_median(aList, p, endIndex)
    # Return the sorted list
    return aList
def parseNum(num):
    """Parse a numeric literal: 0x or $ is hex, 0b is binary, a leading 0 is
    octal; otherwise assume decimal.

    Fixed: the '$' prefix was detected but never stripped, so int('$FF', 16)
    raised ValueError; the digits after '$' are now parsed directly.
    """
    num = str(num).strip()
    base = 10
    if num.startswith('$'):
        base = 16
        num = num[1:]
    elif num.startswith('0') and len(num) > 1:
        if num[1] == 'x':
            base = 16
        elif num[1] == 'b':
            base = 2
        else:
            base = 8
    # int() accepts the matching '0x'/'0b' prefix for bases 16 and 2.
    return int(num, base)
def cons_tuple(head, tail):
    """Prepend *head* to the tuple *tail* (lisp-style cons)."""
    return tuple([head, *tail])
def is_palindrome(word):
    """Check if a given word is a palindrome (case-insensitive).

    Raises ValueError for non-string input and for the empty string.
    """
    if not isinstance(word, str):
        raise ValueError('Word must be a string')
    length = len(word)
    if length == 0:
        raise ValueError('I still need convincing that empty string '
                         'is a palindrome, Dan')
    if length == 1:
        return True
    lowered = word.lower()
    half = length // 2
    # The slice bounds skip the middle character when the length is odd.
    return lowered[:half] == lowered[length - half:][::-1]
def sort_peaks(peaks):
    """Return *peaks* sorted by descending 'score' field."""
    return sorted(peaks, key=lambda peak: peak['score'], reverse=True)
def decode(number: str) -> bool:
    """Return whether *number* is a valid code, i.e. its int value is 1..26."""
    return 1 <= int(number) <= 26
def str2bool(v):
    """Bodge around strange handling of boolean values in ArgumentParser:
    interpret 'yes', 'true', 't' and '1' (any case) as True."""
    truthy = {'yes', 'true', 't', '1'}
    return v.lower() in truthy
def t(s):
    """Normalize Windows (CRLF) line endings to Unix (LF)."""
    return "\n".join(s.split("\r\n"))
def url(anchor, uri):
    """Return a Markdown inline link for *anchor* pointing at *uri*."""
    return "[{}]({})".format(anchor, uri)
def HexToByte(hexStr):
    """Convert a string of hex byte values into a byte string.

    The hex byte values may or may not be space separated.
    """
    cleaned = hexStr.replace(" ", "")
    return ''.join(chr(int(cleaned[i:i + 2], 16))
                   for i in range(0, len(cleaned), 2))
def adder(x, y):
    """Add two numbers together.

    >>> adder(3,5)
    8
    >>> adder(-1,50)
    49

    Fixed: the implementation returned ``x + y + 1``, contradicting its
    own doctests.
    """
    return x + y
def pair(ratio):
    """Format a pair of numbers as "x y" so JavaScript can read them in an
    attribute; *ratio* must be a 2-tuple."""
    template = "%s %s"
    return template % ratio
def shorten_replication_tasks(replication_tasks):
    """Return only the relevant fields of each replication task object."""
    wanted_keys = ("ReplicationTaskIdentifier", "Status", "ReplicationTaskArn")
    return [{key: task[key] for key in wanted_keys} for task in replication_tasks]
def should_filter(target, stem_mapping, filtered_phrases):
    """Determine if a noun phrase should be excluded from the tags list.

    Fixed: a target missing from ``stem_mapping`` previously raised
    KeyError (unless the raw target was already filtered); it now falls
    back to the target itself.

    @param target: The noun phrase in question.
    @type target: basestring
    @param stem_mapping: Renaming of noun phrases through which target
        should be translated before being tested for filtering.
    @type stem_mapping: dict (str to str)
    @param filtered_phrases: List of noun phrases to exclude.
    @type filtered_phrases: list of str
    """
    if target in filtered_phrases:
        return True
    return stem_mapping.get(target, target) in filtered_phrases
def energy_emc(mass, speedoflight):
    """E = m * c**2.

    Usage: energy_emc(mass of object, speed of light) — you can use the
    constant light_speed.
    """
    return mass * speedoflight ** 2
def email_blacklist_offender(offender_d):
    """Offender part of a new/deleted blacklist email.

    Optional fields (hostname, ASN) are only included when truthy.
    """
    lines = ["Offender details:\n"]
    lines.append("* address: %s\n" % offender_d['address'])
    lines.append("* cidr: %s\n" % offender_d['cidr'])
    lines.append("* score: %s\n" % offender_d['score'])
    if offender_d['hostname']:
        lines.append("* hostname: %s\n" % offender_d['hostname'])
    if offender_d['asn']:
        lines.append("* ASN: %s\n" % offender_d['asn'])
    lines.append("* created_date: %s\n" % offender_d['created_date'])
    lines.append("* updated_date: %s\n" % offender_d['updated_date'])
    lines.append("\n\n")
    return "".join(lines)
def unpad(string):
    """Remove block-cipher padding: the last character's code point gives
    the pad length to strip."""
    pad_length = ord(string[-1:])
    return string[:-pad_length]
def reconstruct_full_path(entry):
    """Create a unique string representation of a PathSpec object, starting
    at the root of the dfvfs input object.

    :param entry: A dfvfs path_spec object
    :return: [str] Representation of the object's location as file path,
        or None for a falsy entry
    """
    if not entry:
        return None
    path = ''
    node = entry
    while node:
        if getattr(node, 'parent', None) is None:
            # Skip the outermost level: it is the storage path on the
            # evidence store, which has no relevance here.
            break
        segment = getattr(node, 'location', None)
        if segment is None:
            segment = '/' + getattr(node, 'type_indicator', '')
        path = segment + path
        node = getattr(node, 'parent', None)
    return path.replace('\\', '/').rstrip('/')
def calculate_max_power(panel_array): """ Returns the maximal product of positive and (odd) negative numbers.""" # Edge case 0: no panels :] if (len(panel_array) == 0): return 0 # Get positive panels positive_panels = list(filter(lambda x: x >0 , panel_array)) #print("positive_panels=", positive_panels) positive_product = 1 for x in positive_panels: positive_product *= x # Get negative panels. negative_panels = sorted(list(filter(lambda x: x <0 , panel_array))) # Edge case I: there is only one "negative panel". if (len(negative_panels) == 1) and (len(positive_panels) == 0): return negative_panels[0] # Get zero panels. zero_panels = sorted(list(filter(lambda x: x == 0 , panel_array))) # Edge case II: no positive panels. if (len(zero_panels) == len(panel_array)): return 0 # Check number of negative panels. if len(negative_panels) % 2 != 0: # Remove smallest. negative_panels.pop() #print("negative_panels=", negative_panels) negative_product = 1 for x in negative_panels: negative_product *= x # Return product of those two. return negative_product * positive_product
def troll_troll_name(results):
    """Retrieve the troll name from an item: the value of its 'by' field."""
    return results['by']
def format_satoshis_plain_nofloat(x, decimal_point=8):
    """Display a satoshi amount scaled.

    Always uses '.' as the decimal point and no thousands separator.
    Uses only integer/string arithmetic internally, so no rounding ever
    occurs.
    """
    x = int(x)
    digits = str(abs(x))
    if decimal_point > 0:
        whole = digits[:-decimal_point]
        # Left-pad with zeros, then snip trailing zeros off the fraction.
        frac = digits[-decimal_point:].rjust(decimal_point, '0').rstrip('0')
    else:
        whole, frac = digits, ''
    whole = whole or '0'
    if x < 0:
        whole = '-' + whole
    return whole + '.' + frac if frac else whole
def pct_format(x, y):
    """Return x/y as a percentage string with two decimal places."""
    percent = x / y * 100
    return "{:.2f} %".format(percent)
def has_same_digits(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have the same frequency of every digit.

    digits[] is a frequency table where the index represents the digit
    0-9: increment for each digit of num1, decrement for each digit of
    num2; every entry must end at zero.

    Fixed: the original loop stopped as soon as EITHER number was
    exhausted, so leftover digits of the longer number were never counted
    (e.g. has_same_digits(1, 11) wrongly returned True).

    >>> has_same_digits(123456789, 987654321)
    True
    >>> has_same_digits(123, 12)
    False
    >>> has_same_digits(1234566, 123456)
    False
    """
    digits = [0] * 10
    while num1 > 0 or num2 > 0:
        if num1 > 0:
            digits[num1 % 10] += 1
            num1 //= 10
        if num2 > 0:
            digits[num2 % 10] -= 1
            num2 //= 10
    return all(count == 0 for count in digits)
def format_msg(wiki_link: str):
    """Return html formatted email content.

    :param wiki_link: URL of the article; used as both the href and the
        visible link text.
    :return: str, a minimal standalone HTML document
    """
    # The f-string interpolates wiki_link twice (href and anchor text).
    contents = f''' <!DOCTYPE html> <html> <body> <div style="text-align:center;"> <h1>Your Weekly Article:</h1> <a href="{wiki_link}">{wiki_link}</a> </div> </body> </html> '''
    return contents
def need_fake_wells(tsclass, well_model):
    """Return whether fake wells are needed: only for the 'fake' well model
    on abstraction or substitution reaction classes."""
    if well_model != 'fake':
        return False
    return 'abstraction' in tsclass or 'substitution' in tsclass
def folders_paths(paths):
    """Return only the directories from a list of path objects."""
    return list(filter(lambda path: path.is_dir(), paths))
def ax_in(ma, ga):
    """Bundle axion parameters into a dictionary.

    Parameters
    ----------
    ma : axion mass [eV]
    ga : axion-photon coupling [GeV^-1]
    """
    return {'ma': ma, 'ga': ga}
def _occupation_set(index): """The bits whose parity stores the occupation of mode `index`.""" indices = set() # For bit manipulation we need to count from 1 rather than 0 index += 1 indices.add(index - 1) parent = index & (index - 1) index -= 1 while index != parent: indices.add(index - 1) # Remove least significant one from index # E.g. 00010100 -> 00010000 index &= index - 1 return indices
def _strip(line): """Line endings variety shall not complicate the parser.""" return line.strip().rstrip('\r')
def generate_autopep8_command(file: str) -> str:
    """
    Generate the autopep8 command for a file.

    Five ``-a`` flags request the most aggressive fixes, ``-i`` edits the
    file in place and ``-v`` enables verbose output.

    Parameters
    ----------
    file : str
        The file to fix.

    Returns
    -------
    str
        The autopep8 command.
    """
    return f"autopep8 {file} -a -a -a -a -a -i -v"
def diff_list(first, second):
    """Return the elements of *first* not present in *second*.

    Order and duplicates of *first* are preserved; *second* is converted to
    a set once for O(1) membership tests.
    """
    exclude = set(second)
    result = []
    for item in first:
        if item not in exclude:
            result.append(item)
    return result
def stop_list_to_link_list(stop_list):
    """Turn a stop sequence into consecutive links.

    [a, b, c, d] -> [(a,b), (b,c), (c,d)]
    """
    return [(stop_list[i], stop_list[i + 1]) for i in range(len(stop_list) - 1)]
def findByRef(ref="", dataset=None):
    """Check whether *ref* occurs in *dataset*.

    Parameters:
        ref: the reference value to look up.
        dataset: iterable of references to search; defaults to empty.

    Returns:
        dict: {"result": "IN"} if found, {"result": "OUT"} if not,
        {"result": "error1"} when no ref was supplied,
        {"result": "error2"} when no dataset was supplied.
    """
    if ref == "":
        # logger.info("No Reference Supplied to findByRef function")
        return {"result": "error1"}
    # The original used a mutable default argument (dataset=[]); a None
    # sentinel avoids the shared-default pitfall while keeping the same
    # observable behavior for list inputs.
    if dataset is None:
        dataset = []
    if dataset == []:
        # logger.info("No Dataset Supplied to findByRef function")
        return {"result": "error2"}
    if ref in dataset:
        return {"result": "IN"}
    return {"result": "OUT"}
def cast_int(str):
    """Convert a value to its integer value, or None on failure.

    Args:
        str: the value to convert (NOTE(review): the parameter name shadows
            the ``str`` builtin; kept unchanged for call compatibility).

    Returns:
        The integer value, or None if conversion is impossible.
    """
    try:
        return int(str)
    except (ValueError, TypeError):
        # Narrowed from a bare ``except:`` so that unrelated exceptions
        # (e.g. KeyboardInterrupt) are no longer silently swallowed.
        return None
def sequences_add_end_id(sequences, end_id=888): """Add special end token(id) in the end of each sequence. Parameters ----------- sequences : list of list of int All sequences where each row is a sequence. end_id : int The end ID. Returns ---------- list of list of int The processed sequences. Examples --------- >>> sequences = [[1,2,3],[4,5,6,7]] >>> print(sequences_add_end_id(sequences, end_id=999)) [[1, 2, 3, 999], [4, 5, 6, 999]] """ sequences_out = [[] for _ in range(len(sequences))] #[[]] * len(sequences) for i, _ in enumerate(sequences): sequences_out[i] = sequences[i] + [end_id] return sequences_out
def xTrans(thing, transforms):
    """Apply a set of transformations to a thing, in order.

    :args:
        - thing: string; if None, then no processing will take place.
        - transforms: iterable of single-argument callables, applied left
          to right.

    Returns transformed thing (or None when thing is None).
    """
    if thing is None:
        return None
    for transform in transforms:
        thing = transform(thing)
    return thing
def get_ev(ev, keys=None, ikpt=1):
    """Get the correct list of the energies for this eigenvalue.

    Parameters
    ----------
    ev : dict
        One eigenvalue entry.  With ``keys=None`` the fields 'e' (energy),
        's' (spin) and 'k' (k-point index) are consulted; otherwise the
        first key of *keys* present in ``ev`` wins.
    keys : list or None
        Explicit keys to try instead of the e/s/k logic.
    ikpt : int
        Requested k-point index (1-based); entries without a 'k' field
        only match the first k-point.

    Returns
    -------
    list or False
        ``[ener]`` for spin == 1 or unpolarized, ``[None, ener]`` for
        spin == -1 (energy in the spin-down slot), the (listified) value
        of a matched key, or False when nothing matches.
    """
    res = False
    if keys is None:
        ener = ev.get('e')
        spin = ev.get('s')
        kpt = ev.get('k')
        # A missing/zero 'k' field counts as a match only for ikpt == 1.
        if not kpt and ikpt == 1:
            kpt = True
        # An explicit k-point that differs from the requested one disqualifies.
        elif kpt and kpt != ikpt:
            kpt = False
        # NOTE(review): truthiness tests mean ener == 0 (or 0.0) is treated as
        # "no energy" and spin == 0 as "unpolarized" — confirm 0 is never a
        # valid energy here.
        if ener and (spin == 1 or not spin):
            if kpt:
                res = [ener]
        elif ener and spin == -1:
            if kpt:
                res = [None, ener]
    else:
        # Explicit-keys mode: return the first matching key's value, always
        # wrapped in a list.
        for k in keys:
            if k in ev:
                res = ev[k]
                if not isinstance(res, list):  # type(res) != type([]):
                    res = [res]
                break
    return res
def key2num(key):
    """Translate a MIDI key name to a number.

    Major keys map to 0-11 (pitch class), minor keys to 20-31, and the
    sentinel "(null)" maps to -1.  Raises KeyError for unknown names.

    Note: the lookup table was renamed so it no longer shadows this
    function's own name, and the evidently misspelled ``"minB#"`` entry is
    kept for backward compatibility alongside the corrected ``"B#min"``.
    """
    mapping = {"C": 0, "Db": 1, "D": 2, "Eb": 3, "E": 4, "F": 5,
               "Gb": 6, "G": 7, "Ab": 8, "A": 9, "Bb": 10, "B": 11,
               "Cb": 11, "C#": 1, "D#": 3, "F#": 6, "G#": 8, "A#": 10,
               "B#": 0,
               "Cmin": 20, "Dbmin": 21, "Dmin": 22, "Ebmin": 23,
               "Emin": 24, "Fmin": 25, "Gbmin": 26, "Gmin": 27,
               "Abmin": 28, "Amin": 29, "Bbmin": 30, "Bmin": 31,
               "Cbmin": 31, "C#min": 21, "D#min": 23, "F#min": 26,
               "G#min": 28, "A#min": 30,
               "minB#": 20,  # original (misspelled) entry, kept for compat
               "B#min": 20,  # corrected spelling (B# is enharmonic C)
               "(null)": -1}
    return mapping[key]
def getCasing(word):
    """Classify the casing of a word.

    Returns one of: 'numeric' (all digits), 'allLower', 'allUpper',
    'initialUpper' (title-style), or 'other' (empty string or mixed case).
    """
    if not word:
        return 'other'
    if word.isdigit():
        return 'numeric'
    if word.islower():
        return 'allLower'
    if word.isupper():
        return 'allUpper'
    if word[0].isupper():
        return 'initialUpper'
    return 'other'
def name_to_number(name):
    """Convert a rock-paper-scissors-lizard-Spock name to its number.

    Returns 0-4 for a recognised name; any other name yields the string
    "Not a valid choice" (matching the original error behavior).
    """
    numbers = {"rock": 0, "Spock": 1, "paper": 2, "lizard": 3, "scissors": 4}
    return numbers.get(name, "Not a valid choice")
def sol(arr, m, n):
    """Sliding-window scan over arr[0:n].

    Returns the length of the longest window containing at most m zeros
    (i.e. the longest run of ones achievable by flipping at most m zeros).
    """
    left = 0
    right = 0
    zeros = 0
    best = 0
    while right < n:
        # Grow the window rightwards; the zero budget is allowed to
        # overshoot m by exactly one before the shrink step kicks in.
        if zeros <= m:
            if arr[right] == 0:
                zeros += 1
            right += 1
        # Over budget: drop one element from the left (reclaiming a zero
        # if that's what we dropped).
        if zeros > m:
            if arr[left] == 0:
                zeros -= 1
            left += 1
        # Track the widest window seen while sliding.
        best = max(best, right - left)
    return best
def isValid(text):
    """Return True if input is related to the time.

    Arguments:
    text -- user-input, typically transcribed speech
    """
    # Lowercase once, then test against both accepted phrasings.
    return text.lower() in ("what time is it", "what is the time")
def get_host_latency (host_url) :
    """ This call measures the base tcp latency for a connection to the
        target host.  Note that port 22 is used for connection tests,
        unless the URL explicitly specifies a different port.  If the
        used port is blocked, the returned latency can be wrong by
        orders of magnitude.

        NOTE(review): the measurement below is currently disabled — the
        early ``return 0.25`` makes everything after it dead code, so the
        function always reports a constant 250 ms.
    """

    try :
        # FIXME see comments to #62bebc9 -- this breaks for some cases, or is at
        # least annoying.  Thus we disable latency checking for the time being,
        # and return a constant assumed latency of 250ms (which approximately
        # represents a random WAN link).
        return 0.25

        # ---- dead code below: kept for when latency checking is re-enabled ----
        # Per-process memoization of measured latencies, keyed by URL.
        global _latencies
        if host_url in _latencies :
            return _latencies[host_url]

        # Parse the URL; fall back to localhost:22 when host/port are absent.
        u = saga.Url (host_url)
        if u.host :
            host = u.host
        else :
            host = 'localhost'
        if u.port :
            port = u.port
        else :
            port = 22  # FIXME: we should guess by protocol

        import socket
        import time

        # ensure host is valid
        ip = socket.gethostbyname (host)

        # Time a full TCP connect/shutdown round trip as the latency proxy.
        start = time.time ()
        s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
        s.connect ((host, port))
        s.shutdown (socket.SHUT_RDWR)
        stop = time.time ()

        latency = stop - start
        _latencies[host_url] = latency
        return latency

    except :
        # Bare re-raise: preserves the original exception unchanged.
        raise
def get_download_url_path_for_minecraft_lib(descriptor):
    """
    Gets the URL path for a library based on its Maven-style descriptor.

    The descriptor has the form ``domain:name:version[:classifier][@ext]``;
    the extension defaults to "jar" when no ``@ext`` suffix is present.

    :param descriptor: string, e.g. "com.typesafe.akka:akka-actor_2.11:2.3.3"
    :return: string path, e.g.
        "com/typesafe/akka/akka-actor_2.11/2.3.3/akka-actor_2.11-2.3.3.jar"
    """
    ext = "jar"
    pts = descriptor.split(":")
    domain = pts[0]
    name = pts[1]
    last = len(pts) - 1
    if "@" in pts[last]:
        idx = pts[last].index("@")
        ext = pts[last][idx + 1:]
        # BUG FIX: the slice was [0:idx+1], which kept the '@' character and
        # leaked it into the version/classifier (and hence the final path).
        pts[last] = pts[last][0:idx]
    version = pts[2]
    classifier = pts[3] if len(pts) > 3 else None
    file = name + "-" + version
    if classifier is not None:
        file += "-" + classifier
    file += "." + ext
    return domain.replace(".", "/") + "/" + name + "/" + version + "/" + file
def get_the_trees(DTA):
    """Split game results into winners- and losers-bracket trees.

    *DTA* is a list of (winner, score, loser, score) tuples.  A game goes
    into the losers tree when its team1 has already lost earlier in the
    list; otherwise it goes into the winners tree.  Each tree maps team1
    to the list of its game tuples, in input order.
    """
    losers = set()
    winner_tree = {}
    loser_tree = {}
    for game in DTA:
        team1, score1, team2, score2 = game
        target = loser_tree if team1 in losers else winner_tree
        losers.add(team2)
        target.setdefault(team1, []).append((team1, score1, team2, score2))
    return winner_tree, loser_tree
def performanceMinCalculator(count, avg, std, maxv, countref, avgref, stdref, maxvref):
    """
    ===========================================================================
    Performance calculator function using the max value (worst case).
    ===========================================================================

    Calculate a performance indicator from reference values; returns None
    when any required reference value is missing.

    **Args**:
        * count : actual number of samples -- (int)
        * avg : actual duration average -- (float)
        * std : actual duration standard deviation -- (float)
        * maxv : actual duration max value -- (float)
        * countref : reference number of samples -- (int)
        * avgref : reference duration average -- (float)
        * stdref : reference duration standard deviation -- (float)
        * maxvref : reference duration max value -- (float)

    **Returns**: performance value indicator in [0-1] -- (float)
    """
    if avgref is None or stdref is None or maxvref is None:
        return None
    # Guard against division blow-up for (near-)zero reference deviations.
    if stdref < 0.01:
        stdref = 0.01
    score = 1 - ((maxv - avgref) / (stdref * 2))
    # Clamp into [0, 1].
    if score > 1:
        return 1
    if score < 0:
        return 0
    return score
def proc_alive(process):
    """Check if process is alive. Return True or False.

    A process object is considered alive while ``poll()`` returns None;
    a missing (falsy) process is reported as not alive.
    """
    if not process:
        return False
    return process.poll() is None