content
stringlengths
42
6.51k
def parse_content_type(content_type, default_charset='utf-8'):
    """Split a Content-Type header value into (media type, charset).

    Falls back to *default_charset* when no charset parameter is present
    or the parameter section is malformed.
    """
    charset = default_charset
    if ';' in content_type:
        media_type, _, param_text = content_type.partition(';')
        content_type = media_type.strip()
        param_text = param_text.strip()
        try:
            pairs = dict(p.strip().split('=') for p in param_text.split(';'))
            charset = pairs['charset']
        except (ValueError, KeyError):
            # KeyError: no charset parameter.  ValueError: malformed
            # parameters (for example a trailing ';').
            pass
    return (content_type, charset)
def index(lst, trunc): """ Converts an n-ary index to a 1-dimensional index. """ return sum([lst[i] * trunc**(len(lst)-i-1) for i in range(len(lst))])
def parse_list_from_string(a_string):
    """Parse a comma separated string into a list of integers.

    Args:
        a_string (str): The string to be parsed

    Returns:
        A list of integers (empty when *a_string* is empty)

    Author: SMM
    Date: 10/01/2018
    """
    if not a_string:
        print("No items found, I am returning and empty list.")
        return_list = []
    else:
        return_list = [int(item) for item in a_string.split(',')]
        print("The parsed string is:")
        print(return_list)
    return return_list
def is_signed_message(message):
    """Returns true if the message contains our unique signature"""
    # NOTE(review): this compares only the FIRST element of `message` to the
    # three-character signature.  That matches only when `message` is a
    # sequence whose first element is the whole string "DLZ" (e.g. a list of
    # tokens); if `message` is a plain string, message[0] is one character
    # and this always returns False — confirm the intended input type.
    UNIQUE_SIGNATURE = "DLZ"
    if message[0] == UNIQUE_SIGNATURE:
        return True
    else:
        return False
def ca2s(cypher_array): """Takes a cipher array and converts it to a string of cipher texts instead of integers""" for i in range(len(cypher_array)): cypher_array[i] = str(cypher_array[i]) return "".join(cypher_array)
def _translate_vif_summary_view(_context, vif):
    """Maps keys for VIF summary view."""
    return {
        'id': vif['id'],
        'mac_address': vif['address'],
        'ip_addresses': vif['ip_addresses'],
    }
def example_function_with_shape(a, b):
    """Example function for unit checks: returns the product of *a* and *b*."""
    return a * b
def normalize_cycleway(shape, props, fid, zoom):
    """Collapse redundant cycleway tags into a single `cycleway` property.

    If `cycleway_left` and `cycleway_right` carry the same value (and do
    not contradict an existing `cycleway`), both are removed and replaced
    by one `cycleway` tag.  A `cycleway_both` tag, when present and no
    plain `cycleway` exists, is normalized into `cycleway` first.
    """
    cycleway = props.get('cycleway')
    left = props.get('cycleway_left')
    right = props.get('cycleway_right')
    both = props.pop('cycleway_both', None)

    # cycleway_both only wins when there is no plain cycleway tag.
    if both and not cycleway:
        cycleway = both
        props['cycleway'] = cycleway

    sides_agree = left and right and left == right
    # Harmless to evaluate when sides_agree is falsy; only used together.
    compatible = not cycleway or left == cycleway
    if sides_agree and compatible:
        props['cycleway'] = left
        del props['cycleway_left']
        del props['cycleway_right']

    return shape, props, fid
def map_pairs(a_list, func):
    """Apply *func* to each adjacent pair of *a_list*.

    For list [1, 2, 3, 4] returns: [func(1, 2), func(2, 3), func(3, 4)].
    Returns [] for empty or single-element lists.  Works only for lists.
    """
    # zip pairs each item with its successor.  This replaces the original
    # repeated tail-slicing (tail = tail[1:]) which copied the remainder of
    # the list on every step, i.e. O(n^2) time and allocation.
    return [func(a, b) for a, b in zip(a_list, a_list[1:])]
def find_codon(codon, seq):
    """Find a specified codon within a given sequence.

    Scans one base at a time and returns the index of the first match,
    or the string 'not found' when the codon never occurs.
    """
    for position in range(len(seq)):
        if seq[position:position + 3] == codon:
            return position
    return 'not found'
def user_info(user): """ Get the serialized version of a user's information. Args: user: The user to get the serialized information of. Returns: A dictionary containing the serialized representation of the given user's information. """ if not user: return None return { "first_name": user.first_name, "image": None, "last_name": user.last_name, }
def strConfig(config): """Stringifies a list of moosicd filetype-player associations. This function converts the list used to store filetype-to-player associations to a string. The "config" parameter is the list to be converted. The return value is a good-looking string that represents the contents of the list of associations. """ s = '' for regex, command in config: s = s + regex.pattern + '\n\t' + ' '.join(command) + '\n' return s
def clean(values): """ Clean up the value convert (100) to -100 """ if "(" in values: values = "-" + values.replace("(", "").replace(")", "") values = values.replace(",", "") try: return float(values) except ValueError: return values
def find_closest_points(some_list, a_number, num_of_points):
    """Return the *num_of_points* values of *some_list* closest to *a_number*.

    some_list -> (list of ints) A list of x or y coordinates
    a_number -> (int) a specific number that will be our base
    num_of_points -> (int) how many numbers we are looking for

    Ties are broken in favour of the smaller coordinate (matching the
    original repeated-min selection).  The result is returned in ascending
    order.  Does not mutate *some_list*; asking for more points than exist
    simply returns all of them.
    """
    # A stable sort by distance over the value-sorted list reproduces the
    # original "take the min, remove it, repeat" selection in O(n log n)
    # instead of O(n * k), and no longer raises when num_of_points exceeds
    # the list length.
    by_distance = sorted(sorted(some_list), key=lambda x: abs(x - a_number))
    return sorted(by_distance[:num_of_points])
def insertion_sort(arr): """ Insertion Sort Complexity: O(n^2) """ for i in range(len(arr)): cursor = arr[i] pos = i while pos > 0 and arr[pos-1] > cursor: # Swap the number down the list arr[pos] = arr[pos-1] pos = pos-1 # Break and do the final swap arr[pos] = cursor return arr
def __assert_data_tuple(data, num):
    """Internal helper to ensure data is a tuple of given `num` length.

    A non-tuple value is first wrapped into a 1-tuple.

    Raises:
        AssertionError: if the (possibly wrapped) tuple is not `num` long.
    """
    if not isinstance(data, tuple):
        data = (data,)
    if len(data) != num:
        # Include the lengths so failures are diagnosable (the original
        # raised a bare AssertionError with no message).
        raise AssertionError(
            'expected a tuple of length %d, got length %d' % (num, len(data)))
    return data
def gen_list_of_lists(original_list, new_structure):
    """
    Generates a list of lists with a given structure from a given list.

    Parameters
    ----------
    original_list : list
        The list to make into a list of lists.
    new_structure : list of ints
        The desired length of each sub-list.

    Returns
    -------
    list_of_lists : list of lists
        The original list with elements organized with the given structure.
    """
    assert len(original_list) == sum(
        new_structure
    ), "The number of elements in the original list and desired structure don't match."
    # Walk a single cursor through the flat list instead of recomputing
    # sum(new_structure[:j]) for every sub-list, which made the original
    # O(n^2) in the number of sub-lists.
    list_of_lists = []
    start = 0
    for size in new_structure:
        list_of_lists.append(original_list[start:start + size])
        start += size
    return list_of_lists
def segment_diff(s1, s2):
    """Sum of absolute differences between two segments' end points.

    Only perfectly aligned segments return 0.
    """
    start_gap = abs(s1[0] - s2[0])
    end_gap = abs(s1[1] - s2[1])
    return start_gap + end_gap
def get_ssml_string(text, language, font):
    """Pack text into a SSML document.

    Args:
        text: Raw text with SSML tags
        language: Language-code, e.g. de-DE
        font: TTS font, such as KatjaNeural

    Returns:
        ssml: String as SSML XML notation
    """
    voice_name = f"{language}-{font}"
    return (
        '<speak version="1.0" xmlns="https://www.w3.org/2001/10/synthesis" '
        f'xml:lang="en-US"><voice name="{voice_name}">{text}</voice></speak>'
    )
def kinetic_energy(m: float, v: float) -> float:
    """Return the kinetic energy in Joules for mass *m* and velocity *v*."""
    speed_squared = v * v
    return m * speed_squared / 2
def ReadData(offset, arg_type):
    """Emit a READ_DOUBLE or READ_DATA call for pulling a GL function
    argument out of the buffer's operand area."""
    # Double-width GL types get the dedicated reader macro.
    if arg_type in ("GLdouble", "GLclampd"):
        return "READ_DOUBLE(pState, %d)" % offset
    return "READ_DATA(pState, %d, %s)" % (offset, arg_type)
def funct_worker(input_list, pre_text):
    """Worker function: prepend *pre_text* to the space-joined items of
    *input_list* and return the resulting string."""
    return "{}{}".format(pre_text, " ".join(input_list))
def compute_reference_gradient_siemens(duration_ms, bandwidth, csa=0):
    """Compute the reference gradient for exporting RF files to SIEMENS format.

    The reference gradient is defined as the gradient for which a 1 cm slice
    is excited by a 5.12 ms pulse.  Demanding equal slice-selection products
    (thickness * gamma * gradient * duration) for the reference and current
    pulse, and using gamma*curGrad*curThickness = pulse bandwidth, gives:

                             duration_ms[ms] * bandwidth[kHz]
        ref_grad [mT/m] = --------------------------------------------------
                          Gyr[kHz/mT] * ref_thickness[m] * ref_duration[ms]

    Parameters
    ----------
    duration_ms : float
        Duration of the current pulse, in ms.
    bandwidth : float
        Bandwidth of the current pulse, in kHz.
    csa : float, optional
        Chemical shift artifact "immunity", in kHz: the +-range of chemical
        shifts that should feel the pulse equally within the VOI (e.g. 0.1
        for +-100 Hz).  The effective bandwidth is reduced by 2*csa, at the
        price of exciting a larger region.  Set to 0 (default) when unused.

    Returns
    -------
    ref_grad : float
        Reference gradient, in mT/m.
    """
    ref_slice_thickness = 0.01   # meters (1 cm reference slice)
    ref_duration = 5.12          # ms (reference pulse duration)
    gyromagnetic_ratio = 42.57   # kHz/mT

    effective_bandwidth = bandwidth - 2 * csa
    return (effective_bandwidth * duration_ms) / (
        gyromagnetic_ratio * ref_slice_thickness * ref_duration)
def flatten_filesystem(entry, metadata):
    """
    Converts all nested objects from the provided metadata into non-nested
    `field->field-value` dicts representing filesystem metadata changes.

    :param entry: entry associated with the provided metadata
    :param metadata: metadata to flatten; must contain
        metadata['filesystem']['entities'] mapping entity names to info dicts
        with an 'entity_state' key and an optional 'entry' key
    :return: the flattened metadata as a list of dicts
    """
    # A list comprehension replaces the original map() over a lambda —
    # clearer and no intermediate list(map(...)) wrapper.
    return [
        {
            'entity': name,
            'state': info['entity_state'],
            # Fall back to "<entry> <current>" when the entity carries no
            # explicit entry reference.
            'entry': info.get('entry', '{} <current>'.format(entry)),
        }
        for name, info in metadata['filesystem']['entities'].items()
    ]
def dict_create(text): """ Build dictionary with list of words from text """ dict = {'': ['']} for i in range(0, len(text)): if i < len(text) - 2: if text[i] + " " + text[i+1] not in dict: dict[text[i] + " " + text[i+1]] = [text[i+2]] else: dict[text[i] + " " + text[i+1]] = dict[text[i] + " " + text[i+1]] + [text[i+2]] return dict
def rounding(v): """Rounding for pretty plots""" if v > 100: return int(round(v)) elif v > 0 and v < 100: return round(v, 1) elif v >= 0.1 and v < 1: return round(v, 1) elif v >= 0 and v < 0.1: return round(v, 3)
def clean_consonants(text):
    """
    Removes nearby equal consonants if they are more than 2.

    Collapses runs of three or more identical consonants down to two
    (case-insensitively).  Vowels — and 'j'/'w', which are absent from the
    consonant list — always pass through.

    Parameters
    ----------
    text : str

    Returns
    -------
    str
    """
    consonants = ['b','c','d','f','g','h','k','l','m','n','p','q','r','s','t','v','x','y','z']
    new_text = text
    words = text.split()
    for word in words:
        # Always keep the first character (split() never yields '' words).
        new_string = word[0]
        for i in range(1, len(word)):
            if word[i].lower() not in consonants:
                new_string = new_string + word[i]
            else:
                # Keep a consonant that differs from its predecessor...
                if(word[i].lower() != word[i-1].lower()):
                    new_string = new_string + word[i]
                # ...or that matches only its predecessor, not the character
                # two back — i.e. allow doubles, drop the third-and-later of
                # a run.  NOTE(review): the i >= 2 guard means a doubled
                # consonant at positions 0-1 of a word loses its second
                # letter ("llama" -> "lama") — confirm that is intended.
                elif i>=2 and (word[i].lower() != word[i-2].lower()):
                    new_string = new_string + word[i]
        # NOTE(review): str.replace substitutes EVERY occurrence of `word`
        # in the text, including substrings of other words — confirm this
        # is intended.
        new_text = new_text.replace(word, new_string)
    return new_text
def _get_workflow_name(json_spec, workflow_name=None):
    """
    Returns the name of the workflow to be created.  It can be set in the
    json specification or by --destination option supplied with `dx build`.
    The order of precedence is:
    1. --destination, -d option,
    2. 'name' specified in the json file.
    Returns None when neither is provided.
    """
    if workflow_name:
        return workflow_name
    return json_spec.get('name')
def list2csv (l) : """ Converts a list to a string of comma-separated values.""" s = None if isinstance(l,list) : s = str(l[0]) for i in range(1,len(l)) : s += ','+str(l[i]) return s
def same_container(cont1, cont2):
    """
    Return True if cont1 and cont2 are the same containers.  We assume that
    processes that share the same PID are the same container even if their
    name differ.  We assume that files that are located in the same
    directory and share the same inode are the same containers too even if
    their name differ.  In reality this should not be limited to files in
    the same directory but located in the same partition.

    Containers are indexable as (kind, name-or-path, id, ...) — kind is
    'process', 'file' or 'socket'.
    """
    # NOTE(review): partition_list is never used below — presumably left
    # over from an intended same-partition check; confirm before removing.
    partition_list = ["/data", "/system", "/mnt/sdcard", "/sdcard"]
    if (cont1 == cont2):
        return True
    if (cont1[0] == cont2[0]):
        if (cont1[0] == 'process'):
            # Same PID => same container, regardless of name.
            return cont1[2] == cont2[2]
        elif (cont1[0] == 'file') and (cont1[2] == cont2[2]):
            # Same inode: additionally require a matching parent directory.
            s1 = cont1[1].split("/")
            s2 = cont2[1].split("/")
            if len(s1) == len (s2):
                i = 0
                equal = True
                # NOTE(review): the bound len(s1) - 2 compares neither the
                # last component (the filename) nor the one before it —
                # confirm skipping the second-to-last directory component
                # is intended and not an off-by-one.
                while equal and (i < (len(s1) - 2)):
                    if not (s1[i] == s2[i]):
                        equal = False
                    i += 1
                if equal:
                    return True
        elif (cont1[0] == 'socket') :
            # Sockets match purely on their identifier.
            return cont1[1] == cont2[1]
    return False
def length(b): """ Returns the length of the first netstring in the provided bytes object without decoding it. WARNING: This function doesn't check for netstring validity. """ try: return int(b[:b.find(b':')].decode('ascii')) except: raise ValueError
def guess_service_info_from_path(spec_path):
    """Guess Python Autorest options based on the spec path.

    Expected path: specification/compute/resource-manager/readme.md
    """
    lowered = spec_path.lower()
    # index() might raise ValueError and it's ok.
    trimmed = lowered[lowered.index("specification"):]
    parts = trimmed.split("/")
    return {
        "rp_name": parts[1],
        "is_arm": parts[2] == "resource-manager",
    }
def b2tc(number: int): """ Funcion devuelve el complemento de un numero entero el cual se define como "inversion de todos los bits". :param number: numero entero :type number: int :return: cadena conforme a resultado inverso de bit (XOR) :rtype: str """ b2int = int(bin(number)[2:]) # [2:] excluir `0b` xor = ['0' if b == '1' else '1' for b in str(b2int)] # comprehension list return ''.join(xor)
def transform_from_local(xp, yp, cphi, sphi, mx, my):
    """Transform from the local frame to absolute space.

    Rotates (xp, yp) by the angle whose cosine/sine are (cphi, sphi), then
    translates by (mx, my).
    """
    x_abs = xp * cphi - yp * sphi + mx
    y_abs = xp * sphi + yp * cphi + my
    return (x_abs, y_abs)
def revert_graph(G): """Returns a reverted version of the graph, where all the edges are in the opposite direction""" rev = [[] for _ in range(len(G))] for v, neighbors in enumerate(G): for w in neighbors: rev[w].append(v) return rev
def parseMemory(memAttribute: str) -> float:
    """
    Returns EC2 'memory' string as a float.

    Format should always be '#' GiB (example: '244 GiB' or '1,952 GiB').
    Amazon loves to put commas in their numbers, so we have to accommodate
    that.  If the syntax ever changes, this will raise.

    :param memAttribute: EC2 JSON memory param string.
    :return: A float representing memory in GiB.
    """
    parts = memAttribute.replace(',', '').split()
    if parts[1] != 'GiB':
        raise RuntimeError('EC2 JSON format has likely changed. Error parsing memory.')
    return float(parts[0])
def extend(a, b): """ extend :param a: :param b: :return: """ a[0] = min(a[0], b[0]) a[1] = min(a[1], b[1]) a[2] = max(a[2], b[2]) a[3] = max(a[3], b[3]) return a
def list_cycles(grammar, parent, length):
    """Unrestricted: enumerate all walks of *length* nodes starting at
    *parent*, following edges in the *grammar* adjacency mapping, each walk
    returned as the concatenation of its nodes."""
    if length == 1:
        return [parent]
    walks = []
    for child in grammar[parent]:
        for suffix in list_cycles(grammar, child, length - 1):
            walks.append(parent + suffix)
    return walks
def tolist(x): """convert x to a list""" return x if isinstance(x, list) else [x]
def time_key(file): """ :return: 'time' field or None if absent or damaged """ field = file.get('imageMediaMetadata').get('time') if field and len(field) > 5: return field return None
def _fix_docstring_for_sphinx(docstr):
    """
    Remove 8-space indentation from lines of specified :samp:`{docstr}`
    string.
    """
    prefix = " " * 8
    stripped = [
        line[8:] if line.startswith(prefix) else line
        for line in docstr.split("\n")
    ]
    return "\n".join(stripped)
def dictionary_merge(a, b):
    """Merge dictionary b into a.

    Like dict.update, but recursive: values that are dicts on both sides
    are merged rather than replaced.  Returns the mutated *a*.
    """
    for key, value in b.items():
        if isinstance(value, dict) and isinstance(a.get(key), dict):
            dictionary_merge(a[key], value)
        else:
            a[key] = value
    return a
def mape(a, p): """Calculate the mean absolute percentage error.""" return abs(p-a) / a
def unhappy_point_lin(list, index):
    """
    Checks if a point is unhappy.  Returns False if happy.

    A point is "happy" when it equals at least one of its neighbours.

    NOTE(review): `list` shadows the builtin; also the first two checks are
    separate `if` statements, so for index == 0 the trailing else branch
    runs as well and compares list[0] with list[-1] (Python wrap-around) —
    i.e. the left neighbour of index 0 is taken from the END of the list,
    while the function name suggests linear behaviour.  Confirm whether
    ring or linear neighbourhood is intended.
    """
    if index == 0:
        if list[index] == list[index + 1]:
            return False
    if index == len(list)-1:
        if list[index] == list[index - 1]:
            return False
    else:
        # Middle (and, see note above, also index 0): happy when equal to
        # either neighbour; the modulo wraps the right neighbour of the
        # last element, but this branch never runs for the last element.
        if list[index] == list[index - 1] or list[index] == list[(index + 1) % len(list)]:
            return False
    return True
def checkSha(firstSha, secondSha):
    """
    Check that the two files that are to be merged have the same sha.

    Returns True when they match; raises ValueError otherwise.
    """
    if firstSha == secondSha:
        return True
    raise ValueError('\n\nCan\'t merge files!\nSha are different!')
def generate_url(internal_id):
    """
    Generate url of article in Dokumentlager.

    @param internal_id: uuid of article
    @type internal_id: string
    @return url to get all data of one article
    """
    return f"https://dokumentlager.nordiskamuseet.se/api/list/{internal_id}/0/500"
def indent(txt): """Indent the given text by 4 spaces.""" lines = ((" " + x) for x in txt.split('\n')) return '\n'.join(lines)
def make_prereq_level_check_formulas(skill, reqskill_level):
    """
    Returns a list of formulas that check if each of the given skill's
    reqskills, if any, have a base level same or greater than
    reqskill_level.

    The list is empty when reqskill_level <= 1 or the skill has no
    reqskills.
    """
    if reqskill_level <= 1:
        # Return an empty list, not "" — the docstring promises a list, and
        # callers iterating the result would otherwise iterate characters.
        return []
    reqskills = (skill["reqskill1"], skill["reqskill2"], skill["reqskill3"])
    # Falsy entries (None, "") mean "no reqskill in this slot".
    return [
        f"skill('{r}'.blvl) >= {reqskill_level}"
        for r in reqskills if r
    ]
def bond_yield(price, face_value, years_to_maturity, coupon=0):
    """Annualized yield implied by buying at *price* and redeeming at
    *face_value* after *years_to_maturity* years.

    NOTE: the *coupon* parameter is currently unused; it is kept for
    interface compatibility.
    """
    growth = face_value / price
    return growth ** (1 / years_to_maturity) - 1
def is_number(x): """ Returns: True if value x is a number; False otherwise. Parameter x: the value to check Precondition: NONE (x can be any value) """ return type(x) in [float, int]
def flat_multi(multidict):
    """
    Flattens any single element lists in a multidict.

    Args:
        multidict: multidict to be flattened.

    Returns:
        dict where every exactly-one-element list value is replaced by its
        sole element; all other values pass through unchanged.
    """
    flattened = {}
    for key, values in multidict.items():
        # Exact type check (not isinstance) preserved from the original.
        if type(values) == list and len(values) == 1:
            flattened[key] = values[0]
        else:
            flattened[key] = values
    return flattened
def get_subscribe_broadcast_messages(received_message, subscription_id, connection_id):
    """
    Return a list of BroadcastMessages to be delivered to other
    connections, possibly connected through redis pub/sub.  Called whenever
    a user subscribes to a topic; currently no broadcasts are produced.
    """
    required = {
        'received_message': received_message,
        'subscription_id': subscription_id,
        'connection_id': connection_id,
    }
    for argument_name, argument in required.items():
        assert argument is not None, f"get_subscribe_broadcast_message requires a {argument_name}"
    return []
def func(x, a, b, c, d, e):
    """Smooth help function: the quartic a*x + b*x^2 + c*x^3 + d*x^4 + e."""
    # Terms are kept in the original multiplication order so float results
    # match bit-for-bit.
    linear = a * x
    quadratic = b * x * x
    cubic = c * x * x * x
    quartic = d * x * x * x * x
    return linear + quadratic + cubic + quartic + e
def radius_curvature(z, zR):
    """Calculate R(z) = z * (1 + (zR/z)^2).

    A tiny epsilon replaces z == 0 to avoid a division by zero / nan —
    this could be smarter.
    """
    if z == 0:
        z = 1e-31
    ratio = zR / z
    return z * (1 + ratio * ratio)
def split_particle_type(decays):
    """
    Separate initial particle, intermediate particles, final particles in a
    decay chain.

    :param decays: iterable of decays, each exposing a `.core` particle and
        an `.outs` iterable of outgoing particles
    :return: (top, inner, outs) — sets of initial, intermediate and final
        particles respectively
    """
    cores = set()
    outgoing = set()
    for decay in decays:
        cores.add(decay.core)
        outgoing.update(decay.outs)
    # Particles that appear both as a core and as an output are
    # intermediates; the rest split into initial and final.
    inner = cores & outgoing
    return cores - inner, inner, outgoing - inner
def init_layout(height, width):
    """Create a height x width grid (list of independent rows) of None."""
    return [[None] * width for _ in range(height)]
def expected_value_test(context, value: str):
    """
    Check if the context contain the expected value

    :param str context: The context
    :param str value: The expected value to find
    """
    # Values like "context.foo.bar.0" are resolved against the context
    # object: attribute lookup for the first segment after 'context', then
    # item lookups for the rest (numeric segments become int indices).
    if str(value).__contains__('context'):
        split_value = value.split('.')
        # NOTE(review): calls context.__getattr__ directly — this only
        # works on objects that define __getattr__ themselves; the builtin
        # getattr() would be the conventional way.  Confirm the context
        # type always provides it.
        expected_value = context.__getattr__(split_value[1])
        if len(split_value) > 2:
            for i in range(2, len(split_value)):
                expected_value = expected_value[split_value[i] if not split_value[i].isnumeric() else int(split_value[i])]
        return expected_value
    # NOTE(review): '0<' or anything containing '>' maps to "" —
    # presumably a sentinel for comparison expressions handled by the
    # caller; confirm.
    if str(value) == '0<' or str(value).__contains__('>'):
        return ""
    else:
        return value
def format_time(time_us):
    """Defines how to format time in FunctionEvent: seconds, milliseconds
    or microseconds with three decimals, chosen by magnitude."""
    US_IN_MS = 1000.0
    US_IN_SECOND = US_IN_MS * US_IN_MS
    if time_us >= US_IN_SECOND:
        scaled, unit = time_us / US_IN_SECOND, "s"
    elif time_us >= US_IN_MS:
        scaled, unit = time_us / US_IN_MS, "ms"
    else:
        scaled, unit = time_us, "us"
    return "{:.3f}{}".format(scaled, unit)
def create_source_object(sources):
    """Format the source information as appropriate for the api.

    Splits slash-separated *sources* into a list of
    {source-name, id, source-description} dicts; returns None when
    *sources* is falsy.
    """
    if not sources:
        return None
    return [
        {"source-name": name, "id": position, "source-description": ""}
        for position, name in enumerate(sources.split("/"))
    ]
def is_eyr_valid(eyr: str) -> bool:
    """Expiration Year: valid when it parses to 2020..2030 inclusive."""
    year = int(eyr)
    return 2020 <= year <= 2030
def xml_escape(s): """Escapes for XML. """ return (("%s" % s).replace('&', '&amp;').replace('"', '&quot;') .replace('<', '&lg;').replace('>', '&gt;'))
def foundSolution(solver_result):
    """Check if a solution was found: the solver output must contain
    neither "Valid" nor "unsat"."""
    markers = ("Valid", "unsat")
    return not any(marker in solver_result for marker in markers)
def convert_class_functions(line):
    """Convert class initializer functions to the corresponding variable."""
    first_paren = line.find('(')
    if first_paren == -1:
        return line
    if "initContainer" in line:
        # Keep only the first argument of the initContainer(...) call.
        argument = line[first_paren + 1:]
        first_comma = argument.find(',')
        if first_comma != -1:
            argument = argument[:first_comma]
        return argument.strip()
    # Calls that translate to nothing at all.
    for discard in ("->", "initVectors", "ASBase::init", "initStatic",
                    "initTempStacksContainer"):
        if discard in line:
            return ''
    # Setter-call -> variable-name table; checked in order, preserving the
    # original elif precedence (e.g. "setBracketIndentVtk" before
    # "setBracketIndent").
    setter_to_variable = (
        ("setSpaceIndentation", "indentLength"),
        ("setMinConditionalIndentOption", "minConditionalOption"),
        ("setMaxInStatementIndentLength", "maxInStatementIndent"),
        ("setClassIndent", "classIndent"),
        ("setModifierIndent", "modifierIndent"),
        ("setSwitchIndent", "switchIndent"),
        ("setCaseIndent", "caseIndent"),
        ("setBlockIndent", "blockIndent"),
        ("setBracketIndentVtk", "bracketIndentVtk"),
        ("setBracketIndent", "bracketIndent"),
        ("setNamespaceIndent", "namespaceIndent"),
        ("setLabelIndent", "labelIndent"),
        ("setEmptyLineFill", "emptyLineFill"),
        ("setCStyle", "fileType"),
        ("setPreprocDefineIndent", "shouldIndentPreprocDefine"),
        ("setPreprocConditionalIndent", "shouldIndentPreprocConditional"),
        ("setAlignMethodColon", "shouldAlignMethodColon"),
    )
    for setter, variable in setter_to_variable:
        if setter in line:
            return variable
    return "unidentified function: " + line
def rm_dsstore_list(list):
    """
    ** Intended for macOS users who sorted files in Finder in any special way **
    Removes the directories (strings) from an input list that contain
    .DS_Store in the string (therefore removes the directory to .DS_Store
    files).

    The filtering happens in place, so callers holding a reference to the
    input list see the change; the same list object is returned.

    :param: list of directories
    :return: list of directories without directories to any .DS_Store files
    """
    # BUG FIX: the original removed items while iterating the same list,
    # which skips the element after each removal — consecutive .DS_Store
    # entries survived.  Rebuild the contents via slice assignment instead.
    list[:] = [directory for directory in list if ".DS_Store" not in directory]
    return list
def all_subsets(aset): """Solution to exercise C-4.15. Write a recursive function that will output all the subsets of a set of n elements (without repeating any subsets). -------------------------------------------------------------------------- Solution: -------------------------------------------------------------------------- I've made the following assumptions: 1. The input is a list of unique numbers 2. The set itself is considered a subset (not a proper subset) 3. The empty set is considered a subset """ def recurse(alist): if not alist: return [[]] # Base case, return empty set prev_lists = recurse(alist[1:]) return prev_lists + [[alist[0]] + y for y in prev_lists] return recurse(aset)
def _flip_bits_256(input: int) -> int:
    """Flips 256 bits worth of `input` (XOR with a 256-bit all-ones mask)."""
    mask = (1 << 256) - 1
    return input ^ mask
def labelit(varname, vardict):
    """Return the variable label or, if none, the variable name.

    varname is the variable to label. If none, return ""
    vardict is a VariableDict object
    """
    if not varname:
        return ""
    label = vardict[varname].VariableLabel
    return label if label else varname
def average_over_dictionary(mydict):
    """Return the arithmetic mean of the dictionary's values.

    Raises ZeroDivisionError on an empty dict (unchanged behaviour).
    """
    # sum() takes the values view directly; the original list comprehension
    # over mydict.values() was a redundant copy.
    return sum(mydict.values()) / len(mydict)
def bilinear_interpolation(n1, n2, n3, n4, x, y):
    """
    Bilinear interpolation of value for point of interest (P).

    :param n1: value at node 1
    :param n2: value at node 2
    :param n3: value at node 3
    :param n4: value at node 4
    :param x: interpolation scale factor for x axis
    :param y: interpolation scale factor for y axis
    :return: value at node P
    """
    # Coefficients of p = a0 + a1*x + a2*y + a3*x*y; the differences are
    # rounded to 3 decimals as in the original implementation.
    coeff_x = round(n2 - n1, 3)
    coeff_y = round(n3 - n1, 3)
    coeff_xy = round(n1 + n4 - n2 - n3, 3)
    return n1 + (coeff_x * x) + (coeff_y * y) + (coeff_xy * x * y)
def compare(lst1: list, lst2: list) -> list:
    """
    Compares list's items to each other using OR operator.
    Saves the results of compairson to a new list and returns it.

    >>> compare([0, 0, 0], [1, 0, 1])
    [1, 0, 1]
    >>> compare([0, 0, 0], [1, 1, 1])
    [1, 1, 1]
    """
    # Indexed by lst1's length, matching the original (lst2 must be at
    # least as long).
    return [lst1[i] or lst2[i] for i in range(len(lst1))]
def _get_lemma_from_mor(mor):
    """Extract lemma from ``mor``: everything before the first '-' or '&'.

    Parameters
    ----------
    mor : str

    Returns
    -------
    str
    """
    lemma = mor.partition("-")[0]
    return lemma.partition("&")[0]
def is_timefrequency_frequency(timefrequency):
    """Return bool of whether input is TimefrequencyFrequency, i.e. a plain
    int or float (note: bools also pass, being int subclasses)."""
    return isinstance(timefrequency, (int, float))
def data_override(parameter_data, override):
    """Override parameter values with specified values.

    Copies every key of *override* into *parameter_data* (which is mutated
    in place) and returns it.
    """
    for key in override:
        parameter_data[key] = override[key]
    return parameter_data
def find_boyer_moore(T, P):
    """Return the index of first occurance of P; otherwise, returns -1.

    Boyer-Moore string search with the bad-character heuristic only:
    P is compared against T right-to-left, and on a mismatch P is shifted
    using the last-occurrence table.
    """
    n, m = len(T), len(P)
    if m == 0:
        # Empty pattern matches at position 0 by convention.
        return 0
    # last[c] = index of the LAST occurrence of character c in P
    # (later duplicates overwrite earlier ones).
    last = {k: i for i, k in enumerate(P)}
    # i indexes T, k indexes P; both start at the right end of the first
    # alignment window.
    i = k = m - 1
    while i < n:
        if T[i] == P[k]:
            if k == 0:
                return i  # matched all of P; i is the leftmost match index
            i -= 1
            k -= 1
        else:
            # Bad-character shift: realign the last occurrence of T[i]
            # within P under position i (j == -1 when T[i] is not in P,
            # shifting the whole pattern past it).
            j = last.get(T[i], -1)
            i += m - min(k, j + 1)
            k = m - 1
    return -1
def sequence_sampling_probability(sequence_list):
    """Calculate genome size and the relative sizes of the sequences in the
    multi-fasta file.  Relative sizes are further used as probabilities for
    random sampling.

    :param sequence_list: sequence records where record[1] is the sequence
        length (int or numeric string)
    :return: (probabilities, genome_size) — per-sequence fractions of the
        total, and the total length
    """
    # sum() over a generator replaces the manual accumulation loop.
    genome_size = sum(int(record[1]) for record in sequence_list)
    # Size fraction of each sequence (empty input yields ([], 0), as
    # before — the comprehension never divides).
    probabilities = [int(record[1]) / genome_size for record in sequence_list]
    return (probabilities, genome_size)
def _is_safe_type(value):
    """These are types which aren't exploitable."""
    return value is None or isinstance(value, (int, bool))
def sub(keys, d): """ Create a new dict containing only a subset of the items of an existing dict. @param keys: An iterable of the keys which will be added (with values from C{d}) to the result. @param d: The existing L{dict} from which to copy items. @return: The new L{dict} with keys given by C{keys} and values given by the corresponding values in C{d}. @rtype: L{dict} """ return dict([(k, d[k]) for k in keys])
def _sgn(x): """ Returns sign(x). :param x: Number :type x: float, int :return: 1, 0, -1 :rtype: int """ if x > 0: return 1 elif x == 0: return 0 else: return -1
def is_collection(name): """compare with https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/user""" return name in [ 'assignedLicenses', 'assignedPlans', 'businessPhones', 'imAddresses', 'interests', 'provisionedPlans', 'proxyAddresses', 'responsibilities', 'schools', 'skills' ]
def get_file_extension(path):
    """Return the lower-cased file extension of *path*, or "" if none."""
    _, dot, extension = path.rpartition(".")
    return extension.lower() if dot else ""
def simple2string(x): """ Simple objects (bytes, bool, float, int, None, str) are converted to string and returned. Other types are returned as None. """ if isinstance(x, bytes) or \ isinstance(x, bool ) or \ isinstance(x, float) or \ isinstance(x, int ) or \ x is None or \ isinstance(x, str ): return str(x) else: return None
def nitro_tz(tz_id): """Maps McAfee SIEM/Nitro ESM internal timezone IDs to the tz database at: http://web.cs.ucla.edu/~eggert/tz/tz-link.htm Arguments: - `tz_id` (`int`): McAfee ESM internal timezone ID Returns: `str`: timezone name """ tz_map = { 1: "Pacific/Pago_Pago", 2: "Pacific/Honolulu", 3: "America/Anchorage", 4: "America/Los_Angeles", 5: "America/Phoenix", 6: "America/Chihuahua", 7: "America/Denver", 8: "America/Guatemala", 9: "America/Chicago", 10: "America/Mexico_City", 11: "America/Regina", 12: "America/Bogota", 13: "America/New_York", 14: "America/Indiana/Indianapolis", 15: "America/Halifax", 16: "America/Caracas", 17: "America/Santiago", 18: "America/St_Johns", 19: "America/Sao_Paulo", 20: "America/Buenos_Aires", 21: "America/Godthab", 22: "Atlantic/South_Georgia", 23: "Atlantic/Azores", 24: "Atlantic/Cape_Verde", 25: "Africa/Casablanca", 26: "Etc/UTC", 27: "Europe/Amsterdam", 28: "Europe/Belgrade", 29: "Europe/Brussels", 30: "Europe/Warsaw", 31: "Africa/Tripoli", 32: "Europe/Athens", 33: "Europe/Bucharest", 34: "Africa/Cairo", 35: "Africa/Maputo", 36: "Europe/Helsinki", 37: "Asia/Jerusalem", 38: "Asia/Baghdad", 39: "Asia/Riyadh", 40: "Europe/Moscow", 41: "Africa/Nairobi", 42: "Asia/Tehran", 43: "Asia/Dubai", 44: "Asia/Baku", 45: "Asia/Kabul", 46: "Asia/Yekaterinburg", 47: "Asia/Karachi", 48: "Asia/Kolkata", 49: "Asia/Kathmandu", 50: "Asia/Almaty", 51: "Asia/Dhaka", 52: "Asia/Colombo", 53: "Asia/Rangoon", 54: "Asia/Bangkok", 55: "Asia/Krasnoyarsk", 56: "Asia/Shanghai", 57: "Asia/Irkutsk", 58: "Asia/Singapore", 59: "Australia/Perth", 60: "Asia/Taipei", 61: "Asia/Tokyo", 62: "Asia/Seoul", 63: "Asia/Yakutsk", 64: "Australia/Adelaide", 65: "Australia/Darwin", 66: "Australia/Brisbane", 67: "Australia/Sydney", 68: "Pacific/Guam", 69: "Australia/Hobart", 70: "Asia/Vladivostok", 71: "Asia/Magadan", 72: "Pacific/Auckland", 73: "Pacific/Fiji", 74: "Pacific/Tongatapu", 75: "Asia/Tbilisi", 76: "Europe/Dublin", 77: "Europe/Istanbul", } return tz_map[tz_id]
def purgewords(i): """ >>> purgewords("the agency of national trust buildings") 'buildings' """ purge = ["ltd", "limited", "company", "", "the", "of", "uk", "nhs", "pct", "foundation", "trust", "national", "department", "dept", "agency", "and", "association", "authority", "co", "metropolitan", "british", "consulting", "group", "services", "systems"] words = i.split(' ') for p in purge: while p in words: words.remove(p) return ' '.join(words)
def _PureShape(shape): """Make sure shape does not contain int tensors by calling int().""" return [int(x) for x in shape]
def merge_strings(str1, str2, minimum_overlap):
    """
    Returns a tuple `(m, i)` where `m` is a string that is the combination of
    str1 and str2 if they overlap, otherwise returns an empty string.

    The two strings are considered to overlap if they are non-empty and one of
    the following conditions apply:
    - one is a substring of the other and it's size >=
      `minimum_overlap * min(len(str1), len(str2))`,
    - the end of one is equal to the beginning of the other and the size of
      the common substring is >= `minimum_overlap * min(len(str1), len(str2))`

    The integer `i` is the offset of str2 relative to str1. For example:

        merge_strings("a black", "black coffee", 0.1) == ("a black coffee", 2)
        merge_strings("black coffee", "a black", 0.1) == ("a black coffee", -2)
        merge_strings("a black coffee", "black", 0.1) == ("a black coffee", 2)
        merge_strings("a black coffee", " with milk", 0.1) == ("", 0)
        merge_strings("a black coffee", " with milk", 0) == ("a black coffee with milk", 14)
        merge_strings("a black coffee", "", 0) == ("", 0)
        merge_strings("a coffee is my first thing in the morning",
                      "morning or evening", 0.25) == ("", 0)
        merge_strings("a coffee is my first thing in the morning",
                      "in the morning", 0.25) == \
            ("a coffee is my first thing in the morning", 27)
    """
    # Empty strings never overlap anything.
    if str1 == "" or str2 == "":
        return ("", 0)
    # Brute force algorithm for a start. Probably inefficient for large
    # sequences.
    # Outline: Start by comparing the end of str1 with the beginning of str2.
    # Shift each sequence towards the other one character at a time. Keep the
    # positions of the longest matching string in both sequences.
    # Best match so far: best_i is the offset of str2 relative to str1,
    # best_s the length of the matched substring. best_i defaults to
    # len(str1) so that with minimum_overlap == 0 (best_s stays 0) the
    # strings are simply concatenated at offset len(str1).
    best_i = len(str1)
    best_s = 0
    # Minimum number of common characters (floored) required to accept an
    # overlap, scaled by the shorter string.
    minimum_overlap_chars = int(minimum_overlap * min(len(str1), len(str2)))
    # i is the number of characters by which to shift the beginning of str2 to
    # the right of the beginning of str1.
    for i in range(len(str1) - 1, -len(str2), -1):
        if i >= 0:
            # str2 starts inside (or at the end of) str1.
            # Current size of compared substrings
            s = min(len(str1) - i, len(str2))
            # Positions of compared substrings in str1 and str2
            start1 = i
            start2 = 0
        else:  # i < 0
            # str2 starts before str1; compare str2's tail with str1's head.
            s = min(len(str2) + i, len(str1))
            start1 = 0
            start2 = -i
        # Keep the first (i.e. right-most offset) longest acceptable match.
        if s >= minimum_overlap_chars \
                and s > best_s \
                and str1[start1: start1 + s] == str2[start2: start2 + s]:
            best_i = i
            best_s = s
    if best_s >= minimum_overlap_chars:
        if best_i >= 0:
            # str1 leads: append the non-overlapping tail of str2.
            return (str1 + str2[best_s:], best_i)
        else:
            # str2 leads: append the non-overlapping tail of str1.
            return (str2 + str1[best_s:], best_i)
    else:
        return ("", 0)
def sign(x):
    """Return the sign of a number: 1 if positive, -1 if negative, 0 if zero.

    The int or float type of `x` is preserved in the return value, e.g.
    `sign(-3) == -1` (an int) while `sign(-3.0) == -1.0` (a float).
    """
    if x > 0:
        result = 1
    elif x < 0:
        result = -1
    else:
        result = 0
    # Re-cast through x's own type so int input yields int, float yields
    # float (the behavior the original docstring asked for).
    return type(x)(result)
def get_book_name(url):
    """
    Gets the formatted book name from the url.

    E.g. http://www.wuxiaworld.com/desolate-era-index/de-book-24-chapter-29/
    will return 'de-book-24'

    Arguments:
        url: A valid chapter url

    Returns:
        A string with the book name (or the joined chapter segments when no
        "book" token is present), or None when the url contains neither a
        "book" nor a "chapter" segment.
    """
    parts = str(url).split("/")
    for part_idx, part in enumerate(parts):
        if "book" in part:
            # Join the remaining path segments and cut everything from the
            # "chapter" token onwards: 'de-book-24-chapter-29' -> 'de-book-24'.
            # (Distinct index variable: the original shadowed `index` here.)
            words = ''.join(parts[part_idx:]).split("-")
            for word_idx, word in enumerate(words):
                if "chapter" in word:
                    return '-'.join(words[:word_idx])
        elif "chapter" in part:
            # No "book" token in this segment: fall back to joining the
            # remaining chapter segments.
            return ''.join(parts[part_idx:])
def receptive_field_size(total_layers, num_cycles, kernel_size,
                         dilation=lambda x: 2**x):
    """Compute receptive field size.

    Args:
        total_layers (int): total layers
        num_cycles (int): cycles
        kernel_size (int): kernel size
        dilation (lambda): lambda to compute dilation factor.
            ``lambda x : 1`` to disable dilated convolution.

    Returns:
        int: receptive field size in sample
    """
    assert total_layers % num_cycles == 0
    layers_per_cycle = total_layers // num_cycles
    # The dilation pattern restarts every cycle; sum the per-layer factors.
    total_dilation = sum(dilation(layer % layers_per_cycle)
                         for layer in range(total_layers))
    return (kernel_size - 1) * total_dilation + 1
def get_terms_and_score_predictions_render(terms_and_score_predictions,
                                           top_n=50):
    """
    From the full collection of terms and scores, select the highest-scoring
    term/score predictions to render.

    :param terms_and_score_predictions: All terms and score predictions — an
        iterable of {term: score} dicts, one per protein sequence prediction
    :param top_n: Number of top-scoring terms to keep per prediction
        (default 50, matching the previously hard-coded limit)
    :return: List of {term: score} dicts limited to the `top_n` highest
        scores, ordered from highest to lowest score
    """
    terms_and_score_predictions_to_render = []
    for term_and_score_dict in terms_and_score_predictions:
        # Rank (term, score) pairs by score, highest first; sorted() is
        # stable so tied scores keep their original relative order.
        ranked_pairs = sorted(term_and_score_dict.items(),
                              key=lambda item: item[1], reverse=True)
        # dict() preserves insertion order, so the rendered mapping stays
        # sorted; slicing replaces the manual enumerate/break-at-50 loop.
        terms_and_score_predictions_to_render.append(dict(ranked_pairs[:top_n]))
    return terms_and_score_predictions_to_render
def matches(s, t, i, j, k):
    """Return True iff s[i:i + k] equals t[j:j + k].

    Compares character by character (rather than slicing) to keep the
    implementation easy to port to other languages.
    """
    return all(s[i + offset] == t[j + offset] for offset in range(k))
def getTableSpaceString(tableSpace):
    """Generate the TABLESPACE predicate of the SQL query.

    Returns " TABLESPACE <name> " for a non-empty tablespace name, or an
    empty string when no tablespace is given.
    """
    # Guard clause: None or empty name means no predicate at all.
    if tableSpace is None or tableSpace == '':
        return ""
    return " TABLESPACE " + tableSpace + " "
def create_partition(elems, equiv_map):
    """Partition a collection of elements into buckets of equivalent elements.

    Two elements e1, e2 are considered equivalent if and only if
    equiv_map[(e1, e2)] == True. Equivalence is applied transitively: merging
    reassigns the unioned bucket to every member, so later pairs chain
    buckets together.

    Returns the list of buckets and a mapping of elements to buckets (the
    mapped buckets are shared set objects).
    """
    elem_to_bucket = {elem: {elem} for elem in elems}
    for (i1, i2), equiv in equiv_map.items():
        if not equiv:
            continue
        merged = elem_to_bucket[i1].union(elem_to_bucket[i2])
        # Point every member at the merged bucket so future unions see it.
        for member in merged:
            elem_to_bucket[member] = merged
    buckets = []
    # A set makes the "already emitted?" check O(1); the original list made
    # this loop accidentally quadratic. Output order is unchanged.
    covered_elems = set()
    for elem, bucket in elem_to_bucket.items():
        if elem in covered_elems:
            continue
        covered_elems.update(bucket)
        buckets.append(list(bucket))
    return buckets, elem_to_bucket
def distinct(l):
    """Return the distinct values of the given iterable as a list.

    Element order in the result is not guaranteed.
    """
    unique_values = set(l)
    return list(unique_values)
def flattenDict(dict):
    """Invert a dictionary of aggregated values.

    Each value found in the aggregated lists becomes a key in the result,
    mapped to the aggregation key it was listed under. If a value appears
    under several keys, the last one (in iteration order) wins.

    NOTE: the parameter name shadows the builtin `dict`; kept for
    backward compatibility with keyword callers.
    """
    flat_dict = {}
    for group, members in dict.items():
        for member in members:
            flat_dict[member] = group
    return flat_dict
def is_in_list(list_or_dict):
    """Normalize a prefix-list "seq" entry to a list.

    When only one seq is present the parser yields a single dictionary;
    with multiple seqs it yields a list of dictionaries. Wrap the lone
    dictionary in a list so callers can always iterate uniformly.
    """
    return list_or_dict if isinstance(list_or_dict, list) else [list_or_dict]
def _list(key: str, vals: dict) -> list: """Get a key from a dictionary of values and ensure it is a list.""" result = vals.get(key, []) if not isinstance(result, list): result = [result] return result
def clean_path(path):
    """Return the last path component with any extension removed.

    The extension is everything from the final dot onward; a path without
    a slash is treated as a bare name.
    """
    name = path.rsplit("/", 1)[-1]
    if "." in name:
        name = name.rsplit(".", 1)[0]
    return name
def reverse(string):
    """Return *string* with its characters in reverse order."""
    return "".join(reversed(string))
def transform_mac_address_to_string_mac_address(string_mac_address):
    r"""
    Transform a MAC address from raw format ("\x00\x11\x22\x33\x44\x55",
    given either as a str of 8-bit characters or as bytes/bytearray) to a
    human readable string ("00:11:22:33:44:55").
    """
    if isinstance(string_mac_address, (bytes, bytearray)):
        # Iterating bytes yields ints directly, so no ord() is needed
        # (calling ord() on an int would raise TypeError).
        return ':'.join('%02x' % b for b in string_mac_address)
    return ':'.join('%02x' % ord(b) for b in string_mac_address)
def fix_unclosed_quotations(summary_content):
    """
    Merge sentences with unclosed quotations into the previous sentence.

    A sentence with an odd number of double quotes is appended to the
    previous accumulated sentence when that one also has an odd quote
    count (i.e. an open quotation), closing it.

    :param summary_content: summary text as a list of sentences
    :return: new list of sentences with unclosed quotations merged
    """
    fixed_content = []
    # str.count replaces the unidiomatic sum([True for ch ...]) tally; the
    # original while-loop advanced by 1 in both branches, so a plain for
    # loop is equivalent.
    for sentence in summary_content:
        if (fixed_content
                and sentence.count('"') % 2 == 1
                and fixed_content[-1].count('"') % 2 == 1):
            # Both the previous sentence and this one have an open quote:
            # glue them together with a single space.
            fixed_content[-1] = fixed_content[-1].rstrip() + " " + sentence.lstrip()
        else:
            fixed_content.append(sentence)
    return fixed_content