content
stringlengths
42
6.51k
def totaler(products):
    """Sum the 'base', 'VAT' and 'total' fields across a list of products.

    :param products: iterable of dicts, each with 'base', 'VAT' and 'total' keys
    :return: dict with the summed 'base', 'VAT' and 'total' values
    """
    totals = {'base': 0, 'VAT': 0, 'total': 0}
    for product in products:
        for key in totals:
            totals[key] += product[key]
    return totals
def argmax(arr):
    """Return the index of the largest element in *arr*.

    Fixes the original initialization of the running maximum to 0, which
    wrongly returned index 0 for lists whose values are all negative.

    :param arr: list of comparable values
    :return: index of the first occurrence of the maximum (0 for an empty list)
    """
    imax = 0
    vmax = None
    for index, value in enumerate(arr):
        if vmax is None or value > vmax:
            vmax = value
            imax = index
    return imax
def escape(x):
    """Backslash-encode a string so it can be embedded in python/go source."""
    # Backslashes must be doubled first, before the other escapes add new ones.
    replacements = (('\\', '\\\\'), ('"', r'\"'), ('\n', r'\n'), ('\t', r'\t'))
    for old, new in replacements:
        x = x.replace(old, new)
    return x
def transform_wiki_url(file_name: str) -> str:
    """Transform an attachment file name into its original wiki URL.

    :param file_name: name of the file
    :return: file url

    >>> transform_wiki_url('1040017000.png')
    'http://gbf-wiki.com/attach2/696D67_313034303031373030302E706E67.png'
    >>> b'img'.hex().upper()
    '696D67'
    """
    hex_name = file_name.encode('utf-8').hex().upper()
    return r'http://gbf-wiki.com/attach2/696D67_{}.png'.format(hex_name)
def parse_density_line(r):
    """Parse a density data line into numeric values.

    Args:
        r (str): input row string, like
            (sr.exe)       Target Density = 1.0597E+00 g/cm3 = 1.0903E+23 atoms/cm3
            (srmodule.exe) Density        = 1.0597E+00 g/cm3 = 1.0902E+23 atoms/cm3
            Thus tokens should be counted from the tail.

    Returns:
        tuple of (mass density (in g/cm3), atom density (in /cm3))

    Raises:
        Error: raised when number of columns or data is not sufficient.
    """
    c = r.split()
    return (
        float(c[-5]),  # mass density in g/cm3
        float(c[-2]),  # atom density in atoms/cm3 (original comment wrongly said "Atomic Number")
    )
def myfunc(myarg):
    """Print a fixed label together with *myarg*, then return a marker string."""
    message = "my function"
    print(message, myarg)
    return "return value"
def chunk_it(seq, num):
    """Chunk a sequence into N roughly equal segments.

    :param seq: Sequence of numbers
    :param num: Number of chunks
    :return: list of chunks (slices of *seq*)
    """
    avg = len(seq) / float(num)
    chunks = []
    pos = 0.0
    # Walk a floating cursor so remainders are spread across the chunks.
    while pos < len(seq):
        chunks.append(seq[int(pos):int(pos + avg)])
        pos += avg
    return chunks
def control_1_7_password_policy_symbol(passwordpolicy):
    """Check CIS control 1.7: IAM password policy requires at least one symbol.

    Args:
        passwordpolicy: the account's IAM password policy dict, or False when
            no policy exists.

    Returns:
        dict: result record with pass/fail status, offenders and control metadata.
    """
    console_url = 'https://console.aws.amazon.com/iam/home?#/account_settings'
    result = True
    failReason = ""
    offenders = []
    offenders_links = []
    if passwordpolicy is False:
        result = False
        failReason = "Account does not have an IAM password policy."
        offenders.append('Account')
        offenders_links.append(console_url)
    elif passwordpolicy['RequireSymbols'] is False:
        result = False
        failReason = "Password policy does not require at least one symbol"
        offenders.append('Account')
        offenders_links.append(console_url)
    return {
        'Result': result,
        'failReason': failReason,
        'Offenders': offenders,
        'OffendersLinks': offenders_links,
        'ScoredControl': True,
        'Description': "Ensure IAM password policy requires at least one symbol",
        'ControlId': "1.7",
    }
def cylinder_flow(Position, t, v=1, r=1):
    """Velocity field of incompressible flow past a cylinder along the y axis.

    Auxiliary function intended for use with Scipy's odeint.

    Input:
        Position :: Array or list with the x, y, z coordinates.
        t        :: Time (variable for odeint).
        v        :: Float. Magnitude of the velocity.
        r        :: Float. Radius of the cylinder.
    Output:
        ddt :: List of velocity components.

    Theoretical derivation:
    http://www.vermontveterinarycardiology.com/index.php/for-cardiologists/for-cardiologists?id=127
    ("Velocity and Pressure Distribution for Flow Over a Cylinder")
    """
    x, y, z = Position[0], Position[1], Position[2]
    denom = (x**2 + z**2)**2
    vx = v * (r**2*(z**2-x**2) + (x**2+z**2)**2) / denom
    vz = -v * (2*r**2*x*z) / denom
    # The flow is uniform along the cylinder axis, so vy is always zero.
    return [vx, 0, vz]
def remove_pairs(cards):
    """Return the sorted cards with repeated ranks collapsed to one card.

    NOTE(review): despite the name, every run of equal ranks is reduced to a
    single card (a three- or four-of-a-kind also keeps exactly one) — confirm
    this is the intended "remove extra pairs" behavior.
    """
    ordered = sorted(cards)
    kept = []
    for position, card in enumerate(ordered):
        # Keep the first card, and any card whose rank differs from its
        # predecessor in sorted order.
        if position == 0 or card.rank != ordered[position - 1].rank:
            kept.append(card)
    return kept
def add_space(string, symbol=" ", direction="all"):
    """Pad *string* with a placeholder symbol.

    :params direction: support left or right or all.
    """
    if direction == "left":
        return f"{symbol}{string}"
    if direction == "right":
        return f"{string}{symbol}"
    # Any other direction value pads both sides, as "all" does.
    return f"{symbol}{string}{symbol}"
def _get_countries(area):
    """Return the set of country names belonging to *area*.

    :param area: a country name or the name of a set of countries
    :return: set of countries that belong to the area; an unknown name is
        returned as a single-element set containing *area* itself
    """
    # NOTE(review): "Lybia" (sic) and the absence of Austria/Denmark from the
    # continent sets look like data errors in the original; preserved here to
    # keep behavior identical — confirm against the upstream data source.
    eu = {"France", "Germany", "Spain", "Italy", "Netherlands", "Portugal",
          "Belgium", "Sweden", "Finland", "Greece", "Ireland", "Poland",
          "Luxembourg", "Malta", "Slovenia", "Austria", "Croatia", "Hungary",
          "Czechia", "Slovakia", "Romania", "Bulgaria", "Cyprus",
          "Lithuania", "Latvia", "Estonia"}  # duplicate "Hungary" literal removed
    european_continent = {
        "France", "Germany", "Spain", "Italy", "Netherlands", "Portugal",
        "Belgium", "Sweden", "Finland", "Greece", "Ireland", "United Kingdom",
        "Norway", "Switzerland", "Poland", "Andorra", "Luxembourg",
        "Liechtenstein", "Malta", "San Marino", "Holy See", "Monaco",
        "Hungary", "Czechia", "Slovakia", "Slovenia", "Croatia",
        "Bosnia and Herzegovina", "Serbia", "Albania", "Romania", "Bulgaria",
        "Ukraine", "Belarus", "Latvia", "Estonia", "Lithuania", "Moldova",
        "North Macedonia", "Kosovo", "Montenegro", "Iceland", "Cyprus"}
    africa = {"Morocco", "Tunisia", "Algeria", "Lybia", "Egypt", "Mali",
              "Niger", "Chad", "Sudan", "Ethiopia", "Mauritania", "Senegal",
              "Guinea", "Liberia", "Ghana", "Benin", "Togo", "Nigeria",
              "Sierra Leone", "Cameroon", "Central African Republic",
              "Gabon", "Congo (Brazzaville)", "Congo (Kinshasa)", "Angola",
              "Namibia", "Botswana", "Lesotho", "South Africa", "Eswatini",
              "Zimbabwe", "Mozambique", "Zambia", "Madagascar", "Burundi",
              "Kenya", "Uganda", "Somalia", "South Sudan", "Cote d'Ivoire",
              "Rwanda", "Djibouti"}
    groups = {
        "EU": eu,
        "European continent": european_continent,
        # The original "+Russia" branch was a full copy of the continent set
        # plus Russia; derive it from the shared set to avoid drift.
        "European continent+Russia": european_continent | {"Russia"},
        "Africa": africa,
        "North-America": {"US", "Canada"},
        "South-America": {"Brazil", "Peru", "Colombia", "Uruguay", "Paraguay",
                          "Argentina", "Bolivia", "Ecuador", "Venezuela",
                          "Guyana", "Suriname"},
    }
    return groups.get(area, {area})
def to_unicode(string, encoding='utf-8'):
    """Decode *string* to unicode when it is bytes; otherwise return unchanged."""
    if isinstance(string, bytes):
        return str(string, encoding)
    return string
def __polygon_intersection(verts, x, y):
    """Point-in-polygon test via ray casting.

    Algorithm from W. Randolph Franklin (WRF):
    https://wrf.ecse.rpi.edu//Research/Short_Notes/pnpoly.html#The Method
    """
    inside = False
    count = len(verts)
    for i in range(count):
        j = (i + count - 1) % count  # previous vertex, wrapping around
        xi, yi = verts[i][0], verts[i][1]
        xj, yj = verts[j][0], verts[j][1]
        # Edge straddles the horizontal ray, and the crossing lies right of x.
        if (yi > y) != (yj > y) and x < (xj - xi) * (y - yi) / (yj - yi) + xi:
            inside = not inside
    return inside
def is_pentagonal(n):
    """Return True when *n* is a pentagonal number.

    Uses exact integer arithmetic (math.isqrt) instead of the original
    floating-point sqrt, which loses precision for large n and produced a
    complex number (then a TypeError) for negative n; negatives now simply
    return False.
    """
    from math import isqrt
    if n < 1:
        return False
    disc = 24 * n + 1
    root = isqrt(disc)
    # n is pentagonal iff (1 + sqrt(24n+1)) / 6 is a whole number.
    return root * root == disc and (1 + root) % 6 == 0
def to_string(in_int):
    """Convert an integer to its decimal string representation.

    Fixes the original use of true division (`/`), which produces floats in
    Python 3 and makes both the loop condition and the digit extraction fail;
    floor division (`//`) is required here.
    """
    prefix = ""
    if in_int < 0:
        prefix = "-"
        in_int = -in_int
    out_str = ""
    while in_int // 10 != 0:
        out_str = chr(ord('0') + in_int % 10) + out_str
        in_int //= 10
    out_str = chr(ord('0') + in_int % 10) + out_str
    return prefix + out_str
def seasonFromDate(date):
    """Return the season for a timestamp's month and day.

    Parameters
    ----------
    date : String
        Date string formatted YYYY-MM-DD, optionally followed by a timestamp.

    Returns
    -------
    season : STRING
        Season corresponding to the date.
    """
    month_day = date[5:10]
    # Lexicographic comparison of "MM-DD" strings matches calendar order.
    if month_day < "03-21" or month_day > "12-21":
        return "Winter"
    if month_day < "06-22":
        return "Spring"
    if month_day < "09-21":
        return "Summer"
    return "Autumn"
def is_subpath_of(file_path: str, potential_subpath: str) -> bool:
    """Case-insensitive prefix test; paths are compared as-is, since file
    paths are not normalized when converted from a URI."""
    haystack = file_path.lower()
    needle = potential_subpath.lower()
    return haystack.startswith(needle)
def dayLookingFor(day_num_string):
    """Translate an ordinal-day token ('1st'..'4th', 'last', 'teenth') into
    its numeric code.

    :raises KeyError: when the token is not recognised. (The original
        docstring claimed a code was returned on failure, which was
        inaccurate — an unknown token has always raised KeyError.)
    """
    translation_dictionary = {'1st': 1, '2nd': 2, '3rd': 3,
                              '4th': 4, 'last': 9, 'teenth': 6}
    return translation_dictionary[day_num_string]
def str_to_doctest(code_lines, lines):
    """Convert lines of Python code into doctest-formatted lines.

    Args:
        code_lines (``list``): lines of python code; consumed in place
            (each processed line is popped), as in the original.
        lines (``list``): accumulator of already-formatted lines.

    Returns:
        ``list`` of ``str``: doctest formatted list of lines
    """
    continuation_keywords = ("except:", "elif ", "else:", "finally:")
    while code_lines:
        line = code_lines.pop(0)
        if line.startswith((" ", "\t")) or line.startswith(continuation_keywords):
            prefix = "... "
        elif lines and lines[-1].strip().endswith("\\"):
            # Previous line ends with a line-continuation backslash.
            prefix = "... "
        else:
            prefix = ">>> "
        lines = lines + [prefix + line]
    return lines
def _merge_2(list_, i, mid, j):
    """Merge the sorted halves list_[i:mid + 1] and list_[mid + 1:j + 1] and
    return them in a new list. The indices are inclusive: list_[mid] belongs
    to the left half and list_[j] to the right half.

    @param list list_: list to sort
    @param int i: starting index for first half of list to sort
    @param int mid: index for middle of list to sort
    @param int j: index for end of list to sort

    >>> _merge_2([1, 3, 5, 2, 4, 6], 0, 2, 5)
    [1, 2, 3, 4, 5, 6]
    """
    merged = []
    a, b = i, mid + 1
    # Take the smaller head element until one half runs out.
    while a <= mid and b <= j:
        if list_[a] < list_[b]:
            merged.append(list_[a])
            a += 1
        else:
            merged.append(list_[b])
            b += 1
    # At most one of these extends with anything; the other slice is empty.
    merged.extend(list_[a:mid + 1])
    merged.extend(list_[b:j + 1])
    return merged
def diagnosis_from_description(description):
    """Return the diagnosis stored in a description record.

    :raises ValueError: when the diagnosis is not one of the known labels.
    """
    allowed = ("nevus", "melanoma", "seborrheic keratosis")
    diagnosis = description["meta"]["clinical"]["diagnosis"]
    if diagnosis not in allowed:
        raise ValueError(diagnosis)
    return diagnosis
def inany(el, seq):
    """Return the first sequence element that *el* is part of, else None."""
    return next((item for item in seq if el in item), None)
def kvalue(p, a, b):
    """Determine the k-value of point *p* on the line segment a-b.

    The k-value is the normalized location on the line ab, with k(a)=0 and
    k(b)=1.

    Parameters
    ----------
    p : (x,y)
        A point, assumed to lie on the line through a & b.
    a : (x,y)
        Beginning of the line segment.
    b : (x,y)
        End of the line segment.

    Returns
    ----------
    float
        The k-value of p on ab, or None for a degenerate (a == b) segment.
    """
    if a == b:
        # Degenerate segment: the k-value is undefined.
        return None
    # Either coordinate works as long as it differs between a and b;
    # fall back to y only when the segment is vertical.
    axis = 1 if a[0] == b[0] else 0
    return (p[axis] - a[axis]) / (b[axis] - a[axis])
def bracket_sub(sub, comment=False):
    """Bracket a substitution pair with a {{ ... }} regex pattern.

    Args:
        sub (tuple): The substitution pair to bracket.
        comment (bool): Whether or not to comment the bracketed pair.

    Returns:
        tuple: The bracketed substitution pair.

    The patterns are now raw strings: the originals relied on invalid escape
    sequences like '\\{' in plain strings, which emit a SyntaxWarning on
    modern Python while producing the exact same text.
    """
    if comment:
        return (r'\(\*\s*\{\{\s*' + sub[0] + r'\s*\}\}\s*\*\)', sub[1])
    return (r'\{\{\s*' + sub[0] + r'\s*\}\}', sub[1])
def sanitize_domain(domain: str) -> str:
    """Defang a potentially malicious domain so it no longer renders as a
    clickable domain in most systems (e.g. "evil.com" -> "evil[.]com").

    :param domain: Original domain
    :return: Sanitized domain
    """
    return "[.]".join(domain.split("."))
def listify(x):
    """Coerce an iterable into a list, wrap a scalar into a single-element
    list, and map None to [].

    >>> listify(1.2)
    [1.2]
    >>> listify(range(3))
    [0, 1, 2]
    """
    try:
        return list(x)
    except TypeError:  # narrowed from a bare `except:` — list() only fails this way
        if x is None:
            return []
        return [x]
def remove_list_duplicates(lista, unique=False):
    """Remove duplicated elements from a list, preserving first-seen order.

    Args:
        lista: list with elements to clean of duplicates.
        unique: when True, additionally drop every element that occurred
            more than once, keeping only those that appeared exactly once.
    """
    deduped = []
    repeated = []
    for item in lista:
        target = repeated if item in deduped else deduped
        target.append(item)
    if unique:
        deduped = [item for item in deduped if item not in repeated]
    return deduped
def provided(*args):
    """Return True when any of the given command-line flags was specified
    (i.e. is not None).

    The original implicitly returned None in the all-unspecified case; an
    explicit boolean is returned instead (same truthiness for callers).
    """
    return any(flag is not None for flag in args)
def tail(iterable):
    """Return everything but the first element of *iterable*, as a list."""
    materialized = list(iterable)
    return materialized[1:]
def format_multiple_values(value):
    """Reformat a multi-line key value for PDS3 labels.

    If the key value holds multiple comma-separated entries (e.g. for
    ``MAKLABEL``), they are reformatted into a brace-delimited, indented
    block; a single value passes through unchanged.

    :param value: PDS3 key value
    :type value: str
    :return: PDS3 key value reformatted
    :rtype: str
    """
    if ',' not in value:
        return value
    indent = ' ' * 31
    body = ''.join(f"{indent}{item},\n" for item in value.split(','))
    # Drop the trailing ",\n" of the last entry before closing the block.
    return "{\n" + body[:-2] + '\n' + indent + "}\n"
def render_cells(cells, width=80, col_spacing=2):
    """Arrange a list of short (~10 char) strings into aligned columns.

    Parameters
    ----------
    cells : [(strlen, str), ...]
        Cells to print, as tuples of (visible length, display string).
    width : int
        Width of the terminal.
    col_spacing : int
        Size of the gap to leave between columns.
    """
    if not cells:
        return ""  # max() below would fail on an empty sequence
    # Columns are at least as wide as the widest cell, plus the gap.
    col_width = col_spacing + max(strlen for strlen, _ in cells)
    lines = [""]
    used = 0
    for strlen, text in cells:
        if used + strlen > width:
            # Line full — wrap to a fresh one.
            lines.append("")
            used = 0
        lines[-1] += text + " " * (col_width - strlen)
        used += col_width
    return "\n".join(line.rstrip() for line in lines)
def find_id(concept, h):
    """Return the positional index of the first element of *concept* whose
    'id' equals *h*, or None when absent.

    :type concept: Collection.iterable
    """
    for position, entry in enumerate(concept):
        if entry['id'] == h:
            return position
    return None
def gcd(x, y):
    """Return the greatest common divisor of two non-negative integers.

    Uses the Euclidean algorithm with modulo, replacing the original
    repeated-subtraction loop (O(max/min) iterations, pathological for
    inputs like gcd(1, 10**9)) with O(log) steps; results are identical.
    When the loop exits, one operand is 0 and the other is the gcd, so
    their sum is the answer — same convention as the original.
    """
    while x > 0 and y > 0:
        x, y = y, x % y
    return x + y
def special_string(instance: object, **kwargs) -> str:
    """Build an angle-bracketed debug string for when a standard 'eval repr'
    is not appropriate, e.g. "<ClassName key='value'>".
    """
    name = instance.__class__.__name__
    if kwargs:
        details = ' '.join(f'{key}={val!r}' for key, val in kwargs.items())
        return f'<{name} {details}>'
    return f'<{name}>'
def _cross_2D(A, B):
    """Return the scalar (z-component) cross product of two 2D vectors."""
    ax, ay = A[0], A[1]
    bx, by = B[0], B[1]
    return ax * by - ay * bx
def _get_target_connection_details(target_connection_string):
    """Parse a 'user[/password]@host[:port]' connection string.

    Specialized parser for the connection string provided in the
    application arguments.

    :param target_connection_string: connection string from the app arguments.
    :return: (user, password, host, port) when a password is present,
        otherwise (user, host, port). The port defaults to 22.
    :raises TypeError: for any malformed connection string.

    Bug fixed: the multiple-'@' case previously raised TypeError with the
    parsed parts list as the message instead of the format-error text.
    """
    fmt_error = 'Invalid connection string provided. Expected: user[/password]@host[:port]'
    password = None
    parts = target_connection_string.split('@')
    if len(parts) != 2:  # covers both no '@' and more than one '@'
        raise TypeError(fmt_error)
    auth_part, target_part = parts
    if '/' in auth_part:
        auth = auth_part.split('/')
        if len(auth) != 2:
            raise TypeError(fmt_error)
        user, password = auth
    else:
        user = auth_part
    if ':' in target_part:
        tgt = target_part.split(':')
        if len(tgt) != 2:
            raise TypeError(fmt_error)
        host, port = tgt
        try:
            port = int(port)
        except ValueError:
            raise TypeError(fmt_error)
    else:
        host = target_part
        port = 22
    if not user or not host:
        raise TypeError(fmt_error)
    if password:
        return user, password, host, int(port)
    return user, host, int(port)
def EndsWith(field, value):
    """A criterion used to search for objects having a text field's value
    end with `value`. It's a wildcard operator that adds the `*`, if not
    specified, at the beginning of `value`.

    For example:

    * search for filename observables ending with `.exe`

    Arguments:
        field (value): field name
        value (Any): searched value

    Returns:
        dict: JSON repsentation of the criterion

    ```python
    # Search for tasks where title stats with 'Communication'
    query = EndsWith('data', '.png')
    ```
    produces
    ```json
    {
        "_wildcard": {
            "_field": "data",
            "_value": "*.png"
        }
    }
    ```
    """
    pattern = value if value.startswith('*') else '*' + value
    return {'_wildcard': {'_field': field, '_value': pattern}}
def LEFT(text, n):
    """Slice string(s) to the first *n* characters.

    Parameters
    ----------
    text : list or string
        string(s) to be sliced from left.
    n : integer
        number of characters to slice from left. Must be greater than zero.

    Returns
    -------
    list or string
        A list of converted strings or converted string that were sliced n
        characters from left. Invalid inputs are reported via print() and
        None is returned, matching the original (error-printing) contract.
    """
    if n <= 0:
        print('n must be greater than zero.')
        return None
    if isinstance(text, str):
        return text[0:n]
    if isinstance(text, list):
        try:
            return [item[0:n] for item in text]
        except TypeError:  # narrowed from a bare except; only slicing can fail here
            print('Invalid list: please enter a list of strings.')
            return None
    print('Invalid type: please enter a string or list of strings.')
    return None
def get_substitute_lines(text):
    """Split each line of *text* on the '|||' separator into a
    replacement/search pair.

    :return: list of dicts with 'regex_replacement' and 'regex_search' keys
    """
    pairs = []
    for row in text.splitlines():
        replacement, search = row.split("|||")
        pairs.append({"regex_replacement": replacement,
                      "regex_search": search})
    return pairs
def convert_simple(csv):
    """
    csv should be a string that meets the RFC 4180 specification, with the
    additional requirement of a header line.
    This function returns a list of dictionaries, one per data row, keyed by
    the header fields. Quoted fields and doubled-quote escapes are handled;
    embedded newlines inside quoted fields are NOT (lines are split first).
    """
    # NOTE(review): rstrip() followed by strip() is redundant — strip() alone
    # would suffice; kept byte-identical here.
    csv = csv.rstrip().strip()
    lines = csv.splitlines()
    # fetch the first line
    header = lines.pop(0).split(",")
    # the list that will be returned from the function
    return_list = []
    for line in lines:
        # a temporary dictionary which will be used to
        # aggregate the key/value pairs before adding
        # them to the return_list
        tmp_dict = {}
        # a variable to track whether the char being read is between double quote marks
        quoted = False
        # value to keep track of the index in the line
        i = 0
        # variable to keep track of the index of the header to
        # be used as the key in tmp_dict
        header_indx = 0
        # variable to keep track of a string of
        # characters before you add them to tmp_dict with the appropriate header
        text_buffer = ""
        # iterate through the line
        while i < len(line):
            # if you find a double quote
            if line[i] == "\"":
                if quoted:
                    # if you have two double quote marks next to each other
                    # it signifies a literal double quote mark
                    if i+1 < len(line) and line[i+1] == "\"":
                        text_buffer += "\""
                        i += 1
                    # otherwise, turn the quoted flag to false
                    else:
                        quoted = False
                # if this is the beginning quote mark
                else:
                    quoted = True
            # if this is a comma delimiter
            elif line[i] == "," and not quoted:
                tmp_dict[header[header_indx]] = text_buffer
                text_buffer = ""
                header_indx += 1
            # normal text, add it to the buffer
            else:
                text_buffer += line[i]
            # increment the index
            i += 1
        # add the final dictionary buffer to the list
        tmp_dict[header[header_indx]] = text_buffer
        return_list.append(tmp_dict)
    # return the list
    return return_list
def get_ei(oi, pi):
    """Compute expected counts ei = N * pi, where N = sum(oi).

    :param oi: observed counts
    :param pi: expected proportions, parallel to *oi*
    :return: list of expected counts
    """
    total = sum(oi)
    return [total * pi[idx] for idx in range(len(oi))]
def baumwelch(bw, O, num_iter):
    """Run the Baum--Welch algorithm in a way that mirrors the C version of
    the library.

    Parameters
    ----------
    bw : baumwelch_t
        Context for the Baum--Welch algorithm (callable).
    O : sequence of integers between 0 and M-1
        Sequence of observations for the Baum--Welch algorithm.
    num_iter : int
        Number of re-estimation iterations.

    Returns
    -------
    log_likelihood : float
        Log-likelihood (base 2) of the sequence given the re-estimated HMM.
    lambda_ : hmm_t
        The re-estimated HMM.
    """
    result = bw(O, num_iter)
    return result
def decode_pos(pos):
    """Decode a scalar board position into its (row, col) pair on a 3x3 grid."""
    return divmod(pos, 3)
def circumference_triangle(a, b, c):
    """Calculate the circumference (perimeter) of a triangle.

    param a: one side of the triangle
    param b: other side of the triangle
    param c: third side of the triangle
    return: circumference of the triangle
    """
    sides = (a, b, c)
    return sum(sides)
def mvt(a, b, fx=lambda x: x):
    """Mean value theorem: average slope of *fx* over [a, b].

    Params:
        a: start of interval
        b: end of interval
        fx: function
    Returns:
        f_c: derivative of some point c with a <= c <= b
    """
    rise = fx(b) - fx(a)
    run = b - a
    return rise / run
def make_mapper_ranges(num_chunks, num_mappers):
    """kappa:ignore"""
    # Split num_chunks into num_mappers contiguous (start, end) ranges:
    # each mapper gets `base` chunks and the first `extras` mappers get one more.
    base, extras = divmod(num_chunks, num_mappers)
    ranges = []
    start = 0
    for mapper in range(num_mappers):
        size = base + (1 if mapper < extras else 0)
        ranges.append((start, start + size))
        start += size
    assert start == num_chunks
    return ranges
def parse_name(name):
    """Parse a cpdb name field "simple_name(db)" into (simple_name, db).

    Splits on the LAST '(' so parentheses inside the name are preserved.
    """
    paren = name.rfind('(')
    return name[:paren], name[paren + 1:-1]
def source_location_to_tuple(locpb):
    """Convert a SourceLocation proto into (file, line, function_name), or
    None when the proto is missing or entirely empty."""
    if locpb is None:
        return None
    file_, line, func = locpb.file(), locpb.line(), locpb.function_name()
    if not (file_ or line or func):
        return None
    return file_, line, func
def on_off(tag):
    """Return an ON/OFF string for a 1/0 input. Simple utility function."""
    # Indexing (rather than a conditional) preserves the original behavior
    # of raising IndexError for out-of-range tags.
    return ('OFF', 'ON')[tag]
def _dedentlines(lines, tabsize=8, skip_first_line=False):
    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines

        "lines" is a list of lines to dedent.
        "tabsize" is the tab width to use for indent width calculations.
        "skip_first_line" is a boolean indicating if the first line should
            be skipped for calculating the indent width and for dedenting.
            This is sometimes useful for docstrings and similar.

    Same as dedent() except operates on a sequence of lines. Note: the
    lines list is modified **in-place**.
    """
    DEBUG = False
    if DEBUG:
        print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
              % (tabsize, skip_first_line))
    # Pass 1: find the smallest indent (in columns, tabs expanded) shared by
    # all non-blank lines — that becomes the margin to remove.
    margin = None
    for i, line in enumerate(lines):
        if i == 0 and skip_first_line:
            continue
        indent = 0
        for ch in line:
            if ch == ' ':
                indent += 1
            elif ch == '\t':
                # Advance to the next tab stop.
                indent += tabsize - (indent % tabsize)
            elif ch in '\r\n':
                continue  # skip all-whitespace lines
            else:
                break
        else:
            continue  # skip all-whitespace lines
        if DEBUG:
            print("dedent: indent=%d: %r" % (indent, line))
        if margin is None:
            margin = indent
        else:
            margin = min(margin, indent)
    if DEBUG:
        print("dedent: margin=%r" % margin)
    # Pass 2: strip `margin` columns of leading whitespace from each line,
    # rewriting lines[i] in place.
    if margin is not None and margin > 0:
        for i, line in enumerate(lines):
            if i == 0 and skip_first_line:
                continue
            removed = 0
            for j, ch in enumerate(line):
                if ch == ' ':
                    removed += 1
                elif ch == '\t':
                    removed += tabsize - (removed % tabsize)
                elif ch in '\r\n':
                    # All-whitespace line: keep only the EOL characters.
                    if DEBUG:
                        print("dedent: %r: EOL -> strip up to EOL" % line)
                    lines[i] = lines[i][j:]
                    break
                else:
                    raise ValueError("unexpected non-whitespace char %r in "
                                     "line %r while removing %d-space margin"
                                     % (ch, line, margin))
                if DEBUG:
                    print("dedent: %r: %r -> removed %d/%d"\
                          % (line, ch, removed, margin))
                if removed == margin:
                    lines[i] = lines[i][j+1:]
                    break
                elif removed > margin:
                    # A tab straddled the margin boundary: pad with the
                    # excess columns as spaces.
                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
                    break
            else:
                # Loop ran off the end of the line (pure whitespace shorter
                # than the margin): drop what was counted.
                if removed:
                    lines[i] = lines[i][removed:]
    return lines
def func1(x, y, a=1.0, b=2.0):
    """Evaluate the linear combination a*x + b*y."""
    weighted_x = a * x
    weighted_y = b * y
    return weighted_x + weighted_y
def bytes_as_hex(data):
    """Render raw bytes as a space-separated hexadecimal string for printing
    or debugging purposes.

    :param data: raw data read from the disk
    :type data: bytes
    :rtype: string
    """
    return " ".join(format(byte, '02x') for byte in data)
def get_index_by_node_id(data):
    """Index a Dynalist data object by node id for easy navigation."""
    return {node["id"]: node for node in data["nodes"]}
def ali_so_vrstice_enako_dolge(sez):
    """Return True when every row length in *sez* is equal.

    (Docstring translated from Slovenian: the function receives a list
    containing the length of each row.)

    An empty list yields False, matching the original recursive version;
    the O(n^2) slice-per-call recursion is replaced by a single pass.
    """
    if not sez:
        return False
    first = sez[0]
    return all(length == first for length in sez)
def test_closure(a): """This is the closure test in the paper.""" def x1(b): def x4(c): return b return x4 x2 = x1(a) x3 = x2(1.0) return x3
def strip_repl_characters(code):
    """Remove the leading '>>> ' / '... ' REPL markers from each line.

    >>> strip_repl_characters('>>> "banana"') == '"banana"'
    True
    >>> strip_repl_characters('... banana') == 'banana'
    True
    """
    cleaned = [
        line[4:] if line.startswith(('>>> ', '... ')) else line
        for line in code.splitlines()
    ]
    return '\n'.join(cleaned)
def getBin(value, bins):
    """Return the bin index of *value* within the boundary array *bins*.

    Remember there is one more bin boundary than there are bins (+1), so a
    value below the first boundary yields -1 and an overflow yields the last
    valid bin index (bins.size - 2) after printing a warning.
    """
    # bins assumed to be in increasing order
    for idx, boundary in enumerate(bins):
        if value < boundary:
            return idx - 1
    print(' overflow ! ', value, ' out of range ', bins)
    return bins.size - 2
def make_feature_dict(feature_sequence):
    """Organize a sequence of Feature objects (e.g. from parse_GFF) into a
    nested dict: feature type -> feature name -> list of features.

    Example: after loading the C. elegans GTF file from Ensembl,

    >>> worm_features_dict = HTSeq.make_feature_dict( HTSeq.parse_GFF(
    ...     "test_data/Caenorhabditis_elegans.WS200.55.gtf.gz" ) )

    exon 0 of gene "F08E10.4" is then simply

    >>> worm_features_dict[ 'exon' ][ 'F08E10.4' ][ 0 ]
    <GenomicFeature: exon 'F08E10.4' at V: 17479353 -> 17479001 (strand '-')>

    (Note this may take minutes and a lot of RAM for millions of features.)
    """
    index = {}
    for feature in feature_sequence:
        by_name = index.setdefault(feature.type, {})
        by_name.setdefault(feature.name, []).append(feature)
    return index
def get_phase_number(total_number_of_phases, phase_number):
    """Wrap *phase_number* into the range 1..total_number_of_phases.

    Use this to find the phase after the last phase or before phase 1.

    Parameters
    ----------
    total_number_of_phases : int
        Number of phases in the cycle (must be positive).
    phase_number : int
        Phase index to wrap; may be zero, negative, or past the end.

    Returns
    -------
    int
        The equivalent phase in 1..total_number_of_phases.

    The original wrapped with two while loops, which is O(|phase_number| /
    total); a single modulo computes the same result in O(1).
    """
    return (phase_number - 1) % total_number_of_phases + 1
def Title(word):
    """Return the given word in title case, upper-casing only the first
    character.

    The difference between this and string's title() method is that
    Title('4-ary') is '4-ary' while '4-ary'.title() is '4-Ary'.

    An empty string is returned unchanged (the original raised IndexError).
    """
    if not word:
        return word
    return word[0].upper() + word[1:]
def _cast_dict_keys(data, key_cast):
    """Convert all dict keys in *data* using *key_cast*, recursively.

    Can be used on any type: dicts and lists are traversed, every other
    type (including tuples) is returned as-is.

    >>> _cast_dict_keys({'key': 'value', 'other': {'a': 'b'}}, str.capitalize)
    {'Key': 'value', 'Other': {'A': 'b'}}
    >>> _cast_dict_keys(['1', '2', {'3': '4'}], int)
    ['1', '2', {3: '4'}]
    >>> _cast_dict_keys(({'a': 1}, {'b': 2}), str.capitalize)
    ({'a': 1}, {'b': 2})
    >>> _cast_dict_keys(1, str.capitalize)
    1
    """
    if isinstance(data, dict):
        converted = {}
        for key, value in data.items():
            converted[key_cast(key)] = _cast_dict_keys(value, key_cast)
        return converted
    if isinstance(data, list):
        return [_cast_dict_keys(item, key_cast) for item in data]
    return data
def cohen_d(t, n):
    """Utility for computing Cohen's d from a t statistic and sample size n:
    d = 2t / sqrt(n - 1)."""
    numerator = 2 * t
    denominator = (n - 1) ** 0.5
    return numerator / denominator
def get_edge_nodes(edge, nodes):
    """Get the first and last nodes of an edge.

    Parameters
    ----------
    edge : dict
        the edge information
    nodes : list of dict
        all available nodes

    Returns
    -------
    dict
        information on the first node
    dict
        information on the last node
    """
    def _lookup(node_id):
        # First node with a matching "id"; StopIteration if absent (as before).
        return next(node for node in nodes if node["id"] == node_id)
    return _lookup(edge["from"]), _lookup(edge["to"])
def wc_int_to_string(tint):
    """Decode a packed ternary integer into its '0'/'1'/'x' string form.

    tint: The ternary integer (really quaternary, however a z digit is not
    expected). Each 2-bit group encodes one character; groups are consumed
    least-significant first, then the string is reversed.
    """
    digit_map = {1: '0', 2: '1', 3: 'x'}
    chars = []
    while tint:
        group = tint & 3
        assert group  # a zero group would mean a premature terminator
        chars.append(digit_map[group])
        tint >>= 2
    return ''.join(reversed(chars))
def get_ack_status(code):
    """Get ack status from code: "Yes"/"No" with the code appended, or
    "Unknown (<code>)" for anything else."""
    labels = {0: "No", 1: "Yes"}
    label = labels.get(code)
    if label is None:
        return "Unknown ({})".format(code)
    return "{} ({})".format(label, code)
def sqrt(x):
    """Calculate the square root of x by raising it to the 0.5 power.

    Appends the result to x.children when x is a Reverse autodiff object,
    since `**` delegates to x's own pow operator.

    Arguments:
        x {Reverse, Float} -- Value to calculate square root for.

    Returns:
        Reverse -- Input raised to the 0.5
    """
    half = 1 / 2
    return x ** half
def data_to_hex_str(data):
    """Convert raw data to a hex string in groups of 4 bytes, e.g.
    '0x01020304 0x05'."""
    if not data:
        return ''
    groups = []
    for start in range(0, len(data), 4):
        chunk = data[start:start + 4]
        groups.append('0x' + ''.join('%02x' % byte for byte in chunk))
    return ' '.join(groups)
def find_next_square2(sq):
    """Return the next perfect square after *sq*, or -1 when *sq* is not a
    perfect square itself.

    Alternative method: works by evaluating any non-zero fractional part of
    the root as True (0.000001 --> True).
    """
    root = sq ** 0.5
    if root % 1:
        return -1
    return (root + 1) ** 2
def save_div(a, b, ret=0):
    """Division without a ZeroDivisionError: *ret* is returned instead."""
    return ret if b == 0 else a / b
def get_single_data(url, lst, f):
    """Fetch a category score for *url* via f.main(); return "?" on failure.

    Note: despite the original docstring, *lst* is never read or appended
    to — it is kept only for interface compatibility. The bare `except:` is
    narrowed to `except Exception` so KeyboardInterrupt/SystemExit are no
    longer swallowed.
    """
    try:
        return f.main(url)
    except Exception:
        return "?"
def merge(line): """ Function that merges a single row or column in 2048. """ # replace with your code result_list = [0] * len(line) find_merge = False flag_index = [] # merge same element (from left to right) for dummy_i in range(len(line)): if line[dummy_i] != 0 and dummy_i not in flag_index: for dummy_j in range(dummy_i + 1, len(line)): if line[dummy_i] == line[dummy_j]: result_list[dummy_i] = line[dummy_i] * 2 find_merge = True flag_index.append(dummy_j) break elif line[dummy_j] != 0: break dummy_j += 1 if not find_merge: result_list[dummy_i] = line[dummy_i] find_merge = False dummy_i += 1 # move to the left for dummy_i in range(len(result_list)): if result_list[dummy_i] != 0: dummy_i += 1 else: for dummy_j in range(dummy_i + 1, len(line)): if result_list[dummy_j] != 0: result_list[dummy_i] = result_list[dummy_j] result_list[dummy_j] = 0 break dummy_j += 1 dummy_i += 1 return result_list
def validate_required(value):
    """Raise an error when a required parameter was not passed in.

    :param value: value to check; must not be None
    :returns: True when present
    :raises ValueError: when *value* is None
    """
    if value is not None:
        return True
    raise ValueError('Missing value for argument')
def safe_check(comp, state, expected, default=False):
    """Check whether ``comp.state.<state>`` equals expected.

    Returns default when either ``comp.state`` or the named attribute
    does not exist.
    """
    try:
        current = getattr(comp.state, state)
    except AttributeError:
        return default
    else:
        return current == expected
def even_fibonacci_numbers(limit = 4000000):
    """Sum the even-valued Fibonacci terms whose values do not exceed limit.

    Defaults to the Project-Euler bound of four million.
    """
    a, b = 1, 2
    total = 0
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total
def extract_first(data_list, default=None):
    """Return the first element of data_list, or default when it is empty."""
    if len(data_list) == 0:
        return default
    return data_list[0]
def calculate_diagnosis_match_score(section):
    """Attach a diagnosis-match 'score' to each annotation dict in section.

    Scoring (flag values are the *strings* "True"/"False", not booleans):
      1.0   annotation itself was seen in the diagnosis
      0.0   explicitly not seen, and no diagnosis field carries a vote
     -1.0   seen_in_diagnosis missing entirely, and no field carries a vote
      else  fraction of "True" votes among the five diagnosis fields,
            rounded to 2 decimals

    Mutates the dicts in place and returns the same section list.
    """
    for ann in section:
        # Direct hit: the annotation itself appears in the diagnosis text.
        if ann.get("seen_in_diagnosis") == "True":
            ann["score"] = 1.0
        else:
            # Votes from the five diagnosis-related fields; missing fields read as 0.
            diagnosis_annotation = [ann.get("system", 0), ann.get("organ", 0), ann.get(
                "histology_0", 0), ann.get("histology_1", 0), ann.get("histology_2", 0)]
            if set(diagnosis_annotation) == {0}:
                # No field voted at all: fall back to the seen_in_diagnosis flag.
                if ann.get("seen_in_diagnosis") == "False":
                    ann["score"] = 0.0
                elif ann.get("seen_in_diagnosis", "na") == "na":
                    ann["score"] = -1.0
                # NOTE(review): any other seen_in_diagnosis value leaves 'score'
                # unset on this annotation — confirm that is intended.
            else:
                # Fraction of "True" among fields voting "True"/"False".
                # NOTE(review): assumes at least one field holds "True"/"False"
                # whenever any field is non-zero, otherwise this divides by
                # zero — confirm upstream guarantees that.
                num_trues = diagnosis_annotation.count("True")
                num_falses = diagnosis_annotation.count("False")
                ann["score"] = round(num_trues / (num_trues + num_falses), 2)
    return section
def _add_extra_longitude_points(gjson): """ Assume that sides of a polygon with the same latitude should be rendered as curves following that latitude instead of straight lines on the final map projection """ import math fuzz = 0.00001 if gjson[u'type'] != u'Polygon': return gjson coords = gjson[u'coordinates'][0] plng, plat = coords[0] out = [[plng, plat]] for lng, lat in coords[1:]: if plat - fuzz < lat < plat + fuzz: parts = int(abs(lng-plng)) if parts > 300: # something wrong with the data, give up return gjson for i in range(parts)[1:]: out.append([(i*lng + (parts-i)*plng)/parts, lat]) out.append([lng, lat]) plng, plat = lng, lat return {u'coordinates': [out], u'type': u'Polygon'}
def get_labels(label_string):
    """Parse a parenthesised label string into a list of ints.

    Input:  "(1, 2, 3, 4, 5)"
    Output: [1, 2, 3, 4, 5]
    """
    inner = label_string[1:-1]          # strip the surrounding parentheses
    tokens = inner.split(',')
    return [int(token) for token in tokens if token]  # skip empty tokens
def getFuelLatticeCell(cellNum, surfaceNum, assemblyUniverse, latticeUniverse, comment):
    """Create a hexagonal lattice cell card."""
    cellCard = (f"{cellNum} 0 -{surfaceNum} u={assemblyUniverse} "
                f"fill={latticeUniverse} imp:n=1 {comment}")
    # Card body (excluding the trailing comment) must fit in 80 columns.
    assert (len(cellCard) - len(comment)) < 80
    return cellCard
def check_row_winner(row):
    """Return the player number that wins the row (all three cells equal),
    or 0 when there is no winner."""
    first, second, third = row[0], row[1], row[2]
    if first == second == third:
        return first
    return 0
def flatten_kv_dict(orig_dict, join_char=":"):
    """Flatten nested dictionaries into one level, joining key paths.

    >>> flatten_kv_dict({'a': {'b': 2, 'c': 3}, 'd': 4})
    {'a:b': 2, 'a:c': 3, 'd': 4}
    """
    flattened = {}
    for key, value in orig_dict.items():
        if isinstance(value, dict):
            # Recurse, then prefix every nested key with this level's key.
            nested = flatten_kv_dict(value, join_char=join_char)
            for sub_key, sub_value in nested.items():
                flattened["%s%s%s" % (key, join_char, sub_key)] = sub_value
        else:
            flattened[key] = value
    return flattened
def merge_dictionaries(to_dict, from_dict, depth=3, replace=True):
    """
    Merge from_dict into to_dict in place, recursing into nested dicts up to
    ``depth`` levels. With replace=True, from_dict values win on key clashes;
    with replace=False, existing to_dict values are kept.

    :return: to_dict (the same reference, after the merge).
    """
    for key, value in from_dict.items():
        nested_merge = (issubclass(type(value), dict)
                        and key in to_dict
                        and issubclass(type(to_dict[key]), dict))
        if nested_merge and depth:
            merge_dictionaries(to_dict[key], value, depth - 1, replace=replace)
        elif not replace and key in to_dict:
            continue  # keep the existing value
        else:
            to_dict[key] = value
    return to_dict
def quote_string(text: str) -> str:
    """Wrap text in double quotes, escaping any embedded double quotes."""
    escaped = text.replace('"', '\\"')
    return f'"{escaped}"'
def replace_in_keys(src, existing='.', new='_'):
    """
    Replace a substring in all of the keys of a dictionary (or list of dictionaries).

    :type src: ``dict`` or ``list``
    :param src: dictionary (or list of dictionaries) whose keys need replacement
    :type existing: ``str``
    :param existing: substring to replace
    :type new: ``str``
    :param new: replacement substring
    :return: the structure with substring-replaced keys
    :rtype: ``dict`` or ``list``
    """
    if isinstance(src, list):
        return [replace_in_keys(element, existing, new) for element in src]

    def fix_key(key):
        # bytes keys are decoded to str before replacement
        if callable(getattr(key, "decode", None)):
            key = key.decode('utf-8')
        return key.replace(existing, new)

    return {fix_key(key): value for key, value in src.items()}
def percentile(N, percent, key=lambda x: x):
    """
    Find the percentile of a list of values by linear interpolation.

    @parameter N - list of values. N MUST already be sorted.
    @parameter percent - a float value from 0 to 100.
    @parameter key - optional function extracting the value from each element.
    @return - the percentile of the values, or None for an empty list.
    """
    if not N:
        return None
    rank = (len(N) - 1) * percent / 100.0
    lower = math.floor(rank)
    upper = math.ceil(rank)
    if lower == upper:
        # rank is integral: no interpolation needed
        return key(N[int(rank)])
    # weighted blend of the two neighbouring values
    lower_part = key(N[int(lower)]) * (upper - rank)
    upper_part = key(N[int(upper)]) * (rank - lower)
    return lower_part + upper_part
def filter_list(input_list, key_fn=(lambda x: x), filter_keys=None):
    """Filter a list down to the elements whose key is in filter_keys.

    Args:
        input_list: list to be filtered
        key_fn: function mapping each element to its key
        filter_keys: keys to intersect with; None means no filtering

    Returns:
        The filtered list, ordered by sorted key. With filter_keys=None the
        original list object is returned unchanged.
    """
    if filter_keys is None:
        return input_list
    keyed = {key_fn(item): item for item in input_list}
    selected = sorted(set(keyed) & set(filter_keys))
    return [keyed[key] for key in selected]
def oi_to_args(issues):
    """Return a set containing all arguments in the list of open issues.

    Fix: the original used ``res &= set(...)`` (intersection) starting from
    an empty set, so it always returned the empty set. The docstring asks
    for *all* arguments, which is the union.
    """
    res = set()
    for issue in issues:
        res |= set(issue.arguments)
    return res
def generate_report(data):
    """
    Process the property data from the web page, build summary dictionary containing:

    * 'property_name' - Name of property
    * 'property_type' - Type of property e.g. 'Apartment'
    * 'rooms' - Number of bedrooms
    * 'bathrooms' - Number of bathrooms
    * 'general_amenities' - List of general amenities
    * 'family_amenities' - List of family amenities
    * 'safety_feats' - List of safety amenities

    (Fix: docstring previously listed 'room_type'/'room_number' keys that the
    code never produced; the actual key is 'rooms'.)

    :param data: dict Web page data, derived from 'bootstrapData' JSON string
    :return: dict Summarised property information (see above)
    """
    listing = data['bootstrapData']['listing']

    # Defaults of 'Not found' cover pages where a detail is absent
    summary_data = {
        'property_name': listing['name'],
        'property_type': 'Not found',
        'rooms': 'Not found',
        'bathrooms': 'Not found',
        'general_amenities': [],
        'family_amenities': [],
        'safety_feats': []
    }

    # Map 'Space' section labels to summary keys instead of repeated ifs
    label_to_key = {
        'Property type:': 'property_type',
        'Bedrooms:': 'rooms',
        'Bathrooms:': 'bathrooms',
    }
    for detail in listing['space_interface']:
        key = label_to_key.get(detail.get('label'))
        if key and detail['value']:
            summary_data[key] = detail['value']

    # Group present amenities by category; 'general' safety features are
    # reported separately from other general amenities
    for amenity in listing['listing_amenities']:
        if amenity['is_present']:
            if amenity['category'] == 'family':
                summary_data['family_amenities'].append(amenity['name'])
            elif amenity['category'] == 'general' and amenity['is_safety_feature']:
                summary_data['safety_feats'].append(amenity['name'])
            else:
                summary_data['general_amenities'].append(amenity['name'])

    return summary_data
def comma_and_and(*values, period=True):
    """Join values as 'a, b and c', optionally ending with a period."""
    if not values:
        return ""
    suffix = '.' if period else ''
    if len(values) == 1:
        return f"{values[0]}{suffix}"
    head = ', '.join(values[:-1])
    return f"{head} and {values[-1]}{suffix}"
def manhattan_distance_with_heading(current, target):
    """
    Return the Manhattan distance plus any turn moves needed to put the
    target ahead of the current heading.

    current: (x, y, h) tuple, so: [0]=x, [1]=y, [2]=h=heading
    heading: 0:^:north 1:<:west 2:v:south 3:>:east
    """
    cur_x, cur_y, heading = current[0], current[1], current[2]
    dist = abs(cur_x - target[0]) + abs(cur_y - target[1])

    behind = False
    off_axis = False
    if heading == 0:
        # Facing north: "side" compares rows — is the target behind,
        # level with, or ahead of the agent?
        side = cur_y - target[1]
        behind = side > 0
        off_axis = side <= 0 and cur_x != target[0]
    elif heading == 1:
        # Facing west: compare columns instead of rows.
        side = cur_x - target[0]
        behind = side < 0
        off_axis = side >= 0 and cur_y != target[1]
    elif heading == 2:
        # Facing south: row comparison, sign flipped vs. north.
        side = cur_y - target[1]
        behind = side < 0
        off_axis = side >= 0 and cur_x != target[0]
    elif heading == 3:
        # Facing east: column comparison, sign flipped vs. west.
        side = cur_x - target[0]
        behind = side > 0
        off_axis = side <= 0 and cur_y != target[1]

    if behind:
        dist += 2    # target is behind: two turn moves to face it
    elif off_axis:
        dist += 1    # ahead but not straight ahead: one turn move
    # If the target is directly ahead, no turning is required.
    return dist
def _make_header(text: str, level: int) -> str: """Create a markdown header at a given level""" return f"{'#' * (level + 1)} {text}"
def decode_from_bioes(tags):
    """
    Decode from a sequence of BIOES tags, assuming default tag is 'O'.

    Args:
        tags: a list of BIOES tags (strings like 'B-PER'; None is treated as 'O')

    Returns:
        A list of dicts with 'start', 'end' (inclusive indices) and 'type'
        (the tag text after the 'X-' prefix) for each decoded entity.
    """
    res = []
    ent_idxs = []      # indices of the entity currently being accumulated
    cur_type = None    # type of the current entity

    def flush():
        # Closure over ent_idxs/cur_type: record the pending entity, if any.
        if len(ent_idxs) > 0:
            res.append({
                'start': ent_idxs[0],
                'end': ent_idxs[-1],
                'type': cur_type})

    for idx, tag in enumerate(tags):
        if tag is None:
            tag = 'O'  # missing tags decode as 'outside'
        if tag == 'O':
            flush()
            ent_idxs = []
        elif tag.startswith('B-'):
            # start of new ent (flush any unterminated previous one first)
            flush()
            ent_idxs = [idx]
            cur_type = tag[2:]
        elif tag.startswith('I-'):
            # continue last ent (lenient: also starts one if none is open)
            ent_idxs.append(idx)
            cur_type = tag[2:]
        elif tag.startswith('E-'):
            # end last ent and record it immediately
            ent_idxs.append(idx)
            cur_type = tag[2:]
            flush()
            ent_idxs = []
        elif tag.startswith('S-'):
            # single word ent: flush pending, record this one on its own
            flush()
            ent_idxs = [idx]
            cur_type = tag[2:]
            flush()
            ent_idxs = []
    # flush after whole sentence (catches an entity still open at the end)
    flush()
    return res
def accuracy(prediction, actual):
    """
    Fraction of positions where int(prediction[i]) equals actual[i].

    Fixes: returns 0.0 instead of raising ZeroDivisionError for an empty
    prediction; counting via sum/zip instead of an index loop.
    Assumes len(actual) >= len(prediction), as the original indexing did.

    :param prediction: sequence of predicted labels (coercible to int)
    :param actual: sequence of true labels (ints)
    :return: accuracy in [0.0, 1.0]
    """
    if not prediction:
        return 0.0
    correct = sum(1 for pred, truth in zip(prediction, actual)
                  if int(pred) == truth)
    return correct / len(prediction)
def validate_var(d, var, *string):
    """Return the names in var that are neither reserved names nor already
    present in d['all_var']. Extra positional args are accepted but unused."""
    reserved = {'walk_result', 'get_result', 'last_result', 'time',
                'snmp', 'last', 'options', 're'}
    return set(var) - reserved - d['all_var']
def add_three(a, b, c, wb, wc):
    """Accumulate a weighted sum of three vectors in place.

    Note this modifies (and returns) the input vector a.

    :param a: array, updated in place
    :param b: array
    :param c: array
    :param wb: real weight for b
    :param wc: real weight for c
    :return: a, now holding a + wb*b + wc*c
    """
    length = len(a)
    for idx in range(length):
        a[idx] += wb * b[idx] + wc * c[idx]
    return a
def sort_str(string):
    """Return the characters of a wire/segment string in sorted order."""
    ordered = sorted(string)
    return ''.join(ordered)
def _data_filed_conversion(field): """ converts fields in the sample sheet generated by the LIMS in fields that can be used by bcl2fastq2.17 """ datafieldsConversion = {'FCID': 'FCID', 'Lane': 'Lane', 'SampleID' : 'Sample_ID', 'SampleRef': 'SampleRef', 'Index' : 'index', 'Description': 'Description', 'Control': 'Control', 'Recipe': 'Recipe', 'Operator': 'Operator', 'SampleProject' : 'Sample_Project' } if field in datafieldsConversion: return datafieldsConversion[field] else: raise RuntimeError("field {} not expected in SampleSheet".format(field))
def check_next_url(next_url):
    """Minimal open-redirect guard for a 'next' URL.

    Returns next_url only when it is non-empty and contains no scheme
    separator ('://'); otherwise returns None.
    """
    if next_url and '://' not in next_url:
        return next_url
    return None
def is_mlcmt(line, mlcmto, mlcmtc):
    """Locate multi-line comment delimiters in a line.

    Returns [open_pos, close_pos]: the str.find() indices of the opening
    and closing delimiters (-1 when a delimiter is absent).
    """
    open_pos = line.find(mlcmto)
    close_pos = line.find(mlcmtc)
    return [open_pos, close_pos]