content
stringlengths
42
6.51k
def encode_name(name):
    """Encode special characters so the name is XML-compatible.

    Bug fix: the original ``name.replace("&", "&")`` was a no-op; an
    ampersand must become the XML entity ``&amp;``.

    :param name: string to encode, or None
    :returns: encoded string, or None if ``name`` is None
    """
    if name is None:
        return name
    return name.replace("&", "&amp;")
def _rev_auth_str(client_api_key, user_api_key): """Returns a Rev Auth string.""" return 'Rev %s:%s' % (client_api_key, user_api_key)
def to_year_only(time):
    """Extract the 4-digit year prefix from an ISO timestamp.

    e.g. '2019-05-08T08:27:07.472Z' -> '2019'
    """
    year = time[0:4]
    return year
def Tn(n):
    """Return the n-th triangular number (sum of 1..n)."""
    return (n * n + n) // 2
def calculate_informedness(TPR, TNR):
    """Compute informedness (Youden's J statistic).

    J = TPR + TNR - 1, see
    https://en.wikipedia.org/wiki/Youden%27s_J_statistic

    :param TPR: true positive rate
    :param TNR: true negative rate
    :returns: informedness value
    """
    informedness = TPR + TNR - 1
    return informedness
def remove_long_short_sentences(sentences, s_th=3, l_th=25):
    """Filter out sentences that are too short or too long.

    Idiom fix: the original index-based loop with append is replaced by a
    list comprehension; behavior is unchanged.

    :param sentences: list of [index, sentence-indices-list] pairs
    :param s_th: minimum allowed sentence length (inclusive)
    :param l_th: maximum allowed sentence length (inclusive)
    :return: new list keeping only sentences with s_th <= len <= l_th
    """
    return [sent for sent in sentences if s_th <= len(sent[1]) <= l_th]
def choose(paragraphs, select, k):
    """Return the Kth paragraph from PARAGRAPHS for which SELECT called on the
    paragraph returns true. If there are fewer than K such paragraphs, return
    the empty string.

    Fixes: the original shadowed the builtin ``list`` and used an index loop;
    behavior is unchanged (k is 0-indexed).
    """
    # BEGIN PROBLEM 1
    matches = [p for p in paragraphs if select(p)]
    if k < len(matches):
        return matches[k]
    return ''
    # END PROBLEM 1
def crc16_compute(data):
    """Compute the 16-bit bank CRC used by DFU (straight port).

    :param data: sequence or generator of integers in 0..255
    :returns: 16-bit CRC as an int
    """
    checksum = 0xffff
    for byte in data:
        # byte-swap the running CRC, then fold in the data byte
        checksum = ((checksum >> 8) & 0xff) | ((checksum << 8) & 0xff00)
        checksum ^= byte
        checksum ^= ((checksum & 0xff) >> 4) & 0xff
        checksum ^= (checksum << 12) & 0xffff
        checksum ^= (checksum & 0xff) << 5
    return checksum
def getSubset(mali, identifiers, not_in_set=False):
    """Return the subset of ``mali`` restricted by ``identifiers``.

    Idiom fix: the two near-identical loops are replaced by a single dict
    comprehension; behavior is unchanged.

    :param mali: mapping of identifier -> value
    :param identifiers: collection of identifiers to keep (or drop)
    :param not_in_set: if True, keep entries NOT in ``identifiers`` instead
    :return: new dict with the selected entries
    """
    if not_in_set:
        return {k: v for k, v in mali.items() if k not in identifiers}
    return {k: v for k, v in mali.items() if k in identifiers}
def clean_string(string, remove_parenthesis=False, remove_brackets=False):
    """Strip, uppercase and whitespace-normalize a string.

    Collapses runs of whitespace (including tabs/newlines) to single spaces.
    Optionally removes parenthesized and/or bracketed spans.

    Parameters
    ----------
    string : str
        String to clean
    remove_parenthesis : bool, optional
        Remove any "(...)" span
    remove_brackets : bool, optional
        Remove any "[...]" span

    Examples
    --------
    >>> clean_string(" sHawn tesT ")
    'SHAWN TEST'
    >>> clean_string("shawn ( te st )", remove_parenthesis=True)
    'SHAWN'
    >>> clean_string("shawn [ te st ]", remove_brackets=True)
    'SHAWN'

    Returns
    -------
    str

    Raises
    ------
    TypeError
        If ``string`` is not a str.
    """
    import re
    if not isinstance(string, str):
        raise TypeError("Wrong datatype(s)")
    if remove_parenthesis:
        string = re.sub(r"\(.*\)", "", string)
    if remove_brackets:
        string = re.sub(r"\[.*\]", "", string)
    return " ".join(string.strip().upper().split())
def find_edges(faces):
    """Find all edges on a mesh.

    Performance fix: the original tested ``edge in edges`` (O(n) per face
    edge, O(n^2) overall); a parallel set of seen tuples makes each lookup
    O(1) while preserving the exact output list and its order.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    edges : list of lists of integers
        each element is a 2-tuple of vertex ids representing an edge

    Examples
    --------
    >>> faces=[[0,1,2], [0,1,4], [1,2,3], [0,2,5]]
    >>> find_edges(faces)
    [[0, 1], [1, 2], [0, 2], [1, 4], [0, 4], [2, 3], [1, 3], [2, 5], [0, 5]]
    """
    edges = []
    seen = set()
    for face in faces:
        for edge in [face[0:2], face[1:3], [face[0], face[2]]]:
            key = tuple(edge)
            if key not in seen:
                seen.add(key)
                edges.append(edge)
    return edges
def inbracket(strg):
    """ extraction of bracket content

    Splits on '{' and '}' so that the text before the first '{' and the
    text inside the (possibly nested) braces are returned separately.

    Parameters
    ----------
    strg : a string with a bracket

    Returns
    -------
    lbra : left part of the string (before the first '{')
    inbr : string inside the bracket (closing braces stripped from the end)

    Examples
    --------
    >>> strg ='abcd{un texte}'
    >>> lbra,inbr = inbracket(strg)
    >>> assert(lbra=='abcd')
    >>> assert(inbr=='un texte')
    """
    # drop carriage returns so Windows line endings don't leak into parts
    strg = strg.replace('\r', '')
    ssp = strg.split('{')
    lbra = ssp[0]
    rbra = ''
    inbr = ''
    # re-join everything after the first '{', restoring the '{' separators
    for k in ssp[1:]:
        rbra = rbra + k + '{'
    # then split on '}' and re-join all but the trailing fragment,
    # restoring '}' separators; the final rstrip removes the closers
    rsp = rbra.split('}')
    for k in rsp[:-1]:
        inbr = inbr + k + '}'
    inbr = inbr.rstrip('}')
    return (lbra, inbr)
def FirstChar(line):
    """Return the first non-space character of the line.

    Raises IndexError if the line is empty or contains only spaces,
    matching the original index-scan behavior.
    """
    return line.lstrip(' ')[0]
def compute_lifting_parameter(lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff):
    """Compute a per-particle "4D" lifting offset.

    The 4th dimension starts on a fixed plane (lambda_plane_idxs, typically
    0 or 1, turning an interaction off independently of lambda), is shifted
    by lamb scaled by lambda_offset_idxs (typically 0 or 1), and the whole
    thing is scaled by cutoff.

    Returns the w coordinate(s).
    """
    plane = lambda_plane_idxs
    shift = lamb * lambda_offset_idxs
    return cutoff * (plane + shift)
def dict2str(dic):
    """Render a dict as a comma-separated, key-sorted "k=v" list.

    :param dic: the dict to render
    :returns: str such as "a=1,b=2"
    """
    pairs = [f"{key}={val}" for key, val in sorted(dic.items())]
    return ','.join(pairs)
def write_time(time_in_secs):
    """Return time in 00h:00m:00s format (shorter forms for small values).

    :param time_in_secs: number of seconds (numeric or numeric string)
    :raises ValueError: if the input is not numeric or is negative
    """
    try:
        total = round(float(time_in_secs))
    except (TypeError, ValueError):
        raise ValueError('Invalid time measure.')
    if total < 0:
        raise ValueError('Time must be positive.')
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours:
        return '{hours:02d}h:{minutes:02d}m:{secs:02d}s'.format(
            hours=hours, minutes=minutes, secs=secs)
    if minutes:
        return '{minutes:02d}m:{secs:02d}s'.format(minutes=minutes, secs=secs)
    return '{secs:02d}s'.format(secs=secs)
def get_id(page):
    """Extract the integer id from a page's "<id>...</id>" tag.

    Args:
        page: a string
    Returns:
        an integer
    """
    open_tag = "<id>"
    close_tag = "</id>"
    start = page.find(open_tag)
    end = page.find(close_tag)
    assert start != -1
    assert end != -1
    return int(page[start + len(open_tag):end])
def _bytes_to_string(value: bytes) -> str: """Decode bytes to a UTF-8 string. Args: value (bytes): The bytes to decode Returns: str: UTF-8 representation of bytes Raises: UnicodeDecodeError """ return value.decode(encoding="utf-8")
def is_string_list(a_list):
    """Check that every element of the iterable is a string.

    Idiom fix: replaces the manual loop with ``all()``; behavior is
    unchanged (empty input returns True).

    :param a_list: iterable to check
    :return: True if every element is a str, False otherwise
    """
    return all(isinstance(element, str) for element in a_list)
def fahrenheit_to_celsius(fahrenheit_temp):
    """Convert a Fahrenheit temperature to Celsius.

    PARAMETERS
    ----------
    fahrenheit_temp : float
        A temperature in degrees Fahrenheit

    RETURNS
    -------
    temperature : float
        The temperature in degrees Celsius
    """
    offset = fahrenheit_temp - 32
    return offset * (5 / 9)
def plugin_reconfigure(handle, new_config):
    """Reconfigure the plugin when its configuration category changes.

    Args:
        handle: handle returned by the plugin initialisation call
        new_config: JSON object with the new configuration category
    Returns:
        new_handle: the configured GPIO pin value, used as the new handle
    """
    gpio_entry = new_config['gpiopin']
    new_handle = gpio_entry['value']
    return new_handle
def rejoin_parts_multiple_index(param_parts):
    """Join the middle parameter-name parts with underscores.

    Drops the first part and the last two parts, then joins the rest.
    """
    middle = param_parts[1:-2]
    return "_".join(middle)
def radboud_cervix_concepts2labels(report_concepts):
    """
    Convert the concepts extracted from cervix reports to the set of pre-defined labels used for classification

    Params:
        report_concepts (dict(list)): the dict containing for each cervix report the extracted concepts

    Returns: a dict containing for each cervix report the set of pre-defined labels where 0 = absence and 1 = presence.
    If no diagnosis label is matched, both 'glands_norm' and 'squamous_norm' are set to 1 as the fallback.
    """
    report_labels = dict()
    # loop over reports
    for rid, rconcepts in report_concepts.items():
        # assign pre-defined set of labels to current report (all absent)
        report_labels[rid] = {
            'cancer_scc_inv': 0, 'cancer_scc_insitu': 0,
            'cancer_adeno_inv': 0, 'cancer_adeno_insitu': 0,
            'lgd': 0, 'hgd': 0, 'hpv': 0, 'koilocytes': 0,
            'glands_norm': 0, 'squamous_norm': 0
        }
        # make diagnosis section a set of lower-cased concept names
        # (concept[1] is presumably the concept's display name — verify against extractor)
        diagnosis = set([concept[1].lower() for concept in rconcepts['concepts']['Diagnosis']])
        # update pre-defined labels w/ 1 in case of label presence;
        # note several tests are non-exclusive, so one diagnosis can set multiple labels
        for d in diagnosis:
            if 'cervical squamous cell carcinoma' == d:
                report_labels[rid]['cancer_scc_inv'] = 1
            if 'squamous carcinoma in situ' == d or 'squamous intraepithelial neoplasia' == d:
                report_labels[rid]['cancer_scc_insitu'] = 1
            if 'cervical adenocarcinoma' in d:
                # substring match, then distinguish in-situ from invasive
                if 'cervical adenocarcinoma in situ' == d:
                    report_labels[rid]['cancer_adeno_insitu'] = 1
                else:
                    report_labels[rid]['cancer_adeno_inv'] = 1
            if 'low grade cervical squamous intraepithelial neoplasia' == d:
                report_labels[rid]['lgd'] = 1
            if 'squamous carcinoma in situ' == d or \
                    'squamous intraepithelial neoplasia' == d or \
                    'cervical squamous intraepithelial neoplasia 2' == d or \
                    'cervical intraepithelial neoplasia grade 2/3' == d:
                report_labels[rid]['hgd'] = 1
            if 'human papilloma virus infection' == d:
                report_labels[rid]['hpv'] = 1
            if 'koilocytotic squamous cell' == d:
                report_labels[rid]['koilocytes'] = 1
        # update when no label has been set to 1 (report considered normal)
        if sum(report_labels[rid].values()) == 0:
            report_labels[rid]['glands_norm'] = 1
            report_labels[rid]['squamous_norm'] = 1
        # carry through slide ids when present
        if 'slide_ids' in rconcepts:
            report_labels[rid]['slide_ids'] = rconcepts['slide_ids']
    return report_labels
def process_groups(text, is_group_member, process_group, is_group_member_parameters={}, process_group_parameters={}):
    """Processes groups with a block of text.

    Determining whether a line in the input text is a group member and
    processing said groups is delegated to the supplied functions.

    NOTE(review): the default arguments are mutable dicts — safe only as
    long as callees never mutate them; confirm before changing.
    """
    text = text.split('\n')
    group = []    # lines of the group currently being accumulated
    output = []   # processed output lines
    count = 0     # index of the current line, used to detect the last line
    for line in text:
        is_member = is_group_member(line, **is_group_member_parameters)
        if not is_member and group:
            # a pending group just ended: flush it, then emit this line
            processed_group = process_group(group, **process_group_parameters)
            output += processed_group
            output.append(line)
            group = []
        elif is_member and len(text) - 1 == count:
            # member on the very last line: close and flush the group now
            group.append(line)
            output += process_group(group, **process_group_parameters)
        elif is_member:
            group.append(line)
        else:
            output.append(line)
        count += 1
    text = '\n'.join(output)
    return text
def zero_ten_to_value(scale_value):
    """Transform a "0".."10" string to its STIX confidence integer (x10).

    Idiom fix: the 11-branch if/elif chain is replaced by a generated
    lookup table; behavior (including the raised message) is unchanged.

    Args:
        scale_value (str): "0" through "10" inclusive.

    Returns:
        int: scale value times ten (0, 10, ..., 100).

    Raises:
        ValueError: if ``scale_value`` is not an accepted string.
    """
    scale_map = {str(i): i * 10 for i in range(11)}
    try:
        return scale_map[scale_value]
    except KeyError:
        raise ValueError("STIX Confidence value cannot be determined for %s" % scale_value)
def to_locale(language, to_lower=False):
    """Turn a language name (en-us) into a locale name (en_US).

    If 'to_lower' is True, the last component is lower-cased (en_us).
    Region codes longer than 2 characters (e.g. sr-latn) are title-cased.
    """
    p = language.find('-')
    if p < 0:
        return language.lower()
    prefix = language[:p].lower()
    region = language[p + 1:]
    if to_lower:
        return prefix + '_' + region.lower()
    if len(region) > 2:
        # e.g. sr-latn -> sr_Latn
        return prefix + '_' + region[0].upper() + region[1:].lower()
    return prefix + '_' + region.upper()
def guess_type(s):
    """Attempt to convert a string value into a numeric type.

    Commas are stripped first (e.g. "1,234" -> 1234). Tries int, then
    float; returns the original string if neither parses.
    """
    cleaned = s.replace(',', '')
    for convert in (int, float):
        try:
            return convert(cleaned)
        except ValueError:
            pass
    return s
def multiplicacion_por_suma(n, m):
    """
    int, int --> int
    Multiply n by m using only repeated addition.

    Accumulates |n| exactly |m| times and then applies the sign of the
    product, reproducing the original branch-by-branch behavior.
    """
    if m == 0 or n == 0:
        return 0
    if m == 1:
        return n
    if n == 1:
        return m
    total = 0
    for _ in range(abs(m)):
        total += abs(n)
    # product is negative exactly when the signs differ
    if (n < 0) != (m < 0):
        return -total
    return total
def merge_metrics(dict_1: dict, dict_2: dict):
    """Merge two dicts; values under shared keys are concatenated with ``+``.

    Keys unique to either input are copied as-is.
    """
    merged = dict(dict_1)
    merged.update(dict_2)
    for key in merged:
        if key in dict_1 and key in dict_2:
            merged[key] = dict_1[key] + dict_2[key]
    return merged
def _escape_presto(val: str) -> str: """ParamEscaper https://github.com/dropbox/PyHive/blob/master/pyhive/common.py""" return "'{0}'".format(val.replace("'", "''"))
def _int_to_shot_id(shot_int): """ Returns: integer to shot id """ return str(shot_int).zfill(10) + ".jpg"
def _module_exists(module_name): """ Checks if a module exists. :param str module_name: module to check existance of :returns: **True** if module exists and **False** otherwise """ try: __import__(module_name) return True except ImportError: return False
def _check_from_statement(current: int, following: int) -> bool: """ """ return (current == 108 and following == 109) or ( current == 108 and following == 84 )
def d1l(dx, fc, i):
    """First-order, left-sided finite-difference derivative at index i.

    :param dx: grid spacing
    :param fc: sequence of function values
    :param i: index at which to evaluate (uses fc[i] and fc[i-1])
    """
    backward_diff = fc[i] - fc[i - 1]
    return backward_diff / dx
def to_kelvin(value, unit):
    """
    Convert a temperature to Kelvin.

    :param value: magnitude in float or int format
    :param unit: units of the input, one of 'C', 'F', 'R', or 'K'
    :return: a float value in Kelvin
    :raises KeyError: if ``unit`` is not one of the supported units
    """
    # each unit maps to (offset, divisor): kelvin = (value + offset) / divisor
    conversions = {
        'C': (273.15, 1.0),
        'F': (459.67, 1.8),
        'R': (0.0, 1.8),
        'K': (0.0, 1.0),
    }
    offset, divisor = conversions[unit]
    return (float(value) + offset) / divisor
def isTokenStepString(s):
    """Determine whether ``s`` is a placeholder string of the form "~...~".

    Strings shorter than 2 characters are never placeholders.
    """
    return len(s) >= 2 and s.startswith("~") and s.endswith("~")
def _val_to_byte_list(number, num_bytes, big_endian=True): """ Converts an integer into a big/little-endian multi-byte representation. Similar to int.to_bytes() in the standard lib but returns a list of integers between 0 and 255 (which allows for bitwise arithmetic) instead of a bytearray. """ if number > (2**(8*num_bytes))-1: raise ValueError("Unsigned integer %d does not fit into %d bytes!" % (number, num_bytes)) byte_list = [] for b in range(num_bytes): val = (number >> (8*b)) & 0xff if big_endian: byte_list.insert(0, val) else: byte_list.append(val) return byte_list
def gini_gain_quotient(
        left_total, right_total,
        left_amount_classified_zero, left_amount_classified_one,
        right_amount_classified_zero, right_amount_classified_one):
    """
    Return (numerator, denominator) such that the Gini gain for a split
    equals (1 / total) * (numerator / denominator), where total is the
    total number of samples (left and right).

    Keyword arguments:
    left_total / right_total -- sample counts on each side of the split
    left_amount_classified_zero / ..._one -- class counts on the left side
    right_amount_classified_zero / ..._one -- class counts on the right side

    May return a denominator of 0; use avoid_zero() before dividing.

    See also: https://victorzhou.com/blog/gini-impurity/
    """
    left_class_sq = (left_amount_classified_zero ** 2
                     + left_amount_classified_one ** 2)
    right_class_sq = (right_amount_classified_zero ** 2
                      + right_amount_classified_one ** 2)
    numerator = right_total * left_class_sq + left_total * right_class_sq
    denominator = left_total * right_total
    return (numerator, denominator)
def CodePagesToReachedSize(reached_symbol_names, page_to_symbols):
    """From page offset -> [(symbol, size)], compute the reached bytes per page.

    Args:
        reached_symbol_names: ([str]) List of reached symbol names.
        page_to_symbols: (dict) As returned by CodePagesToMangledSymbols().

    Returns:
        {page offset (int) -> {'total': int, 'reached': int}}
    """
    reached = set(reached_symbol_names)
    page_to_reached = {}
    for offset, symbols in page_to_symbols.items():
        total = sum(size for _name, size in symbols)
        hit = sum(size for name, size in symbols if name in reached)
        page_to_reached[offset] = {'total': total, 'reached': hit}
    return page_to_reached
def modified_secant(f, x0, x1, x2, eps=5e-6, max_iterations=50):
    """
    Find a root of f(x) via a modified secant / inverse quadratic
    interpolation method, to absolute accuracy eps.

    Expects three starting points x0, x1, x2 in an interval [a, b] known to
    contain a root. With multiple roots the method converges to an
    arbitrary one; if f does not change sign on [a, b] (Bolzano does not
    apply) the behavior is unpredictable — it may exhaust max_iterations or
    converge to a wrong/partial result. If any of x0, x1, x2 is already an
    exact root it is returned immediately (checked in that order).

    Parameters
    ----------
    f : callable
        The function to find a root of.
    x0, x1, x2 : float
        The three starting points.
    eps : float
        Target accuracy; iteration stops when successive iterates are
        closer than eps. Default 5e-6.
    max_iterations : int
        Safety bound on the number of iterations. Default 50.

    Returns
    -------
    root : float
        The estimated root.
    iterations_num : int
        The number of iterations performed.
    """
    if f(x0) == 0:  # check if x0 is root of f
        return x0, 0
    elif f(x1) == 0:  # or x1 is root of f
        return x1, 0
    elif f(x2) == 0:  # or x2 is root of f
        return x2, 0
    # initializations: inverse-quadratic-interpolation ratios for the
    # first estimate (n >= 1)
    q = f(x0) / f(x1)
    r = f(x2) / f(x1)
    s = f(x2) / f(x0)
    numerator = r * (r - q) * (x2 - x1) + (1 - r) * s * (x2 - x0)
    denominator = (q - 1) * (r - 1) * (s - 1)
    x_n_3 = x2 - numerator / denominator
    iterations_num = 0
    x_n_1 = x1
    x_n_2 = x2
    # iterate while the step is larger than eps and iterations remain
    while abs(x_n_3 - x_n_2) >= eps and iterations_num < max_iterations:
        # shift the window of iterates, dropping the oldest point
        x_n = x_n_1
        x_n_1 = x_n_2
        x_n_2 = x_n_3
        q = f(x_n) / f(x_n_1)
        r = f(x_n_2) / f(x_n_1)
        s = f(x_n_2) / f(x_n)
        numerator = r * (r - q) * (x_n_2 - x_n_1) + (1 - r) * s * (x_n_2 - x_n)
        denominator = (q - 1) * (r - 1) * (s - 1)
        x_n_3 = x_n_2 - numerator / denominator
        iterations_num += 1
    return x_n_3, iterations_num
def is_finite_ordinal(n):
    """Return True if n is a finite ordinal (non-negative int).

    NOTE: bool is a subclass of int, so True/False also pass the
    isinstance check (True counts as 1, False as 0), matching the
    original behavior.
    """
    if not isinstance(n, int):
        return False
    return n >= 0
def numToDigits(num, places):
    """Render ``num`` as exactly ``places`` textual digits.

    Shorter numbers are left-padded with zeros; longer numbers keep only
    their trailing ``places`` characters.
    """
    s = str(num)
    pad = places - len(s)
    if pad > 0:
        return "0" * pad + s
    if pad < 0:
        # keep the last `places` characters (kept in index form so that
        # places == 0 yields "" exactly like the original)
        return s[len(s) - places:]
    return s
def numberOfPaths(nXm_matrix):
    """
    Returns the total number of paths from the top left to the bottom
    right by moving right and down in a 2D array of size n x m, where
    cells contain `1` (open) or `0` (blocked) and a real path can only
    pass through `1` cells.
    """
    hight = len(nXm_matrix)
    width = len(nXm_matrix[0])

    def calculate(matrix, matrix_h, matrix_w, next_h, next_w, last_value=0):
        """Recursive helper: accumulates the path count in last_value."""
        # Stop condition 1: blocked cell, nothing added
        if matrix[next_h][next_w] == 0:
            return last_value
        # Stop condition 2: reached bottom-right target, one more path found
        if next_h == matrix_h and next_w == matrix_w:
            return last_value + 1
        # Move right
        if next_w < matrix_w:
            last_value = calculate(matrix, matrix_h, matrix_w, next_h, next_w + 1, last_value)
        # Move down
        if next_h < matrix_h:
            last_value = calculate(matrix, matrix_h, matrix_w, next_h + 1, next_w, last_value)
        return last_value

    count = calculate(nXm_matrix, hight - 1, width - 1, 0, 0)
    return count
def update_results(results, add):
    """Append the "error"/"warn"/"info" lists of ``add`` onto ``results``.

    Returns the (mutated) ``results`` map.
    """
    for severity in ("error", "warn", "info"):
        results[severity] = results[severity] + add[severity]
    return results
def get_languages_and_names(ref_texts):
    """Collect the .language and .name attributes of each reference text.

    Returns (languages, names) as two parallel lists.
    """
    found_languages = [ref_text.language for ref_text in ref_texts]
    found_names = [ref_text.name for ref_text in ref_texts]
    return found_languages, found_names
def RotateRightWithCarry(cpu, unused, source):
    """
    From z80 Heaven: 8-bit rotation to the right. The bit leaving on the
    right is copied into the carry (bit 8 of the result) and into bit 7.
    """
    low_bit = source & 1
    rotated = source >> 1
    # bit 8 carries the outgoing bit; bit 7 receives it as well
    return rotated | (low_bit << 8) | (low_bit << 7)
def _read_genel_fields_until_char_blank(card_fields, istart):
    """somewhat loose parser helper function for GENEL

    Collects fields from ``card_fields[istart:]`` until a None field or a
    keyword field ('UD', 'K', 'S', 'Z') is hit.

    Returns (new_fields, istart + i) where i is the offset of the field
    the loop stopped on (or the last consumed field if no terminator was
    found; 0 if the slice was empty).
    """
    new_fields = []
    i = 0  # kept outside the loop so an empty slice still returns istart+0
    for i, field in enumerate(card_fields[istart:]):
        if field is None:
            break
        if field.upper() in ['UD', 'K', 'S', 'Z']:
            break
        new_fields.append(field)
    return new_fields, istart + i
def sort_tups(seq):
    """Nicely sorts a list of symbolic tuples, in a way we'll describe later.

    Sorts by (-len(k), k) descending and then reverses, i.e. ascending by
    (-len(k), k) overall — but NOT identical to a plain ascending sort:
    because Python's sort is stable, equal-key elements end up in REVERSED
    input order here, whereas sorted(..., key=...) would preserve it.
    """
    return sorted(seq, key=lambda k: (-len(k), k), reverse=True)[::-1]
def _get_previous_month(month, year): """Returns previous month. :param month: Month (integer from 1...12). :param year: Year (integer). :return: previous_month: Previous month (integer from 1...12). :return: previous_year: Previous year (integer). """ previous_month = month - 1 if previous_month == 0: previous_month = 12 previous_year = year - 1 else: previous_year = year - 0 return previous_month, previous_year
def newton_sqrt1(x):
    """Return the square root of x using Newton's method.

    Fixes: x == 0 previously raised ZeroDivisionError (the first iterate
    divides by the initial guess x); negative x previously looped without
    converging and now raises ValueError.

    :param x: non-negative number
    :raises ValueError: if x is negative
    """
    if x < 0:
        raise ValueError("cannot take the square root of a negative number")
    if x == 0:
        return 0.0
    val = x
    while True:
        last = val
        val = (val + x / val) * 0.5
        if abs(val - last) < 1e-9:
            break
    return val
def val_to_freq(word_value, scalar):
    """Scale each value by ``scalar`` and truncate to a non-negative int.

    :param word_value: mapping of word -> numeric value
    :param scalar: multiplier applied before int() truncation
    :returns: new dict of word -> abs(int(value * scalar))
    """
    return {word: abs(int(value * scalar)) for word, value in word_value.items()}
def hash_code(data):
    """Generate a non-negative hash code using the builtin hash().

    NOTE: for str/bytes inputs the value varies between interpreter runs
    (PYTHONHASHSEED); it is stable only within one process.

    :param data: hashable data to generate a hash code for
    """
    return abs(hash(data))
def aic_hmm(log_likelihood, dof):
    """
    Compute Akaike's information criterion (AIC) for an HMM given the
    log-likelihood of observations: AIC = -2*log_likelihood + 2*dof.

    :param log_likelihood: logarithmised likelihood of the model
    :type log_likelihood: float
    :param dof: number of trainable parameters of the model
    :type dof: int
    :return: the AIC value
    :rtype: float
    """
    penalty = 2 * dof
    return -2 * log_likelihood + penalty
def getdetsize(ccdshape, xbin, ybin):
    """Set the detector size based on the binning and shape of the ccd.

    Bug fix: the y extent was computed with ``xbin`` and the ``ybin``
    parameter was never used; y2 now uses ``ybin``.

    ccdshape: tuple
        Shape of the detector (rows, columns)
    xbin: int
        x binning
    ybin: int
        y binning

    Returns the size string '[x1:x2,y1:y2]'.
    """
    x1 = 1
    y1 = 1
    x2 = xbin * ccdshape[1]
    y2 = ybin * ccdshape[0]
    return '[%i:%i,%i:%i]' % (x1, x2, y1, y2)
def merge(arr1, arr2):
    """Merge two sorted arrays without duplicates.

    Args:
        arr1: first sorted array (or None)
        arr2: second sorted array (or None)

    Returns:
        sorted array comprising items from both input arrays, with
        cross-array duplicates collapsed. If one array is None the other
        is returned unchanged.

    NOTE(review): relies on IndexError to detect exhaustion of one array
    and then substitutes a sentinel "[other[-1] + 1]" array — fragile and
    order-sensitive; left byte-identical on purpose.
    """
    i, j = 0, 0
    total = []
    try:
        total_len = len(arr1) + len(arr2)
    except TypeError:
        # if one of the arrays is None
        return arr1 if arr2 is None else arr2
    while i < len(arr1) or j < len(arr2):
        try:
            # take the smaller head, advance that side's cursor
            comparator = arr1[i] < arr2[j]
            val = arr1[i] if comparator else arr2[j]
            total.append(val)
            i += comparator
            j += not comparator
            # skip a duplicate of the value just taken on either side
            if total[-1] == arr1[i]:
                i += 1
                total_len -= 1
                continue
            if total[-1] == arr2[j]:
                j += 1
                total_len -= 1
                continue
        except IndexError:
            # one side ran out: replace it with a sentinel larger than the
            # other side's maximum so the main comparison keeps working
            arr1 = arr1 if i < len(arr1) else [arr2[-1] + 1]
            arr2 = arr2 if j < len(arr2) else [arr1[-1] + 1]
            i = i if len(arr1) > 1 else 0
            j = j if len(arr2) > 1 else 0
        if total_len == len(total):
            break
    return total
def maptostr(target_list):
    """Cast a list of python values to a list of strings.

    Args:
        target_list (list): list containing python values
    Returns:
        list of the str() of each element
    """
    return list(map(str, target_list))
def has_double_chars(entry):
    """Return True if the string contains a doubled character (aa, bb, ...)."""
    return any(a == b for a, b in zip(entry, entry[1:]))
def _diff_cache_cluster(current, desired):
    """
    If you need to enhance what modify_cache_cluster() considers when deciding
    what is to be (or can be) updated, add it to 'modifiable' below. It's a
    dict mapping the param as used in modify_cache_cluster() to that in
    describe_cache_clusters(). Any data fiddlery that needs to be done to make
    the mappings meaningful should be done in the munging section below as well.

    This function will ONLY touch settings that are explicitly called out in
    'desired' - any settings which might have previously been changed from
    their 'default' values will not be changed back simply by leaving them out
    of 'desired'. This is both intentional, and much, much easier to code :)

    NOTE(review): mutates ``current`` in place while flattening the nested
    AWS describe_* structures.
    """
    ### The data formats are annoyingly (and as far as I can can tell,
    ### unnecessarily) different - we have to munge to a common format to
    ### compare...
    if current.get("SecurityGroups") is not None:
        current["SecurityGroupIds"] = [
            s["SecurityGroupId"] for s in current["SecurityGroups"]
        ]
    if current.get("CacheSecurityGroups") is not None:
        current["CacheSecurityGroupNames"] = [
            c["CacheSecurityGroupName"] for c in current["CacheSecurityGroups"]
        ]
    if current.get("NotificationConfiguration") is not None:
        current["NotificationTopicArn"] = current["NotificationConfiguration"][
            "TopicArn"
        ]
        current["NotificationTopicStatus"] = current["NotificationConfiguration"][
            "TopicStatus"
        ]
    if current.get("CacheParameterGroup") is not None:
        current["CacheParameterGroupName"] = current["CacheParameterGroup"][
            "CacheParameterGroupName"
        ]
    # modify_cache_cluster() param -> describe_cache_clusters() key
    # (None means "no describe equivalent, always pass through")
    modifiable = {
        "AutoMinorVersionUpgrade": "AutoMinorVersionUpgrade",
        "AZMode": "AZMode",
        "CacheNodeType": "CacheNodeType",
        "CacheNodeIdsToRemove": None,
        "CacheParameterGroupName": "CacheParameterGroupName",
        "CacheSecurityGroupNames": "CacheSecurityGroupNames",
        "EngineVersion": "EngineVersion",
        "NewAvailabilityZones": None,
        "NotificationTopicArn": "NotificationTopicArn",
        "NotificationTopicStatus": "NotificationTopicStatus",
        "NumCacheNodes": "NumCacheNodes",
        "PreferredMaintenanceWindow": "PreferredMaintenanceWindow",
        "SecurityGroupIds": "SecurityGroupIds",
        "SnapshotRetentionLimit": "SnapshotRetentionLimit",
        "SnapshotWindow": "SnapshotWindow",
    }

    need_update = {}
    for m, o in modifiable.items():
        if m in desired:
            if not o:
                # Always pass these through - let AWS do the math...
                need_update[m] = desired[m]
            else:
                if m in current:
                    # Equivalence testing works fine for current simple type
                    # comparisons. This might need enhancement if more complex
                    # structures enter the picture
                    if current[m] != desired[m]:
                        need_update[m] = desired[m]
    return need_update
def guard_is_time_to_retry(guard, time):
    """Test if enough time has passed to retry an unreachable (i.e.
    hibernating) guard. Derived from entry_is_time_to_retry() in
    entrynodes.c.

    The retry interval grows with how long the guard has been down:
    1h within the first 6h, 4h within 3 days, 18h within a week,
    36h afterwards.
    """
    if guard['last_attempted'] < guard['unreachable_since']:
        return True
    hour = 60 * 60
    downtime = time - guard['unreachable_since']
    if downtime < 6 * hour:
        wait = hour
    elif downtime < 3 * 24 * hour:
        wait = 4 * hour
    elif downtime < 7 * 24 * hour:
        wait = 18 * hour
    else:
        wait = 36 * hour
    return time > guard['last_attempted'] + wait
def solution3(nums):
    """Return the length of the longest run of consecutive integers in nums.

    Fixes vs the original recursive version: the recursion could blow the
    stack on long streaks, and the starting element was never added to the
    'counted' set so duplicates triggered full rescans. This iterative
    version only expands a streak from its smallest element, giving O(n)
    total work; the returned value is unchanged.
    """
    values = set(nums)
    longest = 0
    for start in values:
        # only start counting at the low end of a streak
        if start - 1 in values:
            continue
        current = start
        length = 1
        while current + 1 in values:
            current += 1
            length += 1
        longest = max(longest, length)
    return longest
def merge_or(base, to_merge, exclusive):
    """
    Merge json schemas assuming a 'oneOf' or 'anyOf' command.

    The idea is to find out the differences between 'base' and 'to_merge'.
    If a property is in 'base' and not in 'to_merge', it is added to all the
    alternative properties except 'to_merge'. If a property is in 'base' and
    in 'to_merge', it's removed from 'to_merge' and 'base' is left as is.

    NOTE(review): mutates BOTH 'base' and 'to_merge' in place and returns
    'base'.
    """
    # '$or' for anyOf, '$xor' for oneOf
    operand = '$%sor' % ('x' if exclusive else '')
    if not base:
        # empty base: adopt to_merge wholesale with an empty alternative
        base.update(to_merge)
        base[operand] = [{}]
        return base
    try:
        alternatives = base[operand]
    except KeyError:
        alternatives = [{}]
        base[operand] = alternatives
    for key in set(base.keys()).difference(to_merge.keys()):
        if key.startswith('$'):
            # do not process special keys
            continue
        # keys that are in base but not in to_merge: push them down into
        # every existing alternative
        val = base.pop(key)
        for a in alternatives:
            a[key] = val
    for key in set(to_merge.keys()).intersection(base.keys()):
        if key.startswith('$'):
            # do not process special keys
            continue
        # keys that are both in base and to_merge: strip the shared parts
        # out of to_merge so only its differences remain
        if key == 'properties':
            prop_base = base.get(key, {})
            prop_mrg = to_merge[key]
            for p in set(prop_mrg.keys()).intersection(prop_base.keys()):
                prop_mrg.pop(p)
        elif key == 'required':
            req_mrg = to_merge[key]
            for p in set(req_mrg).intersection(base.get(key, [])):
                req_mrg.remove(p)
        else:
            if to_merge[key] == base.get(key, None):
                to_merge.pop(key)
    alternatives.append(to_merge)
    return base
def calcCropSensorWidth(sensorWidth, nativeAspectRatio, mediaAspectRatio):
    """
    Calculate the effective/utilised width of a camera sensor when
    image/video is recorded at a non-native aspect ratio.

    Aspect ratios are (width, height) pairs; the crop ratio is the native
    height/width ratio divided by the media height/width ratio.
    """
    native_hw = nativeAspectRatio[1] / nativeAspectRatio[0]
    media_hw = mediaAspectRatio[1] / mediaAspectRatio[0]
    crop_ratio = native_hw / media_hw
    return sensorWidth * crop_ratio
def update_schema(schema_old, schema_new):
    """Merge a new BigQuery schema into an old one.

    Fields sharing a name are replaced by the new definition; fields
    that only exist in the new schema are appended at the end.

    Arguments:
        schema_old: the schema to update ({"fields": [...]})
        schema_new: the schema whose fields overwrite/extend the old

    Returns:
        A schema dict with the merged field list.
    """
    merged = list(schema_old["fields"])
    index_by_name = {field["name"]: i for i, field in enumerate(merged)}
    for new_field in schema_new["fields"]:
        position = index_by_name.get(new_field["name"])
        if position is None:
            merged.append(new_field)       # brand new field: append
        else:
            merged[position] = new_field   # same name: replace in place
    return {"fields": merged}
def rle(seq):
    """Run-length encode seq.

    Each run of identical characters is emitted as the character
    followed by the run length; lengths of 1 are omitted
    (e.g. 'aaabb' -> 'a3b2', 'abb' -> 'ab2').

    Parameters
    ----------
    seq : str

    Returns
    -------
    str
        The encoded string. Empty input yields '' (the original
        implementation raised IndexError on seq[0]).
    """
    if not seq:
        return ""
    pieces = []
    current = seq[0]
    run_len = 1
    for char in seq[1:]:
        if char == current:
            run_len += 1
        else:
            # close the finished run: char, then count if > 1
            pieces.append(current)
            if run_len > 1:
                pieces.append(str(run_len))
            current = char
            run_len = 1
    # flush the final run
    pieces.append(current)
    if run_len > 1:
        pieces.append(str(run_len))
    return "".join(pieces)
def _get_canonical_query_string(query): """Get canonical query string.""" query = query or "" return "&".join( [ "=".join(pair) for pair in sorted( [params.split("=") for params in query.split("&")], ) ], )
def to_seconds(days):
    """Convert a number of days into seconds (days * 86400)."""
    hours = days * 24
    minutes = hours * 60
    return minutes * 60
def fahr2cel(t):
    """Convert a temperature from degrees Fahrenheit to Celsius.

    Inputs:
        t: temperature, in degrees Fahrenheit
    Returns:
        Temperature, in degrees Celsius
    """
    offset = t - 32
    return offset * 5 / 9
def rpn2splitter(splitter_rpn):
    """ Convert from splitter_rpn to splitter.

    Recurrent algorithm to perform the conversion.
    Every time combines pairs of input in one input,
    ends when the length is one.

    Parameters
    ----------
    splitter_rpn : list
        splitter in reverse polish notation; the operators are "."
        (rendered as a tuple) and "*" (rendered as a list)

    Returns
    -------
    splitter : splitter in the standard/original form
        (None for empty input; the single remaining element otherwise)
    """
    if splitter_rpn == []:
        return None
    if len(splitter_rpn) == 1:
        return splitter_rpn[0]
    splitter_rpn_copy = splitter_rpn.copy()
    signs = [".", "*"]
    splitter_modified = []
    while splitter_rpn_copy:
        el = splitter_rpn_copy.pop()
        # element is a sign
        if el in signs:
            # only reduce when both operands are plain fields, i.e.
            # neither of the two preceding elements is itself a sign;
            # otherwise the sign is deferred to a later recursive pass
            if (
                splitter_rpn_copy[-1] not in signs
                and splitter_rpn_copy[-2] not in signs
            ):
                right, left = splitter_rpn_copy.pop(), splitter_rpn_copy.pop()
                if el == ".":
                    # "." combines as a tuple (zip/scalar split)
                    splitter_modified.append((left, right))
                elif el == "*":
                    # "*" combines as a list (outer split)
                    splitter_modified.append([left, right])
            else:
                splitter_modified.append(el)
        else:
            splitter_modified.append(el)
    # reversing the list and combining more
    splitter_modified.reverse()
    return rpn2splitter(splitter_modified)
def uniquify(l):
    """
    Return the given list without duplicates, retaining the order of
    first appearance.

    Relies on dict preserving insertion order (Python 3.7+), which is
    equivalent to the classic seen-set filtering approach.
    """
    return list(dict.fromkeys(l))
def getannotationstrings(cann):
    """ get a nice string summary of a curation

    input:
    cann : dict
        one annotation from /sequences/get_annotations, with keys
        'description', 'annotationtype' and 'details' (a list of
        [detail-type, value] pairs)

    output:
    cdesc : str
        a short summary of the annotation
    """
    cdesc = ''
    if cann['description']:
        cdesc += cann['description'] + ' ('
    if cann['annotationtype'] == 'diffexp':
        # bucket the detail values by direction
        chigh = []
        clow = []
        call = []
        for cdet in cann['details']:
            if cdet[0] == 'all':
                call.append(cdet[1])
                continue
            if cdet[0] == 'low':
                clow.append(cdet[1])
                continue
            if cdet[0] == 'high':
                chigh.append(cdet[1])
                continue
        cdesc += ' high in '
        for cval in chigh:
            cdesc += cval + ' '
        cdesc += ' compared to '
        for cval in clow:
            cdesc += cval + ' '
        cdesc += ' in '
        for cval in call:
            cdesc += cval + ' '
    elif cann['annotationtype'] == 'isa':
        cdesc += ' is a '
        for cdet in cann['details']:
            # BUGFIX: the original appended the literal string 'cdet,'
            # instead of the detail value cdet[1] used by every other branch
            cdesc += cdet[1] + ','
    elif cann['annotationtype'] == 'contamination':
        cdesc += 'contamination'
    else:
        cdesc += cann['annotationtype'] + ' '
        for cdet in cann['details']:
            cdesc = cdesc + ' ' + cdet[1] + ','
    # drop a trailing comma, then close the parenthesis opened for the description
    if len(cdesc) >= 1 and cdesc[-1] == ',':
        cdesc = cdesc[:-1]
    if cann['description']:
        cdesc += ')'
    return cdesc
def check_float(string):
    """
    Check whether a string can be converted to a float.

    :param string: string to be verified as a float
    :returns: True if float(string) succeeds, False otherwise
    """
    try:
        float(string)
    except ValueError:
        return False
    return True
def padding(sent, sequence_len):
    """
    Truncate or zero-pad an index list to a fixed length.

    Parameters
    ----------
    sent : list of int
        Sentence as a list of token indices.
    sequence_len : int
        Target length.

    Returns
    -------
    (list, int)
        The padded/truncated index list and the pre-padding length.
    """
    truncated = sent[:sequence_len]
    real_len = len(truncated)
    return truncated + [0] * (sequence_len - real_len), real_len
def path_from_canonical_parts(prefix, controller, action, args):
    """
    Build a route path such as '/admin/users/edit/<id>' from canonical
    parts ('admin', 'users', 'edit', ['id']).

    Empty parts are skipped; every arg name is wrapped in angle brackets.
    """
    parts = [prefix, controller, action]
    parts.extend('<{}>'.format(arg) for arg in args)
    return '/' + '/'.join(part for part in parts if part)
def _normpath(path):
    """
    Globus Transfer-specific normalization, based on a careful
    reading of the stdlib posixpath implementation:

    https://github.com/python/cpython/blob/ea0f7aa47c5d2e58dc99314508172f0523e144c6/Lib/posixpath.py#L338

    this must be done without using os.path.normpath to be compatible
    with CLI calls from Windows systems

    Transfer requires forward slashes, even when communicating with
    Windows systems, so we must handle these strings appropriately.

    Note that this does not preserve leading slashes in the same way as
    python's posixpath module -- it's not clear how Transfer would
    treat such paths and non-obvious that we need to allow such usage

    Also, unlike normpath, we want to preserve trailing slashes because
    they may be required

    Parameters: path (str), a forward-slash path.
    Returns: str, the normalized path with '.' and redundant '..'
    segments collapsed and one leading/trailing slash preserved.
    """
    # remember whether to restore exactly one leading/trailing slash
    initial_slash = 1 if path.startswith("/") else 0
    trailing_slash = 1 if path.endswith("/") and path != "/" else 0
    parts = path.split("/")
    new_parts = []
    for part in parts:
        # drop empty segments (from '//' or edge slashes) and '.'
        if part in ("", "."):
            continue
        # either not adding a ".." OR chaining together multiple ".."s
        # OR working with a non-absolute path that starts with ".."
        if (
            part != ".."
            or (new_parts and new_parts[-1] == "..")
            or (not initial_slash and not new_parts)
        ):
            new_parts.append(part)
        elif new_parts:
            # adding a ".." to a path which isn't already ending in one
            new_parts.pop()
    return ("/" * initial_slash) + "/".join(new_parts) + ("/" * trailing_slash)
def timedelta_nice_format(td_object):
    """Format a datetime.timedelta as a human-readable duration string.

    Examples: "1 minute, 30 seconds", "1 day, 2 hours".

    Parameters
    ----------
    td_object : datetime.timedelta or None

    Returns
    -------
    str
        "None" for None input, "0 seconds" for a zero duration,
        otherwise a comma-separated list of the non-zero periods.
        Sub-second precision is truncated.
    """
    if td_object is None:
        return "None"
    seconds = int(td_object.total_seconds())
    if seconds == 0:
        return "0 seconds"
    periods = [
        ('year', 60 * 60 * 24 * 365),
        ('month', 60 * 60 * 24 * 30),
        ('day', 60 * 60 * 24),
        ('hour', 60 * 60),
        ('minute', 60),
        ('second', 1),
    ]
    strings = []
    for period_name, period_seconds in periods:
        # BUGFIX: was `seconds > period_seconds` — exact multiples fell
        # through (e.g. 1 second produced "" and 60 seconds produced
        # "60 seconds" instead of "1 minute")
        if seconds >= period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            has_s = 's' if period_value > 1 else ''
            strings.append("%s %s%s" % (period_value, period_name, has_s))
    return ", ".join(strings)
def construct_nomscen(mdl):
    """
    Create the nominal scenario for a model: no faults, time 0.0,
    rate 1.0, type 'nominal'.

    Parameters
    ----------
    mdl : Model
        Unused by the construction; kept for interface compatibility.

    Returns
    -------
    nomscen : dict
        {'faults': {}, 'properties': {...}} describing the nominal case.
    """
    properties = {'time': 0.0, 'rate': 1.0, 'type': 'nominal'}
    return {'faults': {}, 'properties': properties}
def rsqrt_hidden(hidden_size):
    """Reciprocal square root of the hidden size, i.e. hidden_size ** -0.5."""
    size = float(hidden_size)
    return size ** -0.5
def reverse_dict_old(dikt):
    """
    Invert a dict, grouping keys that share a value.

    Old values become keys and the old keys are collected into lists
    (always lists, even when only one key maps to the value).

    example
    reverse_dict_old({'AB04a': 'b', 'AB04b': 'b', 'AB04c': 'b', 'CC04x': 'c'})
    returns {'b': ['AB04a', 'AB04b', 'AB04c'], 'c': ['CC04x']}

    Note: the original docstring claimed single-key values stayed
    unwrapped ('c': 'CC04x'); the implementation has always wrapped
    them in a list.
    """
    new_dikt = {}
    for key, value in dikt.items():
        # setdefault creates the list on first sight of a value
        new_dikt.setdefault(value, []).append(key)
    return new_dikt
def quicksort(items):
    """Sort a list of comparable items with a middle-pivot quicksort.

    O(n log n) on average; returns a new sorted list (the input itself
    is returned unchanged when it has fewer than two elements).
    """
    if len(items) < 2:
        return items
    mid = len(items) // 2
    pivot = items[mid]
    smaller, larger = [], []
    for i, value in enumerate(items):
        if i == mid:
            continue  # the pivot element itself is placed separately
        (smaller if value <= pivot else larger).append(value)
    return quicksort(smaller) + [pivot] + quicksort(larger)
def is_quote(code, idx=0):
    """True if code[idx] is a double-quote character that is not
    escaped by a preceding backslash (and idx is a valid position)."""
    if not 0 <= idx < len(code):
        return False
    if code[idx] != '"':
        return False
    return idx == 0 or code[idx - 1] != '\\'
def update_in(coll, path, update, default=None):
    """Creates a copy of coll with a value updated at path.

    Parameters
    ----------
    coll : dict or list
        Nested collection; only the containers along `path` are
        shallow-copied, everything else is shared with the original.
    path : sequence
        Keys/indices leading to the value; an empty path applies
        `update` to `coll` itself.
    update : callable
        Called with the current value at `path`; its return value is
        stored in the copy.
    default : optional
        Passed to `update` when the final dict key is missing;
        missing intermediate dict keys are auto-vivified as {}.

    Returns
    -------
    The updated copy (same outer type as `coll`).
    """
    if not path:
        # end of the path: apply the update to the value itself
        return update(coll)
    elif isinstance(coll, list):
        copy = coll[:]
        # NOTE: there is no auto-vivication for lists
        copy[path[0]] = update_in(copy[path[0]], path[1:], update, default)
        return copy
    else:
        copy = coll.copy()
        # intermediate levels default to {}, the leaf level to `default`
        current_default = {} if len(path) > 1 else default
        copy[path[0]] = update_in(copy.get(path[0], current_default), path[1:], update, default)
        return copy
def other_elements(element):
    """Print the given element (str/int/float) and return the
    constant 111."""
    sentinel = 111
    print(element)
    return sentinel
def dydt(x, y, a):
    """Right-hand side of the ODE y' = x + a*y."""
    growth = a * y
    return x + growth
def calc_q_c1ncs(q_c1n, delta_q_c1n):
    """
    Clean-sand equivalent q_c1ncs from CPT (Eq 2.10):
    q_c1ncs = q_c1n + delta_q_c1n
    """
    return q_c1n + delta_q_c1n
def check_overlap(bbox1, bbox2):
    """
    Check whether two axis-aligned boxes overlap.

    Works for 2D boxes [x1, y1, x2, y2] and for 1D intervals [z1, z2].
    (The original indexed bbox[2]/bbox[3] unconditionally, which raised
    IndexError for the documented 2-element form.)

    Args:
        bbox1: [x1, y1, x2, y2] or [z1, z2]
        bbox2: [x1, y1, x2, y2] or [z1, z2]

    Returns:
        bool: True if the boxes/intervals overlap (touching counts).
    """
    if len(bbox1) > 2:
        # 2D: separated along x or along y means no overlap
        if bbox1[0] > bbox2[2] or bbox2[0] > bbox1[2]:
            return False
        if bbox1[1] > bbox2[3] or bbox2[1] > bbox1[3]:
            return False
        return True
    # 1D interval [z1, z2]
    return not (bbox1[0] > bbox2[1] or bbox2[0] > bbox1[1])
def split_flat_pair_dict(the_dict):
    """Split interleaved flat-pair lists in a nested dictionary.

    Recurses through nested dicts; wherever a 'FLAT' key holds a list
    of alternating exposures, it is replaced by 'FLAT1' (even indices,
    excluding a trailing unpaired element) and 'FLAT2' (odd indices).
    Non-dict values under any other key are dropped, matching the
    original behaviour.

    Parameters
    ----------
    the_dict : `dict`
        A dictionary of data_ids or filenames keyed by raft, slot, filetype

    Returns
    -------
    out_dict : `dict`
        Same structure with each 'FLAT' list split into FLAT1/FLAT2
    """
    result = {}
    for key, value in the_dict.items():
        if isinstance(value, dict):
            result[key] = split_flat_pair_dict(value)
        elif key == 'FLAT':
            result['FLAT1'] = value[0:-1:2]
            result['FLAT2'] = value[1::2]
    return result
def sort_datasplit(split):
    """Sort one split of the SidechainNet data dict by ascending
    sequence length.

    Every list in the split is reordered (in place, via reassignment)
    so entries stay aligned with the length-sorted 'seq' list; the
    mutated split is also returned.
    """
    order = sorted(range(len(split['seq'])), key=lambda i: len(split['seq'][i]))
    for field in split.keys():
        values = split[field]
        split[field] = [values[i] for i in order]
    return split
def findNextOpr(txt): """ >>> findNextOpr(' 3* 4 - 5') 3 >>> findNextOpr('8 4 - 5') 6 >>> findNextOpr('89 4 5') -1 """ # decide whether the data type is correct if not isinstance(txt, str) or len(txt) <= 0: return "error: findNextOpr" # use for loop to search for pos in range(0, txt.__len__()): # if find, return pos if txt[pos] == '*' or txt[pos] == '/' or txt[pos] == '+' or txt[pos] == '^': return pos # search next if txt[pos] == '-': for item in txt[pos - 1::-1]: if item != ' ': if item.isdigit(): return pos else: break #if cannot search, return -1 return -1
def update_loss_dict(old, new, weight=1, inplace=True):
    """Accumulate a batch of losses/metrics into a running dictionary.

    Parameters
    ----------
    old : dict
        Previous (accumulated) dictionary of losses/metrics
    new : dict
        Dictionary of losses/metrics for the current batch
    weight : float, default=1
        Weight applied to the batch values
    inplace : bool, default=True
        Modify `old` in place (otherwise a shallow copy is updated)

    Returns
    -------
    dict
        Updated (accumulated) dictionary of losses/metrics
    """
    target = old if inplace else dict(old)
    for name, value in new.items():
        increment = value * weight
        if name in target:
            target[name] += increment
        else:
            target[name] = increment
    return target
def compare_pval_alpha_tf(p_val, alpha=.05):
    """Test a p-value against the chosen alpha level.

    Returns True when p_val <= alpha (significant), False otherwise.
    """
    return not p_val > alpha
def matrix_to_relations(matrix):
    """
    Convert a boolean (0/1) matrix into the list of (row, col) tuples
    of the positions whose entry equals 1.

    :param matrix: list of lists
    :return: list of tuples

    >>> matrix_to_relations([[1, 0], [0, 1]])
    [(0, 0), (1, 1)]
    """
    return [
        (row_id, col_id)
        for row_id, row in enumerate(matrix)
        for col_id, cell in enumerate(row)
        if cell == 1
    ]
def parse_body(body_text):
    """Split an incoming message into (query, source lang, target lang).

    Expects messages of the form "<source lang> <target lang> <query>".
    On any parsing failure the query is replaced by a usage message and
    the languages fall back to Latin ("la") -> English ("en").

    param: body_text :: string

    Returns
    -------
    (str, str, str)
        query_string, source_lang, target_lang
    """
    try:
        split_text = body_text.rsplit(" ")
        source_lang = split_text[0]
        target_lang = split_text[1]
        query_string = " ".join(split_text[2:])
    except Exception:
        # fewer than two tokens (or a non-string input): return a help
        # message with the default language pair
        query_string = """Message not well formed.
        Message should be of form:
        [source lang] [target lang] [query]"""
        source_lang = "la"
        target_lang = "en"
    return query_string, source_lang, target_lang
def translation(vertex, delta):
    """Move (slide) a 2D coordinate by a delta, returning [x+dx, y+dy]."""
    x, y = vertex
    dx, dy = delta
    return [x + dx, y + dy]
def custom_MLP_lr_scheduler(epoch, lr):
    """Step learning-rate schedule for MLP training.

    Parameters
    ----------
    epoch : int
        The current epoch of training
    lr : float
        The current learning rate (unused; kept for the scheduler
        callback signature)

    Returns
    -------
    Float
        1e-4 for epochs <= 4, 1e-5 for epochs 5-10, 1e-6 afterwards
    """
    if epoch > 10:
        return 1e-6
    if epoch > 4:
        return 1e-5
    return 1e-4
def is_leap(year):
    """Return True if year is a leap year (Gregorian rules).

    Param: year (positive integer)
    Return: boolean
    """
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
def OOV_handeler(lemma, pos):
    """
    Handle OOV words by replacing the lemma with a placeholder token
    for punctuation, numbers and symbols; other lemmas pass through.

    :param lemma: str
    :param pos: str
    :return: (lemma, pos)
    """
    if pos in ('.', '?', ','):
        return '<PUNCT>', pos
    placeholders = {'NUM': '<NUM>', 'SYM': '<SYM>'}
    return placeholders.get(pos, lemma), pos
def _remove_missing_resource_ids(config_records, resource_ids): """ Remove resource_ids found in config_results and return any remaining resource_ids :param config_records: config compliance records :param resource_ids: list of resource ids :returns: list of resource IDs found in compliance records """ resources_in_config = [] for config_record in config_records: config_record_id = config_record['EvaluationResultIdentifier'][ 'EvaluationResultQualifier']['ResourceId'] if config_record_id in resource_ids: resources_in_config.append(config_record_id) return resources_in_config
def _add_new_line_if_none(s: str): """Since graphviz 0.18, need to have a newline in body lines. This util is there to address that, adding newlines to body lines when missing.""" if s and s[-1] != "\n": return s + "\n" return s
def factors_of(number):
    """Get the factors of a given number.

    :param number: The number for which the factors will be obtained.
    :type number: int
    :rtype: list[int]
    :raise: TypeError when number is not exactly an int
    """
    # exact type check (deliberately rejects bool/float), as before
    if type(number) is not int:
        raise TypeError("Factors may only be acquired for an integer.")
    return [d for d in range(1, number + 1) if number % d == 0]
def string(x, n):
    """Convert a float, x, to a string with n significant figures.

    This function returns a decimal string representation of a float
    to a specified number of significant figures.

    >>> string(9.80665, 3)
    '9.81'
    >>> string(0.0120076, 3)
    '0.0120'
    >>> string(100000, 5)
    '100000'

    Note the last representation is, without context, ambiguous. This
    is a good reason to use scientific notation, but it's not always
    appropriate.

    Note
    ----
    Performing this operation as a set of string operations arguably
    makes more sense than a mathematical operation conceptually. It's
    the presentation of the number that is being changed here, not the
    number itself (which is in turn only approximated by a float).

    Raises
    ------
    ValueError
        If n < 1 (at least one significant digit is required).
    """
    n = int(n)
    x = float(x)

    if n < 1:
        raise ValueError("1+ significant digits required.")

    # retrieve the significand and exponent from the S.N. form
    s, e = ''.join(('{:.', str(n - 1), 'e}')).format(x).split('e')
    e = int(e)  # might as well coerce now

    if e == 0:
        # Significand requires no adjustment
        return s

    s = s.replace('.', '')
    if e < 0:
        # Placeholder zeros need creating
        return ''.join(('0.', '0' * (abs(e) - 1), s))
    else:
        # Decimal place needs shifting
        s += '0' * (e - n + 1)  # s now has correct s.f.
        i = e + 1
        sep = ''
        if i < n:
            sep = '.'
        # BUGFIX: was `s[0] is '-'` — identity comparison with a string
        # literal, which only worked due to CPython interning and emits
        # a SyntaxWarning on Python 3.8+
        if s[0] == '-':
            i += 1
        return sep.join((s[:i], s[i:]))