content
stringlengths
42
6.51k
def isUniqueWithSet(str):
    """Return True when the string contains no repeated characters."""
    # (parameter name `str` shadows the builtin but is kept for interface
    # compatibility)
    distinct_chars = set(str)
    return len(distinct_chars) == len(str)
def shape_is_known(shape):
    """Check whether a shape is completely known with or without np semantics.

    A shape is known when it is not None and every dimension size is
    non-negative (-1 marks an unknown dimension). Please see the doc of
    is_np_shape for more details.

    Parameters
    ----------
    shape : tuple of int or None

    Returns
    -------
    bool
    """
    if shape is None:
        return False
    unknown_dim_size = -1
    # An empty (scalar) shape has no dimensions, hence none unknown.
    # (The original returned the always-true `unknown_dim_size == -1` here.)
    if len(shape) == 0:
        return True
    for dim_size in shape:
        if dim_size == unknown_dim_size:
            return False
        assert dim_size > unknown_dim_size, (
            "shape dimension size cannot be less than {}, while "
            "received {}".format(unknown_dim_size, dim_size)
        )
    return True
def kelvin(therm: float) -> float:
    """Convert a pyccd-scaled Celsius thermal value back to kelvin.

    Pyccd converts (and scales) the thermal values into Celsius; this
    converts them back.

    Args:
        therm: thermal value in degrees Celsius (scaled)

    Returns:
        thermal value in kelvin
    """
    # NOTE(review): 27315 looks like 273.15 scaled by 100 while therm is
    # divided by 10 — confirm the intended output scale against pyccd.
    celsius = therm / 10
    return celsius + 27315
def adjust_epochs(train_epochs, width_scale, update_frequency,
                  start_iteration, n_growth_steps, steps_per_epoch):
    """Extend training so total FLOPs match the big baseline.

    Training starts at a reduced width, which saves compute both before
    growth begins and (decreasingly) during the growth phase; the saved
    steps are converted into extra epochs.
    """
    saved_fraction = 1 - width_scale
    # Full savings while still at the initial (small) width.
    saved_steps = saved_fraction * start_iteration
    growth_duration = update_frequency * (n_growth_steps - 1)
    # During growth the width ramps up linearly, so on average half the
    # fraction is saved (triangle area under the ramp).
    saved_steps += saved_fraction / 2 * growth_duration
    return train_epochs + int(saved_steps / steps_per_epoch)
def closest_point(l1, l2, point):
    """Project `point` onto the line through l1 and l2 via Cramer's rule.

    reference: https://en.wikipedia.org/wiki/Cramer%27s_rule

    :param l1: start pos
    :param l2: end pos
    :param point: point to project
    :return: [x, y] of the closest point on the line; `point` itself when
        the line is degenerate (l1 == l2)
    """
    a = l2[1] - l1[1]
    b = l1[0] - l2[0]
    c1 = a * l1[0] + b * l1[1]
    c2 = -b * point[0] + a * point[1]
    det = a * a + b * b
    if det == 0:
        # Degenerate segment: every point is equally close.
        cx, cy = point
    else:
        cx = (a * c1 - b * c2) / det
        cy = (a * c2 + b * c1) / det
    return [cx, cy]
def sorting(items):
    """Sort `items` ascending in place and return the same list object."""
    items[:] = sorted(items)
    return items
def _gen_perm(order, mode): """ Generate the specified permutation by the given mode. Parameters ---------- order : int the length of permutation mode : int the mode of specific permutation Returns ------- list the axis order, according to Kolda's unfold Examples -------- >>> perm = _gen_perm(6, 2) list([2, 5, 4, 3, 1, 0]) """ tmp = list(range(order - 1, -1, -1)) tmp.remove(mode) perm = [mode] + tmp return perm
def get_candidates(row_no, col_no, row_candidates, col_candidates,
                   square_candidates):
    """Candidates for a cell: the 3-way intersection of its row, column
    and enclosing 3x3 square candidate sets."""
    box = square_candidates[row_no // 3][col_no // 3]
    return row_candidates[row_no] & col_candidates[col_no] & box
def check_collision(board, shape, offset):
    """Return True if `shape`, placed at the (x, y) `offset`, overlaps a
    filled cell on `board`."""
    off_x, off_y = offset
    return any(
        cell and board[cy + off_y][cx + off_x]
        for cy, row in enumerate(shape)
        for cx, cell in enumerate(row)
    )
def obter_pos_c(pos):
    """obter_pos_c: position -> str

    Return the column ('c') component of a position.
    """
    return pos['c']
def get_pf_index(sib, paging):
    """Compute the paging-frame index from the SIB2 config and an
    incoming S1AP paging message.

    Complexity: O(1)

    :rtype: int
    """
    t = min(sib['defaultPagingCycle'], paging['PagingDRX'])
    n = min(t, int(sib['nB'] * t))
    ue_index = paging['UEIdentityIndexValue'] % n
    return int((t / n) * ue_index)
def nmea2deg(nmea):
    """Convert an NMEA angle (dddmm.mm) to decimal degrees."""
    value = float(nmea) / 100.0
    degrees = int(value)
    minutes = (value - degrees) * 100
    return degrees + minutes / 60.0
def modal_form(title='', modal_id='id_modal_form',
               form_tag_id='id_modal_form_tag'):
    """Build the context dict for the bootstrap modal-form template,
    including the element ids used to manipulate form submission."""
    context = {
        'modal_id': modal_id,
        'title': title,
        'form_tag_id': form_tag_id,
    }
    return context
def _pad_list(given_list, desired_length, padding=None): """ Pads a list to be of the desired_length. """ while len(given_list) < desired_length: given_list.append(padding) return given_list
def wrap_braces_if_not_exist(value):
    """Wrap `value` in '{{ }}' unless it already contains both brace pairs."""
    already_templated = '{{' in value and '}}' in value
    if already_templated:
        return value
    return '{{' + value + '}}'
def conv_kwargs_helper(norm: bool, activation: bool):
    """Build kwargs to force en-/disable the normalization and activation
    layers of a conv generator that has them by default.

    Args:
        norm: en-/disable normalization layer
        activation: en-/disable activation layer

    Returns:
        dict: keyword arguments to pass to conv generator
    """
    return {"add_norm": norm, "add_act": activation}
def auc_helper(relationships, run_rules, run_confidences):
    """Build target/score lists for AUC-ROC of learned rules against a
    set of known relationships.

    Each known relationship contributes a positive target scored with the
    matching learned rule's confidence (0.0 if never learned). Each
    learned rule that is neither trivial (head in its own body) nor a
    known relationship contributes a negative target with its confidence.
    """
    targets, scores = [], []
    for head, body in relationships.items():
        targets.append(1.0)
        try:
            idx = run_rules.index([head, body])
            scores.append(run_confidences[idx])
        except ValueError:
            scores.append(0.0)
    for j, rule in enumerate(run_rules):
        head, body = rule[0], rule[1]
        # Skip trivial rules whose head appears in their own body.
        if head in body:
            continue
        # Skip rules matching a known relationship (already counted above).
        if head in relationships and relationships[head] == body:
            continue
        targets.append(0.0)
        scores.append(run_confidences[j])
    return targets, scores
def sumall(*args):
    """Return the sum of any number of numeric arguments.

    Returns 0 when called with no arguments (the builtin sum's default).
    """
    return sum(args)
def ProtocolNameToNumber(protocols, proto_to_num, name_to_num_map):
    """Convert a protocol name to a numeric value.

    Args:
      protocols: list of protocol names to inspect
      proto_to_num: list of protocol names that should be converted to numbers
      name_to_num_map: map of protocol names to protocol numbers

    Returns:
      list of protocol names, converted if applicable
    """
    return [
        name_to_num_map[protocol] if protocol in proto_to_num else protocol
        for protocol in protocols
    ]
def modinv(a, m):
    """Return the multiplicative inverse of `a` in the integers modulo `m`.

    That is, return b such that a * b == 1 (mod m).

    Raises:
        ValueError: when gcd(a, m) != 1, i.e. no inverse exists. (This
        check existed as a commented-out line; without it a meaningless
        value was silently returned.)
    """
    # Extended Euclidean algorithm.
    lastrem, rem = abs(a), abs(m)
    x, lastx = 0, 1
    y, lasty = 1, 0
    while rem:
        lastrem, (quotient, rem) = rem, divmod(lastrem, rem)
        x, lastx = lastx - quotient * x, x
        y, lasty = lasty - quotient * y, y
    if lastrem != 1:
        raise ValueError('%d has no inverse modulo %d' % (a, m))
    x = lastx * (-1 if a < 0 else 1)
    return x % m
def _make_attrs(annotation, annotation_dict, export_names, index): """Create a list with attribute-value strings for a structural element.""" attrs = [] for name, annot in annotation_dict[annotation].items(): export_name = export_names.get(":".join([annotation, name]), name) annotation_name = export_names.get(annotation, annotation) if annotation_name == "sentence": annotation_name = "sent" attrs.append("%s_%s = %s" % (annotation_name, export_name, annot[index])) return attrs
def download_content(username, project_name, resource_id, data, files, is_project):
    """Extract downloadable file entries from a GitLab project tree.

    Parameters
    ----------
    username: str
        The user's Gitlab username
    project_name: str
        The name of the project being downloaded
    resource_id: str
        The id of the resource being downloaded
    data: dict
        The initial project data
    files: list
        A list of dictionaries with file information (appended in place)
    is_project: boolean
        Whether what is to be downloaded is a project or not

    Returns
    -------
    A list of file dictionaries, a list of empty containers (always empty
    here), and the action metadata.
    """
    action_metadata = {"sourceUsername": username}
    api_template = ("https://gitlab.com/api/v4/projects/{}/repository/"
                    "files/{}?ref=master")
    # Build a retrievable content URL for every blob entry.
    for entry in data:
        if entry['type'] != 'blob':
            continue
        encoded_path = entry['path'].replace('/', '%2F')
        file_url = api_template.format(resource_id, encoded_path)
        # Projects are nested one level deeper than plain directories.
        if is_project:
            file_path = "/{}/{}".format(project_name, entry['path'])
        else:
            file_path = "/{}".format(entry['path'])
        files.append({
            'file': file_url,
            'hashes': {},
            'title': entry['name'],
            'path': file_path,
            'source_path': "/{}/{}".format(project_name, entry['path']),
            'extra_metadata': {}})
    return files, [], action_metadata
def make_good_bad(cameras, car_id):
    """Split cars across cameras into those matching `car_id` and the rest.

    cameras (list(list(chips))): list of the cameras with the cars in each
    car_id: the id of the car of interest

    Returns a (matching, others) tuple of lists.
    """
    matching, others = [], []
    for camera in cameras:
        for car in camera:
            bucket = matching if car.car_id == car_id else others
            bucket.append(car)
    return (matching, others)
def mySdeCoeffsFunc(xList, u, r, mu, sig):
    """Return the drift and diffusion coefficients of the SDE.

    @param[in] xList  list of length one containing state values
    @param[in] u      control value in the interval [0, 1]
    @param[in] r      interest rate of the riskless asset
    @param[in] mu     interest rate of the risky asset
    @param[in] sig    volatility of the dynamics
    @return drift and diffusion coefficients for the given inputs
    """
    state = xList[0]
    drift = (r + u * (mu - r)) * state
    diffusion = sig * u * state
    return drift, diffusion
def dict_lookup(d, value):
    """Template filter: look up `value` as a key in dict `d`.

    Returns None when the key is absent (dict.get default).
    """
    return d.get(value)
def nextDWord(offset):
    """Round `offset` up to the next multiple of 4 (4-byte alignment)."""
    # Masking the low two bits of (offset + 3) is equivalent to the
    # shift-down/shift-up pair for Python's floor-dividing integers.
    return (offset + 3) & ~3
def split_dictionary(output_dict):
    """Splits a dictionary into two lists of full file paths and IDs.

    Parameters
    ----------
    output_dict : dictionary
        A dictionary with keys: 'id_llamado' and 'fullpath'

    Returns
    -------
    Two lists
        The 'fullpath' list first, then the 'id_llamado' list.
        (The original docstring stated the opposite order; the code
        returns paths first.)
    """
    id_llamado = output_dict['id_llamado']
    filenames = output_dict['fullpath']
    return filenames, id_llamado
def pythrule(first, second):
    """Calculate the area of a right-angled triangle from its two legs.

    :type first: number
    :param first: The length of the first axis (x or y)
    :type second: number
    :param second: The length of the second axis (x or y)
    """
    rectangle_area = first * second
    return rectangle_area / 2
def day_lags(lags):
    """Translate day lags into 15-minute lags (96 quarter-hours per day)."""
    quarter_hours_per_day = 96
    return [day * quarter_hours_per_day for day in lags]
def is_string(val):
    """Return whether `val` is a ``str`` instance."""
    return isinstance(val, str)
def _iteritems(d): """Like d.iteritems, but accepts any collections.Mapping.""" return d.iteritems() if hasattr(d, "iteritems") else d.items()
def makeDestName(fileName):
    """Map a raw dump file name to its canonical name in the class directory.

    The data files are forum.mongo, plus .sql files for users, profiles,
    student modules, course enrollment, and certificates; anything
    unrecognized keeps its original name. This will need to be changed
    when different data files are produced.
    """
    # Check order matters: e.g. 'courseenrollment' must be tested before
    # the bare 'course' substring.
    if '.mongo' in fileName:
        return 'forum.mongo'
    if 'profile' in fileName:
        return 'profiles.sql'
    if 'certificate' in fileName:
        return 'certificates.sql'
    if 'studentmodule' in fileName:
        return 'studentmodule.sql'
    if 'auth_user' in fileName:
        return 'users.sql'
    if 'courseenrollment' in fileName:
        return 'enrollment.sql'
    if 'user_id_map' in fileName:
        return 'user_id_map.sql'
    if 'course_structure' in fileName:
        return 'course_structure.json'
    if 'course' in fileName and 'xml.tar.gz' in fileName:
        return 'course.xml.tar.gz'
    return fileName
def tab(text, n=1):
    """Indent generated code by `n` 4-space indents.

    Every line, including blank ones, receives the prefix.
    """
    prefix = '    ' * n
    return '\n'.join(prefix + line for line in text.split('\n'))
def inputoptions(datatype, item):
    """Home made match function"""
    if datatype != "invested_and_value":
        # return nothing if no match
        return None
    value_row = {
        "date": item["date"],
        "value": item["total_value"],
        "category": "Value",
    }
    invested_row = {
        "date": item["date"],
        "value": item["total_invested"],
        "category": "Invested",
    }
    return [value_row, invested_row]
def get_thresholds(bins=1, interval=(0.5, 0.5)):
    """Return `bins` evenly spaced thresholds over `interval`, plus the
    interval's upper bound as the final entry.

    :param bins: the number of thresholds
    :param interval: (min, max) pair delimiting the range
    :return: list of bins+1 threshold values
    """
    step = float(interval[1] - interval[0]) / bins
    thresholds = []
    current = interval[0]
    # Accumulate (rather than multiply) to match the original float behavior.
    for _ in range(bins):
        thresholds.append(current)
        current += step
    thresholds.append(interval[1])
    return thresholds
def turn_to_words(word):
    """Split `word` on runs of non-alphanumeric characters, if any.

    Returns the list of alphanumeric substrings. Previously a word ending
    in a separator (or an empty word) produced a spurious trailing empty
    string; empty fragments are now never emitted.
    """
    res = []
    subword = ""
    for char in word:
        if char.isalnum():
            subword += char
        elif subword:
            res.append(subword)
            subword = ""
    if subword:
        res.append(subword)
    return res
def _include_exclude_list(include, exclude): """ create the list of queries that would be checked for include or exclude """ keys = [] if include: for item in include: keys.append((item, 'included')) if exclude: for item in exclude: keys.append((item, 'excluded')) return keys
def _flatten(list_of_lists): """Transform a list of lists into a single list, preserving relative order.""" return [item for sublist in list_of_lists for item in sublist]
def percent_invert(value, total):
    """Convert avail/total values into a used percentage.

    Falls back to 100.0 when `total` is zero/falsy or the computed
    percentage is negative.
    """
    if total:
        pct = (float(total) - float(value)) * 100.0 / float(total)
        if pct >= 0.0:
            return pct
    return 100.0
def human_readable_time(delta, terms=1):
    """Convert a time delta to a human readable string.

    Arguments:
        delta: time delta, passed to relativedelta(hours=...) — i.e.
            interpreted as hours. NOTE(review): the original docstring
            said "time in seconds"; confirm callers' units.
        terms: how many word terms to use to describe the timestep

    Returns:
        str: human readable string such as "2 days 3 hours"
    """
    # Inspired by http://stackoverflow.com/questions/26164671/convert-seconds-to-readable-format-time
    from dateutil.relativedelta import relativedelta

    intervals = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']
    # (a dead no-op `if delta > 31: delta = delta` was removed here)
    rd = relativedelta(hours=delta)
    parts = []
    for unit in intervals[:terms]:
        amount = getattr(rd, unit)
        if amount:
            parts.append('{} {}'.format(amount, unit))
    return ' '.join(parts)
def class_name_to_slug(name):
    """Strip the 'Directive' suffix and slugify the remaining name.

    Example: Hands_OnDirective --> hands-on
    """
    base = name.split('Directive')[0]
    return base.lower().replace('_', '-')
def number_of_cigarette(packCig):
    """Count the cigarettes in a packet of three rows.

    :param packCig: packet of cigarettes (list containing three lists)
    :returns: int, the common length of the three rows
    :raises ValueError: when the three rows differ in length
    """
    row_lengths = (len(packCig[0]), len(packCig[1]), len(packCig[2]))
    if row_lengths[0] == row_lengths[1] == row_lengths[2]:
        return row_lengths[0]
    raise ValueError(" Erreur de len() au niveau des listes dans les listes")
def _NodeLabel(data): """Helper callback to set default node labels.""" node_type = data.get("type", "statement") if node_type == "statement": return data.get("text", "") elif node_type == "identifier": return data.get("name", "") elif node_type == "magic": return data.get("name", "") else: return ""
def lineSegment(lineWidth, x1, y1, x2, y2, idField):
    """Return the SVG markup for a black line segment with the given
    stroke width, endpoints and element id."""
    fmt = ('<line stroke="black" stroke-width="{}"\n'
           ' x1="{}" y1="{}" x2="{}" y2="{}"'
           ' id="{}" />\n')
    return fmt.format(lineWidth, x1, y1, x2, y2, idField)
def busca_sentinela(list_to_search, value):
    """Sentinel linear search.

    Temporarily appends `value` as a sentinel so the scan loop needs no
    bounds check, then restores the list before returning.

    Arguments:
        list_to_search: list to search in
        value: value to look for

    Returns the index of `value` in `list_to_search`, or -1 if absent.
    """
    list_to_search.append(value)
    index = 0
    while list_to_search[index] != value:
        index += 1
    list_to_search.pop()
    # Landing on the (now removed) sentinel slot means the value was absent.
    return -1 if index == len(list_to_search) else index
def verifyItExists(asset: str, available_assets: list) -> bool:
    """Return True when an asset with the same code and issuer already
    appears in the account balances (i.e. a trustline is established)."""
    parts = asset.split(':')
    code, issuer = parts[0], parts[1]
    return any(
        entry["asset_code"] == code and entry["asset_issuer"] == issuer
        for entry in available_assets
    )
def is_leap(year):
    """Determine whether a year is a leap year (Gregorian rules)."""
    if year % 4:
        return False
    # Century years are leap only when divisible by 400.
    return year % 100 != 0 or year % 400 == 0
def octal_to_string(octal):
    """Convert a numeric permission (e.g. 755) into rwx string form."""
    value_letters = ((4, "r"), (2, "w"), (1, "x"))
    result = ""
    for digit in (int(ch) for ch in str(octal)):
        # Decompose each digit into its r/w/x components.
        for value, letter in value_letters:
            if digit >= value:
                result += letter
                digit -= value
            else:
                result += "-"
    return result
def norm3(vec3):
    """Euclidean norm of a 3-element vector.

    Hand-rolled because it is much faster for a single small vector:
    norm3: 475 ns | np.linalg.norm: 4.31 us
    """
    x, y, z = vec3
    squared = x * x + y * y + z * z
    return squared ** 0.5
def convertMac(macAddress):
    """Normalize a MAC address to upper-case colon-separated form.

    Accepts dash-separated, colon-separated, or bare hex-string input.
    """
    # Reference: https://forums.freebsd.org/threads/python-inserting-a-sign-after-every-n-th-character.26881/
    mac = str(macAddress).upper()
    if "-" in mac:
        return mac.replace('-', ':')
    if ":" in mac:
        return mac
    # Bare hex string: join successive 2-character bytes with colons
    # (a trailing odd character is dropped, matching the zip-based original).
    pairs = [mac[i] + mac[i + 1] for i in range(0, len(mac) - 1, 2)]
    return ':'.join(pairs)
def str_to_list(string):
    """Parse a string like '[1, 2, 3]' into a list of ints.

    Strips the surrounding brackets and whitespace, then converts each
    comma-separated token. '[]' now yields an empty list (previously it
    raised ValueError on int('')).
    """
    inner = string[1:-1].replace(' ', '')
    if not inner:
        return []
    return [int(token) for token in inner.split(',')]
def count_run(lst, s_run):
    """Count the length of one run starting at index `s_run`.

    Returns [start, end, increasing, length]: the starting/ending indices,
    a boolean marking an increasing (non-decreasing) vs strictly
    decreasing run, and the length of the run.
    """
    increasing = True
    # If count_run started at the final position of the array
    if s_run == len(lst) - 1:
        return [s_run, s_run, increasing, 1]
    else:
        e_run = s_run
        # Decreasing run (strictly decreasing):
        if lst[s_run] > lst[s_run + 1]:
            while lst[e_run] > lst[e_run + 1]:
                e_run += 1
                # Stop before reading past the end of the list.
                if e_run == len(lst) - 1:
                    break
            increasing = False
            return [s_run, e_run, increasing, e_run - s_run + 1]
        # Increasing run (non-decreasing):
        else:
            while lst[e_run] <= lst[e_run + 1]:
                e_run += 1
                # Stop before reading past the end of the list.
                if e_run == len(lst) - 1:
                    break
            return [s_run, e_run, increasing, e_run - s_run + 1]
def untokenize(tokens): """Return inverse of the Adeft word tokenizer The inverse is inexact. For simplicity, all white space characters are replaced with a space. An exact inverse is not necessary for adeft's purposes. Parameters ---------- tokens : list of tuple List of tuples of the form (word, (start, end)) giving tokens and coordinates as output by Adeft's word tokenizer Returns ------- output : str The original string that produced the input tokens, with the caveat that every white space character will be replaced with a space. """ # Edge cases: input text is empty string or only has one token if len(tokens) == 0: return '' elif len(tokens) == 1: return tokens[0][0] # This looks messy but is simple conceptually. # At each step add the current token and a number of spaces determined # by the coordinates of the previous token and the current token. output = [tokens[0][0]] + [' ']*(tokens[1][1][0] - tokens[0][1][1] - 1) for index in range(1, len(tokens)-1): output.append(tokens[index][0]) output.extend([' ']*(tokens[index+1][1][0] - tokens[index][1][1] - 1)) output.append(tokens[-1][0]) return ''.join(output)
def remove_selected_from_list(the_list, selected_indexes):
    """Return a copy of `the_list` without the elements at the given indexes.

    :param the_list: source list (left unmodified)
    :param selected_indexes: indexes to drop
    :return: new list with the selected positions removed
    """
    # Set membership makes this O(n) rather than O(n * len(selected_indexes)).
    selected = set(selected_indexes)
    return [item for i, item in enumerate(the_list) if i not in selected]
def polygonal_number(sides: int, n: int) -> int:
    """Return the n-th polygonal number for a polygon with `sides` sides.

    A polygonal number counts dots or pebbles arranged in the shape of a
    regular polygon.

    :param sides: the number of sides of the polygon
    :param n: a positive number
    :return: the n-th polygonal number for the given number of sides
    """
    # n*(n-1) is always even, so the integer division is exact.
    triangular_part = n * (n - 1) // 2
    return (sides - 2) * triangular_part + n
def _linspace(start, stop, num): """Returns `num` uniformly spaced floats between `start` and `stop`.""" if num == 1: return [start] return [start + (stop - start) * i / (num - 1.0) for i in range(num)]
def Sphere(name, radius=1.0, res=0, pos=[0., 0., 0.]):
    """
    Create a hostobject of type sphere.

    @type name: string
    @param name: name of the sphere
    @type radius: float
    @param radius: the radius of the sphere
    @type res: float
    @param res: the resolution/quality of the sphere
    @type pos: array
    @param pos: the position of the cylinder
    @rtype: hostObject
    @return: the created sphere

    NOTE(review): currently a stub — the c4d object creation is commented
    out and the function always returns None; QualitySph and every
    parameter are unused until that code is restored.
    """
    # Quality level (0-5) -> sphere segment count, for the commented c4d code.
    QualitySph = {"0": 6, "1": 4, "2": 5, "3": 6, "4": 8, "5": 16}
    baseSphere = None  # c4d.BaseObject(c4d.Osphere)
    # baseSphere radius = radius
    # baseSphere resolution = QualitySph[str(res)]
    # baseSphere position = position
    return baseSphere
def straight(start, dice):
    """Score `dice` for LITTLE_STRAIGHT or BIG_STRAIGHT.

    Sorts `dice` in place (side effect preserved), then returns 30 when
    the sorted dice are consecutive starting at `start`, otherwise 0.
    """
    dice.sort()
    for expected, die in enumerate(dice, start=start):
        if die != expected:
            return 0
    return 30
def RPL_USERS(sender, receipient, message):
    """Reply Code 393 — format a USERS reply line.

    `receipient` is accepted for signature parity with the other reply
    builders but does not appear in the reply text.
    """
    return "<{}>: {}".format(sender, message)
def path(start, end):
    """Return the integer lattice points strictly between `start` and `end`
    on the straight segment joining them; only exact integer intersections
    are kept and the endpoints are never included.

    >>> path((1,1), (1,1))
    []
    >>> path((0,0), (2,2))
    [(1, 1)]
    >>> path((1,2), (7,5))
    [(3, 3), (5, 4)]
    >>> path((9, 10), (6, 10))
    [(8, 10), (7, 10)]
    >>> path((10, 9), (10, 6))
    [(10, 8), (10, 7)]
    >>> path((1,1), (3,8))
    []
    >>> path((4, 2), (4, 4))
    [(4, 3)]
    """
    (x, y) = start
    (ex, ey) = end
    dx = ex - x
    dy = ey - y
    path = []  # NOTE: local shadows the function name
    if dx == 0 and dy == 0:
        # Degenerate: start == end, no intermediate points.
        pass
    elif dy == 0 or (dx != 0 and abs(dx) < abs(dy)):
        # Step along x; keep points whose interpolated y is an integer.
        r = range(1, dx) if dx > 0 else range(-1, dx, -1)
        for mx in r:
            my = dy * mx / dx
            if my.is_integer():
                my = int(my)
                path.append((x + mx, y + my))
    else:
        # Step along y; keep points whose interpolated x is an integer.
        r = range(1, dy) if dy > 0 else range(-1, dy, -1)
        for my in r:
            mx = dx * my / dy
            if mx.is_integer():
                mx = int(mx)
                path.append((x + mx, y + my))
    return path
def has_dupes(sequence, target):
    """Return True when `target` appears (by identity) more than once in
    `sequence`; False for zero or one occurrence.

    Kept as a manual scan: compared to an .index-based version this has
    less function-call overhead and is usually the same speed, only
    falling behind by microseconds on very large collections.
    """
    seen_once = False
    for element in sequence:
        if element is target:
            if seen_once:
                return True
            seen_once = True
    return False
def maximum_digital_sum(a: int, b: int) -> int:
    """
    Considering natural numbers of the form, a**b, where a, b < 100,
    what is the maximum digital sum?
    :param a:
    :param b:
    :return:

    >>> maximum_digital_sum(10,10)
    45

    >>> maximum_digital_sum(100,100)
    972

    >>> maximum_digital_sum(100,200)
    1872
    """
    def digit_sum(value: int) -> int:
        # Sum of the decimal digits of `value`.
        return sum(int(digit) for digit in str(value))

    return max(
        digit_sum(base ** power)
        for base in range(a)
        for power in range(b)
    )
def power(a, b):
    """Compute a to the power of b using recursion."""
    if b == 0:
        return 1
    # Peel one factor off per recursive call until the base case.
    return a if b == 1 else a * power(a, b - 1)
def difference(v1, v2):
    """Element-wise difference v1 - v2 of two vectors."""
    result = []
    for a, b in zip(v1, v2):
        result.append(a - b)
    return result
def confusion_matrix(y_true, y_pred, pos_label=1):
    """
    Function to return a confusion matrix given a set of predictions and
    ground truths.

    Labels are compared against the hard-coded encoding +1 (positive) and
    -1 (negative); pairs with any other values are silently skipped.

    Args:
        y_true : List of ground truths
        y_pred : List of predictions
        pos_label : value to consider as positive. Default: 1.
            NOTE(review): currently unused — the +1/-1 encoding is fixed
            in the comparisons below; confirm before passing other labels.

    Returns:
        dict with true positives, true negatives, false positives and
        false negatives
    """
    cm = {'TP': 0, 'FP': 0, 'FN': 0, 'TN': 0}
    for true, pred in zip(y_true, y_pred):
        if true == pred == 1:
            cm['TP'] += 1
        elif true == pred == -1:
            cm['TN'] += 1
        elif true == -pred == -1:
            # true == -1, pred == 1: predicted positive, actually negative.
            cm['FP'] += 1
        elif true == -pred == 1:
            # true == 1, pred == -1: predicted negative, actually positive.
            cm['FN'] += 1
    return cm
def left_pad(string, size):
    """Pad `string` with leading zeros to length `size`.

    Uses str.zfill, which is sign-aware (keeps a leading '-' in front of
    the zeros) and leaves strings already long enough unchanged.
    """
    return string.zfill(size)
def parseCoords(s):
    """Parse a 'chr:start-end' string and return the three components as
    a (chrom, start, end) tuple of strings."""
    colon = s.find(":")
    dash = s.find("-")
    chrom = s[:colon]
    start = s[colon + 1:dash]
    end = s[dash + 1:]
    return (chrom, start, end)
def _check_axis_in_range(axis, ndim): """Checks axes are with the bounds of ndim""" if not isinstance(axis, int): raise TypeError(f'axes should be integers, not {type(axis)}') if not -ndim <= axis < ndim: raise ValueError(f'axis {axis} is out of bounds for array of dimension {ndim}') return axis % ndim
def osiris_url(course_code, calendar_year) -> str:
    """Resolve the params into an Osiris course-catalogue URL.

    Calendar year is understood as the academic year that started in that
    year, e.g. 2018 means September 2018 / July 2019.

    :param course_code: The course-code you are looking for, [0-9a-zA-Z]
    :param calendar_year: int
    :return: Osiris URL for course_code in calendar_year
    """
    base = "https://osiris.utwente.nl/student/OnderwijsCatalogusSelect.do"
    query = "?selectie=cursus&cursus={}&collegejaar={}".format(
        course_code, calendar_year)
    return base + query
def correct_vgene(v, chain='B'):
    """Normalize a TCR V-gene name to the standard format.

    Handles 'TCR' prefixes and zero-padded gene/allele numbers; only the
    first of any ';'-separated alternatives is returned.

    Note: the original compared strings with `is`, which tests identity,
    not equality — fragile and a SyntaxWarning on modern Python; fixed
    to use ==.
    """
    if chain == 'B':
        v = v.replace('TCR', 'TR').replace('TRBV0', 'TRBV').replace('-0', '-')
    elif chain == 'A':
        v = v.replace('TCR', 'TR').replace('TRAV0', 'TRAV').replace('-0', '-')
    else:
        print("chain must be 'A' or 'B'. No corrections were made")
    return v.split(';')[0]
def get_binary_column_type(column_type):
    """Return an appropriate binary column type for the input one,
    cf. https://codex.wordpress.org/Converting_Database_Character_Sets."""
    text_to_blob = {
        'char': 'binary',
        'text': 'blob',
        'tinytext': 'tinyblob',
        'mediumtext': 'mediumblob',
        'longtext': 'longblob',
    }
    lowered = column_type.lower()
    if lowered in text_to_blob:
        return text_to_blob[lowered]
    if lowered.startswith('varchar('):
        # Preserve the declared length: varchar(255) -> varbinary(255).
        return 'varbinary(%s)' % column_type[8:-1]
    return 'blob'
def enable_squash(input):
    """Collapse long enable-specific keys into a single 'enabled' key.

    Iterates over the parsed-output dict and replaces any key ending in
    "enabled" (matched at the end of the name for safety) with simply
    "enabled"; for example ``{"Path-selection enabled": False}`` becomes
    ``{"enabled": False}``. All other keys pass through unchanged.

    :param input: A dictionary of parsed output
    :return result: the rewritten dictionary
    :raises KeyError: if more than one key ends with "enabled"
    """
    result = {}
    for key, value in input.items():
        if not key.endswith('enabled'):
            result[key] = value
            continue
        if 'enabled' in result:
            raise KeyError('Duplicate key exists')
        result['enabled'] = value
    return result
def from_datastore(entity):
    """Translate a Datastore result into the application's list format.

    Datastore typically returns [Entity{key: (kind, id), prop: val, ...}];
    this returns [title, author, date, recipe]. When a list is passed,
    the LAST entity is used and popped off the caller's list (mutation
    preserved from the original implementation). Falsy input yields None.
    """
    if not entity:
        return None
    if isinstance(entity, list):
        entity = entity.pop()
    return [entity[field] for field in ('title', 'author', 'date', 'recipe')]
def sigmoid_backward(dout, cache):
    """Backward pass of the sigmoid function.

    For y = sigmoid(z) the derivative is dy/dz = y * (1 - y); the cached
    value is the forward-pass output y.

    :param dout: upstream gradient
    :param cache: sigmoid output stored during the forward pass
    :return: dx, gradient with respect to the sigmoid input

    Note: the previous implementation computed x * (x - 1) * dout, which
    negates the gradient — the sigmoid derivative is y(1 - y), not y(y - 1).
    """
    y = cache
    return y * (1 - y) * dout
def out_first_order(triples):
    """Sort triples so outward (non-inverted, i.e. true) edges come first.

    Relies on False < True; the sort is stable, so relative order within
    each group is preserved.
    """
    return sorted(triples, key=lambda triple: triple.inverted)
def same_shape(t1: tuple, t2: tuple) -> bool:
    """Return True if t1 and t2 have the same length; False otherwise.

    Note: either tuple being empty/falsy yields False — so two empty
    tuples also compare False under this rule (behavior preserved from
    the original implementation).
    """
    if not (t1 and t2):
        return False
    return len(t1) == len(t2)
def calculate_plane_coords(coords_list, infile_dims):
    """Calculate the coordinates of the triangular plane spanning from
    roughly around the participant's nose, down to roughly below the rear
    of the neck.

    :type coords_list: list
    :param coords_list: A list of lists, describing the three coordinate
        points of the triangular plane.
    :type infile_dims: list
    :param infile_dims: A list of the NIFTI file's dimensions.
    :rtype: dict
    :return: A dictionary mapping the (x, y) coordinate pairs to their
        z coordinate on the plane (clamped to the volume's z range).
    """
    import numpy as np

    # get the vectors connecting the points
    u = []
    for a_pt, c_pt in zip(coords_list[0], coords_list[2]):
        u.append(int(a_pt - c_pt))
    v = []
    for b_pt, c_pt in zip(coords_list[1], coords_list[2]):
        v.append(int(b_pt - c_pt))
    # vector cross product: normal of the plane spanned by u and v
    n = np.cross(u, v)
    # normalize the vector
    n = n / np.linalg.norm(n, 2)
    # plane equation: n . p = constant, anchored at the first point
    constant = np.dot(n, np.asarray(coords_list[0]))
    # now determine the z-coordinate for each pair of x,y
    plane_dict = {}
    for yvox in range(0, infile_dims[1]):
        for xvox in range(0, infile_dims[0]):
            # solve n . (x, y, z) = constant for z
            zvox = (constant - (n[0] * xvox + n[1] * yvox)) / n[2]
            zvox = np.floor(zvox)
            # clamp z into the volume's slice range [1, dim_z]
            if zvox < 1:
                zvox = 1
            elif zvox > infile_dims[2]:
                zvox = infile_dims[2]
            plane_dict[(xvox, yvox)] = zvox
    return plane_dict
def countHits(hitMap):
    """Bin counts by hit and find the total.

    :param hitMap: dict mapping read -> hit (or list of hits), or an
        iterable of (read, hit) pairs
    :return: (total, counts) where total is the number of reads and
        counts maps each assignment to its number of reads
    """
    total = 0
    counts = {}
    # Accept either a dict or an iterable of pairs.
    hitIter = hitMap.items() if isinstance(hitMap, dict) else hitMap
    for read, hit in hitIter:
        total += 1
        # `isinstance(hit, list)` replaces the `type([])` comparison.
        if isinstance(hit, list):
            # A read may carry several assignments; count each of them.
            for h in hit:
                counts[h] = 1 + counts.get(h, 0)
        else:
            counts[hit] = 1 + counts.get(hit, 0)
    return (total, counts)
def merge_dict_list(merged, x):
    """Recursively merge `x` into `merged`; x is either a dict or a list.

    Lists concatenate. Dict keys absent from `merged` are copied over;
    keys present in both are merged recursively, unless x's value is
    None (which leaves merged's value untouched).

    Note: uses isinstance instead of the original `type(x) is list` so
    list subclasses are treated as lists too.
    """
    if isinstance(x, list):
        return merged + x
    for key, value in x.items():
        if key not in merged:
            merged[key] = value
        elif value is not None:
            merged[key] = merge_dict_list(merged[key], value)
    return merged
def unique_cluster_indices(cluster_indx):
    """Return a unique list of cluster indices.

    :param cluster_indx: Cluster index list of ClusterExpansionSetting,
        nested as symmgroup -> sizegroup -> cluster -> subcluster -> indx
        (cluster entries may be None)
    :return: list of unique indices in first-seen order
    """
    unique_indx = []
    seen = set()  # O(1) membership instead of scanning the result list
    for symmgroup in cluster_indx:
        for sizegroup in symmgroup:
            for cluster in sizegroup:
                if cluster is None:
                    continue
                for subcluster in cluster:
                    for indx in subcluster:
                        if indx not in seen:
                            seen.add(indx)
                            unique_indx.append(indx)
    return unique_indx
def jaccard(list1, list2):
    """Compute the Jaccard coefficient of two token lists.

    :param list1: token list 1
    :param list2: token list 2
    :return: |intersection| / |union| of the two token sets
    """
    set1, set2 = set(list1), set(list2)
    overlap = set1 & set2
    combined = set1 | set2
    return len(overlap) / len(combined)
def PF_op_pw(u, df, inverses, x):
    """Pointwise Perron-Frobenius operator.

    Evaluates PF(u)(x) = sum over branches of u(inv(x)) / |df(inv(x))|,
    where PF is the transfer operator associated with the system f.

    Arguments:
        u: a function with one argument
        df: the derivative of the dynamical system function f (one argument)
        inverses: list of one-argument functions, the branch inverses of f
        x: a float

    Returns:
        float: the value of PF(u) at the point x.

    NOTES:
        Uses a formula for the PF-operator that only works if f is
        piecewise monotonic.
    """
    total = 0
    for branch_inverse in inverses:
        preimage = branch_inverse(x)  # evaluate each inverse only once
        total += u(preimage) / abs(df(preimage))
    return total
def where(condition, x=None, y=None):
    """
    where(condition, [x, y])

    Return elements chosen from `x` or `y` depending on `condition`.

    .. note::
        When only `condition` is provided, this function is a shorthand for
        ``np.asarray(condition).nonzero()``. Using `nonzero` directly should
        be preferred, as it behaves correctly for subclasses. The rest of
        this documentation covers only the case where all three arguments
        are provided.

    Parameters
    ----------
    condition : array_like, bool
        Where True, yield `x`, otherwise yield `y`.
    x, y : array_like
        Values from which to choose. `x`, `y` and `condition` need to be
        broadcastable to some shape.

    Returns
    -------
    out : ndarray
        An array with elements from `x` where `condition` is True, and
        elements from `y` elsewhere.

    See Also
    --------
    choose
    nonzero : The function that is called when x and y are omitted

    Notes
    -----
    If all the arrays are 1-D, `where` is equivalent to::

        [xv if c else yv
         for c, xv, yv in zip(condition, x, y)]

    Examples
    --------
    >>> a = np.arange(10)
    >>> a
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> np.where(a < 5, a, 10*a)
    array([ 0,  1,  2,  3,  4, 50, 60, 70, 80, 90])

    This can be used on multidimensional arrays too:

    >>> np.where([[True, False], [True, True]],
    ...          [[1, 2], [3, 4]],
    ...          [[9, 8], [7, 6]])
    array([[1, 8],
           [3, 4]])

    The shapes of x, y, and the condition are broadcast together:

    >>> x, y = np.ogrid[:3, :4]
    >>> np.where(x < y, x, 10 + y)  # both x and 10+y are broadcast
    array([[10,  0,  0,  0],
           [10, 11,  1,  1],
           [10, 11, 12,  2]])

    >>> a = np.array([[0, 1, 2],
    ...               [0, 2, 4],
    ...               [0, 3, 6]])
    >>> np.where(a < 4, a, -1)  # -1 is broadcast
    array([[ 0,  1,  2],
           [ 0,  2, -1],
           [ 0,  3, -1]])
    """
    # NOTE(review): placeholder body — returns the raw arguments instead of
    # performing the selection described above; presumably replaced or
    # dispatched elsewhere. Confirm before relying on this implementation.
    return (condition, x, y)
def alignmentTo1(alignment):
    """Convert an alignment to one level by dropping the lower-level
    sub-alignment (the fourth field is replaced with None)."""
    flattened = []
    for startTime, endTime, label, _sub_alignment in alignment:
        flattened.append((startTime, endTime, label, None))
    return flattened
def updata_data_dict(key_name, data_key, data_value, data_dict=None):
    """
    Update the data dictionary, storing the value as a 1-tuple under
    ``data_dict[key_name][data_key]``.

    Fixes over the original:
    - mutable default argument ``data_dict={}`` was shared across calls,
      silently accumulating state; replaced with the None-sentinel idiom.
    - a missing ``key_name`` raised KeyError; the inner dict is now
      created on demand with ``setdefault``.

    :param key_name: top-level key selecting the inner dictionary
    :param data_key: key inside the inner dictionary
    :param data_value: value to store; wrapped in a tuple ``(data_value,)``
        to preserve the original function's behavior
    :param data_dict: dictionary to update; a fresh one is created if omitted
    :return: the updated dictionary
    """
    if data_dict is None:
        data_dict = {}
    entry = data_dict.setdefault(key_name, {})
    entry[data_key] = (data_value,)
    return data_dict
def repr_tree_defs(data, indent_str=None):
    """return a string which represents imports as a tree

    ``data`` maps a module name to a ``(sub, files)`` pair, where ``sub``
    is a nested dictionary of the same shape and ``files`` is a list of
    file names attributed to that module.  ``indent_str`` is None at the
    top level; recursive calls pass the accumulated indentation prefix.
    NOTE(review): output format is whitespace-sensitive (the indent
    literals align child branches), so do not reformat the strings.
    """
    lines = []
    nodes = data.items()
    # Sort by module name so the tree rendering is deterministic.
    for i, (mod, (sub, files)) in enumerate(sorted(nodes, key=lambda x: x[0])):
        if not files:
            files = ''
        else:
            files = '(%s)' % ','.join(files)
        if indent_str is None:
            # Top level: no branch marker, children indent by two spaces.
            lines.append('%s %s' % (mod, files))
            sub_indent_str = '  '
        else:
            # Nested level: draw a "\-" branch off the current indent.
            lines.append(r'%s\-%s %s' % (indent_str, mod, files))
            if i == len(nodes)-1:
                # Last sibling: no vertical bar continues below it.
                sub_indent_str = '%s  ' % indent_str
            else:
                # More siblings follow: keep the "|" rail for them.
                sub_indent_str = '%s| ' % indent_str
        if sub:
            # Recurse; the child subtree is joined as one multi-line entry.
            lines.append(repr_tree_defs(sub, sub_indent_str))
    return '\n'.join(lines)
def set_nested_item(d, list_of_keys, value):
    """Set (or delete) an item in a nested dictionary.

    Walks ``d`` through every key in ``list_of_keys`` except the last,
    then assigns ``value`` at the final key.  A ``value`` of None removes
    the item instead.

    Args:
        d: dictionary
        list_of_keys: list of keys
        value: new value, or None to delete

    Returns:
        d such that d[list_of_keys[0]][list_of_keys[1]] == value
    """
    node = d
    for seen, key in enumerate(list_of_keys[:-1]):
        assert (
            key in node
        ), f"Key {key} is not in dictionary after seeing {seen+1} keys from {list_of_keys}"
        node = node[key]
    last_key = list_of_keys[-1]
    if value is None:
        del node[last_key]
    else:
        node[last_key] = value
    return d
def _get_memo_name(city, prov): """Create composite key of format 'City, Province' for memoization.""" return '{0}, {1}'.format(city, prov)
def remove_end_chars(text: str, char: str) -> str:
    """
    Recursively remove a specific trailing character until none are left.

    Args:
        text (str): text to be modified
        char (str): character to remove

    Returns:
        str: the original text with all of the characters removed from the end

    Raises:
        ValueError: if ``char`` is not exactly one character long
    """
    if len(char) != 1:
        raise ValueError(f"char to be removed must only be 1 character, not {len(char)}")
    # Bug fix: guard against empty text. The original indexed text[-1]
    # unconditionally, so e.g. remove_end_chars("aaa", "a") raised
    # IndexError once every character had been stripped.
    if text and text[-1] == char:
        return remove_end_chars(text[:-1], char)
    return text
def get_image_count(image_urls):
    """
    Count the image urls in a '|'-separated string.

    :param image_urls: A string that contains image urls separated by the | symbol
    :return: the amount of image urls in the string (0 for an empty string)
    """
    # Bug fix: an empty string previously counted as one url
    # (count("|") + 1 == 1); treat it as containing none.
    if not image_urls:
        return 0
    return image_urls.count("|") + 1
def dehumanize(size):
    """Convert a humanized size string to a number of bytes.

    Recognized suffixes are K, M and G (case-insensitive, powers of 1024).
    A plain number is returned as-is; anything unparseable yields 0.

    >>> dehumanize('20K')
    20480.0
    >>> dehumanize('2000')
    2000.0
    >>> dehumanize('2M')
    2097152.0
    >>> dehumanize('1G')
    1073741824.0
    >>> dehumanize('bogus')
    0
    """
    # Guard the empty string: size[-1] below would raise IndexError.
    if not size:
        return 0
    if size.isdigit():
        return float(size)
    units = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3}
    value, unit = size[:-1], size[-1].upper()
    # Bug fix: the original wrote `if not value.isdigit:` — testing the
    # bound method object (always truthy), so the validation never fired
    # and unknown units crashed with KeyError instead of returning 0.
    if not value.isdigit() or unit not in units:
        return 0
    return float(value) * units[unit]
def combine_similarity_and_feedback_score(feedback_score, similarity_score, alpha=0.5):
    """
    Blend embedding similarity with the feedback score as a convex combination.

    Input:
        feedback_score: feedback score computed by compute_feedback_score,
            if no feedbacks, default to (1 - alpha)
        similarity_score: similarity between the two keywords
        alpha: higher alpha = higher feedback weight
    Output:
        weighted combination of similarity and feedback
    """
    similarity_weight = 1 - alpha
    return similarity_weight * similarity_score + alpha * feedback_score
def correct_gravity(sg, temp, cal_temp):
    """Correct a specific gravity reading to the specified calibration temperature

    The same cubic water-density polynomial is evaluated at both the
    measurement and calibration temperatures; their ratio rescales the
    reading.

    Args:
        sg (float): Measured specific gravity
        temp (float): Measurement temperature in degrees Fahrenheit
        cal_temp (float): Hydrometer calibration temperature in degrees Fahrenheit
    """
    def _density_poly(t):
        # Cubic fit of water density vs. temperature (degrees F).
        return 1.00130346 - 0.000134722124 * t + 0.00000204052596 * t**2 - 0.00000000232820948 * t**3

    return sg * _density_poly(temp) / _density_poly(cal_temp)
def oc_sched(row):
    """Build the Opencast schedule dictionary for one event row.

    Maps the row's location and start/stop times onto the scheduler's
    expected keys, with a fixed list of capture inputs.
    """
    return {
        "agent_id": row["location"],
        "start": row["startTime"],
        "end": row["stopTime"],
        "inputs": ["camera", "screen", "AudioSource"],
    }
def _simplify(str_): """Simplify source line""" return str_.strip()
def get_cut_limbs(life):
    """Return the names of all limbs in ``life['body']`` flagged as cut."""
    body = life['body']
    return [limb for limb in body if body[limb]['cut']]
def add_extension(name, extension):
    """Append ``extension`` to ``name`` unless it already ends with it.

    Example:
        >>> add_extension('myfile', '.csv')
        'myfile.csv'
    """
    if name.endswith(extension):
        return name
    return name + extension
def updateBounds(bounds, p, min=min, max=max):
    """Return the bounding rectangle of rectangle ``bounds`` extended to
    include point ``p = (x, y)``.

    ``min``/``max`` are bound as defaults so lookups stay local.
    """
    px, py = p
    left, bottom, right, top = bounds
    return min(left, px), min(bottom, py), max(right, px), max(top, py)
def is_float(x):
    """Return True if the string ``x`` parses as a float, else False."""
    try:
        float(x)
    except ValueError:
        return False
    return True
def is_unique_no_additional_ds(s: str):
    """
    Return True if every character in ``s`` is distinct.

    Sorts a copy of the string and checks each adjacent pair for a
    duplicate.  (Since strings are immutable we must copy; with a mutable
    sequence this could sort in place.)

    Bug fix: the original guard ``i < len(l) - 2`` never compared the
    final adjacent pair, so e.g. "aa" was wrongly reported unique.

    Time: O(n log n), Space: O(n) for the sorted copy.
    """
    ordered = sorted(s)
    for prev, curr in zip(ordered, ordered[1:]):
        if prev == curr:
            return False
    return True