def sort_resources(resources):
    """Sort resources for inclusion on web page.

    A number of rules are followed:

    * resources are always grouped per renderer (.js, .css, etc)
    * resources that depend on other resources are sorted later
    * resources are grouped by library, if the dependencies allow it
    * libraries are sorted by name, if dependencies allow it
    * resources are sorted by resource path if they both would be
      sorted the same otherwise.

    The only purpose of sorting on library is so we can group resources
    per library, so that bundles can later be created of them if bundling
    support is enabled.

    Note this sorting algorithm guarantees a consistent ordering, no
    matter in what order resources were needed.
    """
    for resource in resources:
        resource.library.init_library_nr()

    def key(resource):
        return (
            resource.order,
            resource.library.library_nr,
            resource.library.name,
            resource.custom_order,  # Added in MONKEY PATCH
            resource.dependency_nr,
            resource.relpath)

    return sorted(resources, key=key)
def carreau_model(shear_rate, mu_0, mu_inf, lambda_, n):
    """
    Estimates the Carreau model for viscosity. Used for fitting data using scipy.

    Parameters
    ----------
    shear_rate : array-like
        The experimental shear rate data, with units of 1/s.
    mu_0 : float
        The estimated viscosity at a shear rate of 0 1/s; units of Pa*s.
    mu_inf : float
        The estimated viscosity at infinite shear rate; units of Pa*s.
    lambda_ : float
        The reciprocal of the shear rate at which the material begins to
        flow in a non-Newtonian way; units of s.
    n : float
        The power law index for the material (1-n defines the slope of the
        curve of the non-Newtonian section of the log(viscosity) vs
        log(shear rate) curve); unitless.

    Returns
    -------
    array-like
        The estimated viscosity following the Carreau model, with units of Pa*s.
    """
    return mu_inf + (mu_0 - mu_inf) * (1 + (lambda_ * shear_rate)**2)**((n - 1) / 2)
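# A minimal sketch of fitting carreau_model with scipy, as the docstring
# suggests; the sample data and initial guesses below are made up purely
# for illustration.
import numpy as np
from scipy.optimize import curve_fit

shear = np.logspace(-2, 3, 20)                         # shear rates, 1/s
measured = carreau_model(shear, 10.0, 0.1, 2.0, 0.4)   # synthetic "data"

# p0 = [mu_0, mu_inf, lambda_, n]; reasonable starting guesses matter here.
popt, _ = curve_fit(carreau_model, shear, measured, p0=[1.0, 0.01, 1.0, 0.5])
print(popt)  # should recover roughly [10.0, 0.1, 2.0, 0.4]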
def tts_ip_address(ip_address):
    """Convert an IP address to something the TTS will pronounce correctly.

    Args:
        ip_address (str): The IP address, e.g. '192.168.0.102'

    Returns:
        str: A pronounceable IP address, e.g. '192 dot 168 dot 0 dot 102'
    """
    return ip_address.replace('.', ' dot ')
def cut_tag_preserve(tags, tag):
    """
    Cuts a tag from a list of tags without altering the original list,
    returning the remaining tags as a comma-separated string.
    """
    tag_list = tags[:]
    tag_list.remove(tag)
    return ",".join(tag_list)
def _split_data(data):
    """Split the keys of the data between 'properties' and 'data'

    data: list (or None)
        Result of a SQL query

    Returns
    -------
    dict
    """
    if data is None:
        return data
    data = data[0]
    properties = ["iris", "city", "citycode", "label"]
    result = {k: data.pop(k) for k in properties}
    result['data'] = data
    return result
def hideCode(notebook):
    """
    Finds the tags '#!--' and '#--!' in each cell and removes the lines
    in between. Returns the modified notebook dict.
    """
    for i, cell in enumerate(notebook['cells']):
        istart = 0
        istop = -1
        for idx, line in enumerate(cell['source']):
            if '#!--' in line:
                istart = idx
            if '#--!' in line:
                istop = idx
        notebook['cells'][i]['source'] = cell['source'][:istart] + cell['source'][istop+1:]
    return notebook
def add_saturate(a, b, upper_bound, lower_bound):
    """
    Returns the saturated result of an addition of two values a and b

    Parameters
    ----------
    a : Integer
        First summand
    b : Integer
        Second summand
    upper_bound : Integer
        Upper bound for the addition
    lower_bound : Integer
        Lower bound for the addition
    """
    c = int(a) + int(b)
    if c > upper_bound:
        c = upper_bound
    elif c < lower_bound:
        c = lower_bound
    return c
def _extract_items(json_dict: dict):
    """Return the items key if it exists, otherwise the top level."""
    return json_dict.get("items", json_dict)
def iterative(n):
    """Returns the n-th number of fib"""
    a, b = 0, 1
    if n == 0:
        return a
    else:
        for _ in range(n):
            a, b = b, a + b
        return a
def jaccard_similarity(str_a, str_b):
    """
    Calculate jaccard similarity

    Returns: jaccard score
    """
    # decode() only exists on bytes in Python 3, so decode conditionally
    # to accept plain strings as well
    if isinstance(str_a, bytes):
        str_a = str_a.decode('utf8')
    if isinstance(str_b, bytes):
        str_b = str_b.decode('utf8')
    set1 = set(str_a)
    set2 = set(str_b)
    if len(set1) == 0 or len(set2) == 0:
        return 0
    return len(set1 & set2) / float(len(set1 | set2))
def sum_of_odd_integers(start, end):
    """
    Computes the sum of all odd integers from start through end, inclusive.

    Args:
        start (int): starting number in range.
        end (int): ending number in range.

    Returns:
        int: the sum of all odd integers from start through end, inclusive.
    """
    if start % 2 == 0:
        start += 1
    sum_of_numbers = 0
    for number in range(start, end+1, 2):
        sum_of_numbers += number
    return sum_of_numbers
def backwards(inputString):
    """
    Parameters
    ----------
    inputString : string

    Returns
    -------
    string
        String with each space-separated word in inputString reversed
    """
    wordList = inputString.split(" ")  # list of space-separated words in inputString
    backwardsList = []  # list of reversed words

    # Generate list of reversed words
    for word in wordList:
        newWordList = []
        for index, letter in enumerate(word):
            newWordList.append(word[-(index+1)])
        newWord = "".join(newWordList)
        backwardsList.append(newWord)

    return " ".join(backwardsList).capitalize()
def _text_file_to_string(reader, content_if_empty):
    """Load text file and convert it to string for display."""
    if reader:
        return reader.read().decode('utf-8')
    else:
        return content_if_empty
def add_txt(string):
    """Ensures a filename ends in '.txt'."""
    if not string.endswith(".txt"):
        string += ".txt"
    return string
def not_the_same(user, other_user):
    """two users are not the same if they have different ids"""
    return user["id"] != other_user["id"]
def make_model_tuple(model):
    """
    Take a model or a string of the form "app_label.ModelName" and return a
    corresponding ("app_label", "modelname") tuple. If a tuple is passed in,
    assume it's a valid model tuple already and return it unchanged.
    """
    try:
        if isinstance(model, tuple):
            model_tuple = model
        elif isinstance(model, str):
            app_label, model_name = model.split(".")
            model_tuple = app_label, model_name.lower()
        else:
            model_tuple = model._meta.app_label, model._meta.model_name
        assert len(model_tuple) == 2
        return model_tuple
    except (ValueError, AssertionError):
        raise ValueError(
            "Invalid model reference '%s'. String model references "
            "must be of the form 'app_label.ModelName'." % model
        )
def add_to_history(ll, lv):
    """Appends values from a list of values to a list from a list of lists.

    Parameters
    ----------
    ll : list
        List of lists to which values will be appended.
    lv : list
        List of values to append to lists.

    Returns
    -------
    list
        List of lists to which values have been appended.
    """
    for i, v in enumerate(lv):
        ll[i].append(v)
    return list(ll)
def check_insertion(nwork, info, citation_var, citation_file, should_add, ref=""):
    """Check info validity after executing snowballing.create_info_code

    Returns a dictionary of warning fields.
    Default: returns 'place1' if 'place' is not defined, and always returns
    the pdf name.
    """
    result = {}
    result["pdf"] = "{}.pdf".format(info["pyref"])
    if "place" not in info and info.get("_work_type") not in ("Site", "Ref"):
        result["place1"] = info["place1"]
    return result
def add_values_dict(d: dict) -> float:
    """Returns a float: the sum of all the values inside a dict."""
    total: float = 0.0
    for k in d:
        total += d[k]
    return total
def convertCIDRtoNetmask(cidr):
    """
    :param cidr: CIDR prefix length (how many leading 1 bits)
    :return: the dotted-quad netmask string, e.g. 24 -> '255.255.255.0'
    """
    print("[ip - convertCIDRtoNetmask] Receive - ", cidr, "|")
    bits = 0
    for i in range(32-int(cidr), 32):
        bits |= (1 << i)
    return "%d.%d.%d.%d" % ((bits & 0xff000000) >> 24,
                            (bits & 0xff0000) >> 16,
                            (bits & 0xff00) >> 8,
                            (bits & 0xff))
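# Quick sanity check for convertCIDRtoNetmask with a few common prefix lengths.
for prefix in (8, 16, 24, 28):
    print(prefix, "->", convertCIDRtoNetmask(prefix))
# 8 -> 255.0.0.0, 16 -> 255.255.0.0, 24 -> 255.255.255.0, 28 -> 255.255.255.240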
def _iou(box1, box2, precision=1e-5):
    """
    Calculate the Intersection over Union value for 2 bounding boxes

    :param box1: array of 4 values (top left and bottom right coords): [x0, y0, x1, y1]
    :param box2: same as box1
    :param precision: small epsilon added to the denominator to avoid division by zero
    :return: IoU
    """
    box1_x0, box1_y0, box1_x1, box1_y1 = box1
    box2_x0, box2_y0, box2_x1, box2_y1 = box2

    int_x0 = max(box1_x0, box2_x0)
    int_y0 = max(box1_y0, box2_y0)
    int_x1 = min(box1_x1, box2_x1)
    int_y1 = min(box1_y1, box2_y1)

    int_area = max(int_x1 - int_x0, 0) * max(int_y1 - int_y0, 0)

    b1_area = (box1_x1 - box1_x0) * (box1_y1 - box1_y0)
    b2_area = (box2_x1 - box2_x0) * (box2_y1 - box2_y0)

    # we add small epsilon of 1e-05 to avoid division by 0
    ret_iou = int_area / (b1_area + b2_area - int_area + precision)

    return ret_iou
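# Worked example for _iou: two 2x2 squares overlapping in a 1x1 region,
# so IoU = 1 / (4 + 4 - 1) ~= 0.1428.
print(_iou([0, 0, 2, 2], [1, 1, 3, 3]))  # ~0.14285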
def adjustSize(imageSize, targetSize):
    """-> the size (x,y) the image must be scaled to in order to fit
    into the target size, while keeping the aspect ratio.
    """
    sx, sy = imageSize
    tx, ty = targetSize
    x = tx
    y = sy * tx / sx
    if y > ty:
        y = ty
        x = sx * ty / sy
    return x, y
def miles_per_gallon(start_miles, end_miles, gallons):
    """Compute and return the average number of miles that a vehicle
    traveled per gallon of fuel.

    start_miles and end_miles are odometer readings in miles.
    gallons is a fuel amount in U.S. gallons.
    """
    mpg = abs(end_miles - start_miles) / gallons
    return mpg
def bitmap_is_set(bitmap: bytes, i: int) -> bool:
    """Returns True if bitmap is set at the i'th bit."""
    assert (i >> 3) < len(bitmap), f"{i} out of range ({len(bitmap)})"
    return bool(bitmap[i >> 3] & (0x80 >> (i & 7)))
def _function7(value):
    """Maps 0 and 2 to 0, and 1 and 3 to 1; raises for anything else."""
    if value in [0, 2]:
        out = 0
    elif value in [1, 3]:
        out = 1
    else:
        raise RuntimeError(value)
    return out
def solution2(n):
    """
    This function returns only the largest prime factor.
    """
    i = 2
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
    return n
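# Usage sketch for solution2: dividing out small factors leaves the largest
# prime factor. 13195 = 5 * 7 * 13 * 29.
print(solution2(13195))         # 29
print(solution2(600851475143))  # 6857 (the classic Project Euler #3 input)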
def remove_duplicates(list_with_duplicates):
    """
    Removes the duplicates and keeps the ordering of the original list.
    For duplicates, the first occurrence is kept and the later occurrences
    are ignored.

    Args:
        list_with_duplicates: list that possibly contains duplicates

    Returns:
        A list with no duplicates.
    """
    unique_set = set()
    unique_list = []
    for element in list_with_duplicates:
        if element not in unique_set:
            unique_set.add(element)
            unique_list.append(element)
    return unique_list
def unify_lookup(lookup, results):
    """
    Create a unified serializable list from lookup and results.
    """
    unified_list = []
    for key in results.keys():
        if key not in ('score', 'max_score'):
            created_testbench = {'title': lookup[key][0],
                                 'rule-id': key,
                                 'x_fixtext': lookup[key][1]}
            unified_list.append(created_testbench)
    return unified_list
def ShellEscape(s):
    """Escape a string to be passed to the shell.

    Args:
        s: String to escape.

    Returns:
        Escaped string.
    """
    return "'" + s.replace("'", "'\\''") + "'"
def get_cos_instances(resource_instances):
    """return available cos instances by name and id"""
    storage_instances = []
    for resource in resource_instances:
        if 'cloud-object-storage' in resource['id']:
            storage_instances.append({"name": resource['name'],
                                      "id": resource['id']})
    return storage_instances
def valid_name(file_name):
    """determines that the passed file_name is valid, mainly making sure
    that it contains no periods to prevent extension issues
    """
    if '.' in file_name:
        # ValueError fits an invalid value better than NameError, which is
        # reserved for failed identifier lookups
        raise ValueError("Name must not contain periods")
    else:
        return True
def finds(itemlist, vec):
    """return, for each item in itemlist, the index of its first
    occurrence in vec (-1 if the item is not present)"""
    idlist = []
    for x in itemlist:
        ix = -1
        for i in range(len(vec)):
            if x == vec[i]:
                ix = i
                break  # keep only the first occurrence
        idlist.append(ix)
    if not idlist:
        return -1
    else:
        return idlist
def build_transcript(transcript, build='37'):
    """Build a transcript object

    These represent the transcripts that are parsed from the VCF, not the
    transcript definitions that are collected from ensembl.

    Args:
        transcript(dict): Parsed transcript information

    Returns:
        transcript_obj(dict)
    """
    # Transcripts have to have an id
    transcript_id = transcript['transcript_id']
    transcript_obj = dict(transcript_id=transcript_id)

    # Transcripts have to belong to a gene
    transcript_obj['hgnc_id'] = transcript['hgnc_id']

    # Optional keys are copied over only when they carry a truthy value
    optional_keys = [
        'protein_id', 'sift_prediction', 'polyphen_prediction', 'swiss_prot',
        'pfam_domain', 'prosite_profile', 'smart_domain', 'biotype',
        'functional_annotations', 'region_annotations', 'exon', 'intron',
        'strand', 'coding_sequence_name', 'protein_sequence_name',
    ]
    for key in optional_keys:
        if transcript.get(key):
            transcript_obj[key] = transcript[key]

    transcript_obj['is_canonical'] = transcript.get('is_canonical', False)

    return transcript_obj
def get_first_aligned_bp_index(alignment_seq):
    """
    Given an alignment string, return the index of the first aligned,
    i.e. non-gap position (0-indexed!).

    Args:
        alignment_seq (string): String of aligned sequence, consisting of
            gaps ('-') and non-gap characters, such as "HA-LO" or "----ALO".

    Returns:
        Integer, >= 0, indicating the first non-gap character within
        alignment_seq.
    """
    index_of_first_aligned_bp = [i for i, bp in enumerate(alignment_seq)
                                 if bp != '-'][0]
    return index_of_first_aligned_bp
def _config_info(mode, config):
    """Generate info about the config."""
    return {
        "mode": mode,
        "resources": len(config.get("resources", [])),
        "views": len(config.get("views", [])),
    }
def camel_case(text):
    """Camel-cases text.

    Parameters
    ----------
    text : str
        Text to be camel-cased.

    Returns
    -------
    str
        A camel-cased string.
    """
    return ''.join(text.title().split())
def stringify_sdg_number(sdg):
    """Converts integer to string and zero pads to 2 digits.

    Used for saving and loading individual goal data.

    Args:
        sdg (int): Typically 1 - 17

    Returns:
        (str): e.g. '06'
    """
    return str(sdg).zfill(2)
def sizeof_fmt(num, suffix='B'):
    """Take a byte count and return a human-readable size string"""
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, 'Yi', suffix)
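# Quick demonstration of sizeof_fmt with binary (1024-based) prefixes.
print(sizeof_fmt(0))            # '0.0B'
print(sizeof_fmt(4096))         # '4.0KiB'
print(sizeof_fmt(3 * 1024**3))  # '3.0GiB'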
def process_param(args):
    """
    Parse command-line parameters

    Some of them control the simulation, but all remaining will be passed
    to the model
    """
    sim_param = {
        'frequency_domain': False,
        'frequency': None,
        'MaxIter': 5000,
        'MaxTol': 1e-2,
        'BiCGStab': 16
    }
    model_param = {}
    for namevalue in args:
        name, value = namevalue.split("=")
        if name == "frequency":
            sim_param['frequency'] = float(value)
            sim_param['frequency_domain'] = True
        elif name == "maxtol":
            # store into sim_param; the original bound an unused local instead
            sim_param['MaxTol'] = float(value)
        elif name == "maxiter":
            sim_param['MaxIter'] = int(value)
        else:
            # all other parameters will be passed to the model:
            try:
                model_param[name] = float(value)
            except ValueError:
                model_param[name] = value
    return sim_param, model_param
def get_dict_attrib(in_dict, key, default=None):
    """Attempt to retrieve attribute from dictionary"""
    try:
        return in_dict[key]
    except KeyError:
        return default
def bool_to_bytes(val):
    """
    Encode boolean to a 1 byte array

    :param val: boolean value
    :return: a bytes object
    """
    if bool(val):
        return b'\x01'
    else:
        return b'\x00'
def __build_where_clause(where_columns_dictionary):
    """
    Builds the where portion of the SQL query to account for None values

    :param where_columns_dictionary: the column name as a key and the value
        the column you are searching for in a dictionary format (to select
        the specific row to update)
        (i.e. {'column_name1': 'value_1', 'column_name2': None })
        this where clause must uniquely select a row in the table to update.
        This will select the row where column_name1 is equal to value_1 and
        column_name2 is NULL.
    :type where_columns_dictionary: dict [str, str or None]
    :return: the where clause properly built to account for None values
        as a string
    """
    return ' AND '.join(key + " = ? " if value is not None else key + " IS NULL "
                        for key, value in where_columns_dictionary.items())
def minPathSum(grid):
    """
    https://leetcode-cn.com/problems/minimum-path-sum/
    :return: the minimum path sum from the top-left to the bottom-right cell
    """
    n = len(grid)
    m = len(grid[0])
    for i in range(n):
        for j in range(m):
            if i == 0 and j == 0:
                continue
            tp = 1e9
            if i > 0:
                tp = min(tp, grid[i - 1][j])
            if j > 0:
                tp = min(tp, grid[i][j - 1])
            grid[i][j] += tp
    return grid[-1][-1]
def quality_to_factor(quality):
    """
    Calculate factor corresponding to quality

    Args:
        quality(float): Quality for jpeg compression.

    Returns:
        float: Compression factor.
    """
    if quality < 50:
        quality = 5000. / quality
    else:
        quality = 200. - quality * 2
    return quality / 100.
def kwargs_to_tuple(d):
    """Convert expectation configuration kwargs to a canonical tuple."""
    if isinstance(d, list):
        return tuple(kwargs_to_tuple(v) for v in sorted(d))
    elif isinstance(d, dict):
        return tuple(
            (k, kwargs_to_tuple(v))
            for k, v in sorted(d.items())
            if k not in ["result_format", "include_config", "catch_exceptions", "meta"]
        )
    return d
def get_xy_coord(n):
    """compute the xy coordinate given an index"""
    if n < 1:
        raise ValueError
    elif n == 1:
        return 0, 0
    elif n == 2:
        return 1, 0
    else:
        x, y = get_xy_coord(n-1)
        if abs(x) > abs(y):
            # spiral is going up the right side or down the left side
            if x >= 0:
                # spiral is going up the right
                return (x, y + 1) if (y < x) else (x - 1, y)
            else:
                # spiral is going down the left
                return (x, y - 1) if (y > x) else (x + 1, y)
        else:
            # spiral is going left on top or right on bottom
            if y >= 0:
                # spiral is going left on top
                return (x - 1, y) if (x > -y) else (x, y - 1)
            else:
                # spiral is going right on bottom (y is negative)
                return (x + 1, y)
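# The first few indices of get_xy_coord trace the counter-clockwise square
# spiral (index 1 at the origin, 2 one step east, then up and around).
print([get_xy_coord(n) for n in range(1, 10)])
# [(0, 0), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]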
def pruned_model_filename(sparsity, block_size):
    """Produces a human-readable name including sparsity parameters."""
    return 'pruned_model_sparsity_%.2f_block_%s.tflite' % (
        sparsity, '%dx%d' % block_size)
def convert_facet_list_to_dict(facet_list: list, reverse: bool = False) -> dict:
    """
    Solr returns search facet results in the form of an alternating list.
    Convert the list into a dictionary keyed on the facet.

    :param facet_list: facet list returned by Solr
    :param reverse: boolean flag indicating if the search results should be
        returned in reverse order
    :return: A dictionary of the facet values and counts
    """
    facet_dict = {}
    # entries alternate: value, count, value, count, ...
    for i in range(0, len(facet_list), 2):
        facet_dict[facet_list[i]] = facet_list[i + 1]
    if reverse:
        rkeys = sorted(facet_dict, reverse=True)
        facet_dict_r = {}
        for k in rkeys:
            facet_dict_r[k] = facet_dict[k]
        return facet_dict_r
    else:
        return facet_dict
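# Example with a made-up Solr facet payload: the flat
# [value, count, value, count, ...] shape becomes a dict.
facets = ["alberta", 12, "ontario", 30, "yukon", 3]
print(convert_facet_list_to_dict(facets))
# {'alberta': 12, 'ontario': 30, 'yukon': 3}
print(convert_facet_list_to_dict(facets, reverse=True))
# {'yukon': 3, 'ontario': 30, 'alberta': 12}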
def _get_dn_from_ldap_payload(payload):
    """
    Returns the distinguished_name from the payload retrieved from RethinkDB
    """
    return payload["distinguishedName"]
def s3_fmt_range(range):
    """ None -> None
        (in, out) -> "bytes={in}-{out-1}"
    """
    if range is None:
        return None
    _in, _out = range
    return 'bytes={:d}-{:d}'.format(_in, _out-1)
def find_in_list_by_value(search_value, search_list, first_occurrence_only=True):
    """Find item(s) that match(es) the search value from the search list."""
    valid = isinstance(search_list, list)
    if not valid:
        raise ValueError("Invalid argument was passed.")
    if not search_value or not search_list:
        return None
    if isinstance(search_list[0], dict):
        found_list = [m for m in search_list
                      if search_value in list(m.values())]
    else:
        found_list = [m for m in search_list
                      if search_value in list(m.__dict__.values())]
    if first_occurrence_only:
        return found_list[0] if found_list else None
    return found_list
def constraint_class(kind):
    """
    The convention is that the name of the class implementing the constraint
    has a simple relationship to the constraint kind, namely that a constraint
    whose kind is 'this_kind' is implemented by a class called
    ThisKindConstraint.

    So:

        ``min`` --> ``MinConstraint``
        ``min_length`` --> ``MinLengthConstraint``
        ``no_nulls`` --> ``NoNullsConstraint``

    etc.

    This function maps the constraint kind to the class name using this rule.
    """
    return '%sConstraint' % ''.join(part.title() for part in kind.split('_'))
def scale_to_internal(vec, scaling_factor, scaling_offset):
    """Scale a parameter vector from external scale to internal one.

    Args:
        vec (np.ndarray): Parameter vector on the external scale.
        scaling_factor (np.ndarray or None): If None, no scaling factor is used.
        scaling_offset (np.ndarray or None): If None, no scaling offset is used.

    Returns:
        np.ndarray: vec with internal scale
    """
    if scaling_offset is not None:
        vec = vec - scaling_offset

    if scaling_factor is not None:
        vec = vec / scaling_factor

    return vec
def cr_validation(value):
    """
    :param value: charge rate value given by the system
    :return: Boolean
    """
    max_value = 0.8
    return value < max_value
def blendcolors(rgb1, rgb2, prop):
    """
    Blends two rgb colors so that prop comes from rgb2 and (1-prop) from
    rgb1, where prop should be a proportion in the interval 0.0 - 1.0.
    """
    return tuple(map(lambda c1, c2: int(round(c1 + (c2 - c1) * prop)),
                     rgb1, rgb2))
def IsKillStep(chessboard: list, mv: list) -> bool:
    """
    chessboard: current chessboard info [[x, y, class], [], []...]
    mv: AI move info, [x_src, y_src, x_dst, y_dst]
    return: BOOL: true if this step is kill step, false if normal step.
    """
    for pc in chessboard:
        if mv[2] == pc[0] and mv[3] == pc[1]:
            return True
    return False
def is_librarys_flow_cell_owner(user, library):
    """Whether or not the user is the owner of the given library's flow cell"""
    if not library or not library.flow_cell:
        return False
    else:
        return library.flow_cell.owner == user
def look_and_say(numbers):
    """
    Performs a look'n'say iteration.

    Runs of repeated digits are collapsed into a single digit preceded by
    the length of the run, so '111' -> '31' (three ones).

    :param numbers: string of digits
    :return: look'n'say op over digits
    """
    digit = ""
    result = ""
    count = 0
    for c in numbers:
        if c == digit:
            count += 1
        else:
            if count:
                result += str(count) + digit
            digit = c
            count = 1
    result += str(count) + digit
    return result
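# Iterating look_and_say generates the classic look-and-say sequence.
term = "1"
for _ in range(5):
    print(term)
    term = look_and_say(term)
# 1, 11, 21, 1211, 111221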
def create_graph(network):
    """Create a graph of train lines

    Args:
        network(list): A list of dictionaries of lines and stations in the line

    Returns:
        graph(dictionary): A graph of train stations
    """
    graph = {}
    # first pass: every station becomes a node
    for line in network:
        for station in line['Stations']:
            graph[station] = []
    # second pass: connect each station to its neighbours on the line
    for line in network:
        stations = line['Stations']
        for i, station in enumerate(stations):
            if i != 0:
                graph[station].append(stations[i - 1])
            if i != len(stations) - 1:
                graph[station].append(stations[i + 1])
    return graph
def _parse(filename):
    """Parse reference data filename to extract land cover and year."""
    basename = filename.split('.')[0]
    land_cover, year = basename.split('_')
    year = int(year)
    return land_cover, year
def ptoc(width, height, x, y, zxoff, zyoff, zoom):
    """
    Converts actual pixel coordinates to complex space coordinates
    (zxoff, zyoff are always the complex offsets).
    """
    zx_coord = zxoff + ((width / height) * (x - width / 2) / (zoom * width / 2))
    zy_coord = zyoff + (-1 * (y - height / 2) / (zoom * height / 2))
    return zx_coord, zy_coord
def get_old_asg_instances(asg_object):
    """Gets instances from ASG that are not using the correct launch configuration"""
    all_instances = []
    instances = asg_object['AutoScalingGroups'][0]['Instances']
    for instance in instances:
        if 'LaunchConfigurationName' not in instance:
            all_instances.append(instance['InstanceId'])
    return all_instances
def cut_trailing_quotes(text: str) -> str:
    """Cut trailing quotes into one quote.

    Args:
        text: Input text.

    Returns:
        Text up to the dangling double quote.
    """
    num_quotes = text.count('"')
    if num_quotes == 1:
        return text.replace('"', "")
    elif num_quotes % 2 == 0:
        return text
    else:
        final_ind = text.rfind('"')
        return text[:final_ind]
def combinations_construct(tree_config_path, path=["root"]):
    """
    >>> combinations_construct({"root": ["target", "target teams"],
    ...                         "target": ["teams"],
    ...                         "target teams": [], "teams": []})
    [['target'], ['target', 'teams'], ['target teams']]
    """
    # Remove the 'root' path
    tails = [path[1:]] if len(path[1:]) else []
    for children in tree_config_path[path[-1]]:
        tails += combinations_construct(tree_config_path, path + [children])
    return tails
def multi_split(s, split):
    """Splits on multiple given separators."""
    # each separator is first rewritten to '|', which therefore must not
    # occur in the input itself
    for r in split:
        s = s.replace(r, '|')
    return [i for i in s.split('|') if len(i) > 0]
def get_gitlab_scripts(data):
    """GitLab is nice, as far as I can tell its files have a flat hierarchy
    with many small job entities"""

    def flatten_nested_string_lists(data):
        """helper function"""
        if isinstance(data, str):
            return data
        elif isinstance(data, list):
            return "\n".join([flatten_nested_string_lists(item) for item in data])
        else:
            raise ValueError(
                f"unexpected data type {type(data)} in script section: {data}"
            )

    result = {}
    for jobkey in data:
        if not isinstance(data[jobkey], dict):
            continue
        for section in ["script", "before_script", "after_script"]:
            if section in data[jobkey]:
                script = data[jobkey][section]
                result[f"{jobkey}/{section}"] = flatten_nested_string_lists(script)
    return result
def _mark_untranslated_strings(translation_dict):
    """Marks all untranslated keys as untranslated by surrounding them
    with lte and gte symbols.

    This function modifies the translation dictionary passed into it
    in-place and then returns it.
    """
    # This was a requirement when burton was written, but may be an unwanted
    # side effect for other projects that adopt burton. We should replace it
    # with something more flexible.
    for key in translation_dict:
        if key is not None and translation_dict[key] is None:
            translation_dict[key] = u"\u2264" + key + u"\u2265"
    return translation_dict
def expand_qgrams(text, qsize, output):
    """Expands a text into q-grams, appending them to the output list"""
    n = len(text)
    for start in range(n - qsize + 1):
        output.append(text[start:start+qsize])
    return output
def erlangb(load, c):
    """
    Return the probability of loss in M/G/c/c system using recursive approach.

    Much faster than direct computation via
    scipy.stats.poisson.pmf(c, load) / scipy.stats.poisson.cdf(c, load)

    Parameters
    ----------
    load : float
        average arrival rate * average service time (units are erlangs)
    c : int
        number of servers

    Returns
    -------
    float
        probability arrival finds system full
    """
    invb = 1.0
    for j in range(1, c + 1):
        invb = 1.0 + invb * j / load
    b = 1.0 / invb
    return b
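# Sanity check for erlangb: with an offered load of 2 erlangs and a single
# server, Erlang B reduces to load / (1 + load) = 2/3.
print(erlangb(2.0, 1))  # 0.666...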
def _add_first_child_to_dic(base, dic, opt, tag):
    """Adds the first child element with a given tag to a dictionary.

    Args:
        base: The base node element. (lxml node)
        dic: The dictionary to add the element to (dictionary)
        opt: If false and the tag is not found in base, the element is
            added with an empty string (Bool)
        tag: Child elements group tag used to select the elements. (string)

    Returns:
        The dictionary with the added element.
    """
    for node in base:
        if node.tag.replace("{http://www.rdml.org}", "") == tag:
            dic[tag] = node.text
            return dic
    if not opt:
        dic[tag] = ""
    return dic
def _add_path(root_path, relative_path):
    """Add another level to an LDAP path.

    eg, _add_path('LDAP://DC=gb,DC=vo,DC=local', "cn=Users")
        => "LDAP://cn=Users,DC=gb,DC=vo,DC=local"
    """
    protocol = "LDAP://"
    if relative_path.startswith(protocol):
        return relative_path

    if root_path.startswith(protocol):
        start_path = root_path[len(protocol):]
    else:
        start_path = root_path

    return protocol + relative_path + "," + start_path
def get_byte_size(row, delimiter, line_terminator):
    """Get the byte size of this row joined with this delimiter."""
    line = delimiter.join(row) + line_terminator
    return len(line.encode("utf-8"))
def descendants_query(node_id, node_label, edge_label="edge"):
    """Generate Cypher query finding descendant nodes starting at 'node_id'."""
    return (
        "MATCH path=(n:{} {{id: '{}'}})-[:{}*1..]->(m:{})\n".format(
            node_label, node_id, edge_label, node_label) +
        "RETURN m.id AS descendant, "
        "REDUCE(p=[], n in nodes(path) | p + [n.id]) as path\n"
    )
def contains(seq, value):
    """
    Description
    ----------
    Checks to see if a value is in the sequence or dictionary.

    Parameters
    ----------
    seq : (list or tuple or set or dict or string) - sequence/dictionary to search in
    value : any - value to search for

    Returns
    ----------
    bool - True (value found), False (value not found)

    Examples
    ----------
    >>> lst = [1, 2, 3, 4, 5]
    >>> contains(lst, 4) -> True
    >>> contains(lst, 10) -> False
    """
    if isinstance(seq, dict):
        return value in seq.keys() or value in seq.values()
    return value in seq
def take_ingredients(request):
    """
    We use this function to get ingredients from our request when
    creating / editing recipes.
    """
    ingredients = {}
    for key in request:
        if key.startswith('nameIngredient_'):
            num = key[15:]  # strip the 'nameIngredient_' prefix (15 chars)
            if num.isdecimal():
                ingredients[request[key]] = request[f'valueIngredient_{num}']
    return ingredients
def state_probabilities(endclasses):
    """
    Tabulates the probabilities of different states in endclasses.

    Parameters
    ----------
    endclasses : dict
        Dictionary of end-state classifications with 'classification' and
        'prob' attributes

    Returns
    -------
    probabilities : dict
        Dictionary of probabilities of different simulation classifications
    """
    classifications = set([props['classification'] for k, props in endclasses.items()])
    probabilities = dict.fromkeys(classifications)
    for classif in classifications:
        probabilities[classif] = sum([props['prob'] for k, props in endclasses.items()
                                      if classif == props['classification']])
    return probabilities
def break_down_filename(image):
    """
    filename: g04i2c08.png
              || ||||
    test feature (in this case gamma) ------+| ||||
    parameter of test (here gamma-value) ----+ ||||
    interlaced or non-interlaced --------------+|||
    color-type (numerical) ---------------------+||
    color-type (descriptive) --------------------+|
    bit-depth ------------------------------------|
    """
    return {
        "test_feature": image[0].lower(),
        "parameter_ot": image[1:3].lower(),
        "noninterlace": image[3].lower() != "i",
        "colortype_nm": int(image[4], 10),
        "colortype_ds": image[5].lower(),
        "n_bits_depth": int(image[6:8], 10),
    }
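# Example: decoding the docstring's own PngSuite-style name with
# break_down_filename.
print(break_down_filename("g04i2c08.png"))
# {'test_feature': 'g', 'parameter_ot': '04', 'noninterlace': False,
#  'colortype_nm': 2, 'colortype_ds': 'c', 'n_bits_depth': 8}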
def message_from_lax(data):
    """
    format a message from a Lax response data
    """
    return data.get("message") if data.get("message") else "(empty message)"
def _validate_bool(s):
    """
    A validation method to convert input s to boolean or raise error
    if it is not convertable
    """
    try:
        # note: bool() coerces truthiness and raises only for exotic inputs,
        # so in practice most values pass this check
        return bool(s)
    except ValueError:
        raise ValueError('Could not convert input to boolean')
def extract_text(element):
    """
    Return *text* attribute of the supplied element if available.
    """
    if element is not None:
        return element.text
    else:
        return None
def get_event_ends(T_part, n_repeats):
    """get the end points for an event sequence, with length T, and k repeats
    - event ends need to be removed for prediction accuracy calculation,
      since there is nothing to predict there
    - event boundaries are defined by these values

    Parameters
    ----------
    T_part : int
        the length of an event sequence (one repeat)
    n_repeats : int
        number of repeats

    Returns
    -------
    list
        the end points of event seqs
    """
    return [T_part * (k + 1) - 1 for k in range(n_repeats)]
def wavelength(fringe_spacing, slits_distance, screen_distance):
    """
    This function provides the wavelength of light when the following
    parameters are provided.
    ===========================================
    Parameters:
    fringe_spacing : Fringe spacing
    slits_distance : Distance between two slits
    screen_distance : Distance between slits and screen
    """
    # Young's double-slit relation: fringe_spacing = wavelength * screen_distance
    # / slits_distance, so wavelength = fringe_spacing * slits_distance / screen_distance
    return (fringe_spacing * slits_distance) / screen_distance
def is_indexed(value):
    """
    Checks if `value` is integer indexed, i.e., ``list``, ``str`` or ``tuple``.

    Args:
        value (mixed): Value to check.

    Returns:
        bool: Whether `value` is integer indexed.

    Example:

        >>> is_indexed('')
        True
        >>> is_indexed([])
        True
        >>> is_indexed(())
        True
        >>> is_indexed({})
        False

    .. versionadded:: 2.0.0

    .. versionchanged:: 3.0.0
        Return ``True`` for tuples.
    """
    return isinstance(value, (list, tuple, str))
def Probability(o):
    """Computes the probability corresponding to given odds.

    Example: o=2 means 2:1 odds in favor, or 2/3 probability

    o: float odds, strictly positive

    Returns: float probability
    """
    return o / (o + 1)
def ParseIssueNames(names):
    """Because comics use the same upc for the whole series now parse out
    the series name from the returned list

    Notable items that come after the series name: "issue #" and "#"

    There is probably a better way to do this but it seems to work in
    most cases.

    names: a list of series titles
    """
    fixednames = []
    for name in names:
        # str.Contains is a .NET method; plain Python membership tests work
        # in both CPython and IronPython
        if "issue" in name:
            n = name.split("issue")[0].strip()
            if not n in fixednames:
                fixednames.append(n)
        elif "#" in name:
            n = name.split("#")[0].strip()
            if not n in fixednames:
                fixednames.append(n)
        else:
            fixednames.append(name)
    return fixednames
def is_full_section(section):
    """Is this section affected by "config.py full" and friends?"""
    return section.endswith('support') or section.endswith('modules')
def applianceLogsFileName(config):
    """
    Returns the filename of the log zip file created by calling
    syscfg.zipApplianceLogs()
    """
    return config['converter.appliance_logs_name']
def _ascii_decode(ascii: bytes) -> str:
    """Decode bytes of ASCII characters to string.

    Args:
        ascii (bytes): ASCII characters

    Returns:
        str: Converted string
    """
    return ascii.decode("ascii", "replace").replace("\x00", "")
def _compute_char_shifts(tokens):
    """Compute the shifts in characters that occur when comparing the
    tokens string with the string consisting of all tokens separated with
    a space

    For instance, if "hello?world" is tokenized in ["hello", "?", "world"],
    then the character shifts between "hello?world" and "hello ? world"
    are [0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2]
    """
    characters_shifts = []
    if not tokens:
        return characters_shifts

    current_shift = 0
    for token_index, token in enumerate(tokens):
        if token_index == 0:
            previous_token_end = 0
            previous_space_len = 0
        else:
            previous_token_end = tokens[token_index - 1].end
            previous_space_len = 1
        current_shift -= (token.start - previous_token_end) - \
            previous_space_len
        token_len = token.end - token.start
        index_shift = token_len + previous_space_len
        characters_shifts += [current_shift for _ in range(index_shift)]
    return characters_shifts
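# A minimal sketch exercising _compute_char_shifts; the Token namedtuple here
# is a stand-in assumption for whatever token type the real tokenizer yields
# (it only needs .start and .end character offsets).
from collections import namedtuple

Token = namedtuple("Token", ["start", "end"])

# "hello?world" tokenized as ["hello", "?", "world"]
tokens = [Token(0, 5), Token(5, 6), Token(6, 11)]
print(_compute_char_shifts(tokens))
# [0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2]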
def get_cycle_stats(data_list):
    """
    Calculates cycle statistics for test run.

    Returns min, max, avg cycle count.
    """
    cycles = [data[0] for data in data_list]
    min_cycles = min(cycles)
    max_cycles = max(cycles)
    avg_cycles = sum(cycles) / len(cycles)
    return min_cycles, max_cycles, avg_cycles
def flatten(iterable):
    """Recursively denest iterable containers.

    >>> flatten([1, 2, 3])
    [1, 2, 3]
    >>> flatten([1, 2, [3]])
    [1, 2, 3]
    >>> flatten([1, [2, 3], [4, 5]])
    [1, 2, 3, 4, 5]
    """
    result = []
    for item in iterable:
        if isinstance(item, list):
            result.extend(flatten(item))
        else:
            result.append(item)
    return result
def write_node(doc, parent, name, value, attr=None):
    """
    :param doc: document DOM
    :param parent: parent node
    :param name: tag of the element
    :param value: value of the child text node
    :param attr: attribute dictionary
    :return: True if something was appended, otherwise False
    """
    if attr is None:
        attr = {}
    if value is not None:
        node = doc.createElement(name)
        node.appendChild(doc.createTextNode(str(value)))
        for item in attr:
            node.setAttribute(item, attr[item])
        parent.appendChild(node)
        return True
    return False
def max_length(choices):
    """
    Returns the size of the longest choice.

    :param: the available choice strings or tuples
    :return: the maximum length
    """
    def length(item):
        if isinstance(item, str):
            value = item
        else:
            value = item[0]
        return len(value)

    return max((length(c) for c in choices))
def get_io_from_prov_json(prov_json):
    """Identify input and output files from provenance JSON

    Parameters
    ----------
    prov_json : OrderedDict
        ordered dictionary generated from json prov file using python's json
        module, i.e. json.load(path_to_json_file, object_pairs_hook=OrderedDict)

    Returns
    -------
    (input_files, output_files, file_locs) : tuple of (list, list, dict)
        input files and output files (empty lists if none) and dict mapping
        files to location (empty if none)
    """
    # initializing data structures
    entity_to_file = {}
    file_locs = {}
    input_files = []
    output_files = []

    # get file entity names and locations
    for key, value in prov_json['entity'].items():
        if value['rdt:type'] == 'File':
            filename = value['rdt:name']
            entity_to_file[key] = filename
            file_locs[filename] = value['rdt:location']
    entities = set(entity_to_file.keys())

    # if a file entity is used by an activity, it must be an input file
    for value in prov_json['used'].values():
        if value['prov:entity'] in entities:
            input_files.append(entity_to_file[value['prov:entity']])

    # if a file entity was generated by an activity, it must be an output file
    for value in prov_json['wasGeneratedBy'].values():
        if value['prov:entity'] in entities:
            output_files.append(entity_to_file[value['prov:entity']])

    return input_files, output_files, file_locs
def find_missing_int_using_sum(complete, incomplete):
    """
    Problem: There are distinct integers in list `complete`. The same
    integers are in list `incomplete`, except for one.

    Task: Find the one integer which is missing from the incomplete list.

    Complexity: O(n) time, O(1) space
    """
    # running difference: add elements of `complete`, subtract elements of
    # `incomplete`; zip pairs each incomplete element with one complete one
    s = 0
    for a, b in zip(complete[:-1], incomplete):
        s += a
        s -= b
    # the last element of `complete` has no zip partner, so add it here;
    # the result equals sum(complete) - sum(incomplete), i.e. the missing int
    return s + complete[len(complete) - 1]
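# Quick check of find_missing_int_using_sum with a made-up pair of lists.
print(find_missing_int_using_sum([4, 9, 1, 7], [1, 4, 7]))  # 9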
def min_index(items):
    """Returns index of the smallest item.

    items can be any sequence. If there is a tie, returns earliest item"""
    return min([(item, index) for index, item in enumerate(items)])[1]
def parse_http_protocol(s):
    """
    Parse an HTTP protocol declaration.
    Returns a (major, minor) tuple, or None.
    """
    if not s.startswith(b"HTTP/"):
        return None
    _, version = s.split(b'/', 1)
    if b"." not in version:
        return None
    major, minor = version.split(b'.', 1)
    try:
        major = int(major)
        minor = int(minor)
    except ValueError:
        return None
    return major, minor
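# parse_http_protocol expects a bytes declaration and degrades to None on
# anything malformed.
print(parse_http_protocol(b"HTTP/1.1"))  # (1, 1)
print(parse_http_protocol(b"HTTP/2.0"))  # (2, 0)
print(parse_http_protocol(b"SPDY/3.1"))  # None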
def format_desc(desc="", available_values=None):
    """Format a parameter description, optionally listing its allowed values."""
    if available_values is None:
        # avoid a mutable default argument
        available_values = []
    output_str = ""
    output_str += desc
    if len(available_values) == 0:
        return output_str
    output_str += "\nExplain the meaning of the parameter:"
    for a_value in available_values:
        meaning, value = a_value
        output_str += "\n- {0}: {1}".format(value, meaning)
    return output_str
def extract_action_list_from_path(path_list):
    """
    Converts a block idx path to action list.

    Args
        path_list: <list> list of block idx from source block to dest block.

    Returns
        action_list: <list> list of string discrete action commands
            (e.g. ['movesouth 1', 'movewest 1', ...])
    """
    # index deltas of +/-21 step one grid row (the grid is presumably
    # 21 blocks wide); +/-1 steps one column
    action_trans = {-21: 'movenorth 1', 21: 'movesouth 1',
                    -1: 'movewest 1', 1: 'moveeast 1'}
    alist = []
    for i in range(len(path_list) - 1):
        curr_block, next_block = path_list[i:(i + 2)]
        alist.append(action_trans[next_block - curr_block])
    return alist
def author(context, request):
    """ Handles author's profile """
    return {'author': context}