def getMappingsBetweenUUIDsAndTraxels(model):
    """
    From a dictionary encoded model, load the "traxelToUniqueId" mapping,
    create a reverse mapping, and return both.
    """
    # create reverse mapping from json uuid to (timestep, ID)
    traxelIdPerTimestepToUniqueIdMap = model["traxelToUniqueId"]
    timesteps = [t for t in traxelIdPerTimestepToUniqueIdMap.keys()]
    uuidToTraxelMap = {}
    for t in timesteps:
        for i in traxelIdPerTimestepToUniqueIdMap[t].keys():
            uuid = traxelIdPerTimestepToUniqueIdMap[t][i]
            if uuid not in uuidToTraxelMap:
                uuidToTraxelMap[uuid] = []
            uuidToTraxelMap[uuid].append((int(t), int(i)))

    # sort the list of traxels per UUID by their timesteps
    for v in uuidToTraxelMap.values():
        v.sort(key=lambda timestepIdTuple: timestepIdTuple[0])

    return traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap

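A minimal usage sketch (the model dict below is illustrative, not from the source):

model = {"traxelToUniqueId": {"0": {"1": 5}, "1": {"2": 5}}}
forward, reverse = getMappingsBetweenUUIDsAndTraxels(model)
assert reverse[5] == [(0, 1), (1, 2)]  # traxels sorted by timestep
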
def to_var(field):
    """Converts a field name into a variable (snake_case)

    Args:
        field: a string to be converted to a var

    Returns:
        a string in lower snake case
    """
    return field.replace(' ', '_').lower()

def derive_bt_mac(base_mac, offset):
    """
    Derives the BT_MAC from the BASE_MAC and the BT_MAC LSB offset.
    """
    base_mac_lsb = int(str(base_mac[-1]), base=16) + offset
    base_mac[-1] = format(base_mac_lsb, 'x')
    bt_mac = '-'.join(base_mac)
    return bt_mac.strip()

def build_variant_sample_annotation_id(call_info, variant_uuid, file_accession):
    """ Helper function that builds a variant sample annotation ID
        from the required parts. """
    return ':'.join([call_info, variant_uuid, file_accession])

def get_matches(geoms, tree_idx):
    """
    Return the indices of the rtree entries that intersect with the input geometries.

    Parameters
    ----------
    geoms : list
        list of geometries to compare against the STRtree
    tree_idx : STRtree
        an STRtree indexing object

    Returns
    -------
    list
        list of pairs, where the first element of each pair is the linestring
        index and the second element is the list of junctions intersecting
        the bounds of that linestring.
    """
    # find near linestrings by querying the tree
    matches = []
    for idx_ls, obj in enumerate(geoms):
        intersect_ls = tree_idx.query(obj)
        if len(intersect_ls):
            matches.extend([[[idx_ls], [ls.i for ls in intersect_ls]]])
    return matches

def split_box(fraction, x, y, w, h):
    """Return set of two boxes where first is the fraction given"""
    if w >= h:
        new_w = int(w * fraction)
        if new_w:
            return (x, y, new_w, h), (x + new_w, y, w - new_w, h)
        else:
            return None, None
    else:
        new_h = int(h * fraction)
        if new_h:
            return (x, y, w, new_h), (x, y + new_h, w, h - new_h)
        else:
            return None, None

def valid_yes_or_no(user_input):
    """The purpose of this function is to receive user input and determine if
    the user input is a valid yes (Y) or no (N) response to the prompt. This
    function is called throughout the program to ensure errors do not occur.
    Once the user has entered a valid yes (Y) or no (N) response, the valid
    input is returned."""
    # The initial if statement verifies that user_input is a string. The
    # not operator checks to see if the statement isinstance(user_input,
    # str) is false. However, because the input() function takes a string
    # by default, the while loop within this condition will never be run.
    if not isinstance(user_input, str):
        # This while loop prompts the user to input a valid "Y" or "N"
        # response and loops until the user inputs one of them. The != is a
        # relational operator that ensures the while loop continues until
        # user_input is equivalent to "Y" or "N". The and operator ensures
        # the while loop is checking for both conditions ("Y" or "N").
        while user_input.capitalize() != "Y" and user_input.capitalize() != "N":
            user_input = input("Please enter Y for Yes or N for No: ")
    # Once user_input is verified to be a string, this elif statement checks
    # to see if user_input is not equivalent to the valid input "Y" or "N".
    # If it is not, a while loop is run.
    elif user_input.capitalize() != "Y" and user_input.capitalize() != "N":
        # This while loop prompts the user to input a valid "Y" or "N"
        # response and loops until the user inputs one of them. The != is a
        # relational operator that ensures the while loop continues until
        # user_input is equivalent to "Y" or "N". The and operator ensures
        # the while loop is checking for both conditions ("Y" or "N").
        while user_input.capitalize() != "Y" and user_input.capitalize() != "N":
            user_input = input("Please enter Y for Yes or N for No: ")
    # If the user input is equivalent to "Y" or "N", the function returns
    # the user_input as it entered the function by passing the else failsafe.
    else:
        pass
    # The updated (or not updated) user_input is returned
    return user_input.capitalize()

def is_ok_url(url: str):
    """
    Check doc url is valid
    """
    ng_list = [
        "aws-sdk-php", "AWSAndroidSDK", "AWSiOSSDK", "AWSJavaScriptSDK",
        "AWSJavaSDK", "awssdkrubyrecord", "encryption-sdk", "mobile-sdk",
        "pythonsdk", "powershell", "sdk-for-android", "sdk-for-cpp",
        "sdk-for-go", "sdk-for-ios", "sdk-for-java", "sdk-for-javascript",
        "sdk-for-net", "sdk-for-php", "sdk-for-php1", "sdk-for-ruby",
        "sdk-for-unity", "sdkfornet", "sdkfornet1", "xray-sdk-for-java", "cdk"
    ]
    for ng in ng_list:
        if ng in url:
            return False
    return True

def get_deviation(number: int, circle: int) -> int:
    """Get distance to horizontal or vertical line from 1."""
    # Special case for memory storage center
    if number == 1:
        return 0
    # Side length derived from progression n'th child formula
    side = circle * 2 + 1
    # Normalize number - bottom left circle number is 0, then +1 clock-wise
    deviation = abs(number - side ** 2)
    # Split by side - number should not exceed side length - 1
    deviation %= side - 1
    # Subtract half of side length to count distance from side center
    deviation = abs(deviation - side // 2)
    return deviation

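A quick check, assuming circle is the ring index of a square spiral so that the Manhattan distance back to square 1 is circle + deviation (illustrative values, not from the source):

assert get_deviation(23, 2) == 0   # 23 sits at the centre of a side of ring 2
assert get_deviation(12, 2) == 1   # distance of 12 from square 1 is 2 + 1 = 3
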
def is_banned_asset_tag(text):
    """
    Determines whether the text is a banned asset tag through various tests.

    :param text: Text to be checked against banned asset tags
    :type text: str
    :return: `True` if a banned asset tag else `False`
    :rtype: bool
    """
    # Is asset tag in banned list?
    text = text.lower()
    banned_tags = [
        "Default string", "NA", "N/A", "None", " none", "Null", "oem",
        "o.e.m", "to be filled by o.e.m.", "Unknown", " ", ""
    ]
    banned_tags = [t.lower() for t in banned_tags]
    if text in banned_tags:
        result = True
    # Does it exceed the max allowed length for NetBox asset tags?
    elif len(text) > 50:
        result = True
    # Does asset tag contain all spaces?
    elif text.replace(" ", "") == "":
        result = True
    # Apparently a "good" asset tag :)
    else:
        result = False
    return result

def replace_valueCheck(value):
    """validate applyDQPlane.replace_value"""
    return (value in ('mean', 'median') or isinstance(value, float))

def quote_arg(s: str) -> str:
    """Return a quoted cmd arg, with all backslashes and double quotes escaped"""
    escaped = s.replace('\\', '\\\\').replace('"', '\\"')
    return f'"{escaped}"'

def trace_size_converter(value, params, key):
    """ Converts trace size from seconds to samples """
    params = params[key]
    if value is None:
        return None
    if not value:
        return value
    if not params['frequency']:
        raise AttributeError('No frequency specified for trace-size argument!')
    return int(float(value) * params['frequency'])

def _sint(S):
    """
    Convert a string to an integer, treating an all-blank string as zero.

    Parameters
    ----------
    S : str
        string to be converted to an integer; an all-blank string is
        treated as zero

    Returns
    -------
    integer or zero
    """
    if S.strip():
        return int(S)
    else:
        return 0

def s_to_b(s: str) -> bytes:
    """convert string to bytes

    :param s: input string
    :type s: str
    :return: output bytes
    :rtype: bytes
    """
    b = s.encode('utf8')
    return b

def _py_if_stmt(cond, body, orelse):
    """Overload of if_stmt that executes a Python if statement."""
    return body() if cond else orelse()

def inside_bounding_box(bb_minlat, bb_minlon, bb_maxlat, bb_maxlon,
                        start_lat, start_lon, end_lat, end_lon):
    """
    Check if two given sets of coordinates (start_lat, start_lon) and
    (end_lat, end_lon) are within a bounding box
    (bb_minlat, bb_minlon, bb_maxlat, bb_maxlon)

    Examples:
    >>> inside_bounding_box(50.7777, 4.2359, 50.9204, 4.5216, 50.866232, 4.327700, 50.896571, 4.428547)
    True
    """
    return (bb_minlat <= start_lat <= bb_maxlat and bb_minlon <= start_lon <= bb_maxlon) and \
           (bb_minlat <= end_lat <= bb_maxlat and bb_minlon <= end_lon <= bb_maxlon)

def sort(word_p_lists):
    """
    This method combines all the dictionaries in word_p_lists (word with its
    z_score) into a single totallist, with a mark indicating which file each
    element (word with z_score) belongs to, and then sorts the totallist to
    give the user a clean output of which word in which file is the most abnormal.
    :param word_p_lists: an array of dictionaries; each element of the array
        represents a chunk, and each element in the dictionary maps a word
        inside that chunk to its z_score
    :return: an array of tuples (sorted via z_score); each element is a tuple:
        (the chunk it belongs to (its number in word_p_lists), the word,
        the corresponding z_score)
    """
    totallist = []
    i = 0
    for list in word_p_lists:
        templist = []
        for word in list:
            if not word[1] == 'Insignificant':
                temp = ('junk', i + 1) + word  # add the 'junk' to make i+1 a tuple type
                temp = temp[1:]
                templist.append(temp)
        totallist += templist
        i += 1
    totallist = sorted(totallist, key=lambda tup: tup[2])
    return totallist

def distance1(point1, point2):
    """L1 distance"""
    return sum(abs(point1[i] - point2[i]) for i in range(len(point1)))

def check_overlap(peak1, peak2):
    """check_overlap checks if two peaks overlap; if they do, it returns 0,
    if not, it returns 1 if the first given peak is greater than the second,
    or -1 otherwise.

    Args:
        peak1 (tuple): start, end tuple which constitutes a peak
        peak2 (tuple): start, end tuple which constitutes a peak

    Returns:
        int: 0 if the peaks overlap, 1 if peak1 starts after peak2, -1 otherwise
    """
    peak1_start = peak1[0]
    peak2_start = peak2[0]
    peak1_end = peak1[1]
    peak2_end = peak2[1]
    if peak2_start <= peak1_start <= peak2_end or peak1_start <= peak2_start <= peak1_end:
        return 0
    elif peak1_start > peak2_start:
        return 1
    else:
        return -1

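Illustrative calls (hypothetical peak tuples, not from the source):

assert check_overlap((1, 5), (4, 8)) == 0    # overlapping peaks
assert check_overlap((10, 12), (1, 5)) == 1  # peak1 starts after peak2
assert check_overlap((1, 5), (10, 12)) == -1
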
def text_attribute(attribute, operator, value):
    """
    Select an audience to send to based on an attribute object with a TEXT
    schema type, including predefined and device attributes. Please refer to
    https://docs.airship.com/api/ua/?http#schemas-textattribute for more
    information about using this selector, including information about
    required data formatting for values.

    Custom attributes must be defined in the Airship UI prior to use.
    """
    if operator not in ["equals", "contains", "less", "greater", "is_empty"]:
        raise ValueError(
            "operator must be one of 'equals', 'contains', 'less', 'greater', 'is_empty'"
        )
    if type(value) is not str:
        raise ValueError("value must be a string")

    return {"attribute": attribute, "operator": operator, "value": value}

def cat_matrices2D(mat1, mat2, axis=0):
    """
    args: two matrices and an axis
    return: concatenated matrix, or None if the shapes are incompatible
    """
    if axis == 0:
        if len(mat1[0]) != len(mat2[0]):
            return None
        return [item.copy() for item in mat1] + [item.copy() for item in mat2]
    else:
        if len(mat1) != len(mat2):
            return None
        return [item.copy() + item2.copy() for item, item2 in zip(mat1, mat2)]

def ttr(text):
    """
    Number of unique tokens

    :param str text: Input string of text
    :return float floatValue: Ratio of the unique/overall tokens
    """
    if len(text.split(" ")) > 1 and len(text.split()) > 0:
        return len(set(text.split())) / len(text.split())
    else:
        return 0

def _create_context_response(context_el, status_code):
    """
    Function to build the context response model
    :param context_el: JSON including the context element attributes
    :param status_code: status code received from context manager
    :return: (dict) Context response model. The contextResponse in JSON will be like this:
        {
          "contextResponses": [
            {
              "contextElement": {
                "type": "Room",
                "isPattern": "false",
                "id": "Room1",
                "attributes": [
                  {"name": "temperature", "type": "float", "value": "23"}
                ]
              },
              "statusCode": {"code": "200", "reasonPhrase": "OK"}
            }
          ]
        }
    """
    return [{"contextElement": context_el, "statusCode": status_code}]

def parse_attributes(attribute_str):
    """
    Given the attribute part from a line record file, parse the attribute
    section. Returns a dict of numeric attribute values.

    Example: parse_attributes("0=0,2=0") => {0: 0, 2: 0}
    """
    # Split into individual key/value pairs
    attrs = (s.strip() for s in attribute_str.split(","))
    # Split each k/v pair into individual parts
    part_attrs = (
        attr.partition("=") if "=" in attr else (attr, None, True)
        for attr in attrs)
    # Create dict of ints
    return {
        int(attr[0]): int(attr[2]) if not isinstance(attr[2], bool) else attr[2]
        for attr in part_attrs
    }

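A short usage sketch with made-up attribute strings; bare attributes (no '=') come back as boolean flags:

assert parse_attributes("0=0,2=5") == {0: 0, 2: 5}
assert parse_attributes("7") == {7: True}
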
def is_human(user):
    """
    checks that the user is not a bot
    exists for test mocking
    """
    return user is not None and not user.is_bot

def check_against_gaps(regions, candidates):
    """Given a set of non-overlapping gaps and a list of candidate regions,
    return the candidates that do not overlap"""
    regions = sorted(regions, key=lambda l: l.start)
    candidates = sorted(candidates, key=lambda l: l.start)
    selected = []
    r = 0
    if not len(regions):
        return candidates  # no existing predictions - all candidates accepted
    for c in candidates:
        if c < regions[0] or c > regions[-1]:
            # outside any of the regions: just append
            selected.append(c)
        else:
            while r < len(regions) - 1 and c >= regions[r]:
                r += 1
            if c < regions[r]:
                # found a gap
                selected.append(c)
    return selected

def convert_extract_device(name):
    """ change Fusions UV to FusionsUV, etc """
    n = ""
    if name:
        n = name.replace(" ", "")
    return n

def solution1(root):
    """
    Inefficient solution by myself
    --- Runtime: 252ms ---
    :type root: TreeNode
    :rtype: list[int]
    """
    if root is None:
        return []
    queue = [(root, 0)]
    bfs_nodes = []
    output = []
    cur_depth = 0
    while queue:
        bfs_nodes.append(queue.pop(0))
        cur_node, cur_depth = bfs_nodes[-1]
        if cur_node.left is not None:
            queue.append((cur_node.left, cur_depth + 1))
        if cur_node.right is not None:
            queue.append((cur_node.right, cur_depth + 1))
    for i in range(cur_depth + 1):
        exec('row{} = []'.format(i))
    for node in bfs_nodes:
        exec('row{}.append(node[0].val)'.format(node[-1]))
    for i in range(cur_depth + 1):
        exec('output.append(max(row{}))'.format(i))
    return output

def _valdiate_ValueEntries(strInput: str, acttyp) -> bool:
    """
    Description
    -----------
    Helper method used for validation of entry widgets with only one parameter

    Parameters
    ----------
    `strInput` : string
        Input string which should be validated

    Return
    ------
    `bValidateResult` : bool
        Result of the validation encoded as a boolean
    """
    if (acttyp == '1'):
        if (strInput != "." and strInput != "+" and strInput != "-"
                and strInput != "e" and not strInput.isdigit()):
            return False
    return True

def find_repeat(source, elmt):
    """Helper function, find indices of repeated elements in source."""
    elmt_index = []
    s_index = 0
    e_index = len(source)
    while s_index < e_index:
        try:
            temp = source.index(elmt, s_index, e_index)
            elmt_index.append(temp)
            s_index = temp + 1
        except ValueError:
            break
    return elmt_index

def _format_params(cols, fields, where, crs, precision):
    """
    Transform parameters into a query input for ESRI's feature service.
    The feature service allows users to query & edit feature geoms & attributes.

    A breakdown of ESRI's feature service:
    https://developers.arcgis.com/rest/services-reference/enterprise/feature-service.htm

    Parameters
    ----------
    cols : str or list (default: 'all')
        str or list of columns or attributes to include
    fields : list
        list of fields supported by the boundary
    where : str
        sql like statement to filter geometries and attributes
    crs : int (default: British National Grid)
        epsg code to transform and extract geometries
    precision : int
        number of digits past the decimal point to be used

    Returns
    -------
    dict: dictionary containing query inputs for ESRI's feature service
    """
    if isinstance(cols, str):
        cols = cols.lower()
        if cols == "all":
            cols = "*"
    if isinstance(cols, list):
        cols = [col.lower() for col in cols]
        if all(elem in fields for elem in cols) is not True:
            raise ValueError(f"Only {fields} are supported for geometry type")
        cols = ", ".join(cols)
    return {
        "outFields": f"{cols}",
        "where": f"{where}",
        "outSR": crs,
        "f": "geojson",
        "geometryPrecision": f"{precision}",
    }

def get_word_freq(word_list, normalize=True):
    """Returns a sorted list of (word, word count) tuples in descending order.
    The frequency is normalized to values between 0 and 1 by default."""
    word_freq_dict = {}
    for w in word_list:
        if w in word_freq_dict:
            word_freq_dict[w] += 1
        else:
            word_freq_dict[w] = 1
    if normalize:
        # inplace update to avoid recreating the dictionary again
        word_freq_dict.update((key, round(val / len(word_list), 3))
                              for key, val in word_freq_dict.items())
    unsorted_word_freq = [(key, val) for key, val in word_freq_dict.items()]
    # sorting by word frequency in descending order
    word_freq = sorted(unsorted_word_freq, key=lambda tup: tup[1], reverse=True)
    return word_freq

def get_ngrams(text, n):
    """Returns all ngrams that are in the text.

    Inputs:
        text: string
        n: int
    Returns:
        list of strings (each is an ngram)
    """
    if text == "":
        return []
    tokens = text.split()
    return [" ".join(tokens[i:i + n]) for i in range(len(tokens) - (n - 1))]

def bytes_to_block(block_size: int, i: int) -> slice:
    """
    Given the block size and the desired block index, return the slice of
    bytes from 0 to the end of the given block.

    :param block_size: The block size.
    :param i: The block index.
    :return: slice of bytes from 0 to the end of the specified block index.
    """
    return slice(0, block_size * (i + 1))

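Illustrative slicing with an assumed 4-byte block size:

data = b"0123456789abcdef"
assert data[bytes_to_block(4, 0)] == b"0123"
assert data[bytes_to_block(4, 1)] == b"01234567"  # everything up to the end of block 1
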
def is_currently_on_packet(freshman):
    """
    Helper method for the search command
    Handles the filtering of results
    """
    for _ in filter(lambda packet: packet["open"], freshman["packets"]):
        return True
    return False

def get_newline_str(do_escape_n=True, do_br_tag=True):
    """
    Get the string to be used to indicate newlines in documents.
    Return a space if neither are specified.
    :param do_escape_n:
    :param do_br_tag:
    :return:
    """
    if do_escape_n is False and do_br_tag is False:
        return " "
    newline_str = ""
    if do_escape_n:
        newline_str += "\n"
    if do_br_tag:
        newline_str += "<br/>"
    return newline_str

def is_pos_int(number):
    """ Returns True if a number is a non-negative integer (zero is accepted). """
    return type(number) == int and number >= 0

def converged(losses, window=10, threshold=0.0001):
    """Detect loss convergence in a window."""
    try:
        if len(losses) < window:
            return False
        losses = losses[-window:]
        return max(losses) - min(losses) < threshold
    except Exception:
        return False

def power(a, b):
    """
    calculate the value of 'a' to the power 'b'
    :param a:
    :param b:
    :return:
    """
    return pow(a, b)

def split_attribute(attribute):
    """Splits a list of attributes in head and remainder."""
    return attribute[0], attribute[1:]

def fahrenheit_to_celsius(x):
    """Convert degrees in fahrenheit to celsius"""
    return (x - 32) * 0.5556

def get_source_region(global_cluster_response):
    """
    Get writer region from 'describe_global_clusters' response.
    :param global_cluster_response: output of boto3 describe_global_clusters
    :return: aws region
    """
    clusters = global_cluster_response["GlobalClusters"]
    for cluster in clusters:
        for member in cluster["GlobalClusterMembers"]:
            if member["IsWriter"] is True:
                return member["DBClusterArn"].split(":")[3]
    return "unknown"

def str_format_dict(jdict, **kwargs):
    """Pretty-format a dictionary into a nice looking string using the `json` package.

    Arguments
    ---------
    jdict : dict,
        Input dictionary to be formatted.

    Returns
    -------
    jstr : str,
        Nicely formatted string.
    """
    import json
    kwargs.setdefault('sort_keys', True)
    kwargs.setdefault('indent', 4)
    jstr = json.dumps(jdict, separators=(',', ': '), **kwargs)
    return jstr

def sum_nums(nums):
    """Given list of numbers, return sum of those numbers.

    For example:
        sum_nums([1, 2, 3, 4])

    Should return (not print): 10
    """
    # Python has a built-in function `sum()` for this, but we don't
    # want you to use it. Please write this by hand.
    # YOUR CODE HERE
    total = 0
    for num in nums:
        total += num
    return total

def show_navigator_boards(context):
    """Show navigator for boards"""
    page = context['page']
    index_begin = context['index_begin']
    index_end = context['index_end']
    index_total = context['index_total']
    mindex_begin = context['mindex_begin']
    mindex_end = context['mindex_end']
    table = context['table']
    return {
        'table': table,
        'page': page,
        'index_begin': index_begin,
        'index_end': index_end,
        'index_total': index_total,
        'mindex_begin': mindex_begin,
        'mindex_end': mindex_end,
    }

def day_fraction(time):
    """Convert a 24-hour time to a fraction of a day.

    For example, midnight corresponds to 0.0, and noon to 0.5.

    :param time: Time in the form of 'HH:MM' (24-hour time)
    :type time: string
    :return: A day fraction
    :rtype: float

    :Examples:

    .. code-block:: python

        day_fraction("18:30")
    """
    hour = int(time.split(":")[0])
    minute = int(time.split(":")[1])
    return hour/24 + minute/1440

def Series_info(series: list) -> dict:
    """Collect basic information about a numeric series.

    Args:
        series (list): array of series values

    Attributes:
        third_term (float)
        third_last_term (float)
        sum_of_the_series (float)

    Returns:
        dict: series information
    """
    third_term = series[2]
    third_last_term = series[-3]
    sum_of_the_series = 0
    for i in series:
        sum_of_the_series += i
    info = {
        "Third Term": third_term,
        "Third Last Term": third_last_term,
        "Sum Of The Series": sum_of_the_series,
        "Series": series
    }
    return info

def _is_authenticated_query_param(params, token):
    """Check if message is authentic.

    Args:
        params (dict): A dict of the HTTP request parameters.
        token (str): The predefined security token.

    Returns:
        bool: True if the auth param matches the token, False if not.
    """
    return params.get('auth') == token

def _QuoteValue(value):
    """Returns value quoted, with preference for "..."."""
    quoted = repr(value)
    if quoted.startswith('u'):
        quoted = quoted[1:]
    if quoted.startswith("'") and '"' not in value:
        quoted = '"' + quoted[1:-1] + '"'
    return quoted

def char_name(c):
    """
    Return the name of a character. Specifically, returns a descriptive name
    instead of whitespace. No type checking is done.

    Parameters:
        c: a string containing the character
    """
    cnames = {
        ' ': 'space',
        '\t': 'tab',
        '\n': 'newline',
        '\r': 'carriage return',
        '\f': 'formfeed',
        '\v': 'vertical tab',
        '\b': 'backspace',
        '\a': 'bell',
    }
    return cnames[c[0]] if c[0] in cnames else c[0]

def docsim_freetext(document, sess_id=None):
    """
    use document similarity to recommend trials based on similarity to title
    & abstract text of review
    @param document: title & abstract text of the review
    @param sess_id: session ID if transmitting progress via websocket
    """
    if sess_id:
        socketio = SocketIO(message_queue='amqp://localhost')
        socketio.emit('docsim_update', {'msg': 'started basicbot'}, room=sess_id)
        eventlet.sleep(0)
    if not document:
        if sess_id:
            socketio.emit('docsim_update',
                          {'msg': 'Unable to make predictions. Basicbot complete'},
                          room=sess_id)
        return []
    tf_transformer = TfidfVectorizer(use_idf=False)
    trials_vectorizer = pickle.load(open(utils.most_recent_tfidf_vec()))
    try:
        normalised_tf_vector = tf_transformer.fit_transform([document])
    except ValueError as e:
        print(e)
        return []
    if sess_id:
        socketio.emit('docsim_update', {'msg': 'vectorising stuff...'}, room=sess_id)
        eventlet.sleep(0)
    tfidf_matrix = utils.most_recent_tfidf()
    idf_indices = [trials_vectorizer.vocabulary_[feature_name]
                   for feature_name in tf_transformer.get_feature_names()
                   if feature_name in trials_vectorizer.vocabulary_.keys()]
    tf_indices = [tf_transformer.vocabulary_[feature_name]
                  for feature_name in trials_vectorizer.get_feature_names()
                  if feature_name in tf_transformer.vocabulary_.keys()]
    if not idf_indices:
        return []
    final_idf = trials_vectorizer.idf_[np.array(idf_indices)]
    final_tf = np.array(normalised_tf_vector.toarray()[0])[np.array(tf_indices)]
    review_tfidf = np.asmatrix(final_tf * final_idf)
    tfidf_matrix = tfidf_matrix[:, np.array(idf_indices)]
    if sess_id:
        socketio.emit('docsim_update', {'msg': 'calculating similarity...'}, room=sess_id)
        eventlet.sleep(0)
    cos_sim = cosine_similarity(review_tfidf, tfidf_matrix).flatten()
    related_docs_indices = cos_sim.argsort()[:-100:-1]
    ids = np.load(utils.most_recent_tfidf_labels())
    to_insert = ids[np.array(related_docs_indices)]
    if sess_id:
        # socketio.emit('docsim_update', {'msg': 'basicbot complete!'}, room=sess_id)
        eventlet.sleep(0)
    return list(to_insert)

def constr_leaf_novalue(xml_str, leafName):
    """construct the leaf update string"""
    xml_str += "<" + leafName + "></" + leafName + ">\r\n"
    return xml_str

def translate_signatures(signatures, rosetta, ignore_missing=False):
    """
    Translate gene identifiers in a signature dictionary.

    Args:
        signatures (dict of list): signature dictionary
        rosetta (dict): translation table mapping one gene identifier to another
        ignore_missing (boolean): If true, no error will be raised if an
            identifier is not in the translation dictionary. Respective
            entries will be skipped.

    Returns:
        dict of list: translated signature dictionary

    Raises:
        KeyError: if a gene is not in the rosetta dictionary unless
            ignore_missing is specified
    """
    if ignore_missing:
        # remove genes from signature which are not in rosetta.
        signatures = {
            tissue: [gene for gene in genes if gene in rosetta]
            for tissue, genes in signatures.items()
        }
    return {
        tissue: [rosetta[gene] for gene in genes]
        for tissue, genes in signatures.items()
    }

def rounded(minutes, base=5):
    """
    Round the number of provided minutes based on the amount of minutes.

    :param minutes: Real number of minutes to apply round operation on.
    :type minutes: int
    :param base: The base number of minutes to use in rounding.
    :type base: int
    :return: Number of minutes rounded based on amount of real amount of minutes.
    :rtype: int
    """
    div, mod = divmod(minutes, base)
    if round(float(mod) / base):
        # round up to the next multiple of base (the original hard-coded "+ 5"
        # only worked for the default base)
        return div * base + base
    return div * base

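Example values with the default base of 5 (illustrative only):

assert rounded(7) == 5    # 7 is closer to 5
assert rounded(8) == 10   # 8 is closer to 10
assert rounded(13) == 15
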
def sort_function(FF):
    """Remove all duplicate monomials in the feedback functions FF."""
    FF_Sort = []
    for i in range(len(FF)):
        # For each feedback function in FF
        if type(FF[i]) == list:
            tempx = FF[i]
            tempy = []
            for sublist in tempx:
                if sublist not in tempy:
                    tempy.append(sublist)
                else:
                    tempy.remove(sublist)
            FF_Sort.append(tempy)
        else:
            FF_Sort.append(FF[i])
    return FF_Sort

def sort_latency_keys(latency):
    """The FIO latency data has latency buckets and those are sorted ascending.
    The millisecond data has a >=2000 bucket which cannot be sorted in a
    'normal' way, so it is just stuck on top. This function returns a list of
    sorted keys.
    """
    placeholder = ""
    tmp = []
    for item in latency:
        if item == '>=2000':
            placeholder = ">=2000"
        else:
            tmp.append(item)
    tmp.sort(key=int)
    if placeholder:
        tmp.append(placeholder)
    return tmp

def add_content(old_html, raw_html):
    """Add html content together"""
    old_html += raw_html
    return old_html

def is_bytes(data):
    """
    Check if data is of type bytes or bytearray.
    :param data:
    :return:
    """
    try:
        data.decode()
        return True   # decode() exists, so data is bytes-like
    except AttributeError:
        return False  # str has no decode() in Python 3

def get_items_from_index(x, y):
    """ decipher the values of items in a list from their indices. """
    z = []
    for i in y:
        try:
            z.append(x[i])
        except IndexError:
            pass
    return z

def compute_score_interpretability_method(features_employed_by_explainer,
                                          features_employed_black_box):
    """
    Compute the score of the explanation method based on the features employed
    for the explanation compared to the features truly used by the black box
    """
    score = 0
    for feature_employe in features_employed_by_explainer:
        if feature_employe in features_employed_black_box:
            score += 1
    return score / len(features_employed_by_explainer)

def str2bool(value: str) -> bool:
    """
    Parse a string value and cast it into its boolean value
    :param value:
    :return:
    """
    if value in ["y", "yes", "t", "true", "on", "1"]:
        return True
    if value in ["n", "no", "f", "false", "off", "0"]:
        return False
    raise ValueError("boolean value unrecognised")

def generate_structure_for_lgb(fetch, main_key_value, derived_col_names):
    """
    It returns a list where the nodes of the model are in a structured format.

    Parameters
    ----------
    fetch : dictionary
        Contains the nodes in dictionary format.
    main_key_value : List
        Empty list used to append the nodes.
    derived_col_names : List
        Contains column names after preprocessing.

    Returns
    -------
    main_key_value :
        Returns the nodes in a structured format inside a list.
    """
    list_of_child = []
    for k, v in fetch.items():
        if k == 'threshold':
            main_key_value.append(str(v) + ' split_condition ' +
                                  str(derived_col_names[int(fetch.get('split_feature'))]))
        if k == 'leaf_value':
            main_key_value.append(str(v) + ' score')
        if isinstance(v, dict):
            list_of_child.append(v)
    for ii in range(len(list_of_child) - 1, -1, -1):
        generate_structure_for_lgb(list_of_child[ii], main_key_value, derived_col_names)
    return main_key_value

def make_inverse_map(lst):
    """
    Make a map from Lin index to a state index.

    Parameters
    ----------
    lst : list
        List containing Lin indices.

    Returns
    -------
    rez : list
        List containing a map from Lin index to a state index.
    """
    rez = [0] * len(lst)
    for j1 in range(len(lst)):
        rez[lst[j1]] = j1
    return rez

def minmatch(userstr, mylist, case=0):
    # --------------------------------------------------------------
    """
    Purpose:  Given a list 'mylist' with strings and a search string
              'userstr', find a -minimal- match of this string in the list.

    Inputs:
        userstr- The string that we want to find in the list of strings
        mylist-  A list of strings
        case-    Case insensitive search for case=0 else search is
                 case sensitive.

    Returns:
        1) None if nothing could be matched
        2) -1 if more than one element matches
        3) >= 0 the index of the matched list element
    """
    # --------------------------------------------------------------
    indx = None
    if case == 0:
        ustr = userstr.upper()
    else:
        ustr = userstr
    for j, tr in enumerate(mylist):
        if case == 0:
            liststr = tr.upper()
        else:
            liststr = tr
        if ustr == liststr:
            indx = j
            break
        i = liststr.find(ustr, 0, len(tr))
        if i == 0:
            if indx is None:
                indx = j
            else:
                indx = -1
    return indx

def process_results(unprocessed, P, R, G):
    """Process the results returned by the worker pool, sorting them by policy
    and run, e.g. results[i][j][k] are the results from policy i on run j on
    graph k.

    Parameters:
      - unprocessed: Unprocessed results (as returned by the worker pool)
      - P: number of policies
      - R: number of runs
      - G: number of graphs/SCMs/test cases
    """
    results = []
    for i in range(P):
        policy_results = []
        for r in range(R):
            run_results = unprocessed[(i*G*R + G*r):(i*G*R + G*(r+1))]
            policy_results.append(run_results)
        results.append(policy_results)
    return results

def knapsack(capacity, items):
    """Return maximum value of the items with specified capacity."""
    if not items or not capacity:
        return 0
    # work on a copy of the remaining items so both recursive branches
    # see the same list (the original popped from the shared list, which
    # corrupted the "without item" branch)
    item = items[-1]
    rest = items[:-1]
    if item.weight > capacity:
        return knapsack(capacity, rest)
    capacity_with_item = capacity - item.weight
    with_item = item.value + knapsack(capacity_with_item, rest)
    without_item = knapsack(capacity, rest)
    return max(with_item, without_item)

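A small usage sketch; Item is a hypothetical stand-in for whatever weight/value objects the caller uses:

from collections import namedtuple
Item = namedtuple("Item", ["weight", "value"])
items = [Item(2, 3), Item(3, 4), Item(4, 5)]
assert knapsack(5, items) == 7  # take the weight-2 and weight-3 items
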
def identifier(s):
    """
    Return s as a double-quoted string (good for psql identifiers)
    """
    return u'"' + s.replace(u'"', u'""').replace(u'\0', '') + u'"'

def input_data_task_id(conf):
    # type: (dict) -> str
    """Retrieve input data task id
    :param dict conf: configuration object
    :rtype: str
    :return: task id
    """
    return conf['task_id']

def occupied_squares_by_player(state, white_player):
    """
    Returns the x, y coordinates of the squares occupied by the given player.

    :param state: the given state
    :param white_player: True if the current player is white, False otherwise
    :return: the x, y coordinates of the squares occupied by the given player.
    """
    return state[0] if white_player else state[1]

def is_seq_list(list_or_dict):
    """Checks whether the "seq" key is a list or a dictionary. If only one seq
    is in the prefix-list, seq is a dictionary; if there are multiple seq
    entries, seq will be a list of dictionaries. Convert to a list if it is a
    dictionary."""
    if isinstance(list_or_dict, list):
        make_list = list_or_dict
    else:
        make_list = [list_or_dict]
    return make_list

def link_fix(text, name_dict):
    """ Replace old file names with new in markdown links. """
    new_text = text
    for old, new in name_dict.items():
        new_text = new_text.replace(f']({old})', f']({new})')
    return new_text

def get_subnet_cidr_block(cidr_block_formatting, instance_index, subnet_suffix):
    """Get subnet cidr block

    Args:
        cidr_block_formatting (string): Cidr block formatting
        instance_index (integer): Instance index
        subnet_suffix (string): subnet suffix

    Returns:
        string: Subnet cidr block
    """
    subnet_cidr_block = cidr_block_formatting.replace(
        "\\", "").format(instance_index, 0) + subnet_suffix
    return subnet_cidr_block

def vm_mhz_to_percentage(vm_mhz_history, host_mhz_history, physical_cpu_mhz):
    """ Convert VM CPU utilization to the host's CPU utilization.

    :param vm_mhz_history: A list of CPU utilization histories of VMs in MHz.
     :type vm_mhz_history: list(list(int))

    :param host_mhz_history: A history of the CPU usage by the host in MHz.
     :type host_mhz_history: list(int)

    :param physical_cpu_mhz: The total frequency of the physical CPU in MHz.
     :type physical_cpu_mhz: int,>0

    :return: The history of the host's CPU utilization in percentages.
     :rtype: list(float)
    """
    max_len = max(len(x) for x in vm_mhz_history)
    if len(host_mhz_history) > max_len:
        host_mhz_history = host_mhz_history[-max_len:]
    mhz_history = [[0] * (max_len - len(x)) + x
                   for x in vm_mhz_history + [host_mhz_history]]
    return [float(sum(x)) / physical_cpu_mhz for x in zip(*mhz_history)]

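A worked example with made-up numbers: two VMs plus the host on a 4000 MHz CPU, so each time step sums the MHz and divides by the physical capacity:

vm_history = [[500, 1000], [1000, 1000]]
host_history = [200, 200]
assert vm_mhz_to_percentage(vm_history, host_history, 4000) == [0.425, 0.55]
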
def alcohol_by_volume(original_gravity: float, final_gravity: float):
    """
    Calculate the Alcohol By Volume (ABV).
    """
    return (original_gravity - final_gravity) * 131.25

def get_item_filename(omeka_item_dict, hardlinks={}):
    """ Get filename for a given item. Assumes one file. """
    # FIXME need to allow for more than one filename, or how to figure out
    # definitive file for object.
    filename = omeka_item_dict['filenames'][0]
    if filename in hardlinks:
        filename = hardlinks[filename]
    return filename

def clz(v: int, bits: int) -> int:
    """ count leading zeroes """
    mask = 1 << (bits - 1)
    count = 0
    while (count < bits) and (v & mask) == 0:
        count += 1
        v = v * 2
    return count

def split_alnum(s):
    """
    Split line to a sequence of iterating alpha and digit strings

    :param s:
    :type s: str
    :return: list
    :rtype: list

    >>> split_alnum("Fa 0/1")
    ['Fa ', 0, '/', 1]
    >>> split_alnum("Fa 0/1.15")
    ['Fa ', 0, '/', 1, '.', 15]
    >>> split_alnum("ge-1/0/1")
    ['ge-', 1, '/', 0, '/', 1]
    >>> split_alnum("ge-1/0/1.15")
    ['ge-', 1, '/', 0, '/', 1, '.', 15]
    """
    def convert(x):
        try:
            return int(x)
        except ValueError:
            return x

    r = []
    digit = None
    for c in s:
        d = c.isdigit()
        if d != digit:
            digit = d
            r += [c]
        else:
            r[-1] += c
    return [convert(x) for x in r]

def get_list_as_str(list_of_objs):
    """ Returns the list as a string. """
    return '[' + ' '.join([str(x) for x in list_of_objs]) + ']'

def enc_vle(value):
    """takes an integer value, returns bytes (variable-length encoded)"""
    if value >= 0xFFFFFFFF:
        print("writing unreasonably large VLE (0d{0})".format(value))
    ret = bytearray()
    while value >= 0x80:
        value, octet = divmod(value, 0x80)
        ret.append(octet + 0x80)
    ret.append(value)
    return bytes(ret)

def maxloc(seq):
    """
    Return the index of the (first) maximum in seq

    >>> assert maxloc([1,3,2,3]) == 1
    """
    return max(enumerate(seq), key=lambda s: s[1])[0]

def get_station_daily_path(station_id):
    """
    Get path to a station daily file.
    :param station_id:
    :return:
    """
    return "/pub/data/ghcn/daily/all/{0}.dly".format(station_id)

def nearest_25(num):
    """ Round the number to the nearest whole number divisible by 25.

    This will round up or down, to find the closest number

    Examples:
    ---------
        >>> nearest_25(5)
        0
        >>> nearest_25(25)
        25
        >>> nearest_25(40)
        50
        >>> nearest_25(810)
        800
    """
    num = float(num) / 100
    num = round(num * 4) / 4
    num = num * 100
    return int(num)

def check_small_primes(n):
    """
    Returns True if n is divisible by a number in SMALL_PRIMES.

    Based on the MPL licensed
    https://github.com/letsencrypt/boulder/blob/58e27c0964a62772e7864e8a12e565ef8a975035/core/good_key.go
    """
    small_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751
    ]

    for prime in small_primes:
        if n % prime == 0:
            return True
    return False

def ordered(obj):
    """Return ordered version of the passed object

    Dictionaries are not ordered in all Python versions, and the
    implementation of sort_keys() in the JSON library seems erratic
    in terms of effect
    """
    if isinstance(obj, dict):
        return sorted((k, ordered(v)) for k, v in obj.items())
    if isinstance(obj, list):
        try:
            return sorted(ordered(x) for x in obj)
        except TypeError:
            # list contains non-comparable types
            return obj
    else:
        return obj

def get_peer_and_channel(peers, scid):
    """Look for the channel identified by {scid} in our list of {peers}"""
    for peer in peers:
        for channel in peer["channels"]:
            if channel.get("short_channel_id") == scid:
                return (peer, channel)
    return (None, None)

def find_point(coords, point_list):
    """
    Coordinates represent either the source or destination [x, y] coordinates
    of a line. Given a list of unique points, map one to the given coordinates.
    """
    for p in point_list:
        if p["coordinates"] == coords:
            return p["idx"]
    print("Couldn't find the point -- {}!\n".format(str(point_list)))

def polynomial1D(x, m, b):
    """Return the value of a line with slope m and offset b

    x: the independent variable
    m: the slope of the line
    b: the offset of the line
    """
    return (m * x) + b

def straight(ranks):
    """Return True if the ordered ranks form a 5-card straight."""
    return (max(ranks) - min(ranks) == 4) and len(set(ranks)) == 5

def init_model(N, y0):
    """Initialize the SEIR model.

    :param int y0: Infected rate at initial time step.
    :param int N: Population
    :returns: Model at initial time step
    :rtype: tuple(int, int, int, int)
    """
    return N - y0, y0, 0, 0

def next_power_of_2(n):
    """
    Return next power of 2 greater than or equal to n
    """
    return 2 ** (n - 1).bit_length()

def gcd(a, b):
    """Returns the greatest common divisor of a and b.
    Should be implemented using recursion.

    >>> gcd(34, 19)
    1
    >>> gcd(39, 91)
    13
    >>> gcd(20, 30)
    10
    >>> gcd(40, 40)
    40
    """
    "*** YOUR CODE HERE ***"
    min_val = min(a, b)  # get max and min
    max_val = max(a, b)
    x = max_val % min_val  # find remainder of max divided by min
    if x == 0:
        # if no remainder then the divisor is the greatest common divisor
        return min_val
    else:
        return gcd(min_val, x)

def reverse_dotted_decimals(ipaddress):
    """
    Reverse the order of the decimals in the specified IP-address.
    E.g. "192.168.10" would become "10.168.192"
    """
    return '.'.join(ipaddress.split('.')[::-1])

def calc_f(params, strain):
    """Calculate the elastic free energy in the matrix"""
    return (
        0.5 * params["c11"] * (strain["e11"] ** 2 + strain["e22"] ** 2)
        + params["c12"] * strain["e11"] * strain["e22"]
        + 2 * params["c44"] * strain["e12"] ** 2
    )

def extractDidParts(did, method="igo"):
    """
    Parses and returns keystr from did
    raises ValueError if fails parsing
    """
    try:  # correct did format pre:method:keystr
        pre, meth, keystr = did.split(":")
    except ValueError as ex:
        raise ValueError("Malformed DID value")

    if pre != "did" or meth != method:
        raise ValueError("Invalid DID value")

    return keystr

def sunlight_duration(hour_angle_sunrise):
    """Returns the duration of Sunlight, in minutes, with Hour Angle in
    degrees, hour_angle."""
    # Daylight spans 2 * hour_angle_sunrise degrees and the Sun moves
    # 1 degree per 4 minutes (360 degrees per 24 * 60 minutes),
    # hence 2 * 4 = 8 minutes per degree of sunrise hour angle.
    sunlight_duration = 8 * hour_angle_sunrise
    return sunlight_duration

def wavenumbers_to_nm(wavenumbers):
    """nm from given wavenumbers (cm^-1)"""
    return 10**7 / wavenumbers

def count_set_bits(n):
    """Number of '1' bits in binary expansion of a nonnegative integer."""
    return 1 + count_set_bits(n & n - 1) if n else 0

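Quick sanity checks (13 is 0b1101, so three set bits):

assert count_set_bits(13) == 3
assert count_set_bits(0) == 0
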
def median(numbers):
    """Return the median of the list of numbers.
    found at: http://mail.python.org/pipermail/python-list/2004-December/253517.html"""
    # Sort the list and take the middle element.
    n = len(numbers)
    copy = sorted(numbers[:])  # So that "numbers" keeps its original order
    if n & 1:  # There is an odd number of elements
        return copy[n // 2]
    else:
        return (copy[n // 2 - 1] + copy[n // 2]) / 2.0

def num_decodings(code):
    """
    Given an alphanumeric mapping and an encoded message, count the number of
    ways it can be decoded
    """
    decodings = []
    for idx in range(len(code)):
        # check one letter before
        if code[idx] == '0':
            # zero is special case that maps only to being part of
            # 10 or 20, so it doesn't have a place in the one-letter
            # mapping
            one_letter = 0
        else:
            one_letter = decodings[idx-1] if idx > 0 else 1
        # check two letter code
        if idx >= 1 and 10 <= int(code[idx-1:idx+1]) <= 26:
            two_letter = decodings[idx-2] if idx > 1 else 1
        else:
            two_letter = 0
        decodings.append(one_letter + two_letter)
    return decodings[-1]

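For instance, under the usual 'A'=1 ... 'Z'=26 mapping, "226" has three decodings (2-2-6, 22-6, 2-26):

assert num_decodings("226") == 3
assert num_decodings("12") == 2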