content
stringlengths
42
6.51k
def command_parser(command):
    """Parse a user command into a (type, args) pair.

    :param command: str, raw command typed by the user.
    :return: (str, tuple), command type and tuple of accompanying arguments.
    """
    text = command.strip()
    simple = {"": 'EMPTY', "help": 'HELP', "status": 'STATUS', "quit": 'QUIT'}
    if text in simple:
        return simple[text], ()
    # Commands that carry exactly one argument.
    for prefix, kind in (("submit", 'SUBMIT'), ("history", 'HISTORY')):
        if text.startswith(prefix):
            parts = text.split()
            if len(parts) == 2:
                return kind, (parts[1],)
            return 'IMPROPER COMMAND', ()
    return 'IMPROPER COMMAND', ()
def author_name_check(agr_data, value):
    """Check that a database reference has all authors named and that one
    author's name equals ``value``.

    ``<CollectiveName>`` entries in PubMed XML are not a standard author
    pattern, which is why authors may lack a usable ``name`` field.

    :param agr_data: dict, reference record that may contain an 'authors' list.
    :param value: author name that must appear among the authors.
    :return: 'Success', 'Failure', or 'Failure: No authors found in database'.
    """
    if 'authors' not in agr_data:
        return 'Failure: No authors found in database'
    result = 'Success'
    has_specific_value = False
    for author in agr_data['authors']:
        has_name = False
        if 'name' in author:
            if author['name'] != '':
                has_name = True
            if author['name'] == value:
                has_specific_value = True
        if not has_name:
            result = 'Failure'
    # Bug fix: an `assert has_specific_value is True` used to sit here, which
    # crashed with AssertionError instead of letting the function report
    # 'Failure' (and made the following check unreachable in that case).
    if not has_specific_value:
        result = 'Failure'
    return result
def read_md_file(filepath):
    """Return the full contents of a Markdown file as a string."""
    with open(filepath, "r") as handle:
        return handle.read()
def linky_to_json(linky_dict, occurence):
    """Convert a Linky teleinfo dict into an InfluxDB-style JSON body.

    :param linky_dict: dict with 'HC', 'HP', 'IINST', 'PAPP' and 'IMAX' keys.
    :param occurence: value stored in each point's 'Occurence' tag.
    :return: list of measurement dicts.
    """
    # (measurement name, teleinfo key) pairs, in emission order.
    measurements = (
        ("Index_HC", 'HC'),
        ("Index_HP", 'HP'),
        ("Current_A", 'IINST'),
        ("Power_VA", 'PAPP'),
        ("Imax_A", 'IMAX'),
    )
    return [
        {
            "measurement": name,
            "tags": {"Location": "Linky", "Occurence": occurence},
            "fields": {"value": linky_dict[key]},
        }
        for name, key in measurements
    ]
def join_words(words, language="en"):
    """Join words with a language-appropriate separator.

    Japanese ("ja") and Mandarin ("cmn") do not separate words with
    whitespace; every other language code uses a single space.

    :param words: List of words
    :type words: List[str]
    :param language: Language code
    :type language: str
    :return: Words joined with a language-specific separator.
    :rtype: str
    """
    separator = "" if language in {"ja", "cmn"} else " "
    return separator.join(words)
def exclude_from_weight_decay(name):
    """Return True when a parameter should be excluded from weight decay.

    Layer-norm parameters and bias parameters (recognized by common
    suffixes) are excluded.
    """
    if not isinstance(name, str):
        # Accept variable-like objects carrying a .name attribute.
        name = name.name
    if "layer_norm" in name:
        return True
    return name.endswith(("_bias", "_b", ".b_0"))
def get_list_years(start_date=1985, end_date=2019):
    """Return the list of years from ``start_date`` up to, but not
    including, ``end_date``.

    (The previous docstring claimed a random year was picked; the function
    has always returned the full range.)

    :param start_date: first year, inclusive.
    :param end_date: last year, exclusive.
    :return: list of consecutive years.
    """
    return list(range(start_date, end_date))
def get_major_domain(data, cutoff):
    """Find the prevalent taxonomic domain in a {domain: percentage} dict.

    Example input::

        {'Eukaryota': 100.0, 'Other': 0.0, 'Viruses': 0.0,
         'unclassified sequences': 0.0, 'Viroids': 0.0,
         'Archaea': 0.0, 'Bacteria': 0.0}

    :param data: dict mapping domain name to its percentage.
    :param cutoff: minimum percentage for a domain to count as major.
    :return: the major domain name, or 'Mixed' when no domain dominates.
    """
    major_domain = 'Mixed'
    maximum = max(data, key=data.get)
    if data[maximum] >= cutoff:
        major_domain = maximum
    else:
        # Collect the domains actually observed (non-zero percentage).
        # Bug fix: .iteritems() was Python 2 only and raised AttributeError
        # under Python 3; .items() is the portable equivalent.
        found_domains = [domain for domain, value in data.items() if value > 0]
        # If only two domains and one of them is `unclassified`, consider
        # the other one the major domain.
        if len(found_domains) == 2 and 'unclassified sequences' in found_domains:
            found_domains.remove('unclassified sequences')
            major_domain = found_domains.pop()
    return major_domain
def hex_to_RGB(hex):
    """Convert a colour string like '#FFFFFF' to [255, 255, 255]."""
    # Each channel is a two-character hex substring starting at 1, 3 and 5;
    # base 16 is passed to int() for the change of base.
    return [int(hex[pos:pos + 2], 16) for pos in (1, 3, 5)]
def is_callable(x):
    """Return True when ``x`` can actually be called like a function.

    Uses the builtin ``callable``, which checks for ``__call__`` on the
    *type*; the previous ``hasattr(x, '__call__')`` also accepted objects
    with a mere instance attribute named ``__call__``, which Python does
    not invoke.
    """
    return callable(x)
def first(iterable, default=None):
    """Return the first element of ``iterable``, or ``default`` when empty."""
    for item in iterable:
        return item
    return default
def process_wifi(strength: int) -> str:
    """Map a wifi signal-strength reading to a display label.

    Larger readings map to weaker labels, so `strength` is presumably a
    negated dBm value — TODO confirm with the caller.
    """
    for threshold, label in ((86, "Low"), (71, "Medium"), (56, "High")):
        if strength >= threshold:
            return label
    return "Full"
def self_multiply(tensor_tuple: tuple):
    """Multiply together all non-None items of ``tensor_tuple``.

    :param tensor_tuple: tuple of multiplicable items, possibly containing
        None entries, which are ignored.
    :return: the product of the remaining items, or 0 when none remain.
    """
    # Bug fix: the old code used list.remove(None), which drops only the
    # FIRST None — any later None then crashed the multiplication.
    factors = [item for item in tensor_tuple if item is not None]
    if not factors:
        return 0
    product = factors[0]
    for factor in factors[1:]:
        product = product * factor
    return product
def top_stream(cache, user_id):
    """Peek at the top of the user's stack in the cache.

    :param cache: werkzeug BasicCache-like object
    :param user_id: id of user, used as key in cache
    :return: top item in user's cached stack, otherwise None
    """
    if not user_id:
        return None
    stack = cache.get(user_id)
    # Guard the empty stack too: the old code only checked for None and
    # raised IndexError on an empty list.
    if not stack:
        return None
    # Index instead of pop() so this is a true peek even when the cache
    # hands back a shared list object.
    return stack[-1]
def calc_PLF(PLR, PLFs):
    """Evaluate the part-load-factor quadratic for each chiller.

    Coefficients taken from https://comnet.org/index.php/382-chillers and
    only include water source electrical chillers.

    :param np.array PLR: part load ratio for each chiller
    :param PLFs: mapping with 'plf_a', 'plf_b' and 'plf_c' coefficients
    :return np.array: part load factor for each chiller
    """
    a = PLFs['plf_a']
    b = PLFs['plf_b']
    c = PLFs['plf_c']
    return a + b * PLR + c * PLR ** 2
def get_T_value(conf_level: float = 0.05, dof: int = 10):
    """Look up the two-tailed Student's t critical value (95% level).

    :param conf_level: significance level; currently unused — the table
        below is hard-coded for the two-tailed 95% level.
    :param dof: degrees of freedom (1-based index into the table).
    :return: the critical t value; 1.960 (the normal limit) for dof > 100.
    """
    # Bug fix: removed a leftover debug print(conf_level).
    # Critical values for dof = 1..100.
    T_value = [
        12.71, 4.303, 3.182, 2.776, 2.571, 2.447, 2.365, 2.306, 2.262, 2.228,
        2.201, 2.179, 2.160, 2.145, 2.131, 2.120, 2.110, 2.101, 2.093, 2.086,
        2.080, 2.074, 2.069, 2.064, 2.060, 2.056, 2.052, 2.048, 2.045, 2.042,
        2.040, 2.037, 2.035, 2.032, 2.030, 2.028, 2.026, 2.024, 2.023, 2.021,
        2.020, 2.018, 2.017, 2.015, 2.014, 2.013, 2.012, 2.011, 2.010, 2.009,
        2.008, 2.007, 2.006, 2.005, 2.004, 2.003, 2.002, 2.002, 2.001, 2.000,
        2.000, 1.999, 1.998, 1.998, 1.997, 1.997, 1.996, 1.995, 1.995, 1.994,
        1.994, 1.993, 1.993, 1.993, 1.992, 1.992, 1.991, 1.991, 1.990, 1.990,
        1.990, 1.989, 1.989, 1.989, 1.988, 1.988, 1.988, 1.987, 1.987, 1.987,
        1.986, 1.986, 1.986, 1.986, 1.985, 1.985, 1.985, 1.984, 1.984, 1.984]
    if dof > 100:
        # Converges to the standard normal quantile for large dof.
        return 1.960
    return T_value[dof - 1]
def jaccard_similarity(a, b):  # noqa
    """Jaccard similarity of the element sets of ``a`` and ``b``.

    Examples:
        >>> jaccard_similarity('abc', 'cba')
        1.0
        >>> jaccard_similarity('abc', 'bcd')
        0.5

    Args:
        a: first iterable.
        b: second iterable.
    """
    sa, sb = set(a), set(b)
    if not sa or not sb:
        return 0.0
    return 1.0 * len(sa & sb) / len(sa | sb)
def get_float(string):
    """Parse a float from a string, ignoring parentheses and thousands commas."""
    for junk in ('(', ')', ','):
        string = string.replace(junk, '')
    return float(string)
def _chord(x, y0, y1): """ Compute the area of a triangle defined by the origin and two points, (x,y0) and (x,y1). This is a signed area. If y1 > y0 then the area will be positive, otherwise it will be negative. """ return 0.5 * x * (y1 - y0)
def convert_int_to_ip(int_ip):
    """Convert an integer IPv4 address to its dotted-quad string form.

    Args:
        int_ip (int): IP to be converted.
    """
    octets = [(int_ip >> shift) & 0xFF for shift in (24, 16, 8, 0)]
    return '.'.join(str(octet) for octet in octets)
def _get_node_url(node_type: str, node_name: str) -> str: """Constructs the URL to documentation of the specified node. Args: node_type (str): One of input, model, dabble, draw or output. node_name (str): Name of the node. Returns: (str): Full URL to the documentation of the specified node. """ node_path = f"peekingduck.pipeline.nodes.{node_type}.{node_name}.Node" url_prefix = "https://peekingduck.readthedocs.io/en/stable/" url_postfix = ".html#" return f"{url_prefix}{node_path}{url_postfix}{node_path}"
def filter_nan_str(x):
    """Drop 'nan' strings (case-insensitive) from a list of strings.

    Non-list inputs are returned unchanged.
    """
    if isinstance(x, list):
        return [item for item in x if item.lower() != 'nan']
    return x
def parse_post_age(text):
    """Extract the age digits: 'posted 10 days ago' => '10'.

    Anything mentioning 'hours' counts as one day ('1').
    """
    if 'hours' in text:
        return '1'
    return ''.join(ch for ch in text if ch.isdigit())
def fontInfoPostscriptWindowsCharacterSetValidator(value):
    """
    Version 2+. Valid Windows character-set codes are the integers 1-20.
    """
    return value in range(1, 21)
def default_format(x: float) -> str:
    """General format used for loss, hyper params, etc.: round to 6 places."""
    rounded = round(x, 6)
    return str(rounded)
def retrieve_nuts3(url: str) -> str:
    """Extract the nuts3 code (second-to-last path segment) from a URL."""
    segments = url.split('/')
    return segments[-2]
def add_outputs_from_dict(
    api_current_dict: dict,
    fields_to_keep: list
) -> dict:
    """Keep only the entries of ``api_current_dict`` whose keys appear in
    ``fields_to_keep``.

    :param api_current_dict: the origin dict
    :param fields_to_keep: the list of wanted keys
    :return: the filtered dict (empty when either argument is falsy)
    """
    if not api_current_dict or not fields_to_keep:
        return {}
    return {
        field: api_current_dict[field]
        for field in fields_to_keep
        if field in api_current_dict
    }
def axis_str2idx(a):
    """Return index (0, 1, 2) for an axis designation beginning with x, y or z."""
    mapping = {'x': 0, 'y': 1, 'z': 2}
    first_char = a.lower()[0]
    return mapping[first_char]
def create_levels(levels, base_factory):
    """Create a multidimensional nested-list array.

    :param levels: sequence of ints giving the size of each nesting level.
    :param base_factory: nullary function producing each leaf value.
    :return: nested lists.
    """
    def build(size, remaining):
        # Leaf level: fill with fresh base units; otherwise recurse inward.
        if not remaining:
            return [base_factory() for _ in range(size)]
        return [build(remaining[0], remaining[1:]) for _ in range(size)]

    return build(levels[0], levels[1:])
def join(chars=()):
    """Join a sequence of byte characters and decode the result to str.

    :param bytes chars: list of chars
    :rtype: str
    """
    joined = b''.join(chars)
    return joined.decode()
def part1(data):
    """Run the intcode program and return the value at position 0.

    Positions 1 and 2 are set to 12 and 2 before running (skipped for
    short test programs that have no position 12). Instructions are:
      1, x, y, r - program[r] = program[x] + program[y]
      2, x, y, r - program[r] = program[x] * program[y]
      99         - halt
    """
    program = [int(token) for token in data.split(',')]
    # Test data has no position 12 and shouldn't be changed.
    if len(program) > 12:
        program[1] = 12
        program[2] = 2
    pointer = 0
    while program[pointer] != 99:
        opcode, lhs, rhs, dest = program[pointer:pointer + 4]
        if opcode == 1:
            program[dest] = program[lhs] + program[rhs]
        else:
            program[dest] = program[lhs] * program[rhs]
        pointer += 4
    return program[0]
def counting_sort(input_array):
    """Stable counting sort, ascending.

    :param input_array: list of non-negative integers (left untouched).
    :return: a new sorted list.
    """
    # Bug fix: max() on an empty list raised ValueError.
    if not input_array:
        return []
    k = max(input_array)
    # counts[v] = number of occurrences of value v.
    counts = [0] * (k + 1)
    for value in input_array:
        counts[value] += 1
    # Prefix sums: counts[v] = number of elements <= v.
    for value in range(1, k + 1):
        counts[value] += counts[value - 1]
    # Walk backwards so equal keys keep their relative order (stability).
    sorted_array = [0] * len(input_array)
    for index in range(len(input_array) - 1, -1, -1):
        value = input_array[index]
        counts[value] -= 1
        sorted_array[counts[value]] = value
    return sorted_array
def filter_nulls(field):
    """Map null markers to 0; otherwise attempt float conversion, falling
    back to the string form when the value is not numeric."""
    if field is None or field == "<null>":
        return 0
    try:
        return float(field)
    except ValueError:
        return str(field)
def check_for_output_match(output, test_suite):
    """Return a bool list with True for each output line matching its test case.

    Returns None when the function suspects the user printed something they
    should not have (wrong line count, bad prefix, or a non-integer value).
    """
    output_lines = output.splitlines()
    if len(output_lines) != len(test_suite):
        return None  # number of outputs != number of test cases
    prefix = "RESULT: "
    result = []
    for exe_output, test_case in zip(output_lines, test_suite):
        # Every line must have the exact format "RESULT: <integer>".
        if not exe_output.startswith(prefix):
            return None  # the user printed something
        exe_value = exe_output[len(prefix):]
        try:
            int(exe_value)
        except ValueError:
            return None  # the user printed something
        # (A redundant second int(exe_value) conversion was removed here.)
        result.append(test_case['output'] == exe_value)
    return result
def _avoid_duplicate_arrays(types): """Collapse arrays when we have multiple types. """ arrays = [t for t in types if isinstance(t, dict) and t["type"] == "array"] others = [t for t in types if not (isinstance(t, dict) and t["type"] == "array")] if arrays: items = set([]) for t in arrays: if isinstance(t["items"], (list, tuple)): items |= set(t["items"]) else: items.add(t["items"]) if len(items) == 1: items = items.pop() else: items = sorted(list(items)) arrays = [{"type": "array", "items": items}] return others + arrays
def link(url, name):
    """Print a clickable link on supporting terminals (OSC 8 escape)."""
    return f"\033]8;;{url}\033\\{name}\033]8;;\033\\"
def exhaustCall(v):
    """Follow a chain of zero-argument calls until the result is not
    callable; return ``v`` itself when it is not callable."""
    result = v
    while callable(result):
        result = result()
    return result
def clean_up_spacing(sentence):
    """Trim leading and trailing whitespace from the sentence.

    :param sentence: str - a sentence to clean of leading and trailing
        space characters.
    :return: str - the cleaned sentence.
    """
    return sentence.strip()
def expand_suggested_responses(instrument, lookups, *responses):
    """
    Maps any {'_suggested_response': pk} values to the SuggestedResponse by that id,
    as long as it is present in the ``lookups`` dict.

    :param instrument: object whose ``id`` is used in error messages
        (presumably a CollectionInstrument — confirm with callers).
    :param lookups: dict mapping suggested-response pk -> resolved instance.
    :param responses: one or more raw response values; only dicts carrying
        a '_suggested_response' key are transformed.
    :raises ValueError: when a bound response id is missing from ``lookups``.
    :return: a single value when one response was given, else a list.
    """
    values = []
    for response in responses:
        data = response  # Assume raw passthrough by default
        # Transform data referring to a SuggestedResponse into that instance directly
        if isinstance(data, dict) and "_suggested_response" in data:
            bound_response_id = data["_suggested_response"]
            if bound_response_id in lookups:
                data = lookups[bound_response_id]
            else:
                raise ValueError(
                    "[CollectionInstrument id=%r] Invalid bound response id=%r in choices: %r" % (
                        instrument.id,
                        bound_response_id,
                        lookups,
                    )
                )
        values.append(data)
    # Preserve scalar-in/scalar-out behavior for a single response.
    if len(responses) == 1:
        return values[0]
    return values
def listify(value, return_empty_list_if_none=True, convert_tuple_to_list=True):
    """Ensure that ``value`` is a list, wrapping it when necessary.

    # Arguments
        value: A list or something else.
        return_empty_list_if_none: A boolean. When True (default), None is
            converted to an empty list (`[]`); when False, to `[None]`.
        convert_tuple_to_list: A boolean. When True (default), a tuple is
            converted to a list. When False, the tuple is returned
            unconverted.

    # Returns
        A list, except that when `value` is a tuple and
        `convert_tuple_to_list` is False, the tuple itself.
    """
    if isinstance(value, list):
        return value
    if value is None:
        return [] if return_empty_list_if_none else [None]
    if isinstance(value, tuple):
        # Bug fix: the documented contract returns an unconverted tuple
        # as-is; the old code fell through and wrapped it as [tuple].
        return list(value) if convert_tuple_to_list else value
    return [value]
def flatten(items):
    """flatten(sequence) -> list

    Return a single, flat list containing all elements retrieved from the
    sequence and all recursively contained sub-sequences (iterables).
    Strings and bytes are treated as atoms: iterating them previously
    recursed forever, because each character is itself an iterable string.

    Examples:
    >>> flatten([1, 2, [3, 4], (5, 6)])
    [1, 2, 3, 4, 5, 6]
    >>> flatten(["ab", [1, "c"]])
    ['ab', 1, 'c']
    """
    result = []
    for item in items:
        if hasattr(item, '__iter__') and not isinstance(item, (str, bytes)):
            result.extend(flatten(item))
        else:
            result.append(item)
    return result
def find_adjacent_segment_type(segments, time): """Find boundary type on left and right (NONSPEECH or SPEECH)""" # find previous segment type segments[0].append("NS") prev = segments[0] for i in range(1, len(segments)): if (segments[i][0] - time) > prev[2]: segments[i].append("NS") # nonspeech else: segments[i].append("SC") # speaker change prev = segments[i] # find following segment type for i in range(0, len(segments) - 1): if (segments[i][2] + time) < segments[i + 1][0]: segments[i].append("NS") # nonspeech else: segments[i].append("SC") # speaker change segments[len(segments) - 1].append("NS") return segments
def calculate_TB(salt, ver="Uppstrom"): """ Calculate Total Borate """ if "upp" in ver.lower(): return 0.0004157 * salt / 35 # in mol/kg-SW else: # Lee, Kim, Byrne, Millero, Feely, Yong-Ming Liu. 2010. # Geochimica Et Cosmochimica Acta 74 (6): 1801?1811. return 0.0004326 * salt / 35
def result_fixture(controller_state, uuid4):
    """Return a server result message."""
    message = {
        "type": "result",
        "success": True,
        "result": {"state": controller_state},
        "messageId": uuid4,
    }
    return message
def buildConnectionString(params):
    """Build a ';'-separated connection string from a dictionary.

    Returns string.
    """
    pairs = ["%s=%s" % item for item in params.items()]
    return ";".join(pairs)
def equalize_array(array):
    """Pad every row of a 2D list with "" so all rows share the max length."""
    if not array:
        return array
    target = max(len(row) for row in array)
    for row in array:
        row += [""] * (target - len(row))
    return array
def remove_string_float(string):
    """Remove spurious '.0' before a tab or newline in a TSV string.

    Pandas sometimes renders integers as floats; this strips the
    unnecessary '.0' when the next character is a tab or newline.

    Args:
        string: tsv file contents as one string.
    Return:
        string: string with the float markers removed.
    """
    for ending in ("\t", "\n"):
        string = string.replace(".0" + ending, ending)
    return string
def deserialize(node):
    """Rebuild an ast instance from an expanded (klass, kwargs) dict tree."""
    if isinstance(node, tuple):
        klass, kws = node
        return klass(**deserialize(kws))
    if isinstance(node, dict):
        return {key: deserialize(value) for key, value in node.items()}
    if isinstance(node, list):
        return [deserialize(child) for child in node]
    return node
def relative_class_counts(data):
    """input: a dict mapping class keys to their absolute counts
    output: a dict mapping class keys to their relative counts"""
    total = sum(data.values())
    return {key: 1.0 * count / total for key, count in data.items()}
def cut_text(value):
    """Return ``value`` truncated to 50 chars with an ellipsis when longer."""
    if len(value) > 50:
        return f"{value[:50]}..."
    return value
def split_id(image_id):
    """Split Earth Engine image ID into collection and index components.

    Parameters
    ----------
    image_id: str
        Earth engine image ID.

    Returns
    -------
    tuple
        A tuple of strings: (collection name, image index).
    """
    ee_coll_name, _, index = image_id.rpartition("/")
    return ee_coll_name, index
def transaction_data_list_create(in_trans_list):
    """Split each transaction description on spaces into a tuple.

    Parameters:
        in_trans_list (List<str>): List of strings describing transactions.

    Returns:
        (List<tuple>): tuples of transaction data in the following format:
            symbol = tuple[0] (e.g. 'AAPL', 'Cash')
            type   = tuple[1] (e.g. 'SELL', 'DIVIDEND')
            shares = tuple[2] (Decimal shares to sell/buy; 0 for cash)
            amount = tuple[3] (Decimal cash exchanged)
    """
    result = []
    for transaction in in_trans_list:
        result.append(tuple(transaction.split(' ')))
    return result
def mergesort(unsorted_list):
    """Sort by recursively splitting into halves and merging them back."""
    if len(unsorted_list) <= 1:
        return unsorted_list

    mid = len(unsorted_list) // 2
    left = mergesort(unsorted_list[:mid])
    right = mergesort(unsorted_list[mid:])

    # Merge the two sorted halves, always taking the smaller head.
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def hash_string(s: str) -> bool:
    """
    Verify the manifest hash string for a given artifact.

    :param s: str
    :return: bool
    """
    # ToDo: verify hash pattern
    # isinstance (rather than `type(s) is str`) is the idiomatic type
    # check and also accepts str subclasses.
    return isinstance(s, str)
def update_dict(d, e, copy=True):
    """Return ``d`` updated by ``e``; ``d`` is copied first unless copy=False.

    Examples
    --------
    >>> d = {0: 'a', 1: 'b'}
    >>> e = {1: 'c', 2: 'd'}
    >>> f = update_dict(d, e)
    >>> f
    {0: 'a', 1: 'c', 2: 'd'}
    >>> d
    {0: 'a', 1: 'b'}
    """
    target = d.copy() if copy else d
    target.update(e)
    return target
def tcl_escape(s):
    """Escape the given string as appropriate for use in a Tcl string
    and regex."""
    for special in "[]$?()":
        s = s.replace(special, "\\" + special)
    return s
def get_base_to_base_mapping_from_aligned_pairs(reftuple, qLen, strand):
    """
    Returns: dict of 0-based position --> 0-based ref position

    NOTE(review): ``reftuple`` looks like pysam-style aligned pairs of
    (query_pos, ref_pos), where either member may be None at indels —
    confirm with callers.

    :param reftuple: sequence of (qpos, rpos) pairs.
    :param qLen: query length, used to flip coordinates on '-' strand.
    :param strand: '+' or '-'.
    """
    # Track the most recent reference position seen, so unaligned query
    # bases (rpos is None) inherit it.
    cur_genome_loc = reftuple[0][1]
    mapping = {}
    for qpos, rpos in reftuple:
        if qpos is not None and rpos is not None:
            # Truly aligned pair; True marks an exact match position.
            mapping[qpos] = (rpos, True)
        elif qpos is not None:
            # Query base with no reference base: fall back to the last
            # seen reference location; None marks the approximation.
            mapping[qpos] = (cur_genome_loc, None)
        if rpos is not None:
            cur_genome_loc = rpos
    if strand == '-':
        # Reverse strand: flip query coordinates.
        mapping = dict((qLen-1-k, v) for k,v in mapping.items())
    # Strip the aligned/approximate flag, keeping only the ref position.
    for k in mapping:
        mapping[k] = mapping[k][0]
    return mapping
def _parse_resource_name(name): """Extracts project id, location, dataset id etc from the resource name.""" parts = name.split('/') return parts[1], parts[3], parts[5], parts[7], \ "%s/%s" % (parts[9], parts[10])
def modinv(a, n):
    """Extended Euclid's algorithm.

    Returns the modular inverse of ``a`` modulo ``n``, i.e. the value v
    with (a * v) % n == 1 (valid when gcd(a, n) == 1).

    See http://www.finetune.co.jp/~lyuka/technote/mod_calc/exeuclid.html
    """
    # (s, t) run the Euclidean remainder sequence; (u, v) track the
    # Bezout coefficient for `a` alongside it.
    s = a
    t = n
    u = 1
    v = 0
    while s > 0:
        q = t // s
        # One Euclid step on the remainders...
        w = t - q*s
        t = s
        s = w
        # ...mirrored on the coefficients.
        w = v - q*u
        v = u
        u = w
    # Normalize into the range [0, n).
    return (v+n) % n
def generation(x, g):
    """Return how many generations of length ``g`` have passed in ``x``
    (truncated toward zero)."""
    elapsed = x / g
    return int(elapsed)
def _control_input(devices, control_inputs, idx): """Returns the `idx`-th item in control_inputs to be used in ops.control_dependencies. This is a helper function for building collective ops. Args: devices: a list of device strings the collective run on. control_inputs: a list or None. idx: the index into `inputs` and `control_inputs`. Returns: A one item list of the `idx`-th element of `control_inputs`, or an empty list if `control_inputs` is None. """ if control_inputs is None: return [] if len(control_inputs) != len(devices): raise ValueError( 'control_inputs must match the length of the devices, %s != %s' % (len(control_inputs), len(devices))) return [control_inputs[idx]]
def paginated_list(full_list, sort_key, max_results, next_token):
    """Return one page of ``full_list`` plus the token for the next page.

    The list is sorted by ``sort_key`` before slicing. The returned token
    (a string offset) can be passed back in for the next segment; it is
    None once the final page has been reached.
    """
    offset = int(next_token) if next_token else 0
    ordered = sorted(full_list, key=lambda item: item[sort_key])
    page = ordered[offset:offset + max_results]
    # A full page means there may be more results after it.
    has_more = len(page) == max_results
    new_token = str(offset + max_results) if has_more else None
    return page, new_token
def hasFunction(object_, methodname):
    """
    Test if class of ``object`` has a callable method called ``methodname``.
    """
    return callable(getattr(object_, methodname, None))
def _is_freezeable(obj): """Determine if obj has the same freezing interface as `PostgreSQLTestUtil`. For some reason isinstance doesn't work properly with fixtures, so checking ``isinstance(obj, PostgreSQLTestDB)`` will always fail. Instead, we check to see if obj.time.freeze()/unfreeze() are present, and that the `time` member has context manager behavior implemented. """ return ( hasattr(obj, 'time') and callable(getattr(obj.time, 'freeze', None)) and callable(getattr(obj.time, 'unfreeze', None)) and callable(getattr(obj.time, '__enter__', None)) and callable(getattr(obj.time, '__exit__', None)) )
def isstring(args, quoted=False):
    """Checks if a value (or every item of a list) is a (quoted) string."""
    def check(candidate):
        if not isinstance(candidate, str):
            return False
        if not quoted:
            return True
        # Quoted means first and last char match and are a quote mark.
        return candidate[0] == candidate[-1] and candidate[0] in ('"', "'")

    if isinstance(args, list):
        return all(check(item) for item in args)
    return check(args)
def delete(node):
    """Delete ``node`` from a singly linked list by copying the next
    node's data over it.

    Returns False (and does nothing) for None or a tail node, since there
    is no successor to copy from.
    """
    if not node or not node.next:
        return False
    successor = node.next
    node.cargo = successor.cargo
    node.next = successor.next
    return True
def has_data(d, fullname):
    """Test if any key of the ``d`` dictionary starts with ``fullname + '-'``."""
    prefix = r'%s-' % (fullname, )
    return any(key.startswith(prefix) for key in d)
def _rescale_score_by_abs(score: float, max_score: float, min_score: float) -> float: """ Normalizes an attribution score to the range [0., 1.], where a score score of 0. is mapped to 0.5. :param score: An attribution score :param max_score: The maximum possible attribution score :param min_score: The minimum possible attribution score :return: The normalized score """ if -1e-5 < min_score and max_score < 1e-5: return .5 elif max_score == min_score and min_score < 0: return 0. elif max_score == min_score and max_score > 0: return 1. top = max(abs(max_score), abs(min_score)) return (score + top) / (2. * top)
def merge_dicts(*dict_args):
    """Given any number of dicts, shallow copy and merge into a new dict;
    precedence goes to key value pairs in latter dicts.

    Source: Aaron Hall,
    https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
    """
    return {key: value
            for mapping in dict_args
            for key, value in mapping.items()}
def parse_params(func, params, models):
    """Append OpenAPI spec entries for (query, headers, cookies) to params.

    For each attribute present on ``func``, the corresponding model in
    ``models`` contributes one parameter entry per property.
    """
    # (func attribute, OpenAPI 'in' location) pairs, in emission order.
    locations = (('query', 'query'), ('headers', 'header'), ('cookies', 'cookie'))
    for attr, location in locations:
        if not hasattr(func, attr):
            continue
        model = models[getattr(func, attr)]
        required = model.get('required', [])
        for name, schema in model['properties'].items():
            params.append({
                'name': name,
                'in': location,
                'schema': schema,
                'required': name in required,
                'description': schema.get('description', ''),
            })
    return params
def sec_to_hms(duration):
    """Return hours, minutes and seconds for given duration.

    >>> sec_to_hms('80')
    (0, 1, 20)

    :param int|str duration: Duration in seconds. Can be int or string.
    :return: tuple (hours, minutes, seconds)
    :rtype: (int, int, int)
    """
    total = int(duration)
    h, remainder = divmod(total, 3600)
    m, s = divmod(remainder, 60)
    return h, m, s
def compute_ngrams(tokens, max_len = None, min_len = 1): """ tokens : iterable of string a single sentence of tokens. Assumes start and stop tokens omitted max_len : int maximum ngram length min_len : int minimum ngram length """ if max_len == None: max_len = len(tokens) if min_len > max_len: raise Exception("min_len cannot be more than max_len") ngrams = set() # unigrams for ngram_size in range(min_len, max_len + 1): for start in range(0, len(tokens) - ngram_size + 1): end = start + ngram_size -1 words = [] for i in range(start, end + 1): words.append(tokens[i]) ngrams.add(tuple(words)) # make a tuple so hashable return ngrams
def lj_potential(rdist, eps, sig):
    """Potential energy of two bodies under a 12-6 Lennard-Jones potential.

    :param rdist: distance between two interacting bodies (Bohr)
    :type rdist: float
    :param eps: Lennard-Jones epsilon parameter for interaction
    :type eps: float
    :param sig: Lennard-Jones sigma parameter for interaction
    :type sig: float
    :rtype: float
    """
    reduced = sig / rdist
    return (4.0 * eps) * (reduced**12 - reduced**6)
def deault_parse_function(data: bytes, intro: str) -> bytes:
    """Default parse function: print the data and who sent it, then return
    the data unchanged.

    NOTE(review): the name is presumably a typo for
    `default_parse_function`; kept as-is because callers reference this
    spelling.
    """
    print("[Received from {intro}]: {data}\n\n".format(intro=intro, data=str(data)))
    return data
def tostr(value):
    """
    Round-trip ``value`` through UTF-8 — used in Python 3 as a
    replacement for the old Python 2 ``str`` usage. Returns "" when the
    codec round-trip fails.

    :param value:
    :return:
    """
    try:
        return value.encode('utf-8').decode('utf-8')
    except ValueError:
        return ""
def calc_raid_partition_sectors(psize, start):
    """Calculates end sector and converts start and end sectors including
    the unit of measure, compatible with parted.

    :param psize: size of the raid partition; -1 means "to the end of disk"
    :param start: start sector of the raid partition — an int (GiB) or a
        parted-style string like '2048s' for the first partition
    :return: start and end sector in parted compatible format, end sector
        as integer (or the string '-1')
    """
    # Integer starts are expressed in GiB for parted; strings pass through.
    if isinstance(start, int):
        start_str = '%dGiB' % start
    else:
        start_str = start
    if psize == -1:
        # -1 tells parted to extend the partition to the end of the disk.
        end_str = '-1'
        end = '-1'
    else:
        if isinstance(start, int):
            end = start + psize
        else:
            # First partition case, start is sth like 2048s
            # NOTE(review): here the size alone is used as the end offset,
            # presumably because the sector-style start is negligible in
            # GiB terms — confirm with callers.
            end = psize
        end_str = '%dGiB' % end
    return start_str, end_str, end
def neighbors(X, Y, x, y):
    """Return a list of tuples containing all neighbours of vertex (x, y).

    Arguments:
        X {int} -- number of vertices in X direction
        Y {int} -- number of vertices in Y direction
        x {int} -- x position of vertex 0 <= x <= X
        y {int} -- y position of vertex 0 <= y <= Y

    Returns:
        list -- neighbours of (x, y)
    """
    # A vertex outside the grid has no neighbours at all.
    if not (-1 < x < X and -1 < y < Y):
        return []
    result = []
    for x2 in range(x - 1, x + 2):
        for y2 in range(y - 1, y + 2):
            if (x2, y2) != (x, y) and 0 <= x2 < X and 0 <= y2 < Y:
                result.append((x2, y2))
    return result
def find(x, condition, n=1):
    """Return the index of the n-th element of ``x`` fulfilling
    ``condition``, or -1 when there are fewer than n matches."""
    matches = 0
    for index, element in enumerate(x):
        if condition(element):
            matches += 1
            if matches == n:
                return index
    return -1
def PreOpL2(op, items):
    """
    Uses algorithm from SecureSCM WP9 deliverable.

    Computes the prefix aggregation output[i] = op(items[0], ..., items[i])
    with a log-depth recursion: pair up adjacent items, recurse on the
    halved list, then fill even positions from the recursive prefixes.

    op must be a binary function that outputs a new register
    """
    k = len(items)
    half = k // 2
    output = list(items)
    if k == 0:
        return []
    # Combine adjacent pairs, halving the problem size.
    u = [op(items[2 * i], items[2 * i + 1]) for i in range(half)]
    # v[i] is the prefix over the first 2*(i+1) items.
    v = PreOpL2(op, u)
    # Odd positions are exactly the recursive prefixes.
    for i in range(half):
        output[2 * i + 1] = v[i]
    # Even positions (beyond 0) combine the preceding prefix with one item.
    for i in range(1, (k + 1) // 2):
        output[2 * i] = op(v[i - 1], items[2 * i])
    return output
def get_bounding_box(points, padding=0):
    """Return the bounding box of a list of points, grown by ``padding``.

    :return: (min_x, min_y, max_x, max_y)
    """
    x_coords = [point[0] for point in points]
    y_coords = [point[1] for point in points]
    return (
        min(x_coords) - padding,
        min(y_coords) - padding,
        max(x_coords) + padding,
        max(y_coords) + padding,
    )
def and_(*args):
    """Build an AND filter from the given criteria."""
    criteria = list(args)
    return {'operator': 'and', 'criteria': criteria}
def b_overlap_a(a, b, alab='a', blab='b', verb=False):
    """Check if there is any value in `b` inside the range of the values
    in `a`.

    Only checks if there is overlap, but not how (e.g. does not tell
    whether all values of b are inside a, or just from one side).

    Examples
    --------
    a: ' |---| '
    b: '|--|      ' -> Outside
    b: '|------|  ' -> Inside
    b: ' |--|     ' -> Inside
    b: '   |------|' -> Inside
    b: '      |--|' -> Outside
    """
    if b[0] > a[-1]:
        if verb:
            print('All values of {} outside of the range of {} ({}[0] > {}[-1])'.format(blab, alab, blab, alab))
        return False
    if b[-1] < a[0]:
        if verb:
            print('All values of {} outside of the range of {} ({}[-1] < {}[0])'.format(blab, alab, blab, alab))
        return False
    if verb:
        # Bug fix: this message previously printed the raw '{}' placeholders
        # because .format() was never applied to it.
        print('Some values of {} inside the range of {}.'.format(blab, alab))
    return True
def respuesta(res):
    """Format a comparison result: "iguales" (equal) when *res* is truthy,
    "diferentes" (different) otherwise."""
    if res:
        return "iguales"
    return "diferentes"
def _radix_length(num: int) -> int: """Finds the number of digits for a number""" if num == 0: return 1 digits: int = 0 while num != 0: digits += 1 num = int(num / 10) return digits
def size_node(node):
    """Return the stored size of *node*, or 0 when *node* is None.

    :param node: the node to check (expected to expose a ``size`` attribute)
    :return: 0 if the node is None, ``node.size`` otherwise
    """
    return 0 if node is None else node.size
def find_mode(iid, modes):
    """Return "WIDTHxHEIGHT" for the mode in *modes* whose id matches *iid*.

    Called by get_display_info() below.  Returns None when no mode matches.
    """
    for candidate in modes:
        if candidate.id == iid:
            return "{}x{}".format(candidate.width, candidate.height)
    return None
def connected_nodes(node, neighbor_func, visited=None):
    """Collect the set of nodes connected to `node`, i.e. "flood fill".

    Iteratively follows neighbours as given by ``neighbor_func(node)``
    (which must return a set) until no unvisited neighbour remains.

    >>> def neighbor_func(node):
    ...     return {-node, min(node+1, 5)}
    >>> connected_nodes(0, neighbor_func)
    {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5}
    """
    if visited is None:
        visited = set()
    frontier = {node}
    while frontier:
        current = frontier.pop()
        visited.add(current)
        # Only enqueue neighbours we have not already seen.
        frontier |= neighbor_func(current) - visited
    return visited
def evaluate_ids(new_data, old_data, id_field_key):
    """Diff incoming application records against stored admin-table rows.

    Args:
        new_data: records from the application; each carries an "_id" key.
        old_data: rows from the admin table; each carries "id" and
            *id_field_key*.
        id_field_key: key in an old_data row holding the application's
            record id for that row.

    Returns:
        dict with three lists:
        - "create": new_data records whose "_id" is unknown to the admin table
        - "update": new_data records matched to an existing row (each is
          mutated in place to carry the admin "id")
        - "delete": {"id": ...} stubs for admin rows no longer present
    """
    # admin-table row id -> application record id
    existing = {row["id"]: row[id_field_key] for row in old_data}
    # application record ids present in the incoming payload
    incoming_ids = [record["_id"] for record in new_data]

    known_app_ids = existing.values()
    # Records the admin table has never seen.
    create = [record for record in new_data if record["_id"] not in known_app_ids]

    # Records matching an existing row: tag them with the admin row id.
    update = []
    for row_id, app_id in existing.items():
        if app_id not in incoming_ids:
            continue
        for record in new_data:
            if record["_id"] == app_id:
                record["id"] = row_id  # NOTE: mutates the caller's record
                update.append(record)

    # Admin rows whose application record vanished from the payload.
    delete = [
        {"id": row_id}
        for row_id, app_id in existing.items()
        if app_id not in incoming_ids
    ]

    return {"create": create, "update": update, "delete": delete}
def chr_dict(d):
    """Group the positions in *d* by chromosome.

    Input is a dictionary generated by file2dict whose keys look like
    "<chromosome>_<position>".  Returns a new dict mapping chromosome
    name to a list of integer positions (in key iteration order).

    NOTE(review): assumes exactly one '_' per key — a chromosome name
    containing '_' would raise ValueError; confirm against file2dict output.
    """
    grouped = {}
    for key in d:
        chromosome, pos = key.split('_')
        grouped.setdefault(chromosome, []).append(int(pos))
    return grouped
def nested(path, query, *args, **kwargs): """ Creates a nested query for use with nested documents Keyword arguments such as score_mode and others can be added. """ nested = { "path": path, "query": query } nested.update(kwargs) return { "nested": nested }
def parse_csv_import(data_string):
    """Parse simple CSV text into a 2-D list.

    Parameters
    ----------
    data_string : str
        Rows delimited by newline, values by comma.

    Returns
    -------
    data_array : list
        One list per row; empty cells are dropped.

    NOTE(review): naive splitting — quoted commas are not handled; confirm
    the input never contains quoted fields (otherwise use the csv module).
    """
    # `if cell` drops empty strings, matching filter(None, ...).
    return [
        [cell for cell in row.split(',') if cell]
        for row in data_string.split('\n')
    ]
def run(dfs: dict, settings: dict) -> dict:
    """list(df): print each top-level entry (e.g. column name) of the
    selected dataframe, one per line, surrounded by blank lines, and
    return *dfs* unchanged."""
    names = list(dfs[settings['df']])
    print()
    print("\n".join(names))
    print()
    return dfs
def mit_bubble(L):
    """Bubble sort.

    Sorts *L* in place by repeatedly sweeping and swapping adjacent
    out-of-order pairs until a full sweep makes no swap.

    Parameters:
        L (list): Unsorted (eventually sorted) list.

    Returns:
        L (list): The same list, now sorted.
    """
    done = False
    while not done:
        done = True
        for idx in range(1, len(L)):
            if L[idx - 1] > L[idx]:
                L[idx - 1], L[idx] = L[idx], L[idx - 1]
                done = False  # a swap happened; another sweep is needed
    return L
def _odd_vertices_of_MST(M, number_of_nodes): """ Returns the vertices having Odd degree in the Minimum Spanning Tree(MST). """ odd_vertices = [0 for i in range(number_of_nodes)] for u, v, _ in M: odd_vertices[u] = odd_vertices[u] + 1 odd_vertices[v] = odd_vertices[v] + 1 odd_vertices = [ vertex for vertex, degree in enumerate(odd_vertices) if degree % 2 == 1 ] return odd_vertices
def homogenize(vectors, w=1.0):
    """Homogenise a list of XYZ vectors.

    Parameters
    ----------
    vectors : list
        A list of XYZ vectors.
    w : float, optional
        Homogenisation parameter (must be nonzero). Defaults to ``1.0``.

    Returns
    -------
    list
        Homogenised vectors ``[x/w, y/w, z/w, w]``.

    Notes
    -----
    Each component is divided by *w* and *w* is appended as a fourth
    component.  Homogenisation of vectors is often used in relation to
    transformations.

    Examples
    --------
    >>> homogenize([[1.0, 0.0, 0.0]])
    [[1.0, 0.0, 0.0, 1.0]]
    """
    result = []
    for x, y, z in vectors:
        result.append([x / w, y / w, z / w, w])
    return result
def PNT2TidalOcto_Pv16(XA,beta0PNT=0,beta1PNT=0):
    """
    TaylorT2 1PN Octopolar Tidal Coefficient, v^16 Phasing Term.

    Parameters
    ----------
    XA : float
        Mass fraction of the object.
    beta0PNT : float, optional
        0PN Octopole Tidal Flux coefficient (default 0).
    beta1PNT : float, optional
        1PN Octopole Tidal Flux coefficient (default 0).

    Returns
    -------
    float
        Polynomial in XA up to third order; the expression below is
        unchanged from the original.
    """
    XATo2nd = XA*XA
    XATo3rd = XATo2nd*XA
    # NOTE: the original also precomputed XA**4 and XA**5, but the series
    # only uses powers up to XA**3 — those unused locals were removed.
    return (5*(267520+995*beta0PNT+168*beta1PNT))/(1848) \
    + (5/231)*(74010 + 119*beta0PNT)*XA \
    - (85)/(66)*(1825+2*beta0PNT)*XATo2nd + (1625*XATo3rd)/(66)
def _try_lookup(table, value, default = ""): """ try to get a string from the lookup table, return "" instead of key error """ try: string = table[ value ] except KeyError: string = default return string
def render_if_string_starts_with_a_number(string):
    """
    Prefix "num" when the name starts with a digit.

    (The previous docstring said an underscore was prepended, but the code
    has always added "num"; the docs now match the behaviour.)

    :type string: str
    :rtype: str
    """
    # BUG FIX: guard against the empty string, which used to raise
    # IndexError on string[0].
    if string and string[0].isdigit():
        string = "num" + string
    return string
def taputil_create_sorted_dict_key(dictionaryObject):
    """Build a canonical keyword from a dictionary.

    Parameters
    ----------
    dictionaryObject : dictionary object, mandatory
        Dictionary (or None).

    Returns
    -------
    '&'-joined "key=value" pairs with keys sorted, or None when the input
    is None.
    """
    if dictionaryObject is None:
        return None
    return '&'.join(
        f'{key}={dictionaryObject[key]}' for key in sorted(dictionaryObject)
    )
def relative_config_file_path(file_name):
    """Return the config file path based on the environment.

    Args:
        file_name (str): The file name to be loaded.

    Returns:
        str: The config file path under /configs.
    """
    return "/configs/{}".format(file_name)