content
stringlengths
42
6.51k
def conv_16to8(v):
    """Convert a 16-bit value to 8 bits by keeping its high byte.

    :param v: 16-bit value
    :return: the high 8 bits of ``v``
    """
    return (v & 0xFF00) >> 8
def get_size(vol_size: int):
    """Convert a volume size in MB to bytes."""
    megabytes = int(vol_size)
    return megabytes * 1024 ** 2
def rating_over_budget(row) -> float:
    """Utility for dataframe manipulation: rating per million of budget.

    Returns -1 as a sentinel when the budget is 0 or 1 (missing/placeholder).
    """
    if row["budget"] in (0, 1):
        return -1
    millions = float(row["budget"]) / 10 ** 6
    return float(row["average_rating"]) / millions
def _mask_by_gradient(refpt, neighbor_set, valuemask): """ mask neighbor set by valuemask that choose vertices with value smaller than value of vertex refpt """ return set([i for i in neighbor_set if valuemask[i]<valuemask[refpt]])
def alphanum_key(s):
    """Turn a string into a list of string and number chunks for natural sorting.

    "z23a" -> ["z", 23, "a"]
    """
    import re

    def tryint(chunk):
        # Only int() conversion failures are expected; a bare except would
        # also have swallowed unrelated errors (e.g. KeyboardInterrupt).
        try:
            return int(chunk)
        except ValueError:
            return chunk

    return [tryint(c) for c in re.split(r'([0-9]+)', s)]
def graphviz_html_string(s, *, repl_nl=None, repl_colon=None, xmltext=None):
    """Workaround *pydot* parsing of node-ids & labels by encoding as HTML.

    - `pydot` does not quote DOT-keywords anywhere (pydot#111).
    - Char ``:`` in node-names denotes port/compass-points and breaks IDs
      (pydot#224).
    - Non-strings are not quote_if_necessary by pydot.
    - NLs in tooltips of HTML-Table labels need substitution with XML-entities
      <see https://stackoverflow.com/a/27448551/548792>.
    - HTML-Label attributes (``xmlattr=True``) need both html-escape & quote.

    .. Attention::
        It does not correctly handle ``ID:port:compass-point`` format.

    See https://www.graphviz.org/doc/info/lang.html)
    """
    import html

    if s:
        s = html.escape(s)
        substitutions = []
        if repl_nl:
            substitutions += [("\n", "&#10;"), ("\t", "&#9;")]
        if repl_colon:
            substitutions.append((":", "&#58;"))
        for old, new in substitutions:
            s = s.replace(old, new)
        if not xmltext:
            s = "<" + s + ">"
    return s
def is_root_node(node):
    """Return True iff node is truthy, flagged as root and not flagged as child."""
    return bool(node) and node.is_root is True and node.is_child is False
def str_tile(raw: str, length: int) -> str:
    """Tile the string so the result is exactly `length` characters long.

    :param raw: the string to be tiled (must be non-empty)
    :param length: the target length
    :return: the tiled string
    """
    full_repeats, remainder = divmod(length, len(raw))
    return raw * full_repeats + raw[:remainder]
def str_fill(i, n):
    """Return i as a decimal string zero-padded to at least n digits.

    i: int
    n: int minimum length
    returns: string
    """
    text = str(i)
    return text.zfill(n)
def parse_time(val: str) -> float:
    """Parse a duration string such as "90 minutes" or "2 hours" into hours.

    Minute values are converted to hours and rounded to three decimals;
    any other unit is assumed to already be hours.
    """
    parts = val.split(" ")
    amount, unit = parts[0], parts[1]
    if 'minute' in unit:
        return round(int(amount) / 60, 3)
    return float(amount)
def error_dict(error_message: str):
    """Build the standard error-response dictionary for the given message."""
    return dict(status="error", error=error_message)
def distribute_value(value, num_proc):
    """Adjust a global-step frequency to local steps for distributed training.

    Divides by the number of processes, never returning less than 1.
    """
    per_process = value // num_proc
    return per_process if per_process >= 1 else 1
def displayIngredient(ingredient):
    """Format a list of (tag, [tokens]) tuples as an HTML string for display.

    displayIngredient([("qty", ["1"]), ("name", ["cat", "pie"])])
    # => <span class='qty'>1</span><span class='name'>cat pie</span>
    """
    spans = []
    for tag, tokens in ingredient:
        spans.append(f"<span class='{tag}'>{' '.join(tokens)}</span>")
    return "".join(spans)
def get_wait_time(retry_num):
    """Compute the exponential-backoff wait time: 2 ** (retry_num + 3).

    Args:
        retry_num (int): Retry attempt number.

    Returns:
        (int): Seconds to wait.
    """
    exponent = retry_num + 3
    return 2 ** exponent
def convert_binary_to_decimal(binary_number):
    """Convert a binary digit string to its decimal value.

    Parameters
    ----------
    binary_number : str
        String of '0'/'1' digits, most significant bit first.

    Returns
    -------
    int

    >>> convert_binary_to_decimal('1010')
    10
    """
    # enumerate replaces the manual index counter; the stray unary '+' in
    # the original `+= + int(bit)` is gone.
    highest_power = len(binary_number) - 1
    decimal = 0
    for i, bit in enumerate(binary_number):
        decimal += int(bit) * 2 ** (highest_power - i)
    return decimal
def _to_kebab_case(value: str) -> str: """Converts a snake_case string into a kebab-case one.""" return value.replace("_", "-").lower()
def escape_markdown(raw_string):
    """Return a new string with all markdown metacharacters backslash-escaped.

    The backslash itself is escaped first so later escapes are not
    double-escaped.

    Examples
    --------
    ::
        escape_markdown("1 * 2") -> "1 \\* 2"
    """
    result = raw_string
    for ch in ("\\", "*", "-", "=", "`", "!", "#", "|"):
        result = result.replace(ch, "\\" + ch)
    return result
def _convertToFeatureDict(response, x, y): """ Converts the service response to the features response dict :param dict response: The response from the heatdrill service as dict :return dict: The features response dict. Return value of method layer_info(...) """ attributes = [ { 'name': 'requestId', 'value': response['requestId'] }, { 'name': 'permitted', 'value': response['permitted'] }, { 'name': 'depth', 'value': response['depth'] }, { 'name': 'gwsZone', 'value': response['gwsZone'] }, { 'name': 'gwPresent', 'value': response['gwPresent'] }, { 'name': 'spring', 'value': response['spring'] }, { 'name': 'gwRoom', 'value': response['gwRoom'] }, { 'name': 'wasteSite', 'value': response['wasteSite'] }, { 'name': 'landslide', 'value': response['landslide'] }, { 'name': 'infoTextRows', 'value': response['infoTextRows'] }, { 'name': 'x', 'value': x }, { 'name': 'y', 'value': y } ] geometry = "POINT(%s %s)" % (x, y) featureDict = {'attributes': attributes, 'geometry': geometry} multiFeatureDict = {'features': [featureDict]} return multiFeatureDict
def categorizePerformance(SH, selectivity):
    """Categorize the performance of a MOF/CORE structure from SH and selectivity.

    SH above 5 and selectivity above 15000 are considered ideal.

    Return a category number 0 | 1 | 2:
      0 - SH <= 5 and selectivity <= 15000
      1 - SH <= 5 and selectivity > 15000
      2 - SH > 5 (the SH > 5 / selectivity <= 15000 combination does not
          occur in the data, so it is folded into 2)
    """
    if SH > 5:
        return 2
    if selectivity <= 15000:
        return 0
    return 1
def ssh_cmd_docker_container_exec(detail, command_on_docker, wait_press_key=None) -> str:
    """Build the SSH command that executes a command inside a docker container."""
    suffix = "; echo 'Press a key'; read q" if wait_press_key else ""
    return (
        f"TERM=xterm ssh -t {detail['ec2InstanceId']} "
        f"docker exec -ti {detail['runtimeId']} {command_on_docker}{suffix}"
    )
def _get_pin_cw(pincount, loc): """Helper function to locate pin number for cw_dual. Args: pincount: Total number of pins loc: Starting location Returns: pin_number: Starting pin number """ pins_per_side = pincount // 2 if loc == "top_left": return 0 elif loc == "bottom_left": return pins_per_side * 2 + 1 elif loc == "bottom_right": return pins_per_side elif loc == "top_right": return pins_per_side + 1 return 0
def transformation_lowercase(text, *args):
    """Lower-case transformation; any extra arguments are accepted and ignored.

    :param text: the text to run the transformation on
    :type text: str
    :return: the transformed text
    :rtype: str
    """
    return str.lower(text)
def required_for_output(inputs: set, outputs: set, connections: dict):
    """
    Determine which nodes and connections are needed to compute the final output.

    It is considered that only paths starting at the inputs and ending at the
    outputs are relevant. This decision is made since a node bias can substitute
    for a 'floating' node (i.e. node with no input and constant output).

    :note: It is assumed that the input identifier set and the node identifier
        set are disjoint. By convention, the output node ids are always the same
        as the output index.

    :param inputs: Set of the used input identifiers
    :param outputs: Set of all the output node identifiers
    :param connections: Dictionary of genome connections
    :return: Sets of: used inputs, used hidden nodes, used output nodes,
        remaining connections
    """
    # Network is invalid if no input is given
    if not inputs:
        return set(), set(), outputs, {}

    # Get all the enabled connections and the nodes used in those
    used_conn = {k: c for k, c in connections.items() if c.enabled}
    used_nodes = set(a for (a, _) in used_conn.keys())
    used_nodes.update({b for (_, b) in used_conn.keys()})
    used_nodes.update(inputs | outputs)
    # Self-loops (a == b) are excluded; they do not connect a node to the rest.
    not_recurrent_used_conn = {(a, b): c for (a, b), c in used_conn.items() if a != b}

    # Initialize with dummy to get the 'while' going
    removed_nodes = [True]
    # While new nodes get removed, keep pruning
    while removed_nodes:
        removed_nodes = []
        # Search for nodes to prune
        for n in used_nodes:
            # Inputs and outputs cannot be pruned
            if n in inputs | outputs:
                continue
            # Node must be at least once both at the sender and the receiving
            # side of a connection, without referring to itself (i.e. recurrent
            # connections)
            if not ((n in {a for (a, _) in not_recurrent_used_conn.keys()}) and
                    (n in {b for (_, b) in not_recurrent_used_conn.keys()})):
                removed_nodes.append(n)
        # Delete the removed_nodes from the used_nodes set, remove their
        # corresponding connections as well
        for n in removed_nodes:
            # Remove the dangling node
            used_nodes.remove(n)
        # Connection must span between two used nodes
        used_conn = {(a, b): c for (a, b), c in used_conn.items()
                     if (a in used_nodes and b in used_nodes)}
        not_recurrent_used_conn = {(a, b): c for (a, b), c in used_conn.items() if a != b}

    # Network is invalid if no connections remain or none of the inputs or
    # outputs are connected anymore
    used_nodes = set(a for (a, _) in used_conn.keys())
    used_nodes.update({b for (_, b) in used_conn.keys()})
    if (not used_conn) or \
            (not any([i in used_nodes for i in outputs])) or \
            (not any([i in used_nodes for i in inputs])):
        return set(), set(), outputs, {}

    # All the outputs should always be considered 'used nodes'
    used_nodes.update(outputs)  # Do not include the inputs
    # Parse the used nodes and return result
    # NOTE(review): inputs are identified by negative ids here — confirm this
    # convention holds for all genomes.
    used_inp = {n for n in used_nodes if n < 0}
    used_out = outputs  # All outputs are always considered used
    used_hid = {n for n in used_nodes if (n not in used_inp) and (n not in used_out)}
    return used_inp, used_hid, used_out, used_conn
def _format_path_with_rank_zero(path_format: str) -> str: """Formats ``path_format`` with the rank zero values.""" return path_format.format( rank=0, local_rank=0, node_rank=0, )
def check_binary(can_words, ref_words, word_bits, verbose=False):
    """Compare candidate words against reference words under per-word bit masks.

    Returns (exact_match, score) where score is matched bits / checked bits
    in 0-1. Only bits set in a word's mask are checked.
    NOTE(review): raises ZeroDivisionError when no mask bit is set at all.
    """
    checks = 0
    matches = 0
    for wordi, (expect, mask) in ref_words.items():
        got = can_words[wordi]
        if verbose:
            print("word %u want 0x%04X got 0x%04X" % (wordi, expect, got))
        for maski in range(word_bits):
            maskb = 1 << maski
            if not mask & maskb:
                # Bit not covered by the mask: skip it.
                continue
            checks += 1
            if expect & maskb == got & maskb:
                matches += 1
    return checks == matches, matches / checks
def fetch_all_url_link(url_increasing_link, n):
    """Build the list of URLs for indexes 1..n.

    :param url_increasing_link: str, link prefix with a trailing increasing
        index, e.g. https//:abc.com/cars&page_number=
    :param n: int, the maximum index
    :return: list of URL strings
    """
    return [url_increasing_link + str(idx) for idx in range(1, n + 1)]
def formatCSV(pair):
    """Format the KV tuple ((K1, K2, ...), V) as the csv string 'K1,K2,...,V'.

    :param pair: tuple
    :return: str
    """
    keys, value = pair
    return ','.join(map(str, keys + (value,)))
def fib(n: int) -> int:
    """Calculate the n-th Fibonacci number recursively (F(0) = F(1) = 1)."""
    assert n >= 0
    if n < 2:
        return 1
    return fib(n - 1) + fib(n - 2)
def profile_key(prof):
    """Sort key that orders the 'core' profile before all others."""
    return (0 if prof == 'core' else 1, prof)
def toffoli(a, b, c):
    """Compute c + ab over GF2 (the Toffoli gate).

    :param a: bit
    :param b: bit
    :param c: bit

    TESTS:
    >>> [toffoli(a, b, c) for a in (0, 1) for b in (0, 1) for c in (0, 1)]
    [0, 1, 0, 1, 0, 1, 1, 0]
    """
    for bit in (a, b, c):
        assert bit in [0, 1]
    return (a & b) ^ c
def get_color(cnts):
    """Determine the color of a shape from per-color contour arrays.

    Args:
        cnts: contour arrays for each analyzed color, organized as
            [black_cnts, red_cnts, yellow_cnts, white_cnts]

    Returns:
        The index of the shortest array in cnts — the fewer contours, the
        more likely the image is one cohesive shape rather than noise.
    """
    return min(range(len(cnts)), key=lambda i: len(cnts[i]))
def warning(text):
    """Wrap text in ANSI bright-yellow escape codes for pretty warnings."""
    return "\033[93m{}\033[m".format(text)
def relu3(x: float) -> float:
    """Smoothed ReLU: 0 below 0, x**3 / 3 on [0, 1), x - 2/3 from 1 upward.

    Like ReLU, but smoother; like GELU, but cheaper.
    """
    if x >= 1.0:
        return x - 2 / 3
    if x >= 0.0:
        return 1 / 3 * x ** 3
    return 0.0
def validate_billingmode_mode(billingmode_mode):
    """
    Property: BillingMode.Mode

    Returns the value unchanged if valid, raises ValueError otherwise.
    """
    valid_modes = ("ON_DEMAND", "PROVISIONED")
    if billingmode_mode in valid_modes:
        return billingmode_mode
    raise ValueError(
        "BillingMode Mode must be one of: %s" % ", ".join(valid_modes)
    )
def sentence_factory(markup):
    """Simplistic builder of *parsed* sentences.

    Each non-blank line in the markup is whitespace-separated values for:
    token offset-in-chars ner lemma — returned as a list of dicts.
    """
    tokens = []
    for raw_line in markup.split("\n"):
        fields = raw_line.strip().split()
        if not fields:
            continue
        word, offset, ner, lemma = fields
        tokens.append({
            "word": word,
            "CharacterOffsetBegin": offset,
            "NER": ner,
            "lemma": lemma,
        })
    return tokens
def tz_to_str(tz_seconds: int) -> str:
    """Convert a timezone offset in seconds to a string of the form +HHMM
    (offset from GMT, no colon separator)."""
    sign = "-" if tz_seconds < 0 else "+"
    # Whole minutes first, then split into hours and minutes.
    total_minutes, _ = divmod(abs(tz_seconds), 60)
    hh, mm = divmod(total_minutes, 60)
    return f"{sign}{hh:02}{mm:02}"
def first_non_repeating_letter(string):
    """Return the first character that occurs only once in the string,
    comparing case-insensitively; return "" when every character repeats
    (including for the empty string).

    Created for Code Wars:
    https://www.codewars.com/kata/52bc74d4ac05d0945d00054e
    """
    # Count each lower-cased character once (O(n)) instead of calling
    # str.count per character (O(n^2)); also avoids shadowing builtin `list`
    # and the unreachable `break` after `return` in the original.
    from collections import Counter
    counts = Counter(string.lower())
    for ch in string:
        if counts[ch.lower()] == 1:
            return ch
    return ""
def append_filename(filename, impute_dates, moving_average):
    """! Creates consistent file names for all output.

    A positive moving_average takes precedence over impute_dates.
    """
    if moving_average > 0:
        return filename + '_ma' + str(moving_average)
    if impute_dates:
        return filename + '_all_dates'
    return filename
def linear_annealing(n, total, p_initial, p_final):
    """Linearly interpolate a probability between p_initial and p_final.

    The current probability is based on the current step n. Used to
    linearly anneal the exploration probability of the RLTuner.

    Args:
        n: The current step.
        total: The total number of steps that will be taken (usually the
            length of the exploration period).
        p_initial: The initial probability.
        p_final: The final probability.

    Returns:
        The current probability (between p_initial and p_final).
    """
    if n < total:
        return p_initial - (n * (p_initial - p_final)) / total
    return p_final
def get_ascii_from_lightness(lightness: float) -> str:
    """Return an ASCII character for a specific lightness.

    :param lightness: 0.0-1.0, lightness from the HSL color model
    :returns: str (single character)
    """
    ramp = " .-+*wGHM#&%"
    index = int(lightness * len(ramp))
    # Clamp out-of-range lightness into the ramp.
    index = max(0, min(index, len(ramp) - 1))
    return ramp[index]
def count_lines(line, wanted_length=77):
    """Return an approximate display line count for a string when wrapped
    at wanted_length characters."""
    rows = line.split("\n")
    count = len(rows) - 1
    for row in rows:
        ratio = len(row) / wanted_length
        if ratio >= 1.0:
            count += int(ratio)
    return count
def filter_by_key(dct, func=lambda val: val):
    """Filter dictionary entries by applying ``func`` to their keys.

    NOTE(review): despite the parameter name ``val`` in the default predicate
    and the original docstring saying "by their values", ``func`` is applied
    to each key.
    """
    return {k: v for k, v in dct.items() if func(k)}
def poly(start, end, steps, total_steps, period, power):
    """Polynomial schedule from start to end.

    period gives (begin, finish) as fractions of total_steps; before the
    period the value is start, after it end, in between a polynomial ramp.
    """
    rate = float(steps) / total_steps
    if rate <= period[0]:
        return start
    if rate >= period[1]:
        return end
    base = total_steps * period[0]
    ceil = total_steps * period[1]
    delta = end - start
    return end - delta * (1. - float(steps - base) / (ceil - base)) ** power
def hex2int(s: str):
    """Convert hex-octets (a sequence of octets, e.g. "ff01") to an integer."""
    return int(s, base=16)
def _find_contraction(positions, input_sets, output_set):
    """Copied from NumPy's _find_contraction

    Finds the contraction for a given set of input and output sets.

    Parameters
    ----------
    positions : iterable
        Integer positions of terms used in the contraction.
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript

    Returns
    -------
    new_result : set
        The indices of the resulting contraction
    remaining : list
        List of sets that have not been contracted, the new set is appended to
        the end of this list
    idx_removed : set
        Indices removed from the entire contraction
    idx_contraction : set
        The indices used in the current contraction

    Examples
    --------
    # A simple dot product test case
    >>> pos = (0, 1)
    >>> isets = [set('ab'), set('bc')]
    >>> oset = set('ac')
    >>> _find_contraction(pos, isets, oset)
    ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})

    # A more complex case with additional terms in the contraction
    >>> pos = (0, 2)
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set('ac')
    >>> _find_contraction(pos, isets, oset)
    ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
    """
    idx_contract = set()
    idx_remain = output_set.copy()
    remaining = []
    for ind, value in enumerate(input_sets):
        if ind in positions:
            # Term participates in this contraction: collect its indices.
            idx_contract |= value
        else:
            # Term is untouched; its indices must survive the contraction.
            remaining.append(value)
            idx_remain |= value

    # Indices that also appear outside the contracted terms (or in the
    # output) remain; the rest are summed away.
    new_result = idx_remain & idx_contract
    idx_removed = (idx_contract - new_result)
    remaining.append(new_result)

    return (new_result, remaining, idx_removed, idx_contract)
def _check_state(enabled_states, paper_states): """Checks to see if the newspaper object contains a state that we want to scrape. Parameters ---------- enabled_states : list The states that we want to scrape paper_states : list The states that are applicable to the newspaper Returns ------- valid : bool True if the paper meets the criteria for the scraping job """ enabled_states = set(enabled_states) paper_states = set(paper_states) return bool(enabled_states.intersection(paper_states))
def slice_length(orange):
    """
    Return the number of elements selected by a slice, or the length of any
    other sized object. For example, slice(0, 4, 2) has a length of two.
    This is used by some of the methods in message.py.

    Fix: the old int((stop - start) / step) truncated toward zero and
    undercounted, e.g. slice(0, 5, 2) selects 3 elements, not 2. Delegating
    to range() matches Python's actual slice semantics, including negative
    steps, and never returns a negative length.
    """
    if isinstance(orange, slice):
        start = orange.start or 0   # a missing start means 0
        step = orange.step or 1     # a missing step means 1
        return len(range(start, orange.stop, step))
    return len(orange)
def roll(*parts: str) -> str:
    """Format the parts of a roll into a single string.

    Returns:
        The string of the sum of the parts
    """
    separator = " + "
    return separator.join(parts)
def atmospheric_pressure(z):
    """Atmospheric pressure [kPa] as a function of elevation above sea level.

    The atmospheric pressure, P, is the pressure exerted by the weight of the
    earth's atmosphere. Evaporation at high altitudes is promoted due to low
    atmospheric pressure as expressed in the psychrometric constant. The
    effect is, however, small and in the calculation procedures the average
    value for a LOCATION is sufficient. A simplification of the ideal gas
    law, assuming 20 C for a standard atmosphere, is employed (FAO56).

    :param z: elevation above sea level [m]
    :return: atmospheric pressure [kPa]
    """
    temperature_ratio = (293.0 - 0.00652 * z) / 293.0
    return 101.3 * temperature_ratio ** 5.26
def build_service_catalog_parameters(parameters: dict) -> list:
    """Reformat parameters so Service Catalog can consume them.

    Args:
        parameters (dict): parameters as {"key1": "value1", "key2": "value2"}

    Returns:
        list: parameters as [{"Key": "string", "Value": "string"}, ...]
    """
    return [{'Key': key, 'Value': value} for key, value in parameters.items()]
def _get_table_sql_columns(columns=[]): """ return a string of a list of columns to be inserted into the sql code Params ------ columns=[] (list) Return ------ sql_columns (str) """ if len(columns) == 0: sql_columns = '*' else: sql_columns = ",".join(columns) return sql_columns
def set24_to_list(v):
    """Convert a 24-bit integer bitmask to the list of set bit positions."""
    return [bit for bit in range(24) if (v >> bit) & 1]
def _extend_set(a_set, a_union, a_form2lemma1, a_form2lemma2): """ Extend exisiting set by adding to it forms whose lemmas are in the set @param a_set - set to be expanded @param a_union - container of additional terms (should subsume `a_set`) @param a_form2lemma1 - dictionary mapping forms to lemmas @param a_form2lemma2 - dictionary mapping forms to lemmas @return pointer to the new extended set """ return set(term for term in a_union if term in a_set or \ a_form2lemma1.get(term) in a_set or \ a_form2lemma2.get(term) in a_set)
def merge(nums1, nums2):
    """Merge two sorted lists into a new sorted list.

    NOTE(review): despite the original LeetCode-style comment, this does NOT
    modify nums1 in place — it builds and returns a new list.
    """
    merged = []
    i = j = 0
    while i < len(nums1) and j < len(nums2):
        if nums1[i] < nums2[j]:
            merged.append(nums1[i])
            i += 1
        else:
            merged.append(nums2[j])
            j += 1
    # Append whichever side still has elements left.
    if i < len(nums1):
        merged.extend(nums1[i:])
    else:
        merged.extend(nums2[j:])
    return merged
def bigy(y):
    """An approximation of the magic function Y(y) for dipolar fields.

    We use the approximation from Schulz & Lanzerotti 1974. The following
    approximation looks fancier, but fails at y = 0, which is not what
    happens in the exact definition, so we do *not* use it:

        T0 = 1.3802  # 1 + ln(2 + 3**0.5)/(2 * 3**0.5)
        T1 = 0.7405  # pi * 2**0.5 / 6
        Y = 2 * (1 - y) * T0 + (T0 - T1) * (y * np.log(y) + 2 * y - 2 * np.sqrt(y))

    Note that this function gets called with both Numpy arrays and Sympy
    expressions, so only operators supported by both (+, *, **) are used.
    """
    return 2.760346 + 2.357194 * y - 5.11754 * y**0.75
def average(*args):
    """Return the simple arithmetic mean of the given values.

    NOTE(review): raises ZeroDivisionError when called with no arguments.
    """
    return sum(args, 0.) / len(args)
def strict_bool(s):
    """
    Variant of bool() that only accepts the exact strings 'True' and 'False'.

    Raises ValueError for anything else.
    """
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError(s)
def gray_code_gen(N):
    """Generate the sequence of binary Gray codes of width N.

    Parameters
    ----------
    N : integer
        Width of the binary gray codes

    Returns
    -------
    data : list
        The list of generated binary gray codes of width N
    """
    return [i ^ (i >> 1) for i in range(1 << N)]
def _merge_environment_settings(base_env_settings, more_env_settings): """Merge environment settings from two different sources. :param list base_env_settings: A (possibly undefined) set of metadata. :param list more_env_settings: Metadata to add (also possibly undefined). """ result = [] if base_env_settings: result.extend(base_env_settings) if more_env_settings: conflicts = [k for k in [m.name for m in more_env_settings] if k in [m['name'] for m in result]] if conflicts: raise ValueError("May not have multiple definitions for environment settings " "value(s) '{}'".format(', '.join(conflicts))) else: result.extend([{'name': m.name, 'value': m.value} for m in more_env_settings]) return result
def determine_field_list(arbor, fields, update):
    """
    Get the list of fields to be saved.

    When ``fields`` is None or "all", the list is rebuilt from the arbor's
    analysis/field lists (alias-resolved); otherwise the caller's list is
    extended with the mandatory "uid"/"desc_uid" fields.

    NOTE(review): in the explicit-fields branch the caller's list is mutated
    in place via ``fields.extend`` — confirm callers do not rely on their
    input list staying unchanged.
    """
    if fields in [None, "all"]:
        # If this is an update, don't resave disk fields.
        field_list = arbor.analysis_field_list.copy()
        # Add in previously saved analysis fields
        if update:
            field_list.extend(
                [field for field in arbor.field_list
                 if arbor.field_info[field].get("type") == "analysis_saved"])
        else:
            field_list.extend(arbor.field_list)
        # If a field has an alias, get that instead.
        fields = []
        for field in field_list:
            fields.extend(
                arbor.field_info[field].get("aliases", [field]))
    else:
        # Mandatory identifier fields are always saved.
        fields.extend([f for f in ["uid", "desc_uid"] if f not in fields])
    return fields
def acc(modifier: int, number: int) -> int:
    """Apply an 'acc' instruction: add the modifier to the accumulator.

    :param modifier: amount to add/subtract from the accumulator (coerced
        with int(), so signed strings like "+5" also work)
    :param number: current accumulator value
    :rtype: int
    """
    return int(modifier) + number
def function_with_pep484_type_annotations(param1: int, param2: str) -> bool:
    """Compare if param1 is greater than param2.

    Example function with PEP 484 type annotations. The return type is
    duplicated in the docstring to comply with the NumPy docstring style.

    Parameters
    ----------
    param1
        The first parameter.
    param2
        The second parameter (a string representing a number).

    Returns
    -------
    bool
        True if param1 > int(param2), False otherwise.
    """
    result = None
    try:
        result = param1 > int(param2)
    except ValueError:
        print("Parameter 2 must be a string representing a number using digits [0-10]")
        raise ValueError
    except TypeError:
        print("Parameter 1 must be an integer")
        raise TypeError
    print(f"Function called with: {param1} and {param2}")
    print(f"Function returns: {result}")
    return result
def x(score):
    """Check score and give user feedback.

    Integers 0-4 -> "poor", 5-10 -> "good", anything else -> "excellent".
    NOTE(review): non-integers and negative scores fall through to
    "excellent" because range membership is used — confirm that is intended.
    """
    if score in range(5):
        return "poor"
    if score in range(5, 11):
        return "good"
    return "excellent"
def get_styles(features, collections, links=True):
    """Collect the distinct kml_style objects for the given features and
    collections.

    When links is False, collections will be represented by Folders rather
    than NetworkLinks, so every feature in the entire recursive tree ends up
    in this KML doc and its style must be included too.
    """
    styles = [f.kml_style for f in features]
    styles += [c.kml_style for c in collections]
    if not links:
        for collection in collections:
            descendants = collection.feature_set(recurse=True)
            styles += [child.kml_style for child in descendants]
    return list(set(styles))
def _hashable(value): """Determine whether `value` can be hashed.""" try: hash(value) except TypeError: return False return True
def is_palindrome_v4(string):
    """Check whether the string is a palindrome by locating its own reversal
    at index 0 (equivalent to string-equality since both have equal length)."""
    return string.startswith(string[::-1])
def parse_events(events):
    """Parse calendar events into the structure used to create events.

    Args:
        events: all events returned from the read_events() func.

    Returns:
        parsed_events: dict of structured events keyed by int wp ID.
        err: list of [event, exception] pairs for events that failed to parse.
    """
    parsed_events = {}
    err = []
    for event in events:
        try:
            summary = event['summary'].split(':')       # "wp_id:subject"
            description = event['description'].split('\n')
            due = event['end']['dateTime'].split('T')   # date, time
            parsed = {
                'event_id': event['id'],        # needed for deletion and update
                'wp_id': summary[0],            # workpackage ID on OpenProject
                'subject': summary[-1],         # workpackage subject
                'assignee': description[-2],    # assignee on OpenProject
                'updated_at': description[-1],  # wp update time saved on Calendar
                'due_date': due[0],
                'due_hour': due[1],
            }
            parsed_events[int(parsed['wp_id'])] = parsed
        except Exception as error:
            # A parsing error for one event must not abort the batch.
            err.append([event, error])
    return parsed_events, err
def get_element_at(element_idx, it):
    """
    :return it[element_idx] also for non-indexable iterators.

    NOTE: exhausts the iterator (up to element_idx) for non-indexable input!
    """
    try:
        return it[element_idx]
    except TypeError:
        # Not subscriptable — fall back to a linear scan.
        pass
    for position, element in enumerate(it):
        if position == element_idx:
            return element
    raise IndexError('Iterator does not have {} elements'.format(element_idx))
def vert_to_string(*args):
    """Format a vertex as an OBJ-style 'v' line.

    NOTE(review): the format string has exactly two slots, so callers must
    pass exactly two coordinates despite the *args signature.
    """
    coords = tuple(args)
    return "v %10.8f %10.8f\n" % coords
def check_params(params):
    """
    Check that the parameters are defined in the domain [0, 1].

    :param params: parameters (u, v, w); None entries are skipped
    :type params: list, tuple
    :raises ValueError: an input parameter is outside of the domain [0, 1]
    """
    tol = 10e-8  # NOTE(review): 10e-8 == 1e-7; confirm the intended tolerance
    for value in params:
        if value is None:
            continue
        if not (0.0 - tol) <= value <= (1.0 + tol):
            raise ValueError("Parameters should be between 0 and 1")
    return True
def truncate_min_zoom_to_2dp(shape, properties, fid, zoom):
    """
    Round the "min_zoom" property to two decimal places.

    NOTE(review): despite the name this rounds (not truncates), and a
    min_zoom of 0 is left untouched because of the truthiness test —
    confirm both are intended.
    """
    current = properties.get('min_zoom')
    if current:
        properties['min_zoom'] = round(current, 2)
    return shape, properties, fid
def get_algo(mining_pool):
    """Retrieve the algo from a mining-pool URL.

    Very often the algo (and coin) are in the pool URL itself: the algo is
    taken as the first dot-separated component when the URL has at least
    three dots.

    Returns: str or None
    """
    if mining_pool is None:
        return None
    if mining_pool.count('.') < 3:
        return None
    return mining_pool.split(".")[0]
def extract(parse_tree, symbol):
    """
    Return the string generated by the first occurrence of the key `symbol`
    in the parse tree; '' when absent (False when the tree is a bare leaf).
    """
    def leaves(tree):
        # Collect terminal words left-to-right.
        if not isinstance(tree, tuple):
            return [tree]
        _, children = tree
        words = []
        for child in children:
            words.extend(leaves(child))
        return words

    if not isinstance(parse_tree, tuple):
        return False
    root, children = parse_tree
    if root == symbol:
        return ' '.join(leaves(parse_tree))
    for child in children:
        found = extract(child, symbol)
        if found:
            return found
    return ''
def get_feedstock_dirs(feedstock_dirs, feedstock_file):
    """Return a list of feedstock directories to examine.

    When feedstock_file is given it overrides feedstock_dirs: each non-blank,
    non-comment (#) line is one directory.
    """
    if feedstock_file is None:
        return feedstock_dirs

    def _is_valid(line):
        # Skip comments (#) and blank lines.
        return bool(line.strip()) and not line.startswith('#')

    with open(feedstock_file) as fh:
        return [line.strip() for line in fh if _is_valid(line)]
def create_histogram_string(data):
    """A convenience function that creates a graph in the form of a string.

    :param dict data: A dictionary, where the values are integers representing
        a count of the keys.
    :return: A graph in string form, pre-formatted for raw printing.
    """
    assert isinstance(data, dict)
    for key in data.keys():
        assert isinstance(data[key], int)

    total_results = sum([value for value in data.values()])
    txt = ""

    # order keys for printing in order (purely aesthetics)
    ordered_keys = sorted(data, key=lambda k: data[k])
    results = []

    # longest_key used to calculate how many white spaces should be printed
    # to make the graph columns line up with each other
    longest_key = 0
    for key in ordered_keys:
        value = data[key]
        longest_key = len(key) if len(key) > longest_key else longest_key
        # IMPOSING LIMITATION: truncating keys to 95 chars, keeping longest
        # key 5 chars longer
        longest_key = 100 if longest_key > 100 else longest_key
        percent = value / total_results * 100
        # one filled square (U+25A0) per 2 percent
        results.append((key[:95], value, percent, u"\u25A0" * (int(percent / 2))))

    # two for loops are ugly, but allowed us to count the longest_key -
    # so we loop through again to print the text
    for r in results:
        txt += "%s%s: %5s - %5s%% %s\n" % (int(longest_key - len(r[0])) * ' ', r[0], r[1], str(r[2])[:4], u"\u25A0" * (int(r[2] / 2)))
    return txt
def make_extension(name, content):
    """Build a single-entry extension dict for cybox objects."""
    extension = dict()
    extension[name] = content
    return extension
def guess_bl_category(identifier: str) -> str:
    """Guess the Biolink category for a given identifier.

    Note: This is a temporary solution and should not be used long term.

    Args:
        identifier: A CURIE

    Returns:
        The category for the given CURIE
    """
    prefix = identifier.split(':', 1)[0]
    prefix_categories = {
        'UniProtKB': 'biolink:Protein',
        'ComplexPortal': 'biolink:Protein',
        'GO': 'biolink:OntologyClass',
    }
    return prefix_categories.get(prefix, 'biolink:NamedThing')
def get_free_index(dictionary):
    """Return the first free integer index of *dictionary*, starting at 0.

    :param dictionary: mapping whose integer keys are probed upwards from 0
    :return: smallest integer >= 0 that is not a key of *dictionary*
    """
    candidate = 0
    while candidate in dictionary:
        candidate += 1
    return candidate
def has_unexpected_subset_keys(expected_keys, minimum_keys, actual_keys, name):
    """Return an error string if unexpected keys are present or required
    keys are missing; return None when everything checks out.

    Keys in *expected_keys* but not in *minimum_keys* are optional.
    This is important to catch typos.
    """
    actual = frozenset(actual_keys)
    extra = actual - expected_keys
    absent = minimum_keys - actual
    if not (extra or absent):
        return None
    missing_part = (' missing: %s' % sorted(absent)) if absent else ''
    extra_part = (' superfluous: %s' % sorted(extra)) if extra else ''
    return 'Unexpected %s%s%s; did you make a typo?' % (
        name, missing_part, extra_part)
def _getVersionString(value): """Encodes string for version information string tables. Arguments: value - string to encode Returns: bytes - value encoded as utf-16le """ return value.encode("utf-16le")
def _inputs(param): """Return a string with all the inputs property formatted. """ if not param: return '' return '%s, ' % ', '.join([par['name'][1:-1] for par in param])
def cost2(x: int) -> int:
    """Compute the cost of crab movement by x steps for part 2.

    The cost is the x-th triangular number 1 + 2 + ... + x, computed in
    closed form.
    """
    return (x * x + x) // 2
def solution(n):
    """Returns the sum of all the multiples of 3 or 5 below n.

    >>> solution(3)
    0
    >>> solution(4)
    3
    >>> solution(10)
    23
    >>> solution(600)
    83700
    """
    # One pass over range(n) replaces the original pair of hand-rolled
    # while-loops plus set-based de-duplication; a number divisible by
    # both 3 and 5 is naturally counted once.
    return sum(i for i in range(n) if i % 3 == 0 or i % 5 == 0)
def FilterAttributeValue(ObjectInstanceList, *condition_tuples):
    """
    Purpose: Filter a list of object instances by their attribute value;
             supply with (attribute_name, attribute_value) tuples
    Use: FilterAttributeValue([ins1,ins2,ins3],(attr1,value1),(attr2,value2))
    Note:
    1. Instances in ObjectInstanceList should all have the attributes
       listed in the attr/attr_value pairs; an instance missing an
       attribute is kept (that condition is skipped for it, as before).
    2. logical AND is applied among multiple tuples.
    """
    def _matches(instance):
        # True when every present attribute equals its required value.
        for attribute_name, attribute_value in condition_tuples:
            try:
                current = instance.__dict__[attribute_name]
            except KeyError:
                # Attribute absent: skip this condition.  The original code
                # used a bare `except: pass` here, which also hid unrelated
                # errors (e.g. a raising __eq__); we only tolerate the
                # documented missing-attribute case.
                continue
            if current != attribute_value:
                return False
        return True

    return [instance for instance in ObjectInstanceList if _matches(instance)]
def _limit_cell_length(iterable, limit): """ Limit the length of each string in the iterable. :param iterable: iterable object containing string values. :type iterable: ``list`` or ``tuple`` etc. :param limit: The number of characters allowed in each string :type limit: ``int`` :return: The list of limited strings. :rtype: ``list`` of ``str`` """ return [val if len(str(val)) < limit else '{}...'.format(str(val)[:limit-3]) for val in iterable]
def get_underlined_header(header: str, char: str = '-') -> str:
    """Return *header* followed by a same-length underline made of *char*."""
    return header + '\n' + char * len(header)
def add_modelines(scriptlines, language):
    """Sets file metadata/modelines so that editors will treat it correctly

    Since setting the shebang destroys auto-detection for scripts, we add
    mode-lines for Emacs and vi.

    :param scriptlines: list of script lines; line 0 is the shebang
    :param language: language/mode name used in the generated modelines
    :return: new list: shebang, optional Emacs modeline, body,
        optional vi modeline
    """
    shebang = scriptlines[0:1]
    body = scriptlines[1:]
    # Only add a modeline when the script does not already carry one.
    if not any('-*-' in line for line in scriptlines):
        emacs_modeline = ['# -*- mode: %s -*-\n' % language]
    else:
        emacs_modeline = []
    if not any(' vi:' in line for line in scriptlines):
        # BUG FIX: previously hard-coded "filetype=python", ignoring the
        # `language` argument that the Emacs modeline honors.
        vi_modeline = ['# vi: filetype=%s\n' % language]
    else:
        vi_modeline = []
    return shebang + emacs_modeline + body + vi_modeline
def most_common(lst):
    """Find the most common element in a list; ties are broken arbitrarily.

    Raises ValueError if *lst* is empty.
    """
    from collections import Counter
    # Counting once up front is O(n); the previous `key=lst.count`
    # re-scanned the whole list for every distinct element, i.e. O(n**2).
    counts = Counter(lst)
    # max over set(lst) preserves the original tie/empty-input semantics.
    return max(set(lst), key=counts.get)
def _get_name_fi(name, fi_index): """ Generate variable name taking into account fidelity level. Parameters ---------- name : str base name fi_index : int fidelity level Returns ------- str variable name """ if fi_index > 0: return "%s_fi%d" % (name, fi_index + 1) else: return name
def rhombus_area(diagonal_1, diagonal_2):
    """Return the area of a rhombus given its two diagonals.

    Inputs are coerced to float; negative diagonals raise ValueError.
    """
    d1 = float(diagonal_1)
    d2 = float(diagonal_2)
    if d1 < 0.0 or d2 < 0.0:
        raise ValueError('Negative numbers are not allowed')
    # Area of a rhombus is half the product of its diagonals.
    return d1 * d2 / 2.0
def an_capabilities(b: bytes) -> list:
    """
    Decode autonegotiation capabilities

    Args:
        b: two big-endian bytes encoding the capability bitmap

    Returns:
        list of human readable capability names, lowest bit first
    """
    word = (b[0] << 8) + b[1]
    cap_list = ['1000BASE-T (full duplex mode)',
                '1000BASE-T (half duplex mode)',
                '1000BASE-X (-LX, -SX, -CX full duplex mode)',
                '1000BASE-X (-LX, -SX, -CX half duplex mode)',
                'Asymmetric and Symmetric PAUSE (for full-duplex links)',
                'Symmetric PAUSE (for full-duplex links)',
                'Asymmetric PAUSE (for full-duplex links)',
                'PAUSE (for full-duplex links)',
                '100BASE-T2 (full duplex mode)',
                '100BASE-T2 (half duplex mode)',
                '100BASE-TX (full duplex mode)',
                '100BASE-TX (half duplex mode)',
                '100BASE-T4',
                '10BASE-T (full duplex mode)',
                '10BASE-T (half duplex mode)',
                'Other or unknown']
    # enumerate + bit-shift comprehension replaces the manual
    # range(len(...)) loop with 2**bit arithmetic.
    return [name for bit, name in enumerate(cap_list) if word & (1 << bit)]
def _check_method(estimator, method): """Check that an estimator has the method attribute. If method == 'transform' and estimator does not have 'transform', use 'predict' instead. """ if method == 'transform' and not hasattr(estimator, 'transform'): method = 'predict' if not hasattr(estimator, method): ValueError('base_estimator does not have `%s` method.' % method) return method
def addition(a, b):
    """
    Addition modulo 2^16

    :param a: <int>
    :param b: <int>
    :return: <int> sum wrapped into the range [0, 0xFFFF]
    """
    wrapped = (a + b) % 0x10000
    # Internal sanity check: the modulo keeps the result in 16-bit range.
    assert 0 <= wrapped <= 0xFFFF
    return wrapped
def egcd(a, b):
    """Extended Euclidean algorithm: return ``(g, x, y)`` such that
    ``a*x + b*y == g == gcd(a, b)``.

    Part of the `modInv` implementation.
    """
    if a == 0:
        return (b, 0, 1)
    quotient, remainder = divmod(b, a)
    g, y, x = egcd(remainder, a)
    # Back-substitute the Bezout coefficients one recursion level up.
    return (g, x - quotient * y, y)
def stringQuote(X):
    """
    Return input as a quoted string without leading or trailing spaces,
    newlines, carriage returns or tabs
    """
    # BUG FIX: str.strip() takes a *set* of characters; the previous
    # argument '\n| |\r|\t' was a regex-style alternation that also
    # stripped literal '|' characters, which the docstring never promised.
    X = str(X).strip(' \n\r\t')
    return '"' + X + '"'
def filter_taught_workshops(queryset, name, values):
    """Limit Persons to only instructors from events with specific tags.

    This needs to be in a separate function because django-filters
    doesn't support `action` parameter as supposed, ie. with
    `method='filter_taught_workshops'` it doesn't call the method;
    instead it tries calling a string, which results in error."""
    if not values:
        # No tags selected: leave the queryset untouched.
        return queryset
    filtered = queryset.filter(
        task__role__name='instructor',
        task__event__tags__in=values,
    )
    # distinct() avoids duplicate Persons from the join over tasks/tags.
    return filtered.distinct()
def merge(a, b):
    """
    Given two clocks, return a new clock with all values greater or
    equal to those of the merged clocks.
    """
    return tuple(max(x, y) for x, y in zip(a, b))
def validate_predictivescalingmaxcapacitybehavior(predictivescalingmaxcapacitybehavior):
    """
    Validate PredictiveScalingMaxCapacityBehavior for ScalingInstruction
    Property: ScalingInstruction.PredictiveScalingMaxCapacityBehavior
    """
    valid_behaviors = (
        "SetForecastCapacityToMaxCapacity",
        "SetMaxCapacityToForecastCapacity",
        "SetMaxCapacityAboveForecastCapacity",
    )
    if predictivescalingmaxcapacitybehavior in valid_behaviors:
        return predictivescalingmaxcapacitybehavior
    raise ValueError(
        "ScalingInstruction PredictiveScalingMaxCapacityBehavior must be one of: %s"
        % ", ".join(valid_behaviors)
    )
def _covariant_conic(A_scaled_coeffs, B_scaled_coeffs, monomials):
    """
    Helper function for :meth:`TernaryQuadratic.covariant_conic`

    INPUT:

    - ``A_scaled_coeffs``, ``B_scaled_coeffs`` -- The scaled
      coefficients of the two ternary quadratics.

    - ``monomials`` -- The monomials
      :meth:`~TernaryQuadratic.monomials`.

    OUTPUT:

    The so-called covariant conic, a ternary quadratic. It is
    symmetric under exchange of ``A`` and ``B``.

    EXAMPLES::

        sage: ring.<x,y,z> = QQ[]
        sage: A = invariant_theory.ternary_quadratic(x^2+y^2+z^2)
        sage: B = invariant_theory.ternary_quadratic(x*y+x*z+y*z)
        sage: from sage.rings.invariant_theory import _covariant_conic
        sage: _covariant_conic(A.scaled_coeffs(), B.scaled_coeffs(),
        ....:                  A.monomials())
        -x*y - x*z - y*z
    """
    # Unpack the six scaled coefficients of each conic.  NOTE(review):
    # the letters follow the classical convention for a ternary quadratic
    # a*x^2 + b*y^2 + c*z^2 + 2h*x*y + 2g*x*z + 2f*y*z — confirm against
    # TernaryQuadratic.scaled_coeffs().
    a0, b0, c0, h0, g0, f0 = A_scaled_coeffs
    a1, b1, c1, h1, g1, f1 = B_scaled_coeffs
    # Bilinear combination of the two coefficient sets; each term pairs a
    # coefficient of A with one of B and vice versa, which makes the result
    # symmetric under exchange of A and B.
    return (
        (b0*c1+c0*b1-2*f0*f1) * monomials[0] +
        (a0*c1+c0*a1-2*g0*g1) * monomials[1] +
        (a0*b1+b0*a1-2*h0*h1) * monomials[2] +
        2*(f0*g1+g0*f1 -c0*h1-h0*c1) * monomials[3] +
        2*(h0*f1+f0*h1 -b0*g1-g0*b1) * monomials[4] +
        2*(g0*h1+h0*g1 -a0*f1-f0*a1) * monomials[5]
    )
def get_violated_bounds(val, bounds):
    """
    Test *val* against a lower and an upper bound.

    Arguments:
        val: Value to be tested
        bounds: Tuple containing the lower, upper bounds (either may be
            None, meaning unbounded on that side)

    Returns:
        ``(violated_bound, direction)`` where direction is -1 (move down),
        1 (move up), or ``(None, 0)`` when the value is feasible.
    """
    lower, upper = bounds
    if upper is not None and val > upper:
        return (upper, -1)
    if lower is not None and val < lower:
        return (lower, 1)
    return (None, 0)