content
stringlengths
42
6.51k
def islist(val):
    """Check if the entry is a list or a string of a list.

    Parameters
    ----------
    val
        an entry of any type

    Returns
    -------
    bool
        True if the input is either a list or a string whose text form is
        bracketed like a list, False otherwise
    """
    text = str(val)
    # Guard against empty strings: the previous text[0] raised IndexError.
    if not text:
        return False
    return text[0] == '[' and text[-1] == ']'
def nb_predicates(self):
    """Return the number of predicates in the database.

    The HDT-backed lookup is currently disabled, so this stub always
    reports zero.
    """
    # Real backend call, intentionally disabled:
    # return self._hdt.nb_predicates
    return 0
def extract_feature_sequence(extracted_results, frame_idx, causal, seq_len, step=1):
    """Extract the target frame from person results, and pad the sequence to a
    fixed length.

    Args:
        extracted_results (List[List[Dict]]): Multi-frame feature extraction
            results stored in a nested list. Each element of the outer list
            is the feature extraction results of a single frame, and each
            element of the inner list is the feature information of one
            person, which contains:

                features (ndarray): extracted features
                track_id (int): unique id of each person, required when
                    ``with_track_id==True``
                bbox ((4, ) or (5, )): left, right, top, bottom, [score]

        frame_idx (int): The index of the frame in the original video.
        causal (bool): If True, the target frame is the first frame in a
            sequence. Otherwise, the target frame is in the middle of a
            sequence.
        seq_len (int): The number of frames in the input sequence.
        step (int): Step size to extract frames from the video.

    Returns:
        List[List[Dict]]: Multi-frame feature extraction results stored in a
            nested list with a length of seq_len.  (Note: only the padded
            list is returned; no target index is returned by this code.)
    """
    if causal:
        # Target frame leads the window: everything else comes after it.
        frames_left = 0
        frames_right = seq_len - 1
    else:
        # Target frame sits in the middle of the window.
        frames_left = (seq_len - 1) // 2
        frames_right = frames_left
    num_frames = len(extracted_results)

    # get the padded sequence
    # pad_left/pad_right: copies of the first/last frame needed when the
    # window would extend past the clip boundaries (in units of `step`).
    pad_left = max(0, frames_left - frame_idx // step)
    pad_right = max(0, frames_right - (num_frames - 1 - frame_idx) // step)
    # start/end keep the slice aligned with frame_idx modulo `step` while
    # staying inside [0, num_frames).
    start = max(frame_idx % step, frame_idx - frames_left * step)
    end = min(num_frames - (num_frames - 1 - frame_idx) % step,
              frame_idx + frames_right * step + 1)
    extracted_results_seq = [extracted_results[0]] * pad_left + \
        extracted_results[start:end:step] + [extracted_results[-1]] * pad_right
    return extracted_results_seq
def _match(property):
    """Map a GitHub event type name to a human readable phrase.

    :@param property: string (event type name)
    :@return event_type: string (empty string for unknown event types)
    """
    event_mapper = {
        "ForkEvent": "forked",
        "WatchEvent": "started",
        "CheckRunEvent": "checked run",
        "CommitCommentEvent": "committed comment",
        "CreateEvent": "created",
        "DeleteEvent": "deleted",
        "ForkApplyEvent": "forked apply",
        "IssueCommentEvent": "issueed comment",
        "IssuesEvent": "iussue",
        "LabelEvent": "lebel",
        "MemberEvent": "member",
        "MembershipEvent": "membership",
        "MilestoneEvent": "milestone",
        "PullRequestEvent": "pulled a request",
        "PullRequestReviewEvent": "review pull request",
        "PullRequestReviewCommentEvent": "review & comment pull request",
        "RepositoryEvent": "repo",
        "PushEvent": "pushed",
        "RepositoryVulnerabilityAlertEvent": "repo sequirity",
        "TeamEvent": "team",
        "TeamAddEvent": "added team",
    }
    # .get with a default folds the membership check and lookup together.
    return event_mapper.get(property, "")
def cleanhtml(txt):
    """Strip anything that looks like an HTML tag out of ``txt``."""
    import re
    tag_pattern = re.compile('<.*?>')
    return tag_pattern.sub('', txt)
def b_f_general(graph_array, current_node):
    """Runs the Bellman-Ford algorithm to find shortest paths.

    graph_array is an adjacency matrix; returns the distance list from
    current_node to every node.
    """
    node_count = len(graph_array)
    # Step 1: every node starts unreachable with no predecessor.
    distance = [float('inf')] * node_count
    predecessor = [None] * node_count
    distance[current_node] = 0
    # Step 2: relax all edges |V| - 1 times.
    for _ in range(1, node_count):
        for src, edges in enumerate(graph_array):
            for dst, weight in enumerate(edges):
                candidate = distance[src] + weight
                if candidate < distance[dst]:
                    distance[dst] = candidate
                    predecessor[dst] = src
    return distance
def _end_of_encoding(encoding: bytes, start: int) -> int:
    """Find the end index of the encoding starting at index start.

    The encoding is not validated very extensively. There are no guarantees
    what happens for invalid encodings; an error may be raised, or a bogus
    end index may be returned. Callers are expected to check that the
    returned end index actually results in a valid encoding.
    """
    if start not in range(len(encoding)):
        raise ValueError(f"Start index {start} not in range({len(encoding)})")

    paren_depth = 0

    i = start
    while i < len(encoding):
        # Slice (not index) so c stays bytes and compares against b"..." literals.
        c = encoding[i:i+1]
        if c in b"([{":
            # Opening parenthesis of some type, wait for a corresponding closing paren.
            # This doesn't check that the parenthesis *types* match
            # (only the *number* of closing parens has to match).
            paren_depth += 1
            i += 1
        elif paren_depth > 0:
            if c in b")]}":
                # Closing parentheses of some type.
                paren_depth -= 1
            i += 1
            if paren_depth == 0:
                # Final closing parenthesis, end of this encoding.
                return i
        else:
            # All other encodings consist of exactly one character.
            return i + 1

    if paren_depth > 0:
        raise ValueError(f"Incomplete encoding, missing {paren_depth} closing parentheses: {encoding!r}")
    else:
        raise ValueError(f"Incomplete encoding, reached end of string too early: {encoding!r}")
def convert_genre(genre: str) -> str:
    """Return the HTML snippet rendering a word's genre, or '' when absent."""
    if not genre:
        return ""
    return f" <i>{genre}.</i>"
def get_lighter_color(color):
    """Return a lighter variant of the given color.

    Keyword arguments:
    color -- 3-element tuple; channel order is irrelevant (RGB or BGR)

    Return: tuple with every channel boosted by the same amount, where
    the boost is capped at 30 and can never push a channel past 255.
    """
    boost = min(255 - max(color), 30)
    return tuple(channel + boost for channel in color)
def parCondense(form, tar):
    """
    Performs paranthesis reduction at a particular depth.

    Parameters
    ----------
    form : string
        Formula.
    tar : int
        Target depth for paranthesis condensation.

    Returns
    -------
    ans : string
        The condensed paranthesis form of the given formula.
    """
    # Sentinel character forces the final pending `ref` group to be
    # flushed by the else-branch below; it is stripped again at the end.
    form += '@'
    ans = ""      # output being assembled
    temp = ""     # characters of the group currently being read at depth tar
    ref = ""      # last completed group at depth tar (candidate for merging)
    refctr = 0    # how many consecutive identical groups have been seen
    ctr = 0       # current parenthesis depth
    for ch in form:
        if(ch == '('):
            ctr += 1
        if(ctr >= tar):
            # Inside (or below) the target depth: accumulate the group.
            temp += ch
        else:
            # Shallower than the target depth: flush any pending repeated
            # group (appending its repeat count when > 1) before copying
            # the character through.
            if(len(ref) > 0):
                ans += ref
                ans += str(refctr) if refctr > 1 else ""
                ref = ""
                refctr = 0
            ans += ch
        if(ch == ')'):
            if(ctr == tar):
                # A depth-tar group just closed: either it repeats the
                # previous one (bump the counter) or it starts a new run.
                if(temp == ref):
                    refctr += 1
                else:
                    ans += ref
                    ans += str(refctr) if refctr > 1 else ""
                    ref = temp
                    refctr = 1
                temp = ""
            ctr -= 1
    # Strip the '@' sentinel copied into the output.
    ans = ans[:-1]
    return ans
def l(a: float, b: float) -> float:
    """
    Semi-latus rectum of an ellipse: l = b * b / a.

    :param a: semi-major axis
    :type a: float
    :param b: semi-minor axis
    :type b: float
    :return: semi-latus rectum
    :rtype: float
    """
    squared_minor = b * b
    return squared_minor / a
def bitarray2fasm(bitarray):
    """Render a bit array ('0'/'1' characters, index 0 = LSB) as a FASM
    binary literal.
    """
    # Reverse so the most significant bit comes first in the literal.
    bitstr = ''.join(reversed(bitarray))
    return "{}'b{}".format(len(bitstr), bitstr)
def update_transmission_parameters(parameters, compartments_to_update):
    """Add a per-compartment contact rate for every compartment whose
    immunity/susceptibility alters transmission.

    Each compartment gets contact_rate_<name> = contact_rate *
    rr_transmission_<name>; the dict is updated in place and returned.
    """
    base_rate = parameters["contact_rate"]
    for name in compartments_to_update:
        parameters["contact_rate_" + name] = base_rate * parameters["rr_transmission_" + name]
    return parameters
def stripNamespace(name):
    """Return ``name`` with everything up to the last colon removed.

    :type name: str
    :rtype: str
    """
    # rpartition returns the untouched string in slot 2 when no colon exists.
    return name.rpartition(':')[2]
def dict_to_string(input_dict: dict, separator=", ") -> str:
    """Render a dict as '<key> : <value>' pairs with values formatted to
    5 decimal places, joined by ``separator``.

    :param input_dict: mapping of labels to numeric values
    :param separator: string placed between rendered pairs
    :return: the joined representation
    """
    rendered_pairs = ("{} : {:.5f}".format(key, value)
                      for key, value in input_dict.items())
    return separator.join(rendered_pairs)
def mclag_ka_session_dep_check(ka, session_tmout):
    """Validate the dependency between MCLAG keepalive and session timeout.

    The session timeout must be at least three times the keepalive timer
    and an exact multiple of it.  Returns (ok, message) where message is
    empty on success.
    """
    if session_tmout < 3 * ka:
        return False, "MCLAG Keepalive:{} Session_timeout:{} values not satisfying session_timeout >= (3 * KA) ".format(ka, session_tmout)
    if session_tmout % ka != 0:
        return False, "MCLAG keepalive:{} Session_timeout:{} Values not satisfying session_timeout should be a multiple of KA".format(ka, session_tmout)
    return True, ""
def _get_function_name(func, aliases):
    """Return the display name of a function: the alias if one is
    registered, otherwise the function's own ``__name__``.
    """
    if func in aliases:
        return aliases[func]
    # Separate branch because not every function carries a __name__
    # attribute, so a dict `get` with a default would not cover it.
    try:
        return func.__name__
    except AttributeError:
        raise TypeError('function "func" does not have a __name__ attribute. '
                        'Please use "function_aliases" to provide a function name alias.')
def add3(a, b):
    """Component-wise sum of two 3D vectors, c = a + b."""
    return [a[i] + b[i] for i in range(3)]
def normalize_typename(typename):
    """Lower-case a type name after dropping any namespace prefix.

    e.g. 'tows:parks' -> 'parks'
    """
    if ":" in typename:
        typename = typename.split(":")[1]
    return typename.lower()
def discount_with_dones(rewards, dones, gamma):
    """
    Apply the discount value to the reward, resetting at episode ends.

    :param rewards: ([float]) The rewards
    :param dones: ([bool]) Whether an environment is done or not
    :param gamma: (float) The discount value
    :return: ([float]) The discounted rewards
    """
    discounted = []
    running_return = 0
    # Walk backwards so each step accumulates its future return; a done
    # flag zeroes the bootstrap so episodes don't bleed into each other.
    for reward, done in zip(reversed(rewards), reversed(dones)):
        running_return = reward + gamma * running_return * (1. - done)
        discounted.append(running_return)
    discounted.reverse()
    return discounted
def time_taken(elapsed):
    """Format an elapsed duration in seconds as h:mm:ss.

    Intended for use with time.monotonic() deltas.
    """
    minutes, seconds = divmod(elapsed, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
def factorial(n):
    """Return n * (n - 1) * (n - 2) * ... * 1, with factorial(0) == 1.

    >>> factorial(5)
    120

    Raises:
        ValueError: if n is negative.  (The previous recursive version
        recursed forever on negative input until RecursionError.)
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def area(region):
    """Returns the area of the specified rectangular region.

    Args:
        region (dict): A dictionary containing {x1, y1, x2, y2} arguments.

    Returns:
        float: The area (width * height) of the region.
    """
    return (region["x2"] - region["x1"]) * (region["y2"] - region["y1"])
def partition(array, first, last):
    """helper for quick_sort

    Partitions array[first:last+1] in place around the pivot array[first]
    and returns the pivot's final index.
    """
    pivot_value = array[first]

    left_mark = first + 1
    right_mark = last

    done = False
    while not done:
        # Advance left_mark over items already on the pivot's correct side.
        while left_mark <= right_mark and array[left_mark] <= pivot_value:
            left_mark = left_mark + 1
        # Retreat right_mark over items already on the pivot's correct side.
        while array[right_mark] >= pivot_value and right_mark >= left_mark:
            right_mark = right_mark - 1
        if right_mark < left_mark:
            # Marks crossed: the partition point has been found.
            done = True
        else:
            # Both marks point at out-of-place items: swap and continue.
            temp = array[left_mark]
            array[left_mark] = array[right_mark]
            array[right_mark] = temp

    # Drop the pivot into its final slot (right_mark).
    temp = array[first]
    array[first] = array[right_mark]
    array[right_mark] = temp

    return right_mark
def score_time_cost(event, attributes):
    """
    Score based on indicators of time cost (WIP).

    Key indicators of resource cost will be numbers of targets and
    analysis effort; each attribute contributes a fixed amount depending
    on its category and type.
    """
    network_scores = {"domain": 1000, "hostname": 500, "url": 500, "ip-src": 500}
    payload_scores = {
        "vulnerability": 10000,
        "malware-sample": 5000,
        "filename": 10, "filename|md5": 10, "filename|sha1": 10,
        "filename|sha256": 10, "attachment": 10,
        "md5": 10, "sha1": 10, "sha256": 10,
    }
    analysis_scores = {
        "vulnerability": 10000,
        "filename": 10, "filename|md5": 10, "filename|sha1": 10,
        "filename|sha256": 10,
        "md5": 10, "sha1": 10, "sha256": 10,
        "comment": 100, "link": 100, "url": 100,
    }
    payload_categories = ("Payload delivery", "Payload installation",
                          "Artifacts dropped")

    score = 0
    for attribute in attributes:
        category = attribute["category"]
        if category == "Network activity":
            score += network_scores.get(attribute["type"], 0)
        elif category in payload_categories:
            score += payload_scores.get(attribute["type"], 0)
        elif category == "External analysis":
            score += analysis_scores.get(attribute["type"], 0)
    return score
def keyify_value(value):
    """Turn ``value`` into a slug-style key: lower case, with spaces and
    apostrophes replaced by dashes.

    :type value: str
    :return: str
    """
    slug = value.lower()
    for unwanted in (' ', "'"):
        slug = slug.replace(unwanted, '-')
    return slug
def ensure_operators_are_strings(value, criteria_pattern):
    """
    Decode ``value`` and ``criteria_pattern`` from bytes to unicode when
    needed.

    Operators expect unicode (string) inputs, but uncontrolled inputs
    (e.g. trigger payloads under Python 3 deployments) can arrive as
    bytes; decoding those as UTF-8 keeps the string operators working.

    :return: tuple(value, criteria_pattern)
    """
    def _as_text(item):
        # Only bytes are decoded; every other type passes through untouched.
        return item.decode('utf-8') if isinstance(item, bytes) else item

    return _as_text(value), _as_text(criteria_pattern)
def update_tuple(origin_tuple, update_value, update_index):
    """Return a copy of a tuple/namedtuple with one position replaced.

    Raises ValueError when origin_tuple is not a tuple or when
    update_index is out of range.
    """
    # Namedtuple is inherit from tuple.
    if not isinstance(origin_tuple, tuple):
        raise ValueError("Only tuple/namedtuple supported. Origin_tuple type: "
                         "%s." % type(origin_tuple))
    if update_index >= len(origin_tuple):
        raise ValueError("Update index is out of range. Length of original tuple "
                         "%s, Update index: %s." % (len(origin_tuple), update_index))

    values = [update_value if position == update_index else item
              for position, item in enumerate(origin_tuple)]

    # A namedtuple's type derives directly (and only) from tuple and
    # needs field-wise construction; a plain tuple does not.
    if type(origin_tuple).__bases__ == (tuple,):
        return type(origin_tuple)(*values)
    return tuple(values)
def sort_by_priority_list(values, priority):
    """
    Sort a list of parameter dictionaries so titles listed in ``priority``
    come first, in that order; all other entries keep their relative
    order at the end.  Useful when setting up parameters automatically.
    """
    def rank(entry):
        title = entry['title']
        # Prioritised titles rank by position (1-based); everything else
        # shares the maximum rank so sorted() keeps their original order.
        if title in priority:
            return priority.index(title) + 1
        return len(values)

    return sorted(values, key=rank)
def coerce_to_strict(const):
    """
    Map the non-strict JSON tokens to None so the result can be *encoded*
    into strict JSON, see `encode`.
    """
    # before python 2.7, 'true', 'false', 'null', were include here.
    non_strict_tokens = ("Infinity", "-Infinity", "NaN")
    return None if const in non_strict_tokens else const
def rm_spaces_and_chars_from_str(input_str, remove_slashes=True, replace_brackets=True,
                                 replace_quotes=True, replace_dots=True, remove_plus=True,
                                 swap_pcent=True, replace_braces=True):
    """Sanitise a string: spaces and selected punctuation become
    underscores, '+' becomes '_plus_' and '%' becomes 'pcent'.  Each
    substitution group can be toggled off via its flag.
    """
    # Collect every character that maps to '_' and apply them in one pass;
    # the single-char replacements don't interact, so order is irrelevant.
    underscore_chars = [' ']
    if replace_brackets:
        underscore_chars += ['(', ')']
    if replace_braces:
        underscore_chars += ['{', '}']
    if replace_quotes:
        underscore_chars.append("'")
    if replace_dots:
        underscore_chars.append('.')
    if remove_slashes:
        underscore_chars += ['\\', '/']
    input_str = input_str.translate(str.maketrans({ch: '_' for ch in underscore_chars}))
    if remove_plus:
        input_str = input_str.replace('+', '_plus_')
    if swap_pcent:
        input_str = input_str.replace('%', 'pcent')
    return input_str
def splitFilename(filename):
    """
    Pass in a standard style rpm fullname

    Return a name, version, release, epoch, arch, e.g.::
        foo-1.0-1.i386.rpm returns foo, 1.0, 1, i386
        1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
    """
    # Drop the trailing ".rpm" suffix when present.
    if filename[-4:] == '.rpm':
        filename = filename[:-4]

    # Fields are located right-to-left: arch sits after the last '.',
    # release after the last '-' before it, version after the next '-'.
    archIndex = filename.rfind('.')
    arch = filename[archIndex+1:]

    relIndex = filename[:archIndex].rfind('-')
    rel = filename[relIndex+1:archIndex]

    verIndex = filename[:relIndex].rfind('-')
    ver = filename[verIndex+1:relIndex]

    # Epoch is an optional "N:" prefix; absent means empty string.
    epochIndex = filename.find(':')
    if epochIndex == -1:
        epoch = ''
    else:
        epoch = filename[:epochIndex]

    # With no epoch, epochIndex is -1 so the slice starts at 0 and the
    # whole leading segment becomes the name.
    name = filename[epochIndex + 1:verIndex]

    return name, ver, rel, epoch, arch
def bind_method(value, instance):  # good with this
    """
    Return a bound method if value is callable, or value otherwise.

    When callable, ``instance`` is injected as the implicit first
    argument of every call.
    """
    if not callable(value):
        return value

    def bound(*args):
        return value(instance, *args)

    return bound
def write_values(value):
    """Write a `*values` line in an LTA file.

    Parameters
    ----------
    value : [sequence of] int or float or str

    Returns
    -------
    str
    """
    if isinstance(value, (str, int, float)):
        return str(value)
    return ' '.join(str(item) for item in value)
def combine(*styles):
    """Merge multiple style specifications into one; later entries win
    and None entries are skipped.

    Parameters
    ----------
    styles: sequence of :class:`dict` instances
        A collection of dicts containing CSS-compatible name-value pairs.

    Returns
    -------
    styles: :class:`dict` containing CSS-compatible name-value pairs.
    """
    merged = {}
    for spec in styles:
        if spec is not None:
            merged.update(spec)
    return merged
def isa(obj, types):
    """An alias for the python built-in ``isinstance``.

    Passing the ``callable`` builtin itself as ``types`` checks
    callability instead of instance-ness.
    """
    return callable(obj) if types is callable else isinstance(obj, types)
def short_to_full_git_sha(short, refs):
    """Find the full git shas matching a short sha prefix.

    :param short: A short git sha represented as a string
    :param refs: A mapping of refs in the git repository (values are shas)
    :return: A list of the distinct full shas starting with ``short``
        (empty when there is no match)
    """
    matches = []
    for candidate in set(refs.values()):
        if candidate.startswith(short):
            matches.append(candidate)
    return matches
def getHeight(root):
    """Height of a binary tree: 0 for an empty tree, otherwise one more
    than the taller subtree, computed recursively.
    """
    if not root:
        return 0
    left_height = getHeight(root.left)
    right_height = getHeight(root.right)
    return max(left_height, right_height) + 1
def check_line_start(line_breaks, char_count):
    """
    Determine whether the current word is the start of a line.

    line_breaks: dict
        The words split across a line in the current page
    char_count: int
        The number of characters examined so far in the current line

    Returns
    -------
    bool
        True if the current word starts a line, False otherwise
    """
    starts = line_breaks["start"]
    ends = line_breaks["end"]
    if not starts:
        return True
    if not ends:
        return False
    # A new line requires an unmatched break and no characters consumed yet.
    return starts[-1] != ends[-1] and char_count == 0
def parse_course_info(course):
    """
    Parse the full name of the first course in a Moodle response.

    :param course: A json statement, received as response on a Moodle call.
    :type course: dict(str, list(dict(str, int)))
    :return: The name of a course.
    :rtype: str
    """
    first_course = course['courses'][0]
    return first_course['fullname']
def string_reverser(our_string):
    """
    Reverse the input string.

    Args:
        our_string(string): String to be reversed

    Returns:
        string: The reversed string
    """
    return ''.join(reversed(our_string))
def convert_empty_sets_to_none(config_dict):
    """Replace empty collections in the configuration dictionary with None.

    :param config_dict: dict, CAZy classes and families to be scraped
    Return dictionary with no empty lists.
    """
    for key, entry in config_dict.items():
        if entry is not None and len(entry) == 0:
            config_dict[key] = None
    return config_dict
def resolve_time(delta: int, sep: str = "") -> str:
    """
    Converts a duration in seconds to its human-friendly representation.

    :param delta: time in seconds
    :param sep: string separator between fields
    :return: string, e.g. resolve_time(90061) -> "1d1h1m1s"
    """
    if type(delta) is not int:
        delta = int(delta)
    # Bug fix: the old loop tested against a 365-day year (31536000 s) but
    # subtracted 31556926 s (~365.24 days), which could push ``delta``
    # negative and emit output like "1y-16926s".  Use one consistent year
    # length and divmod instead of repeated subtraction.
    years, delta = divmod(delta, 60 * 60 * 24 * 365)
    days, delta = divmod(delta, 60 * 60 * 24)
    hours, delta = divmod(delta, 60 * 60)
    minutes, delta = divmod(delta, 60)

    # Form calculations into a string; zero fields are omitted except the
    # seconds, which are always shown.
    fields = []
    if years:
        fields.append(f"{years}y")
    if days:
        fields.append(f"{days}d")
    if hours:
        fields.append(f"{hours}h")
    if minutes:
        fields.append(f"{minutes}m")
    fields.append(f"{delta}s")
    return sep.join(fields)
def get_dotted_attr(obj, attr_name):
    """
    Get the value of the attribute.  Unlike getattr this accepts nested
    or 'dotted' attribute paths such as "a.b.c".
    """
    if '.' not in attr_name:
        return getattr(obj, attr_name)
    head, rest = attr_name.split('.', 1)
    # Resolve one level, then recurse on the remainder of the path.
    return get_dotted_attr(getattr(obj, head), rest)
def normalize_tuple(value, n, name):
    """Transforms an integer or iterable of integers into an integer tuple.

    A copy of tensorflow.python.keras.util.

    Args:
      value: The value to validate and convert. Could an int, or any
        iterable of ints.
      n: The size of the tuple to be returned.
      name: The name of the argument being validated, e.g. "strides" or
        "kernel_size". This is only used to format error messages.

    Returns:
      A tuple of n integers.

    Raises:
      ValueError: If something else than an int/long or iterable thereof
        was passed.
    """
    # A bare int is broadcast to all n positions.
    if isinstance(value, int):
        return (value,) * n

    try:
        value_tuple = tuple(value)
    except TypeError:
        raise TypeError(f"The `{name}` argument must be a tuple of {n} "
                        f"integers. Received: {value}")
    if len(value_tuple) != n:
        raise ValueError(f"The `{name}` argument must be a tuple of {n} "
                         f"integers. Received: {value}")
    for single_value in value_tuple:
        # Each element only has to be convertible to int, not already int.
        try:
            int(single_value)
        except (ValueError, TypeError):
            raise ValueError(f"The `{name}` argument must be a tuple of {n} "
                             f"integers. Received: {value} including element "
                             f"{single_value} of type {type(single_value)}")
    return value_tuple
def flatten(x):
    """Flatten a list of lists into one list."""
    import itertools
    return list(itertools.chain.from_iterable(x))
def fib_matrix(n):
    """Efficient algorithm to return F(n) via matrix multiplication.
    """
    if (n <= 1):
        return n

    # Fast-doubling walk over the bits of n (skipping the leading 1 bit).
    # The triple (v1, v2, v3) tracks (F(k+1), F(k), F(k-1)) — i.e. the
    # entries of the Fibonacci matrix [[1,1],[1,0]] raised to the k-th
    # power — for the prefix of n's binary representation consumed so far.
    v1, v2, v3 = 1, 1, 0
    for rec in bin(n)[3:]:
        # Doubling step: k -> 2k via the matrix-squaring identities.
        calc = v2 * v2
        v1, v2, v3 = v1 * v1 + calc, (v1 + v3) * v2, calc + v3 * v3
        if rec == '1':
            # A set bit requires an extra increment: k -> k + 1.
            v1, v2, v3 = v1 + v2, v1, v2
    return v2
def get_capitalized_words(text):
    """Collect the individual capitalized words (initial upper case, the
    rest lower case) and return them as a list.
    """
    if isinstance(text, list):
        text = " ".join(text)
    return [word for word in text.split()
            if len(word) > 1 and word[0].isupper() and word[1:].islower()]
def getToPlane(p, shape):
    """Convert pixel coordinates into the normalized plane coordinate
    system (scaled by the larger image dimension, y axis flipped).
    """
    x, y = p
    height, width = shape[:2]
    scale = max(width, height)
    return (x / scale, -y / scale)
def convert_byte_to(n, from_unit, to, block_size=1024):
    """
    Convert a file size between units (b/k/m/g/t/p), where by default
    1MB = 1024KB (configurable via ``block_size``).

    Modified from
    https://github.com/mlibre/byte_to_humanity/blob/master/byte_to_humanity/bth.py
    so that units of any type can be transformed into specified units.
    """
    unit_rank = {'b': 1, 'k': 2, 'm': 3, 'g': 4, 't': 5, 'p': 6}
    # Positive steps scale down (towards bigger units), negative scale up.
    steps = unit_rank[to] - unit_rank[from_unit]
    return float(n) / block_size ** steps
def cut_rod_bottom_up(p, n):
    """
    Bottom-up rod-cutting: r[j] holds the best revenue for a rod of
    length j.  Only difference from the book is p[i-1] instead of p[i]
    due to 0-based indexing; r has n+1 slots so r[n] is addressable.
    """
    r = [0] * (n + 1)
    for length in range(1, n + 1):
        best = -100000
        for cut in range(1, length + 1):
            best = max(best, p[cut - 1] + r[length - cut])
        r[length] = best
    return r[n]
def get_top_header(table, field_idx):
    """
    Return the top header cell whose colspan covers the field header index.

    :param table: Rendered table (dict)
    :param field_idx: Field header index (int)
    :return: dict or None (when field_idx is beyond the last column)
    """
    covered = 0
    for header in table['top_header']:
        covered += header['colspan']
        if covered > field_idx:
            return header
def inner_product(D1, D2):
    """
    Take the inner product of the two frequency maps: the sum of the
    products of values over their shared keys.
    """
    # Float start value keeps the empty / no-overlap result a float.
    return sum((D1[key] * D2[key] for key in D1 if key in D2), 0.)
def bubble_sort(array):
    """Bubble sort in Python (in place; returns the same list).

    >>> bubble_sort([])
    []
    >>> bubble_sort([2,1])
    [1, 2]
    >>> bubble_sort([6,1,4,2,3,5])
    [1, 2, 3, 4, 5, 6]
    """
    swapped = True
    while swapped:
        swapped = False
        for idx in range(len(array) - 1):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                swapped = True
    return array
def get_paths_threshold(plist, decreasing_factor):
    """
    Get the paths cutting threshold.

    Parameters
    ----------
    plist
        List of (path, occurrences) pairs ordered by number of occurrences
    decreasing_factor
        Decreasing factor of the algorithm

    Returns
    ---------
    threshold
        Paths cutting threshold
    """
    threshold = plist[0][1]
    # Slide the threshold down the list as long as each count stays
    # within the decreasing factor of the current threshold.
    for _, count in plist[1:]:
        if count > threshold * decreasing_factor:
            threshold = count
    return threshold
def parsed_path(path):
    """Split a request path into (path, query dict).

    message=hello&user=yoo
    {
        'message': 'hello',
        'user': 'yoo',
    }
    """
    if '?' not in path:
        return path, {}
    path, query_str = path.split('?', 1)
    query = {}
    for pair in query_str.split('&'):
        # Bug fix: the old ``k, v = arg.split('=')`` raised ValueError on
        # values containing '=' (e.g. base64 payloads) and on bare keys;
        # partition splits only on the first '=' and tolerates its absence.
        key, _, val = pair.partition('=')
        query[key] = val
    return path, query
def is_parallel_ui_tests(args):
    """
    Report whether the 'parallel' flag is present and truthy in the
    command line args.

    :return: boolean
    """
    return bool("parallel" in args and args["parallel"])
def regex_split(original_output, regex_split_cmd):
    """
    Split ``original_output`` on ``regex_split_cmd`` and return the pieces.

    Note: despite the name, the separator is treated as a literal string
    (str.split), not as a regular expression.

    :param original_output: text to split
    :param regex_split_cmd: literal separator string
    :return: list of output pieces
    """
    return original_output.split(regex_split_cmd)
def rotate_axes(xs, ys, zs, zdir):
    """
    Reorder coordinates so that the axes are rotated with zdir along
    the original z axis. Prepending the axis with a '-' does the
    inverse transform, so zdir can be x, -x, y, -y, z or -z.
    """
    rotations = {
        'x': (ys, zs, xs),
        '-x': (zs, xs, ys),
        'y': (zs, xs, ys),
        '-y': (ys, zs, xs),
    }
    # Anything else ('z', '-z', or unrecognized) is the identity.
    return rotations.get(zdir, (xs, ys, zs))
def estimate_arpu(x):
    """
    Allocate a 10-year ARPU estimate given an area's mean luminosity
    (monthly ARPU * 12 months * 10 years).  The discounted-cash-flow
    variant that was here is intentionally disabled.
    """
    luminosity = x['mean_luminosity_km2']
    if luminosity > 5:
        monthly_arpu = 20
    elif luminosity > 1:
        monthly_arpu = 5
    else:
        monthly_arpu = 2
    return monthly_arpu * 12 * 10
def sub_test_noiser(new_bots, old_bots, turn, should_noise, test_other):
    """
    Sub test function to check if the noiser worked.

    Parameters
    ----------
    new_bots: bots after noising
    old_bots: bots before noising
    turn: which turn is it now? 0,1,2,3
    should_noise: should the noiser do something right now, or return same bots?
    test_other: true: check what was meant to happen to the other bots,
        false: check our own bots

    Returns
    -------
    a boolean
    """
    # On even turns the "other" team's bots sit at indices 1 and 3; on
    # odd turns at 0 and 2.  Our own bots are the complementary pair,
    # which happens to be the very same index pair in both uses below.
    observed = (1, 3) if turn % 2 == 0 else (0, 2)
    unchanged = [old_bots[i] == new_bots[i] for i in observed]
    if test_other and should_noise:
        # The noiser was supposed to act: at least one bot must differ.
        return not all(unchanged)
    # Own bots must never change; other bots must stay put when the
    # noiser is idle.
    return all(unchanged)
def _get_versioned_config(config, version=""):
    """Select one version's settings from a versioned config.

    Args:
        config: config containing "versions" and "used_version" keys
        version: specified version, default is "" (falls back to
            config["used_version"], also when the version is unknown)

    Returns:
        updated config with the selected version's settings folded in,
        plus a "branch" or "tag" key derived from "heads/..." /
        "tags/..." version names; the bookkeeping keys "versions" and
        "used_version" are removed.
    """
    selected = config["used_version"]
    if version and version in config["versions"]:
        selected = version

    result = dict(config)
    result.update(config["versions"][selected])

    if selected.startswith("heads/"):
        result["branch"] = selected.split("/")[1]
    elif selected.startswith("tags/"):
        result["tag"] = selected.split("/")[1]

    result.pop("versions")
    result.pop("used_version")
    return result
def mode_assignment(arg):
    """
    Translates arg to enforce proper assignment.

    Any of the stream synonyms ('stream', 'console', 'stdout', any case)
    collapse to 'STREAM'; other strings are upper-cased and returned
    unchanged.  Returns None for non-string input.

    Bug fix: the original called ``arg.upper()`` *before* its try block,
    so the except clause could never catch anything and non-string input
    crashed instead of returning None.
    """
    try:
        mode = arg.upper()
    except AttributeError:
        return None
    return 'STREAM' if mode in ('STREAM', 'CONSOLE', 'STDOUT') else mode
def sum_even_fibonaccis(limit):
    """Find the sum of all even terms in the Fibonacci sequence whose
    values do not exceed the provided limit.
    """
    # Fibonacci seed values are 0 and 1.
    a, b = 0, 1
    total = 0
    while a + b <= limit:
        a, b = b, a + b
        # Only the even terms contribute to the sum.
        if b % 2 == 0:
            total += b
    return total
def calculate_average(result):
    """Calculates the average package size (mean of the dict's values).

    Raises ValueError when the dictionary is empty.
    """
    if not result:
        raise ValueError("Cannot calculate average on empty dictionary.")
    values = result.values()
    return sum(values) / float(len(values))
def filter_positive_even_numbers(numbers):
    """Receives a list of numbers and keeps only those that are both
    positive and even (divisible by 2)."""
    def positive_and_even(value):
        return value > 0 and value % 2 == 0

    return list(filter(positive_and_even, numbers))
def group_metrics(metrics):
    """
    Groups metrics with the same name but different label values.

    Takes metrics as a list of tuples containing:
    * metric name,
    * metric documentation,
    * dict of label key -> label value,
    * metric value.

    The metrics are grouped by metric name.  All metrics with the same
    metric name must have the same set of label keys.

    Returns a dict keyed by metric name; each name maps to a tuple of:
    * metric documentation,
    * label keys tuple,
    * dict of label values tuple -> metric value.
    """
    grouped = {}
    for metric_name, metric_doc, label_dict, value in metrics:
        curr_label_keys = tuple(label_dict.keys())
        if metric_name not in grouped:
            # First sighting fixes the canonical key order for this metric.
            grouped[metric_name] = (metric_doc, curr_label_keys, {})
            label_keys = curr_label_keys
        else:
            label_keys = grouped[metric_name][1]
            assert set(curr_label_keys) == set(label_keys), \
                'Not all values for metric {} have the same keys. {} vs. {}.'.format(
                    metric_name, curr_label_keys, label_keys)
        label_values = tuple(label_dict[k] for k in label_keys)
        grouped[metric_name][2][label_values] = value
    return grouped
def proglen(s):
    """
    Program length measured in characters, divided by 100 to keep the
    values in a similar range to compressibility, DTW and Levenshtein.
    The divisor is a bit arbitrary.

    :param s: A string of a program phenotype.
    :return: The length of the program divided by 100.
    """
    scale = 100.0
    return len(s) / scale
def str_to_dec(string):
    """Converts fractions in the form of strings to decimals.

    Bug fixes vs. the original:
    * the ``string == None`` guard ran *after* ``string.split()`` and so
      could never fire (None now returns 0 instead of crashing),
    * the unicode-fraction branch computed ``float(string[1:-1])``,
      which grabbed the U+2044 fraction slash itself and always raised;
      the string is now split on the slash,
    * the "three quarter" test was duplicated verbatim; the second copy
      now accepts the plural "three quarters".
    """
    if string is None:
        return 0
    tokens = string.split()
    if string in ("a", "an", "the"):
        return 1
    elif tokens and tokens[-1] == "eighth":
        return 0.125
    elif tokens[-2:] == ["one", "quarter"] or tokens[-2:] == ["a", "quarter"]:
        return 0.25
    elif tokens and tokens[-1] == "half":
        return 0.5
    elif tokens[-2:] == ["three", "quarter"] or tokens[-2:] == ["three", "quarters"]:
        return 0.75
    elif u'\u2044' in string:
        # Alexa seems to automatically convert "one half" to "1/2",
        # "one eighth" to "1/8", etc., but somewhat inconsistently, so
        # both the spoken and the slashed spellings are handled.
        numerator, denominator = string.split(u'\u2044', 1)
        return float(numerator) / float(denominator)
    else:
        try:
            return float(string)
        except Exception:
            raise Exception("Unable to convert that number to decimal.")
def interval_class(
    pitch1: int,
    pitch2: int,
) -> int:
    """Finds the interval class (0-6) between two pitches or pitch-classes.
    """
    residue = abs(pitch2 - pitch1) % 12
    # An interval class is the smaller of the residue and its inversion.
    return min(residue, 12 - residue)
def compute_weighted_percentiles(weighted_values, number_of_percentiles, key=lambda x: x):
    """
    Compute weighted percentiles from a list of values and weights.

    number_of_percentiles evenly distributed percentiles values will be
    returned, including the 0th (minimal value) and the 100th (maximal value).

    A custom key function can be supplied to customize the sort order

    :type weighted_values: list of tuple
    :type number_of_percentiles: int
    :type key: function

    Examples with 0th, 50th and 100th percentiles:

    >>> compute_weighted_percentiles([(2, 0.2), (1, 0.1), (3, 0.7)], 3)
    [1, 3, 3]
    >>> compute_weighted_percentiles([(1, 10), (2, 20), (3, 20)], 3)
    [1, 2, 3]
    >>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 1)
    Traceback (most recent call last):
        ...
    ValueError: number_of_percentiles must be at least 2
    >>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 2)
    [1, 100]
    >>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 3)
    [1, 50, 100]
    >>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 4)
    [1, 34, 67, 100]
    >>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 5)
    [1, 25, 50, 75, 100]
    """
    # Bug fix: only the exact value 1 used to be rejected; 0 or negative
    # values slipped past the check and hung in the while-loop below
    # (len(bounds) / (number_of_percentiles - 1) never exceeds the target).
    if number_of_percentiles < 2:
        raise ValueError("number_of_percentiles must be at least 2")
    ordered_values = sorted(weighted_values, key=lambda weighted_value: key(weighted_value[0]))
    total_weight = sum(weight for value, weight in ordered_values)
    bounds = []
    cumulative_weight = 0
    for value, weight in ordered_values:
        cumulative_weight += weight
        # Emit each percentile bound once the cumulative weight fraction
        # crosses it; a heavy value may satisfy several bounds at once.
        while len(bounds) / (number_of_percentiles - 1) <= cumulative_weight / total_weight:
            bounds.append(value)
    return bounds
def _is_surf(config):
    """Returns True iff we are on the surface.

    A config is "on the surface" when it contains a non-empty
    ``surface_file`` entry. The original expression returned the raw
    config value (e.g. the filename string) when present; ``bool()``
    normalizes the result to the documented True/False contract while
    staying truth-equivalent for all existing callers.
    """
    return bool(config.get("surface_file"))
def _list_union_inter_diff(*lists):
    """Return 3 lists derived from the inputs: the union, the
    intersection, and their difference (elements present in some
    but not all of the input lists) — in that order.
    """
    union = set(lists[0])
    inter = set(lists[0])
    for other in lists[1:]:
        as_set = set(other)
        union |= as_set
        inter &= as_set
    return list(union), list(inter), list(union - inter)
def __control_dict(v):
    """ Wrap a control field value in a dict. """
    return dict(type="control", value=v)
def arg_process(number):
    """Fake function for pytest"""
    return number + 1
def wait_for_line(input_string):
    """
    Should the interpreter wait for another line of input or try to
    evaluate the current line as is.

    Returns True when the line ends with a binary operator or contains
    an unclosed bracket / string; False when it looks complete (or the
    brackets are mismatched, so a parse error can be raised).
    """
    trailing_ops = ['+', '-', '/', '*', '^', '=', '>', '<', '/;', '/:',
                    '/.', '&&', '||']
    stripped = input_string.rstrip()
    if any(stripped.endswith(op) for op in trailing_ops):
        return True

    openers = {'(', '[', '{'}
    matching_opener = {')': '(', ']': '[', '}': '{'}
    stack = []
    for char in input_string:
        if char in openers:
            stack.append(char)
        elif char in matching_opener:
            if not stack:
                return False
            if stack.pop() != matching_opener[char]:
                # Brackets are not balanced, but return False so that a
                # parse error can be raised
                return False
    if not stack and input_string.count('"') % 2 == 0:
        return False
    return True
def tuple_factor(tuple1, factor):
    """Scale both components of a 2-tuple by *factor* and return the result."""
    first = tuple1[0] * factor
    second = tuple1[1] * factor
    return first, second
def find_all_indexes(text, pattern):
    """Return a list of starting indexes of all occurrences of pattern in text,
    or an empty list if not found. Overlapping occurrences are included.
    Time Complexity: O(p * t) -- p being length of pattern and t being length of text
    """
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)

    # A pattern longer than the text can never occur.
    if len(pattern) > len(text):
        return []
    # An empty pattern matches at every character position of the text.
    if pattern == '':
        return list(range(len(text)))

    pattern_length = len(pattern)
    # A slice shorter than the pattern (near the end of text) can never
    # compare equal, so no explicit bounds check is needed.
    return [start for start in range(len(text))
            if text[start:start + pattern_length] == pattern]
def unique_name(name, all_names):
    """Make the name unique by appending "#n" at the end."""
    names = all_names if isinstance(all_names, set) else set(all_names)
    if name not in names:
        return name

    counter = 1
    base, sep, suffix = name.rpartition('#')
    if sep:
        # If the name already carries a numeric "#n" suffix, continue
        # counting from n and drop the suffix from the base name.
        try:
            counter = int(suffix)
        except ValueError:
            pass
        else:
            name = base.rstrip()

    while True:
        candidate = '{} #{}'.format(name, counter)
        if candidate not in names:
            return candidate
        counter += 1
def _check_handle(handle):
    """Checks if provided file handle is valid (non-None with a
    non-negative file descriptor)."""
    if handle is None:
        return False
    return handle.fileno() >= 0
def build_vocab(posts):
    """
    Given the training set of posts, constructs the vocabulary dictionary,
    `tok_to_ix`, that maps unique tokens to their index in the vocabulary.
    """
    tok_to_ix = {}
    for post in posts:
        for token in post.split(' '):
            # First occurrence wins; later duplicates keep the same index.
            if token not in tok_to_ix:
                tok_to_ix[token] = len(tok_to_ix)

    # Manually add our own placeholder tokens
    if '<UNK>' not in tok_to_ix:
        tok_to_ix['<UNK>'] = len(tok_to_ix)
    return tok_to_ix
def order_steps(steps):
    """Return order steps must be taken given their requirements.

    ``steps`` maps a single-character step name to an iterable of the
    steps that must be completed first; ties are broken alphabetically.
    """
    order = ''
    for _ in range(len(steps)):
        candidates = sorted(
            step for step, requirements in steps.items()
            if step not in order
            and all(required in order for required in requirements)
        )
        order += candidates[0]
    return order
def neatify_string_to_list(input_string):
    """Gets me my actual list of items to query.

    Strips square brackets from *input_string* and splits the remainder
    on single spaces, e.g. "[a b c]" -> ['a', 'b', 'c'].

    The stray debug ``print`` of the intermediate string has been
    removed; the return value is unchanged.
    """
    cleaned = input_string.replace('[', '').replace(']', '')
    return cleaned.split(' ')
def get_table_name(table_name):
    """Get table name from full table name (drop everything before the
    first dot; a name without a dot is returned unchanged)."""
    head, sep, tail = table_name.partition('.')
    return tail if sep else table_name
def Get(x, start, end=None, step=None):
    """
    iterable >> Get(start, end, step)

    Extract elements from iterable. Equivalent to slicing [start:end:step]
    but per element of the iterable.

    >>> from nutsflow import Collect

    >>> [(1, 2, 3), (4, 5, 6)] >> Get(1) >> Collect()
    [2, 5]

    >>> [(1, 2, 3), (4, 5, 6)] >> Get(0, 2) >> Collect()
    [(1, 2), (4, 5)]

    >>> [(1, 2, 3), (4, 5, 6)] >> Get(0, 3, 2) >> Collect()
    [(1, 3), (4, 6)]

    >>> [(1, 2, 3), (4, 5, 6)] >> Get(None) >> Collect()
    [(1, 2, 3), (4, 5, 6)]

    :param iterable iterable: Any iterable
    :param indexable x: Any indexable input
    :param int start: Start index for columns to extract from x
           If start = None, x is returned
    :param int end: End index (not inclusive)
    :param int step: Step index (same as slicing)
    :return: Extracted elements
    :rtype: object|list
    """
    if start is None:
        return x
    # Slice whenever end or step is given. The original tested the
    # truthiness of end (``if end``), so end=0 wrongly fell back to
    # plain indexing instead of yielding an empty slice, and a step
    # supplied without an end was silently ignored.
    if end is None and step is None:
        return x[start]
    return x[slice(start, end, step)]
def wmode(x: bytes) -> str:
    """
    Wireless mode decoding

    Args:
        x: byte encoded representation of wireless mode

    Returns:
        String representation of wireless mode ("" for unknown codes)
    """
    modes = {b"\x02": "sta", b"\x03": "ap"}
    return modes.get(x, "")
def is_int(string):
    """
    Checks if a value can be converted to an int

    :param str string: a string of any kind (non-string values are
        tolerated and simply report False when not convertible)
    :return: True if possible, False if not
    :rtype: bool
    """
    try:
        int(string)
        return True
    except (TypeError, ValueError):
        # TypeError covers non-string/non-number inputs (e.g. None or a
        # list), which previously escaped the except clause and crashed
        # the caller.
        return False
def _tgrep_parens_action(_s, _l, tokens):
    """
    Builds a lambda function representing a predicate on a tree node
    from a parenthetical notation.
    """
    # A parenthesised group is exactly three tokens: "(", predicate, ")";
    # unwrap and return the inner predicate.
    assert len(tokens) == 3
    assert tokens[0] == "("
    assert tokens[2] == ")"
    inner_predicate = tokens[1]
    return inner_predicate
def StartsWithBuiltinMessages(messages):
    """Whether the message list starts with the vim built in messages."""
    if len(messages) < 2:
        return False
    maintainer_line = (
        'Messages maintainer: Bram Moolenaar <Bram@vim.org>')
    return not messages[0] and messages[1] == maintainer_line
def utm_isNorthern(latitude):
    """Determine if a latitude coordinate is in the northern hemisphere.

    Arguments
    ---------
    latitude: float
        latitude coordinate (Deg.decimal degrees)

    Returns
    -------
    out: bool
        ``True`` if `latitude` is strictly north of the equator,
        ``False`` otherwise (the equator itself counts as southern).
    """
    if latitude > 0.0:
        return True
    return False
def isMultiline(s):
    """
    Returns C{True} if this string has a newline in it.
    """
    return '\n' in s
def choose_file_location(default=True):
    """
    Created for user's choice of directory.
    Returns the four default file paths when *default* is truthy;
    otherwise prompts interactively until four non-empty paths are
    entered and confirmed with a bare Enter.

    None -> list

    Usage: (suggested)
    > choose_file_location()
    """
    paths = ['docs/locations.list', 'docs/locations.csv',
             'docs/info.csv', 'docs/countries.csv']
    if default:
        return paths

    while True:
        paths[0] = input('Source (<locations.list>) location:')
        paths[1] = input('Dir and name of CSV file#1:')
        paths[2] = input('Dir and name of CSV file#2:')
        paths[3] = input('Dir and name of CSV file#3:')
        confirmation = input('Press Enter if you are sure ..')
        if '' in paths:
            print('Reenter locations and names of files!')
        elif confirmation == '':
            return paths
        # Any other confirmation input re-runs the prompts.
def format_number(n, accuracy=6):
    """Formats a number in a friendly manner (removes trailing zeros and
    unneccesary point; "-0" is normalized to "0")."""
    text = '%.*f' % (accuracy, float(n))
    if '.' in text:
        text = text.rstrip('0').rstrip('.')
    return '0' if text == '-0' else text
def num_to_s8(num):
    """Convert signed number to 8bit (unsigned) via two's complement."""
    in_range = -0x80 <= num < 0x80
    assert in_range, '{} out of range'.format(num)
    return num & 0xff
def makepath(path):
    """
    creates missing directories for the given path and
    returns a normalized absolute version of the path.

    - if the given path already exists in the filesystem
      the filesystem is not modified.

    - otherwise makepath creates directories along the given path
      using the dirname() of the path. You may append
      a '/' to the path if you want it to be a directory path.

    from holger@trillke.net 2002/03/18
    """
    from os import makedirs
    from os.path import normpath, dirname, abspath

    # exist_ok=True replaces the original exists()-then-makedirs() pair,
    # which was racy: another process could create the directory between
    # the two calls and trigger a FileExistsError.
    makedirs(normpath(dirname(path)), exist_ok=True)
    return normpath(abspath(path))
def classify_design_space(action: str) -> int:
    """
    The returning index corresponds to the list stored in "count":
    [sketching, 3D features, mating, visualizing, browsing, other organizing]

    Formulas for each design space action:
    sketching = "Add or modify a sketch" + "Copy paste sketch"
    3D features = "Commit add or edit of part studio feature" +
                    "Delete part studio feature" - "Add or modify a sketch"
    mating = "Add assembly feature" + "Delete assembly feature" + "Add assembly instance"
                + "Delete assembly instance"
    visualizing = "Start assembly drag" + "Animate action called"
    browsing = Opening a tab + Creating a tab + Deleting a tab + Renaming a tab
    other organizing = "Create version" + "Cancel Operation" + "Undo Redo Operation" +
                        "Merge branch" + "Branch workspace" + "Update version"

    :param action: the action to be classified
    :return: the index of the action type that this action is accounted for; if the
    action does not belong to any category, return -1

    Note: "Add or modify a sketch" is special (+1 for sketching and -1 for 3D features),
    return -10
    """
    # Creating a sketch is special as it affects both the sketching and the 3D features counts
    if action == "Add or modify a sketch":
        return -10
    # Sketching
    elif action == "Copy paste sketch":
        return 0
    # 3D features
    elif action in ["Commit add or edit of part studio feature",
                    "Delete part studio feature"]:
        return 1
    # Mating
    # Fixed: the original list was missing a comma between the last two
    # entries, so implicit string concatenation produced the single item
    # "Add assembly instanceDelete assembly instance" and neither action
    # was ever classified as mating.
    elif action in ["Add assembly feature", "Delete assembly feature",
                    "Add assembly instance", "Delete assembly instance"]:
        return 2
    # Visualizing
    elif action in ["Start assembly drag", "Animate action called"]:
        return 3
    # Browsing
    elif "Tab" in action and ("opened" in action or "created" in action or
                              "deleted" in action or "renamed" in action):
        return 4
    # Other organizing
    elif action in ["Create version", "Cancel Operation", "Undo Redo Operation",
                    "Merge branch", "Branch workspace", "Update version"]:
        return 5
    # Not classified (Optional: print out the unclassified actions)
    else:
        return -1
def add_at_idx(seq, k, val):
    """Return *seq* as a tuple with *val* added to (or, for a negative
    *val*, subtracted from) the element at position *k*."""
    items = list(seq)
    # += preserves in-place augmented-assignment semantics for mutable
    # elements, exactly as the original did.
    items[k] += val
    return tuple(items)
def sort_dictionary_to_list_by_keys(dictionary):
    """
    Extract a list of paired tuples with each tuple being a key, value pair
    from the input dictionary and the order of the list according to the
    order of the keys of the dictionary.

    Args:
        dictionary: Input dictionary with keys that have an inherent ordering
    Return:
        Ordered list containing pairs that were keys and values
    """
    return sorted(dictionary.items(), key=lambda pair: pair[0])
def _CheckUploadStatus(status_code):
    """Validates that HTTP status for upload is 2xx.

    Uses floor division: under Python 3 the original
    ``status_code / 100 == 2`` performed true division, so every 2xx
    status except exactly 200 (e.g. 201 Created -> 2.01) was rejected.
    """
    return status_code // 100 == 2
def len_eq(node, length):
    """Return whether the match lengths of 2 nodes are equal.

    Makes tests shorter and lets them omit positional stuff they don't
    care about. A ``None`` node only matches a ``None`` expected length.
    """
    if node is None:
        return None == length
    return (node.end - node.start) == length