content
stringlengths
42
6.51k
def _break_ip_address(cidr_ip_address): """ Function divides the input parameter into IP address and network mask. :param cidr_ip_address: IP address in format of IP/prefix_size :return: IP, prefix_size """ if "/" in cidr_ip_address: ip_address, prefix_size = cidr_ip_address.split("/") else: ip_address = cidr_ip_address prefix_size = 0 return ip_address, prefix_size
def _tuple_replace(s, Lindices, Lreplace): """ Replace slices of a string with new substrings. Given a list of slice tuples in C{Lindices}, replace each slice in C{s} with the corresponding replacement substring from C{Lreplace}. Example: >>> _tuple_replace('0123456789',[(4,5),(6,9)],['abc', 'def']) '0123abc5def9' """ ans = [] Lindices = Lindices[:] Lindices.sort() if len(Lindices) != len(Lreplace): raise ValueError('lists differ in length') for i in range(len(Lindices) - 1): if Lindices[i][1] > Lindices[i + 1][0]: raise ValueError('tuples overlap') if Lindices[i][1] < Lindices[i][0]: raise ValueError('invalid tuple') if min(Lindices[i][0], Lindices[i][1]) < 0 or \ max(Lindices[i][0], Lindices[i][1]) >= len(s): raise ValueError('bad index') j = 0 offset = 0 for i in range(len(Lindices)): len1 = Lindices[i][1] - Lindices[i][0] len2 = len(Lreplace[i]) ans.append(s[j:Lindices[i][0] + offset]) ans.append(Lreplace[i]) j = Lindices[i][1] ans.append(s[j:]) return ''.join(ans)
def str__task_results_to_table(results):
    """Render a results mapping as newline-separated "name,value" rows.

    results: collections.OrderedDict (or any mapping) from
        pypospack.task.TaskManager.results.

    Fix: removed the unused ``table_col_names`` local.
    """
    rows = ["{},{}".format(k, v) for k, v in results.items()]
    return "\n".join(rows)
def with_previous_s4(iterable):
    """Pair every item with the item that came before it.

    Works for any iterable (lists, tuples, strings). The first item is
    paired with None. Returns a list of (item, previous) tuples.
    """
    pairs = []
    prev = None
    for current in iterable:
        pairs.append((current, prev))
        prev = current
    return pairs
def surface_area_cuboid(l, w, h):
    """Return the surface area of a cuboid: 2(lw + wh + lh).

    Also prints the defining module's name (side effect kept from original).
    """
    print('function is from', __name__)
    half_area = l * w + w * h + l * h
    return 2 * half_area
def mystery_2b_no_if(n: int) -> bool:
    """Return the same value as mystery_2b_if, without any if statements."""
    # In Python, n % 2 is always 0 or 1 (even for negative n),
    # so "n % 2 == 1" is exactly "not even".
    even = n % 2 == 0
    rem_one = n % 3 == 1
    return (even and rem_one) or (not even and (n < 0 or (n > 4 and not rem_one)))
def _normalize_deviation_args(lower, upper, msg): """Normalize deviation acceptance arguments to support both "tolerance" and "lower, upper" signatures. This helper function is intended for internal use. """ if isinstance(upper, str) and msg is None: upper, msg = None, msg # Shift values if using "tolerance" syntax. if upper == None: tolerance = lower if tolerance != abs(tolerance): raise ValueError('tolerance should not be negative, ' 'for full control of lower and upper ' 'bounds, use "lower, upper" syntax') lower, upper = -tolerance, tolerance if lower > upper: raise ValueError('lower must not be greater than upper, got ' '{0} (lower) and {1} (upper)'.format(lower, upper)) return (lower, upper, msg)
def UnderscoreToTitleCase(under_score):
    """Convert an under_score-segmented name to title-cased words.

    Args:
      under_score: A name, segmented by under_scores.

    Returns:
      The segments title-cased and joined with spaces
      (e.g. 'foo_bar' -> 'Foo Bar').
    """
    titled = (segment.title() for segment in under_score.split('_'))
    return ' '.join(titled)
def tran(s):
    """Rebuild an integer from a digit string: last digit plus ten times
    the rest (equivalent to int(s) for plain digit strings)."""
    if len(s) == 1:
        return int(s)
    return int(s[:-1]) * 10 + int(s[-1])
def is_complex_parsing_required(value):
    """Determine whether a string needs complex parsing.

    Currently this is decided solely by whether a colon (:) is present.

    Args:
        value (str): the string that will be parsed.

    Returns:
        bool: True when complex parsing is required.
    """
    return value.find(":") != -1
def volume_id_from_cli_create(output):
    """Scrape the volume id out of the 'volume create' command output.

    The nova CLI prints a prettytable during create; this scans it for the
    row whose second whitespace-separated field is "id" and returns the
    fourth field (the value). Returns None when no such row exists.
    """
    for row in output.split("\n"):
        cells = row.split()
        if len(cells) > 4 and cells[1] == "id":
            return cells[3]
    return None
def _repeat_string(source, size): """repeat or truncate <source> string, so it has length <size>""" cur = len(source) if size > cur: mult = (size+cur-1)//cur return (source*mult)[:size] else: return source[:size]
def is_robot_localized(beliefs, true_pos):
    """Decide whether the robot is confidently ("strongly") localized.

    beliefs: 2-D grid (list of rows) of belief values.
    true_pos: (y, x) tuple of the robot's actual position.

    Returns a tuple (localized, best_pos):
      * (None, best_pos) when there is no strong single best belief;
      * (True/False, best_pos) otherwise — True iff best_pos == true_pos.

    A belief is "strong" when the best belief is more than twice the
    second-best belief (or the second-best is effectively zero).
    """
    best_belief = 0.0
    best_pos = None
    second_best = 0.0
    for y, row in enumerate(beliefs):
        for x, belief in enumerate(row):
            if belief > best_belief:
                # New best found: the previous best becomes the runner-up.
                second_best = best_belief
                best_belief = belief
                best_pos = (y,x)
            elif belief > second_best:
                second_best = belief
    # The epsilon guard also prevents a ZeroDivisionError when second_best == 0.
    if second_best <= 0.00001 or best_belief / second_best > 2.0:
        # robot thinks it knows where it is
        localized = best_pos == true_pos
        return localized, best_pos
    else:
        # No strong single best belief
        return None, best_pos
def message_to_list_form(message):
    """Convert *graph nodes/edges and node/edge bindings to list forms.

    NOTE(review): assumes a TRAPI-style message dict with keys "results"
    and "knowledge_graph" — confirm against callers. Results' bindings are
    assumed to be mappings {qg_id: binding_dict}; knowledge-graph nodes and
    edges are assumed to be iterables of dicts that already carry an "id"
    key (the spread below keeps the original "id" since **node overrides).
    """
    if message["results"]:
        # Flatten each result's binding mappings into lists of dicts,
        # embedding the mapping key as "qg_id".
        message["results"] = [
            {
                "node_bindings": [
                    {
                        "qg_id": qg_id,
                        **binding,
                    }
                    for qg_id, binding in result["node_bindings"].items()
                ],
                "edge_bindings": [
                    {
                        "qg_id": qg_id,
                        **binding,
                    }
                    for qg_id, binding in result["edge_bindings"].items()
                ],
            }
            for result in message.get("results", [])
        ]
    if message["knowledge_graph"]["nodes"]:
        # Rebuild each node dict; **node wins, so this is effectively a copy
        # for nodes that already contain "id".
        message["knowledge_graph"]["nodes"] = [
            {
                "id": node["id"],
                **node,
            }
            for node in message["knowledge_graph"]["nodes"]
        ]
    if message["knowledge_graph"]["edges"]:
        message["knowledge_graph"]["edges"] = [
            {
                "id": edge["id"],
                **edge,
            }
            for edge in message["knowledge_graph"]["edges"]
        ]
    return message
def newton(number):
    """Newton's method for the square root of *number*.

    Keeps the original module-level side effect of resetting ``test_num``.

    Fix: floating-point iteration can oscillate between two adjacent floats
    and never satisfy ``y == x``; track the previous iterate and also stop
    when the sequence revisits it, guaranteeing termination.

    NOTE(review): number == 0 raises ZeroDivisionError (as in the original);
    negative input does not converge meaningfully.
    """
    global test_num
    test_num = 0
    x = number / 2
    prev = None
    while True:
        y = (x + number / x) / 2
        if y == x or y == prev:
            return y
        prev, x = x, y
def feline_fixes(start, goal, limit):
    """Compute the edit distance from START to GOAL.

    Recursion stops (returning the remaining length difference) once the
    edit budget ``limit`` is exhausted or either string is empty.
    """
    if limit < 0 or not start or not goal:
        return abs(len(start) - len(goal))
    if start[0] == goal[0]:
        # Matching heads cost nothing.
        return feline_fixes(start[1:], goal[1:], limit)
    # Otherwise pay one edit: insert, delete, or substitute.
    return 1 + min(
        feline_fixes(start, goal[1:], limit - 1),
        feline_fixes(start[1:], goal, limit - 1),
        feline_fixes(start[1:], goal[1:], limit - 1),
    )
def makeTestName(name, configOpts):
    """Build a run/folder name from a base name and ChaNGa configure options.

    Strips every '--enable-' prefix and joins the remaining space-separated
    option words onto the base name with underscores.
    """
    option_words = configOpts.replace('--enable-', '').split(' ')
    return name + '_' + '_'.join(option_words)
def get_credits(data_set) -> dict:
    """Calculate VBS credit amounts and add them to the data set.

    Args:
        data_set (dict): totals for all calculated values; must contain the
            keys '8210s BW', '7100s BW' and '7100s Color'.

    Returns:
        dict: the same ``data_set``, with the three '* Credit' keys added.

    Fix: the return annotation said ``None`` although the function returns
    the (mutated) data set.
    """
    data_set['8210s BW Credit'] = data_set['8210s BW'] * .0019
    data_set['7100s BW Credit'] = data_set['7100s BW'] * .0095
    data_set['7100s Color Credit'] = data_set['7100s Color'] * .04
    return data_set
def _find_lang_with_country_code(tracks_list, lang_code): """ Try to find a language code with country code :param tracks_list: list of tracks where search the language code :param lang_code: the language code to find (2 letters - ISO_639_1) :return: the language code with country code or 'None' if it does not exist """ # The search checks whether a language exists with "-" char. # Usually for the same language there might be two different countries, # e.g. "es" and "es-ES" (that will be converted in "es-Spain" by LOCALE_CONV_TABLE) _stream = next((track for track in tracks_list if track['language'].startswith(lang_code + '-')), None) if _stream: return _stream['language'] return None
def find_corrupt(vcs, revs, corrupt):
    """Find the first corrupt revision and return its index.

    :param vcs: GridVCS instance (unused here, kept for interface)
    :param revs: list of Revision objects (each with an ``id`` attribute)
    :param corrupt: collection of corrupt revision ids, or None
    :return: zero-based index of the first corrupt revision, or None

    Fix: the docstring allows ``corrupt=None`` but ``rev.id in None``
    raised TypeError; an explicit guard now returns None in that case.
    """
    if not corrupt:
        return None
    for n, rev in enumerate(revs):
        if rev.id in corrupt:
            return n
    return None
def array_swap_changed_distances(size, move):
    """Aid function for delta evaluation (array_swap move type).

    Parameters
    ----------
    size : int
        The size of the array.
    move : tuple of int
        Two indices describing a single unique swap.

    Returns
    -------
    set of tuple
        Every (from, to) pair whose evaluation value is altered by the move.
        Index 0 wraps around to pair with index size-1 as (size-1, 0).

    Examples
    --------
    >>> array_swap_changed_distances(10, (4, 8)) == {(3, 4), (4, 5), (7, 8), (8, 9)}
    True
    >>> array_swap_changed_distances(10, (0, 9)) == {(0, 1), (8, 9), (9, 0)}
    True
    """
    changed = set()
    wrap = (size - 1, 0)
    for index in move:
        # pair with the predecessor (wrapping at index 0) ...
        changed.add((index - 1, index) if index != 0 else wrap)
        # ... and with the successor (wrapping at the last index).
        changed.add((index, index + 1) if index != size - 1 else wrap)
    return changed
def is_ascii(test_str):
    """Return True if every character of *test_str* is ASCII (U+0000–U+007F).

    Uses str.isascii() (Python 3.7+) instead of comparing the UTF-8 encoded
    length. This is also more robust: the old encode-based check raised
    UnicodeEncodeError on lone surrogates, whereas isascii() returns False.
    """
    return test_str.isascii()
def get_value(header_string):
    """Return the value inside the square brackets of an SDRF header.

    Takes the text after the *last* '[' and strips trailing ']' characters;
    a string without brackets is returned unchanged.
    """
    tail = header_string.rpartition('[')[2]
    return tail.strip(']')
def status_calc(stock, sp500, outperformance=10):
    """Classify whether a stock outperformed the S&P500.

    :param stock: stock price
    :param sp500: S&P500 price
    :param outperformance: threshold margin; the stock counts as
        outperforming when stock - sp500 >= outperformance
    :return: bool
    :raises ValueError: if outperformance is negative
    """
    if outperformance < 0:
        raise ValueError("outperformance must be positive")
    margin = stock - sp500
    return margin >= outperformance
def has_conflicting_info(passages, qa_answers, nlg_answers):
    """Check whether an example has conflicting answerability information.

    Args:
        passages: list[{"is_selected": int, "passage_text": str}]
        qa_answers: list[str]
        nlg_answers: compared against the literal string "[]"
            (NOTE(review): docstring said list[str] but the code compares a
            string — kept as-is; confirm the data format upstream).

    Returns:
        bool: True when the availability signals disagree.
    """
    has_relevant_passage = any(p["is_selected"] for p in passages)
    qa_available = qa_answers not in (['No Answer Present.'], [""])
    nlg_available = nlg_answers != "[]"
    # an nlg answer exists but no qa answer
    if nlg_available and not qa_available:
        return True
    # a qa answer exists but no relevant passage
    if qa_available and not has_relevant_passage:
        return True
    # a relevant passage exists but no qa answer
    if has_relevant_passage and not qa_available:
        return True
    return False
def is_divisible_by(n: int, divisor: int) -> bool:
    """Return True if *n* is evenly divisible by *divisor*."""
    _, remainder = divmod(n, divisor)
    return remainder == 0
def replace_u_to_t(seq):
    """Replace the U's in a nucleotide sequence with T's (both cases).

    Args:
        seq (str): a nucleotide sequence, or None.

    Returns:
        str: the sequence with U/u replaced by T/t; None passes through.

    Examples:
        >>> replace_u_to_t("ACGU")
        'ACGT'
        >>> replace_u_to_t(None)
    """
    if seq is None:
        return None
    return seq.translate(str.maketrans("Uu", "Tt"))
def gas_estimation_error_message(tx_error: Exception) -> str:
    """Build the standard gas-estimation failure message.

    Use this in ``ProviderAPI`` implementations when handling transaction
    errors so the experience is consistent across providers.
    """
    reason = str(tx_error)
    return (
        "Gas estimation failed: '" + reason + "'. This transaction will likely revert. "
        "If you wish to broadcast, you must set the gas limit manually."
    )
def solar_true_longitude(solar_geometric_mean_longitude, solar_equation_of_center):
    """Return the Solar True Longitude: the Solar Geometric Mean Longitude
    plus the Solar Equation of Center."""
    return solar_geometric_mean_longitude + solar_equation_of_center
def FeatureCollection(features):
    """Wrap *features* in a GeoJSON-style `FeatureCollection` dict."""
    collection = {"type": "FeatureCollection"}
    collection["features"] = features
    return collection
def get_max_size_eci(eci):
    """Find the maximum cluster size encoded in the ECI key names.

    The size is read from the second character of each key.

    :return: maximum cluster size in the given ECIs (0 for an empty mapping)
    :rtype: int
    """
    sizes = (int(name[1]) for name in eci)
    return max(sizes, default=0)
def dict_print(d):
    """Convert a dictionary to a printable "key: value; key: value" string.

    Args:
        d (dict): the dictionary to render.

    Note: entries before the last carry a trailing "; " and entries are
    joined with a single space, so separators render as ";  " (two spaces) —
    preserved from the original formatting.
    """
    entries = []
    for position, (key, value) in enumerate(d.items(), start=1):
        if position < len(d):
            entries.append(f"{key}: {value}; ")
        else:
            entries.append(f"{key}: {value}")
    return " ".join(entries)
def prepare_state(state):
    """Build the algorithm that prepares a computational-basis state.

    state: string of '0's and '1's.
    returns: list of "x(i)" gate strings, one per '1' bit.
    """
    algorithm = []
    for qubit, bit in enumerate(state):
        if bit == "1":
            algorithm.append("x(%d)" % qubit)
    return algorithm
def get_learning_rate(init_lr, iteration, lr_stair_decay_points, lr_decreasing_factor):
    """ Calculate learning rate decay """
    # NOTE(review): boundaries are decay_point * iteration, so every boundary
    # scales with the current iteration. This looks suspicious (one would
    # expect decay_point * steps_per_epoch) — confirm intent with the caller.
    epoch_lr_to_be_decayed_boundaries = [ y * (iteration) for y in lr_stair_decay_points]
    # Candidate LRs: init_lr * factor**k for k = 0 .. len(decay_points).
    epoch_lr_to_be_decayed_value = [init_lr * (lr_decreasing_factor ** y) for y in range(len(lr_stair_decay_points) + 1)]
    lr = init_lr
    # Assumes at least two decay points — IndexError otherwise (TODO confirm).
    if epoch_lr_to_be_decayed_boundaries[0] < iteration <= epoch_lr_to_be_decayed_boundaries[1]:
        lr = epoch_lr_to_be_decayed_value[1]
    if iteration > epoch_lr_to_be_decayed_boundaries[1]:
        lr = epoch_lr_to_be_decayed_value[2]
    return lr
def rearrange_2D_list(nth_list):
    """Transpose a 2-D list.

    An outer list of length m with inner entries of length n becomes an
    outer list of length n with inner entries of length m:
    [[a1,b1,c1],[a2,b2,c2]] => [[a1,a2],[b1,b2],[c1,c2]].
    The width is taken from the first sublist.
    """
    width = len(nth_list[0])
    return [[row[col] for row in nth_list] for col in range(width)]
def is_aw_function(symbol):
    """Heuristic: does the name look like a Windows A/W API variant?

    These are function pairs that, on Windows, accept either a narrow ('A')
    or wide ('W') string. The name must end in 'A' or 'W' preceded by a
    lowercase letter or a digit.
    """
    if len(symbol) < 2 or symbol[-1] not in ("A", "W"):
        return False
    penultimate = symbol[-2]
    return "a" <= penultimate <= "z" or "0" <= penultimate <= "9"
def is_superset(a, b):
    """Check whether every item of b is contained in a.

    Typically used to check that ALL of a list of phrases appear in the
    ngrams returned by an lf_helper.

    :param a: a collection of items
    :param b: a collection of items
    :rtype: bool
    """
    return set(b) <= set(a)
def _best_by_hole_size(policy_self, deep_ann, second_ann): """Best by hole size""" # Holes aren't counted for this prefix if not hasattr(deep_ann, "temp_holes"): return None if len(deep_ann.temp_holes) > len(second_ann.temp_holes): return True elif len(deep_ann.temp_holes) < len(second_ann.temp_holes): return False else: return None
def is_idparam(id):
    """Determine whether an id is a parameter of the form $N (N an integer).

    Returns:
        True if the id is such a parameter, False otherwise.
    """
    if len(id) <= 1 or id[0] != '$':
        return False
    try:
        int(id[1:])
    except ValueError:
        return False
    return True
def tuple_eq_empty_not_eq(t_1, t_2):
    """Tuple equality where two empty tuples count as NOT equal."""
    if not t_1:
        return False
    return t_1 == t_2
def format_version(version):
    """Produce a filesystem-friendly string from an integer version.

    Arguments:
        version (int): version number

    Returns:
        str: "vNNN" zero-padded to three digits.

    Raises:
        TypeError: on a non-integer version (from the %d formatting).

    Example:
        >>> format_version(5)
        'v005'
    """
    return "v" + "%03d" % version
def numeral(number):
    """Return a Roman numeral for an integer (supports values up to 3999)."""
    to_roman = {0: '', 1: 'I', 2: 'II', 3: 'III', 4: 'IV', 5: 'V', 6: 'VI',
                7: 'VII', 8: 'VIII', 9: 'IX', 10: 'X', 20: 'XX', 30: 'XXX',
                40: 'XL', 50: 'L', 60: 'LX', 70: 'LXX', 80: 'LXXX', 90: 'XC',
                100: 'C', 200: 'CC', 300: 'CCC', 400: 'CD', 500: 'D',
                600: 'DC', 700: 'DCC', 800: 'DCCC', 900: 'CM', 1000: 'M',
                2000: 'MM', 3000: 'MMM'}
    # Walk the digits from least significant, looking up digit * place value.
    parts = []
    place = 1
    for ch in reversed(str(number)):
        parts.append(to_roman.get(int(ch) * place))
        place *= 10
    return ''.join(reversed(parts))
def image_orientation(horizontal_flip, side):
    """Return the direction the breast faces in the original image.

    Used by cropping.crop_img_horizontally_from_largest_connected.
    With a horizontal flip the breast faces toward its own side letter;
    without one it faces the opposite direction.
    """
    assert horizontal_flip in ['YES', 'NO'], "Wrong horizontal flip"
    assert side in ['L', 'R'], "Wrong side"
    if horizontal_flip == 'YES':
        return 'right' if side == 'R' else 'left'
    return 'left' if side == 'R' else 'right'
def is_kaprekar(number: int) -> bool:
    """Return whether *number* is a Kaprekar number.

    A Kaprekar number's square can be split into two parts that sum back to
    the number (the right part must be non-zero), e.g. 297**2 = 88209 and
    88 + 209 == 297.

    Fix: for odd-length squares the original skipped the middle digit
    (``squared[half + 1:]``), misclassifying e.g. 297; the split is now a
    clean halving at ``len // 2`` with the extra digit in the right part.
    """
    squared = str(number ** 2)
    if int(squared) < 10:
        # single-digit square: left part is empty, i.e. 0
        first, second = 0, int(squared)
    else:
        half = len(squared) // 2
        first, second = int(squared[:half]), int(squared[half:])
    return first + second == number and second != 0
def intersect(a, b):
    """Return the common items of two sets; empty set if either is None."""
    if a is not None and b is not None:
        return set(a & b)
    return set()
def _converted_string_to_list(full_str: str) -> list: """ Take a SPACE-DELIMITED string and split it into a list. This function is used by the generate-helm-upgrade-jobs subcommand to ensure that the list os added or modified files parsed from the command line is transformed into a list of strings instead of one long string with spaces between the elements """ return full_str.split(" ")
def obj_version_from_env(env):
    """Fetch an object version from a request environment dictionary.

    'null' versions (any case) are discarded since the oio backend does not
    support them.
    """
    version = env.get('oio.query', {}).get('version')
    if isinstance(version, str) and version.lower() == 'null':
        return None
    return version
def ParseAndroidDevices(text):
    """Parse `adb devices` output into a dict of device name -> status.

    Only the section after 'List of devices attached' is considered; each
    non-empty line is expected to be tab-separated as "<name>\t<status>".
    """
    section = text.split('List of devices attached')[-1]
    devices = {}
    for raw_line in section.split('\n'):
        entry = raw_line.strip()
        if not entry:
            continue
        cells = entry.split('\t')
        devices[cells[0]] = cells[1]
    return devices
def build_command(chunks):
    """Create a command string from various parts.

    Parts may include a base, flags, option-bound arguments, and positional
    arguments. Each element is either a string (used nearly verbatim, with
    surrounding spaces trimmed) or a two-tuple (option, argument); tuples
    whose argument is None or "" are dropped. A raw string passed as
    ``chunks`` is returned unchanged.

    :param Iterable[str | (str, str | NoneType)] chunks: command components
    :return str: the assembled command
    :raise ValueError: if no command parts are provided
    """
    if not chunks:
        raise ValueError(
            "No command parts: {} ({})".format(chunks, type(chunks)))
    if isinstance(chunks, str):
        return chunks
    pieces = []
    for part in chunks:
        if part is None:
            continue
        try:
            # Trim only spaces, not all whitespace, so an option that uses
            # e.g. tab as a delimiter is not damaged.
            pieces.append(part.strip(" "))
        except AttributeError:
            option, argument = part
            if argument is None or argument == "":
                continue
            pieces.append("{} {}".format(option.strip(" "),
                                         str(argument).strip(" ")))
    return " ".join(pieces)
def last_success_for_job(job):
    """Return the job's last successful run time, or None if it has
    never completed successfully."""
    return job.get('lastSuccess')
def parse_fingerprints(key):
    """Return a parsed fingerprint string, a tuple of them, or the object's
    ``fingerprint`` attribute.

    Strings pass through; tuples/lists are recursively parsed into a tuple;
    anything else must expose a ``fingerprint`` attribute.
    """
    if isinstance(key, str):
        return key
    if isinstance(key, (tuple, list)):
        return tuple(parse_fingerprints(item) for item in key)
    return key.fingerprint
def match_tagged_items(objects, tags):
    """Find objects whose attributes/values match all given tags.

    :param objects: Maya objects (anything exposing hasAttr/getAttr).
    :param tags: dict, e.g. {'Region': 'Arm', 'Side': 'R', 'Type': 'IK'}.
    :return: list of matching (truthy) objects.
    """
    matches = []
    for obj in objects:
        all_tags_match = all(
            obj.hasAttr(key) and obj.getAttr(key) == value
            for key, value in tags.items()
        )
        # Truthiness check on the object itself kept from the original.
        if all_tags_match and obj:
            matches.append(obj)
    return matches
def temp_gradient(bottom_hole_temperature, surface_temperature, bottom_hole_depth):
    """Temperature gradient calculation.

    Parameters
    ----------
    bottom_hole_temperature : float
        Bottom hole temperature (deg F or deg C)
    surface_temperature : float
        Surface temperature (deg F or deg C)
    bottom_hole_depth : float
        Bottom hole depth (ft or m)

    Returns
    -------
    float
        Temperature gradient in degrees per depth unit (degF/ft or degC/m).
    """
    delta_temp = bottom_hole_temperature - surface_temperature
    return delta_temp / bottom_hole_depth
def damping_heuristic(damping, rho):
    """Levenberg-Marquardt adaptive rule for lambda.

    Takes a lambda (damping) and a rho and returns a new lambda: small rho
    grows lambda, large rho shrinks it. Copied verbatim from the original
    HF paper.
    """
    from numpy import isnan
    shrink = 2. / 3
    grow = 1. / shrink
    if rho < 1 / 4. or isnan(rho):
        factor = grow    # reduction was bad (rho small): increase damping
    elif rho > 3 / 4.:
        factor = shrink  # reduction was good (rho large): decrease damping
    else:
        factor = 1.
    return damping * factor
def encode_chromosome(chrom):
    """Encode a chromosome name as an integer.

    Args:
        chrom (str): the chromosome to encode.

    Returns:
        int: 1-22 for autosomes, 23/24 for X/Y, 25 for XY/YX, 26 for M/MT,
        and 0 for anything unrecognized or out of range.
    """
    special = {"X": 23, "Y": 24, "XY": 25, "YX": 25, "M": 26, "MT": 26}
    name = chrom.upper()
    if name in special:
        return special[name]
    try:
        value = int(name)
    except ValueError:
        return 0
    return value if 1 <= value <= 26 else 0
def split_label_feats(lfeats, split=0.75):
    """Split each label's feature list into train/test (feat, label) pairs.

    For every label, the first ``split`` fraction of its features goes to
    the training list and the remainder to the test list.
    """
    train_feats = []
    test_feats = []
    for label, feats in lfeats.items():
        cutoff = int(len(feats) * split)
        for feat in feats[:cutoff]:
            train_feats.append((feat, label))
        for feat in feats[cutoff:]:
            test_feats.append((feat, label))
    return train_feats, test_feats
def get_maintenance_messages(records):
    """Combine maintenance records into a single " | "-separated string.

    Each record contributes "<title>: <message>".
    """
    return " | ".join("%s: %s" % (record.title, record.message)
                      for record in records)
def _find_logging_config(config): """ Look for the dictionary containing logging-specific configurations, starting in the 'distributed' dictionary and then trying the top-level """ logging_keys = {"logging-file-config", "logging"} if logging_keys & config.get("distributed", {}).keys(): return config["distributed"] else: return config
def simple_fill(text, width=60):
    """Split *text* into exactly equal-sized lines of length <width>.

    A simplified textwrap.fill that avoids the pathological case of one
    long unbroken string (e.g. long DNA sequences). The last line may be
    shorter. Adapted from http://stackoverflow.com/questions/11781261

    Args:
        text (string): the text to split
        width (int): the (exact) length of each line after splitting

    Returns:
        string: lines of length width joined with newlines.
    """
    chunks = (text[start:start + width] for start in range(0, len(text), width))
    return '\n'.join(chunks)
def _get_sinks_with_source(node_str, connections):
    """Return the sink nodes connected to the given source node.

    Args:
        node_str: string representation of a PipelineNode.
        connections: list of PipelineConnection instances.

    Returns:
        list of PipelineNodes whose connection's source matches node_str.
    """
    sinks = []
    for connection in connections:
        if connection.source.is_same_node_str(node_str):
            sinks.append(connection.sink)
    return sinks
def userspec_from_params(user, password):
    """Pack a username and password into a credentials dict."""
    return {"username": user, "password": password}
def parse_line(line):
    """Parse a line like '0 <-> 454, 528, 621' into an id/connections dict."""
    tokens = line.split(" ")
    neighbors = [int(tok.replace(",", "")) for tok in tokens[2:]]
    return {"id": int(tokens[0]), "connected": neighbors}
def int_list_string(ilist, mesg='', nchar=0):
    """Format a list of ints as a space-separated string.

    mesg is printed first; if nchar > 0 it is the minimum character width
    for each value.
    """
    if nchar > 0:
        formatted = ['%*d' % (nchar, value) for value in ilist]
    else:
        formatted = ['%d' % value for value in ilist]
    return mesg + ' '.join(formatted)
def coalesce(*values):
    """Return the first argument that is not None, or None if all are."""
    for value in values:
        if value is not None:
            return value
    return None
def tile_slippy_to_tms(z, x, y):
    """OSGeo Tile Map Service Specification style Y coordinate.

    Same meaning as '{-y}': the Y axis is flipped within the 2**z grid.
    https://josm.openstreetmap.de/wiki/Maps
    """
    flipped_y = (2 ** z - 1) - y
    return z, x, flipped_y
def resolveObliteration(dependencies):
    """ resolve those who depend on me """
    # dependencies: mapping item -> collection of items it depends on.
    # Returns every key of `dependencies`, ordered so that anything that
    # depends on an item is obliterated before the item itself
    # (reverse-dependency depth-first order).
    obliterated = []

    def __obliterate(me):
        # Record `me` once, preserving first-obliteration order.
        if me not in obliterated:
            obliterated.append(me)

    def __depends_on(me):
        # Collect the not-yet-obliterated items whose dependency list
        # contains `me` (excluding `me` itself).
        depends_on_me = set()
        for them in dependencies:
            deps = dependencies[them]
            if me in deps and them is not me:
                if them not in obliterated:
                    depends_on_me.add(them)
        ret = list(depends_on_me)
        return ret if len(depends_on_me) > 0 else None

    def __resolve(me):
        if me in obliterated:
            """ Hey, I'm me and I'm already dead :\\ """
            return
        """ look for those who depend on me """
        depends_on_me = __depends_on(me)
        if depends_on_me is not None:
            """ Some guys depend on me. Let's destroy them! """
            for destroy_them in depends_on_me:
                __resolve(destroy_them)
        """ well, no one depends on me. commit suicide =( """
        __obliterate(me)

    for some_guy in dependencies:
        __resolve(some_guy)
    return obliterated
def computeStandardDeviation(datatype, mean):
    """Population standard deviation of the values, rounded to 3 decimals.

    Each value is converted to float before subtracting the given mean.
    """
    import math
    total = 0
    for value in datatype:
        total += (float(value) - mean) ** 2
    return round(math.sqrt(total / len(datatype)), 3)
def count_failed_students(student_scores):
    """Count the scores at or below 40.

    :param student_scores: list of integer student scores.
    :return: integer count of failing scores.
    """
    return sum(1 for score in student_scores if score <= 40)
def calc_precipitation_excess(throughfall, canopyDrain):
    """Calculate excess precipitation: throughfall plus canopy drainage.

    Parameters
    ----------
    throughfall : int or float
        Throughfall flux [mm day^-1]
    canopyDrain : int or float
        Canopy drainage flux [mm day^-1]

    Returns
    -------
    int or float
        Excess precipitation [mm day^-1]
    """
    return throughfall + canopyDrain
def _jaccard(a, b): """ Return the Jaccard similarity between two sets a and b. """ return 1. * len(a & b) / len(a | b)
def remove_all_gap_columns(texts):
    """Remove columns containing only gap characters from alignment texts.

    Gap characters are '-', '#', '*', '=', 'X', '@'. The column count is
    taken from the first text; all texts are assumed to be the same length.
    """
    gap_chars = ('-', '#', '*', '=', 'X', '@')
    keep = [col for col in range(len(texts[0]))
            if any(text[col] not in gap_chars for text in texts)]
    return [''.join(text[col] for col in keep) for text in texts]
def str2fndef(s, verbose=0):
    """return fndef string from field definition string

    :param verbose: print resulting fndef line

    >>> fnlev, fn, fnlen, fnform, fnopt = str2fndef('1,UT,6,U,DE,DT=E(TIME),NU')
    >>> print( fnlev, fn, fnlen, fnform, fnopt)
    1,'UT',6,'U',{'NU':None,'DE':None,'DT':'TIME'}

    todo: special descriptors not yet supported
          PE option set with members of PE group?

    NOTE(review): reconstructed from a flattened one-line source; the exact
    nesting of a few statements (marked below) is a best-effort reading —
    verify against the original repository before relying on edge cases.
    """
    import re
    fnlev,fn,fnlen,fnform,fnopt=0,'',0,'',{}
    cmnt = ''
    fnam = ''
    poshash = s.find('#')
    possemi = s.find(';')
    s3 = ''
    if poshash==0 or possemi==0: # s.startswith('#'): # comment
        return fnlev,fn,fnlen,fnform,fnopt
    elif poshash < possemi and poshash > 0 :
        s2 = s.split('#') # remove any comment after hash sign
        s3 = s2[1].split(' ',1)
    elif possemi > 0:
        s2 = s.split(';',1) # definition;longname comment
        s3 = s2[1].split(None,1)
        # print( 's=%s' % s)
        # print( 's2=%s, s3=%s' % (s2, s3))
    else:
        s2= s,
    if len(s3)>1:
        fnam,cmnt = s3
    elif len(s3) == 1:
        fnam = s3[0]
    ss = s2[0].split(',') # tokenize by comma separator
    if verbose:
        print( ss)
    if len(ss)>0:
        try:
            fnlev=int(ss[0])
        except ValueError:
            pass # might be special descriptor definition
    if len(ss)>1:
        fn = ss[1].strip() # field name
    if len(ss)>2:
        try:
            fnlen=int(ss[2]) #field length
        except ValueError:
            if ss[2].startswith('PE'): # check if occurrences are specified PE(100)
                mat = re.search("""\( (\d+) \)""", ss[2][2:],re.VERBOSE)
                if mat:
                    occs = int (mat.group(1))
                    fnopt['PE'] = occs
                else:
                    fnopt['PE'] = None
            # NOTE(review): placement of this assignment inside the except
            # branch is inferred from the flattened source.
            fnform = ' ' # format is set to blank
    if len(ss)>3:
        fnform = ss[3].strip() # field format
    for kv in ss[4:]: # field options to dictionary
        oo = kv.split('=')
        if len(oo) <= 1:
            if kv.startswith('MU'): # check if occurrences are specified MU(30)
                mat = re.search("""\( (\d+) \)""", kv[2:],re.VERBOSE)
                if mat:
                    occs = int (mat.group(1))
                    fnopt['MU'] = occs
                    continue # next option
            fnopt[kv]=None
        else:
            o1,o2 = oo[0].strip(),oo[1].strip()
            if o2.startswith('E('):
                o2=o2[2:-1] # remove edit mask
            fnopt[o1] = o2
    if fnam:
        fnopt['longname'] = fnam
    if cmnt:
        fnopt['comment'] = cmnt
    return fnlev, fn, fnlen, fnform, fnopt
def Cross_C3V(vec1, vec2, result):
    """Compute the 3-D cross product vec1 x vec2 into *result*.

    vec1, vec2: indexable objects of length 3.
    result: indexable of length 3 that receives the product in place.
    Returns None.
    """
    a0, a1, a2 = vec1
    b0, b1, b2 = vec2
    result[0] = a1 * b2 - a2 * b1
    result[1] = a2 * b0 - a0 * b2
    result[2] = a0 * b1 - a1 * b0
    return None
def dist_uniform(nwork, workers, id=None):
    """Statically distribute items among workers in contiguous blocks.

    Items are assumed equally weighted and are assigned in rank order; the
    first ``nwork % workers`` workers get one extra item.

    Args:
        nwork (int): number of things to distribute.
        workers (int): number of workers.
        id (int): optionally return just this worker's tuple.

    Returns (tuple):
        (first_item, item_count) for the given id, or the list of tuples
        for all workers when id is None.

    Raises:
        RuntimeError: when id is out of range.
    """
    base, extra = divmod(nwork, workers)
    dist = []
    for rank in range(workers):
        if rank < extra:
            count = base + 1
            first = rank * count
        else:
            count = base
            first = (base + 1) * extra + base * (rank - extra)
        dist.append((first, count))
    if id is None:
        return dist
    if id < len(dist):
        return dist[id]
    raise RuntimeError("worker ID is out of range")
def get_builder_image_url(benchmark, fuzzer, docker_registry):
    """Get the URL of the docker builder image used to build the benchmark
    for fuzzing with the given fuzzer."""
    return docker_registry + '/builders/' + fuzzer + '/' + benchmark
def pv_annuity(n, c, r):
    """Present value of an ordinary annuity.

    n : number of payments
    c : payment amount
    r : discount rate per period

    Uses the standard formula c/r * (1 - 1/(1+r)**n).

    e.g.,
    >>>pv_annuity(29,1,0.08)
    11.158406010577684
    """
    discount = (1 + r) ** n
    return (c / r) * (1 - 1 / discount)
def x_from_sun_moon_time(progress, period, x_range):
    """Rescale progress through a time period onto an integer x range."""
    fraction = progress / period
    return int(fraction * x_range)
def replace_with_phrases(tokens, phrases):
    """Replace applicable tokens in semantically-ordered tokens with their
    corresponding phrase.

    Args:
        tokens (list of str): A list of semantically-ordered tokens.
        phrases (list of str): A list of phrases.

    Returns:
        list of str: A list of tokens in which runs matching a phrase are
        collapsed into that phrase (first match per phrase only).

    Examples:
        >>> print(replace_with_phrases(['it', 'is', 'straight', 'forward'],
        ...                            ['straight forward']))
        ['it', 'is', 'straight forward']
    """
    result = tokens.copy()
    for phrase in phrases:
        phrase_tokens = phrase.split(' ')
        size = len(phrase_tokens)
        for start in range(len(result) + 1 - size):
            if result[start:start + size] == phrase_tokens:
                # Splice at the matched position.  The original code used
                # result.index(phrase_tokens[0]), which finds the FIRST
                # occurrence of the first token and could replace the
                # wrong span when that token also appears earlier.
                result[start:start + size] = [' '.join(phrase_tokens)]
                break
    return result
def dms2deg(valin):
    """Convert a DMS value to decimal degrees.

    Parameters
    ----------
    valin: float
        Input value in DMS.  Can be either: \n
        - a string delimited by : or spaces \n
        - a list of [D,M,S] numbers (floats or ints) \n

    Returns
    -------
    valout : float
        Degrees corresponding to the DMS value.  Note that only the
        degrees term carries the sign, matching the documented examples
        (so '-78:12:34.56' -> -77.7904, not -78.2096).

    Examples
    --------
    # e.g., '-78:12:34.56' corresponds to -77.7904 deg \n
    obs.dms2deg('-78:12:34.56')   #--> -77.79039999999999 \n
    obs.dms2deg('-78 12 34.56')   #--> -77.79039999999999 \n
    obs.dms2deg([-78,12,34.56])   #--> -77.79039999999999
    """
    if type(valin) == str:
        delimiter = ':' if ':' in valin else ' '
        parts = [float(piece) for piece in valin.split(delimiter)]
    else:
        parts = valin
    return parts[0] + parts[1] / 60. + parts[2] / 3600.
def get_previous_season(season):
    """
    Convert a season string e.g. '1819' into the previous season, '1718'.

    Each two-digit year is zero-padded, so decade boundaries work:
    '1011' -> '0910'.  (The old "{}{}" formatting dropped leading zeros
    and produced '910'.)
    """
    start_year = int(season[:2])
    end_year = int(season[2:])
    return "{:02d}{:02d}".format(start_year - 1, end_year - 1)
def find_trigger(trigger_wrapper, trigger_name):
    """Search a collection of triggers for the trigger with the given name.

    :param trigger_wrapper: A collection of triggers in the Google Tag
        Manager List Response format
    :type trigger_wrapper: dict
    :param trigger_name: The name of a trigger to find
    :type trigger_name: str
    :return: A Google Tag Manager trigger, or None when not found
    :rtype: dict
    """
    for candidate in trigger_wrapper.get("trigger", ()):
        if candidate["name"] == trigger_name:
            return candidate
    return None
def has_no_numbers(value):
    """Return True if value is a string containing no digit characters;
    non-strings always yield False."""
    if not isinstance(value, str):
        return False
    return all(not ch.isdigit() for ch in value)
def one_of_k_encoding(x, allowable_set):
    """Map x to a one-hot boolean list over allowable_set.

    Raises Exception when x is not a member of allowable_set.
    """
    if x not in allowable_set:
        raise Exception("input {0} not in allowable set{1}:".format(
            x, allowable_set))
    return [x == member for member in allowable_set]
def getClues(guess, secretNum):
    """Return the pico/fermi/bagels clue string for a guess against the
    secret number (both 3-digit strings)."""
    if guess == secretNum:
        return 'You got it!'
    parts = []
    for pos in range(3):
        if guess[pos] == secretNum[pos]:
            # A correct digit is in the correct place.
            parts.append('Fermi ')
        elif guess[pos] in secretNum:
            # A correct digit is in the incorrect place.
            parts.append('Pico ')
    if not parts:
        return 'Bagels'  # There are no correct digits at all.
    # Join into a single clue string (note: keeps the trailing space,
    # matching the original behavior).
    return ''.join(parts)
def _get_username(agent_string): """Retrieve username from the standard agent string stored in the database, normally formatted as: * username="test", first_name="", last_name="" """ USERNAME = "username=" return agent_string.split(",", 1)[0].replace(USERNAME, "").replace('"', "")
def filterEvent(evt_src=-1, evt_type=-1, evt_value=-1, filters=()):
    """Pipe an event through each filter in strict order; any filter that
    returns None discards the event.

    :param evt_src: (object) object that triggered the event
    :param evt_type: (TaurusEventType) type of event
    :param evt_value: (object) event value
    :param filters: (sequence<callable>) callables, each returning either
        None (to discard the event) or a possibly-transformed
        (evt_src, evt_type, evt_value) tuple

    :return: (None or tuple) the event after passing through all filters,
        or None if any filter discarded it
    """
    current = (evt_src, evt_type, evt_value)
    for flt in filters:
        current = flt(*current)
        if current is None:
            return None
    return current
def mean(values):
    """Arithmetic mean of the values, always returned as a float."""
    total = float(sum(values))
    return total / len(values)
def _merge(interval1, interval2): """ Finds merge of two intersecting intervals. This function should be called only if it's checked that intervals intersect, e.g. if "_intersect(interval1, interval2)" is True :param interval1: list [a, b], where 'a' is the left border and 'b' is the right border :param interval2: list [c, d], where 'c' is the left border and 'd' is the right border :return: new interval that contains only both intervals """ return [min(interval1[0], interval2[0]), max(interval1[1], interval2[1])]
def always_list(x):
    """Coerce x to a list.

    A string is split on commas with each piece stripped of surrounding
    whitespace; any other iterable is materialized with list().

    (Replaces the previous ``six.string_types`` check — an unnecessary
    third-party dependency on Python 3, where ``str`` covers it.)
    """
    if isinstance(x, str):
        return [part.strip() for part in x.split(',')]
    return list(x)
def filter_topics(docs):
    """
    Read all documents and build a list of two-tuples of (label, body).

    The label is the index of the first of a document's topics that appears
    in the interesting-categories list; documents with no topics, no body,
    or no interesting topic are dropped.  For example, ('money', 'body')
    becomes (0, 'body').
    """
    categories = ['money','fx', 'crude','grain', 'trade',
                  'interest', 'wheat', 'ship', 'corn',
                  'oil', 'dlr', 'gas', 'oilseed', 'supply',
                  'sugar', 'gnp', 'coffee', 'veg', 'gold',
                  'soybean','bop','livestock', 'cpi',
                  'money-fx','money-supply','veg-oil']
    ref_docs = []
    for doc in docs:
        topics, body = doc[0], doc[1]
        # skip documents with no topics or no body
        if topics == [] or topics == "" or body == "":
            continue
        for topic in topics:
            if topic in categories:
                ref_docs.append((categories.index(topic), body))
                break
    return ref_docs
def clean_wiki_extract(data):
    """
    Change the content format of an extract returned by the wiki api.

    Drops everything from the "See also" section onward (references,
    further reading, external links), converts wiki heading markers to
    tag-style markers, and strips newlines, tabs and backslashes.
    """
    # partition() tolerates a missing or repeated "See also" marker;
    # the previous two-target split() raised ValueError unless the
    # marker appeared exactly once.
    data = data.partition('\n== See also')[0]
    # change wiki tag representation to text equivalent
    replacements = [
        ('\n== ', '<heading>'),
        (' ==\n', '</heading>'),
        ('\n=== ', '<subheading>'),
        (' ===\n', '</subheading>'),
        ('\n==== ', '<sub_subheading>'),
        ('====\n', '</sub_subheading>'),
        # clean redundant symbols
        ('\n', ''),
        ('\t', ''),
        ('\\', ''),
    ]
    for old, new in replacements:
        data = data.replace(old, new)
    return data
def SplitLines( contents ):
    """Return a list of each of the lines in the byte string |contents|.

    Behaves like bytes.splitlines except that:
     - the empty string yields [ b'' ];
     - a trailing newline adds a final empty entry, i.e.
       SplitLines( b'\\n' ) returns [ b'', b'' ], not [ b'' ]."""
    if contents == b'':
        return [ b'' ]
    lines = contents.splitlines()
    if contents.endswith( ( b'\r', b'\n' ) ):
        lines.append( b'' )
    return lines
def isinstance_qutip_qobj(obj):
    """Duck-type check for a qutip Qobj without importing qutip.

    An object qualifies when its class is named "Qobj" and it carries a
    ``_data`` attribute whose class is named "fast_csr_matrix".

    Args:
        obj (any): Any object for testing.

    Returns:
        Bool: True if obj is a qutip Qobj.
    """
    if type(obj).__name__ != "Qobj":
        return False
    if not hasattr(obj, "_data"):
        return False
    return type(obj._data).__name__ == "fast_csr_matrix"
def build_dependencies(intertable_dependencies):
    """Figure out which tables depend on which other ones (through
    foreign keys).

    intertable_dependencies is an iterable of Dependency objects.

    Returns two things:
        1. a dict mapping each parent table name to its set of dependencies
        2. a dict mapping (tablename, fieldname) pairs to the target table
    """
    dependencies = {}
    reference_fields = {}
    for dep in intertable_dependencies:
        dependencies.setdefault(dep.table_name_from, set()).add(dep)
        key = (dep.table_name_from, dep.field_name)
        reference_fields[key] = dep.table_name_to
    return dependencies, reference_fields
def pkg_dir(workspace_root, package_name):
    """Returns a relative path to a package directory from the root of the
    sandbox. Useful at execution-time or run-time."""
    if workspace_root:
        if package_name:
            return workspace_root + "/" + package_name
        return workspace_root
    return package_name if package_name else "."
def share_edge(label, uv1, uv2, h, w):
    """Determine whether two uv coordinates share a border edge.

    Two points share an edge when they have equal u (or v) coordinates and
    that coordinate lies on the texture border: 0 or w for u, 0 or h for v.
    ``label`` is unused but kept for interface compatibility.

    The previous version fell off the end (returning None) whenever the
    coordinates were equal but not on a border, and its elif skipped the
    v-axis test when the u coordinates happened to be equal; this version
    checks both axes and always returns a bool.
    """
    if uv1[0] == uv2[0] and uv1[0] in (0, w):
        return True
    if uv1[1] == uv2[1] and uv1[1] in (0, h):
        return True
    return False
def _sort_by_build_order(lib_names, lib_dict, deps_key_name, verbose=False): """Sort library names to form correct build order. Use metadata from lib_dict""" # we find correct build order by performing a topological sort # expected output: if library B depends on A, A should be listed first # all libs that are not in the dictionary are considered external. external_deps = list( sorted([lib_name for lib_name in lib_names if lib_name not in lib_dict ])) if verbose: print('topo_ordering ' + str(lib_names)) print(' external_deps ' + str(external_deps)) result = list(external_deps) # external deps will be listed first while len(result) < len(lib_names): more_results = [] for lib in lib_names: if lib not in result: dep_set = set(lib_dict[lib].get(deps_key_name, [])) dep_set = dep_set.intersection(lib_names) # if lib only depends on what's already built, add it to the results if not dep_set.difference(set(result)): more_results.append(lib) if not more_results: raise Exception( 'Cannot sort topologically, there seems to be a cyclic dependency' ) if verbose: print(' adding ' + str(more_results)) result = result + list( sorted(more_results )) # when build order doesn't matter, sort lexicographically return result
def str_ellipsis(txt, length=60):
    """Truncate each over-wide line of txt with a trailing ' ...'."""
    shortened = []
    for line in str(txt).splitlines():
        if len(line) > length:
            line = line[: length - 4] + " ..."
        shortened.append(line)
    return "\n".join(shortened)
def get_entrypoint(request_path):
    """
    Get the entrypoint url from a request path, splitting off sub-indexes
    and query strings; an empty result falls back to '/index'.
    """
    without_index = request_path.replace('/index', '')
    entrypoint = without_index.split('?')[0]
    return entrypoint if entrypoint else '/index'
def ImportString(context, object):
    """
    f:import-string takes a Unicode FO and returns an XPath string.
    It is an error if the FO contains illegal XML chars (although this
    function might eventually be extended to recover from that error).
    """
    # FIXME: validate that the value is a legal XPath string, possibly
    # mapping illegal characters to the PUA; an ExportString counterpart
    # would then be needed to shift them back.
    return object