content
stringlengths
42
6.51k
def sharded_filenames(filename_prefix, num_shards):
    """Build the standard shard file names for a prefix and shard count."""
    names = []
    for shard in range(num_shards):
        names.append("%s-%05d-of-%05d" % (filename_prefix, shard, num_shards))
    return names
def int_to_byte(n):
    """Convert an int in [0, 255] into a single big-endian byte."""
    return n.to_bytes(length=1, byteorder='big')
def nearest_multiple(value, multiple, scale=1):
    """Return the power of *multiple* nearest to value*scale.

    For nearest_multiple(129, 2, 1) the output is 128, i.e. 2**7.
    """
    base = multiple
    target = value * scale
    power = multiple
    # Grow powers of the base until we overshoot the target.
    while power <= target:
        power *= base
    # Keep whichever of power and power/base is relatively closer to target.
    if power / target < target / (power / base):
        return power
    return power // base
def fix_species_name(spname): """Return one species name lowercase-dashseparated.""" return spname.replace(".", "").replace("? ", "").replace( "(?)", "").replace("?", "").strip().replace(" ", "-").lower()
def page_not_found(e):
    """Return a custom 404 error response tuple."""
    body = 'sorry, nothing at this url.'
    return body, 404
def pct_to_kelvin(pct, max_k=6500, min_k=2700):
    """Map a percentage onto the [min_k, max_k] kelvin range."""
    span = max_k - min_k
    return min_k + span * pct / 100
def str_to_bytes(s):
    """Encode a str to bytes (UTF-8); pass anything else through unchanged."""
    return s.encode() if isinstance(s, str) else s
def get(obj, k, default=None):
    """Return obj[k], falling back to *default* when the lookup fails.

    If default is callable, it is invoked as default(obj, k) instead.
    """
    try:  # pragma: no cover
        return obj[k]
    except (KeyError, AttributeError, TypeError, IndexError):  # pragma: no cover
        return default(obj, k) if callable(default) else default
def _remove_empty_items(d, required): """Return a new dict with any empty items removed. Note that this is not a deep check. If d contains a dictionary which itself contains empty items, those are never checked. This method exists to make to_serializable() functions cleaner. We could revisit this some day, but for now, the serialized objects are stripped of empty values to keep the output YAML more compact. Args: d: a dictionary required: list of required keys (for example, TaskDescriptors always emit the "task-id", even if None) Returns: A dictionary with empty items removed. """ new_dict = {} for k, v in d.items(): if k in required: new_dict[k] = v elif isinstance(v, int) or v: # "if v" would suppress emitting int(0) new_dict[k] = v return new_dict
def update_dict_recursively(dst, src, tuples_too=False, overwrite_by_none=True):
    """
    Update `dst` dictionary recursively using items in `src` dictionary.

    Parameters
    ----------
    dst : dict
        The destination dictionary (modified in place and returned).
    src : dict
        The source dictionary.
    tuples_too : bool
        If True, recurse also into dictionaries that are members of tuples.
    overwrite_by_none : bool
        If False, do not overwrite destination dictionary values by None.

    Returns
    -------
    dst : dict
        The destination dictionary.
    """
    def tuplezip(a):
        # Merge one element pair from zip(src tuple, dst tuple):
        # recurse when both members are dicts, otherwise keep the dst member
        # (a[1] is the dst element because of the zip(src, dst) order below).
        if isinstance(a[0], dict) and isinstance(a[1], dict):
            return update_dict_recursively(a[0], a[1], True)
        return a[1]
    for key in src:
        if key in dst:
            # Both values are dicts -> merge recursively.
            if isinstance(src[key], dict) and isinstance(dst[key], dict):
                dst[key] = update_dict_recursively(dst[key], src[key], tuples_too)
                continue
            # Both values are tuples -> merge element-wise (only when enabled).
            if tuples_too and isinstance(dst[key], tuple) \
               and isinstance(src[key], tuple):
                out = map(tuplezip, zip(src[key], dst[key]))
                out = tuple(out)
                # Truncate to the original dst tuple length.
                dst[key] = out[:len(dst[key])]
                continue
        # Plain value (or key missing in dst): overwrite/insert, unless the
        # source value is None and overwrite_by_none is disabled.
        if overwrite_by_none or not src[key] is None:
            dst[key] = src[key]
    return dst
def getAllFWImageIDs(fwInvDict):
    """Return the firmware image IDs found in a firmware inventory dict.

    An entry counts as an image when it carries a 'Version' key; the ID is
    the last path segment of the entry's key.
    """
    return [
        key.split('/')[-1]
        for key, entry in fwInvDict.items()
        if 'Version' in entry
    ]
def relative_matrix(matrix_a, matrix_b):
    """Build a relative matrix from two matrices: (a - b) / b * 100%.

    matrix_a is the new matrix, matrix_b the base matrix; values are rounded
    to whole percent. Keys are taken from the base matrix.
    """
    result = {}
    for coords, base in matrix_b.items():
        delta = float(matrix_a[coords] - base)
        result[coords] = int(round(delta / base * 100))
    return result
def describe_bug(bug):
    """Return a short textual label for a single bug dict."""
    bug_type, bug_id = bug['type'], bug['id']
    if bug_type == 'RHBZ':
        return 'rhbz#%d' % bug_id
    return '%s %s' % (bug_type, bug_id)
def maximum_of(*column_names, **extra_columns):
    """
    Return the maximum value over the supplied columns e.g.

        max_value=patients.maximum_of("some_column", "another_column")

    Additional columns can be defined within the function call which will be
    used in computing the maximum but won't themselves appear in the output:

        max_value=patients.maximum_of(
            "some_column", another_colum=patients.with_these_medications(...)
        )
    """
    # NOTE: the return value captures locals() on purpose, so every local name
    # bound in this body becomes part of the returned query definition — do
    # not introduce extra local variables here.
    aggregate_function = "MAX"
    column_names = column_names + tuple(extra_columns.keys())
    return "aggregate_of", locals()
def removeKey(d, key):
    """Return a shallow copy of *d* without *key*.

    Raises KeyError when the key is absent, like the original `del`.
    """
    trimmed = dict(d)
    trimmed.pop(key)  # pop without a default raises KeyError, matching del
    return trimmed
def _any_positive_rows(rows, start, end, thresholds): """ Searches through a set of feature annotations for positive examples according to a threshold specific to each feature. For each feature in `rows`, the overlap between the feature and the query region must be greater than that feature's threshold to be considered positive. Parameters ---------- rows : list(tuple(int, int, str)) or None A list of tuples of the form `(start, end, feature_name)`, or `None`. start : int The 0-based start coordinate of the region to query. end : int One past the last coordinate of the region to query. thresholds : dict A dictionary mapping feature names (`str`) to thresholds (`float`), where the threshold is the minimum fraction of a region that must overlap with a label for it to be considered a positive example of that label. Returns ------- bool `True` if there is at least one feature in `rows` that meets its feature-specific cutoff. `False` otherwise, or if `rows==None`. """ if rows is None: return False for row in rows: # features within [start, end) is_positive = _is_positive_row( start, end, int(row[1]), int(row[2]), thresholds[row[3]]) if is_positive: return True return False
def line_range(lines, ind1, comment_flag='#'):
    """Return the index of the next comment line after *ind1*.

    A line is a comment when it starts with *comment_flag* ('#' by default).
    When no later comment line exists, len(lines) is returned so the result
    always bounds a range of data lines.
    """
    width = len(comment_flag)
    idx = ind1 + 1
    while idx < len(lines):
        if comment_flag in lines[idx][0:width]:
            return idx
        idx += 1
    return len(lines)
def get_empty_action_space(num_actions):
    """Return an action vector of the given length with nothing selected."""
    return [0 for _ in range(num_actions)]
def calculateFumenOffset(block, rotation):
    """Map a fumen tetromino index and rotation state to the (x, y) offset
    applied to TGM2+'s location coordinate."""
    if block == 1 and rotation in (1, 3):
        return (1, 0)
    if block in (2, 5, 6) and rotation == 2:
        return (0, -1)
    return (0, 0)
def cica_const_ratio(ambient_co2, const):
    """Return ci = intercellular CO2 concentration, kg/m^3, assuming the
    ci/ca ratio is constant.

    Fix: the original had two consecutive string statements; only the first
    was a docstring, the second was a dead expression — merged into one.
    """
    return const * ambient_co2
def extract_spreadsheet_id(string):
    """Extract the spreadsheet id from a Google Sheets URL.

    A bare id is returned unchanged; trailing '/edit...', query ('&...') and
    fragment ('#...') parts are stripped.
    """
    if "/edit" in string:
        string = string.partition("/edit")[0]
    if "/" in string:
        string = string.rstrip("/").rsplit("/", 1)[-1]
    for separator in ("&", "#"):
        string = string.partition(separator)[0]
    return string
def int_from_digits(digits):
    """Fold a most-significant-first digit sequence into the integer it spells.

    Accepts any iterable (list, tuple, generator), e.g.::

        [1, 2, 3]    -> 123
        (2, 4, 5, 1) -> 2451

    An empty sequence yields 0.
    """
    value = 0
    for digit in digits:
        value = value * 10 + digit
    return value
def hexstr(s):
    """Compact inline hexdump of a character sequence."""
    return ' '.join('%02x' % ord(ch) for ch in s)
def get_data_type(value):
    """Return the name of *value*'s type as a string."""
    type_name = type(value).__name__
    return str(type_name)
def most_frequent(s):
    """Count a-z letter occurrences in *s* (case-insensitive).

    Returns (count, letter) tuples sorted by descending count, then by
    descending letter for ties.
    """
    counts = {}
    for ch in s.lower():
        if 'a' <= ch <= 'z':
            counts[ch] = counts.get(ch, 0) + 1
    pairs = [(n, letter) for letter, n in counts.items()]
    return sorted(pairs, reverse=True)
def _escape_special_params(elem, gen_param: str) -> str: """ Handles special parameters in a generator expression: - #text : current element text - #len : current element text length - ## : # sing :param elem: XML element :param gen_param: unprocessed generator expression :return: processed generator expression """ if gen_param == '#text': return elem.text elif gen_param == '#len': return str(len(elem.text)) elif gen_param.startswith("##"): return gen_param[1:] else: return gen_param
def filter_coordinate(coordinate):
    """Round a coordinate to three decimal places.

    1 mm accuracy is enough for coordinates.
    """
    rounded = round(coordinate, 3)
    return rounded
def word(i: int) -> int:
    """Overflow a Python arbitrary precision integer into a 16-bit unsigned
    integer.

    Fixes two defects in the original:
    - `i % 0xFFFF` reduced modulo 65535 instead of 65536, so e.g. 0xFFFF
      wrapped to 0 instead of staying 0xFFFF;
    - the negative-input dance (`i = -i; i = ~i + 1`) just reproduced -i.
    Masking with 0xFFFF keeps the low 16 bits, and Python's `&` on negative
    ints yields the two's-complement low bits (word(-1) == 0xFFFF).
    """
    return i & 0xFFFF
def getDuplicateElements(lst):
    """Return the distinct elements that appear more than once in *lst*.

    Raises:
        ValueError: when *lst* is None or not a list.

    Fixes: `lst == None` / `type(lst) != list` replaced with isinstance, and
    the O(n^2) `lst.count(x)` per element replaced with a single O(n) pass.
    Like the original, elements must be hashable; result order is unspecified.
    """
    if not isinstance(lst, list):
        raise ValueError("lst must be a list.")
    seen = set()
    duplicates = set()
    for item in lst:
        if item in seen:
            duplicates.add(item)
        else:
            seen.add(item)
    return list(duplicates)
def fibonacci(n):
    """Return the n-th Fibonacci number with fibonacci(0) == fibonacci(1) == 1.

    Computed by naive recursion: fibonacci(n - 1) + fibonacci(n - 2).
    Prints each visited n (debug trace kept from the original).
    """
    print(n)
    if n in (0, 1):
        return 1
    return fibonacci(n - 1) + fibonacci(n - 2)
def queryset_to_dict(qs, key):
    """Transform a queryset into a dictionary keyed on ``key``.

    Each object is stored under ``str(getattr(obj, key))``.

    Fix: the original ignored the *key* parameter and always used ``q.id``,
    contradicting its own docstring. Callers passing key='id' see identical
    results.
    """
    result = {}
    if qs:
        for obj in qs:
            result[str(getattr(obj, key))] = obj
    return result
def value_from_list_of_bits(lst):
    """Interpret a list of bits (most significant first) as an integer."""
    bit_string = ''.join(str(bit) for bit in lst)
    return int(bit_string, 2)
def top_greatest_hits(sorted_clusters_target_species_hits_list, top_x_tfs_count): """ Identify the best scoring hits up to some threshold of number of tfs. Allows plotting more than one instance of a top tf, without increasing the total tf used count. e.g. 3 instances of KLF4 will count as only one tf used towards the top_x_tfs_count threshold. """ # to keep track of how many tfs have been added top_x_tfs = [] # to store the x greatest tfs and their locations top_x_greatest_hits_dict = {} added_count = 0 # add all hits to single pool so top hits can be identified for sorted_clusters_target_species_hit in sorted_clusters_target_species_hits_list: # ref-point tf_name = sorted_clusters_target_species_hit[0] ## if (len(top_x_tfs) < top_x_tfs_count): if added_count < 1000: # keep track of what & how many tfs have been added if tf_name not in top_x_tfs: top_x_tfs.append(tf_name) # add the hit to the top hits if the count threshold has not been met if tf_name in top_x_greatest_hits_dict: top_x_greatest_hits_dict[tf_name].append(sorted_clusters_target_species_hit) else: top_x_greatest_hits_dict[tf_name] = [sorted_clusters_target_species_hit] added_count += 1 return top_x_greatest_hits_dict
def get_freq_band(freq_band_name, freq_band_names, freq_bands):
    """Return the frequency band matching *freq_band_name*.

    Looks the name up in the parallel lists *freq_band_names*/*freq_bands*;
    returns None when the name is unknown.

    Fix: removed two debug prints leaking to stdout and the duplicated
    `.index()` scan.
    """
    if freq_band_name in freq_band_names:
        position = freq_band_names.index(freq_band_name)
        return freq_bands[position]
    return None
def getBytesSize(bytesIn=0, suffix="B"):
    """
    Scale a byte count into a human-readable string, e.g.:

        1253656    => '1.20MB'
        1253656678 => '1.17GB'

    Returns int 0 for None, zero, negative or non-numeric input.

    Fixes: the original guard `if bytesValue is None or 0:` was a no-op
    (`or 0` is always falsy), and byte counts beyond the 'P' unit fell
    through to return a raw float instead of a formatted string.
    """
    # bool is an int subclass, so getBytesSize(True) still formats "1.00B"
    # like the original did.
    if not isinstance(bytesIn, (int, float)) or bytesIn <= 0:
        return 0
    factor = 1024
    value = float(bytesIn)
    for unit in ("", "K", "M", "G", "T", "P"):
        if value < factor:
            return f"{value:.2f}{unit}{suffix}"
        value /= factor
    # Larger than every covered unit: report in the largest one.
    return f"{value * factor:.2f}P{suffix}"
def test_size(STDictonary_root, count=0): """Test function for the size counter of the STDict-class, STDictonary_root = root of STDict to test , O(n)""" if STDictonary_root is None: return count else: count += 1 count = test_size(STDictonary_root._left, count) count = test_size(STDictonary_root._right, count) return count
def get_split_attributes_by_type(config):
    """Return the "split_attributes_by_type" config setting (default False)."""
    default = False
    return config.get("split_attributes_by_type", default)
def get_unnasigned_json(json_data, populated_object):
    """Return the part of *json_data* not used to populate *populated_object*.

    Keys are matched against the object's attribute names (which must be
    identical to the JSON keys); the result is a dict of the leftover
    key/value pairs.
    """
    if not json_data:
        return {}
    used_names = set(populated_object.__dict__.keys())
    leftover = set(json_data) - used_names
    return {key: json_data[key] for key in leftover}
def put_ai_mark_row_col(board, ai_mark, player_mark): """Put AI mark in a row or column if there is only one AI mark and two dots. WARNING: works only if len_board() == 3""" # copy of board board_copy = board.copy() # Changing 'board' lines from vertical to horizontal and put in 'tmp_list', # this means that the vertical lines of the list are the horizontal tmp_vertical = [] for i in range(3): tmp_in_v = [] for j in range(3): tmp_in_v.append(board[j][i]) tmp_vertical.append(tmp_in_v) # Adding two lists board_copy, tmp_list and two diagonal lines lines = board_copy + tmp_vertical counter = 0 row = 0 for line in lines: if counter <= 2: # Searching in horizontal lines one mark and two dots in this case # is adding AI mark to the line. if line.count(ai_mark) == 1 and line.count(".") == 2: if board[0][0] == player_mark or board[2][0] == player_mark: if board[row][0] == ".": board[row][0] = ai_mark return True elif board[0][2] == player_mark or board[2][2] == player_mark: if board[row][2] == ".": board[row][2] = ai_mark return True else: board[row][line.index(".")] = ai_mark return True row += 1 elif 2 < counter <= 5: # Searching in vertical lines one mark and two dots in this case # is adding AI to the line. if counter == 3: row = 0 if line.count(ai_mark) == 1 and line.count(".") == 2: if board[0][0] == player_mark or board[2][0] == player_mark: if board[0][row] == ".": board[0][row] = ai_mark return True elif board[0][2] == player_mark or board[2][2] == player_mark: if board[2][row] == ".": board[2][row] = ai_mark return True else: board[line.index(".")][row] = ai_mark return True row += 1 counter += 1 return False
def check_changed_repo(repos, prefix, super_visor, requires, blacklist):
    """
    Helper to compare current yaml and previous yaml.

    Args (shapes inferred from use — confirm against callers):
        repos: pair of repo lists; repos[0] is the current state, repos[1]
            the previous one; each repo is a dict with at least "name"/"type".
        prefix: string prepended to repo names when looking up owning SIGs.
        super_visor: mapping of prefixed repo name -> set of SIG names.
        requires: callable(repo, blacklist) -> int error count.
        blacklist: passed through to *requires*.

    Returns:
        (errors_found, sigs_attention): total error count and the set of SIGs
        that need to review the changes.
    """
    errors_found = 0
    # Index current repos by name for O(1) lookup.
    curr_dict = {f["name"]: f for f in repos[0]}
    remove_repos = set()
    sigs_attention = set()
    for repo in repos[1]:
        if repo["name"] in curr_dict:
            # A public -> private transition stays in curr_dict so it is
            # re-checked below; everything else unchanged is dropped.
            if repo["type"] == "private" and curr_dict[repo["name"]]["type"] == "public":
                continue
            else:
                curr_dict.pop(repo["name"])
        else:
            # Present before but missing now: candidate deletion.
            remove_repos.add(repo["name"])
    for name in curr_dict:
        curr_repo = curr_dict[name]
        sigs = super_visor.get(prefix + curr_repo["name"], set())
        print("INFO: adding " + curr_repo["name"] + " to SIG " + str(sigs))
        sigs_attention = sigs_attention | sigs
        errors_found += requires(curr_repo, blacklist)
        # A rename is not a deletion: drop the old name from the removal set.
        if curr_repo.get("rename_from", "") in remove_repos:
            remove_repos.remove(curr_repo.get("rename_from"))
    for rm_name in remove_repos:
        # NOTE(review): deletions are looked up with rm_name.lower() while
        # additions above use the raw name — confirm the casing difference
        # is intentional.
        sigs = super_visor.get(prefix + rm_name.lower(), set())
        sigs_attention = sigs_attention | sigs
        print("WARNING! deleting " + prefix + "%s." % rm_name)
    return errors_found, sigs_attention
def bumpVersion(oldVersion):
    """Bump the version's final numeric component by 1.

    Is dumb and expects a form like "0.0"; "0.9" becomes "0.10".
    """
    dot = oldVersion.rfind('.')
    patch = int(oldVersion[dot + 1:]) + 1
    return oldVersion[:dot + 1] + str(patch)
def init_step(idx, cols):
    """Find the init suffix in a column list.

    Parameters
    ----------
    idx: int
        Index of the 'init' column in cols.
    cols: list[str]
        List of column names.

    Returns 'init-<name>' for the first column at or after *idx* that is not
    literally 'init', or None when every remaining column is 'init'.
    """
    for name in cols[idx:]:
        if name != 'init':
            return 'init-' + name
    return None
def add(path: str, content: str, encoding: str = "utf-8") -> int:
    """Append *content* to the file at *path*, creating it when missing.

    Returns the number of characters written.
    """
    with open(path, mode="a", encoding=encoding) as handle:
        return handle.write(content)
def count_nines(loss_rate):
    """Count how many decades (0.1, 0.01, ...) are >= *loss_rate*.

    i.e. the number of nines after the decimal point before some other digit
    happens; returns 0 when the threshold underflows to zero.
    """
    nines = 0
    threshold = 0.1
    while threshold >= loss_rate:
        threshold /= 10.0
        nines += 1
        if threshold == 0.0:
            return 0
    return nines
def get_common_prefix_len(s1, s2):
    """Return the length of the longest common prefix of two strings.

    @param s1: the first string to compare
    @param s2: the second string to compare
    @returns: the length of the longest common prefix of the two strings

    Fix: the original enumerate-and-break version under-counted by one when
    one string was a prefix of the other (e.g. ("ab", "ab") returned 1, not
    2), because `i` stopped at the last compared index instead of advancing
    past it.
    """
    length = 0
    for x, y in zip(s1, s2):
        if x != y:
            break
        length += 1
    return length
def pull_main_opt(bits, pos):
    """Pull the main token from *bits* at *pos*.

    A '!' token also consumes the following token ("! <next>").

    :param bits: token sequence
    :param pos: current position
    :return: (val, new_pos)
    """
    val, pos = bits[pos], pos + 1
    if val == '!':
        val = '! ' + bits[pos]
        pos += 1
    return val, pos
def fib(n):
    """Calculate Fibonacci of `n` (fib(0) == 0, fib(1) == 1) via recursion."""
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fib(n - 1) + fib(n - 2)
def raw_gma(val=None):
    """Get the module-global raw gma value, setting it first when *val* is
    not None."""
    global _raw_gma
    if val is not None:
        _raw_gma = val
    return _raw_gma
def validate(config):
    """Validate the pkg beacon configuration.

    The config must be a list of items, at least one of which carries a
    "pkgs" key whose value is a list. Returns (ok, message).
    """
    # Configuration for pkg beacon should be a list
    if not isinstance(config, list):
        return False, ("Configuration for pkg beacon must be a list.")
    # Configuration for pkg beacon should contain pkgs (as a list)
    has_pkgs = any("pkgs" in item for item in config)
    has_pkgs_list = any(
        "pkgs" in item and isinstance(item["pkgs"], list) for item in config
    )
    if not (has_pkgs and has_pkgs_list):
        return False, "Configuration for pkg beacon requires list of pkgs."
    return True, "Valid beacon configuration"
def is_float(value):
    """Check whether *value* is convertible to float.

    Arguments:
        value {string/int} -- Either string or integer

    Returns:
        Boolean -- True only for truthy str/int values that float() accepts
        (so 0, None, floats and other types all yield False, as before).
    """
    if not value:
        return False
    if not isinstance(value, (str, int)):
        return False
    try:
        float(value)
    except ValueError:
        return False
    return True
def is_blacklisted(frase, palabras):
    """Return True when any forbidden word in *palabras* occurs in *frase*."""
    return any(palabra in frase for palabra in palabras)
def move_tower(discs, origin, target, helper):
    """Handle the recursive shuffle of discs (Tower of Hanoi).

    Returns the full ordered list of move descriptions for moving *discs*
    discs from *origin* to *target* using *helper*.

    Fix: the original discarded the lists returned by its recursive calls,
    so it returned at most one move regardless of disc count; the recursive
    results are now accumulated.
    """
    moves = []
    if discs > 0:
        moves.extend(move_tower(discs - 1, origin, helper, target))
        moves.append(f'move disk from {origin} to {target}')
        moves.extend(move_tower(discs - 1, helper, target, origin))
    return moves
def render_humidity(data, query):
    """Render humidity (h): append '%' to a non-empty humidity value."""
    value = data.get('humidity', '')
    return value + '%' if value else value
def length_of_code(code):
    """Character count of *code* with CRLF line endings normalized to LF."""
    normalized = code.replace('\r\n', '\n')
    return len(normalized)
def generate_body(fields: dict = None, custom_fields: dict = None) -> dict:
    """Generate a request body from fields and custom fields.

    Args:
        fields: fields data, copied into the body as-is.
        custom_fields: custom fields data; keys are prefixed with "u_" when
            they do not already start with it.

    Returns:
        body object for SNOW requests.

    Fix: replaced the mutable `{}` default arguments (shared across calls)
    with None sentinels — call behavior is unchanged.
    """
    body = dict(fields) if fields else {}
    if custom_fields:
        for field, value in custom_fields.items():
            # custom fields begin with "u_"
            key = field if field.startswith('u_') else 'u_' + field
            body[key] = value
    return body
def mean(vector):
    """Arithmetic mean of a non-empty list/array of numbers.

    Args:
    -----
    vector : list
        A non-empty list/array of numbers to be averaged.

    Returns:
    --------
    float
        The arithmetic mean of the given vector.
    """
    total = sum(vector)
    return total / len(vector)
def get_fd(f_t1, f_t2, args):
    """Relative frequency difference between t1 and t2.

    :param f_t1: int frequency at t1
    :param f_t2: int frequency at t2
    :param args: dict; when args['token_similarity_change'] == 'abs' the
        absolute difference is used, otherwise the signed one.
    :return: (f_t2 - f_t1) / max(f_t2, f_t1), possibly absolute.
    """
    diff = f_t2 - f_t1
    if args['token_similarity_change'] == 'abs':
        diff = abs(diff)
    return diff / max(f_t2, f_t1)
def check_file_position(filename):
    """Split a "-r" argument of the form "name[:position]".

    Args:
        filename: the user's -r input; a colon-separated position suffix is
            optional.

    Returns:
        (file name, position number) — position is 0 when absent.

    Fix: replaced `len(position) is not 0` (identity comparison against an
    int, a CPython-specific SyntaxWarning) with a plain truthiness check,
    and used tuple-unpacked partition instead of two partition calls.
    """
    new_file, _, position = filename.partition(":")
    return new_file, int(position) if position else 0
def func_ref(x: str) -> str:
    """Encode a function reference by wrapping it in four underscores.

    Args:
        x (str): reference to a function.

    Returns:
        The encoded string.
    """
    return "____" + x + "____"
def flex_direction(keyword):
    """``flex-direction`` property validation."""
    return keyword in ('column', 'column-reverse', 'row', 'row-reverse')
def isaudio(file: str) -> bool:
    """Test whether *file* has a known audio-format ending.

    :param file: str
    :return: bool
    """
    extension = file.rsplit(".", 1)[-1]
    return extension in ("mp3", "aac", "ogg", "m4a")
def get_unique_list(predictors):
    """Return a list of unique predictors.

    Two Predictors are equal if they have the same name.

    NOTE(review): despite the docstring, the active code deduplicates with
    ``P not in results`` — i.e. whatever equality the Predictor class defines
    — while the commented-out line compared by ``.name``. Confirm that
    Predictor ``__eq__`` really compares names, otherwise the docstring is
    wrong.
    """
    results = []
    for P in predictors:
        #if not P.name in [p.name for p in results] : results.append(P)
        if P not in results : results.append(P)
    return results
def VmaxWeight(distance, R_25, R_25_limit=30.0, maxSurveyDist=30.0):
    """Return W = V_tot / V_max.

    V_tot is the total survey volume (out to maxSurveyDist in Mpc) and V_max
    the volume out to the maximum distance the object could have been seen at,
    given an R_25 limiting radius of R_25_limit and an observed radius R_25
    (both in arcsec). When V_max exceeds V_tot (the galaxy would be observed
    at any survey distance), W = 1.

    For S4G, R_25_limit = 30 arcsec.
    """
    total_volume = maxSurveyDist**3
    max_distance = distance * (R_25 / R_25_limit)
    max_volume = max_distance**3
    return 1.0 if max_volume > total_volume else (total_volume / max_volume)
def _remove_long_seq(maxlen, seq, label): """ Removes sequences that exceed the maximum length. # Arguments maxlen: Int, maximum length of the output sequences. seq: List of lists, where each sublist is a sequence. label: List where each element is an integer. # Returns new_seq, new_label: shortened lists for `seq` and `label`. """ new_seq, new_label = [], [] for x, y in zip(seq, label): if len(x) < maxlen: new_seq.append(x) new_label.append(y) return new_seq, new_label
def labels_status(labels):
    """Return 1 when the label set is "clean", else 0.

    Clean means every label is a substring of the deepest label (the one with
    the most '/' separators). Empty or None input counts as clean.
    """
    if not labels:
        return 1
    deepest = max(labels, key=lambda lbl: lbl.count('/'))
    return 0 if any(lbl not in deepest for lbl in labels) else 1
def bit_to_index(bit):
    """Return the index (log2) of a set bit in a 64-bit value.

    Uses branch-free mask tests: each mask selects the positions where one
    bit of the index is 1, so OR-ing the test results reconstructs the index.
    Intended for single-bit inputs.
    """
    masks = (
        0xAAAAAAAAAAAAAAAA,
        0xCCCCCCCCCCCCCCCC,
        0xF0F0F0F0F0F0F0F0,
        0xFF00FF00FF00FF00,
        0xFFFF0000FFFF0000,
        0xFFFFFFFF00000000,
    )
    index = 0
    for shift, mask in enumerate(masks):
        index |= ((bit & mask) != 0) << shift
    return index
def dictJoin(dict1, dict2):
    """Merge dict1 into dict2 in place and return dict2.

    If there is any key overlap, dict1 wins! (just make sure this doesn't
    happen)
    """
    dict2.update(dict1)
    return dict2
def update_pandoc_options(old, new, mutable):
    """
    Return dictionary of pandoc command line options: 'old' updated with
    'new'. Only options marked as mutable can be changed.

    Args (shapes assumed from use — confirm against callers):
        old/new: {'r': {...}, 'w': {...}} read/write option dicts whose
            values are lists, strings/None, or booleans.
        mutable: same shape, truthy values marking overridable options.

    NOTE(review): the False-reset branch assumes `key` already exists in
    `old[p]` (it indexes `old[p][key]` unconditionally) — confirm inputs
    guarantee that.
    """
    for p in ['r', 'w']:
        for key in new[p]:
            # if not mutable commandline line option, then skip it
            if not mutable[p][key]:
                continue
            # if 'False', reset old[p][key] to default
            elif new[p][key] is False:
                # Reset to the type-appropriate "empty" default.
                if type(old[p][key]) is list:
                    old[p][key] = list()
                elif type(old[p][key]) is str:
                    old[p][key] = None
                elif type(old[p][key]) is bool:
                    old[p][key] = False
            # if list, extend old list with new
            elif key in old[p] and type(old[p][key]) is list:
                old[p][key].extend(new[p][key])
            # otherwise, override old with new
            else:
                old[p][key] = new[p][key]
    return old
def is_numeric(lit): """ value of numeric: literal, string, int, float, hex, binary From http://rosettacode.org/wiki/Determine_if_a_string_is_numeric#Python """ # Empty String if len(lit) <= 0: return lit # Handle '0' if lit == '0': return 0 # Hex/Binary if len(lit) > 1: # sometimes just '-' means no data... litneg = lit[1:] if lit[0] == '-' else lit if litneg[0] == '0': if litneg[1] in 'xX': return int(lit, 16) elif litneg[1] in 'bB': return int(lit, 2) else: try: return int(lit, 8) except ValueError: pass # Int/Float/Complex try: return int(lit) except ValueError: pass try: return float(lit) except ValueError: pass try: return complex(lit) except ValueError: pass return lit
def _canonical(seq): """ Rotates and flips a sequence into its minimal form. Useful for identifying node sequences that are identical except for starting point and direction. """ def rotated(seq, i): return seq[i:] + seq[:i] def flipped(seq): return list(reversed(seq)) candidates = [] for i in range(len(seq)): for f in (flipped, lambda seq: seq): candidates.append(f(rotated(seq, i))) return tuple(min(candidates))
def _get_duration(element): """ Return the duration of an element. :param element: either a step or a scenario or a feature """ return (element._stopped - element._started).seconds if hasattr(element, '_started') else None
def accepts(source):
    """Determine whether we want to handle this source (buildbot only)."""
    return source["type"] == "buildbot"
def hsl2rgb(h,s,l):
    """
    Convert between HSL and RGB [0-255].

    Args (ranges implied by the math — confirm with callers):
        h: hue in degrees [0, 360).
        s: saturation [0, 1].
        l: lightness [0, 1].

    Returns:
        (r, g, b) tuple of ints in [0, 255].
    """
    h = float(h)
    s = float(s)
    l = float(l)
    if s == 0:
        # Achromatic: every channel equals the lightness.
        r = l*255.
        g = l*255.
        b = l*255.
    else:
        t3 = [0.,0.,0.]
        c = [0.,0.,0.]
        # t1/t2 are the standard HSL helpers (often called p and q).
        if l < 0.5:
            t2 = l * (1.+s)
        else:
            t2 = l + s - l*s
        t1 = 2. * l - t2
        h /= 360.  # normalize hue to [0, 1)
        # Per-channel hues: R is +1/3 turn, G on the hue, B is -1/3 turn.
        t3[0] = h + 1./3.
        t3[1] = h
        t3[2] = h - 1./3.
        for i in range(3):
            # Wrap the channel hue back into [0, 1].
            if t3[i] < 0.:
                t3[i] += 1.
            elif t3[i] > 1.:
                t3[i] -= 1.
            # Piecewise hue-to-channel mapping.
            if (6. * t3[i]) < 1.:
                c[i] = t1 + (t2 - t1) * 6. * t3[i]
            elif (2. * t3[i]) < 1.:
                c[i] = t2
            elif (3. * t3[i]) < 2.:
                c[i] = t1 + (t2 - t1) * ((2./3.) - t3[i]) * 6.
            else:
                c[i] = t1
        r = round(c[0]*255.)
        g = round(c[1]*255.)
        b = round(c[2]*255.)
    # out
    return (int(r), int(g), int(b))
def check_args(argv):
    """Validate `main()` input arguments.

    :param argv: program arguments (script name, login, password expected)
    :return: True/False
    """
    if len(argv) == 3:
        return True
    print("Github login and password are expected as script parameters")
    return False
def vec_reverse(a):
    """Reverse a vector.

    Parameters
    ----------
    a: list[]
        A vector of scalar values.

    Returns
    -------
    list[]
        The reversed vector (same sequence type as the input, via slicing).
    """
    reversed_vec = a[::-1]
    return reversed_vec
def _should_pack(arg): """Determines whether the caller needs to pack the argument in a tuple. If user-defined function returns a list of tensors, `nest.flatten()` and `ops.convert_to_tensor()` and would conspire to attempt to stack those tensors into a single tensor because the tf.data version of `nest.flatten()` does not recurse into lists. Since it is more likely that the list arose from returning the result of an operation (such as `tf.numpy_function()`) that returns a list of not-necessarily-stackable tensors, we treat the returned value as a `tuple` instead. A user wishing to pack the return value into a single tensor can use an explicit `tf.stack()` before returning. Args: arg: argument to check Returns: Indication of whether the caller needs to pack the argument in a tuple. """ return isinstance(arg, list)
def negate(signal):
    """Negate the signal, i.e. flip it vertically."""
    flipped = signal * -1
    return flipped
def ref_str_to_tuple(ref):
    """Turn a string like ' a : b ' into the tuple ('a', 'b')."""
    parts = ref.split(':')
    return tuple(part.strip() for part in parts)
def can_connect_via_site_wire(a_site, a_site_wire, b_site, b_site_wire):
    """Are these two site wires the same connection resource?

    True only when both endpoints are in the same site and on the same
    site wire.
    """
    return a_site == b_site and a_site_wire == b_site_wire
def byte_to_megabyte(byte):
    """Convert a byte count into (decimal) megabytes, rounded to 4 places."""
    kilobytes = byte / 1000
    megabytes = kilobytes / 1000
    return round(megabytes, 4)
def format_types(types):
    """Wrap each type name in double quotes for SQL statements."""
    quoted = []
    for type_ in types:
        quoted.append('"{}"'.format(type_))
    return quoted
def part1(commands):
    """Run the planned submarine course and return horizontal * depth.

    forward X increases the horizontal position by X units;
    down X increases the depth by X units;
    up X decreases the depth by X units.
    """
    horizontal = 0
    depth = 0
    for command in commands:
        direction, amount = command[0], command[1]
        if direction == "forward":
            horizontal += amount
        elif direction == "up":
            depth -= amount
        else:
            depth += amount
    return horizontal * depth
def _utf8(s): """Converts the given string to sequence of bytes according to UTF-8.""" return s.encode("utf8")
def decode_resouce_type(id: str) -> int:
    """Decode a number between 0 and 0xFFFF from a 4-character hex string."""
    return int(id, base=16)
def SliceCoordsConstant(minVal, maxVal, divisions):
    """Generate one dimension of annulus slice coordinates with constant
    node spacing, from minVal to maxVal inclusive (divisions + 1 points)."""
    span = maxVal - minVal
    coords = []
    for i in range(divisions + 1):
        coords.append(minVal + span * (float(i) / divisions))
    return coords
def id_from_uri(uri):
    """Return the ID from a URI.

    For example, "spotify:album:kjasg98qw35hg0" returns "kjasg98qw35hg0".

    Args:
        uri (str): The URI string.

    Returns:
        str: The ID (the text after the last colon; the whole string when
        no colon is present).
    """
    return uri.rsplit(":", 1)[-1]
def max_shove(dic, key, value):
    """Update *key* in *dic* if the previous value was lower.

    dic   -- dictionary to insert into
    key   -- key to query
    value -- proposed new value

    When the key is missing, or the proposed value is strictly higher than
    the stored one, the dictionary is updated and True is returned;
    otherwise the dictionary is unchanged and False is returned.
    """
    if key in dic and value <= dic[key]:
        return False
    dic[key] = value
    return True
def expand_into_dict(raw_package):
    """Expand a raw (name, versions) tuple into an easier-to-use dict.

    Each version string becomes {'name': version, 'url': '/<pkg>-<ver>.tar.gz'}.
    """
    package_name, version_strings = raw_package
    version_entries = []
    for version in version_strings:
        entry = {
            "name": version,
            "url": "/{0}-{1}.tar.gz".format(package_name, version),
        }
        version_entries.append(entry)
    return {"name": package_name, "versions": version_entries}
def gen_pair(row, V, PBC=False):
    """Generate consecutive pairs from an in-order array with strength V.

    For example row = [1, 2, 3, 5] gives
    [(1, 2, V), (2, 3, V), (3, 5, V)] and, with PBC (periodic boundary
    conditions), also the wrap-around pair (5, 1, V).
    """
    count = len(row) if PBC else len(row) - 1
    return [(row[i], row[(i + 1) % len(row)], V) for i in range(count)]
def test_gif(h, f): """GIF ('87 and '89 variants)""" if h[:6] in ('GIF87a', 'GIF89a'): return 'gif'
def application_name(config):
    """Return the application_name from the configuration.

    :param config: Configuration to extract the application_name from.
    :type config: dict
    :returns: The application_name from the configuration.
    :rtype: str
    """
    name = config['application_name']
    return name
def is_equiv(a,b):
    """
    Determine if 2 MICs are functionally equivalent: the ratio a/b must be
    within ``allowed_error`` of 1.

    NOTE(review): the original docstring said "Allows for 1% error due to
    float casting" but ``allowed_error = 0.1`` permits 10% — confirm which
    tolerance is intended. The check is also asymmetric (uses a/b, not the
    max ratio) and raises ZeroDivisionError when b == 0.

    See also acheron.download.are_equal_mic
    """
    allowed_error = 0.1
    if abs(1-(a/b)) < allowed_error:
        return True
    else:
        return False
def url_safe(value):
    """Turn spaces into dashes and lowercase; falsy input yields None."""
    if not value:
        return None
    dashed = value.replace(" ", "-")
    return dashed.lower()
def get_bytes_human(nbytes):
    """Split a byte count into a human-readable (mantissa, suffix) pair.

    Works for negative counts (the sign is carried on the mantissa) and
    clamps beyond-YB magnitudes to the largest suffix. Steps are log-1024.

    Parameters
    ----------
    nbytes : int

    Returns
    -------
    prefix : float
        Mantissa (significand) of the human-readable bytes count.
    suffix : str
        Unit string for the corresponding power of 1024.

    Examples
    --------
    get_bytes_human(1023) -> (1023.0, 'bytes')
    get_bytes_human(1024) -> (1.0, 'KB')
    """
    from math import log
    from numpy import sign
    units = ('bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    magnitude = abs(nbytes)
    if nbytes != 0:
        exponent = int(log(magnitude, 2) / 10.)
    else:
        exponent = 0
    exponent = min(exponent, len(units) - 1)
    mantissa = magnitude / (1024. ** exponent)
    return sign(nbytes) * mantissa, units[exponent]
def sanitize_url(url):
    """Percent-encode the spaces in a URL."""
    encoded = url.replace(" ", "%20")
    return encoded
def find_largest_digit_help(n, n_max): """ The function will return the largest digit. """ # Set the stopping rule of the recursion algorithm. if n < 10: return n_max n = n // 10 if n > 0: # Explore n_mode = n % 10 if n_mode > n_max: n_max = n_mode # Explore max_ = find_largest_digit_help(n, n_max) return max_
def nested_update(this, that):
    """Merge two nested dictionaries — effectively a recursive ``dict.update``.

    *this* is modified in place and returned. Non-dict values in *this* are
    overwritten by matching keys in *that*; dict values are merged
    recursively; keys only present in *that* are added.

    Examples
    --------
    Merge two flat dictionaries:

    >>> nested_update(
    ...     {'a': 1, 'b': 2},
    ...     {'b': 3, 'c': 4}
    ... )
    {'a': 1, 'b': 3, 'c': 4}

    Merge two nested dictionaries:

    >>> nested_update(
    ...     {'x': {'a': 1, 'b': 2}, 'y': 5, 'z': 6},
    ...     {'x': {'b': 3, 'c': 4}, 'z': 7, '0': 8},
    ... )
    {'x': {'a': 1, 'b': 3, 'c': 4}, 'y': 5, 'z': 7, '0': 8}
    """
    for key, current in this.items():
        if isinstance(current, dict):
            # Recurse only when both sides hold a dict for this key.
            if isinstance(that.get(key), dict):
                nested_update(current, that[key])
        elif key in that:
            this[key] = that[key]
    for key, value in that.items():
        this.setdefault(key, value)
    return this
def is_triangle(a, b, c):
    """Check whether three stick lengths can form a triangle.

    a, b, c : int -> the length of every side. Prints 'Si'/'No' as a side
    effect and returns the boolean verdict.
    """
    ok = a < (b + c) and b < (a + c) and c < (a + b)
    print('Si' if ok else 'No')
    return ok
def _is_asc(filename): """ Checks whether a file is a Seismic Handler ASCII file or not. :type filename: str :param filename: Name of the ASCII file to be checked. :rtype: bool :return: ``True`` if a Seismic Handler ASCII file. .. rubric:: Example >>> _is_asc("/path/to/QFILE-TEST-ASC.ASC") #doctest: +SKIP True """ # first six chars should contain 'DELTA:' try: with open(filename, 'rb') as f: temp = f.read(6) except Exception: return False if temp != b'DELTA:': return False return True
def my_product1(n):
    """Return the product of 1..n (n factorial; 1 for n <= 0).

    >>> my_product1(3)
    6
    >>> my_product1(10)
    3628800
    """
    product = 1
    for factor in range(1, n + 1):
        product *= factor
    return product