content
stringlengths
42
6.51k
def weighed_interest_distance(vec1, vec2, weights):
    """Weighted L1 interest distance: each dimension's absolute difference
    is scaled by the reciprocal of its weight."""
    total = 0
    for dim in range(len(vec1)):
        total += abs(vec1[dim] - vec2[dim]) * 1 / weights[dim]
    return total
def extrapolate(x0, x, y):
    """Linearly extrapolate to x0 from two adjacent known data points.

    :param x0: Data point to be extrapolated to.
    :param x: x-coordinates of the two known data points.
    :param y: y-coordinates of the two known data points.
    """
    rise = y[1] - y[0]
    run = x[1] - x[0]
    return y[0] + rise * (x0 - x[0]) / run
def pathToLoc(p):
    """Back-compat shim: mirror a legacy "path" entry into "location".

    v1.0 should specify location, but older YAML uses path.
    """
    try:
        p["location"] = p["path"]
    except KeyError:
        pass  # no legacy key: nothing to mirror
    return p
def ee_bandnames(mission):
    """Visible-to-SWIR waveband names (EarthEngine nomenclature) for a mission.

    notes: [1] Landsat7 'PAN' is intentionally skipped to fit Py6S.
    """
    landsat_legacy = ['B1', 'B2', 'B3', 'B4', 'B5', 'B7']
    bands_by_mission = {
        'Sentinel2': ['B' + suffix for suffix in
                      ['1', '2', '3', '4', '5', '6', '7', '8', '8A',
                       '9', '10', '11', '12']],
        'Landsat8': ['B%d' % i for i in range(1, 10)],
        'Landsat7': landsat_legacy,
        'Landsat5': landsat_legacy,
        'Landsat4': landsat_legacy,
    }
    return bands_by_mission[mission]
def trim_version_string(version_string):
    """Trim lone trailing '.0' components beyond major.minor.

    Examples: '10.0.0.0' -> '10.0'; '10.0.0.1' -> '10.0.0.1';
    '10.0.0-abc1.0' -> '10.0.0-abc1'. None or '' yields ''.
    """
    if version_string is None or version_string == '':
        return ''
    parts = version_string.split('.')
    # Pop trailing zeros while more than major.minor remains.
    while len(parts) > 2 and parts[-1] == '0':
        parts.pop()
    return '.'.join(parts)
def make_menu_dict_from_dict(orig_dict, dict_key_for_display):
    """Create a numbered menu dictionary with sub-dictionaries.

    :param orig_dict: dict of dicts to build the menu from
    :param dict_key_for_display: key whose value becomes the menu label
    :return: {1: {'MENU': label, 'SUBDIC': entry}, 2: ...}
    """
    menu = {}
    for position, orig_key in enumerate(orig_dict, start=1):
        entry = orig_dict[orig_key]
        menu[position] = {'MENU': entry[dict_key_for_display], 'SUBDIC': entry}
    return menu
def change_to_id(obj):
    """Rename the 'uuid' key to 'id'.

    The API returns objects with a 'uuid' field, while many Horizon
    directives expect an 'id' field.
    """
    value = obj.pop('uuid')
    obj['id'] = value
    return obj
def conditionalJoin(first, second, separator='; '):
    """Join two strings with `separator`; if one is empty or None, return
    the other unmodified.

    :param first: string placed before `separator`
    :param second: string placed after `separator`
    :param separator: joining string, defaults to '; '
    :raises TypeError: if `separator` is not a string
    """
    if not isinstance(separator, str):
        raise TypeError('`separator` must be a string.')
    if first is None or first == '':
        return second
    if second is None or second == '':
        return first
    return first + separator + second
def effective_length(list_of_elements, looplength):
    """Effective pseudoknot loop length: the unpaired nucleotides outside
    the recursive helices plus one per internal helix.

    :param list_of_elements: recursive elements as (start, end) pairs
    :param looplength: raw loop length
    :return: effective loop length
    """
    result = looplength
    if list_of_elements:
        for item in list_of_elements:
            result -= item[1] - item[0] + 1  # nucleotides inside the helix
            result += 1                      # but count the helix itself
    return result
def distanceBetweenStrings(needle, haystack):
    """Fuzzy match distance of `needle` within `haystack`, using a modified
    Levenshtein distance (search variant: the first row is zero so free
    insertions are allowed around the match).

    Adapted from the levenshtein function in the bktree module by Adam Hupp.

    Fixes: removed the unused local `doesLevenshteinModuleExist`; replaced
    the `not x in y` anti-idiom with `not in`.
    """
    m, n = len(needle), len(haystack)
    # Base cases.
    if m == 1:
        # Single character: 0 (False) if present, 1 (True) otherwise.
        return needle not in haystack
    if not n:
        return m
    row1 = [0] * (n + 1)
    for i in range(m):
        row2 = [i + 1]
        for j in range(n):
            cost = needle[i] != haystack[j]
            row2.append(min(row1[j + 1] + 1,   # deletion
                            row2[j] + 1,       # insertion
                            row1[j] + cost))   # substitution
        row1 = row2
    return min(row1)
def add_satellite_gateway(vpn, elements):
    """Add satellite VPN gateways that are not yet assigned.

    :param PolicyVPN vpn: reference to VPN
    :param list elements: instances of Element
    :raises PolicyCommandFailed: error assigning gateway
    :return: True if at least one gateway was added, else False
    """
    changed = False
    if elements:
        existing = {gw.gateway.href for gw in vpn.satellite_gateway_node.all()}
        for element in elements:
            if element.href not in existing:
                vpn.add_satellite_gateway(element.href)
                changed = True
    return changed
def set_default(obj):
    """`default` hook for the write_json function: serialize sets as lists;
    anything else raises TypeError (as json.dump expects)."""
    if not isinstance(obj, set):
        raise TypeError
    return list(obj)
def recursive_min_additive(additive_func, q, q_max, cache=None, cache_offset=1):
    """
    Super-additive extension for event models.

    Any additive function `additive_func` valid in the domain
    q in [0, q_max] is extended, and the value f(q) is returned.
    A cache dictionary may optionally be supplied for speedup.

    NOTE: this cannot be directly used with delta curves, since they are
    "1-off"; if you supply a delta function as additive_func, add 1 and
    supply q-1, e.g.::

        ret = util.recursive_min_additive(lambda x: self.delta_plus(x + 1),
                                          n - 1, q_max, self.delta_plus_cache)

    By default the cache is filled according to the delta-domain notion so
    it can be shared with delta-based event models; set cache_offset to 0
    to override.
    """
    if cache is None:
        cache = dict()
    if q <= q_max:
        # Inside the valid domain: evaluate directly.
        return additive_func(q)
    else:
        ret = float('inf')
        # Split q into a + (q - a) and take the minimum over all splits.
        for a in range(1, q_max + 1):
            b = cache.get(q - a + cache_offset, None)  # cache is in delta domain (thus +1)
            if b is None:
                b = recursive_min_additive(additive_func, q - a, q_max, cache, cache_offset)
                cache[q - a + cache_offset] = b
            # print a, q - a, additive_func(a), b, additive_func(a) + b
            ret = min(ret, additive_func(a) + b)
        return ret
def make_postback_action(data, label=None, i18n_labels=None, display_text=None, i18n_display_texts=None):
    """Build a postback action payload.

    reference - https://developers.worksmobile.com/jp/document/1005050?lang=en

    :param data: post back string
    :return: actions content dict
    """
    action = {"type": "postback", "data": data}
    optional_fields = (
        ("displayText", display_text),
        ("label", label),
        ("i18nLabels", i18n_labels),
        ("i18nDisplayTexts", i18n_display_texts),
    )
    for field, value in optional_fields:
        if value is not None:
            action[field] = value
    return action
def _adjoint(op, ignore_errors=False): """Calculate adjoint of an object, specifically an operator in QuTiP nested-list format. Controls are not modified. If the adjoint of `op` cannot be calculated, raise a :exc:`ValueError` or return `op` unchanged if `ignore_errors` is True. """ if isinstance(op, list): adjoint_op = [] for item in op: if isinstance(item, list): if len(item) != 2: if ignore_errors: return op else: raise ValueError( "%s is not the in the expected format of the " "two-element list '[operator, control]'" % item ) adjoint_op.append([_adjoint(item[0]), item[1]]) else: adjoint_op.append(_adjoint(item)) return adjoint_op elif op is None: return None elif isinstance(op, str): return op # e.g. "PE" target else: try: return op.dag() # qutip except AttributeError: try: return op.conj().T # numpy except AttributeError: try: return op.conjugate().transpose() # numpy-like except AttributeError: if ignore_errors: return op else: raise ValueError("Cannot calculate adjoint of %s" % op)
def generate_block_command(ip: str) -> str:
    """Return a shell command that blocks this IP: an `iptables` rule that
    drops all incoming traffic from the given address."""
    return "iptables -A INPUT -s {} -j DROP".format(ip)
def iobes_iob(tags):
    """Convert IOBES-tagged labels to IOB (S- becomes B-, E- becomes I-).

    Raises Exception for any unrecognized prefix.
    """
    converted = []
    for tag in tags:
        prefix = tag.split('-')[0]
        if prefix in ('B', 'I', 'O'):
            converted.append(tag)
        elif prefix == 'S':
            converted.append(tag.replace('S-', 'B-'))
        elif prefix == 'E':
            converted.append(tag.replace('E-', 'I-'))
        else:
            raise Exception('Invalid format!')
    return converted
def set_bit(v, index, x):
    """Return `v` with its `index`-th bit set to 1 when `x` is truthy,
    otherwise cleared to 0."""
    mask = 1 << index
    if x:
        return v | mask
    return v & ~mask
def ends_with(string, end_str="/"):
    """Ensure `string` (coerced to str) ends with `end_str`.

    :param string: e.g. "s3:mw-bucket"
    :param end_str: e.g. "/"
    :return: e.g. "s3:mw-bucket/"
    """
    text = str(string)
    return text if text.endswith(end_str) else text + end_str
def fix_fits_keywords(header):
    """Return the header with '-' replaced by '_' in keys, since columns
    with '-' are not allowed in SQL."""
    return {key.replace('-', '_'): header[key] for key in header.keys()}
def alma_resol(longest_baseline_in_wavelengths):
    """ALMA angular resolution in arcsec (206265 arcsec per radian).

    https://casaguides.nrao.edu/index.php/Image_Continuum
    """
    arcsec_per_radian = 206265.0
    return arcsec_per_radian / longest_baseline_in_wavelengths
def as_int_or_none(val):
    """Convert int or str(int) to an integer, passing None through and
    returning False for any other type or unparseable string."""
    if val is None or isinstance(val, int):
        return val
    if isinstance(val, str):
        try:
            return int(val)
        except (ValueError, TypeError):
            return False
    return False
def VerifyIterations(num_iters, num_epochs, num_hidden_layers, num_archives,
                     max_models_combine, add_layers_period, num_jobs_final):
    """Verify that the number of iterations is sufficient for the training
    phases, and return how many iterations to combine over in the final
    nnet3-combine-fast invocation."""
    finish_add_layers_iter = num_hidden_layers * add_layers_period
    if num_iters <= finish_add_layers_iter + 2:
        raise Exception(' There are insufficient number of epochs. These are not even sufficient for layer-wise discriminatory training.')
    # The number of models to combine is:
    #   min(max(max_models_combine, approx_iters_per_epoch_final),
    #       1/2 * iters_after_last_layer_added)
    # (we may subsample later if this exceeds max_models_combine).
    approx_iters_per_epoch_final = num_archives / num_jobs_final
    half_iters_after_add_layers = (num_iters - finish_add_layers_iter) / 2
    return min(max(max_models_combine, approx_iters_per_epoch_final),
               half_iters_after_add_layers)
def offset(freqs, re0, im0):
    """Constant complex offset re0 + j*im0.

    The `freqs` vector is ignored but required for the lmfit Model API.
    """
    return re0 + im0 * 1j
def _fixup_resource_keys(resource, key_map, only_fixup_lists=False): """Correct different attribute names between CAI and json representation. Args: resource (dict): The resource dictionary to scan for keys in the key_map. key_map (dict): A map of bad_key:good_key pairs, any instance of bad_key in the resource dict is replaced with an instance of good_key. only_fixup_lists (bool): If true, only keys that have values which are lists will be fixed. This allows the case where there is the same key used for both a scalar entry and a list entry, and only the list entry should change to the different key. Returns: dict: A resource dict with all bad keys replaced with good keys. """ fixed_resource = {} for key, value in resource.items(): if isinstance(value, dict): # Recursively fix keys in sub dictionaries. value = _fixup_resource_keys(value, key_map) elif isinstance(value, list): # Recursively fix keys in sub dictionaries in lists. new_value = [] for item in value: if isinstance(item, dict): item = _fixup_resource_keys(item, key_map) new_value.append(item) value = new_value # Only replace the old key with the new key if the value of the field # is a list. This behavior can be overridden by setting the optional # argument only_fixup_lists to False. should_update_key = bool( not only_fixup_lists or isinstance(value, list)) if key in key_map and should_update_key: fixed_resource[key_map[key]] = value else: fixed_resource[key] = value return fixed_resource
def _param_name_2_layer_name(param_name): """Convert a weights tensor's name to the name of the layer using the tensor. By convention, PyTorch modules name their weights parameters as self.weight (see for example: torch.nn.modules.conv) which means that their fully-qualified name when enumerating a model's parameters is the modules name followed by '.weight'. We exploit this convention to convert a weights tensor name to the fully-qualified module name.""" return param_name[:-len('.weight')]
def remove_duplicates(lst):
    """Return the given list without duplicates; result order is unspecified.

    For example:
    - remove_duplicates([1,2,1,3,4]) returns [1,2,3,4] (in some order)
    - remove_duplicates([]) returns []
    """
    unique = set(lst)
    return list(unique)
def calculate_maximum_position(velocity: int) -> int:
    """Maximum position reached when `velocity` decreases by one each step:
    v + (v-1) + ... + 1, i.e. the Gauss summation."""
    return velocity * (velocity + 1) // 2
def getenv(s):
    """Parse newline-separated KEY=VALUE pairs into a dict.

    :param s: str with key pairs separated by newlines; lines without '='
        map the whole line to ''.
    :returns: dict of parsed pairs (later duplicates win)
    """
    env = {}
    for line in s.splitlines():
        if not line:
            continue
        key, _, value = line.partition('=')
        env[key] = value
    return env
def CreateLyndonIndices(p, k):
    """Create the set of Lyndon multi-indices (Duval's algorithm).

    Parameters
    ----------
    p : int
        The considered order; only words whose symbol sum is <= p are kept.
    k : int
        Word length, 1 <= k <= p.

    Returns
    -------
    list
        Zero-based Lyndon multi-indices of length k, in lexicographic order.

    Fixes: no longer shadows the builtin `str`; the filter/offset pass is a
    single comprehension instead of a second loop.
    """
    words = []
    alphabet = list(range(1, p + 1))
    w = [alphabet[0] - 1]
    while w:
        w[-1] += 1
        m = len(w)
        if m == k:
            words.append([alphabet[letter - 1] for letter in w])
        # Repeat w periodically to get a word of length k.
        while len(w) < k:
            w.append(w[-m])
        # Remove trailing copies of the largest character in the alphabet.
        while w and w[-1] == alphabet[-1]:
            w.pop()
    # Keep words with sum <= p and shift to zero-based indices.
    return [[letter - 1 for letter in word] for word in words if sum(word) <= p]
def test_command(source): """ Creates a command to be run via subprocess :param source: str|None :return: list """ command = ['pytest', '--cov', source] if source is not None else ['pytest', '--cov'] return command
def patch(attrs, updates):
    """Apply a sequence of (attr, value) updates to `attrs` in place and
    return the values they replaced."""
    previous = {}
    for attr, new_value in updates:
        previous[attr] = attrs[attr]  # capture before overwriting
        attrs[attr] = new_value
    return previous
def richness_to_mass(richness, norm=2.7e13, slope=1.4):
    """Calculate mass from richness via mass = norm * (richness / 20)**slope.

    Parameters
    ----------
    richness : ndarray or float
        Cluster richness value(s).
    norm : float, optional
        Normalization in solar masses, defaults to 2.7e13.
    slope : float, optional
        Slope of the relation, defaults to 1.4.

    Returns
    -------
    ndarray or float
        Cluster mass(es) in solar masses, same type as richness.

    See Also
    --------
    mass_to_richness : the inverse of this function.
    """
    return norm * (richness / 20.) ** slope
def svo_classification(angle):
    """Classify a social value orientation from the SVO slider angle
    (degrees), with cutoffs per Murphy, Ackermann & Handgraaf (2011)."""
    cutoffs = (
        (-12.04, "Competitive"),
        (22.45, "Individualistic"),
        (57.15, "Prosocial"),
    )
    for upper_bound, label in cutoffs:
        if angle < upper_bound:
            return label
    return "Altruistic"
def get_attr(line, pos):
    """Extract an attribute from a line: field `pos` of the lowercased
    colon-split line; fall back to the last space-split token; else None."""
    try:
        return line.strip().lower().split(':')[pos]
    except Exception:
        pass
    try:
        tokens = line.lower().split(' ')
        return tokens[-1].strip()
    except Exception:
        return None
def sti(b11, b12):
    """Soil Tillage Index (Van Deventer, 1997).

    .. math:: STI = b11/b12

    :param b11: SWIR 1 (numpy.ndarray or float)
    :param b12: SWIR 2 (numpy.ndarray or float)
    :returns: index value

    .. Tip:: Van Deventer, A. P., Ward, A. D., Gowda, P. H., Lyon, J. G.
        1997. Using thematic mapper data to identify contrasting soil plains
        and tillage practices. Photogrammetric Engineering and Remote
        Sensing 63, 87-93.
    """
    return b11 / b12
def ComputeSpeedIndex(completeness_record):
    """Compute the speed-index from a completeness record.

    Args:
        completeness_record: list of (time, completeness) points sorted by
            time.

    Returns:
        Speed-index value.

    Raises:
        ValueError: if the record is not sorted by timestamps.
    """
    speed_index = 0.0
    prev_time = completeness_record[0][0]
    prev_completeness = completeness_record[0][1]
    for time, completeness in completeness_record:
        if time < prev_time:
            raise ValueError('Completeness record must be sorted by timestamps.')
        elapsed = time - prev_time
        speed_index += elapsed * (1.0 - prev_completeness)
        prev_time, prev_completeness = time, completeness
    return speed_index
def arguments_from_inspect(f):
    """Inspect f's signature; return (ok, names) where ok is False when f
    takes a variable number of arguments (*args or **kwargs)."""
    import inspect
    params = inspect.signature(f).parameters
    variadic_kinds = {inspect.Parameter.VAR_POSITIONAL,
                      inspect.Parameter.VAR_KEYWORD}
    ok = all(par.kind not in variadic_kinds for par in params.values())
    return ok, list(params)
def calculateJitterRatio(data):
    """Calculate the jitter ratio as defined by Horii (Horii: Fundamental
    frequency perturbation observed in sustained phonation. Journal of
    Speech and Hearing Research. 1979;22:5-19).

    @param data: list or numpy array with periods [s] of consecutive
        glottal cycles
    """
    n = len(data)
    mean_abs_diff = sum(abs(data[i - 1] - data[i]) for i in range(1, n)) / float(n - 1)
    mean_period = sum(data[i] for i in range(n)) / float(n)
    return 1000.0 * mean_abs_diff / mean_period
def make_ordinal(num):
    """Create an ordinal string (1st, 2nd, 3rd, 4th, ...) from a number.

    Bug fix: the teen exception previously only matched the literal values
    11/12/13, so e.g. 111 became '111st'. It now tests num % 100, giving
    '111th', '212th', etc.
    """
    if num % 100 in (11, 12, 13):
        ext = "th"
    else:
        ext = {1: "st", 2: "nd", 3: "rd"}.get(num % 10, "th")
    return str(num) + ext
def parse_symbol(s):
    """Convert a symbol like 'btcmxn' into a book query '?book=btc_mxn'."""
    book = "{}_{}".format(s[:3], s[3:])
    return "?book=" + book
def do_print(i):
    """Return True for events which need to be printed (currently all)."""
    return True
def fibonacci(n=0):
    """Calculate the n-th Fibonacci number (fib(0)=0, fib(1)=1).

    Improvement: iterative O(n) implementation; the naive double recursion
    was exponential in n. Values are identical for all n >= 0.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def substr_ind(seq, line, *, skip_spaces=True, ignore_case=True):
    """Return the start and end + 1 index of a substring match of seq to line.

    When `skip_spaces` is True, spaces in both `seq` and `line` are ignored
    for matching; returned indices still refer to the original `line`.

    Returns:
        [start, end + 1] if needle found in line
        [] if needle not found in line

    Bug fix: the previous single-pass scan reset its match counter on a
    mismatch without re-trying the current alignment, so e.g. "ab" was not
    found in "aab". This version checks every possible alignment.
    """
    if ignore_case:
        seq = seq.lower()
        line = line.lower()
    if skip_spaces:
        seq = seq.replace(' ', '')
    if not seq:
        return []
    # Positions and characters of `line` that participate in matching.
    chars = [(ind, char) for ind, char in enumerate(line)
             if not (skip_spaces and char == ' ')]
    span = len(seq)
    for offset in range(len(chars) - span + 1):
        if all(chars[offset + j][1] == seq[j] for j in range(span)):
            return [chars[offset][0], chars[offset + span - 1][0] + 1]
    return []
def build_resilient_url(host, port):
    """Build the base url of a resilient instance; 'https://' is prefixed
    unless host already starts with http.

    :param host: host name (str)
    :param port: port (str|int)
    :return: base url (str)
    """
    prefix = "" if host.lower().startswith("http") else "https://"
    return "{0}{1}:{2}".format(prefix, host, port)
def strtobool(val):  # pragma: no cover
    """Convert a string representation of truth to true (1) or false (0).

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
    'val' is anything else.
    """
    truthy = {'y', 'yes', 't', 'true', 'on', '1'}
    falsy = {'n', 'no', 'f', 'false', 'off', '0'}
    val = val.lower()
    if val in truthy:
        return 1
    if val in falsy:
        return 0
    raise ValueError("invalid truth value %r" % (val,))
def split_version(version):
    """Split a string of period-separated digits (e.g. '1.2.3') into a
    tuple of integers."""
    return tuple(map(int, version.split('.')))
def max_cw(l):
    """Return the max value of a list.

    Improvement: uses the builtin max() — O(n) — instead of sorting the
    whole list (O(n log n)) just to read its last element.
    """
    return max(l)
def is_requirement(line):
    """Return True if the requirement line is a package requirement; that
    is, it is not blank, a comment, an include, an editable/VCS install,
    or an option."""
    line = line.strip()
    if not line:
        return False
    skipped_prefixes = ('-r', '#', '-e', 'git+', '--')
    return not line.startswith(skipped_prefixes)
def get_file_from_input(line):
    r"""Return the file imported by a LaTeX \input line, with '.tex' appended.

    :param line: LaTeX source line, e.g. '\input{foo}'
    :type line: str
    :return: imported file name, e.g. 'foo.tex'

    Bug fix: the pattern is now a raw string — '\i' is an invalid escape
    sequence that only worked by accident (DeprecationWarning, and a
    SyntaxError in future Python versions).
    """
    return line.strip().replace(r'\input{', '').replace('}', '').split(' ')[0] + '.tex'
def booleans(key, val):
    """Return a ucsc-formatted boolean: 'on' for 1/True/'on'/'On'/'ON',
    otherwise 'off'. (`key` is unused; kept for interface compatibility.)"""
    return "on" if val in (1, True, "on", "On", "ON") else "off"
def is_homogenous(data, classifier):
    """Return True when every row of `data` carries the same final tag.
    (`classifier` is unused; kept for interface compatibility.)"""
    first_tag = data[0][-1]
    return all(row[-1] == first_tag for row in data)
def hello_world(name: str) -> str:
    """Print and return the greeting 'hello <name>'.

    Args
    ----------
    name: string
        name to say hello to

    Returns
    ----------
    text: string
        the greeting text
    """
    greeting = "hello " + name
    print(greeting)
    return greeting
def merge_sort(nums):
    """A comparison-based sorting algorithm; returns a new sorted list.

    Bug fix: the original only stopped recursing at length exactly 1, so
    merge_sort([]) split [] into two empty halves and recursed forever
    (RecursionError). The base case now covers length <= 1. Repeated list
    slicing in the merge (O(n^2)) was replaced with index walking.
    """
    if len(nums) <= 1:
        return list(nums)
    mid = len(nums) // 2
    left = merge_sort(nums[:mid])
    right = merge_sort(nums[mid:])
    # Merge the two sorted halves (stable: ties taken from the left).
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def slugify(s):
    """Slugify a string by replacing '/' with '_'.

    Less aggressive than Django's slugify, which drops most
    non-alphanumeric characters and lowercases the string — that would
    cause uniqueness conflicts for names like Eth1/2/3 and Eth12/3.

    >>> slugify('switch-foo01:Ethernet1/2')
    'switch-foo01:Ethernet1_2'

    :param s: String to slugify
    """
    return s.replace('/', '_')
def _update_phasesets(phasesets, phaseset): """Update phase sets.""" if phaseset is not None and phaseset not in phasesets: # only copy when adding a new phaseset phasesets = phasesets.copy() phasesets.add(phaseset) return phasesets
def fmt(f):
    """
    Take a number, and return a string with 3 digits before the decimal
    and at least 2 after.

    It really seems like there must be a way to do this with printf.
    """
    # Zero-padded integer part, then the fractional digits: str(f % 1) is
    # "0.xxx...", so [1:] keeps just ".xxx...".
    rstr = "%03d" % f + str(f%1)[1:]
    try:
        dot_pos = rstr.index('.')
    except ValueError:
        # if there is no decimal part, add one
        rstr += ".00"
    else:
        ## if there's just one digit in the decimal part, add a zero
        if (len(rstr) - dot_pos) < 3:
            rstr += "0"
    return rstr
def keep_never_expires(flags):
    """Filter flags to those that never expire (expiry_milestone == -1).

    >>> keep_never_expires([{'expiry_milestone': -1}, {'expiry_milestone': 2}])
    [{'expiry_milestone': -1}]
    """
    never_expiring = []
    for flag in flags:
        if flag['expiry_milestone'] == -1:
            never_expiring.append(flag)
    return never_expiring
def invert_dictionary(dictionary):
    """Swap all key:value pairs into value:key.

    Does not check for uniqueness — duplicate values keep the last key.
    """
    inverted = {}
    for key, value in dictionary.items():
        inverted[value] = key
    return inverted
def convert_http_status(status):
    """Convert HTTP 200 to ': *OK*'; anything else becomes a FAIL marker
    embedding the status or the error message from try_except_status.

    Robustness fix: `status` is coerced with str(), so a non-string status
    (e.g. the int 404) no longer raises TypeError on concatenation.
    """
    if status == 200:
        return ": *OK*"
    return ": *FAIL - " + str(status) + "*"
def part1(instructions):
    """Run the bootloader until an instruction is about to repeat (or the
    program ends) and return the accumulator value at that point."""
    pointer = 0
    accumulator = 0
    seen = set()
    while pointer < len(instructions) and pointer not in seen:
        seen.add(pointer)
        opcode, argument = instructions[pointer].split()
        if opcode == "jmp":
            pointer += int(argument)
        else:
            if opcode == "acc":
                accumulator += int(argument)
            pointer += 1
    return accumulator
def tc_to_frame(tc, edit_rate):
    """Convert a timecode to a total sample count.

    Args:
        tc (str): Timecode string (format HH:MM:SS:FF).
        edit_rate (int): number of samples per second.

    Returns:
        Total samples count.

    >>> tc_to_frame('00:00:02:00', 24)
    48
    """
    hours, minutes, seconds, frames = (int(part) for part in tc.split(':'))
    return ((hours * 60 + minutes) * 60 + seconds) * edit_rate + frames
def assert_raises(c, exc):
    """Test whether callable `c` raises an exception of type `exc`.

    Returns True on success; raises AssertionError when nothing was raised.
    Any other exception type propagates unchanged.
    """
    try:
        c()
    except exc:
        return True
    raise AssertionError("%r failed to raise exception %r" % (c, exc))
def ngrams(sequence, n):
    """Create ngrams from sequence, e.g. ([1,2,3], 2) -> [(1,2), (2,3)].

    Fewer sequence items than n yields an empty list.
    credit: http://stackoverflow.com/questions/2380394/simple-implementation-of-n-gram-tf-idf-and-cosine-similarity-in-python
    """
    items = list(sequence)
    window_count = max(0, len(items) - n + 1)
    return [tuple(items[start:start + n]) for start in range(window_count)]
def change_case(globalkey):
    """Convert a camelCase string to Title_Case, inserting '_' before each
    original capital: 'globalKey' -> 'Global_Key'."""
    pieces = []
    for ch in globalkey:
        if ch.isupper():
            pieces.append("_" + ch.lower())
        else:
            pieces.append(ch)
    return "".join(pieces).lstrip("_").title()
def remove_duplicates(l: list) -> list:
    """Return the given list without duplicates, keeping the first
    occurrence of each item (works for unhashable items too)."""
    result = []
    for element in l:
        if element in result:
            continue
        result.append(element)
    return result
def file_name_parser(line):
    """Extract the value after the first ':' in a line such as
    'File: to_html.py'.

    :param line: string
    :return: the stripped file name, or '' when the line has no ':'
    """
    _, sep, tail = line.partition(':')
    return tail.strip() if sep else ''
def rec_deps(services, service_name, start_point=None):
    """Return all dependencies of `service_name`, recursively.

    NOTE: mutates services[service_name]["_deps"] in place (set.update) and
    uses `start_point` (the recursion root) to break A->B->A cycles.

    :param services: dict of service dicts, each holding a "_deps" set
    :param service_name: service whose transitive deps are collected
    :param start_point: recursion root; defaults to service_name
    :return: the (updated) dependency set of service_name
    """
    if not start_point:
        start_point = service_name
    deps = services[service_name]["_deps"]
    # Iterate over a copy: `deps` is updated inside the loop.
    for dep_name in deps.copy():
        # avoid A depends on A
        if dep_name == service_name:
            continue
        dep_srv = services.get(dep_name, None)
        if not dep_srv:
            continue
        # NOTE: avoid creating loops, A->B->A
        if start_point and start_point in dep_srv["_deps"]:
            continue
        new_deps = rec_deps(services, dep_name, start_point)
        deps.update(new_deps)
    return deps
def count_amount(checker, sample):
    """Count how many items of `sample` are equal to `checker`.

    :param checker: the value to count
    :param sample: the iterable to scan
    :return: number of matching items
    """
    return sum(1 for item in sample if item == checker)
def calculate(retrieve, order, relevant_number):
    """Return (recall, precision) of a found relevant element, each rounded
    to 4 decimal places."""
    return (round(retrieve / relevant_number, 4),
            round(retrieve / order, 4))
def linspace(xmin, xmax, N):
    """Return a list of N linearly spaced floats over [xmin, xmax],
    including both endpoints. N == 1 returns [xmax]."""
    if N == 1:
        return [xmax]
    step = (xmax - xmin) / (N - 1)
    return [xmin] + [xmin + step * float(i) for i in range(1, N)]
def remove_blank_spaces(syllables) -> list:
    """Given a list of letters, remove any blank spaces or empty strings.

    >>> remove_blank_spaces(['', 'a', ' ', 'b', ' ', 'c', ''])
    ['a', 'b', 'c']
    """
    return [syl for syl in syllables if syl not in (' ', '')]
def append_file_extension(path, ext):
    """Append `.ext` to `path`; return `path` unchanged when ext is None."""
    if ext is None:
        return path
    return '%s.%s' % (path, ext)
def nine_interp(x, a0, a1, a2, a3, a4, a5, a6, a7, a8):
    """``Approximation degree = 9``: a0 + a1*x + a2*x^2 + ... + a8*x^8,
    accumulated term by term (same evaluation order as the explicit sum)."""
    result = a0
    for power, coeff in enumerate((a1, a2, a3, a4, a5, a6, a7, a8), start=1):
        result = result + coeff * (x ** power)
    return result
def all_subclasses(cls):
    """Return all (transitive) subclasses of a class as a set."""
    direct = cls.__subclasses__()
    result = set(direct)
    for subclass in direct:
        result |= all_subclasses(subclass)
    return result
def sort_population(individuals):
    """Return the population sorted by fitness, descending.

    :param individuals: list of individuals (dicts with a 'fitness' key)
    :type individuals: list
    :return: new list sorted by float fitness, highest first
    :rtype: list
    """
    return sorted(individuals,
                  key=lambda individual: float(individual['fitness']),
                  reverse=True)
def filter_tuples(l):
    """Return only the tuples in a list. In a tupletree, tuples correspond
    to XML elements; useful for stripping whitespace data from a child
    list. None yields [].

    (Deliberately tests type(x) == tuple, excluding tuple subclasses,
    matching the original behavior.)
    """
    if l is None:
        return []
    return [x for x in l if type(x) == tuple]
def _format_digest(digest, scheme, encoding): """Formats the arguments to a string: {scheme[.encoding]}digest.""" if not encoding: return "{%s}%s" % (scheme, digest) return "{%s.%s}%s" % (scheme, encoding, digest)
def is_sorted(values, debug=False):
    """Check that `values` are sorted in increasing (non-decreasing) order;
    worst case O(n).

    Bug fix: the debug message previously passed its format arguments in
    the wrong order (values where the indices belonged and vice versa);
    the convoluted manual iteration was also simplified to a pair scan.
    """
    for i in range(len(values) - 1):
        if values[i] > values[i + 1]:
            if debug:
                print("Values x[{}] = {} > x[{}+1] = {} are not in the good order!".format(i, values[i], i, values[i + 1]))
            return False
    return True
def split(value, separator):
    """Template filter: return the string split by separator.

    Example usage: {{ value|split:"/" }}
    """
    parts = value.split(separator)
    return parts
def get_exif_data_as_json(exif_data):
    """Group flat exif tags into a nested {main: {sub: value}} dict.

    Tags split on spaces: one word -> sub 'tag'; two words -> second word;
    three words -> third word; more -> whole tag as main with sub 'tag'.

    :param exif_data: mapping of tag name -> value
    :return: nested dict with stringified values
    """
    exif = {}
    for tag in exif_data:
        value = exif_data[tag].__str__()
        keys = tag.split(' ')
        if len(keys) == 1:
            main, sub = keys[0], 'tag'
        elif len(keys) == 2:
            main, sub = keys[0], keys[1]
        elif len(keys) == 3:
            main, sub = keys[0], keys[2]
        else:
            main, sub = tag, 'tag'
        exif.setdefault(main, {})[sub] = value
    return exif
def answer_save_feedback(answer_stored):
    """Send immediate feedback explaining whether the answer was saved.

    :param answer_stored: True if the answer was stored, else False
    :return: feedback text
    """
    if not answer_stored:
        return "Sorry, you did not enter the Answer in the required format. " \
               "Eg - \"[<placeholder for qid>] <Placeholder for Answer>\". Try again"
    return "Thanks for your answer. Your answer has been saved. " \
           "I will get back to you when the destined asker, rates your response. " \
           "Keep your fingers crossed. Hopefully the asker will give you good ratings, " \
           "and your karma points will boost up." \
           "Meanwhile, you can ask another question, or post answer for requested question."
def reduce_pipeline(pipeline, iterable):
    """Thread `iterable` through each function of `pipeline`, in order,
    feeding each stage the previous stage's result."""
    value = iterable
    for stage in pipeline:
        value = stage(value)
    return value
def _extract_additional_datasets(datasets): """ Converts the additional datasets from the name=location format into dictionaries for injection into the ast. """ return dict(map(lambda x: x.split("="), datasets))
def _error_repr(error): """A compact unique representation of an error.""" error_repr = repr(error) if len(error_repr) > 200: error_repr = hash(type(error)) return error_repr
def valid_recipient_object(recipient):
    """Check the recipient object has a usable email address.

    :param recipient: object with a property named e_mail
    :returns: True if e_mail exists, is not None, and is non-blank
    """
    email = getattr(recipient, "e_mail", None)
    if recipient is None or email is None:
        return False
    return str(email).strip() != ""
def remove_duplicates(n):
    """
    Removes duplicates in a given list (referenced list).
    Returns a list of all duplicates.

    *n* is modified in place to keep only the first occurrence of each
    element (order preserved). Every extra occurrence is collected, in
    encounter order, and returned.

    Membership is tracked in a set for O(1) lookups (the original
    scanned a list, O(n) per element); unhashable elements fall back to
    the linear scan so behavior is unchanged for them.
    """
    seen = []
    seen_set = set()
    duplicates = []
    for x in n:
        try:
            if x in seen_set:
                duplicates.append(x)
            else:
                seen_set.add(x)
                seen.append(x)
        except TypeError:
            # unhashable element: preserve original list-scan semantics
            if x in seen:
                duplicates.append(x)
            else:
                seen.append(x)
    # modify the caller's list in place via slice assignment
    n[:] = seen
    return duplicates
def digit_to_string(digit: int) -> str:
    """Converts an integer into its word representation

    :param digit: value in 0-9
    :raises ValueError: for anything outside 0-9
    """
    words = {
        0: "zero", 1: "one", 2: "two", 3: "three", 4: "four",
        5: "five", 6: "six", 7: "seven", 8: "eight", 9: "nine",
    }
    if digit in words:
        return words[digit]
    raise ValueError("Only digits 0-9 are supported")
def is_leading_high(x, j):
    """Return True if bit ``j`` is the HIGHEST (most significant) bit set in ``x``.

    The original docstring said "lowest", which contradicts the code:
    ``x.bit_length() == j + 1`` holds exactly when the top set bit of *x*
    is bit *j* (the name "leading high" also says so).
    """
    return x.bit_length() == j + 1
def build_abstract(info, example=None, detailed_info=None):
    """
    Constructs the abstract of an entry according to the new fathead HTML
    template elements.

    :param info: main description text (newlines are escaped to literal "\\n")
    :param example: optional code example for a <pre><code> block
    :param detailed_info: optional mapping of sub-heading -> text
    :return: the assembled <section> HTML string

    The ``detailed_info={}`` mutable default has been replaced with the
    ``None`` sentinel — a shared dict default would be aliased across all
    calls and could be mutated by callers.
    """
    if detailed_info is None:
        detailed_info = {}
    abstract = ''
    if info:
        abstract += '<p>%s</p>' % info.replace('\n', '\\n')
    if example:
        abstract += '<pre><code>%s</code></pre>' % example.replace('\n', '\\n')
    for key, value in detailed_info.items():
        abstract += '<span class="prog__sub">%s</span><p>%s</p>' % (key, value)
    # final replace also escapes newlines inside detailed_info values
    abstract = '<section class="prog__container">%s</section>' % abstract.replace('\n', '\\n')
    return abstract
def encode(nucleotide):
    """
    Args:
        nucleotide: A string of one character

    Returns:
        An integer encoding the nucleotide, or -1 if not a valid nucleotide
    """
    codes = {"A": 0, "C": 1, "G": 2, "T": 3}
    # dict.get folds the membership test and lookup into one call
    return codes.get(nucleotide, -1)
def build_header(token):
    """Builds the headers part of the request to Web Services API.

    :param token: bearer token string
    :return: dict of HTTP request headers including Authorization
    """
    return {
        'Accept': 'application/json;odata.metadata=minimal',
        'Connection': 'Keep-Alive',
        'Host': 'services-api.lexisnexis.com',
        'Authorization': 'Bearer ' + token,
    }
def file2uri(path):
    """Returns the web file URI for the given file path"""
    # Normalise Windows separators before prefixing the scheme.
    forward_slashed = path.replace('\\', '/')
    return 'file:///{}'.format(forward_slashed)
def get_panorama_ip(contents):
    """Extract the ``panorama-server`` value from config text.

    Entries may be separated by newlines and/or ``::``; each entry is a
    ``key=value`` pair. The first non-empty ``panorama-server`` value wins.

    :param contents: raw configuration text
    :return: the Panorama server address string, or None if not found

    Fixes vs. the original: no longer shadows the ``list`` builtin,
    splits on the first '=' only (values containing '=' survive), and
    no longer raises IndexError on a bare ``panorama-server`` entry
    that has no '=' at all.
    """
    entries = contents.replace('\n', '::').split('::')
    for entry in entries:
        if not entry:
            continue
        key, sep, value = entry.partition('=')
        if sep and key == 'panorama-server' and value:
            return value
    print('Panorama IP not found')
    return None
def getdate(targetconnection, ymdstr, default=None):
    """Convert a string of the form 'yyyy-MM-dd' to a Date object.

    The returned Date is in the given targetconnection's format.

    Arguments:
    - targetconnection: a ConnectionWrapper whose underlying module's
      Date format is used
    - ymdstr: the string to convert
    - default: The value to return if the conversion fails
    """
    try:
        year, month, day = (int(part) for part in ymdstr.split('-'))
        return targetconnection.getunderlyingmodule().Date(year, month, day)
    except Exception:
        # Deliberate catch-all: any malformed input yields *default*.
        return default
def lerp(a, b, u):
    """Smoothed interpolation between *a* and *b*.

    Eases the blend factor with the 6u^5 - 15u^4 + 10u^3 "smootherstep"
    polynomial, which has zero first and second derivatives at u=0 and u=1.
    """
    eased = 6 * u ** 5 - 15 * u ** 4 + 10 * u ** 3
    span = b - a
    return a + eased * span
def merge_group_sets(group):
    """
    Called in the remove_corrupted_samples() function.
    Flattens *group* (a sequence of groups) into a single list, in order.

    :param group: iterable of iterables
    :return: one list containing every element of every sub-group

    Replaces the ``for i in range(len(group))`` index loop (a Python
    anti-pattern) with a flattening comprehension; as a side benefit any
    iterable of iterables is now accepted, not just indexables.
    """
    return [sample for subset in group for sample in subset]
def allowed(access_level, auth):
    """Checks to see if the user that's looked up has the required access level"""
    # Debug tracing left in place while the real check is unimplemented.
    print(access_level)
    print(auth)
    # todo(evan)
    # NOTE(review): stub — always denies access until the comparison
    # between access_level and auth is actually implemented.
    return False
def stround(x, force_dec=-99):
    """automatic str(round(x))

    :param x: number to format
    :param force_dec: explicit decimal count; the ``-99`` sentinel means
        "pick the precision automatically from the magnitude of x"
    """
    if force_dec != -99:
        return str(round(x, force_dec))
    # Smaller magnitudes get more decimal places.
    for limit, decimals in ((0.05, 4), (0.5, 3), (2, 2)):
        if x < limit:
            return str(round(x, decimals))
    return str(round(x, 0))
def warning(cause):
    """Format *cause* as a yellow-on-black ANSI terminal warning line."""
    ansi_yellow_on_black = "\033[1;33;40m [!] "
    ansi_reset = " \033[0m"
    return ansi_yellow_on_black + cause + ansi_reset