def nCkarray(*k_values):
    """Calculate nCk on a series of k values."""
    result = 1
    for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):
        result = (result * i) // j
    return result
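# Usage sketch (assuming nCkarray as defined above): with k_values (2, 3) the
# result is the multinomial coefficient (2+3)! / (2! * 3!) = 10.
assert nCkarray(2, 3) == 10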
def mag_squared(x):
    """
    Return the squared magnitude of ``x``.

    .. note::

        If ``x`` is an uncertain number, the magnitude squared is returned
        as an uncertain real number, otherwise ``abs(x)**2`` is returned.

    """
    try:
        return x._mag_squared()
    except AttributeError:
        return abs(x)**2
def url_param_dict_to_list(url_items_dict):
    """Turn this dictionary into a param string for the URL"""
    params_list = ""
    for key, value in url_items_dict.items():
        if key != "page":
            params_list += "&%s=%s" % (key, value)
    return params_list
def macAddressToString(mac, separator=':'):
    """
    Converts a MAC address from bytes into a string.

    :param mac: bytes
    :param separator: str
    :return: str
    """
    return separator.join('%02X' % b for b in mac)
def impact(data, impact):
    """Obtain impact metrics, prioritising CVSS3 if available.

    Args:
        data (dict): Empty CVE entry object created using createCVEEntry
        impact (dict): Impact Object parsed from NVD JSON

    Returns:
        dict: Data object with fields filled in using CVE Object
    """
    if "baseMetricV2" in impact and "baseMetricV3" not in impact:
        data["vector"] = impact["baseMetricV2"]["cvssV2"]["vectorString"]
        data["attackVector"] = impact["baseMetricV2"]["cvssV2"]["accessVector"]
        data["attackComplexity"] = impact["baseMetricV2"]["cvssV2"]["accessComplexity"]
        if "userInteractionRequired" in impact["baseMetricV2"]:
            data["userInteraction"] = impact["baseMetricV2"]["userInteractionRequired"]
        data["confidentialityImpact"] = impact["baseMetricV2"]["cvssV2"][
            "confidentialityImpact"
        ]
        data["integrityImpact"] = impact["baseMetricV2"]["cvssV2"]["integrityImpact"]
        data["availabilityImpact"] = impact["baseMetricV2"]["cvssV2"][
            "availabilityImpact"
        ]
        data["baseScore"] = impact["baseMetricV2"]["cvssV2"]["baseScore"]
        data["severity"] = impact["baseMetricV2"]["severity"]
        if "exploitabilityScore" in impact["baseMetricV2"]:
            data["exploitabilityScore"] = impact["baseMetricV2"]["exploitabilityScore"]
        if "impactScore" in impact["baseMetricV2"]:
            data["impactScore"] = impact["baseMetricV2"]["impactScore"]
    if "baseMetricV3" in impact:
        data["vector"] = impact["baseMetricV3"]["cvssV3"]["vectorString"]
        data["attackVector"] = impact["baseMetricV3"]["cvssV3"]["attackVector"]
        data["attackComplexity"] = impact["baseMetricV3"]["cvssV3"]["attackComplexity"]
        data["privilegeRequired"] = impact["baseMetricV3"]["cvssV3"][
            "privilegesRequired"
        ]
        data["userInteraction"] = impact["baseMetricV3"]["cvssV3"]["userInteraction"]
        data["scope"] = impact["baseMetricV3"]["cvssV3"]["scope"]
        data["confidentialityImpact"] = impact["baseMetricV3"]["cvssV3"][
            "confidentialityImpact"
        ]
        data["integrityImpact"] = impact["baseMetricV3"]["cvssV3"]["integrityImpact"]
        data["availabilityImpact"] = impact["baseMetricV3"]["cvssV3"][
            "availabilityImpact"
        ]
        data["baseScore"] = impact["baseMetricV3"]["cvssV3"]["baseScore"]
        data["severity"] = impact["baseMetricV3"]["cvssV3"]["baseSeverity"]
        if "exploitabilityScore" in impact["baseMetricV3"]:
            data["exploitabilityScore"] = impact["baseMetricV3"]["exploitabilityScore"]
        if "impactScore" in impact["baseMetricV3"]:
            data["impactScore"] = impact["baseMetricV3"]["impactScore"]
    return data
def parse_command(line):
    """Parse a command from line."""
    args = []
    kwargs = {}
    command, *parts = line.split(' ')
    for part in parts:
        if '=' in part:
            # split only on the first '=' so values may contain '='
            k, v = part.split('=', 1)
            kwargs[k] = v
        else:
            args.append(part)
    return command, args, kwargs
def get_undo_pod(module, array):
    """Return Undo Pod or None"""
    try:
        return array.get_pod(module.params['name'] + '.undo-demote',
                             pending_only=True)
    except Exception:
        return None
def cleanup(code):
    """Remove anything that is not a brainfuck command from a string."""
    return ''.join(
        filter(lambda x: x in ['.', ',', '[', ']', '<', '>', '+', '-'], code))
def application_layer(data, parse=False):
    """Adds an application layer header (or, when ``parse`` is True, strips
    it, printing the header). Uses the telnet protocol.
    """
    if parse == False:
        return 'telnet' + '||' + data
    else:
        temp = data.split('||')
        print("Application Layer: " + str(temp[0]))
        return '||'.join(temp[1:])
def pattern_to_regex(pattern):
    """Takes a string containing regular UNIX path wildcards and returns a
    string suitable for matching with regex."""
    pattern = pattern.replace('.', r'\.')
    pattern = pattern.replace('?', r'.')
    pattern = pattern.replace('*', r'.*')
    if pattern.endswith('/'):
        pattern += r'.*'
    elif pattern.endswith('.*'):
        pattern = pattern[:-2]
    pattern += r'(?!.*?/.*?)'
    return pattern
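# A quick sanity check of the intended use (assuming pattern_to_regex above):
import re
assert re.match(pattern_to_regex('*.txt'), 'notes.txt') is not None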
def _is_response(duck):
    """
    Returns True if ``duck`` has the attributes of a Requests response
    instance that this module uses, otherwise False.
    """
    return hasattr(duck, 'status_code') and hasattr(duck, 'reason')
def bani(registers, opcodes):
    """bani (bitwise AND immediate) stores into register C the result of the
    bitwise AND of register A and value B."""
    test_result = registers[opcodes[1]] & opcodes[2]
    return test_result
def average_gateset_infidelity(model_a, model_b):
    """
    Average model infidelity

    Parameters
    ----------
    model_a : Model
        The first model.

    model_b : Model
        The second model.

    Returns
    -------
    float
    """
    # B is target model usually but must be "model_b" b/c of decorator coding...
    # TEMPORARILY disabled b/c RB analysis is broken
    #from ..extras.rb import theory as _rbtheory
    return -1.0
def generate_queries_union(uterance_vector):
    """
    Performs a union of the current query with each of the previous turns separately

    uterance_vector - list, queries in the order they were made

    returns - list of queries in union
    """
    if len(uterance_vector) > 1:
        generate_queries = [utterance + " " + uterance_vector[-1]
                            for utterance in uterance_vector[:-1]]
        return generate_queries
    else:
        return uterance_vector
def get_flattened_qconfig_dict(qconfig_dict):
    """ flatten the global, object_type and module_name qconfig
    to the same qconfig_dict so that it can be used by
    propagate_qconfig_ function.
    "module_name_regex" is ignored for now since it's not supported
    in propagate_qconfig_, but it can be fixed later.

    For example:
    Input: {
      "": qconfig,
      "object_type": [
        (torch.add, qconfig)
      ],
      "module_name": [
        ("conv", qconfig)
      ]
    }

    Output: {
      "": qconfig,
      torch.add: qconfig,
      "conv": qconfig
    }
    """
    flattened = dict()
    if '' in qconfig_dict:
        flattened[''] = qconfig_dict['']

    def flatten_key(key):
        if key in qconfig_dict:
            for obj, qconfig in qconfig_dict[key]:
                flattened[obj] = qconfig

    flatten_key('object_type')
    flatten_key('module_name')
    return flattened
def check_new_str(methods, new_dict_def, new_dict_att, old_dict_def, old_dict_att):
    """
    Check if there is new strategy produced by different method.
    :param methods: different meta-strategy solver.
    :param new_dict_def: defender's current str dict.
    :param new_dict_att: attacker's current str dict.
    :param old_dict_def: defender's old str dict.
    :param old_dict_att: attacker's old str dict.
    :return:
    """
    new_att = {}
    new_def = {}
    for method in methods:
        new_def[method] = new_dict_def[method] - old_dict_def[method]
        new_att[method] = new_dict_att[method] - old_dict_att[method]
        if len(new_def[method]) == 0 and len(new_att[method]) == 0:
            print(method + " has not produced a new strategy.")
        else:
            print(method + ":defender's new str is ", new_def[method])
            print(method + ":attacker's new str is ", new_att[method])
    return new_def, new_att
def mix(c1, c2, r=0.5):
    """ Mixes two colors """
    def _mix(m1, m2):
        return m1 * r + m2 * (1-r)
    return tuple(map(lambda x: int(_mix(*x)), zip(c1, c2)))
def GetFirstWord(buff, sep=None):  #{{{
    """ Get the first word string delimited by the supplied separator """
    try:
        return buff.split(sep, 1)[0]
    except IndexError:
        return ""
def value2str(value):
    """
    format a parameter value to string to be inserted into a workflow

    Parameters
    ----------
    value: bool, int, float, list

    Returns
    -------
    str
        the string representation of the value
    """
    if isinstance(value, bool):
        strval = str(value).lower()
    elif isinstance(value, list):
        strval = ','.join(map(str, value))
    elif value is None:
        strval = value
    else:
        strval = str(value)
    return strval
def stripString(inputString):
    """ imported from /astro/sos/da/scisoft/das/daLog/MakeDaDataCheckLogDefs.py """
    delimString = "'"
    delimList = []
    for index in range(len(inputString)):
        if inputString[index] == delimString:
            delimList.append(index)
    outFull = inputString[delimList[0]+1:delimList[-1]]
    outPut = outFull.replace(" ", "")
    return outPut
def measurement_time_convert(time_parameter: str) -> str:
    """
    Converts time to ISO 8601 standard: "20200614094518" -> "2020-06-14T09:45:18Z".

    Parameters:
        time_parameter (str): measurement time received via MQTT

    Returns:
        measurement time on ISO 8601 standard
    """
    # Gets "20200614094518"
    # 20200614 to 2020-06-14T
    iso_timestamp = time_parameter[0:4]+'-' + \
        time_parameter[4:6]+'-'+time_parameter[6:8]+'T'
    # 094518 to 09:45:18Z
    iso_timestamp = iso_timestamp + \
        time_parameter[8:10]+':'+time_parameter[10:12] + \
        ':'+time_parameter[12:]+'Z'
    # Returns '2020-06-14T09:45:18Z'
    return iso_timestamp
def _clean_header_str(value: bytes) -> str:
    """Null-terminates, strips, and removes trailing underscores."""
    return value.split(b'\x00')[0].decode().strip().rstrip('_')
def hk_sp_enabled(bits: list) -> bytes:
    """ First bit should be on the left, bitfield is read from left to right """
    enabled_bits = 0
    bits.reverse()
    for i in range(len(bits)):
        enabled_bits |= bits[i] << i
    return enabled_bits.to_bytes(2, byteorder='big')
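# Illustrative behaviour (assuming hk_sp_enabled above): with 16 flags and only
# the left-most bit set, that bit lands in the most significant position.
# Note that the call reverses the input list in place.
assert hk_sp_enabled([1] + [0] * 15) == b'\x80\x00'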
def displayCommands(data):
    """
    Description : Read Command List and print out all Available functions/parameters
    """
    cmds = list(data.keys())
    for cmd in cmds:
        x = data[cmd]
        if type(x) == str:
            print(f"{cmd} : {x}")
        else:
            funcp = (next(iter(x)))
            print(f"{cmd} : {funcp}")
            for param, val in x[funcp].items():
                print(f"\t\\{param} : {val}")
    return None
def eval_token(token):
    """Converts string token to int, float or str.
    """
    if token.isnumeric():
        return int(token)
    try:
        return float(token)
    except ValueError:
        # float() raises ValueError (not TypeError) for non-numeric strings
        return token
def get_books_by_years(start_year, end_year, books_list):
    """
    Get a dictionary of books and their published year based on a range of years

    Parameters:
        start_year: The lower bound of the search range.
        end_year: The upper bound of the search range.
        books_list: The list of books to search in

    Returns:
        books_with_years: A dictionary of books with their published year.
    """
    print("You search for books published from " + str(start_year) + " to " + str(end_year))
    books_with_years = {}
    for book in books_list:
        if book["published_year"] >= int(start_year) and book["published_year"] <= int(end_year):
            books_with_years[book["title"]] = book["published_year"]
    return books_with_years
def is_permutation(inputs):
    """Return True if strings in list are permutations of each other."""
    if len(inputs) < 2:
        return False
    identity_of_inputs = set()
    for item in inputs:
        item = str(item)
        if len(item) < 2:
            return False
        item = sorted(list(item))
        identity_of_inputs.add(str(item))
    return len(identity_of_inputs) == 1
def to_string(x, maxsize=None):
    """Convert given value to a string.

    Args:
        x (Any): The value to convert
        maxsize (int, optional): The maximum length of the string. Defaults to None.

    Raises:
        ValueError: String longer than specified maximum length.

    Returns:
        str: The converted value
    """
    x = str(x)
    if maxsize and len(x) > int(maxsize):
        raise ValueError("String %s is longer than " % x +
                         "allowed maximum size of %s" % maxsize)
    return x
def find_minimum(diff):
    """ Find unaccessed track which is at minimum distance from head """
    index = -1
    minimum = 999999999
    for i in range(len(diff)):
        if (not diff[i][1] and minimum > diff[i][0]):
            minimum = diff[i][0]
            index = i
    return index
def _safe_decr(line_num):
    """
    Return @line_num decremented by 1 if @line_num is not None, else None.
    """
    if line_num is not None:
        return line_num - 1
def is_int_type_suspicious_score(confidence_score, params):
    """ determine if integer type confidence score is suspicious in reputation_params """
    return params['override_confidence_score_suspicious_threshold'] and isinstance(confidence_score, int) and int(
        params['override_confidence_score_suspicious_threshold']) <= confidence_score
def _structure_attachments_to_add(_attachment_count):
    """This function formats the JSON for the ``attachments_to_add`` field when updating existing messages.

    .. versionadded:: 2.8.0

    :param _attachment_count: The number of attachments being added
    :type _attachment_count: int
    :returns: The properly formatted JSON data as a dictionary
    :raises: :py:exc:`TypeError`
    """
    _attachments = []
    if _attachment_count > 0:
        for _count in range(1, (_attachment_count + 1)):
            _attachment = {
                "type": "attachment",
                "field": f"attachment{_count}"
            }
            _attachments.append(_attachment)
    return {"attachments_to_add": _attachments}
def adj_r2(r2, n, p):
    """
    Calculates the adjusted R^2 regression metric

    Params:
        r2: The unadjusted r2
        n: Number of data points
        p: number of features
    """
    adj_r2 = 1 - (1 - r2) * (n - 1) / (n - p - 1)
    return adj_r2
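# Worked example (assuming adj_r2 above): r2 = 0.8 with n = 100 samples and
# p = 5 features gives 1 - 0.2 * 99 / 94, about 0.7894.
assert abs(adj_r2(0.8, 100, 5) - 0.7894) < 1e-3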
def validate_strip_right_slash(value):
    """Strips the right slash"""
    if value:
        return None, value.rstrip("/")
    return None, value
def seg(h, m, s):
    """Convert a quantity of hours, minutes and seconds to seconds only."""
    h1 = h * 3600
    m1 = m * 60
    return h1 + m1 + s
def one_hot(length, current):
    """ Standard one hot encoding.

    >>> one_hot(length=3,current=1)
    [0, 1, 0]
    """
    assert length > current
    assert current > -1
    code = [0] * length
    code[current] = 1
    return code
def newton_raphson(fp, fpp, xi, epsilon=1e-5, max=100):
    """ newton_raphson( fp , fpp , xi , epsilon , max ).

    Finds the minimum of a function (f) by applying the Newton-Raphson
    root-finding method to its first derivative.

    Parameters:
        fp (function): the first derivative of the function to minimize
        fpp (function): the second derivative of the function to minimize
        xi (float): the starting point for the iterative process
        epsilon (float): very small number
        max (int): max number of iterations

    Returns:
        x: optimum value
    """
    k = 0
    x = xi
    fpx = fp(x)
    fppx = fpp(x)
    while abs(fpx) > epsilon and k < max:
        x = x - fpx/fppx
        fpx = fp(x)
        # inside the loop the second derivative is approximated numerically
        fppx = (fp(x+epsilon)-fpx)/epsilon
        k += 1
    if k == max:
        print("Error")
    else:
        return x
def IsDisk(htypeText):
    """Simple function to take morphological type code from Buta+2015 and
    determine if it refers to a disk galaxy (no Sm or Im)
    """
    # simple way to get only proper disks: anything with "I" or "m" in text
    if (htypeText.find("I") >= 0) or (htypeText.find("m") >= 0):
        return False
    else:
        return True
def collide(ri, vi, rj, vj, box):
    """Implements collision dynamics, updating the velocities."""
    import numpy as np

    # The colliding pair (i,j) is assumed to be in contact already
    rij = ri - rj
    rij = rij - np.rint(rij)  # Separation vector
    rij = rij * box           # Now in sigma=1 units
    vij = vi - vj             # Relative velocity

    factor = np.dot(rij, vij)
    vij = -factor * rij

    vi = vi + vij
    vj = vj - vij
    virial = np.dot(vij, rij) / 3.0

    return vi, vj, virial
def vtoz(v, w, g, n):
    """
    Calculates the dimensionless radius
    Input:
    v - Dimensionless velocity
    w - Ambient density power law index
    g - Adiabatic index
    n - Geometry (1 - plane, 2 - cylinderical, 3 - spherical)
    """
    return (4.**(1./(2. + n - w))*((-1. + g)/(1. + g))**((-1. + g)/(2. - n + g*(-2. + \
        w)))*(2. + n - (2. + (-1. + g)*n)*v - w)**((g**2.*(n**2. + (-2. + w)**2.) + \
        2.*(-2. + n)*(2.*n - w) + g*(-4. - 3.*n**2. - 2.*n*(-4. + w) + w**2.))/((2. + \
        (-1. + g)*n)*(2. - n + g*(-2. + w))*(2. + n - w))))/(((1. + g)*v)**(2./(2. + \
        n - w))*(-1. + g*v)**((-1. + g)/(2. - n + g*(-2. + w)))*(-((2. - 3.*n + w + \
        g*(-2. + n + w))/(1. + g)))**((g**2.*(n**2. + (-2. + w)**2.) + 2.*(-2. + \
        n)*(2.*n - w) + g*(-4. - 3.*n**2. - 2.*n*(-4. + w) + w**2.))/((2. + (-1. + \
        g)*n)*(2. - n + g*(-2. + w))*(2. + n - w))))
def StringToRawPercent(string):
    """Convert a string to a raw percentage value.

    Args:
        string: the percentage, with '%' on the end.

    Returns:
        A floating-point number, holding the percentage value.

    Raises:
        ValueError, if the string can't be read as a percentage.
    """
    if len(string) <= 1:
        raise ValueError("String '%s' too short to be percentage." % string)
    if string[-1] != '%':
        raise ValueError("Percentage '%s' must end with '%%'" % string)
    # This will raise a ValueError if it can't convert the string to a float.
    val = float(string[:-1])
    if val < 0.0 or val > 100.0:
        raise ValueError('Quantity %s is not a valid percentage' % val)
    return val
def parse_num_seq(string):
    """Convert the number sequence to list of numbers.

    Args:
        string (str): The input string to be parsed.

    Returns:
        list: A list of integer numbers.
    """
    lst = []
    g1 = string.strip().split(',')
    for rec in g1:
        rec = rec.strip()
        if '-' in rec:
            g2 = rec.split('-')
            n1 = int(g2[0])
            n2 = int(g2[1])
            for n in range(n1, n2+1):
                lst.append(n)
        elif rec.isdigit():
            lst.append(int(rec))
        else:
            continue
    return lst
def deparameterize(dataset):
    """Delete all variables and formulas from the dataset."""
    if 'parameters' in dataset:
        dataset['parameters'] = []
    for exc in dataset['exchanges']:
        for field in ('formula', 'variable'):
            if field in exc:
                del exc[field]
            if 'production volume' in exc:
                if field in exc['production volume']:
                    del exc['production volume'][field]
        if 'properties' in exc:
            del exc['properties']
    return dataset
def type_name(value):
    """return pretty-printed string containing name of value's type"""
    cls = value.__class__
    if cls.__module__ and cls.__module__ not in ["__builtin__", "builtins"]:
        return "%s.%s" % (cls.__module__, cls.__name__)
    elif value is None:
        return 'None'
    else:
        return cls.__name__
def all_keys_true(d, keys):
    """Check for values set to True in a dict"""
    if isinstance(keys, str):
        keys = (keys,)
    for k in keys:
        if d.get(k, None) is not True:
            return False
    return True
def nu_factor(nu, nu_pivot, alpha):
    """
    Frequency rescaling factor, depending on the spectral index.

    Parameters
    ----------
    nu : frequency [GHz]
    nu_pivot : pivot (i.e. reference) frequency [GHz]
    alpha : spectral index
    """
    return (nu/nu_pivot)**-alpha
def tarjan(nodes, edge_functor):
    """
    Returns a set of "Starting" non terminals which have at least one
    production containing left recursion.
    """
    def strongconnect(currNode, index, indexes, lowlink, stack):
        indexes[currNode] = index
        lowlink[currNode] = index
        index = index + 1
        stack.insert(0, currNode)

        # consider all rules of currNode which start with a non term
        for nextNode in edge_functor(currNode):
            if nextNode not in indexes:
                # not yet been visited so recurse on it
                index, _ = strongconnect(nextNode, index, indexes, lowlink, stack)
                lowlink[currNode] = min(lowlink[currNode], lowlink[nextNode])
            elif nextNode in stack:
                # successor is in the stack so we are good
                lowlink[currNode] = min(lowlink[currNode], lowlink[nextNode])

        scc = []
        if lowlink[currNode] == indexes[currNode]:
            # start a new strongly connected component
            while True:
                nextNT = stack.pop(0)
                scc.append(nextNT)
                if nextNT == currNode:
                    break
        return index, scc

    out = []
    index = 0
    indexes = {}
    lowlink = {}
    stack = []
    for currNode in nodes:
        if currNode not in indexes:
            index, scc = strongconnect(currNode, index, indexes, lowlink, stack)
            out.append(scc)
    return out
def new_polygon(biggon, ratio):
    """
    Given a polygon (biggon), this function returns the coordinates of the
    smaller polygon whose corners split the edges of the biggon by the
    given ratio.
    """
    smallgon = []
    L = len(biggon)
    for i in range(L):
        new_vertex = (biggon[i][0] * ratio + biggon[(i+1) % L][0] * (1-ratio),
                      biggon[i][1] * ratio + biggon[(i+1) % L][1] * (1-ratio),
                      )
        smallgon.append(new_vertex)
    return tuple(smallgon)
def rgba2hex(rgba):
    """ Convert rgba tuple (e.g. (0.5, 0.4, 0.2, 1.0)) to hex code """
    hx = ''
    for f in rgba[:3]:
        s = hex(int(f*255))[2:]
        if len(s) < 2:
            s = '0'+s
        hx += s
    return "#"+hx
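# Quick check (assuming rgba2hex above): pure red with full alpha.
assert rgba2hex((1.0, 0.0, 0.0, 1.0)) == '#ff0000'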
def isnumber(x):
    """Is x a number?"""
    return hasattr(x, '__int__')
def ipAddrToDecimal(ipAddrStr: str) -> int:
    """ Convert IP address string to number. """
    if ":" in ipAddrStr:
        """ IPv6 """
        pos = ipAddrStr.find("/")
        if pos >= 0:
            ipv6AddrStr = ipAddrStr[:pos]
        else:
            ipv6AddrStr = ipAddrStr
        hexs = [0]*8  # eight 16-bit groups in an IPv6 address
        flds = ipv6AddrStr.split(":")
        index = 0
        for i in range(0, len(flds)):
            if flds[i] != "":
                hexs[index] = int(flds[i], 16)
                index += 1
            else:
                break
        index = len(hexs) - 1
        for i in range(len(flds) - 1, -1, -1):
            if flds[i] != "":
                hexs[index] = int(flds[i], 16)
                index -= 1
            else:
                break
        res = 0
        for hex in hexs:
            res <<= 16
            res += hex
        """
        for hex in hexs:
            print("{0:04x} ".format(hex), end = "")
        print()
        """
        return res
    else:
        """ IPv4 """
        pos = ipAddrStr.find("/")
        if pos > 0:
            ipaddr = ipAddrStr[:pos]
        else:
            ipaddr = ipAddrStr
        flds = ipaddr.split(".")
        if len(flds) > 4:
            print("ipAddrToDecimal() Error!!")
            print(ipAddrStr)
            return -1
        elif len(flds) == 4:
            res = 0
            for fld in flds:
                res <<= 8
                res += int(fld)
        else:
            for _ in range(len(flds), 4):
                flds.append("0")
            res = 0
            for fld in flds:
                res <<= 8
                res += int(fld)
        return res
def verMod11(cprNumbStr):
    """Perform modulus 11 check for validity of CPR-number.

    Input:  cprNumbStr, str[10], holds a full CPR number
    Output: True/False, logical, returns failure or success of check
    """
    if len(cprNumbStr) != 10:
        raise ValueError("CPR-number to be validated must be 10 ciphers.")
    sumation = 0
    cntrlVect = [4, 3, 2, 7, 6, 5, 4, 3, 2, 1]
    for index in range(len(cntrlVect)):
        sumation += int(cprNumbStr[index])*cntrlVect[index]
    rem = sumation % 11
    if rem == 0:
        return True
    else:
        return False
def unique(values):
    """Returns list of unique entries in values."""
    unique = []
    for value in values:
        if value not in unique:
            unique.append(value)
    return unique
def select_utxo(utxos, sum):
    """Return a list of utxo sum up to the specified amount.

    Args:
        utxos: A list of dict with following format.
            [{
                txid: ''
                value: 0,
                vout: 0,
                scriptPubKey: '',
            },
            {
                txid: ''
                value: 0,
                vout: 0,
                scriptPubKey: '',
            }]
        sum: The sum of the returned utxos' total value.

    Returns:
        A list of utxo with value add up to `sum`. If the given utxo can't
        add up to `sum`, an empty list is returned. This function returns
        as many utxo as possible, that is, utxo with small value will get
        picked first.
    """
    if not utxos:
        return []
    utxos = sorted(utxos, key=lambda utxo: utxo['value'])
    value = 0
    for i, utxo in enumerate(utxos):
        value += utxo['value']
        if value >= sum:
            return utxos[:i + 1]
    return []
def max_sequence_length_from_log(log):
    """
    Returns length of the longest sequence in the event log.

    :param log: event log.
    :return: max_seq_length.
    """
    max_seq_length = 0
    for sequence in log:
        max_seq_length_temp = 0
        for activity_ in sequence:
            max_seq_length_temp += 1
        if max_seq_length_temp > max_seq_length:
            max_seq_length = max_seq_length_temp
    return max_seq_length
def insert_interval(intervals, new_interval):
    """Return a mutually exclusive list of intervals after inserting new_interval

    Params:
        intervals: List(Intervals)
        new_interval: Interval
    Return:
        List(Interval)
    """
    length = len(intervals)
    if length < 1:
        return [new_interval]
    i, start, end, merged = 0, new_interval.start, new_interval.end, []
    # keep every interval that ends before the new one starts
    while i < length and intervals[i].end < start:
        merged.append(intervals[i])
        i += 1
    # merge every interval that overlaps the new one
    while i < length and intervals[i].start <= new_interval.end:
        new_interval.start = min(new_interval.start, intervals[i].start)
        new_interval.end = max(new_interval.end, intervals[i].end)
        i += 1
    merged.append(new_interval)
    while i < length:
        merged.append(intervals[i])
        i += 1
    return merged
def total_intersection_size(fabric_cuts):
    """
    Determine the total size of the intersection of a list of fabric cuts.

    :param fabric_cuts: the list of fabric cuts
    :return: the total size of the intersection

    >>> fabric_cuts = [FabricCut('#1 @ 1,3: 4x4'), FabricCut('#2 @ 3,1: 4x4'), FabricCut('#3 @ 5,5: 2x2'), FabricCut('#4 @ 3,4: 2x2')]
    >>> total_intersection_size(fabric_cuts)
    6
    """
    s = set()
    for i in range(len(fabric_cuts) - 1):
        for j in range(i+1, len(fabric_cuts)):
            s.update(fabric_cuts[i].intersection(fabric_cuts[j]))
    return len(s)
def pade_21_lsq(params, k, ksq, lmbda, sigma):
    """ model to fit f(k[i]) to lmbda[i]
        ksq = k**2 is computed only once
        params: [lambda0, alpha, beta]
        returns the weighted residual (model(k) - lmbda) * sigma
        For details see DOI: 10.1140/epjd/e2016-70133-6
    """
    l0, a, b = params
    A, B = a**2, b**2
    TA = 2*A
    A2B = A*A + B
    f1 = ksq + TA*k + A2B
    den = A2B + TA*k
    f = l0 * f1 / den
    return (f - lmbda)*sigma
def str_dist(new, original, lowerize=True):
    """Measures difference between two strings"""
    if lowerize:
        new = new.lower()
        original = original.lower()
    len_diff = abs(len(new) - len(original))
    length = min(len(new), len(original))
    for i in range(length):
        len_diff += not(new[i] == original[i])
    return len_diff
def unlist(value):
    """
    If given value is a list, it returns the first list entry,
    otherwise it returns the value.
    """
    return value[0] if isinstance(value, list) else value
def get_mention(user):
    """ Formatting for mentions. """
    return '<@{user}>'.format(user=user)
def single_letter_count(word, letter):
    """How many times does letter appear in word (case-insensitively)?
    """
    # lower-case both sides so the count is actually case-insensitive
    return word.lower().count(letter.lower())
def func_name(x, short=False):
    """Return function name of `x` (if defined) else the `type(x)`.
    If short is True and there is a shorter alias for the result,
    return the alias.

    Examples
    ========

    >>> from sympy.utilities.misc import func_name
    >>> from sympy import Matrix
    >>> from sympy.abc import x
    >>> func_name(Matrix.eye(3))
    'MutableDenseMatrix'
    >>> func_name(x < 1)
    'StrictLessThan'
    >>> func_name(x < 1, short=True)
    'Lt'

    See Also
    ========
    sympy.core.compatibility get_function_name
    """
    alias = {
        'GreaterThan': 'Ge',
        'StrictGreaterThan': 'Gt',
        'LessThan': 'Le',
        'StrictLessThan': 'Lt',
        'Equality': 'Eq',
        'Unequality': 'Ne',
    }
    typ = type(x)
    if str(typ).startswith("<type '"):
        typ = str(typ).split("'")[1].split("'")[0]
    elif str(typ).startswith("<class '"):
        typ = str(typ).split("'")[1].split("'")[0]
    rv = getattr(getattr(x, 'func', x), '__name__', typ)
    if '.' in rv:
        rv = rv.split('.')[-1]
    if short:
        rv = alias.get(rv, rv)
    return rv
def count_unique(items):
    """This takes a list and returns a sorted list of tuples with a count of each unique item in the list.

    Example 1:
        count_unique(['a','b','c','a','c','c','a','c','c'])
        returns: [(5,'c'), (3,'a'), (1,'b')]

    Example 2 -- get the most frequent item in a list:
        count_unique(['a','b','c','a','c','c','a','c','c'])[0][1]
        returns: 'c'
    """
    stats = {}
    for i in items:
        if i in stats:
            stats[i] = stats[i] + 1
        else:
            stats[i] = 1
    stats = [(v, k) for k, v in stats.items()]
    stats.sort()
    stats.reverse()
    return stats
def str2bool(v):
    """ Convert a string into a boolean """
    return v and str(v.lower()) == "true"
def update_parameters(params, grads, alpha):
    """
    Updates model parameters using gradient descent.

    Arguments:
    params -- dictionary containing model parameters
    grads -- dictionary with gradients, output of L_model_backward()
    alpha -- learning rate

    Returns:
    params -- dictionary with updated parameters
              params['w' + str(l)] = ...
              params['b' + str(l)] = ...
    """
    n_layers = len(params) // 2
    for i in range(n_layers):
        params['w%s' % (i+1)] = (
            params['w%s' % (i+1)] - alpha * grads['dw%s' % (i+1)])
        params['b%s' % (i+1)] = (
            params['b%s' % (i+1)] - alpha * grads['db%s' % (i+1)])
    return params
def validate_project_element_name(name, isdir=False):
    """Validate name for a test, page or suite (or folders).
    `name` must be a relative dot path from the base element folder.
    """
    errors = []
    parts = name.split('.')
    last_part = parts.pop()
    for part in parts:
        if len(part) == 0:
            errors.append('Directory name cannot be empty')
            break
        elif len(part) > 150:
            errors.append('Maximum name length is 150 characters')
            break
    if len(last_part) == 0:
        if isdir:
            errors.append('Folder name cannot be empty')
        else:
            errors.append('File name cannot be empty')
    elif len(last_part) > 150:
        errors.append('Maximum name length is 150 characters')
    for c in name:
        if not c.isalnum() and c not in ['_', '.']:
            errors.append('Only letters, numbers and underscores are allowed')
            break
    return errors
def sum(arg):
    """
    sum() takes an iterable (a list, tuple, or set) and adds the values together:
    """
    total = 0
    for val in arg:
        total += val
    return total
def init_field(height=20, width=20):
    """Creates a field by filling a nested list with zeros."""
    field = []
    for y in range(height):
        row = []
        for x in range(width):
            row.append(0)
        field.append(row)
    return field
def words_to_word_ids(data=[], word_to_id={}, unk_key='UNK'):
    """Given a context (words) in list format and the vocabulary,
    Returns a list of IDs to represent the context.

    Parameters
    ----------
    data : a list of string or byte
        the context in list format
    word_to_id : a dictionary
        mapping words to unique IDs.
    unk_key : a string
        Unknown words = unk_key

    Returns
    --------
    A list of IDs to represent the context.

    Examples
    --------
    >>> words = tl.files.load_matt_mahoney_text8_dataset()
    >>> vocabulary_size = 50000
    >>> data, count, dictionary, reverse_dictionary = \
    ...         tl.nlp.build_words_dataset(words, vocabulary_size, True)
    >>> context = [b'hello', b'how', b'are', b'you']
    >>> ids = tl.nlp.words_to_word_ids(words, dictionary)
    >>> context = tl.nlp.word_ids_to_words(ids, reverse_dictionary)
    >>> print(ids)
    ... [6434, 311, 26, 207]
    >>> print(context)
    ... [b'hello', b'how', b'are', b'you']

    Code References
    ---------------
    - `tensorflow.models.rnn.ptb.reader <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/models/rnn/ptb>`_
    """
    # if isinstance(data[0], six.string_types):
    #     print(type(data[0]))
    #     # exit()
    #     print(data[0])
    #     print(word_to_id)
    #     return [word_to_id[str(word)] for word in data]
    # else:

    word_ids = []
    for word in data:
        if word_to_id.get(word) is not None:
            word_ids.append(word_to_id[word])
        else:
            word_ids.append(word_to_id[unk_key])
    return word_ids
    # return [word_to_id[word] for word in data]  # this one
    # if isinstance(data[0], str):
    #     # print('is a string object')
    #     return [word_to_id[word] for word in data]
    # else:#if isinstance(s, bytes):
    #     # print('is a unicode object')
    #     # print(data[0])
    #     return [word_to_id[str(word)] f
def _get_linkage_function(linkage):
    """
    >>> "f_linkage" in str(_get_linkage_function(0.5))
    True
    >>> "i_linkage" in str(_get_linkage_function(2))
    True
    >>> any is _get_linkage_function('single')
    True
    >>> all is _get_linkage_function('complete')
    True

    >>> ff = _get_linkage_function(0.5)
    >>> ff([True, False, False])
    False
    >>> ff([True, True, False])
    True

    >>> fi = _get_linkage_function(3)
    >>> fi([True, False, False])
    False
    >>> fi([True, True, False])
    False
    >>> fi([True, True, True]) and fi([True] * 10)
    True

    >>> _get_linkage_function('complete') == all == _get_linkage_function(1.0)
    True
    >>> _get_linkage_function(1) == any
    True
    """
    if linkage == 'single':
        return any
    if linkage == 'complete':
        return all

    if isinstance(linkage, float):
        assert 0 < linkage <= 1
        if linkage == 1:
            return all

        def f_linkage(bools, p=linkage):
            v = list(bools)
            if len(v) == 0:
                return False
            return (sum(v) / float(len(v))) >= p
        return f_linkage

    if isinstance(linkage, int):
        assert linkage >= 1
        if linkage == 1:
            return any

        def i_linkage(bools, n=linkage):
            v = list(bools)
            return sum(v) >= min(len(v), n)
        return i_linkage
def msg_get_hw_id(raw_json):
    """ extract hardware ID from JSON """
    return raw_json['hardware_serial']
def custom_metric(metric_type):
    """Generate custom metric name.

    :param metric_type: name of the metric.
    :type metric_type: str
    :returns: Stackdriver Monitoring custom metric name.
    :rtype: str
    """
    return "custom.googleapis.com/{}".format(metric_type)
def checkForUppercase(alphabet):
    """This method takes specified alphabet and checks if it supports upper case.

    Args:
        alphabet (list): List of chars from specified alphabet.

    Returns:
        bool: True if alphabet supports upper case, otherwise False.
    """
    for char in alphabet:
        if char.upper().isupper():
            return True
    return False
def perpetuity(A, r):
    """
    A = annual payment
    r = discount rate
    returns present value
    """
    return A/r
def flatten_list_of_lists(some_list, remove_duplicates=False, sort=False):
    """
    Convert a list of lists into a list of all values

    :param some_list: a list such that each value is a list
    :type some_list: list
    :param remove_duplicates: if True, return a unique list, otherwise keep duplicated values
    :type remove_duplicates: bool
    :param sort: if True, sort the list
    :type sort: bool
    :return: a new object containing all values in the provided list
    """
    data = [item for sublist in some_list for item in sublist]
    if remove_duplicates:
        if sort:
            # sort the de-duplicated values so the sort flag is honoured
            return sorted(set(data))
        else:
            ans = []
            for value in data:
                if value not in ans:
                    ans.append(value)
            return ans
    elif sort:
        return sorted(data)
    return data
def is_position_valid(position, password):
    """Check if position is valid for password."""
    return 0 <= position < len(password) and password[position] is None
def is_in(elt, seq):
    """Similar to (elt in seq), but compares with 'is', not '=='."""
    return any(x is elt for x in seq)
def valid_blockname(name):
    """Tests if a 5-character string is a valid blockname.

    Allows names with the first three characters either letters, numbers,
    spaces or punctuation, the fourth character a digit or a space and the
    last character a digit.
    """
    from string import ascii_letters, digits, punctuation
    digit_space = digits + ' '
    letter_digit_space_punct = ascii_letters + digit_space + punctuation
    return all([s in letter_digit_space_punct for s in name[0:3]]) and \
        (name[3] in digit_space) and (name[4] in digits)
def kumaraswamy(a, b, x):
    """ CDF of the Kumaraswamy distribution """
    return 1.0-(1.0-x**a)**b
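# Sanity check (assuming kumaraswamy above): with a = b = 1 the CDF reduces to
# the uniform CDF, F(x) = x.
assert abs(kumaraswamy(1, 1, 0.3) - 0.3) < 1e-12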
def metrics2str(mean_scores: dict, scores: dict, labels: list):
    """ Converts given metrics to a string for display """
    metric_keys = list(scores.keys())
    mean_keys = list(mean_scores.keys())

    # output format
    out_str = ""
    for mean_metrics, metric in zip(mean_keys, metric_keys):
        out_str += f"{metric} scores: \n{'-' * 20}\n"
        out_str += f"mean: {mean_scores[mean_metrics].item():.3f} "
        for j, label in enumerate(labels):
            if len(labels) > 1:
                out_str += f"{label}: {scores[metric][j].item():.3f} "
            else:
                out_str += f"{label}: {scores[metric].item():.3f} "
        out_str += "\n\n"
    return out_str
def numara_aparitii(opt, patt, repw, teorema):
    """Count the matches, optionally replace them in the theorem, and record
    where they occur."""
    contor = 0
    i = 0
    copie = str(teorema)
    linii = []
    liniecurenta = 0
    inceplinie = 0
    while i+len(patt) <= len(copie):
        turn = 0
        if copie[i] == '\n':
            liniecurenta += 1
            inceplinie = -1
        if 'i' in opt:
            if copie[i:i+len(patt)].lower() == patt.lower():
                if 'e' in opt:
                    # whole-word match: the boundary check must also treat a
                    # match ending exactly at the end of the string as valid
                    if ((i-1 < 0 or not copie[i-1].isalnum()) and
                            (i+len(patt) >= len(copie) or not copie[i+len(patt)].isalnum())):
                        turn += 1
                        linii.append((liniecurenta, inceplinie, inceplinie+len(repw)))
                else:
                    turn += 1
                    linii.append((liniecurenta, inceplinie, inceplinie+len(repw)))
        else:
            if copie[i:i+len(patt)] == patt:
                if 'e' in opt:
                    if ((i-1 < 0 or not copie[i-1].isalnum()) and
                            (i+len(patt) >= len(copie) or not copie[i+len(patt)].isalnum())):
                        turn += 1
                        linii.append((liniecurenta, inceplinie, inceplinie+len(repw)))
                else:
                    turn += 1
                    linii.append((liniecurenta, inceplinie, inceplinie+len(repw)))
        if 's' in opt and turn > 0:
            copie = copie[:i]+repw+copie[i+len(patt):]
            i = i+len(repw)-1
            inceplinie += (len(repw)-1)
        contor += turn
        i += 1
        inceplinie += 1
    return (contor, copie, linii)
def flatten(lol):
    """
    See http://stackoverflow.com/questions/406121/flattening-a-shallow-list-in-python
    e.g. [['image00', 'image01'], ['image10'], []] -> ['image00', 'image01', 'image10']
    """
    import itertools
    chain = list(itertools.chain(*lol))
    return chain
def rev_grey_code(g):
    """
    Decode grey code: given a grey-coded number ``g``, return the value it
    had before encoding.
    """
    n = 0
    while g:
        n ^= g
        g >>= 1
    return n
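# Round-trip sketch (assuming rev_grey_code above): the standard binary-to-Gray
# encoding is n ^ (n >> 1), and rev_grey_code inverts it.
n = 13
g = n ^ (n >> 1)  # encode: 13 -> 11
assert rev_grey_code(g) == n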
def efficientnet_params(model_name):
    """Get efficientnet params based on model name."""
    params_dict = {
        # (width_coefficient, depth_coefficient, resolution, dropout_rate)
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
    }
    return params_dict[model_name]
def rivers_with_station(stations):
    """Returns a set with the names of rivers that have a monitoring station"""
    rivers = set()
    for station in stations:
        if station.river != None:
            rivers.add(station.river)
    return rivers
def random_map(nmers, vocabulary):
    """ Totally random map, totally unconstrained, totally boring. """
    forward_mapping = dict(zip(nmers, vocabulary))
    backward_mapping = dict(zip(vocabulary, nmers))
    return forward_mapping, backward_mapping
def stations_by_river(stations):
    """This function returns a dictionary that maps river names to a list of stations on that river"""
    rivers = {}
    for station in stations:
        river = station.river
        if river not in rivers:
            rivers[river] = []
        rivers[river].append(station)
    return rivers
def get_trig_val(abs_val: int, max_unit: int, abs_limit: int) -> int:
    """Get the corresponding trigger value to a specific limit.

    This evenly divides the value so that the more you press the trigger,
    the higher the output value.

    abs_val - The current trigger value
    max_unit - The maximum value to remap the trigger value
    abs_limit - The maximum range of the trigger
    """
    inc = abs_limit / max_unit
    return int(abs_val / inc)
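# Worked example (assuming get_trig_val above): remapping a raw reading of 128
# on a 0-255 trigger onto a 0-10 scale gives int(128 / 25.5) == 5.
assert get_trig_val(128, 10, 255) == 5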
def op_encode(opcode, pads, operand):
    """encode a lut instruction code"""
    return ((opcode & 0x3f) << 10) | ((pads & 0x3) << 8) | (operand & 0xff)
def task5(b: float) -> str:
    """
    Function that calculates the area of an equilateral triangle.
    The square root of 3 is taken as 1.732.
    It returns the area as a string with two digits after the decimal point.
    Input: b -> float number
    Output: area of triangle as string such as: "1.23"
    """
    pole = (1.732/2) * b ** 2
    pole_str = f"{pole:.2f}"
    return pole_str
def bytesto(bytes, to, bsize=1024):
    """convert bytes to megabytes, etc.

    sample code:
        print('mb= ' + str(bytesto(314575262000000, 'm')))
    sample output:
        mb= 300002347.946
    """
    a = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6}
    r = float(bytes)
    for i in range(a[to]):
        r = r / bsize
    return r
def q_max_ntu(c_min, temp_hot_in, temp_cold_in):
    """Computes the maximum q value for the NTU method

    Args:
        c_min (int, float): minimum C value for NTU calculations.
        temp_hot_in (int, float): Hot side inlet temperature.
        temp_cold_in (int, float): Cold side inlet temperature.

    Returns:
        int, float: The value of the maximum q value for the NTU method
    """
    return c_min*(temp_hot_in-temp_cold_in)
def db_to_linear_gain(db_value):
    """
    Parameters
    ----------
    db_value : float
        Decibel value

    Examples
    --------
    >>> db_to_linear_gain(db_value=-3)
    0.5011872336272722
    >>> db_to_linear_gain(db_value=10)
    10.0
    """
    return 10 ** (db_value / 10)
def old_filter_nodes(nodes_list, ids=None, subtree=True):
    """Filters the contents of a nodes_list.

    If any of the nodes is in the ids list, the rest of nodes are removed.
    If none is in the ids list we include or exclude the nodes depending
    on the subtree flag.
    """
    if not nodes_list:
        return None
    nodes = nodes_list[:]
    if ids is not None:
        for node in nodes:
            if node.id in ids:
                nodes = [node]
                return nodes
        if not subtree:
            nodes = []
    return nodes
def collapse(board_u):
    """ takes a row/column of the board and collapses it to the left """
    i = 1
    limit = 0
    while i < 4:
        if board_u[i] == 0:
            i += 1
            continue
        up_index = i-1
        curr_index = i
        while up_index >= 0 and board_u[up_index] == 0:
            board_u[up_index] = board_u[curr_index]
            board_u[curr_index] = 0
            up_index -= 1
            curr_index -= 1
        if up_index >= limit and board_u[up_index] == board_u[curr_index]:
            board_u[up_index] *= 2
            board_u[curr_index] = 0
            limit = curr_index
        i += 1
    return board_u
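# Illustrative 2048-style collapse (assuming collapse above): equal tiles merge
# once and everything slides to the left.
assert collapse([2, 2, 0, 4]) == [4, 4, 0, 0]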
def check_8_v1(oe_repos, srcoe_repos):
    """ All repositories must have protected_branches """
    print("All repositories' must have protected_branches")
    errors_found = 0
    for repos, prefix in [(oe_repos, "openeuler/"), (srcoe_repos, "src-openeuler/")]:
        for repo in repos:
            branches = repo.get("protected_branches", [])
            if not branches:
                print("ERROR! {pre}{name} doesn\'t have protected_branches"
                      .format(pre=prefix, name=repo["name"]))
                errors_found += 1
            elif "master" not in branches:
                print("ERROR! master branch in {pre}{name} is not protected"
                      .format(pre=prefix, name=repo["name"]))
                errors_found += 1
    if errors_found == 0:
        print("PASS WITHOUT ISSUES FOUND.")
    return errors_found
def _handle_string(val):
    """
    Replace Comments: and any newline found.
    Input is a cell of type 'string'.
    """
    return val.replace('Comments: ', '').replace('\r\n', ' ')
def array_to_concatenated_string(array):
    """DO NOT MODIFY THIS FUNCTION.

    Turns an array of integers into a concatenated string of integers
    separated by commas. (Inverse of concatenated_string_to_array).
    """
    return ",".join(str(x) for x in array)
def increment_year_month(year, month):
    """Add one month to the received year/month."""
    month += 1
    if month == 13:
        year += 1
        month = 1
    return year, month
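# Rollover example (assuming increment_year_month above):
assert increment_year_month(2020, 12) == (2021, 1)
assert increment_year_month(2020, 6) == (2020, 7)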