content
stringlengths
42
6.51k
def invert_how(how):
    """Swap "left" and "right" join directions.

    Because of the Dask DF usage, `how` has to be inverted in some cases:
    when the current df is joined onto the other (Dask) DF rather than the
    other way around, left becomes right and vice versa.  Any other value
    is returned lower-cased, unchanged.
    """
    lowered = how.lower()
    swap = {"left": "right", "right": "left"}
    return swap.get(lowered, lowered)
def is_overlapping(node2comm):
    """Determine whether a graph partition contains overlapping communities.

    Parameters
    ----------
    node2comm : list of set of nonnegative integers **or** ndarray
        Community structure. A mapping from node IDs [0..NN-1] to
        community IDs [0..NC-1].

    Examples
    --------
    >>> is_overlapping([set([0, 1]), set([1, 2])])
    True
    >>> is_overlapping(np.array([0, 1, 1]))
    False
    """
    # A numpy array maps every node to exactly one community.
    if type(node2comm).__name__ in ['ndarray']:
        return False
    # A plain sequence of numbers is equivalent to an ndarray mapping.
    if len(node2comm) > 0 and type(node2comm[0]) != set:
        return False
    # Overlap exists iff some node belongs to more than one community.
    return any(len(members) > 1 for members in node2comm)
def gen_h1_row(A, num_blocks, block_id):
    """Generate a block row: `A` at the positions adjacent to `block_id`,
    None everywhere else.
    """
    row = []
    for idx in range(num_blocks):
        is_left_neighbour = (idx == block_id - 1 and idx >= 0)
        is_right_neighbour = (idx == block_id + 1 and idx < num_blocks)
        row.append(A if (is_left_neighbour or is_right_neighbour) else None)
    return row
def string_to_bool(value):
    """Convert a boolean-like string to a bool.

    Accepts any casing and surrounding whitespace of "true"
    (e.g. "True", " TRUE "); every other value maps to False.
    The original only matched the exact lower-case string "true".
    """
    return str(value).strip().casefold() == "true"
def density_ratio_across_normal_shock(
        mach_upstream,
        gamma=1.4
):
    """
    Computes the ratio of fluid density across a normal shock.

    Specifically, returns: rho_after_shock / rho_before_shock

    Args:
        mach_upstream: The mach number immediately before the normal shock wave.
        gamma: The ratio of specific heats of the fluid. 1.4 for air.

    Returns:
        rho_after_shock / rho_before_shock
    """
    mach_sq = mach_upstream ** 2
    numerator = (gamma + 1) * mach_sq
    denominator = (gamma - 1) * mach_sq + 2
    return numerator / denominator
def translate_severity(severity: str) -> int:
    """
    Translates Demisto text severity to int severity
    :param severity: Demisto text severity
    :return: Demisto integer severity (0 for unknown labels)
    """
    labels = ('Unknown', 'Low', 'Medium', 'High', 'Critical')
    for code, label in enumerate(labels):
        if severity == label:
            return code
    return 0
def isfloat(x):
    """Return True if `x` can be converted to float, False otherwise.

    https://stackoverflow.com/questions/736043/checking-if-a-string-can-be-converted-to-float-in-python

    Also catches TypeError (e.g. float(None), float([])), which the
    original let propagate despite the docstring's promise.
    """
    try:
        float(x)
        return True
    except (ValueError, TypeError):
        return False
def _ysp_safe_refs(refs): """ Sometimes refs == None, if so return the empty list here. So we don't have to check everywhere. """ if not refs: return [] return refs
def genProvenanceName(params: dict) -> str:
    """Write a provenance record in RDF and return its blank-node name.

    Args:
        params: dict of params to store results; uses the "out" stream,
            increments the "provenanceNumber" counter, and reads the
            "provenance" source string.

    Returns:
        str: name of the provenance blank node
    """
    out = params["out"]
    params["provenanceNumber"] += 1
    label: str = "_:provenance" + str(params["provenanceNumber"])
    out.write("# provenance for data from same naf-file\n")
    out.write(label + " \n")
    out.write(' xl:instance "' + params["provenance"] + '".\n\n')
    return label
def distance_home(x, y):
    """Number of steps to reach home at (0, 0) from (x, y) on this grid.

    >>> distance_home(0, 0)
    0
    >>> distance_home(3, 3)
    3
    >>> distance_home(-3, 3)
    3
    >>> distance_home(0, 8)
    4
    """
    # Distance is symmetric in sign, so work with magnitudes only.
    x, y = abs(x), abs(y)
    steps = 0
    while x != 0 or y != 0:
        if y > x:
            # Move 2 along y for a single step.
            y -= 2
            steps += 1
        elif y < x:
            # NOTE(review): moving 2 along x costs *two* steps here, while
            # the symmetric y-branch above costs one.  The doctests only
            # cover inputs with y >= |x|, so this asymmetry is untested —
            # confirm it is intentional for this grid.
            x -= 2
            steps += 2
        else:
            # Diagonal move reduces both coordinates in one step.
            x -= 1
            y -= 1
            steps += 1
    return steps
def triangle_shape(n):
    """Render a centred triangle of 'x' characters.

    Args:
        n (int): height of the triangle

    Returns:
        str: string representing the triangle
    """
    width = 2 * n - 1
    rows = []
    for level in range(n):
        rows.append(("x" * (2 * level + 1)).center(width, " "))
    return "\n".join(rows)
def lscmp(a, b):
    """
    Compares two strings in a cryptographically safe way:
    Runtime is not affected by length of common prefix, so this is helpful
    against timing attacks.
    ..
        from vital.security import lscmp
        lscmp("ringo", "starr")  # -> False
        lscmp("ringo", "ringo")  # -> True
    ..
    """
    # Count every mismatching position instead of short-circuiting on the
    # first difference; equal only when no mismatches AND equal lengths.
    mismatches = sum(1 for x, y in zip(a, b) if x != y)
    return mismatches == 0 and len(a) == len(b)
def _pick_counters(log_interpretation): """Pick counters from a dictionary possibly containing step and history interpretations.""" for log_type in 'step', 'history': counters = log_interpretation.get(log_type, {}).get('counters') if counters: return counters else: return {}
def fib(n):
    """Return the Fibonacci series of all values strictly below n."""
    series = []
    current, following = 0, 1
    while current < n:
        series.append(current)
        current, following = following, current + following
    return series
def make_dataset(features, pain_index):
    """Construct the X and y data matrices.

    features is a dict with key = pain name, value = all feature vectors
    for that pain source. pain_index is a dict with key = pain name,
    value = intensity.

    In addition to X and y, we return a vector painsources which records
    the pain name for each example.
    """
    X = []
    y = []
    painsources = []
    for pain, vectors in features.items():
        intensity = pain_index[pain]
        for vector in vectors:
            X.append(vector)
            y.append(intensity)
            painsources.append(pain)
    return X, y, painsources
def report(a_accuracy, a_precision, a_recall, a_f1):
    """Build a multi-line metrics report string (one metric per line)."""
    lines = [
        'Accuracy : ' + str(a_accuracy),
        'Precision : ' + str(a_precision),
        'Recall : ' + str(a_recall),
        'F1 : ' + str(a_f1),
    ]
    return "\n".join(lines)
def _make_valid_name(name: str) -> str: """ Transform a string in order to make it a valid Enum name Parameters ----------- name: [:class:`str`] The status code name to make a valid :class:`StatusCode` Returns --------- :class:`str` The name that can be used to get a :class:`StatusCode` """ return name.replace(" ", "_").upper()
def linear_segment(x0, x1, y0, y1, t):
    """Return the linear function interpolating the given points,
    evaluated at t."""
    fraction = (t - x0) / (x1 - x0)
    return y0 + fraction * (y1 - y0)
def reduce_id(id_):
    """Reduce the SsODNet ID to a string with fewer free parameters."""
    cleaned = id_.replace("_(Asteroid)", "")
    for junk in ("_", " "):
        cleaned = cleaned.replace(junk, "")
    return cleaned.lower()
def split_command_line(command_line, escape_char='^'):
    """This splits a command line into a list of arguments. It splits arguments
    on spaces, but handles embedded quotes, doublequotes, and escaped
    characters. It's impossible to do this with a regular expression, so I
    wrote a little state machine to parse the command line.
    """
    arg_list = []
    arg = ''
    # Constants to name the states we can be in.
    state_basic = 0
    state_esc = 1
    state_singlequote = 2
    state_doublequote = 3
    # The state of consuming whitespace between commands.
    state_whitespace = 4
    state = state_basic
    for c in command_line:
        if state == state_basic or state == state_whitespace:
            if c == escape_char:
                # Escape the next character: it is taken literally.
                state = state_esc
            elif c == r"'":
                # Handle single quote
                state = state_singlequote
            elif c == r'"':
                # Handle double quote
                state = state_doublequote
            elif c.isspace():
                # Add arg to arg_list if we aren't in the middle of whitespace.
                if state == state_whitespace:
                    None  # Do nothing.
                else:
                    arg_list.append(arg)
                    arg = ''
                    state = state_whitespace
            else:
                arg = arg + c
                state = state_basic
        elif state == state_esc:
            arg = arg + c
            state = state_basic
        elif state == state_singlequote:
            if c == r"'":
                state = state_basic
            else:
                arg = arg + c
        elif state == state_doublequote:
            if c == r'"':
                state = state_basic
            else:
                arg = arg + c
    # Flush the final argument.  NOTE(review): a trailing *empty* quoted
    # argument ('' or "") is dropped here because arg is still '' —
    # confirm whether empty arguments should be preserved.
    if arg != '':
        arg_list.append(arg)
    return arg_list
def get_properties(finding_counts):
    """Return the Slack color/icon settings for the worst present severity."""
    if finding_counts['CRITICAL'] != 0:
        return {'color': 'danger', 'icon': ':red_circle:'}
    if finding_counts['HIGH'] != 0:
        return {'color': 'warning', 'icon': ':large_orange_diamond:'}
    return {'color': 'good', 'icon': ':green_heart:'}
def generate_another_slug(slug, cycle):
    """Append (or refresh) a cycle-number suffix on a slug.

    Example: slug = 'hello-word', cycle = 1 returns 'hello-word-1'.
    On cycles after the first, the number appended by the previous cycle
    is replaced with the current cycle number.
    """
    if cycle != 1:
        # The slug already carries a number — drop it before re-suffixing.
        slug = "-".join(slug.split("-")[:-1])
    return "%s-%s" % (slug, cycle)
def emap(fn, iterable):
    """eager map because I'm lazy and don't want to type."""
    return [fn(item) for item in iterable]
def potentiometer_to_color(value):
    """Scale the potentiometer values (0-1023) to the colorwheel values (0-255)."""
    fraction = value / 1023
    return fraction * 255
def recommended_winding_exponent(cooling_mode):
    """
    Get recommended winding exponent as per AS 60076.7-2013 Table 5.

    ON/OF-type cooling modes use y = 1.3; anything else (e.g. OD modes
    or unknown strings) uses y = 2.0.
    """
    # BUGFIX: exact membership test.  The original used a substring check
    # (`cooling_mode in s`), which wrongly matched fragments such as
    # "NA" or "FA" — or even the empty string — against the listed modes.
    on_of_modes = {'ONAN', 'ONAF', 'ON', 'OB', 'OFAN', 'OF', 'OFB'}
    return 1.3 if cooling_mode in on_of_modes else 2.0
def sort_012(input_list):
    """
    Given an input array consisting on only 0, 1, and 2, sort the array
    in a single traversal (Dutch-national-flag style partition around 1).

    Args:
        input_list(list): List to be sorted (sorted in place and returned)
    """
    pivot = 1
    # i: boundary of the zero region; j: scan cursor; k: boundary of the
    # two region (from the right).  [j, k] is the unexamined span.
    i, j, k = 0, 0, len(input_list) - 1
    # print("Start ", input_list)
    while j <= k:
        if input_list[j] < pivot:
            # Found a 0: swap it back to the zero region when the
            # boundaries have separated; otherwise just advance the scan.
            if i != j:
                input_list[i], input_list[j] = input_list[j], input_list[i]  # swap i and j
                i += 1
            else:
                j += 1
        elif input_list[j] > pivot:
            # Found a 2: push it to the right; j is NOT advanced because
            # the swapped-in element is still unexamined.
            input_list[j], input_list[k] = input_list[k], input_list[j]  # swap j and k
            k -= 1
        else:
            # A 1 stays in the middle region.
            j += 1
    return input_list
def custom_1(d, _):
    """Example of custom step path: identity below 20, negation at or
    above 20.  The second argument is unused but keeps the step-path
    call signature."""
    if d >= 20:
        return -d
    if d < 20:
        return d
def convert_band(old_band):
    """
    Convert an old band string into a new string.

    NOTE: Only exists to support old data formats.  Falsy input yields
    None; unrecognised values pass through unchanged (after stripping).
    """
    if not old_band:
        return None
    old_band = old_band.strip()  # Strip off superfluous white space
    translation = {'A': 'SHORT', 'B': 'MEDIUM', 'C': 'LONG'}
    return translation.get(old_band, old_band)
def remove_trailing_characters_from_string(element, char_list):
    """
    Remove trailing characters from a given string.

    Args:
        element: String to be formatted
        char_list: List of characters to be removed from the end of the string

    Returns:
        element: formatted string
    """
    # BUGFIX: the original called str.strip(), which also removed
    # *leading* characters, contradicting the function's name and doc.
    # rstrip() with all characters at once also handles interleaved
    # trailing characters (e.g. "ab.,." with ['.', ',']).
    return element.rstrip(''.join(char_list))
def get_switch_device(switches, switch_info=None, ngs_mac_address=None):
    """Return switch device by specified identifier.

    Returns switch device from switches array that matched with any of
    passed identifiers. ngs_mac_address takes precedence over switch_info;
    if no mac address matches, fall back to switch_info.

    :param switch_info: hostname of the switch or any other switch identifier.
    :param ngs_mac_address: Normalized mac address of the switch.
    :returns: switch device matches by specified identifier or None.
    """
    if ngs_mac_address:
        wanted = ngs_mac_address.lower()
        for switch in switches.values():
            configured = switch.ngs_config.get('ngs_mac_address')
            if configured and configured.lower() == wanted:
                return switch
    if switch_info:
        return switches.get(switch_info)
def bigend_2_int(p_bytes):
    """Convert a big-endian byte string to an int.

    Uses int.from_bytes, which replaces the original manual
    multiply-accumulate loop; it also accepts an iterable of byte
    values, matching the original's duck-typing.
    An empty input yields 0.
    """
    return int.from_bytes(p_bytes, 'big')
def is_chinese_char(cp):
    """Checks whether CP is the codepoint of a CJK character.

    This defines a "chinese character" as anything in the CJK Unicode block:
    https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)

    Note that the CJK Unicode block is NOT all Japanese and Korean
    characters, despite its name. The modern Korean Hangul alphabet is a
    different block, as is Japanese Hiragana and Katakana. Those alphabets
    are used to write space-separated words, so they are not treated
    specially and handled like all of the other languages.
    """
    cjk_ranges = (
        (0x4E00, 0x9FFF),    # CJK Unified Ideographs
        (0x3400, 0x4DBF),    # Extension A
        (0x20000, 0x2A6DF),  # Extension B
        (0x2A700, 0x2B73F),  # Extension C
        (0x2B740, 0x2B81F),  # Extension D
        (0x2B820, 0x2CEAF),  # Extension E
        (0xF900, 0xFAFF),    # Compatibility Ideographs
        (0x2F800, 0x2FA1F),  # Compatibility Supplement
    )
    return any(lo <= cp <= hi for lo, hi in cjk_ranges)
def _compress_cmd(log_path): """Return bash command which compresses the given path to a tarball.""" compres_cmd = 'cd "$(dirname %s)" && ' % log_path compres_cmd += 'f="$(basename %s)" && ' % log_path compres_cmd += 'if [ -e "$f" ]; then tar czf "$f.tgz" "$f"; fi && ' compres_cmd += 'rm -rf %s' % log_path return compres_cmd
def with_prefix(prefix, name):
    """Adds prefix to name, joined with a slash."""
    return prefix + "/" + name
def calc_total(issues):
    """Return sum of story points for given issues (None entries are skipped)."""
    total = 0
    for issue in issues:
        points = issue["story points"]
        if points is not None:
            total += int(points)
    return total
def quote_argument(arg):
    """Wraps the given argument in quotes if needed.

    This is so print_cmd_line output can be copied and pasted into a shell.
    An argument containing both quote kinds is not supported (assert).

    Args:
        arg: The string to convert.

    Returns:
        The quoted argument.
    """
    has_double = '"' in arg
    has_single = "'" in arg
    if has_double:
        assert not has_single
        return "'%s'" % arg
    if has_single or ' ' in arg:
        return '"%s"' % arg
    return arg
def classifyCharacter(c):
    """Return 0 for delimiter, 1 for digit and 2 for any other (alphabetic)
    character."""
    if c in (".", "-", "_", " "):
        return 0
    return 1 if c.isdigit() else 2
def convertable(obj, func):
    """
    Returns True if obj can be converted by func without an error.

    Catches TypeError as well as ValueError — e.g. int(None) raises
    TypeError, which the original let propagate despite the docstring.
    """
    try:
        func(obj)
        return True
    except (ValueError, TypeError):
        return False
def parse_hpo_phenotype(hpo_line):
    """Parse one tab-separated HPO phenotype line.

    Args:
        hpo_line(str): a single HPO phenotype line

    Returns:
        hpo_info(dict): dict with keys hpo_id, description and hgnc_symbol
    """
    fields = hpo_line.rstrip().split('\t')
    return {
        'hpo_id': fields[0],
        'description': fields[1],
        'hgnc_symbol': fields[3],
    }
def is_palindrome(n):
    """Checks if n (number or string) reads the same forwards and backwards."""
    text = str(n)
    half = len(text) // 2
    return all(text[i] == text[-1 - i] for i in range(half))
def bisect_right(sorted_collection, item, lo=0, hi=None):
    """
    Locates the first element in a sorted array that is larger than a given
    value.  It has the same interface as
    https://docs.python.org/3/library/bisect.html#bisect.bisect_right .

    :param sorted_collection: some ascending sorted collection with comparable items
    :param item: item to bisect
    :param lo: lowest index to consider (as in sorted_collection[lo:hi])
    :param hi: past the highest index to consider (as in sorted_collection[lo:hi])
    :return: index i such that all values in sorted_collection[lo:i] are <= item
        and all values in sorted_collection[i:hi] are > item.

    Examples:
    >>> bisect_right([0, 5, 7, 10, 15], 0)
    1
    >>> bisect_right([0, 5, 7, 10, 15], 15)
    5
    >>> bisect_right([0, 5, 7, 10, 15], 6)
    2
    >>> bisect_right([0, 5, 7, 10, 15], 15, 1, 3)
    3
    >>> bisect_right([0, 5, 7, 10, 15], 6, 2)
    2
    """
    low = lo
    high = len(sorted_collection) if hi is None else hi
    while low < high:
        middle = low + (high - low) // 2
        if item < sorted_collection[middle]:
            high = middle
        else:
            low = middle + 1
    return low
def get_range_digits(n):
    """Gets the range (l, h) where l is the position of the first digit of
    the first number with n digits and h is the position of the last digit
    of the last number that has n digits (iterative form of the original
    recursion)."""
    low, high = 1, 9
    for digits in range(2, n + 1):
        low = high + 1
        high = high + 9 * (10 ** (digits - 1)) * digits
    return (low, high)
def to_pascal_case(value: str, ignore_pattern=None) -> str:
    """
    Convert a snake_case string to PascalCase.

    :param value: string
    :param ignore_pattern: optional compiled regex; single-word values
        matching it are returned unchanged
    :return: string
    """
    words = value.split('_')
    if len(words) == 1:
        only = words[0]
        if ignore_pattern and ignore_pattern.match(only):
            return only
        return only.title()
    return ''.join(word.title() for word in words if not word.isspace())
def pointsInRect(array, rect):
    """Find out which points of array are inside rect.

    Returns a list with one boolean per point.
    """
    # len() check (rather than truthiness) keeps ndarray inputs working.
    if len(array) < 1:
        return []
    xMin, yMin, xMax, yMax = rect
    inside = []
    for x, y in array:
        inside.append(xMin <= x <= xMax and yMin <= y <= yMax)
    return inside
def nu(x, beta2):
    """
    Eq. (6) from Ref[1] (coefficient of alpha**2).

    Note that 'x' here corresponds to 'chi = x/rho' in the paper.
    """
    numerator = 3 * (1 - beta2 - beta2 * x)
    return numerator / beta2 / (1 + x)
def aggregate_results_annually(data, annual_increments):
    """Aggregate coverage results per year.

    For each year in annual_increments, sums prems_covered and
    delivery_points over the matching records and derives the rounded
    percentage coverage (0 when there are no delivery points).
    """
    annual_data = []
    for year in annual_increments:
        covered = 0
        points = 0
        for datum in data:
            if datum['year'] == year:
                covered += int(datum['prems_covered'])
                points += int(datum['delivery_points'])
        try:
            coverage = round((covered / points) * 100)
        except ZeroDivisionError:
            coverage = 0
        annual_data.append({
            'year': year,
            'percentage_coverage': coverage,
            'prems_covered': covered,
            'delivery_points': points,
        })
    return annual_data
def server_error(e):
    """Return a custom 500 error.

    :param e: the exception (or message) that triggered the handler
    :return: (body, status) tuple for the web framework
    """
    # BUGFIX: removed a stray bare `print` expression that preceded the
    # return — in Python 3 it was a no-op reference to the builtin.
    return 'Sorry, unexpected error:\n{}'.format(e), 500
def number_formatter(number, pos=None):
    """Convert a number into a human readable format (K/M/B/T/Q suffixes).

    `pos` is unused; it keeps the matplotlib FuncFormatter signature.
    """
    suffixes = ['', 'K', 'M', 'B', 'T', 'Q']
    magnitude = 0
    while abs(number) >= 1000:
        number /= 1000.0
        magnitude += 1
    return '%.0f%s' % (number, suffixes[magnitude])
def gcd(number1, number2):
    """Calculates the gcd of two integers using Euclid's algorithm."""
    a, b = number1, number2
    while b:
        a, b = b, a % b
    return a
def determine_adjacent(original):
    """
    Figure out if we're doing an adjacent location, get the co-ordinates
    and return them and the stripped original
    """
    leading = original[0]
    if leading in ('+', '-'):
        # (sign, offset) pair; the final two characters remain as the base.
        return (leading, original[1:-2]), original[-2:]
    return False, original
def P_to_a(P, Mstar):
    """
    Convenience function to convert periods to semimajor axis from Kepler's Law

    Parameters
    ----------
    P : array-like
        orbital periods [days]
    Mstar : float
        stellar mass [solar masses]

    Returns
    -------
    a : array-like
        semi-major axis [stellar radii]
    """
    Pearth = 365.24  # [days]
    aearth = 215.05  # [solar radii]
    period_ratio_sq = (P / Pearth) ** 2
    return aearth * (period_ratio_sq * (1 / Mstar)) ** (1 / 3)
def get_path_node_list(row_path_node_list):
    """Converts df path_node_list column to path node list; used by compute_d3()

    Parameters
    ----------
    row_path_node_list : Pandas DataFrame column
        Pandas DataFrame column of OSMNx nodes comprising a route path

    Returns
    -------
    path_node_list : list
    """
    inner = str(row_path_node_list).strip('[]')
    # Guard against an empty list representation ('[]'), which would
    # otherwise make int('') raise ValueError.
    if not inner.strip():
        return []
    return [int(item.strip()) for item in inner.split(',')]
def _knapsack_2(limit, vs, ws): """ This is a DP solution based on the recursive approach """ memory = [[None for _ in range(limit + 1)] for _ in range(len(vs))] def _knapsack(limit, vs, ws, i): if limit == 0 or i == len(vs): return 0 if memory[i][limit] is not None: return memory[i][limit] reject = _knapsack(limit, vs, ws, i + 1) accept = ( _knapsack(limit - ws[i], vs, ws, i + 1) + vs[i] if ws[i] <= limit else 0 ) memory[i][limit] = max(accept, reject) return memory[i][limit] return _knapsack(limit, vs, ws, 0)
def nm_to_rgb(nm):
    """Convert a wavelength to corresponding RGB values [0.0-1.0].

    Parameters
    ----------
    nm : int or float
        The wavelength of light.

    Returns
    -------
    List of [R,G,B] values between 0 and 1

    `original code`__

    __ http://www.physics.sfasu.edu/astro/color/spectra.html
    """
    w = int(nm)
    # color ---------------------------------------------------------------------------------------
    # Piecewise-linear ramps over the visible-spectrum bands; wavelengths
    # outside 380-780 nm map to black.
    if w >= 380 and w < 440:
        R = -(w - 440.) / (440. - 350.)
        G = 0.0
        B = 1.0
    elif w >= 440 and w < 490:
        R = 0.0
        G = (w - 440.) / (490. - 440.)
        B = 1.0
    elif w >= 490 and w < 510:
        R = 0.0
        G = 1.0
        B = -(w - 510.) / (510. - 490.)
    elif w >= 510 and w < 580:
        R = (w - 510.) / (580. - 510.)
        G = 1.0
        B = 0.0
    elif w >= 580 and w < 645:
        R = 1.0
        G = -(w - 645.) / (645. - 580.)
        B = 0.0
    elif w >= 645 and w <= 780:
        R = 1.0
        G = 0.0
        B = 0.0
    else:
        R = 0.0
        G = 0.0
        B = 0.0
    # intensity correction ------------------------------------------------------------------------
    # Intensity falls off toward both ends of the visible range (down to
    # a floor of 0.3 at the edges).
    if w >= 380 and w < 420:
        SSS = 0.3 + 0.7 * (w - 350) / (420 - 350)
    elif w >= 420 and w <= 700:
        SSS = 1.0
    elif w > 700 and w <= 780:
        SSS = 0.3 + 0.7 * (780 - w) / (780 - 700)
    else:
        SSS = 0.0
    SSS *= 255
    # Quantize to 8-bit steps, then rescale into [0, 1) by dividing by 256.
    return [float(int(SSS * R) / 256.),
            float(int(SSS * G) / 256.),
            float(int(SSS * B) / 256.)]
def create_vm_parameters(server_id, nic_id, vm_reference, location, vmName, userName, password, size):
    """Create the Azure VM parameters structure.

    Wires the given location, credentials, hardware size, image reference
    and NIC id into the dict shape expected by a create-or-update VM call.
    """
    params = {}
    params['location'] = location
    params['tags'] = {'server_id': server_id}
    params['os_profile'] = {
        'computer_name': vmName,
        'admin_username': userName,
        'admin_password': password,
    }
    params['hardware_profile'] = {'vm_size': size}
    params['storage_profile'] = {
        'image_reference': {
            'publisher': vm_reference['publisher'],
            'offer': vm_reference['offer'],
            'sku': vm_reference['sku'],
            'version': vm_reference['version'],
        },
    }
    params['network_profile'] = {'network_interfaces': [{'id': nic_id}]}
    return params
def _shorten_file_path(line): """Shorten file path in error lines for more readable tracebacks.""" start = line.lower().find('file') if start < 0: return line first_quote = line.find('"', start) if first_quote < 0: return line second_quote = line.find('"', first_quote + 1) if second_quote < 0: return line path = line[first_quote + 1:second_quote] new_path = '/'.join(path.split('/')[-3:]) return line[:first_quote] + '[...]/' + new_path + line[second_quote + 1:]
def generatePath(pointer, timeSeriesName):
    """
    Outputs the path to find the specified JSON file.

    Input:
        - pointer: float
            Folder number to find the JSON file.
        - timeSeriesName: string
            'deviceMotion_walking_outbound', 'deviceMotion_walking_rest'
            or 'pedometer_walking_outbound'
    """
    folder = int(pointer)
    # Files are bucketed into sub-folders by pointer modulo 1000.
    return '../data/%s/%s/%s/' % (timeSeriesName, folder % 1000, folder)
def heap_sort(elements):
    """
    Use the simple heap sort algorithm to sort :param elements in place.

    For each offset, a min-heap is rebuilt over the suffix
    elements[offset:], which moves the smallest remaining item to
    position offset.

    :param elements: a sequence implementing __getitem__/__setitem__/__len__
    :return: the sorted elements in increasing order
    """
    length = len(elements)
    if not length or length == 1:
        return elements
    for offset in range(0, length - 1):
        # BUGFIX: use integer division.  The original used `/`, which
        # produces a float in Python 3, so every derived index was a
        # float and list indexing raised TypeError for any len >= 2.
        origin_parent = (length - offset - 2) // 2
        if origin_parent >= 0:
            left = origin_parent * 2 + 1 + offset
            right = left + 1
            parent = origin_parent + offset
            if right >= length:
                # The last parent may lack a right child.
                if elements[parent] > elements[left]:
                    elements[parent], elements[left] = elements[left], elements[parent]
            else:
                min_index = left
                if elements[right] < elements[left]:
                    min_index = right
                if elements[parent] > elements[min_index]:
                    elements[parent], elements[min_index] = elements[min_index], elements[parent]
            origin_parent -= 1
        while origin_parent >= 0:
            # All parents before the last one have both children in range.
            left = origin_parent * 2 + 1 + offset
            right = left + 1
            parent = origin_parent + offset
            min_index = left
            if elements[right] < elements[left]:
                min_index = right
            if elements[parent] > elements[min_index]:
                elements[parent], elements[min_index] = elements[min_index], elements[parent]
            origin_parent -= 1
    return elements
def getvalue(s):
    """
    getvalue() takes a string like <aaa>bbbbb<cc> and returns bbbbb
    (the text after the first '>' and before the last '<').
    """
    after_open = s[s.find('>') + 1:]
    close = after_open.rfind('<')
    if close == -1:
        # No closing tag: keep everything after the first '>'.
        return after_open
    return after_open[:close]
def extend_path(path, valid_extensions, default_extension):
    """
    Return tuple (path, extension) ensuring that path has extension.
    The first matching valid extension wins; otherwise the default
    extension is appended.
    """
    matched = next(
        (ext for ext in valid_extensions if path.endswith("." + ext)),
        None,
    )
    if matched is not None:
        return (path, matched)
    return (path + "." + default_extension, default_extension)
def hamming(n):
    """Returns the nth hamming number (numbers whose only prime factors
    are 2, 3 and 5), using the classic three-pointer merge."""
    hams = [1]
    primes = (2, 3, 5)
    # Index into hams of the next value to multiply by each prime.
    cursors = [0, 0, 0]
    while len(hams) < n:
        candidates = [hams[cursors[k]] * primes[k] for k in range(3)]
        smallest = min(candidates)
        hams.append(smallest)
        # Advance every cursor that produced the chosen value to skip
        # duplicates (e.g. 6 = 2*3 = 3*2).
        for k in range(3):
            if candidates[k] == smallest:
                cursors[k] += 1
    return hams[-1]
def apply_macros(macro_dictionary, program_string):
    """
    Apply macros to the source code.

    :param macro_dictionary: a dictionary of macros
        (e.g., {"UINT32_MAX": 4294967296, "SAVED_REG": "%r0"})
    :param program_string: assembly program
    :return: the program with macros substituted
    """
    # Longest-first substitution covers macros that are substrings of one
    # another; str(value) allows duck-typed (non-string) macro values.
    for name in sorted(macro_dictionary, key=len, reverse=True):
        program_string = program_string.replace(name, str(macro_dictionary[name]))
    return program_string
def tokenize(source):
    """Transforms a string of source code into a list of tokens.

    Args:
        source (str): The source code to transform.
    """
    spaced = source.replace('(', ' ( ')
    spaced = spaced.replace(')', ' ) ')
    return spaced.split()
def select_supporting_facts(scored_sfs, min_thresholds):
    """
    select supporting facts according to the provided thresholds

    :param scored_sfs: a list of (sentence_id, score), or a dict mapping
        sentence_id -> score
    :param min_thresholds: a list of minimum scores for top ranked supporting
        facts: [min_score_for_top_ranked, min_score_for_second_ranked,
        min_score_for_others]
    :return: a list of sentence ids predicted as supporting facts
    """
    if isinstance(scored_sfs, dict):
        scored_sfs = [(sf, score) for sf, score in scored_sfs.items()]
    # Sort in place, best score first (matches the original side effect).
    scored_sfs.sort(key=lambda pair: pair[1], reverse=True)
    last = len(min_thresholds) - 1
    selected = []
    for rank, (sf, score) in enumerate(scored_sfs):
        # Ranks beyond the threshold list reuse its last entry.
        if score >= min_thresholds[min(rank, last)]:
            selected.append(sf)
    return selected
def filter_unwanted_classes(records, unwanted_classlist):
    """
    given a list of unwanted classes, remove all records whose 'label'
    matches one of them
    """
    kept = []
    for record in records:
        if record['label'] not in unwanted_classlist:
            kept.append(record)
    return kept
def polynomial_decay_learning_rate(step: int, learning_rate_start: float,
                                   learning_rate_final: float,
                                   decay_steps: int, power: float):
    """
    Manual implementation of polynomial decay for learning rate

    :param step: which step we're on
    :param learning_rate_start: learning rate for epoch 0
    :param learning_rate_final: learning rate for epoch decay_steps
    :param decay_steps: epoch at which learning rate stops changing
    :param power: exponent
    :return: the decayed learning rate (clamped to learning_rate_final
        after decay_steps)
    """
    if step > decay_steps:
        return learning_rate_final
    delta = float(learning_rate_start - learning_rate_final)
    remaining = 1.0 - float(step) / float(decay_steps)
    return delta * remaining ** power + learning_rate_final
def to_dict(dictish):
    """
    Given something that closely resembles a dictionary, we attempt to
    coerce it into a proper dictionary.  Raises ValueError when the
    object has no keys() method.
    """
    if not hasattr(dictish, "keys"):
        raise ValueError(dictish)
    return {key: dictish[key] for key in dictish.keys()}
def triangle_orientation(ax, ay, bx, by, cx, cy):
    """
    Calculate orientation of triangle

    Args:
        ax, ay: coords of first point
        bx, by: coords of second point
        cx, cy: coords of third point

    Returns:
        +1 if counter clockwise
        0 if colinear
        -1 if clockwise
    """
    # z-component of the cross product AB x AC.
    cross = (bx - ax) * (cy - ay) - (by - ay) * (cx - ax)
    if cross > 0:
        return 1  # Counter clockwise
    if cross < 0:
        return -1  # Clockwise
    return 0  # Collinear
def dict_to_param_string(in_dict, delim='|'):
    """
    Convert a dictionary of param:value pairs to a delimited string of the
    form "{delim}param=value{delim}..."

    Why? As an easy way to implement different parameter schemes for
    different providers. We can check for equality by searching for the
    substring "{delim}param=value{delim}"

    Eats "=" and {delim} (default = '|') characters.

    :param in_dict: dict
    :param delim: str
    :return: str
    """
    def sanitize(text):
        # '=' and the delimiter would break parsing, so squash them.
        return str(text).replace(delim, '_').replace('=', '_')

    # Force sorted, stringified keys (values looked up by stringified key,
    # as in the original).
    keys = sorted(str(k) for k in in_dict.keys())
    pairs = [sanitize(k) + '=' + sanitize(in_dict[k]) for k in keys]
    return delim + delim.join(pairs) + delim
def clean_list(pre_list):
    """Function delete blank lines from a list

    :type pre_list: List
    :param pre_list: A list made from a file
    :rtype: List
    :returns: A Cleaned List
    """
    return [line for line in pre_list if len(line) > 0]
def to_string(rgb_tuple):
    """Convert a tuple (rgb color) to a space-separated string ready to print"""
    return " ".join(str(channel) for channel in rgb_tuple)
def implicant2bnet(partial_state):
    """Converts a partial state dictionary to a BNet string;
    negated literals come first, e.g. {'A':1,'B':0} returns '!B & A'.

    Parameters
    ----------
    partial_state : partial state dictionary
        Partial state to convert.

    Returns
    -------
    str
        BNET representation of the partial state.
    """
    negated = ["!" + node for node, value in partial_state.items() if not value]
    asserted = [node for node, value in partial_state.items() if value]
    return ' & '.join(negated + asserted)
def abo_event_probs(p1, p2):
    """Probabilities for two events that cannot occur simultaneously.

    Context: Two events (1 and 2), if executed independently, have
    probabilities p1 and p2 of success; if they are run simultaneously,
    however, no more than one of the events can succeed (i.e., the two
    events cannot occur at the same trial).

    The random process: First, a random choice defines if one of the events
    will occur; with probability (1-p1)*(1-p2), none of them succeed, and
    with complementary probability another random choice is made between
    the two events, with renormalized probabilities p1/(p1+p2) and
    p2/(p1+p2).

    Parameters
    ----------
    p1 : float
        Individual probability of event 1.
    p2 : float
        Individual probability of event 2.

    Returns
    -------
    p(0), p(A), p(B)
        Probabilities that neither occur, event 1 occurs or event 2
        occurs, respectively.
    """
    p0 = (1. - p1) * (1. - p2)  # Probability that neither happens
    # BUGFIX: guard the p1 == p2 == 0 case, which made the original
    # renormalization divide by zero.  Both events are impossible then.
    if p1 + p2 == 0.:
        return 1., 0., 0.
    renorm = (1. - p0) / (p1 + p2)  # Renorm. for knowing that A or B happens
    return p0, renorm * p1, renorm * p2
def array_set(index: int, value: int, buffer: bytearray, offset: int, size: int) -> int:
    """Set value at logical index within buffer; return the new size.

    Grows the buffer (zero-filled) when the target position lies beyond
    its end, and zeroes any stale bytes between the old logical end and
    the target position.
    """
    pos = offset + index
    tail = offset + size
    current_len = len(buffer)
    if current_len <= pos:
        # Grow so that buffer[pos] exists; new bytes are zero-filled.
        buffer.extend(bytearray(pos - current_len + 1))
    buffer[pos] = value & 0xFF
    # Zero the gap between the old logical end and the written position.
    for gap in range(tail, pos):
        buffer[gap] = 0
    # Logical size grows only when writing at or past the current end.
    return size if index < size else index + 1
def MakeOffset(x, sign=True):
    """Make integer x into an IDA-styled offset string.

    When `sign` is True a leading '+' is added, and 0 yields the empty
    string (so "+0" offsets disappear).  Hex strings starting with a
    letter get a leading '0'; values above 9 get an 'h' suffix.

    >>> MakeOffset(0)
    ''
    >>> MakeOffset(0xd0)
    '+0D0h'
    >>> MakeOffset(0xd0, sign=False)
    '0D0h'
    >>> MakeOffset(0x1234)
    '+1234h'
    """
    # NOTE: doctests corrected — the previous examples ('0', '0D0h' and
    # '1234h' with the default sign=True) did not match the implemented
    # behaviour; the code itself is unchanged.
    if sign and x == 0:
        return ""
    hexstr = "%X" % x
    if hexstr[0] in ('A', 'B', 'C', 'D', 'E', 'F'):
        hexstr = '0' + hexstr
    if x > 9:
        hexstr += 'h'
    if sign and x >= 0:
        return "+%s" % hexstr
    return hexstr
def totient(p, q):
    """Euler's totient of n = p*q for distinct primes p and q."""
    return (p - 1) * (q - 1)
def get_impact_type_list(data):
    """
    Return a list of impact types extracted from data.json's
    'global_impact_details' list (the original docstring incorrectly
    named the key 'global_impact_scores').

    Variable:
        - data: structure loaded from data.json
    """
    # Default to [] so a missing key no longer crashes the for-loop
    # with "'NoneType' object is not iterable".
    details = data.get("global_impact_details") or []
    result = []
    for detail in details:
        impact_type = detail.get("type")
        if impact_type:
            result.append(impact_type)
    return result
def output_op(program, state, parameters, modes):
    """Output operation: append the parameter's value to state['output'].

    Mode 0 (position) dereferences the parameter into program memory;
    any other mode treats it as an immediate value.
    """
    if modes[0] == 0:
        value = program[parameters[0]]  # position mode
    else:
        value = parameters[0]  # immediate mode
    state['output'].append(value)
    return True, 1
def gateway_environment(gateway_environment):
    """Sets gateway to use only path routing"""
    gateway_environment["APICAST_PATH_ROUTING_ONLY"] = 1
    return gateway_environment
def filter_ending_items(registration_number: str, items: list):
    """
    Get a subset of the provided list that excludes items that were removed
    for the specified registration number.

    :param registration_number: The registration number to filter out items for
    :param items: A list of items to be filtered. Must have an
        'ending_registration_number' attribute
    :return: The filtered list
    """
    return [item for item in items
            if item.ending_registration_number != registration_number]
def calculate_air_mass(Sd):
    """Air mass on diaphragm; the difference between Mms and Mmd
    (proportional to Sd to the power 3/2)."""
    return 1.13 * Sd ** (3 / 2)
def triplets(n):
    """List all (x, y, z) with 1 <= x <= y <= z < n and x + y == z.

    >>> triplets(5)
    [(1, 1, 2), (1, 2, 3), (1, 3, 4), (2, 2, 4)]
    """
    found = []
    for x in range(1, n):
        for y in range(x, n):
            z = x + y
            # z >= y always holds since x >= 1; only the bound matters.
            if z < n:
                found.append((x, y, z))
    return found
def blend0(d=0.0, u=1.0, s=1.0):
    """
    blending function trapezoid
    d = delta x = xabs - xdr
    u = uncertainty radius of xabs estimate error
    s = tuning scale factor

    returns blend
    """
    d = float(abs(d))
    u = float(abs(u))
    s = float(abs(s))
    v = d - u  # offset by radius
    # v >= s is checked first so that s == 0 lands here and the division
    # below can never be by zero.
    if v >= s:
        return 0.0
    if v <= 0.0:
        return 1.0
    return 1.0 - (v / s)  # 0 < v < s
def precatenated(element, lst):
    """ pre-catenates `element` to `lst` and returns lst """
    # Slice assignment prepends in place, mutating the caller's list.
    lst[:0] = [element]
    return lst
def force_array(val):
    """Returns val if it is an array, otherwise a one element array containing val"""
    if isinstance(val, list):
        return val
    # Wrap any non-list value in a single-element list.
    return [val]
def deconv_output_length(input_length, filter_size, padding, stride):
    """This function was adapted from Keras

    Determines output length of a transposed convolution given input length.

    Arguments:
        input_length: integer, or None (propagated as None).
        filter_size: integer.
        padding: only 'VALID' adds the filter overhang; any other value
            (e.g. 'SAME') yields input_length * stride unchanged.
        stride: integer.

    Returns:
        The output length (integer), or None if input_length is None.
    """
    if input_length is None:
        return None
    length = input_length * stride
    if padding == 'VALID':
        # Account for the part of the filter extending past the last stride.
        length += max(filter_size - stride, 0)
    return length
def get_tier_values(field, db_object):
    """Retrieves the list of storage tiers associated with the cluster."""
    # One dict per tier entry; missing 'tiers' attribute yields [].
    return [
        {'name': tier.name, 'status': tier.status, 'uuid': tier.uuid}
        for tier in getattr(db_object, 'tiers', [])
    ]
def decode_fixed_len(output):
    """
    {'c', 'f'} --> 1
    {'b', 'c', 'd', 'f'} --> 4
    {'a', 'c', 'f'} --> 7
    {'a', 'b', 'c', 'd', 'e', 'f', 'g'} --> 8
    """
    # Digits 1, 4, 7 and 8 light a unique number of segments: 2, 4, 3, 7.
    # Return 1 when *output* has one of those lengths, else 0.
    unique_segment_counts = {2, 3, 4, 7}
    return 1 if len(output) in unique_segment_counts else 0
def transform_phone(number):
    """Expected phone number format (555)555-5555. Changes to spaces only."""
    # Drop '(' and turn ')' and '-' into spaces in a single pass.
    table = str.maketrans({'(': '', ')': ' ', '-': ' '})
    return number.translate(table)
def get_named_regions(hemisphere):
    """Get names of predefined region for given hemisphere."""
    # Lookup table of region names per hemisphere code.
    named_regions = {
        'WG': ['all'],
        'NH': ['all', 'eurasia', 'pacific', 'atlantic', 'atlantic_eurasia'],
        'SH': ['all', 'indian', 'south_america', 'pacific', 'full_pacific',
               'australian'],
    }
    if hemisphere in named_regions:
        return named_regions[hemisphere]
    raise RuntimeError("Invalid hemisphere '%s'" % hemisphere)
def negative(arg):
    """ returns the negative part of arg """
    # arg - |arg| is 2*arg for negative values and 0 otherwise; halving
    # yields arg when arg < 0, else 0.  The arithmetic form also works
    # elementwise on array-like inputs that support abs().
    return (arg - abs(arg)) / 2.0
def card_adder(decklist, card):
    """ Given a decklist and a card name, returns the decklist with the supplied card added in.

    Parameters:
        decklist: list of str
            Decklist represented by a list of strings of card names.
        card: str
            Card name to be added to the deck.
    :return: list of str
        Decklist with added card
    """
    # Concatenation builds a fresh list; the caller's decklist is untouched.
    return decklist + [card]
def convert_scores_to_pecentage(scores):
    """ This function takes in a dict of each unique score and the count of it,
        and return the % of the count
    :param scores: the scores with it counts
    :return: the dict with the unique scores but now the percentage as the value
    """
    # Total of all counts; each entry becomes its share of that total.
    total = sum(scores.values())
    return {score: 100 * count / total for score, count in scores.items()}
def get_filename(language: str) -> str:
    """Generate a filename for the analyzer to fix the language to use."""
    # Python gets a .py file; every other language falls back to C.
    return "test.py" if language == "python" else "test.c"
def is_cyclic(head):
    """
    :type head: Node
    :rtype: bool

    Floyd's tortoise-and-hare cycle detection: the fast pointer advances
    two nodes per step, the slow pointer one; they meet iff a cycle exists.
    """
    if not head:
        return False
    slow = head
    fast = head
    while fast._next and fast._next._next:
        fast = fast._next._next
        slow = slow._next
        if fast == slow:
            return True
    # Fast pointer hit the end of the list: no cycle.
    return False
def int_version(name, version):
    """Split a version string into a list of integer components.

    :param name: package name (e.g. 'numpy'); if the name itself contains
        'rc', any 'rc' in the version is kept rather than stripped.
    :param version: version string (e.g. '1.21.3', '3.1rc1', '1.0-2').
    :return: list of ints (e.g. [1, 21, 3]) -- note: a list, not a tuple.
    :raises SyntaxError: if the cleaned version is not dot-separated ints.
    """
    # Drop any '-suffix' (e.g. '1.0-2' -> '1.0').
    sversion = version.split('-')[0]
    #numpy
    #scipy
    #matplotlib
    #qtpy
    #vtk
    #cpylog
    #pyNastran
    if 'rc' not in name:
        # it's gotta be something...
        # Strip a release-candidate tag, e.g. matplotlib '3.1rc1' -> '3.1'.
        sversion = sversion.split('rc')[0]
    try:
        return [int(val) for val in sversion.split('.')]
    except ValueError as err:
        # Chain the original parse failure so debugging shows both errors.
        raise SyntaxError('cannot determine version for %s %s'
                          % (name, sversion)) from err
def correct_to_01(a, epsilon=1.0e-10):
    """Sets values in [-epsilon, 0] to 0 and in [1, 1 + epsilon] to 1.

    Assumption is that these deviations result from rounding errors.
    """
    assert epsilon >= 0.0
    # Snap tiny negative overshoot back to 0.
    if -epsilon <= a <= 0.0:
        return 0.0
    # Snap tiny overshoot past 1 back to 1.
    if 1.0 <= a <= 1.0 + epsilon:
        return 1.0
    return a
def disk_to_hdname(disk_name):
    """
    /dev/sdb --> B
    :param disk_name:
    :return:
    """
    # The drive letter is the final character of the device path.
    last_char = disk_name[-1]
    return last_char.upper()
def code(text, inline=False, lang=''):
    """Code.

    Args:
        text (str): text to make code.
        inline (bool, optional): format as inline code, ignores the lang
            argument. Defaults to False.
        lang (str, optional): set the code block language. Defaults to ''.

    Returns:
        str: code text.
    """
    if inline:
        return f'`{text}`'
    # Fenced block with CRLF line endings around the body.
    return f'```{lang}\r\n{text}\r\n```'
def project_operational_periods(
    project_vintages_set, operational_periods_by_project_vintage_set
):
    """
    :param project_vintages_set: the possible project-vintages when capacity
        can be built
    :param operational_periods_by_project_vintage_set: the project
        operational periods based on vintage
    :return: all study periods when the project could be operational

    Get the periods in which each project COULD be operational given all
    project-vintages and operational periods by project-vintage (the
    lifetime is allowed to differ by vintage).
    """
    operational = set()
    for (project, vintage) in project_vintages_set:
        # Every period reachable from this vintage counts as operational.
        for period in operational_periods_by_project_vintage_set[project, vintage]:
            operational.add((project, period))
    return operational