content
stringlengths
42
6.51k
def calc_fitness(t, max_t, x, dx, theta, dtheta):
    """Calculate genome fitness, rewarding long balancing with low oscillation.

    Args:
        t (int): The number of time steps the agent balanced the pole for.
        max_t (int): The maximum number of time steps (solution threshold).
        x (iterable): The cart position in meters.
        dx (iterable): The cart velocity in meters per second.
        theta (iterable): The pole angle from vertical in degrees.
        dtheta (iterable): The pole velocity in degrees per second.

    Returns:
        float: The fitness of the genome.
    """
    duration_score = t / max_t
    if t < 100:
        # Too few steps to judge oscillation behavior.
        oscillation_score = 0
    else:
        # Total motion over the first 100 steps; smaller motion -> higher score.
        total_motion = sum(
            abs(x[i]) + abs(dx[i]) + abs(theta[i]) + abs(dtheta[i])
            for i in range(100)
        )
        oscillation_score = 0.75 / total_motion
    return 0.1 * duration_score + 0.9 * oscillation_score
def stripSv(line,portFlag,bits):
    """Strip SystemVerilog keywords from one line and annotate port info.

    Returns (line, portFlag, bits): ``line`` with keywords removed and a
    ``// <direction> [width]`` annotation appended, ``portFlag`` tracking
    the most recent port direction seen (1=in, 2=out, 3=inout), and
    ``bits`` passed through unchanged.
    """
    # Annotation keyword for each port-direction flag value.
    portDict = { 1 : "in", 2 : "out", 3 : "inout" }
    # Drop any existing trailing comment.
    if("//" in line):
        line,*blah = line.split("//")
    # Remove structural keywords; the port keywords also update portFlag
    # so the direction can be annotated below.
    if("module" in line):
        line = line.replace("module", "")
    if("parameter" in line):
        line = line.replace("parameter", "")
    if("input" in line):
        line = line.replace("input", "")
        portFlag = 1
    if("output" in line):
        line = line.replace("output", "")
        portFlag = 2
    if("inout" in line):
        line = line.replace("inout", "")
        portFlag = 3
    if("reg" in line):
        line = line.replace("reg", "")
    if("wire" in line):
        line = line.replace("wire", "")
    if("logic" in line):
        line = line.replace("logic", "")
    # Remove all spaces so the remaining text is easy to split.
    if(" " in line):
        line = line.replace(" ", "")
    # Keep only the text before the first '=' (default values are dropped).
    if("=" in line):
        line = line.replace("=", ",%")
        line,*blah = line.split("%")
    if("[" in line):
        # Split out "[msb:lsb]" ranges: parts without ':' are re-joined as
        # the signal name, parts with ':' become the "[...]" annotation.
        line = line.replace("[", "%")
        line = line.replace("]", "%")
        line = line.split("%")
        newLine = ""
        newannotate = ("// %s "%(portDict[portFlag]))
        for part in line:
            if(not(":" in part)):
                if("," in part):
                    part = part.replace(",","")
                newLine = newLine+part
            else:
                newannotate += ("[%s]"%(part))
        line = newLine+newannotate+","
    elif(portFlag != 0):
        # No explicit range: annotate as a 1-bit port.
        line = line.replace(",", "// %s [1],"%(portDict[portFlag]))
        # NOTE(review): handles the final port line that ends with ");"
        # instead of "," -- confirm this only applies in the no-range case.
        if(";" in line):
            line = line.replace(");", "// %s [1]);"%(portDict[portFlag]))
    return line,portFlag,bits
def is_convertible_to_int(value):
    """Return True if ``value`` is convertible to int.

    Fix: ``int(None)`` and ``int([1])`` raise TypeError, not ValueError, so
    such inputs previously crashed instead of returning False. Both
    exception types are now treated as "not convertible".
    """
    try:
        int(value)
    except (ValueError, TypeError):
        return False
    else:
        return True
def getScale(header=None):
    """Derive the image scale (arcsec/pixel) from a FITS-like header.

    Fix: replaced the mutable default argument ``header={}`` with None
    (backward compatible; callers passing a dict are unaffected).

    :param header: mapping of header keywords (SCALE, FOCALLEN, XBINNING,
        XPIXSZ, PIXSIZE1); treated as empty when omitted.
    :return: scale from SCALE if present, otherwise derived from focal
        length / binning / pixel size; 1.0 when neither is available.
    """
    if header is None:
        header = {}
    hasScale = 'SCALE' in header
    focalLength = float(header.get('FOCALLEN', 0))
    binning = float(header.get('XBINNING', 0))
    pixelSize = max(float(header.get('XPIXSZ', 0)),
                    float(header.get('PIXSIZE1', 0)),
                    )
    hasAlternatives = focalLength and binning and pixelSize
    if hasScale:
        scale = float(header.get('SCALE', 0))
    elif hasAlternatives:
        # pixelSize [um] * binning / focalLength [mm] * 206.265 -> arcsec/px
        scale = pixelSize * binning / focalLength * 206.265
    else:
        scale = 1.0
    return scale
def html(*text, lang="en", metaTags=None, internal_ss=None, external_ss=None, title="", scripts=None):
    """
    Convert to html document

    Fix: the list parameters previously used mutable default arguments
    ([]); they now default to None and are treated as empty lists.

    args:
        lang <str> : language of html document
        metaTags <list> : meta tags to add in head tag
        internal_ss <list> : internal stylesheets(also scripts)
        external_ss <list> : external stylesheets(also scripts)
        title <str> : title of html document
        scripts <list> : scripts to add at the end of body tag
    """
    metaTags = metaTags if metaTags is not None else []
    internal_ss = internal_ss if internal_ss is not None else []
    external_ss = external_ss if external_ss is not None else []
    scripts = scripts if scripts is not None else []
    return f"""<!Doctype html><html lang='{lang}'><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width, initial-scale=1.0">{''.join(metaTags)}{''.join(external_ss)}{''.join([f"<style>{s}</style>" for s in internal_ss])}<title>{title}</title></head><body>{''.join(text)}{''.join(scripts)}</body></html>"""
def get_nodelist(nodelist_dir: str) -> list:
    """Parse node IPs from a file.

    The file is expected to contain a bracketed, comma-separated list of
    quoted "ip:port" entries; the surrounding brackets, quotes and ports
    are stripped. Returns [] (and prints the error) on any failure.
    """
    node_ips = []
    try:
        with open(nodelist_dir, "r") as handle:
            # Strip the surrounding '[' and ']' before splitting entries.
            entries = handle.read()[1:-1].split(", ")
        # Drop the ":port" suffix and the leading quote character.
        node_ips = [entry.split(":")[0][1:] for entry in entries]
    except Exception as err:
        # Deliberate best-effort: report and fall through to [].
        print(err)
    return node_ips
def link( text, ref ):
    """Create an internal markdown link to a given site."""
    return f"[{text}]({ref})"
def compare_search_results(r1, r2):
    """Return True when two spellcheck search-result lists are equal.

    r1 and r2 are lists of result dicts; two results match when every
    field below compares equal, in order.
    """
    if len(r1) != len(r2):
        return False
    fields = ('term', 'paragraph_index', 'offset', 'end_offset',
              'uri', 'link', 'text')
    for entry1, entry2 in zip(r1, r2):
        for field in fields:
            if entry1[field] != entry2[field]:
                return False
    return True
def list_deprecations(argument_spec, params):
    """Return a list of deprecations

    :arg argument_spec: An argument spec dictionary from a module
    :arg params: Dictionary of all module parameters

    :returns: List of dictionaries containing a message and the version in
        which the deprecated parameter will be removed, or an empty list::

            [{'msg': "Param 'deptest' is deprecated. See the module docs for more information", 'version': '2.9'}]
    """
    return [
        {
            'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
            'version': arg_opts.get('removed_in_version'),
        }
        for arg_name, arg_opts in argument_spec.items()
        # only deprecated args the caller actually supplied
        if arg_opts.get('removed_in_version') is not None and arg_name in params
    ]
def checkPal(value):
    """Return True when *value*'s string form is palindromic."""
    text = str(value)
    return text == text[::-1]
def convertHmsString(value, ndec=0, showSeconds=True, delimiter=':'): """ Converts floating point to HH:MM:[SS.S] Inputs value : floating point number ndec : number of decimal points to print seconds showSeconds : If True, show seconds, otherwise just use HH:MM. Default:True delimiter : the characters to be used between the numbers. Output a string of format hh:mm:[ss.s] Examples: convertHmsString(15.0) convertHmsString(15.0, ndec=2) convertHmsString(15.0, ndec=2, delimiter='hms') convertHmsString(15.0, ndec=2, delimiter='dms') """ # Set delimiter spacing = delimiter if len(spacing) == 1: spacing = [spacing] * 3 # Construct string t = value st = str(type(value)) if st.find('int') >= 0 or st.find('float') >= 0: x = abs(value) h = int(x) m = int(60 * (x - h)) sec = 3600.0 * (x - h - m/60.0) t = str("%.2d" % h) + spacing[0] + str('%.2d' % m) if showSeconds : # Add seconds t += spacing[1] format = '%0' + str(ndec+3) + '.' + str(ndec) + 'f' if ndec <= 0: format = '%.2d' t += str(format % sec) if spacing[2] not in [' ', ':']: t += spacing[2] if value < 0.0: t = '-' + t return t
def _negative(dz):
    """Return -dz for use as a slice end, i.e. array[i:-dz].

    When dz == 0 return None instead, because array[i:-0] would be an
    empty slice rather than "through the end".
    """
    return None if dz == 0 else -dz
def equals(s1, s2):
    """None-safe equality of two string parameters.

    :param s1:
    :param s2:
    :return: True when both are None, or both non-None and equal
    """
    if s1 is None:
        return s2 is None
    return s1 == s2
def add_csv(filename: str) -> str:
    """Append the '.csv' extension to a ticker name."""
    return f"{filename}.csv"
def positive_sum(arr):
    """Return the sum of the positive numbers in *arr* (0 for empty/None)."""
    if not arr:
        return 0
    return sum(value for value in arr if value > 0)
def calc_capture_freq(capture_windows, captures):
    """Count the captures that fall within the given capture windows.

    Fix: the original called ``capture_window.overlaps(captures)`` on a
    plain tuple (AttributeError at runtime) and never used the unpacked
    window bounds. Counting is now done with an explicit interval-overlap
    test: a capture (s, e) counts toward a window (ws, we) when the two
    intervals intersect.

    Parameters
    ----------
    capture_windows : list of tuples of ints [(start, end), ...]
        Regions of current where the nanopore is available to accept a
        capture.
    captures : list of tuples of ints [(start, end), ...]
        Regions of current where a capture is residing in the pore.

    Returns
    -------
    int or None
        Number of captures across all windows; None when captures is None,
        0 when it is empty.
    """
    if captures is None:
        return None
    if len(captures) == 0:
        return 0
    n_captures = 0
    for window_start, window_end in capture_windows:
        # Half-open overlap test; TODO confirm boundary inclusivity against
        # the producer of these intervals.
        n_captures += sum(
            1 for cap_start, cap_end in captures
            if cap_start < window_end and cap_end > window_start
        )
    return n_captures
def getKmpNext(pattern):
    """Build the KMP failure table for *pattern*.

    next[i] is the largest k such that pattern[:k] equals the last k
    characters of pattern[:i + 1] (longest proper prefix == suffix).
    """
    fail = [0] * len(pattern)
    k = 0
    for i in range(1, len(pattern)):
        # Fall back through shorter borders until one extends, or none is left.
        while k and pattern[k] != pattern[i]:
            k = fail[k - 1]
        if pattern[i] == pattern[k]:
            k += 1
        fail[i] = k
    return fail
def _get_height(node):
    """Return the height of the (sub)tree rooted at *node*.

    Args:
        node(node): root node; None counts as height 0.
    """
    if node is None:
        return 0
    left_height = _get_height(node.left)
    right_height = _get_height(node.right)
    return 1 + max(left_height, right_height)
def format_param(value=None, name='', format=None):
    """
    Makes parameter strings to be used for file names.

    Arguments:
      - value: parameter value (None yields two empty strings)
      - name: parameter name, used as prefix of the long form
      - format: printf-style format string applied to the value

    Returns (value_str, name + value_str).
    """
    if value is None:
        return '', ''
    value_str = (format % value).strip()
    return value_str, name + value_str
def separate_dps(ast, max_len):
    """
    Handles training / evaluation on long ASTs by splitting them into
    smaller ASTs of length max_len, with a sliding window of max_len / 2.

    Example: for an AST ast with length 1700, and max_len = 1000, the
    output will be:
    [[ast[0:1000], 0], [ast[500:1500], 1000], [ast[700:1700], 1500]]

    Input:
        ast : List[Dictionary]
            List of nodes in pre-order traversal.
        max_len : int

    Output:
        aug_asts : List[List[List, int]]
            List of (ast, beginning idx of unseen nodes)
    """
    half_len = int(max_len / 2)
    # Short ASTs fit in one window; all nodes are unseen (idx 0).
    if len(ast) <= max_len:
        return [[ast, 0]]
    aug_asts = [[ast[:max_len], 0]]
    i = half_len
    # Slide by half_len; in each middle window the first half_len nodes
    # were already seen in the previous window.
    while i < len(ast) - max_len:
        aug_asts.append([ast[i : i + max_len], half_len])
        i += half_len
    # Final window is anchored to the end of the AST; idx marks where the
    # previously-unseen tail begins inside it.
    idx = max_len - (len(ast) - (i + half_len))
    aug_asts.append([ast[-max_len:], idx])
    return aug_asts
def linearly_interpolate_point(x_array, y_array, x_value): """ Returns the interpolated y_value for given x_value using data from the two sorted arrays: """ # Find the next largest point in x_array that is still smaller than x_value: lower_index = None for index, x in enumerate(x_array): if x > x_value: break lower_index = index # If x_value is outside the domain of x_array, we use either the min or max points for dydx if lower_index is None: lower_index = 0 elif lower_index == len(x_array) - 1: lower_index = lower_index - 1 higher_index = lower_index + 1 dydx = (y_array[higher_index] - y_array[lower_index]) / (x_array[higher_index] - x_array[lower_index]) if x_value < x_array[lower_index]: y_value = y_array[lower_index] - dydx * (x_value - x_array[lower_index]) else: y_value = y_array[lower_index] + dydx * (x_value - x_array[lower_index]) return y_value
def convert_frame_dict(track_dict):
    """
    Re-index per-track results by frame id.

    return result dict:
    {
        frame_id: {
            'boxes': [],
            'colors': [],
            'fpoints': [],
            'lpoints': [],
            'labels': [],
            'directions': []
        }
    }
    """
    result_dict = {}
    num_classes = len(track_dict)
    for label_id in range(num_classes):
        for track_id in track_dict[label_id].keys():
            direction = track_dict[label_id][track_id]['direction']
            boxes = track_dict[label_id][track_id]['boxes']
            frames = track_dict[label_id][track_id]['frames']
            color = track_dict[label_id][track_id]['color']
            # NOTE(review): this iterates over len() of the per-track dict
            # (its number of keys), not len(frames) or len(boxes) --
            # confirm those lengths are guaranteed to agree.
            for i in range(len(track_dict[label_id][track_id])):
                frame_id = frames[i]
                box = boxes[i]
                if frame_id not in result_dict.keys():
                    result_dict[frame_id] = {
                        'boxes': [],
                        'colors': [],
                        'fpoints': [],
                        'lpoints': [],
                        'labels': [],
                        'directions': []
                    }
                # NOTE(review): box[0] / box[-1] index within one 'boxes'
                # entry, so each entry presumably holds a sequence of
                # [x1, y1, x2, y2] boxes over time -- verify against the
                # producer of track_dict.
                first_box = box[0]
                last_box = box[-1]
                # Box centers: ((x1+x2)/2, (y1+y2)/2).
                center_point_first = ((first_box[2]+first_box[0]) / 2, (first_box[3] + first_box[1])/2)
                center_point_last = ((last_box[2]+last_box[0]) / 2, (last_box[3] + last_box[1])/2)
                result_dict[frame_id]['boxes'].append(box)
                result_dict[frame_id]['fpoints'].append(center_point_first)
                result_dict[frame_id]['lpoints'].append(center_point_last)
                result_dict[frame_id]['directions'].append(direction)
                result_dict[frame_id]['colors'].append(color)
                result_dict[frame_id]['labels'].append(label_id)
    return result_dict
def format_font_name(value):
    """Format an underscore-separated font name into its display version."""
    display_name = value.replace('_', ' ')
    return display_name.title()
def merge(f_list):
    """ Merge two polynomials into a single polynomial f.

    Input:
        [f0, f1] -- a list of two coefficient lists of equal length
    Output:
        f -- coefficients interleaved so that f[2i] = f0[i], f[2i+1] = f1[i]
    """
    f0, f1 = f_list
    merged = [0] * (2 * len(f0))
    # Slice assignment interleaves the even and odd coefficient positions.
    merged[0::2] = f0
    merged[1::2] = f1
    return merged
def __args_to_weka_options(args):
    """
    Build a weka-style command-line option list from a dict.

    Truthy values yield ["-key", value]; empty-string values yield just
    ["-key"]; other falsy values (None, 0, False) are skipped entirely.

    :param args: dictionary with command line input
    :return: list of command line arguments
    """
    options = []
    for key, val in args.items():
        if val:
            options.extend(("-" + key, val))
        elif val == "":
            options.append("-" + key)
    return options
def extract_string_from_byte_array(string_list):
    """Return *string_list* with its first and last elements removed."""
    # [1:-1] is equivalent to slicing from index 1 to len - 1.
    return string_list[1:-1]
def _flatten_dict(d, prefix=None):
    """ Return a "flattened" dictionary suitable for passing to the JWP api.
    E.g.

    .. code::

        { 'foo': 1, 'bar': { 'buzz': 2 }, }

    gets flattened to

    .. code::

        { 'foo': 1, 'bar.buzz': 2, }

    Fix: replaced the mutable default argument ``prefix=[]`` with None
    (backward compatible -- recursive calls pass prefix explicitly).
    """
    if prefix is None:
        prefix = []
    rv = {}
    for k, v in d.items():
        if isinstance(v, dict):
            # Recurse, extending the dotted key path.
            rv.update(_flatten_dict(v, prefix=prefix + [k]))
        else:
            rv['.'.join(prefix + [k])] = v
    return rv
def decodeMemo(num):
    """
    Count the ways to decode digit string *num* (1->A ... 26->Z).

    Since there is a lot of unnecessary repetition, we bypass it with a
    memo: memo[l] is the number of ways to decode the length-l suffix of
    *num* (None = not yet computed).
    """
    memo = [None for _ in range(len(num)+1)]
    def helper(num, l, mem):
        # initialize: empty suffix decodes exactly one way
        if l == 0:
            return 1
        # s is the start index of the length-l suffix
        s = len(num) - l
        # a leading '0' cannot start any encoding
        if num[s]=="0":
            return 0
        if mem[l]:
            return mem[l]
        # take one digit ...
        result = helper(num, l-1, mem)
        # ... or two digits when they form a value <= 26
        if l >= 2 and int(num[s:s+2])<=26:
            result += helper(num, l-2, mem)
        # remember the state
        mem[l] = result
        return result
    return helper(num, len(num), memo)
def factorial_rec(n):
    """Recursive implementation of factorial: O(n) runtime, O(n) max stack frames.

    Fix: negative input previously recursed until RecursionError; it now
    raises ValueError immediately.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative n")
    if n == 0:
        return 1
    return n * factorial_rec(n - 1)
def get_Sigma_powerlaw_params(param_dict):
    """Extract (rScale, rc, sScale, sigma) from a powerlaw parameter dict."""
    return tuple(param_dict[key] for key in ('rScale', 'rc', 'sScale', 'sigma'))
def _get_kaggle_type(competition_or_dataset: str) -> str:
    """Returns the kaggle type (competitions/datasets).

    Args:
        competition_or_dataset: Name of the kaggle competition/dataset.

    Returns:
        Kaggle type (competitions/datasets).
    """
    # Datasets are referenced as `user/dataset_name`; competitions have no '/'.
    if '/' in competition_or_dataset:
        return 'datasets'
    return 'competitions'
def verify_required_properties(deep_security_event):
    """
    Verify that the specified Deep Security event contains every property
    required to convert it to an Amazon Finding Format finding.
    """
    required_properties = [
        'HostOwnerID',
        'HostInstanceID',
        'TenantID',
        'EventID',
        'EventType',
        'LogDate',
        'HostAssetValue',
        'HostGroupID',
        'HostGroupName',
        'HostID',
        'Hostname',
        'HostSecurityPolicyID',
        'HostSecurityPolicyName',
    ]
    return all(prop in deep_security_event for prop in required_properties)
def intersect(list1, list2):
    """Return the elements of list1 that also appear in list2 (order kept)."""
    common = []
    for item in list1:
        if item in list2:
            common.append(item)
    return common
def trimDict(dict, direct = True, *attrs):
    """
    Removes key:value pairs from a dict (in place) and returns it.

    If a direct trim, the provided attributes will be removed from the dict.
    If an indirect trim, the provided attributes will be kept, and all
    other attributes will be deleted.

    Fix: the indirect branch deleted keys while iterating the dict itself,
    which raises RuntimeError in Python 3; it now iterates a snapshot.
    """
    if direct:
        for attr in attrs:
            if attr in dict:
                del dict[attr]
    else:
        # iterate over a copy of the keys so deletion is safe
        for key in list(dict):
            if key not in attrs:
                del dict[key]
    return dict
def check_stopping_criteria(abs_diff=None, rel_diff=None,
                            abs_tol=None, rel_tol=None):
    """
    Decides whether to stop an optimization algorithm based on the
    relative and/or absolute difference stopping criteria. If both
    abs_tol and rel_tol are not None, stops only when both are met.

    Parameters
    ----------
    abs_diff: float, None
        The absolute difference in successive loss functions.
    rel_diff: float, None
        The relative difference in successive loss functions.
    abs_tol: None, float
        The absolute difference tolerance.
    rel_tol: None, float
        The relative difference tolerance.

    Output
    ------
    stop: bool
    """
    abs_met = abs_tol is not None and abs_diff is not None and abs_diff <= abs_tol
    rel_met = rel_tol is not None and rel_diff is not None and rel_diff <= rel_tol
    if abs_tol is not None and rel_tol is not None:
        # both criteria in use -> both must hold
        return abs_met and rel_met
    # otherwise stop as soon as either holds
    return abs_met or rel_met
def int_log2(i):
    """Computes the floor of the base 2 logarithm of an integer.

    Returns 0 for any input below 2 (matching the original loop behavior).
    """
    exponent = 0
    value = i
    while value >= 2:
        value >>= 1
        exponent += 1
    return exponent
def is_user_message(message):
    """Check whether *message* is a (non-echo) text message from the user.

    Returns the truthy/falsy result of the chained lookups, exactly as the
    short-circuiting `and` chain produces it.
    """
    inner = message.get('message')
    return inner and inner.get('text') and not inner.get("is_echo")
def _calculate_rejection_probabilities(init_probs, target_probs):
    """Calculate the per-class rejection probabilities.

    Args:
        init_probs: The class probabilities of the data (p_i).
        target_probs: The desired class proportion in minibatches (t_i).

    Returns:
        A list of per-class rejection probabilities.

    Derivation (see the rejection-sampling analysis): with acceptance
    rates a_i, the achieved proportions are t_i = p_i * a_i / sum_j(p_j * a_j),
    and a solution is a_i = (t_i / p_i) / max_j(t_j / p_j); the rejection
    probability is then 1 - a_i. Classes absent from the data (p_i == 0)
    get ratio 0.
    """
    ratios = []
    for p_i, t_i in zip(init_probs, target_probs):
        ratios.append(0 if p_i == 0 else t_i / p_i)
    max_ratio = max(ratios)
    return [1 - ratio / max_ratio for ratio in ratios]
def factorial(x, factorialTable):
    """Read x! from the pre-computed table (stored at index x - 1)."""
    return factorialTable[x - 1]
def search_func(strlist, queries_by_str):
    """Case-insensitive AND-search.

    Returns the entries of *strlist* containing every whitespace-separated
    query in *queries_by_str*; the whole list is returned for an empty
    query string.

    Fix: each line was lowercased but the queries were not, so any query
    containing an uppercase letter could never match. The query string is
    now lowercased as well, matching the documented "lower ... searching".
    """
    queries = queries_by_str.lower().split(' ')
    if len(queries) == 0:
        return strlist
    firstq = queries[0]
    if len(firstq) == 0:
        return strlist
    if firstq[0] == ' ':
        return strlist
    matched = []
    for original_line in strlist:
        line = original_line.lower()
        if all(line.find(query) != -1 for query in queries):
            matched.append(original_line)
    return matched
def get_integers_digits(integer) -> list:
    """Return the digits of *integer* in a list, least-significant first.

    Fix: the original loop never terminated for negative input (Python's
    floor division gets stuck at -1); the absolute value is now used, so
    -45 yields the same digits as 45. Returns [] for 0, as before.
    """
    digits = []
    integer = abs(integer)
    while integer != 0:
        digits.append(integer % 10)
        integer //= 10
    return digits
def comp_hex(x):
    """
    Get the components of a hex RGB color as strings (`"00"` to `"ff"`).
    """
    red, green, blue = x[:2], x[2:4], x[4:6]
    return red, green, blue
def _get_download_url(version, deprecated=False):
    """Return the SDK download URL for a given version.

    Deprecated versions live under 'deprecated/<version-without-dots>/';
    current versions live under 'featured/'.
    """
    base_url = 'https://storage.googleapis.com/appengine-sdks/{0}/google_appengine_{1}.zip'
    if deprecated:
        channel = 'deprecated/{0}'.format(version.replace('.', ''))
    else:
        channel = 'featured'
    return base_url.format(channel, version)
def analysis_lindex(analysis):
    """
    Return the locals-by-ID index of an analysis ('lindex' stands for
    'local index').

    This index finds a local in constant time given its ID. Locals and
    usages share the same ID, so a usage can be correlated with its local
    by looking up the usage's ID here. Returns {} when the index is absent.
    """
    index = analysis.get("lindex", {})
    return index
def drop_indxs_from_lists(lists, indxs):
    """Drop the elements at the given indexes from every list in *lists*.

    Improvement: index membership is now tested against a set (O(1)) rather
    than the original list (O(len(indxs)) per element). Output is identical.

    :param lists: iterable of lists to filter
    :param indxs: indexes to remove from each list
    :return: new list of filtered lists (the inputs are not modified when
        indxs is non-empty; with an empty indxs the original inner lists
        are returned, as before)
    """
    if len(indxs) > 0:
        drop = set(indxs)
        lists_out = [
            [item for i, item in enumerate(list_i) if i not in drop]
            for list_i in lists
        ]
    else:
        lists_out = lists
    return list(lists_out)
def is_published(catalog_record):
    """
    Is the catalog record published or not.

    Args:
        catalog_record (dict): A catalog record

    Returns:
        bool: True if record is published
    """
    return catalog_record.get('state') == 'published'
def is_valid_mutation_record_to_process(raw_header_line, header, line):
    """
    Determine whether the current line is a valid mutation-file record.

    A record is invalid when it duplicates the header line, is empty, or
    starts with '#'.
    """
    stripped = line.strip()
    if not stripped:
        return False
    if stripped == raw_header_line.strip():
        return False
    if line.startswith("#"):
        return False
    return True
def check_UUID_input(UUID): """ Function checks to make sure UUID is a string :param UUID: user inputted unique identifier of the picture :raises ValueError: if UUID is not a string :returns True: if pass test """ # if isinstance(UUID, str): # if UUID.isalnum(): # return True # else: # logging.info('UUID has weird characters in it') # raise ValueError('UUID needs to be alphaNumeric') # else: # logging.info('UUID is not a string') # raise ValueError("UUID should be input as a string") return ("it is not checking")
def trans_attr(attr, lang):
    """
    Returns the name of the translated attribute of the object:
    <attribute>_<lang_iso_code>. For example: name_es (name in Spanish).

    @param attr Attribute whose name will form the translated attribute.
    @param lang ISO Language code used as the suffix.
    @return: string with the name of the translated attribute.
    """
    # Normalize e.g. "pt-BR" -> "pt_br" so it forms a valid identifier suffix.
    suffix = lang.replace("-", "_").lower()
    return "{0}_{1}".format(attr, suffix)
def rgb_to_hex(value):
    """Convert a colour from an (r, g, b) tuple into a hex triplet.

    value: 3-tuple of ints in 0-255
    """
    r, g, b = value
    return f"{r:02x}{g:02x}{b:02x}"
def list_to_string(array, separator):
    """
    Convert a list/tuple/set into a string joined by *separator*.

    :param array: List, Tuple or Set
    :param separator: separator placed between the stringified values
    :type separator: str
    :return: joined string, or "NaN" when the collection is empty
    :rtype: str
    """
    if len(array) == 0:
        return "NaN"
    return separator.join(str(item) for item in array)
def r_vis_f(t_vis):
    """The visible light reflectance for the front surface

    Args:
        t_vis (double): The visible transmittance

    Returns:
        double: The visible light reflectance for the front surface
    """
    # Empirical cubic fit; kept in expanded (non-Horner) form so the
    # floating-point result matches the published coefficients exactly.
    return -0.0622 * t_vis ** 3 + 0.4277 * t_vis ** 2 - 0.4169 * t_vis + 0.2399
def parse_extra_loss_flag(extra_loss_flag):
    """Parse a "name:weight,name:weight" flag into a loss-weight dict.

    Raises ValueError for negative weights or duplicate loss names.
    """
    extra_loss_dict = {}
    if not extra_loss_flag:
        return extra_loss_dict
    for entry in extra_loss_flag.split(","):
        loss_name, weight_text = entry.split(":")
        loss_weight = float(weight_text)
        if loss_weight < 0 or loss_name in extra_loss_dict:
            raise ValueError("Invalid `extra_loss`: {}".format(extra_loss_flag))
        extra_loss_dict[loss_name] = loss_weight
    return extra_loss_dict
def soundex(name, len=4):
    """Soundex code of *name*, padded/truncated to *len* characters.

    Conforms to Knuth's algorithm; implementation 2000-12-24 by Gregory
    Jorgensen, public domain.
    """
    # Soundex digit for each letter A-Z.
    digits = '01230120022455012623010202'
    code = ''
    first_letter = ''
    for ch in name.upper():
        if not ch.isalpha():
            continue
        if not first_letter:
            # remember first letter
            first_letter = ch
        d = digits[ord(ch) - ord('A')]
        # consecutive duplicate digits are collapsed
        if not code or d != code[-1]:
            code += d
    # the first character of the code is the letter itself, not its digit
    code = first_letter + code[1:]
    # drop the 0s (vowels etc.) and pad with zeros to the requested length
    code = code.replace('0', '')
    return (code + '0' * len)[:len]
def add_digits(s):
    """Sum the digits of a digit string.

    >>> add_digits('12')
    3
    >>> add_digits('17')
    8
    >>> add_digits('13')
    4
    """
    return sum(int(digit) for digit in s)
def setBox(locationCode=332, side='l'):
    """
    Define where to place a histogram plot.

    Given a subplot locationCode and a side, returns an array suitable for
    use with axes() (i.e. [xmin, ymin, width, height]); None for an
    unsupported locationCode.

    Arguments:
        locationCode: The locationCode used by the subplot() function
        side: Either 'l' or 'r' (for left or right)
    """
    xR = [0.15, 0.42, 0.69]
    xA = [0.26, 0.53, 0.80]
    y = [0.13, 0.415, 0.695]
    width = 0.08
    height = 0.03
    # (column, row) indices for each supported 3x3 subplot location code.
    grid = {
        332: (1, 2), 333: (2, 2),
        334: (0, 1), 335: (1, 1), 336: (2, 1),
        337: (0, 0), 338: (1, 0), 339: (2, 0),
    }
    if locationCode not in grid:
        return None
    col, row = grid[locationCode]
    x = xR if side == 'l' else xA
    return [x[col], y[row], width, height]
def get_epoch(version_str):
    """ Parse the epoch out of a package version string.
    Return (epoch, version); epoch is zero if not found.

    Raises:
        ValueError: when the text before the first colon is not an integer.
            (Previously a bare Exception was raised; ValueError is a
            subclass of Exception, so existing `except Exception` callers
            still work while new callers can catch the specific type.)
    """
    try:
        # there could be more than one colon,
        # but we only care about the first
        e_index = version_str.index(':')
    except ValueError:
        # no colons means no epoch; that's valid, man
        return 0, version_str
    try:
        epoch = int(version_str[0:e_index])
    except ValueError as value_error:
        raise ValueError(
            'Corrupt dpkg version %s: epochs can only be ints, and '
            'epochless versions cannot use the colon character.' % version_str) from value_error
    return epoch, version_str[e_index + 1:]
def get_slice_coord(bounds, n):
    """
    Given the bounds of an actor, return the point corresponding to the
    n% of the bounds range.

    bounds should be a list/tuple of two floats; n a number in [0, 1].
    Raises ValueError on malformed input.
    """
    bounds_ok = (
        isinstance(bounds, (list, tuple))
        and isinstance(bounds[0], float)
        and isinstance(bounds[1], float)
    )
    if not bounds_ok:
        raise ValueError("bounds should be a list or tuple of floats: {}".format(bounds))
    if not isinstance(n, (int, float)):
        raise ValueError("n should be a float")
    if n < 0 or n > 1:
        raise ValueError("n should be in range [0, 1]")
    low, high = bounds
    return low + (high - low) * n
def distribute(N, div):
    """Split N items into *div* group sizes, as evenly as possible.

    The first N % div groups get one extra item, so the sizes sum to N.
    (Used to verify outputs in groups.)

    Fix: under Python 3, `N/div` is true (float) division, so the sizes
    came out as floats and the remainder was always 0; divmod now yields
    the integer quotient and remainder.

    :param N: total number of items (int)
    :param div: number of groups (int)
    :return: list of *div* ints summing to N
    """
    n, rem = divmod(N, div)
    return [n + 1 if j < rem else n for j in range(div)]
def extended_gcd(a, b):
    """
    Extended Euclidean algorithm: get the gcd and Bezout coefficients.

    :param a: First number (integer)
    :param b: Second number (integer)
    :return: (g, x, y) with g = gcd(a, b) and a*x + b*y = g (integers)
    """
    x, lastx, y, lasty = 0, 1, 1, 0
    while b != 0:
        # One Euclidean step: (a, b) <- (b, a mod b), updating the Bezout
        # coefficient pairs in lockstep.
        a, (quotient, b) = b, divmod(a, b)
        x, lastx = lastx - quotient * x, x
        y, lasty = lasty - quotient * y, y
    # Sign normalization so the returned gcd coefficient matches |g|.
    # NOTE(review): b is always 0 here, so the (-1 if b < 0 else 1) factor
    # on lasty can never flip sign -- only the `a < 0` adjustment is live.
    return a, lastx * (-1 if a < 0 else 1), lasty * (-1 if b < 0 else 1)
def is_symmetric(L):
    """ Return True if the given square matrix (list of lists) is symmetric.

    Improvements over the original:
      - returns a real bool (the original multiplied booleans, yielding an
        int such as 0 or 1);
      - short-circuits on the first asymmetric pair instead of always
        scanning every cell;
      - a failed squareness check returns False immediately (the original
        continued looping and could raise IndexError on ragged input).
    """
    if len(L) != len(L[0]):
        return False
    n = len(L)
    return all(L[i][j] == L[j][i] for i in range(n) for j in range(n))
def to_comment(value):
    """ Render *value* as '#'-prefixed comment line(s); '' for None. """
    if value is None:
        return ''
    if len(value.split('\n')) == 1:
        return "# " + value
    else:
        # NOTE(review): the trailing [:-1] drops the final split element.
        # That is correct when *value* ends with '\n' (it removes the empty
        # tail), but it silently discards the last line otherwise -- confirm
        # inputs always end with a newline.
        return '\n'.join(['# ' + l for l in value.split('\n')[:-1]])
def getitem(lst, indices):
    """Definition for multidimensional slicing and indexing on arbitrarily
    shaped nested lists.
    """
    # No more indices: return the remaining (sub)structure as-is.
    if not indices:
        return lst
    i, indices = indices[0], indices[1:]
    # Use list.__getitem__ directly so subclass overrides don't interfere.
    item = list.__getitem__(lst, i)
    if isinstance(i, int):
        # Integer index removes a dimension; recurse into the single item.
        return getitem(item, indices)
    # Empty slice: check if all subsequent indices are in range for the
    # full slice, raise IndexError otherwise. This is NumPy's behavior.
    if not item:
        if lst:
            # Probe the full slice purely for its error side effect.
            _ = getitem(lst, (slice(None),) + indices)
        elif any(isinstance(k, int) for k in indices):
            raise IndexError
        return []
    # Slice index keeps the dimension; apply remaining indices per element.
    return [getitem(x, indices) for x in item]
def make_chains(text_string, n):
    """ Takes input text as string; returns dictionary of markov chains.

    Dictionary has n-word tuples as keys and lists of following words as
    values. (The original docstring had keys and values reversed.)
    """
    chains = {}
    text_list = text_string.split()
    for i in range(len(text_list)-n):
        key = tuple(text_list[i:i+n])
        chains[key] = chains.get(key, [])
        # slice text_list according to appropriate length as dictated by n
        # NOTE(review): this extends with the next *n* words after the key
        # rather than the single following word -- confirm this is the
        # intended variant of the markov-chain construction.
        chains[key].extend(text_list[i+n:i+2*n])
    return chains
def _follow_type_to_int(follow_type: str):
    """Convert dpayd-style "follow type" into internal status (int)."""
    assert follow_type in ['blog', 'ignore'], "invalid follow_type"
    return {'blog': 1, 'ignore': 2}[follow_type]
def format_relative_datetime(amount: float, unit_key: str) -> str:
    """Generate a human-readable relative time string."""
    # Trim trailing zeros and any bare dot, e.g. 2.50 -> "2.5", 3.0 -> "3"
    # (https://stackoverflow.com/a/51227501)
    amount_text = f"{amount:f}".rstrip("0").rstrip(".")
    # unit_key is plural; drop the trailing 's' when the amount is exactly 1
    unit_text = unit_key[:-1] if amount == 1.0 else unit_key
    return f"{amount_text} {unit_text} ago"
def one_of_k_encoding_unk(x, allowable_set):
    """One-hot encode *x* over *allowable_set*; inputs not in the set map
    to the last element."""
    if x not in allowable_set:
        x = allowable_set[-1]
    return [x == member for member in allowable_set]
def sRGBToLinearRGB(color):
    """Convert a single-channel grayscale value from sRGB to linear.

    Input range is expected to be 0 to 1; negative input clamps to 0.
    """
    if color < 0:
        return 0
    if color < 0.04045:
        # linear segment near black
        return color / 12.92
    # gamma-corrected segment
    return ((color + 0.055) / 1.055) ** 2.4
def area_triangle(base, height):
    """
    Calculates triangle area with given base and height length.

    Input: base (float), height (float)
    Output: area_out (float)
    Raises: ValueError when base and/or height is negative.

    Fix: corrected the "hight" typo in the user-facing combined error
    message (it now reads "height").
    """
    if base < 0 and height < 0:
        raise ValueError("The base and height length must be >0")
    elif base < 0:
        raise ValueError("The base length must be >0")
    elif height < 0:
        raise ValueError("The height length must be >0")
    area_out = 0.5 * base * height
    print("The triangle area is {:4.2f}cm2.".format(area_out))
    return area_out
def removeBases(str):
    """Strip the base-class list from a C++-style declaration string.

    Truncates at the first single ':' found; '::' scope operators are
    skipped. Returns the string unchanged when no single ':' is present.
    """
    # Start at -2 so the first find begins at index 0 after the +2 step.
    pos1 = -2
    while pos1 < len(str):
        # Search 2 past the previous hit so that a '::' pair is stepped over.
        pos1 = str.find(':', pos1 + 2)
        if pos1 < 0:
            return str
        # A ':' at the very end, or one not followed by another ':', marks
        # the start of the base list -- cut there.
        if (pos1 == len(str)-1) or (str[pos1+1]!=':'):
            return str[:pos1]
    return str.strip()
def get_open_bid(curr_bids, curr_round):
    """Return True if there is still an open trick card to take."""
    return curr_bids < curr_round
def vecDot(vecA, vecB):
    """
    The 2-D dot product of vecA and vecB.

    :param vecA: sequence with x, y components
    :param vecB: sequence with x, y components
    :return: vecA . vecB
    """
    ax, ay = vecA[0], vecA[1]
    bx, by = vecB[0], vecB[1]
    return ax * bx + ay * by
def bounds(total_docs, client_index, num_clients, includes_action_and_meta_data):
    """
    Calculates the start offset and number of documents for each client.

    :param total_docs: The total number of documents to index.
    :param client_index: The current client index.  Must be in the range [0, `num_clients').
    :param num_clients: The total number of clients that will run bulk index operations.
    :param includes_action_and_meta_data: Whether the source file already includes the action and meta-data line.
    :return: A tuple containing: the start offset (in lines) for the document corpus, the number of documents that the
             client should index, and the number of lines that the client should read.
    """
    # Each doc occupies two source lines when the action/meta-data line is inline.
    lines_per_doc = 2 if includes_action_and_meta_data else 1
    docs_per_client = total_docs / num_clients
    first_doc = round(docs_per_client * client_index)
    next_client_first_doc = round(docs_per_client * (client_index + 1))
    num_docs = next_client_first_doc - first_doc
    return first_doc * lines_per_doc, num_docs, num_docs * lines_per_doc
def calc_construction_cost(
    levee_height_cm: float,
    height_increase_cm: float,
    construction_quadratic: float,
    construction_linear: float,
    construction_fixed: float,
) -> float:
    """Compute levee construction cost (in euros) with a quadratic cost model.

    The quadratic term applies to the resulting total height, while the
    linear term applies only to the increase itself.
    """
    if height_increase_cm < 0.0:
        raise ValueError("height increase must be non-negative")
    if height_increase_cm == 0.0:
        # No construction, no cost.
        return 0.0
    total_height = levee_height_cm + height_increase_cm
    cost_millions = (
        construction_quadratic * total_height ** 2
        + construction_linear * height_increase_cm
        + construction_fixed
    )
    # Model coefficients are in millions of euros; convert to euros.
    return cost_millions * 1e6
def empty_seats(seats, seat_numbers):
    """Free the given seats by clearing their previous reservations.

    :param seats: dict - seating chart dictionary (mutated in place).
    :param seat_numbers: list - seat numbers to free up or empty.
    :return: the updated seating chart dictionary.
    """
    # dict.fromkeys maps every listed seat to None in one shot.
    seats.update(dict.fromkeys(seat_numbers))
    return seats
def mapping_table_for(domain_table):
    """Return the name of the mapping table generated for a domain table.

    :param domain_table: a domain table name (e.g. 'visit_occurrence',
        'condition_occurrence')
    :return: the corresponding '_mapping_'-prefixed table name
    """
    # str.join (like '+') raises TypeError for non-str input, as before.
    return "".join(("_mapping_", domain_table))
def parse_V1A_fname(fname):
    """Parse a GeoNet .V1A/.V2A file name into (YYYYMMDD, HHMM, stat_code).

    See http://info.geonet.org.nz/display/appdata/Accelerogram+Data+Filenames+and+Formats
    Three forms are recognised (distinguished by their underscore count):
      XNNSITEJJ            - instrument code, 2-digit year, 4-char site, number
      YYYYMMDD_HHMM_SITE   - origin time (UTC) plus site code
      YYYYMMDD_HHMM_SITE_XX- trigger time (UTC), site code, sensor location

    fname: name of GeoNet data file, must not include a path.
    Date/time fields are empty strings for the first form.
    """
    # Drop the .V1A / .V2A extension before parsing.
    stem = fname.split(".")[0]
    parts = stem.split("_")
    if len(parts) == 1:
        # XNNSITEJJ: the 4-character site code sits at positions 3-6.
        return "", "", stem[3:7]
    if len(parts) == 3:
        return parts[0], parts[1], parts[2]
    if len(parts) == 4:
        return parts[0], parts[1], parts[2]
    raise Exception("{:s} is unknow file name format for .V1A files\n".format(stem))
def responseSignedURL(signedUrl, origin=None):
    """Build an AWS API Gateway redirect response to a signed URL.

    :param signedUrl: pre-signed URL the client is redirected to
    :param origin: unused; kept for backward compatibility with callers
    :return: dict in the Lambda proxy response format (HTTP 303 See Other)
    """
    cors_headers = {
        "Location": signedUrl,
        # Wildcard CORS; the 'origin' parameter is intentionally ignored.
        "Access-Control-Allow-Origin": "*",
        "Cache-Control": "max-age=86400",
        "Access-Control-Allow-Headers": "Content-Type",
        "Access-Control-Expose-Headers": "Content-Type",
    }
    return {"statusCode": 303, "headers": cors_headers}
def LBtoKG(mlb):
    """Convert a mass from pounds to kilograms.

    Note: 1 kg = 2.20462 lb.

    :param mlb: mass [lb]
    :return: mass [kg]
    """
    return mlb / 2.20462
def exp(m: int, k: int) -> int:
    """Fast exponentiation: compute m ** k by square-and-multiply.

    Args:
        m: Base.
        k: Non-negative exponent.

    Returns:
        m raised to the power k (1 when k == 0).

    Raises:
        ValueError: If k is negative. (The original loop never terminated
        for negative k, since -1 >> 1 == -1 in Python.)
    """
    if k < 0:
        raise ValueError("exponent k must be non-negative")
    power = 1
    while k:
        if k & 1:
            power *= m
        m *= m
        k >>= 1
    return power
def merge_lists(first_list, second_list):
    """Merge two lists element-wise into a list of pairs.

    @param first_list: The first list
    @param second_list: The second list
    @return: [(el11, el21), (el12, el22), ...] a list of tuples.
        If the lists differ in length the result is truncated to the
        shorter one (the original raised IndexError when the second
        list was shorter).
    """
    # zip is the idiomatic pairwise merge and runs at C speed.
    return list(zip(first_list, second_list))
def arg_max(t):
    """Return the index of the first occurrence of the maximum of t.

    Raises ValueError for an empty sequence (same as the original,
    where max([]) raised).
    """
    # list.index returns the first matching position, exactly like the
    # original manual scan, but in one idiomatic call.
    return t.index(max(t))
def legend_of_image(square_side, smali_dim):
    """Build a legend mapping each class to its start/end cell in the image.

    Each class occupies smali_dim[name] characters laid out row-major on a
    square of side square_side; coordinates in the legend are 1-based.
    """
    rows = []
    end = 0
    for name, size in smali_dim.items():
        if not rows:
            # First class always starts at [1,1]; +1 accounts for a separator.
            end = size + 1
            xe = end % square_side + 1
            ye = end // square_side + 1
            rows.append(f"{name} [1,1] [{xe},{ye}]")
        else:
            start = end + 1
            xs = start % square_side + 1
            ys = start // square_side + 1
            end = start + size
            xe = end % square_side + 1
            ye = end // square_side + 1
            rows.append(f"{name} [{xs},{ys}] [{xe},{ye}]")
    return "\n".join(rows)
def _dumps(obj, level): """ Does the actual serializing of data into an ACF format. :param obj: A dictionary to serialize. :param level: Nesting level. :return: A List of strings. """ lines = [] indent = '\t' * level for key, value in obj.items(): if isinstance(value, dict): # [INDENT]"KEY" # [INDENT]{ line = indent + '"{}"\n'.format(key) + indent + '{' lines.append(line) # Increase intendation of the nested dict lines.extend(_dumps(value, level + 1)) # [INDENT]} lines.append(indent + '}') else: # [INDENT]"KEY"[TAB][TAB]"VALUE" lines.append(indent + '"{}"'.format(key) + '\t\t' + '"{}"'.format(value)) return lines
def probability_sum(probability, token_frequency, n):
    """Accumulate a token's relative frequency onto a running probability.

    Args:
        probability (float): Previously accumulated probability.
        token_frequency (float): Number of appearances of the token in the text.
        n (int): Length of the text.

    Returns:
        float: The updated probability value.
    """
    return probability + token_frequency / n
def _batch_dimension_list(user_data, threshold=0.9):
    """A helper for ``build_batches`` returning the rectangle dimensions of
    each batch of student history.

    Users are greedily packed into a rectangle whose width is the longest
    history in the batch; a new batch starts whenever the fill ratio
    (actual interactions / rectangle area) would drop below ``threshold``.

    :param list[UserData] user_data: The output of ``build_nn_data``.
        Must be sorted from largest to shortest history length *before*
        being passed.
    :param float threshold: Minimum acceptable fill ratio of a rectangle.
    :return: list[(int, int)] - a (width, height) pair per batch.
    """
    if len(user_data) <= 0:
        return []
    # Width of rectangle (user with max interactions within a batch);
    # valid because user_data is sorted longest-first.
    width = user_data[0].length
    area_actual = 0      # Actual area (sum of history lengths) within rectangle
    area_rect = 0        # Area bounded by rectangle (width * users so far)
    height = 0           # Height of rectangle (num users within a batch)
    dimension_list = []  # List of rectangle dimensions
    for i, user in enumerate(user_data):
        num_interactions = user.length
        # Tentatively add this user to the current rectangle.
        area_actual += num_interactions
        area_rect += width
        # If the fill ratio would drop below threshold, close out the
        # previous batch (not including the current user) and start a new
        # rectangle whose width is this user's history length.
        # Note that we say height > 0 on the off chance that double rounding
        # messes up when area_actual "==" area_rect.
        if area_actual / area_rect < threshold and height > 0:
            dimension_list.append((width, height))
            width = num_interactions
            height = 0
            # Reset both areas to this single user's contribution.
            area_actual = width
            area_rect = width
        height += 1
    # Append the final (still-open) batch.
    dimension_list.append((width, height))
    return dimension_list
def extract_row_and_col_mappings(
    grades_headers, grades_file_contents, master_repo_names
):
    """Build lookup maps: username -> row number and master repo -> column number.

    :param grades_headers: header row of the grades sheet
    :param grades_file_contents: data rows of the grades sheet
    :param master_repo_names: repo names expected to appear in the headers
    :return: (username_to_row_nr, master_repo_to_col_nr)
    """
    # Column lookup for each master repo name, taken from the header row.
    repo_cols = {}
    for repo_name in master_repo_names:
        repo_cols[repo_name] = grades_headers.index(repo_name)
    # Row lookup keyed by the value found in the 'username' column.
    username_col = grades_headers.index("username")
    user_rows = {row[username_col]: i for i, row in enumerate(grades_file_contents)}
    return user_rows, repo_cols
def _n_gram(tokens, n=2): """Compute n-gram given n from tokens. Args: tokens (list of str): A list of tokens. n (int, optional): Number of tokens in a shingle. Default to 2 (bi-gram). Returns: list of str: A list of shingles. """ output = [] for i in range(len(tokens) - n + 1): output.append(' '.join(tokens[i:i + n])) return output
def sigma_k(n, k):
    """Sum of divisors of integer n to the power k.

    Computes the sum of d**k over the positive divisors d of |n|.
    Divisors are enumerated in complementary pairs up to sqrt(n), which is
    O(sqrt n) instead of the original O(n) half-range scan. The local
    accumulator no longer shadows the builtin ``sum``.
    """
    n = abs(n)
    if n == 0:
        # Match the original: sum collapses to 0**k (1 when k == 0,
        # ZeroDivisionError for negative k).
        return 0 ** k
    total = 0
    d = 1
    while d * d <= n:
        if n % d == 0:
            total += d ** k
            partner = n // d
            if partner != d:  # avoid double-counting a perfect-square root
                total += partner ** k
        d += 1
    return total
def _shuffle(items): """Makes sure items are not in alphabetical order or any other kind of order.""" items = sorted(items) return [items[1], items[0]] + items[2:]
def filename(objname, survey="DECaLS-DR8", format="fits"):
    """Convert a Component_name into a filename.

    Takes the Julian-coordinate part of the name (the token after the
    first space, dropping the survey prefix) to eliminate whitespace.
    Another method can be used as long as it is consistently applied.
    """
    coords = objname.split(" ")[1]
    return "{}_{}.{}".format(coords, survey, format)
def none_to_empty_list(*args):
    """Replace every None argument with an empty list.

    A single argument is returned bare; multiple arguments come back as a list.

    Examples
    --------
    >>> none_to_empty_list(None)
    []
    >>> a, b, c = None, 'woo', 34
    >>> none_to_empty_list(a, b, c)
    [[], 'woo', 34]
    """
    converted = [[] if value is None else value for value in args]
    # Unwrap the single-argument case for caller convenience.
    return converted[0] if len(converted) == 1 else converted
def code_block(string: str, max_characters: int = 2048, code_type: str = ""):
    """Format text into a Discord code block, truncating to fit."""
    # Neutralise embedded fences so the block cannot be broken out of.
    safe = string.replace("```", "\u200b`\u200b`\u200b`\u200b")
    # 7 = len("```\n") + len("```"); the language tag adds its own length.
    overhead = 7 + len(code_type)
    if len(safe) <= max_characters - overhead:
        return f"```{code_type}\n{safe}```"
    # Reserve 4 characters for the " ..." truncation marker.
    return f"```{code_type}\n{safe[:max_characters - overhead - 4]} ...```"
def ft_year(s):
    """Return the year from a datetime-like object.

    Notes:
        If the object has no ``year`` attribute, returns -1.
    """
    # getattr with a default mirrors the original try/except AttributeError.
    return getattr(s, 'year', -1)
def clear_output(out):
    """Decode a UTF-8 byte string and remove all newline characters."""
    text = out.decode('utf-8')
    # Splitting on '\n' and re-joining drops every newline, like replace('\n', '').
    return ''.join(text.split('\n'))
def get_place(city, country):
    """Join city and country into a 'city,country' string.

    Spaces in the city name are percent-encoded as %20 for use in URLs.
    """
    encoded_city = str(city).replace(" ", "%20")
    return ",".join((encoded_city, str(country)))
def append_list_to_list_or_item(items1, items2):
    """Append a list or a single item to a list or a single item.

    :param items1: The list (or item) to append to
    :param items2: The list (or item) to be appended
    :return: The concatenated list
    """
    # Wrap anything that is not exactly a list (type check kept deliberately
    # strict, matching the original) so both operands concatenate cleanly.
    wrapped = [obj if type(obj) is list else [obj] for obj in (items1, items2)]
    return wrapped[0] + wrapped[1]
def getdiffmeta(diff):
    """get commit metadata (date, node, user, p1) from a diff object

    The metadata could be "hg:meta", sent by phabsend, like:

        "properties": {
          "hg:meta": {
            "date": "1499571514 25200",
            "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
            "user": "Foo Bar <foo@example.com>",
            "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
          }
        }

    Or converted from "local:commits", sent by "arc", like:

        "properties": {
          "local:commits": {
            "98c08acae292b2faf60a279b4189beb6cff1414d": {
              "author": "Foo Bar",
              "time": 1499546314,
              "branch": "default",
              "tag": "",
              "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
              "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
              "local": "1000",
              "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
              "summary": "...",
              "message": "...",
              "authorEmail": "foo@example.com"
            }
          }
        }

    Note: metadata extracted from "local:commits" will lose time zone
    information.
    """
    # r'' prefixes on keys are a py2/py3 compat convention in Mercurial code.
    # "properties" may be missing or None; treat both as empty.
    props = diff.get(r'properties') or {}
    meta = props.get(r'hg:meta')
    # Fall back to arc's "local:commits" only when "hg:meta" is absent.
    if not meta and props.get(r'local:commits'):
        # NOTE(review): sorted() over dict values appears to assume a single
        # commit entry (sorting multiple dicts raises TypeError on py3) —
        # confirm upstream guarantees one local commit here.
        commit = sorted(props[r'local:commits'].values())[0]
        meta = {
            # Time zone is lost: offset is hard-coded to 0 (see docstring note).
            r'date': r'%d 0' % commit[r'time'],
            r'node': commit[r'rev'],
            r'user': r'%s <%s>' % (commit[r'author'], commit[r'authorEmail']),
        }
        # Only the first parent (p1) is recorded, when present.
        if len(commit.get(r'parents', ())) >= 1:
            meta[r'parent'] = commit[r'parents'][0]
    # Always return a dict, never None.
    return meta or {}
def get_payload_from_clearsigned_message(message: str) -> str:
    """Extract the original payload from an OpenPGP clearsigned message.

    Strips the -----BEGIN PGP SIGNED MESSAGE----- header, the Hash: line and
    the signature block, returning only the signed payload.

    :param message: The message containing the signature and the payload.
    :return: Extracted payload as string. Calling code is responsible for
        converting it to the proper data type.
    """
    lines = message.strip().split('\n')
    # A valid clearsigned message needs at least: header, Hash:, blank,
    # one payload line, and the signature marker.
    header_ok = (
        len(lines) >= 5
        and lines[0] == '-----BEGIN PGP SIGNED MESSAGE-----'
        and lines[1].startswith('Hash:')
    )
    if not header_ok:
        raise RuntimeError("Invalid message format, no --BEGIN PGP SIGNED MESSAGE-- header")
    payload_start = 3  # payload begins on the 4th line (after header, Hash:, blank)
    for offset, line in enumerate(lines[payload_start:]):
        if line.strip() == '-----BEGIN PGP SIGNATURE-----':
            return "\n".join(lines[payload_start:payload_start + offset])
    raise RuntimeError("Invalid message format, no --BEGIN PGP SIGNATURE-- section")
def humanbytes(size: float) -> str:
    """Humanize a byte count, e.g. 2048 -> "2.00 KiB".

    Args:
        size: Number of bytes. Falsy values (0, None, ...) yield "".

    Returns:
        The size scaled by powers of 1024, formatted with two decimals and
        a binary unit prefix, or "" for falsy input.
    """
    if not size:
        return ""
    power = 1024
    prefixes = {0: ' ', 1: 'Ki', 2: 'Mi', 3: 'Gi', 4: 'Ti'}
    t_n = 0
    # '>=' makes exact powers of 1024 roll over to the larger unit (the
    # original '>' printed "1024.00  B"), and capping t_n at the largest
    # known prefix avoids the original KeyError for sizes above the Ti range.
    while size >= power and t_n < max(prefixes):
        size /= power
        t_n += 1
    return "{:.2f} {}B".format(size, prefixes[t_n])