content
stringlengths
42
6.51k
def _getbool_from_str(s): """Convert given string into bool value. Defaults to False. """ return (s or '').lower() in ['1', 'y', 'yes', 't', 'true']
def dot(a, b) -> float:
    """Return the 2-D dot product of vectors a and b."""
    ax = float(a[0])
    ay = float(a[1])
    return ax * b[0] + ay * b[1]
def vertical_flip(pairs):
    """Mirror a sorting network across the horizontal axis.

    The flipped network is still a valid sorting network provided the
    input network was valid.
    """
    top = max(second for _, second in pairs)
    flipped = []
    for lo, hi in pairs:
        flipped.append((top - hi, top - lo))
    return flipped
def find_term(node, tokenL):
    """Walk a trie rooted at `node` along the tokens in `tokenL`.

    Returns (value, term_fl) of the deepest node reached, where term_fl
    is true when a complete term was matched; (None, False) when not
    even the first token matches.
    """
    deepest = None
    for token in tokenL:
        if token not in node['children']:
            break
        deepest = node = node['children'][token]
    if deepest is None:
        return (None, False)
    return (deepest['value'], deepest['term_fl'])
def get_literals(cnf):
    """Collect the literal names appearing in a CNF formula.

    Parameters
    ----------
    cnf : list[set[(str, bool)]]
        The CNF formula whose literals should be extracted.

    Returns
    -------
    set[str]
        Names of all literals occurring in the formula.
    """
    return {atom[0] for clause in cnf for atom in clause}
def look_for_code(msg):
    """Extract the first ``` code block from a discord message.

    Returns the block's lines as a list, or "" when the message has no
    ``` fence (or the fence is empty).
    """
    pieces = msg.split("```")
    try:
        block = pieces[1]
        # Drop surplus leading backticks; IndexError if nothing is left.
        while block[0] == "`":
            block = block[1:]
        return block.split("\n")
    except IndexError:
        return ""
def mod_div(n, d, m):
    """Return (n / d) mod m for prime m.

    Uses Fermat's little theorem: d^(m-2) mod m is the modular
    multiplicative inverse of d when m is prime.
    """
    d_inverse = pow(d, m - 2, m)
    return n * d_inverse % m
def _compare_weights_of_sets(weight_new_set: int, weight_old_set: int) -> int: """ Method that compares the weighted sum of two sets and keeps the bigger one. :param weight_new_set: weighted sum of the new set :param weight_old_set: weighted sum of the old set :return: bigger weighted sum """ if weight_new_set > weight_old_set: return weight_new_set else: return weight_old_set
def query_attr_select(params, table_ref=True):
    """Create the attribute portion of a SELECT statement for a query.

    Attributes appear in the same order as in `params` (pass an
    OrderedDict to control ordering).

    @param params: dict of query information (column names, table
        name, etc.), e.g.
        OrderedDict([('numerator', 'price'),
                     ('denominator', 'sq_meters'),
                     ('subquery', 'SELECT * FROM interesting_data')])
        Output:
        'i."price"::numeric As attr1, i."sq_meters"::numeric As attr2, '
    @param table_ref: when True, columns are prefixed with the table
        alias "i.".
    """
    attr_string = ""
    template = "\"%(col)s\"::numeric As attr%(alias_num)s, "
    if table_ref:
        template = "i." + template
    if ('time_cols' in params) or ('ind_vars' in params):
        # if markov or gwr analysis
        attrs = (params['time_cols'] if 'time_cols' in params else params['ind_vars'])
        if 'ind_vars' in params:
            # gwr aggregates each independent variable column
            template = "array_agg(\"%(col)s\"::numeric) As attr%(alias_num)s, "
        for idx, val in enumerate(attrs):
            attr_string += template % {"col": val, "alias_num": idx + 1}
    else:
        # if moran's analysis
        attrs = [k for k in params if k not in ('id_col', 'geom_col', 'subquery', 'num_ngbrs', 'subquery')]
        for idx, val in enumerate(attrs):
            # NOTE(review): here the *values* of params are used as column
            # names (params[val]), unlike the branch above -- confirm intent.
            attr_string += template % {"col": params[val], "alias_num": idx + 1}
    return attr_string
def get_value(str_val):
    """Best-effort conversion of a string to int or float.

    Empty/None input yields None; non-numeric strings come back
    unchanged; a numeric string without a '.' becomes an int.
    """
    if not str_val:
        return None
    try:
        val = float(str_val)
        if "." not in str_val:
            # int(float('nan')) raises, falling back to the raw string
            val = int(val)
    except ValueError:
        val = str_val
    return val
def set_compare(a, b):
    """Compare two iterables as sets (duplicates are ignored).

    Parameters
    ----------
    a : iterable
    b : iterable

    Returns
    -------
    tuple
        (only in a, only in b, common to both).
    """
    set_a = set(a)
    set_b = set(b)
    return set_a - set_b, set_b - set_a, set_a & set_b
def _side_name(side): """ Rack side (for a draft) """ if side == 'True': return 'Front side of the rack' else: return 'Back side of the rack'
def days_to_goal(target_distance: int, daily_distance: int):
    """Count the days needed to reach target_distance when the daily
    distance grows by 10% every day.

    :param target_distance: total distance to cover
    :param daily_distance: distance covered on the first day
    :raises ValueError: if daily_distance is not positive while the
        target is (the original loop would never terminate).
    :return: number of days until the accumulated distance reaches the
        target (0 when the target is already non-positive)
    """
    if daily_distance <= 0 < target_distance:
        raise ValueError("daily_distance must be positive to ever reach the goal")
    days = 0
    covered = 0
    while covered < target_distance:
        covered += daily_distance
        daily_distance *= 1.1
        days += 1
    return days
def validate_alpha(x) -> bool:
    """Validates that the input is alpha.

    NOTE(review): only all-digit strings are rejected; mixed input such
    as "a1" still passes, and the empty string is accepted -- confirm
    whether str.isalpha() was intended.
    """
    return not x.isdigit()
def pf_model_info(pf_model):
    """Build the full partition-function model dict, filling defaults.

    'wells' (when present) sets both the reactant and product well
    models at once; otherwise each side falls back to 'fake'.
    """
    defaults = (('rot', 'rigid'), ('tors', 'rigid'), ('vib', 'harm'),
                ('sym', 'none'), ('vpt2', 'none'), ('etrans', 'none'))
    pf_models = {key: pf_model.get(key, fallback) for key, fallback in defaults}
    if 'wells' in pf_model:
        pf_models['rwells'] = pf_model['wells']
        pf_models['pwells'] = pf_model['wells']
    else:
        pf_models['rwells'] = pf_model.get('rwells', 'fake')
        pf_models['pwells'] = pf_model.get('pwells', 'fake')
    return pf_models
def _floor(n, base=1): """Floor `n` to a multiple of `base`""" return n // base * base
def get_measurements_from_kal_scan(kal_out):
    """Collect measurements from a kalibrate channel scan.

    `kal_out` is the raw bytes output; the last space-separated field of
    every line containing "offset " is returned.
    """
    measurements = []
    for raw_line in kal_out.splitlines():
        text = raw_line.decode("utf-8")
        if "offset " not in text:
            continue
        measurements.append(text.split(' ')[-1])
    return measurements
def m_step(heads_A, tails_A, heads_B, tails_B):
    """M-step of coin-flip EM: per-coin head probabilities maximizing
    the expected number of heads/tails."""
    total_A = heads_A + tails_A
    total_B = heads_B + tails_B
    return heads_A / total_A, heads_B / total_B
def parse_point(s_point):
    """Parse an "x,y" configuration string into {'x': int, 'y': int}."""
    parts = s_point.split(",")
    return {"x": int(parts[0]), "y": int(parts[1])}
def is_uppercase(string):
    """Check whether the string contains no lowercase letters.

    :param string: string to be checked
    :type string: str
    :return: True when the string equals its uppercase form (digits and
        symbols therefore count as "uppercase").
    :rtype: bool
    """
    return string.upper() == string
def nfloat(s):
    """Return float(s) when possible, otherwise None.

    None input and unparsable strings both yield None.
    """
    # `is None` rather than the original `not s == None` (PEP 8 idiom).
    if s is None:
        return None
    try:
        return float(s)
    except ValueError:
        return None
def geometry(w, h, i, j):
    """ImageMagick geometry string for a w x h rectangle centered near (i, j)."""
    left = i - w // 2
    top = j - (h - 1) // 2
    return "{0}x{1}+{2}+{3}".format(w, h, left, top)
def windumrechner(wind):
    """Convert wind speed from miles per hour to km/h.

    :param wind: int, float or None
    :return: km/h rounded to 2 decimals, or None for non-numeric input
    """
    if not isinstance(wind, (int, float)):
        return None
    return round(wind * 1.609346, 2)
def increment_avg(old_avg, new_value, elements):
    """Incrementally update a running average.

    :param old_avg: the current average.
    :type old_avg: float
    :param new_value: the new value folded into the average.
    :type new_value: float
    :param elements: collection whose truthy-element count is the new
        element count.
    :type elements: list

    NOTE(review): filtering with truthiness drops every falsy element
    (0, '', None); presumably only None should be skipped -- confirm.
    """
    count = len([element for element in elements if element])
    return old_avg + (new_value - old_avg) / count
def has_permission(user, item):
    """Decide whether `user` may update `item`.

    :param user: {'id': str, 'groups': [str], ...}
    :param item: {'owner': str, ...}; may be mutated by reference by
        callers (item['field'] = 'value')
    :return: bool -- True for admin-group members and the item's owner.
    """
    if user is None or item is None:
        return False
    if 'admin' in user.get('groups', []):
        return True
    return user.get('id', None) == item.get('owner', None)
def text_to_bytes(text, encoding='UTF-8', size=None):
    """Encode text to bytes, optionally left-padding with NUL bytes.

    :param text: value to encode (converted via str() first)
    :param encoding: codec for the encode step, UTF-8 by default
    :param size: when truthy, pad on the left with 0x00 to this length
    :return: a bytes object
    """
    encoded = str(text).encode(encoding)
    if not size:
        return encoded
    return encoded.rjust(size, b'\x00')
def hand2value(h):
    """Pack the sorted card values of a hand into one base-16 number.

    Each card contributes its high nibble (v >> 4) shifted into its own
    4-bit slot, lowest card in the lowest slot.
    """
    total = 0
    for slot, card in enumerate(sorted(h)):
        total += (card >> 4) << (slot * 4)
    return total
def getCbsdsNotPartOfPpaCluster(cbsds, ppa_record):
    """Return the CBSDs that are not part of a PPA cluster list.

    Args:
      cbsds: List of CBSDData dicts.
      ppa_record: A PPA record dictionary.
    Returns:
      A list of the CBSDs whose 'id' is absent from the record's
      cbsdReferenceId list.
    """
    cluster_ids = ppa_record['ppaInfo']['cbsdReferenceId']
    return [cbsd for cbsd in cbsds if cbsd['id'] not in cluster_ids]
def replace_version(playbook):
    """Force the playbook's version to -1.

    :param playbook: playbook dict loaded from yaml (mutated in place)
    :return: the same dict, with "version" set to -1
    """
    playbook.update(version=-1)
    return playbook
def clean(elems):
    """
    Filter scraped selenium web elements down to publication links.

    Parameters
    ----------
    elems (list) : selenium web elements to inspect.

    Returns
    -------
    (urls, titles) : tuple of two lists -- the href and the link text of
        the first <a href> child of each element, in input order.
        NOTE(review): an earlier docstring claimed only urls are
        returned; the code has always returned both lists.
    """
    titles = []
    urls = []
    for elem in elems:
        # find_element_by_css_selector was removed in selenium 4 --
        # presumably this codebase pins selenium 3; confirm.
        href_child = elem.find_element_by_css_selector('a[href]')
        url = href_child.get_attribute('href')
        title = href_child.text
        titles.append(title)
        urls.append(url)
    return urls, titles
def calc_precision(tp: int, fp: int) -> float:
    """Precision = TP / (TP + FP), or 0 when both counts are zero.

    Args:
        tp (int): amount of TP.
        fp (int): amount of FP.

    Returns:
        float: precision for the given amounts of TP and FP.
    """
    denominator = tp + fp
    if denominator == 0:
        # prevent zero-division error
        return 0
    return float(tp / denominator)
def rm_var(di):
    """Drop the "varname" key from an aggregation-output dict.

    Args:
        di (dict): dict from *aggr_out results (mutated in place).

    Returns:
        The same dict, without the "varname" key.
    """
    di.pop("varname")
    return di
def lowerup(value): # Only one argument.
    """Convert a string to all UPPERCASE.

    NOTE(review): an earlier docstring said "lowercase", but the code
    has always returned value.upper().
    """
    return value.upper()
def graph_normalize(graph):
    """Renumber node ids densely from '0' upward.

    e.g. nodes 0 1 3 become 0 1 2 when node '2' is missing.

    :param graph: adjacency dict {node: {neighbor: weight}}
    :return: (normalized graph, new2old, old2new)
    :rtype: (dict, dict, dict)
    """
    old2new = {}
    new2old = {}

    def _assign(node):
        # Give `node` the next free id on first sight.
        if node not in old2new:
            fresh = str(len(old2new))
            old2new[node] = fresh
            new2old[fresh] = node

    for node, neighbors in graph.items():
        _assign(node)
        for neighbor in neighbors:
            _assign(neighbor)

    graph_normal = {}
    for node, neighbors in graph.items():
        row = graph_normal.setdefault(old2new[node], {})
        for neighbor in neighbors:
            row[old2new[neighbor]] = graph[node][neighbor]
    return graph_normal, new2old, old2new
def skip_leading_ws_with_indent(s, i, tab_width):
    """Skip whitespace in s starting at index i.

    Returns (i, indent): i points just past the whitespace, indent is
    its visual width assuming tab stops every abs(tab_width) columns.
    """
    width = 0
    tab = abs(tab_width)
    limit = len(s)
    while i < limit:
        ch = s[i]
        if ch == ' ':
            width += 1
        elif ch == '\t':
            # advance to the next tab stop
            width += tab - (width % tab)
        else:
            break
        i += 1
    return i, width
def _devicePixelRatioF(obj): """ Return obj.devicePixelRatioF() with graceful fallback for older Qt. This can be replaced by the direct call when we require Qt>=5.6. """ try: # Not available on Qt<5.6 return obj.devicePixelRatioF() or 1 except AttributeError: pass try: # Not available on Qt4 or some older Qt5. # self.devicePixelRatio() returns 0 in rare cases return obj.devicePixelRatio() or 1 except AttributeError: return 1
def get_insertion_losses_from_lists(txlist, reclist):
    """Build "S(tx,rx)" insertion-loss names from driver/receiver lists.

    Parameters
    ----------
    txlist : list
        Drivers to include.
    reclist : list
        Receivers to include; must match txlist in length.

    Returns
    -------
    list of str, or False when the lists differ in length (a warning is
    printed in that case).
    """
    if len(txlist) != len(reclist):
        print("TX and RX should be same length lists")
        return False
    return ["S({},{})".format(tx, rx) for tx, rx in zip(txlist, reclist)]
def replace_all(text, dic):
    """Apply every key -> value substitution in dic to text.

    The result is only order-independent when the replacements do not
    interact with each other.
    https://stackoverflow.com/questions/6116978/how-to-replace-multiple-substrings-of-a-string

    :param text: input string
    :param dic: mapping of substrings to their replacements
    :return: the substituted string
    """
    for old, new in dic.items():
        text = text.replace(old, new)
    return text
def get_year_bin(year, year_bins):
    """Return the bin containing the given year; intended for small lists.

    Parameters:
    -----------
    year: int or None
        The current simulation year.
    year_bins: list
        List of years (need not be pre-sorted).

    Returns:
    --------
    The first bin when year is None or precedes every bin; otherwise the
    largest bin that does not exceed year.
    """
    ordered = sorted(year_bins)
    first = ordered[0]
    if year is None or year <= first:
        return first
    chosen = first
    for candidate in ordered[1:]:
        if year < candidate:
            break
        chosen = candidate
    return chosen
def check_for_tree(tree_map, width, pos):
    """Report whether the (horizontally wrapping) map position holds a tree.

    :param tree_map: list of strings representing map rows
    :param width: width of the repeating map section
    :param pos: (x, y) position to check
    :return: 1 for a tree ('#'), 0 otherwise
    """
    x = pos[0] % width
    row = tree_map[pos[1]]
    return 1 if row != '' and row[x] == '#' else 0
def get_longest_matching_prefix(word, prefix_list):
    """Return the longest prefix of `word` found in `prefix_list`, or None."""
    # Scan from the longest candidate down; first hit wins.
    for length in range(len(word), 0, -1):
        candidate = word[:length]
        if candidate in prefix_list:
            return candidate
    return None
def sessionid(recst):
    """Search the session id in raw RTSP response bytes.

    :param recst: RTSP response as bytes
    :return: the session id string, or None when no Session header exists
    """
    for line in recst.decode().split('\r\n'):
        fields = line.split()
        # Guard against blank lines (the header/body separator), which
        # made the original unguarded `ss[0]` raise IndexError.
        if fields and fields[0].strip() == "Session:":
            return fields[1].strip()
    return None
def createstat(point_value):
    """Build a stats dict from the centroid sample of a polygon.

    Parameters
        point_value - sequence whose first item is the value extracted
            from the centroid of the polygon.

    NOTE(review): count is 1 for the missing-value branch and 0 for the
    present-value branch, which looks inverted -- confirm intent.
    """
    value = point_value[0]
    if value is None:
        return {'count': 1, 'min': 'N/A', 'mean': 'N/A',
                'max': 'N/A', 'median': 'N/A'}
    return {'count': 0, 'min': value, 'mean': value,
            'max': value, 'median': value}
def height(p):
    """Given pressure in hPa, return altitude in meters (barometric formula)."""
    pressure_ratio = p / 1013.25  # fraction of standard sea-level pressure
    return (1 - pressure_ratio ** 0.190263) * 44330.8
def dir_names(dirrepo, panel_id):
    """Define the subdirectory structure of a panel in the calibration
    repository."""
    dir_panel = '%s/%s' % (dirrepo, panel_id)
    subdirs = ('offset', 'pedestals', 'plots', 'work', 'gain', 'rms', 'status')
    (dir_offset, dir_peds, dir_plots, dir_work,
     dir_gain, dir_rms, dir_status) = ('%s/%s' % (dir_panel, sub) for sub in subdirs)
    return (dir_panel, dir_offset, dir_peds, dir_plots,
            dir_work, dir_gain, dir_rms, dir_status)
def maxlevel(lst):
    """Return the maximum list-nesting depth of `lst` (0 for non-lists)."""
    def depth(item, level):
        # Non-lists contribute only their enclosing level.
        if not isinstance(item, list):
            return level
        return max([level + 1] + [depth(sub, level + 1) for sub in item])
    return depth(lst, 0)
def lower_words(s: str) -> str:
    """Lowercase `s` when it is a small linking word ("of", etc.).

    :param s: the input string.
    :return: s lowercased when it belongs to the known word set,
        otherwise unchanged.
    """
    linking_words = ('of',)
    lowered = s.lower()
    return lowered if lowered in linking_words else s
def populate_peer_info(neighbors, fanout_info, port):
    """Assemble the peer-info map used by the PFC storm generation class.

    Args:
        neighbors (dict): fanout info for each DUT port
        fanout_info (dict): fanout graph info
        port (str): test port

    Returns:
        dict: peerdevice, hwsku and pfc_fanout_interface of the peer
    """
    neighbor = neighbors[port]
    peer_dev = neighbor['peerdevice']
    return {
        'peerdevice': peer_dev,
        'hwsku': fanout_info[peer_dev]['device_info']['HwSku'],
        'pfc_fanout_interface': neighbor['peerport'],
    }
def _format_dict(data): """Formats raw data. Args: data ([dict]): Raw data. Returns: [dict]: Formatted data. """ return {key:value['raw'] if type(value) == dict and value != {} else value for key, value in data.items()}
def _make_tqdm_description(average_loss, average_em, average_f1): """ Build the string to use as the tqdm progress bar description. """ metrics = { "Train Loss": average_loss, "Train EM": average_em, "Train F1": average_f1 } return ", ".join(["%s: %.3f" % (name, value) for name, value in metrics.items()]) + " ||"
def clean_arrangement(arrangement):
    """
    Clean the song arrangement string and turn it into a list.

    :example:

    >>> clean_arrangement('V, C, V, C')
    ['V', 'C', 'V', 'C']

    :param arrangement: a comma-delimited string containing the
        arrangement of the song.
    :type arrangement: `str`
    :returns: a list of stripped section labels, each of which should be
        a key in the song's lyrics dictionary.
    :rtype: `list(str)`
    """
    arrangement = [a.strip(" ") for a in arrangement.split(",")]
    return arrangement
def the_same_repository(repo_1_info, repo_2_info, check_revision=True):
    """
    Decide whether two repository info dicts describe the same repository.

    Each dict should carry `changeset_revision` (when check_revision is
    true), `name`, `owner`, and either `tool_shed` or `tool_shed_url`.
    """
    # Compare from most unique to least unique for fast rejection.
    if check_revision and repo_1_info.get('changeset_revision') != repo_2_info.get('changeset_revision'):
        return False
    if repo_1_info.get('name') != repo_2_info.get('name'):
        return False
    if repo_1_info.get('owner') != repo_2_info.get('owner'):
        return False
    t1ts = repo_1_info.get('tool_shed', repo_1_info.get('tool_shed_url', None))
    t2ts = repo_2_info.get('tool_shed', repo_2_info.get('tool_shed_url', None))
    # Substring match tolerates scheme/trailing-slash differences.
    return t1ts in t2ts or t2ts in t1ts
def prefix_dict(di_, prefix_s=''):
    """Return a copy of di_ with prefix_s prepended to every key.

    :param di_: source dict
    :param prefix_s: prefix for every key
    :return: a new dict
    """
    prefixed = {}
    for key, value in di_.items():
        prefixed[prefix_s + key] = value
    return prefixed
def grid_shape(i, max_x=4):
    """Pick a pleasant (x, y) grid shape for `i` items, capping x at max_x."""
    from math import sqrt, ceil
    x = min(round(sqrt(i)), max_x)
    y = ceil(i / x)
    return x, y
def bubblesort(a):
    """
    In-place exchange sort; returns the same list.

    >>> bubblesort([6, 4, 8, 2, 1, 9, 10])
    [1, 2, 4, 6, 8, 9, 10]
    """
    n = len(a)
    for left in range(n):
        for right in range(left, n):
            if a[left] > a[right]:
                a[right], a[left] = a[left], a[right]
    return a
def cond_between(minVal, maxVal, colorformat):
    """xlsxwriter conditional-formatting dict for cells between two values."""
    return {
        'type': 'cell',
        'criteria': 'between',
        'minimum': minVal,
        'maximum': maxVal,
        'format': colorformat,
    }
def S_2(k_inv, m, private_key, s_1, q):
    """Compute the second ElGamal DSS signature component.

    s2 = k^-1 * (m - private_key * s1) mod (q - 1); on arithmetic
    failure a message is printed and None is returned.
    """
    try:
        modulus = q - 1
        return (k_inv * (m - private_key * s_1)) % modulus
    except Exception as e:
        print("Something went wrong: ", e.__str__())
        return
def ij(n):
    """Strict upper-triangular index pairs (i < j) of an n x n matrix."""
    return [(i, j) for i in range(n - 1) for j in range(i + 1, n)]
def f_to_c(degrees):
    """Convert degrees Fahrenheit to degrees Celsius."""
    shifted = degrees - 32
    return shifted * 5 / 9
def findstop_help(posLastStop, sequence, codon):
    """Return the index of the first occurrence of codon in the DNA sequence.

    @type posLastStop: int
    @param posLastStop: Position of the last found stop codon (search start).
    @type sequence: string
    @param sequence: Nucleotide sequence.
    @type codon: string
    @param codon: 3-letter DNA code.
    @rtype: int
    @return: Position of the codon in the sequence, or -1 when absent.
    """
    try:
        return sequence.index(codon, posLastStop)
    except ValueError:
        # Narrowed from the original bare except: only the "not found"
        # case should map to -1.
        return -1
def OverrideToImplementCustomLogic_CallToSuperRecommended(obj):
    """Tag a method that sub-classes should override, calling super().

    Used in Trainer and Policy to mark methods that need overriding
    with custom logic, where it is recommended (but not required) to
    also call the super-class' corresponding method, e.g.
    `Trainer.setup()`.

    Examples:
        >>> from ray import tune
        >>> @overrides(tune.Trainable)  # doctest: +SKIP
        ... @OverrideToImplementCustomLogic_CallToSuperRecommended  # doctest: +SKIP
        ... def setup(self, config):  # doctest: +SKIP
        ...     # implement custom setup logic here ...
        ...     super().setup(config)  # doctest: +SKIP
        ...     # ... or here (after having called super()'s setup method.
    """
    setattr(obj, '__is_overriden__', False)
    return obj
def reduce_labels(line: str) -> str:
    """Map a "text:label" data line onto one of the known stance labels.

    Unknown labels collapse to "none".
    """
    known_labels = ("none", "favor", "against")
    label = line.split(":")[1]
    return label if label in known_labels else "none"
def _decode_toggle(packet): """Decode boolean toggle value""" return packet[4] == 0x0f
def parse_input(inp, options):
    """Parse user input as an index into the options list.

    Parameters
    ----------
    inp : str
        User input.
    options : list
        List of option strings.

    Returns
    -------
    int or None
        The index when `inp` is a valid index for `options`, else None.
    """
    try:
        choice = int(inp)
    except (ValueError, TypeError):
        # Narrowed from the original bare except: non-numeric input -> None.
        return None
    # Out-of-range indices also yield None (the original fell through
    # implicitly; made explicit here).
    if 0 <= choice < len(options):
        return choice
    return None
def duty_compare(duty, top):
    """Timer compare value for a duty cycle (in %) and counter top value."""
    scaled = duty * top / 100
    return int(scaled)
def calc_percent_of(percent, whole):
    """Return `percent` percent of `whole`.

    Args:
        percent (:obj:`int` or :obj:`float`)
        whole (:obj:`int` or :obj:`float`)

    Returns:
        :obj:`float`
    """
    return percent * whole / 100.0
def hash_distance(sim_hash_one, sim_hash_two):
    """Hamming distance between two 128-bit sim hashes.

    :param sim_hash_one: long - sim hash
    :param sim_hash_two: long - sim hash
    :return: (int) the hamming distance.
    """
    width = 128
    diff = (sim_hash_one ^ sim_hash_two) & ((1 << width) - 1)
    # popcount of the masked XOR
    return bin(diff).count("1")
def contains_qmark(utt, history):
    """Sentence-level attribute: 1 when `utt` contains '?', else 0.

    `history` is accepted for interface compatibility and unused.
    """
    return 1 if "?" in utt else 0
def cut_corner(points, depth=1, factor=4):
    """Chaikin's corner-cutting polygon smoothing.

    :param points: points of the (closed) polygon
    :param depth: number of smoothing passes
    :param factor: cutting factor
    :return: points of the smoothed, re-closed polygon
    """
    for _ in range(depth):
        smoothed = []
        for (ax, ay), (bx, by) in zip(points, points[1:]):
            # Q sits near the first point, R near the second.
            qx = ax * (factor - 1) / factor + bx / factor
            qy = ay * (factor - 1) / factor + by / factor
            rx = ax / factor + bx * (factor - 1) / factor
            ry = ay / factor + by * (factor - 1) / factor
            smoothed += [[qx, qy], [rx, ry]]
        points = smoothed
        points += [points[0]]  # re-close the polygon for the next pass
    return points
def check_is_in_dir(parent_directory, child_file):
    """Check whether child_file could plausibly live under parent_directory.

    Purely lexical -- neither the file nor the directory needs to exist.

    Arguments:
        parent_directory: {str} -- absolute path of the candidate parent
            directory.
        child_file: {str} -- absolute path of the candidate child file.

    Returns:
        bool -- whether the names are compatible (in principle).
    """
    from os.path import dirname
    # NOTE: simple prefix test; "/a/bc" would also match parent "/a/b".
    return dirname(child_file).startswith(parent_directory)
def issequence(arg):
    """
    Check whether arg is a sequence but not a string.

    For discussion, see: "check if an object is a list or tuple (but
    not string)" at http://stackoverflow.com/a/1835259/470560
    """
    # Explicit parentheses: the original `not A and B or C` parsed as
    # `(not A and B) or C`, so strings (which define __iter__) slipped
    # through the bare `or` arm and were reported as sequences.
    return not hasattr(arg, "strip") and (
        hasattr(arg, "__getitem__") or hasattr(arg, "__iter__")
    )
def deactivate_text(shell: dict, env_vars: dict) -> str:
    """Render the deactivation-script text for the given shell templates.

    :param shell: templates; needs "shebang" and "deactivate" entries
    :param env_vars: environment variables to deactivate (keys used)
    :return: the script text, one line per variable after the shebang
    """
    body = [shell["deactivate"].format(name) for name in env_vars]
    return "\n".join([shell["shebang"]] + body)
def swap(seq, pos1, pos2):
    """Return a new sequence with the segments at pos1 and pos2 exchanged.

    Each position is a (start, end) pair; the two segments must not
    overlap.
    """
    (s1, e1), (s2, e2) = sorted([pos1, pos2])
    prefix = seq[:s1]
    middle = seq[e1:s2]
    suffix = seq[e2:]
    return prefix + seq[s2:e2] + middle + seq[s1:e1] + suffix
def to_sql(identifier):
    """Convert an identifier to a SQL-friendly form.

    Parameters
    ----------
    identifier : int
        Vertex identifier to be converted.

    Returns
    -------
    str
        SQL-friendly string "_<identifier>".
    """
    return "_" + str(identifier)
def grid3D(grid3d_width=100, grid3d_height=100, grid3d_depth=100,
           grid3d_rotate_speed=10, grid3d_rotate_sensitivity=1,
           is_grid3d_rotate=False, **kwargs):
    """Build the grid3D (3D axis) option dict.

    :param grid3d_width: 3D axis width
    :param grid3d_height: 3D axis height
    :param grid3d_depth: 3D axis depth
    :param grid3d_rotate_speed: rotation speed of 3D charts
    :param grid3d_rotate_sensitivity: rotation sensitivity; the greater
        the value, the more sensitive.
    :param is_grid3d_rotate: whether the 3D chart auto-rotates
    :param kwargs: extra options (currently ignored)
    :return: the grid3D configuration dict
    """
    view_control = {
        "autoRotate": is_grid3d_rotate,
        "autoRotateSpeed": grid3d_rotate_speed,
        "rotateSensitivity": grid3d_rotate_sensitivity,
    }
    return {
        "boxWidth": grid3d_width,
        "boxHeight": grid3d_height,
        "boxDepth": grid3d_depth,
        "viewControl": view_control,
    }
def varname(obj, callingLocals=locals()):
    """Find the (last) caller-side name bound to `obj`.

    The default-valued `callingLocals` is the trick: default values are
    evaluated at definition time in the enclosing scope, so the lookup
    sees the caller's bindings rather than this function's own locals.

    NOTE(review): raises if no binding matches `obj`.
    """
    for key, value in list(callingLocals.items()):
        if value is obj:
            match = key
    return match
def ij_to_vectorized_idx(i, j, n):
    """Column-major (Fortran-order) flat index for matrix entry (i, j).

    Vectorizing stacks columns:
    [[A11, A12], [A21, A22]] -> [A11, A21, A12, A22]

    n: number of rows
    """
    column_offset = j * n
    return column_offset + i
def retry_http(response):
    """Whether a Dynatrace API response warrants a retry.

    Retries on HTTP 429 (rate limited to 50 reqs/minute).

    Args:
        response (dict): Dynatrace API response.

    Returns:
        bool: True to retry, False otherwise.
    """
    error = response.get('error', {})
    if isinstance(error, str):
        # String-typed errors carry no code; treat as success.
        return False
    return int(error.get('code', 200)) in [429]
def referance_duplicate_to_master(master_file, duplicate_file):
    """Point a duplicate file record back at its master file.

    Copies real_path and version_id from master_file into
    duplicate_file (mutated in place and returned).
    """
    for field in ('real_path', 'version_id'):
        duplicate_file[field] = master_file[field]
    return duplicate_file
def get_formatted_emg(emg_row):
    """Strip the 'emg' tag and trailing timestamp from a raw EMG row.

    :param emg_row: list[str] -- one row from the Electromyograph
        sensor, e.g.
        ['2018-07-04T17:39:53.743240', 'emg', '-1', '-6', '-9', '-9',
         '1', '1', '-1', '-2', '2018-07-04T17:39:53.742082']
    :return: formatted row, e.g.
        ['2018-07-04T17:39:53.743240', '-1', '-6', '-9', '-9', '1',
         '1', '-1', '-2']
    """
    formatted = list(emg_row)
    del formatted[1]   # remove the 'emg' marker
    del formatted[9]   # remove the trailing timestamp (index after the first del)
    return formatted
def windows_to_unix_timestamp(windows_timestamp):
    """Convert a Windows (FILETIME) timestamp to a Unix timestamp.

    Windows counts 100 ns ticks since 1601-01-01; Unix counts seconds
    since 1970-01-01. The epochs differ by 11644473600 seconds.

    :param windows_timestamp: Windows timestamp (100 ns ticks)
    :type windows_timestamp: int
    :return: Unix timestamp in seconds
    :rtype: int
    """
    epoch_delta_seconds = 11644473600
    # Integer division keeps full precision: the original float division
    # (w / 10000000) loses whole seconds for realistic (~1e17) tick
    # counts, since floats carry only 53 bits of mantissa.
    return windows_timestamp // 10_000_000 - epoch_delta_seconds
def cnn_output_length(input_length, filter_size, border_mode, stride, dilation=1):
    """Length of the output sequence after 1D convolution along time.

    Mirrors the computation used by Keras' Convolution1D class.

    Params:
        input_length (int): length of the input sequence (None passes
            through unchanged).
        filter_size (int): width of the convolution kernel.
        border_mode (str): one of 'same', 'valid' or 'causal'.
        stride (int): stride used in the 1D convolution.
        dilation (int): kernel dilation rate.
    """
    if input_length is None:
        return None
    assert border_mode in {'same', 'valid', 'causal'}
    effective_filter = filter_size + (filter_size - 1) * (dilation - 1)
    if border_mode == 'valid':
        length = input_length - effective_filter + 1
    else:
        # 'same' and 'causal' both preserve the input length; see
        # https://github.com/fchollet/keras/blob/master/keras/utils/conv_utils.py#L112
        length = input_length
    return (length + stride - 1) // stride
def _infer_padding(well): """Guesses if a well is padded (A01) or not (A1). Returns False if it cannot be guessed (on double-digit column). """ # Assume False padded = False row = well[0] str_col = well[1:] int_col = str(int(str_col)) # Return True is str form != int form if len(str_col) != len(int_col): padded = True return padded
def parse_uint256(bs: bytes) -> int:
    """
    Parse a big-endian unsigned integer from a 32-byte sequence.

    Corresponds directly to the "parse_256(p)" function in BIP32
    (https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).

    :param bs: The byte sequence to be parsed.
    :return: The unsigned integer represented by ``bs``.
    """
    assert len(bs) == 32, "expected exactly 32 bytes"
    return int.from_bytes(bs, byteorder='big')
def _encode_bytes(source: bytes) -> bytes: """Encode provided bytes as a bencoded string""" return str(len(source)).encode("ascii") + b":" + source
def recall(tp, fn):
    """Recall = TP / (TP + FN), or 0 when both counts are zero.

    :param tp: number of true positives
    :param fn: number of false negatives
    :return: recall value (float); 0 when there are no positive samples
    """
    denominator = tp + fn
    if denominator == 0:
        # Guard against zero division, mirroring calc_precision.
        return 0
    return tp / denominator
def get_tracks_in_frame(frame_ind, track_list):
    """Return all tracks whose [first_frame, last_frame] span contains
    frame_ind."""
    return [track for track in track_list
            if track['first_frame'] <= frame_ind <= track['last_frame']]
def find_digit_sum(num):
    """Return the sum of the decimal digits of num."""
    return sum(int(digit) for digit in str(num))
def read_string(puzzle_string: str):
    """Parse a puzzle description string into solver input.

    puzzle_string: one clue per line, "<cells> <operation> <target>".

    return: list of [cells, operation, float(target)] rows for each row
        and column; lines that do not split into exactly three fields
        are dropped.
    """
    rows = (line.split(' ') for line in puzzle_string.split('\n'))
    return [row[:2] + [float(row[2])] for row in rows if len(row) == 3]
def save_cast_int(int_str: str) -> int:
    """
    Cast a version component to int, tolerating prerelease suffixes so
    versions like 3.8.0rc1 do not throw exceptions.

    Parameters
    ----------
    int_str : str
        String which should represent a number.

    Returns
    -------
    int
        The parsed value, or 0 when int_str is not a plain integer.
    """
    try:
        return int(int_str)
    except ValueError:
        return 0
def alpha_blend(a, b, alpha):
    """Linear interpolation from a (alpha=0) to b (alpha=1).

    Args:
        alpha: double ratio in [0, 1]; the weight given to b.
    """
    weight_a = 1.0 - alpha
    return b * alpha + weight_a * a
def compare_rule_hits_count(r1, r2, diff):
    """Compare rule hit counts of two reports, filling `diff` accordingly.

    Writes diff["hits1"], diff["hits2"] and diff["eq_hits"], and returns
    whether the two counters are equal.
    """
    hits1 = r1["report"]["meta"]["count"]
    hits2 = r2["report"]["meta"]["count"]
    # counters are recorded for the output table
    diff["hits1"] = hits1
    diff["hits2"] = hits2
    equal = hits1 == hits2
    diff["eq_hits"] = "yes" if equal else "no"
    return equal
def power(x, y=2):
    """Raise x to the non-negative integer power y (default: square).

    :param x: base
    :param y: non-negative integer exponent
    :raises ValueError: if y is negative (the original loop silently
        returned 1 for any negative exponent, which was wrong).
    :return: x ** y, with power(x, 0) == 1
    """
    if y < 0:
        raise ValueError("negative exponents are not supported")
    result = 1
    for _ in range(y):
        result *= x
    return result
def maybe(target, *keys, fallback=None):
    """
    Simple implementation of optional chaining, like Haskell's Maybe.

    Looks up target[k1][k2]... for the given keys; returns `fallback`
    when any lookup fails (missing key/index, or a non-subscriptable
    intermediate value).
    """
    try:
        for key in keys:
            target = target[key]
        return target
    except (LookupError, TypeError):
        # Narrowed from the original bare except, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return fallback
def get_distance_meter_color(distance):
    """Map a normalized distance in [0, 1] onto an (R, G, B) triple.

    Interpolates Green (0,1,0) -> Yellow (1,1,0) -> Red (1,0,0); the
    returned components are floats in [0, 1].

    :param distance: normalized distance between GT and prediction.
    :return: (R, G, B) representation of the distance.
    """
    red = green = blue = 0.0
    if 0 <= distance < 0.5:
        # first half: green toward yellow
        green = 1.0
        red = 2 * distance
    if 0.5 <= distance <= 1:
        # second half: yellow toward red
        red = 1.0
        green = 1.0 - 2 * (distance - 0.5)
    return red, green, blue
def get_stripped_prefix(source, prefix):
    """Collect every key/value pair whose key starts with `prefix`,
    with the prefix removed from the keys."""
    offset = len(prefix)
    stripped = {}
    for key, value in source.items():
        if key.startswith(prefix):
            stripped[key[offset:]] = value
    return stripped
def read_metadata_tf(line, search_text, current_value):
    """Read a simple yes/no item from a DynAdjust header line.

    :param line: DynAdjust header line
    :param search_text: desired header field
    :param current_value: stored value, kept when the field is absent
    :return: True/False when the field matches this line, else
        current_value
    """
    # Header labels occupy a fixed 35-character field.
    if line[:35] != search_text.ljust(35, ' '):
        return current_value
    return line[35:].strip() == 'Yes'
def fix_alpha2_value(alpha2):
    """Return a fixed two-letter uppercase alpha2, or None if unfixable.

    :param alpha2: candidate country code (str or None)
    :return: stripped, uppercased two-letter code, or None for None
        input, non-alphabetic input, or wrong-length codes
    """
    if alpha2 is None:
        return None
    fixed_alpha2 = alpha2.strip()
    # Reject non-alphabetic or wrong-length codes. The original used
    # `assert len(...) == 2`, which crashed on e.g. three-letter input
    # and contradicted the "None if unfixable" contract (and would be
    # stripped under -O).
    if not fixed_alpha2.isalpha() or len(fixed_alpha2) != 2:
        return None
    return fixed_alpha2.upper()
def _nf(s: str): """None Filter""" return None if s == "None" else s
def _covert_360_to_180(degrees: float) -> float: """ converts from range (0, 360) to (-180, 180) :param degrees: the angle to convert from in degrees :return: the converted value in range (-180, 180) """ if degrees > 180: return degrees - 360 return degrees