content
stringlengths
42
6.51k
def get_t(x, mu, s, n):
    """Return the t statistic corresponding to ``x`` in a Student's t setting.

    Computes t = (x - mu) / (s / sqrt(n)).

    Parameters
    ----------
    x : float
        Specific value from the random variable.
    mu : float
        Population mean.
    s : float
        Sample standard deviation.
    n : int
        Sample size; must be at most 30 (small-sample t statistic).

    Returns
    -------
    float
        The t value corresponding to ``x``.

    Raises
    ------
    ValueError
        If ``n`` is greater than 30.
    """
    # The original printed a warning for n > 30 and then crashed with
    # UnboundLocalError on "return t"; raise an explicit error instead.
    if n > 30:
        raise ValueError("The sample size must be less than 30.")
    return (x - mu) / (s / (n ** 0.5))
def Ms_Mw_Exp_DiGiacomo2015(MagSize, MagError):
    """Exponential Ms-to-Mw conversion (Di Giacomo et al., 2015).

    Returns a (magnitude, error) pair, or (None, None) when ``MagSize``
    lies outside the calibrated range [3.5, 8.0].
    """
    if 3.5 <= MagSize <= 8.0:
        converted = mt.exp(-0.222 + 0.233 * MagSize) + 2.863
        return (converted, MagError)
    return (None, None)
def _ValueFieldName(proto_field_name): """Returns the name of the (internal) instance attribute which objects should use to store the current value for a given protocol message field. Args: proto_field_name: The protocol message field name, exactly as it appears (or would appear) in a .proto file. """ return '_value_' + proto_field_name
def der_value_offset_length(der):
    """Return the offset and length of the value part of a DER TLV object.

    Args:
        der: DER-encoded data (bytes on Python 3, str on Python 2).

    Returns:
        dict with 'offset' (start of the value relative to ``der``) and
        'length' (number of value bytes).
    """
    tag_len = 1  # Assume 1 byte tag
    # First length byte: a str character on Python 2, an int on Python 3.
    if isinstance(der[tag_len], str):
        first = int(der[tag_len].encode('hex'), 16)
    else:
        first = der[tag_len]
    if first < 0x80:
        # Short form: the byte itself is the length.
        len_len = 1
        value_len = int(first)  # renamed from "len", which shadowed the builtin
    else:
        # Long form: lower 7 bits give the count of subsequent length bytes.
        len_len = (first & 0x7F) + 1
        raw = der[tag_len + 1:tag_len + len_len]
        try:
            value_len = int(raw.encode('hex'), 16)  # Python 2 str path
        except AttributeError:
            value_len = int.from_bytes(raw, byteorder='big', signed=False)
    return {'offset': tag_len + len_len, 'length': value_len}
def scale_precision(precision_limits, min_scaling, max_scaling, start, end):
    """Linearly map a precision value onto the interval [start, end].

    :param precision_limits: desired precision of the control limits
    :param min_scaling: minimum of the source interval
    :param max_scaling: maximum of the source interval
    :param start: start of the target interval
    :param end: end of the target interval
    :return: scaled precision
    """
    fraction = (precision_limits - min_scaling) / (max_scaling - min_scaling)
    return (end - start) * fraction + start
def calcular_precio_servicio(cantidad_horas):
    """Compute the service price from hours worked (100000 per hour).

    :param cantidad_horas: integer count of hours worked
    :return: the service price as a float, or an error message string when
        the hour count is not positive

    >>> calcular_precio_servicio(4)
    400000.0
    >>> calcular_precio_servicio(5)
    500000.0
    """
    if cantidad_horas <= 0:
        return 'Las horas deben ser mayor a cero.'
    return float(cantidad_horas * 100000)
def eliminate_none(data):
    """Return a copy of ``data`` with every None-valued key removed."""
    return {key: val for key, val in data.items() if val is not None}
def get_user_move_history(piece, board_state):
    """Collect the move numbers of every move NOT made with ``piece``.

    Each entry of ``board_state`` is indexed as (piece, move); the second
    element is converted to int.
    """
    return [int(move[1]) for move in board_state if move[0] != piece]
def count_sheeps(arrayOfSheeps: list) -> int:
    """Count the sheep present in the array (True means present).

    A None array yields 0, guarding against null/undefined-style input.
    """
    if arrayOfSheeps is None:
        return 0
    return arrayOfSheeps.count(True)
def extract_account_id(user):
    """Extract an account id from the user info, preferring admin accounts.

    Falls back to the first non-admin account when no admin account exists;
    raises IndexError when the user has no accounts at all.
    """
    admins = []
    others = []
    for acc_id, is_admin in user["accounts"].items():
        (admins if is_admin else others).append(acc_id)
    return (admins or others)[0]
def safe_root(path):
    """Drop one leading '/' or '\\' so os.path.join treats the path as relative."""
    if path and path[0] in ('/', '\\'):
        return path[1:]
    return path
def encode(value):
    """Encode a str value to UTF-8 bytes; other values pass through unchanged.

    Args:
        value: Value to encode.

    Returns:
        result: UTF-8 bytes when ``value`` is a str, otherwise ``value`` as-is.
    """
    if value is not None and isinstance(value, str):
        return value.encode()
    return value
def _find_closest(x, feature_grids): """ Find the closest feature grid for x :param x: value :param feature_grids: array of feature grids :return: """ values = list(feature_grids) return values.index(min(values, key=lambda y: abs(y-x)))
def derive_folder_name(url, replaces):
    """Derive a folder name from a repository URL.

    A URL whose prefix matches a key of ``replaces`` gets that prefix
    substituted; otherwise a known scheme prefix is stripped.  All '/'
    characters become '.'.  Raises for URLs matching neither rule.
    """
    def sanitize(value):
        return value.replace('/', '.')

    for prefix in replaces:
        if url.startswith(prefix):
            return sanitize(replaces[prefix] + url[len(prefix):])
    for scheme in ('http://', 'https://', 'git://', 'svn://'):
        if url.startswith(scheme):
            return sanitize(url[len(scheme):])
    raise Exception('malformed url: {}'.format(url))
def get_module_name_parts(module_str):
    """Split 'module.submodule.attr' into its module path and attribute name.

    Returns (module_name, attr_name); both are None for a falsy input.
    """
    if not module_str:
        return (None, None)
    module_name, _, attr_name = module_str.rpartition('.')
    return (module_name, attr_name)
def strip_schema_version(json_dict):
    """Remove the schema 'version' key (if any) from the given JSON dict.

    :param json_dict: The JSON dict (modified in place)
    :type json_dict: dict
    :returns: The same dict with its schema version stripped out
    :rtype: dict
    """
    json_dict.pop('version', None)
    return json_dict
def find_inverse_clifford_gates(num_qubits, gatelist):
    """Find the inverse of a 1- or 2-qubit Clifford gate.

    Reverses the gate order and swaps each 'v' gate with 'w' (and vice
    versa), since those two are each other's inverse.

    Args:
        num_qubits: the dimension of the Clifford (1 or 2).
        gatelist: a Clifford gate as a list of gate strings.

    Returns:
        An inverse Clifford gate (list of gate strings).

    Raises:
        ValueError: when num_qubits is not 1 or 2.
    """
    if num_qubits not in (1, 2):
        raise ValueError("The number of qubits should be only 1 or 2")
    inverse = []
    for gate in reversed(gatelist):
        tokens = gate.split()
        if tokens[0] == 'v':
            inverse.append('w ' + tokens[1])
        elif tokens[0] == 'w':
            inverse.append('v ' + tokens[1])
        else:
            inverse.append(gate)
    return inverse
def error(msg, value=None) -> str:
    """Format ``msg`` as the message for an argument that failed to parse."""
    if value is None:
        return f'<[{msg}]>'
    return f'<[{msg} ({value})]>'
def make_dirt_dict(clean_dict, keyname, dirt_list):
    """Build a one-key dict mapping ``keyname`` to its clean values plus the dirt values."""
    combined = clean_dict[keyname] + dirt_list
    return {keyname: combined}
def define_rule_sets(set_size):
    """Enumerate every possible rule for the given set size.

    Returns all 2**set_size binary vectors (lists of 0/1) in ascending
    numeric order.
    """
    rules = []
    for n in range(2 ** set_size):
        bits = format(n, '0{}b'.format(set_size))
        rules.append([int(bit) for bit in bits])
    return rules
def merge_envelopes(envelopes, extent_type='intersection'):
    """Return the extent of the union or intersection of a list of envelopes.

    :param envelopes: list of (xmin, xmax, ymin, ymax) envelopes
        (NOTE(review): the original docstring also claimed raster filenames
        were accepted, but the code only unpacks 4-tuples — confirm.)
    :param extent_type: 'intersection' or 'union' (case-insensitive)
    :return: (xmin, xmax, ymin, ymax) in world coordinates
    :raises TypeError: if ``extent_type`` is unknown
    """
    et = extent_type.lower()
    # Validate up front; the original only raised when more than one
    # envelope was supplied, silently accepting a bad extent_type otherwise.
    if et not in ('intersection', 'union'):
        raise TypeError('extent_type {} unknown'.format(extent_type))
    xmin, xmax, ymin, ymax = envelopes[0]
    for xmin0, xmax0, ymin0, ymax0 in envelopes[1:]:
        if et == 'intersection':
            xmin = max(xmin, xmin0)
            xmax = min(xmax, xmax0)
            ymin = max(ymin, ymin0)
            ymax = min(ymax, ymax0)
        else:  # union
            xmin = min(xmin, xmin0)
            xmax = max(xmax, xmax0)
            ymin = min(ymin, ymin0)
            ymax = max(ymax, ymax0)
    return xmin, xmax, ymin, ymax
def _convert_results_to_dict(r): """Takes a results from Elasticsearch and returns fields.""" if 'fields' in r: return r['fields'] if '_source' in r: return r['_source'] return {'id': r['_id']}
def density_to_porosity(rho, rho_matrix, rho_fluid):
    """Compute porosity from a bulk density log.

    (The original docstring said "get density from a porosity log", which
    is backwards: this converts density to porosity.)

    Typical values:
        rho_matrix (sandstone): 2650 kg/m^3
        rho_matrix (limestone): 2710 kg/m^3
        rho_matrix (dolomite): 2876 kg/m^3
        rho_matrix (anhydrite): 2977 kg/m^3
        rho_matrix (salt): 20320 kg/m^3  # NOTE(review): likely a typo for 2032 — confirm
        rho_fluid (fresh water): 1000 kg/m^3
        rho_fluid (salt water): 1100 kg/m^3

    See wiki.aapg.org/Density-neutron_log_porosity.

    Args:
        rho (ndarray): The bulk density log or RHOB.
        rho_matrix (float): Matrix (grain) density.
        rho_fluid (float): Fluid density.

    Returns:
        Estimate of porosity as a volume fraction.
    """
    return (rho_matrix - rho) / (rho_matrix - rho_fluid)
def yamfilter(yam):
    """Return False for yaml names that should not be exported, True otherwise.

    A name is excluded when it contains any of the reserved substrings.
    """
    excluded = ('_settings', '_definitions', '_terms', 'project', 'program')
    return not any(name in yam for name in excluded)
def merge_dictionaries(a, b):
    """Merge two dicts into a new dict; values from ``b`` win on key clashes."""
    merged = dict(a)
    merged.update(b)
    return merged
def get_adjacent_seats(seats, row, col):
    """Return the seat characters adjacent to seat (row, col).

    Neighbors are collected top-to-bottom, left-to-right, matching the
    original scan order; out-of-bounds positions are skipped.
    """
    num_rows = len(seats)
    num_cols = len(seats[0])
    offsets = ((-1, -1), (-1, 0), (-1, 1),
               (0, -1),           (0, 1),
               (1, -1),  (1, 0),  (1, 1))
    neighbors = []
    for d_row, d_col in offsets:
        r, c = row + d_row, col + d_col
        if 0 <= r < num_rows and 0 <= c < num_cols:
            neighbors.append(seats[r][c])
    return neighbors
def empty_filter(ivar):
    """Return '' when ivar is None or an empty container; otherwise return ivar."""
    if ivar is None or len(ivar) == 0:
        return ""
    return ivar
def sun_rot_elements_at_epoch(T, d):
    """Calculate rotational elements for the Sun.

    Parameters
    ----------
    T : float
        Interval from the standard epoch in Julian centuries (36525 days).
        Not used for the Sun; presumably kept for a common signature with
        other bodies.
    d : float
        Interval in days from the standard epoch.

    Returns
    -------
    tuple of float
        (ra, dec, W): right ascension and declination of the north pole,
        and the angle of the prime meridian.
    """
    W = 84.176 + 14.1844000 * d
    return 286.13, 63.87, W
def get_last_server_gtid(gtid_set, server_uuid):
    """Get the last (greatest) GTID of the given set for the given server UUID.

    GTID sets are assumed to be grouped by UUID (comma-separated) with
    intervals in ascending order, so the last interval holds the greatest
    sequence number.

    gtid_set[in]     GTID set to search.
    server_uuid[in]  Server UUID to match; a set may hold several servers.

    Returns a string 'uuid:n' with the last GTID value for that server, or
    None when the set holds no data for the UUID.
    """
    target = server_uuid.lower()
    for uuid_set in gtid_set.split(','):
        elements = uuid_set.strip().split(':')
        # UUIDs are case insensitive but may appear with mixed case for
        # some server versions (e.g. GTID_EXECUTED uses upper case in 5.6.9).
        if elements[0].lower() != target:
            continue
        last_interval = elements[-1]
        try:
            _, end_val = last_interval.split('-')
        except ValueError:
            # Single value, not an interval.
            end_val = last_interval
        return '{0}:{1}'.format(server_uuid, end_val)
    return None
def simple_chewrec_func(_data, rec, _arg):
    """Callback for record chewing: 1 when no record remains, else 0."""
    return 1 if rec is None else 0
def makeOptionsDict(options):
    """Convert options extracted from a tsv file into two dictionaries.

    Args:
        options: A string of '|'-separated 'key=value' options whose values
            must be integers.  A single leading '#' on a key is stripped.

    Returns:
        preprocess_dict: A dictionary with options for preprocessing.
        blast_dict: A dictionary with options for BLAST.

    Raises:
        ValueError: if an option value is not an integer.
    """
    blast_keys = {"minIdent", "reward", "penalty", "gapOpen", "gapExtend",
                  "use_options"}
    preprocess_dict = {}
    blast_dict = {}
    for option in options.split("|"):
        if "=" not in option:
            continue
        key_value = option.split("=")
        # The original wrapped this in "except: raise ValueError", which
        # swallowed the cause; let int() raise its own ValueError instead.
        value = int(key_value[1].strip())
        key = key_value[0].strip()
        if key.startswith("#"):
            key = key[1:]
        if key in blast_keys:
            blast_dict[key] = value
        else:
            preprocess_dict[key] = value
    return preprocess_dict, blast_dict
def test_jpeg1(h, f): """JPEG data in JFIF format""" if b"JFIF" in h[:23]: return "jpeg"
def document_uri_self_claim(claimant):
    """Return a "self-claim" document URI dict for the given claimant."""
    self_claim = dict(
        claimant=claimant,
        uri=claimant,
        type="self-claim",
        content_type="",
    )
    return self_claim
def check_consistency(order, graph):
    """Verify `order` is topologically sorted bottom-to-top per `graph`.

    Prints each dependency that appears after its dependent and returns
    True when any such problem was found.
    """
    seen_so_far = set()
    problems = False
    for crate in order:
        missing = [dep for dep in graph[crate] if dep not in seen_so_far]
        for dependent in missing:
            print(f"{crate} dependency on {dependent} is not reflected in Cargo.toml")
        if missing:
            problems = True
        seen_so_far.add(crate)
    return problems
def filterLinesByCommentStr(lines, comment_str='#'):
    """Remove comment lines, in place, from a file.readlines() output.

    A line is a comment when its first character is one of the characters
    in ``comment_str``.  Empty strings are kept; the original indexed
    ``line[0]`` unconditionally and raised IndexError on them.

    Returns the same (mutated) list object, as callers may hold a reference.
    """
    kept = [line for line in lines if not line or line[0] not in comment_str]
    lines[:] = kept  # slice-assign to mutate the caller's list in place
    return lines
def perm_conjugate(p, s):
    """Return the conjugate of the permutation `p` by the permutation `s`.

    INPUT: two permutations of {0,..,n-1} given by lists of values

    OUTPUT: a permutation of {0,..,n-1} given by a list of values

    EXAMPLES::

        sage: from sage.combinat.constellation import perm_conjugate
        sage: perm_conjugate([3,1,2,0], [3,2,0,1])
        [0, 3, 2, 1]
    """
    q = [None] * len(p)
    for i, p_i in enumerate(p):
        q[s[i]] = s[p_i]
    return q
def get_column_width(runs_str, idx, header):
    """Return the display width of column ``idx``: the widest cell or the header."""
    widest_cell = max(len(row[idx]) for row in runs_str)
    return max(widest_cell, len(header))
def equal_species(list1, list2):
    """Compare two species lists (length at most 2).

    Two names are considered the same when either string contains the
    other.  Length-2 lists match either pairwise in order or crosswise.

    Returns:
        bool: True on a match, False otherwise (also for unsupported
        lengths >= 3).  The original returned the int 0 for the false
        cases; False compares equal to 0, so callers are unaffected.
    """
    if len(list1) != len(list2):
        return False
    if len(list1) == 1:
        return list1[0] in list2[0] or list2[0] in list1[0]
    if len(list1) == 2:
        pair1 = list1[0] in list2[0] or list2[0] in list1[0]
        pair2 = list1[1] in list2[1] or list2[1] in list1[1]
        cross1 = list1[0] in list2[1] or list2[0] in list1[1]
        cross2 = list1[1] in list2[0] or list2[1] in list1[0]
        # NOTE(review): cross1/cross2 mix pairs asymmetrically (cross1
        # combines list1[0] within list2[1] with list2[0] within list1[1]);
        # behavior preserved from the original — confirm intent before
        # changing.
        return (pair1 and pair2) or (cross1 and cross2)
    # Cannot handle 3 or more products at the moment.
    return False
def str22tuple(string):
    """Convert a string like '(1, 2)' into a 2-tuple of ints."""
    numbers = [int(token.strip('(), ')) for token in string.split()]
    return (numbers[0], numbers[1])
def question1(s, t):
    """Determine whether ``t`` is an anagram of some substring of ``s``.

    Inputs: s (the text), t (the candidate anagram)
    Output: bool

    The original implementation reset its working list with
    ``anagramTest = anagram`` — an alias, not a copy — so later deletions
    corrupted the reference list.  This version uses a sliding window of
    character counts instead.
    """
    from collections import Counter

    if not s or not t:
        return False
    window = len(t)
    if window > len(s):
        return False
    target = Counter(t)
    counts = Counter(s[:window])
    if counts == target:
        return True
    for i in range(window, len(s)):
        counts[s[i]] += 1
        leaving = s[i - window]
        counts[leaving] -= 1
        if counts[leaving] == 0:
            del counts[leaving]  # keep Counters comparable
        if counts == target:
            return True
    return False
def get_objective_value(val, goal_type=None):
    """Reduce ``val`` to a single objective value (fitness or Pareto indicator).

    A list of lists is flattened to a single list first.  ``goal_type``
    selects the reduction: 'max', 'min', 'avg', 'abs max', or 'abs min';
    any other value returns ``val`` unchanged.
    """
    if isinstance(val, list):  # idiomatic type check (was: type(val) == list)
        try:
            val = [item for sublist in val for item in sublist]
        except TypeError:
            # Already flat: elements are not iterable.
            pass
    if goal_type == 'max':
        return max(val)
    if goal_type == 'min':
        return min(val)
    if goal_type == 'avg':
        return sum(val) / len(val)
    if goal_type == 'abs max':
        return max(abs(x) for x in val)
    if goal_type == 'abs min':
        return min(abs(x) for x in val)
    return val
def append_or_extend(myList: list, item):
    """Append ``item`` to ``myList``, or merge it in when ``item`` is itself a list.

    Args:
        myList: the list to modify in place.
        item: a value to append, or a list whose elements to add.

    Returns:
        The same (modified) list.
    """
    adder = myList.extend if isinstance(item, list) else myList.append
    adder(item)
    return myList
def get_zcl_attribute_type(code):
    """Determine a ZCL attribute's data type from its type code.

    Args:
        code (int): The ZCL data type code included in the packet.

    Returns:
        str: The ZCL attribute type name, or "OPAQUE" for unknown codes.
    """
    # A dict lookup replaces the original flat (code, name, code, name, ...)
    # tuple scanned with index().  Names are byte-identical to the original,
    # including apparent typos ("UNIT48", "UNIT56", "NIT48", "NIT56") --
    # NOTE(review): likely meant UINT48/UINT56/INT48/INT56; confirm against
    # the ZCL specification before changing.
    types = {
        0x00: "NULL", 0x08: "DATA8", 0x09: "DATA16", 0x0a: "DATA24",
        0x0b: "DATA32", 0x0c: "DATA40", 0x0d: "DATA48", 0x0e: "DATA56",
        0x0f: "DATA64", 0x10: "BOOL", 0x18: "MAP8", 0x19: "MAP16",
        0x1a: "MAP24", 0x1b: "MAP32", 0x1c: "MAP40", 0x1d: "MAP48",
        0x1e: "MAP56", 0x1f: "MAP64", 0x20: "UINT8", 0x21: "UINT16",
        0x22: "UINT24", 0x23: "UINT32", 0x24: "UINT40", 0x25: "UNIT48",
        0x26: "UNIT56", 0x27: "UINT64", 0x28: "INT8", 0x29: "INT16",
        0x2a: "INT24", 0x2b: "INT32", 0x2c: "INT40", 0x2d: "NIT48",
        0x2e: "NIT56", 0x2f: "INT64", 0x30: "ENUM8", 0x31: "ENUM16",
        0x38: "SEMI", 0x39: "SINGLE", 0x3a: "DOUBLE", 0x41: "OCTSTR",
        0x42: "STRING", 0x43: "OCTSTR16", 0x44: "STRING16", 0x48: "ARRAY",
        0x4c: "STRUCT", 0x50: "SET", 0x51: "BAG", 0xe0: "ToD",
        0xe1: "DATE", 0xe2: "UTC", 0xe8: "CLUSTERID", 0xe9: "ATTRID",
        0xea: "BACOID", 0xf0: "EUI64", 0xf1: "KEY128", 0xff: "UNK",
    }
    return types.get(code, "OPAQUE")
def compute_node_distances(idx, neighbors):
    """BFS hop distances from node ``idx`` to every reachable node.

    (The original called the result "degree"; the values are breadth-first
    distances from the start node.)

    Args:
        idx: index of the start node.
        neighbors: dict mapping node -> iterable of neighbor nodes.

    Returns:
        dict mapping each reachable node to its hop distance from ``idx``.
    """
    from collections import deque

    distance = {idx: 0}
    queue = deque([idx])  # deque: popleft() is O(1); list.pop(0) was O(n)
    while queue:
        node = queue.popleft()
        for neighbor in neighbors[node]:
            if neighbor not in distance:
                distance[neighbor] = distance[node] + 1
                queue.append(neighbor)
    return distance
def node_health(base_url, cfg, args_array):
    """Function: node_health

    Description:
        Placeholder health check; currently always reports healthy.

    Arguments:
        base_url, cfg, args_array -> accepted but not otherwise used:
        both branches of the original produced True.
    """
    return True
def _update_allow(allow_set, value): """ Updates the given set of "allow" values. The first time an update to the set occurs, the value(s) are added. Thereafter, since all filters are implicitly AND'd, the given values are intersected with the existing allow set, which may remove values. At the end, it may even wind up empty. Args: allow_set: The allow set, or None value: The value(s) to add (single value, or iterable of values) Returns: The updated allow set (not None) """ adding_seq = hasattr(value, "__iter__") and \ not isinstance(value, str) if allow_set is None: allow_set = set() if adding_seq: allow_set.update(value) else: allow_set.add(value) else: # strangely, the "&=" operator requires a set on the RHS # whereas the method allows any iterable. if adding_seq: allow_set.intersection_update(value) else: allow_set.intersection_update({value}) return allow_set
def get_cipd_pkg_name(pkg_pieces):
    """Join package pieces into a CIPD package name.

    Falsy pieces (None, empty string, ...) are skipped so the result has
    no leading or trailing '/' and no '//' inside.
    """
    kept = (str(piece) for piece in pkg_pieces if piece)
    return '/'.join(kept)
def rv6(mjd, hist=None, **kwargs):
    """Cadence requirements for 6-visit RV plates.

    Request: 6 total, ~3 per month, ideally within 1 week.

    mjd: float (int is fine too), the proposed observation date.
    hist: list of previous MJDs (default: empty).  The original used a
        mutable default argument (``hist=[]``); None is used instead.

    NOTE: the unconditional "return True" below short-circuits the whole
    cadence calculation ("covid 4 plug per night solution" in the
    original); the unreachable logic is kept for when that policy is
    reverted.
    """
    if hist is None:
        hist = []
    # covid 4 plug per night solution
    return True
    if len(hist) == 0:
        return True
    if len(hist) > 6:
        return False
    deltas = mjd - np.array(hist)
    this_month = deltas[np.where(deltas < 15)]
    if len(deltas) > 2:
        return False
    # would allow for observations more than a week after previous
    return np.min(deltas) > 2
def index(objects, attr):
    """Generate a mapping of objects keyed by the given attribute.

    Parameters
    ----------
    objects : iterable
        Objects to index.
    attr : string
        The attribute to index the objects by; a later object overwrites
        an earlier one sharing the same attribute value.

    Returns
    -------
    dict
        Keys are each object's ``attr`` value; values are the objects.
    """
    mapping = {}
    for obj in objects:
        mapping[getattr(obj, attr)] = obj
    return mapping
def find_indexes(data, prog_a, prog_b):
    """Return the indexes of ``prog_a`` and ``prog_b`` within ``data``.

    Returns (pos_a, pos_b); each is -1 when the element is absent.  The
    scan stops as soon as both have been found.  The original early-exit
    tested ``pos_a > 0 and pos_b > 0`` and therefore never short-circuited
    when either program sat at index 0.
    """
    pos_a, pos_b = -1, -1
    for idx, elem in enumerate(data):
        if elem == prog_a:
            pos_a = idx
        elif elem == prog_b:
            pos_b = idx
        if pos_a >= 0 and pos_b >= 0:
            break
    return pos_a, pos_b
def topdir_file(name):
    """Strip a leading "src/" from a filename."""
    return name[4:] if name.startswith("src/") else name
def xor(a: str, b: str) -> str:
    """Perform positionwise XOR of two bit strings (length taken from ``a``).

    Used to XOR round keys with plaintext blocks when implementing DES.
    """
    return "".join('1' if a[i] != b[i] else '0' for i in range(len(a)))
def dotify(name):
    """Replace underscores in ``name`` with dots (e.g. 'a_b_c' -> 'a.b.c')."""
    return name.replace('_','.')
def comp_stock_div_list(name_list):
    """Sum the dividends recorded in each stock's dividend file.

    Inputs
        name_list: e.g. ['Dividends/Stock/UOL.txt']; each file holds lines
            like '23-09-2014,34' (date, amount).
    Outputs
        div_list: e.g. [100] — one total per input file; 0 when the file
            is missing (a message is printed) or holds no valid lines.
    """
    div_list = []
    for name in name_list:
        total = 0  # renamed from "sum", which shadowed the builtin
        try:
            with open(name) as f:
                for line in f:
                    # e.g. line = 23-09-2014,34
                    fields = line.strip().replace("\n", "").split(",")
                    if len(fields) == 2 and fields[1] not in ("", " "):
                        total += int(fields[1])
        except IOError:
            print("File not found! file = " + name)
        div_list.append(total)
    return div_list
def split_container_name(container_name):
    """Pull apart a container name from its tag.

    Returns the split list [name, tag, ...] when a tag is present,
    otherwise (name, None).  The original no-tag branch returned
    ``(parts, None)`` — the whole one-element split list instead of the
    bare name — which no caller could sensibly unpack.
    """
    parts = container_name.split(":")
    if len(parts) > 1:
        return parts
    return parts[0], None
def strip_whitespace(string_value):
    """Return the input without space, tab, or newline characters (for comparisons)."""
    # str.translate deletes the three characters in a single C-level pass.
    return string_value.translate(str.maketrans('', '', ' \n\t'))
def parse_description(description, sep):
    """Return ``description`` as a single text entry separated by tabs."""
    fields = description.split(sep)
    return "\t".join(fields)
def mergeinfo_ranges_to_set(mergeinfo_ranges):
    """Convert a compact ranges representation to a Python set.

    Each element is either a bare int or an inclusive [start, end] pair.
    """
    result = set()
    for r in mergeinfo_ranges:
        if isinstance(r, int):  # idiomatic check (was: type(r) == int)
            result.add(r)
        else:
            result.update(range(r[0], r[1] + 1))
    return result
def coord_to_string(h_or_d, m, s):
    """Format RA or DEC values as 'hh:mm:ss.ssss' (RA) or 'dd:mm:ss.ssss' (DEC).

    A leading '-' is emitted when the leading component is negative, or —
    when it is zero — when the minutes or seconds are negative.
    """
    negative = h_or_d < 0 or (abs(h_or_d) == 0 and (m < 0.0 or s < 0.0))
    sign = "-" if negative else ""
    h_or_d, m, s = abs(h_or_d), abs(m), abs(s)
    # Seconds below the rounding threshold need an explicit leading zero.
    fmt = "%.2d:%.2d:%.4f" if s >= 9.9995 else "%.2d:%.2d:0%.4f"
    return sign + fmt % (h_or_d, m, s)
def _bytes_bytearray_to_str(s): """If s is bytes or bytearray, convert to a unicode string (PRIVATE).""" if isinstance(s, (bytes, bytearray)): return s.decode() return s
def bprop_sub(x, y, dz):
    """Backpropagator for primitive `sub`.

    For z = x - y: dz/dx = 1 and dz/dy = -1, so the incoming gradient
    flows unchanged to x and negated to y.
    """
    return (dz, -dz)
def append_form_control(value):
    """A filter that adds the bootstrap "form-control" class to form fields."""
    for tag in ('<input', '<textarea', '<select'):
        value = value.replace(tag, tag + ' class="form-control"')
    return value
def submissionFilter(submission):
    """Filter the given submission to make sure it has text in its body.

    Params:
    - submission (dict): The submission to be filtered

    Returns:
    - contains_text (bool): True if the submission contains text, False otherwise
    """
    if not submission['is_self']:
        return False
    body = submission.get('selftext')
    if not body:
        return False
    return body not in ('[deleted]', '[removed]')
def _rxcheck(model_type, interval, iss_id, number_of_wind_samples): """Gives an estimate of the fraction of packets received. Ref: Vantage Serial Protocol doc, V2.1.0, released 25-Jan-05; p42""" # The formula for the expected # of packets varies with model number. if model_type == 1: _expected_packets = float(interval * 60) / (2.5 + (iss_id - 1) / 16.0) - \ float(interval * 60) / (50.0 + (iss_id - 1) * 1.25) elif model_type == 2: _expected_packets = 960.0 * interval / float(41 + iss_id - 1) else: return None _frac = number_of_wind_samples * 100.0 / _expected_packets if _frac > 100.0: _frac = 100.0 return _frac
def evaluate_ser_errors(entities, outputs):
    """Group queries the SER got wrong into error types.

    :param entities: per-query entity lists, each entry like (5, 6, 'number')
    :param outputs: PARSED mallard/duckling responses (dict: type -> spans)
    :return: (missed_entity_indices, incorrect_span_indices, correct_indices)
    """
    missed_entity_indices = []
    incorrect_span_indices = []
    correct_indices = []
    for i, (entity_info, output) in enumerate(zip(entities, outputs)):
        missed = False
        wrong_span = False
        for entity in entity_info:
            entity_type = entity[2]
            if entity_type not in output:
                # Entity type completely absent from the prediction.
                missed = True
            elif (entity[0], entity[1]) not in output[entity_type]:
                wrong_span = True
        if missed:
            missed_entity_indices.append(i)
        if wrong_span:
            incorrect_span_indices.append(i)
        if not missed and not wrong_span:
            correct_indices.append(i)
    return missed_entity_indices, incorrect_span_indices, correct_indices
def get_parents(child, relations):
    """Walk (parent, child) ``relations`` upward from ``child``.

    Repeatedly scans the relation list following child -> parent links,
    recording every parent encountered, until a full pass leaves the
    current node unchanged.
    """
    parents = []
    current = child
    previous = None
    while current != previous:
        previous = current
        for relation in relations:
            if relation[1] == current:
                current = relation[0]
                parents.append(current)
    return parents
def clean_text(text):
    """Clean text: strip surrounding whitespace and lowercase.

    NOTE: the original docstring also promised Vietnamese accent removal,
    but that step (unidecode) is commented out and never performed.
    """
    # strip() replaces the original redundant lstrip().rstrip() chain.
    return text.strip().lower()
    # return unidecode(text.strip().lower())
def get_data_shapes(masks):
    """Generate output shapes for tensorflow datasets.

    outputs:
        with masks    = shapes for (images, gt_boxes, gt_labels, masks,
                        mask id labels)
        without masks = shapes for (images, gt_boxes, gt_labels)
    """
    shapes = [[None, None, None], [None], [None, None], [None]]
    if masks:
        shapes += [[None, None, None], [None]]
    return tuple(shapes)
def UseExistingBootDisk(disks):
    """Return True if the user has specified an existing boot disk."""
    for disk in disks:
        if disk.get('boot', False):
            return True
    return False
def text_replace_line(text, old, new,
                      find=lambda old, new: old == new,
                      process=lambda _: _):
    """Replace lines matching ``old`` with ``new``.

    Each line and ``old`` are passed through ``process`` before being
    compared with ``find``.  Returns the new text and the count of
    replacements.
    """
    out_lines = []
    replaced = 0
    for line in text.split("\n"):
        if find(process(line), process(old)):
            out_lines.append(new)
            replaced += 1
        else:
            out_lines.append(line)
    return "\n".join(out_lines), replaced
def pytest_report_header(config):
    """Pytest hook, see :py:func:`_pytest.hookspec.pytest_report_header`
    (:ref:`pytest:plugins`).

    Shows which URL is tested with which user and where screenshots are
    stored.  Displayed before the test run when config values are set:

    .. code-block:: python

        def pytest_configure(config):
            config.webdriverwrapper_screenshot_path = os.path.join('/', 'tmp', 'testresults')
            config.webdriverwrapper_testing_url = 'http://example.com'
            config.webdriverwrapper_testing_username = 'testing_username'

    If your web app does not need any user, just don't set it.
    """
    labeled = (
        ('| Screenshot path: {}', getattr(config, 'webdriverwrapper_screenshot_path', None)),
        ('| Testing URL: {}', getattr(config, 'webdriverwrapper_testing_url', None)),
        ('| Testing username: {}', getattr(config, 'webdriverwrapper_testing_username', None)),
    )
    lines = [template.format(value) for template, value in labeled if value]
    if lines:
        border = '+' + '-' * 75
        return [border] + lines + [border]
def getEverythingElseCard(cellNum, surfaceNum, comment):
    """Create a cell which encompasses everything outside an assembly/core.

    The card (excluding the comment) must fit in 80 columns.
    """
    cellCard = f"{cellNum} 0 {surfaceNum} imp:n=0 {comment}"
    assert (len(cellCard) - len(comment)) < 80
    return cellCard
def format_prop(prop):
    """Map schema property names to node-instance lookup keys.

    'id' becomes 'node_id' and 'type' becomes 'label' so the property can
    be looked up as a dictionary entry:

    .. code-block:: python

        node[prop]

    Any other property passes through unchanged.
    """
    renames = {"id": "node_id", "type": "label"}
    return renames.get(prop, prop)
def is_triangular(k):
    """Return True when ``k`` (a positive integer) is a triangular number.

    k is triangular iff 8*k + 1 is a perfect square.  This O(1) test
    replaces the original O(k) accumulation of partial sums (which also
    needed a special case for k == 1).
    """
    import math

    if k < 1:
        # Matches the original: non-positive inputs are not triangular.
        return False
    target = 8 * k + 1
    root = math.isqrt(target)
    return root * root == target
def quality_check(data):
    """Parse a RAWMONITOR telemetry string.

    Expects a string like 'RAWMONITOR<freq>_<temp>'.  Returns a
    (frequency, temperature) tuple for valid data, and False otherwise.
    The original fell through and returned None (despite documenting
    False) when ``data`` equalled False, and raised TypeError on None.
    """
    if data is not False and data is not None and "RAWMONITOR" in data:
        parts = data.replace("RAWMONITOR", "").split("_")
        frequency = 2 * 8000000 - int(parts[0])
        temperature = int(parts[1]) / 10
        return (frequency, temperature)
    return False
def sum_up_diagonals(matrix):
    """Given a matrix [square list of lists], return sum of diagonals.

    Sum of TL-to-BR diagonal along with BL-to-TR diagonal; for odd-sized
    matrices the center element is counted twice (as the doctests show).

    >>> m1 = [
    ...     [1, 2],
    ...     [30, 40],
    ... ]
    >>> sum_up_diagonals(m1)
    73
    >>> m2 = [
    ...     [1, 2, 3],
    ...     [4, 5, 6],
    ...     [7, 8, 9],
    ... ]
    >>> sum_up_diagonals(m2)
    30
    """
    size = len(matrix)
    total = 0
    # (debug print removed from the original loop body)
    for i in range(size):
        total += matrix[i][i] + matrix[size - i - 1][i]
    return total
def crc8(b, crc):
    """Update a CRC-8 value (polynomial 0x8C, LSB-first) with one new byte."""
    byte = b + 256 if b < 0 else b  # map a signed byte onto 0..255
    for _ in range(8):
        lsb_differs = (byte ^ crc) & 1
        crc >>= 1
        byte >>= 1
        if lsb_differs:
            crc ^= 0x8C
    return crc
def timestamp_to_bytestring(val, padding=8):
    """Convert a Unix timestamp to a big-endian byte string.

    Python 2 style: each byte is a chr() character and the result is a
    str left-padded with NUL characters to ``padding`` length.
    """
    chars = []
    while val:
        chars.append(chr(val & 0xFF))
        val >>= 8
    return ''.join(reversed(chars)).rjust(padding, '\0')
def calc_min_cost(string: str) -> int:
    """Determine the minimum cost of a string: one unit per distinct character."""
    return len(set(string))
def isprintable(s, codec='utf8'):
    """Return True when ``s`` decodes cleanly with ``codec``.

    NOTE(review): despite the name, this tests decodability, not
    printability.  Python 2-era code: ``s`` is expected to be a byte
    string; on Python 3 a ``str`` argument would raise AttributeError
    (only UnicodeDecodeError is caught) — confirm callers pass bytes.
    """
    try:
        s.decode(codec)
    except UnicodeDecodeError:
        return False
    else:
        return True
def join_adjacent_intervals(intervals):
    """Join adjacent or overlapping intervals into contiguous intervals.

    Args:
        intervals (list of 2-element iterables): A list of iterables with 2
            elements where each such iterable (eg. the tuple (start, end))
            defines the start and end of the interval.

    Returns:
        list of list: Contiguous intervals.

    Examples:
        >>> join_adjacent_intervals([[1, 2], [2, 3], [-1, 1]])
        [[-1, 3]]
        >>> from datetime import datetime
        >>> contiguous = join_adjacent_intervals([
        ...     (datetime(2000, 1, 1), datetime(2000, 2, 1)),
        ...     (datetime(1999, 1, 1), datetime(2000, 1, 1)),
        ...     (datetime(1995, 1, 1), datetime(1995, 2, 1))
        ... ])
        >>> contiguous == [
        ...     [datetime(1995, 1, 1), datetime(1995, 2, 1)],
        ...     [datetime(1999, 1, 1), datetime(2000, 2, 1)],
        ... ]
        True
        >>> join_adjacent_intervals([]) == []
        True
    """
    # Sort copies (mutable lists) by start; iterate instead of the
    # original repeated pop(0), which is O(n) per pop and O(n^2) overall.
    sorted_intervals = sorted((list(interval) for interval in intervals),
                              key=lambda pair: pair[0])
    if not sorted_intervals:
        return []
    contiguous = [sorted_intervals[0]]
    for interval in sorted_intervals[1:]:
        if interval[0] <= contiguous[-1][1]:
            # Overlapping/adjacent: extend the current contiguous interval.
            contiguous[-1][1] = max(interval[1], contiguous[-1][1])
        else:
            contiguous.append(interval)
    return contiguous
def computeIoU(box1, box2):
    """Compute the intersection-over-union of two axis-aligned boxes.

    :param box1: [bottom-left-x, bottom-left-y, top-right-x, top-right-y]
    :param box2: [bottom-left-x, bottom-left-y, top-right-x, top-right-y]
    :return: IoU in [0.0, 1.0]

    Bug fixed: the original added +1 to the intersection width/height (pixel
    convention) while computing the union areas without +1, which mixed
    conventions and could yield IoU > 1. Both now use the continuous
    (no +1) convention. Also guards against a zero/negative union
    (degenerate boxes) instead of raising ZeroDivisionError.
    """
    inter_x1 = max(box1[0], box2[0])
    inter_y1 = max(box1[1], box2[1])
    inter_x2 = min(box1[2], box2[2])
    inter_y2 = min(box1[3], box2[3])
    if inter_x1 < inter_x2 and inter_y1 < inter_y2:
        inter = (inter_x2 - inter_x1) * (inter_y2 - inter_y1)
    else:
        inter = 0
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union = area1 + area2 - inter
    if union <= 0:
        # Degenerate boxes (zero area): define IoU as 0 rather than divide by 0.
        return 0.0
    return float(inter) / union
def get_hostname_from_fqdn(fqdn):
    """Return the hostname (left-most period-delimited label) of *fqdn*.

    A name with no periods is returned unchanged.
    """
    # str.split always yields at least one element, so this also covers
    # the no-dot case without a separate branch.
    return fqdn.split(".", 1)[0]
def splitPackageName(packageName):
    """Return the cumulative dotted prefixes of a package name.

    e.g. given com.example.appname.library.widgetname
    returns com
            com.example
            com.example.appname
    etc., always ending with the full name itself.
    """
    # A leading dot means there is no prefix before the first separator;
    # only the full name is returned (mirrors the original find-loop).
    if packageName.startswith('.'):
        return [packageName]
    labels = packageName.split('.')
    prefixes = ['.'.join(labels[:count]) for count in range(1, len(labels))]
    prefixes.append(packageName)
    return prefixes
def bubble_sort(integers):
    """Return a sorted copy of *integers* using bubble sort.

    Repeatedly compares each element with its right neighbor and swaps
    out-of-order pairs; after each outer pass the largest remaining
    element has bubbled to the end, so the scanned range shrinks by one.
    """
    result = list(integers)
    for unsorted_end in range(len(result) - 1, 0, -1):
        for idx in range(unsorted_end):
            if result[idx] > result[idx + 1]:
                # Tuple swap instead of a temp variable.
                result[idx], result[idx + 1] = result[idx + 1], result[idx]
    return result
def image_format(alt, url):
    """Build a markdown image link: ``![alt](url)``."""
    return f"![{alt}]({url})"
def get_seconds(hours, minutes, seconds):
    """Convert hours, minutes and seconds into a total number of seconds."""
    return (hours * 60 + minutes) * 60 + seconds
def get_sentiment(polarity_score):
    """Map a polarity score to "POSITIVE", "NEUTRAL" or "NEGATIVE".

    Scores >= 0.05 are positive, scores strictly between -0.05 and 0.05
    are neutral, everything else is negative.
    """
    if polarity_score >= 0.05:
        return "POSITIVE"
    if -0.05 < polarity_score < 0.05:
        return "NEUTRAL"
    return "NEGATIVE"
def list_placeholder(length, is_pg=False):
    """Return a SQL placeholder tuple string of the desired length.

    Returns ``(?,?,?,...)``; if *is_pg*, returns ``({},{},{},...)`` instead.

    Bug fixed: the original's ``'?,'*(length-1) + '?)'`` construction
    produced ``'(?)'`` for ``length == 0``; str.join now yields ``'()'``
    for an empty placeholder list.
    """
    marker = '{}' if is_pg else '?'
    return '(' + ','.join([marker] * length) + ')'
def flatten_data(d, res, prefix=''):
    """Flatten a nested dictionary into *res* with underscore-joined keys.

    Nested dicts are recursed into with the parent key appended to the
    prefix; list values are skipped entirely; all other values are copied
    into *res* under ``prefix + key``. Returns *res* for convenience.
    """
    for key, value in d.items():
        if isinstance(value, dict):
            flatten_data(value, res, prefix + key + '_')
        elif isinstance(value, list):
            # Lists are deliberately dropped from the flattened output.
            continue
        else:
            res[prefix + key] = value
    return res
def get_minimal_box(points):
    """ Get the minimal bounding box of a group of points.
    The coordinates are also converted to int numbers. """
    xs = [point[0] for point in points]
    ys = [point[1] for point in points]
    # int() truncates the extremes after min/max, matching the original order.
    return [int(min(xs)), int(min(ys)), int(max(xs)), int(max(ys))]
def Main(a, b, c, d):
    """Return ``a + b - c * d``.

    :param a: first addend
    :param b: second addend
    :param c: first factor of the subtracted product
    :param d: second factor of the subtracted product
    :return: a + b - c * d
    """
    product = c * d
    return a + b - product
def _float(value: str) -> float: """Convert string to float value Convert a string to a floating point number (including, e.g. -0.5960D-01). Whitespace or empty value is set to 0.0. Args: value: string value Returns: Float value """ if value.isspace() or not value: return 0.0 else: return float(value.replace("D", "e"))
def nice_size(size, number_only=False):
    """
    Returns a readably formatted string with the size

    If ``number_only`` is set, return the number as a string; otherwise,
    add the unit (e.g., KB, MB). If the ``size`` cannot be parsed,
    return ``N/A``.

    >>> nice_size(100)
    '100 bytes'
    >>> nice_size(10000)
    '9.8 KB'
    >>> nice_size(1000000)
    '976.6 KB'
    >>> nice_size(100000000)
    '95.4 MB'
    """
    units = ['bytes', 'KB', 'MB', 'GB', 'TB']
    try:
        amount = float(size)
    except Exception:
        return 'N/A'
    for exponent, unit in enumerate(units):
        # First unit whose next step exceeds the size is the right scale.
        if amount < 1024 ** (exponent + 1):
            scaled = amount / float(1024 ** exponent)
            if number_only:
                return '%.1f' % scaled
            if unit == 'bytes':
                # No decimals for bytes
                return '%d bytes' % scaled
            return '%.1f %s' % (scaled, unit)
    # Larger than the biggest unit we know about.
    return 'N/A'
def ramb18_2x(tile_name, luts, lines, sites):
    """ RAMB18E1 in both top and bottom site.

    Appends a Verilog snippet instantiating one locked RAMB18E1 in the
    tile's FIFO18E1 site and one in its RAMB18E1 site to ``lines``
    (mutated in place), and returns the feature parameters for the tile.

    NOTE(review): ``luts`` is accepted but never used here — presumably
    kept for signature parity with sibling generators; confirm against
    the callers.
    """
    # Feature flags: both 18K halves in use as plain BRAM, neither as FIFO.
    params = {}
    params['tile'] = tile_name
    params['Y0_IN_USE'] = True
    params['Y1_IN_USE'] = True
    params['FIFO_Y0_IN_USE'] = False
    params['FIFO_Y1_IN_USE'] = False
    # KEEP/DONT_TOUCH plus an explicit LOC pin each instance to its site so
    # the toolchain cannot optimize them away or relocate them.
    lines.append(
        ''' (* KEEP, DONT_TOUCH, LOC = "{top_site}" *) RAMB18E1 #( ) bram_{top_site} ( ); (* KEEP, DONT_TOUCH, LOC = "{bottom_site}" *) RAMB18E1 #( ) bram_{bottom_site} ( ); '''.format(
            top_site=sites['FIFO18E1'],
            bottom_site=sites['RAMB18E1'],
        ))
    return params
def split_chunks(lst, n):
    """ Split lst into list of lists size n.

    The final chunk holds the remainder and may be shorter than n.

    :return: list of lists
    """
    return [lst[start:start + n] for start in range(0, len(lst), n)]
def shift_point(point, offset):
    """Shift a 2-D point by an (dx, dy) offset, returning a new tuple."""
    return (point[0] + offset[0], point[1] + offset[1])
def cd2W(intensity, efficiency, surface):
    """Convert luminous intensity to radiant power in watts.

    intensity in candelas, surface in steradians, efficiency a unitless
    factor; intensity * surface gives lumens, and 683 lm/W is the peak
    luminous efficacy constant.
    """
    return (intensity * surface) / (efficiency * 683)
def create_dict(num, k):
    """Map each node index to its offset within cluster *num*.

    Parameters
    ----------
    num : number of clusters (cluster index used as the offset multiplier)
    k : number of nodes

    Returns
    -------
    A : dict mapping i -> i + k * num for i in range(k)
    """
    return {node: node + k * num for node in range(k)}
def normal_vector(vector):
    """Return the 2-D vector rotated 90° counter-clockwise (a perpendicular)."""
    x, y = vector
    return (-y, x)