content
stringlengths
42
6.51k
def get_agrovoc_uris(dl):
    """Return only the URI descriptors from a mixed Agritrop result list.

    Keeps entries that start with 'http' (covers both http and https),
    dropping plain-text descriptors.
    """
    return [descriptor for descriptor in dl if descriptor.startswith('http')]
def response_ssml_text_card(title, output, endsession):
    """Build an Alexa-style response dict with a simple card plus SSML speech.

    The speech text is the plain output wrapped in <speak> tags.
    """
    ssml_speech = "<speak>" + output + "</speak>"
    return {
        'card': {'type': 'Simple', 'title': title, 'content': output},
        'outputSpeech': {'type': 'SSML', 'ssml': ssml_speech},
        'shouldEndSession': endsession,
    }
def split_first(s, delims):
    """Split ``s`` on whichever delimiter in ``delims`` occurs first.

    Returns ``(before, after, matched_delim)``. If no delimiter is found,
    the first element is the whole string and the delimiter is None.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Linear in the number of delimiters; not suited to very large delim sets.
    """
    best_idx = None
    best_delim = None
    for delim in delims:
        pos = s.find(delim)
        if pos < 0:
            continue
        if best_idx is None or pos < best_idx:
            best_idx = pos
            best_delim = delim
    if best_idx is None:
        return s, '', None
    return s[:best_idx], s[best_idx + 1:], best_delim
def to_filename(name: str) -> str:
    """Convert a project or version name to its filename-escaped form.

    Currently this only replaces '-' characters with '_'.
    """
    return name.translate(str.maketrans("-", "_"))
def isreadable(f):
    """Best-effort check that a file-like object can be read from.

    Approximates io.IOBase.readable: defers to a ``readable()`` method when
    present; raises ValueError for a closed file (mirroring io.IOBase);
    otherwise falls back to inspecting ``read`` and ``mode`` attributes.
    """
    if hasattr(f, 'readable'):
        return f.readable()
    if hasattr(f, 'closed') and f.closed:
        # Mimic io.IOBase.readable on a closed file.
        raise ValueError('I/O operation on closed file')
    if not hasattr(f, 'read'):
        return False
    # A mode without 'r' or '+' is write-only.
    if hasattr(f, 'mode') and not any(flag in f.mode for flag in 'r+'):
        return False
    # Open, has read(), and no known non-readable mode: assume readable.
    return True
def translate_string(s, trans_tab):
    """Apply the (old, new) replacement pairs in ``trans_tab`` to ``s``.

    Replacements are applied sequentially, so a later pair can rewrite the
    output of an earlier one.
    """
    result = s
    for old, new in trans_tab:
        result = result.replace(old, new)
    return result
def decode_uint40(bb):
    """Decode 5 bytes as an unsigned 40-bit little-endian integer.

    Specs:
      * uint40 len: 5 bytes
      * Format string: 'j'
    """
    return int.from_bytes(bb, 'little')
def diff_list(l1, l2):
    """Compute the set-difference (l1 - l2), preserving order and duplicates."""
    # Membership test is O(len(l2)) per element; matches original semantics
    # (works even when l2 holds unhashable items).
    return [element for element in l1 if element not in l2]
def get_matching_columns(columns, whitelist):
    """Return the columns containing any whitelist pattern as a substring.

    Order follows the whitelist (then the column order), and each column
    appears at most once.
    """
    matched = []
    for pattern in whitelist:
        for col in columns:
            if pattern in col and col not in matched:
                matched.append(col)
    return matched
def like(s1, s2, wildcard='%'):
    """SQL-LIKE-style pattern match of ``s2`` against pattern ``s1``.

    @param s1: pattern string (may begin with '^' for case-insensitive match)
    @param s2: string to test
    @wildcard: a special symbol that stands for one or more characters.

    >>> like('*dog*', 'adogert','*')
    True
    >>> like('dog*', 'adogert','*')
    False
    >>> like('*dog', '*adogert','*')
    False
    """
    # NOTE(review): '^' appears to be a case-insensitivity flag: both strings
    # are upper-cased when the pattern starts with '^' — confirm with callers.
    if s1.startswith('^'):
        s1 = s1[1:].upper()
        s2 = s2.upper()
    # Exact match, or the pattern is a lone wildcard (matches anything).
    if s1 == wildcard or s1 == s2:
        return True
    elif not wildcard in s1:
        # No wildcard and not equal -> no match.
        return False
    if s1.startswith(wildcard):
        if s1.endswith(wildcard):
            # '*x*' -> substring match.
            return bool(s1[1:-1] in s2)
        # '*x' -> suffix match.
        return bool(s2.endswith(s1[1:]))
    if s1.endswith(wildcard):
        # 'x*' -> prefix match.
        return bool(s2.startswith(s1[:-1]))
    # Wildcard in the middle of the pattern is not supported.
    return False
def possibly_flip(is_white, neighbors):
    """Return the tile's new color after applying the flip rules.

    Rules:
      * a black tile with zero or more than 2 black neighbors flips to white;
      * a white tile with exactly 2 black neighbors flips to black;
      * otherwise the color is unchanged.

    ``neighbors`` is an iterable of booleans (True = white).
    True in the return value means white.
    """
    black_count = sum(1 for neighbor_white in neighbors if not neighbor_white)
    if not is_white and (black_count == 0 or black_count > 2):
        return True
    if is_white and black_count == 2:
        return False
    return is_white
def round_progress(x, base=5):
    """Round ``x`` to the closest multiple of ``base``.

    :param x: float value to round
    :param base: int, the multiple to round to
    :return: int (or float when base is float), the rounded number

    Note: uses built-in round(), i.e. banker's rounding on exact halves.
    """
    multiple = round(x / base)
    return base * multiple
def extract_next_gene(metagenome_lines, next_line):
    """Extract one FASTA-style snippet starting at ``next_line``.

    Helper for load_metagenome. Expects metagenome_lines[next_line] to be a
    header line ('>name'); collects the following lines as the sequence until
    the next header or end of input.

    returns: (snippet name, concatenated sequence, index of the next line to
    process).
    """
    # Strip the leading '>' from the header to get the name.
    name = metagenome_lines[next_line].strip()[1:]
    next_line += 1
    seq_start = next_line
    while next_line < len(metagenome_lines) and metagenome_lines[next_line][0] != '>':
        next_line += 1
    sequence = ''.join(line.strip() for line in metagenome_lines[seq_start:next_line])
    return (name, sequence, next_line)
def get_pct_change(ini_val, new_val):
    """Get the fractional change between two values.

    Args:
        ini_val: initial value.
        new_val: new value.

    Returns:
        Fractional change (e.g. 0.5 for +50%); 0 when new_val is 0.

    NOTE(review): decreases are measured relative to ini_val but increases
    relative to new_val — the denominators are asymmetric; confirm this is
    intended before changing.
    """
    if new_val == 0:
        return 0
    if new_val < ini_val:
        # Decrease: negative fraction of the initial value.
        return -((ini_val - new_val) / ini_val)
    # Increase (or equal): fraction of the new value.
    return (new_val - ini_val) / new_val
def create_sas_url(account_name, container_name, blob_name, sas_token):
    """Assemble an Azure blob SAS URL from its components.

    :param account_name: type str: Name of Azure storage account
    :param container_name: type str: Name of the container of interest
    :param blob_name: type str: Name and path of the file of interest
    :param sas_token: type azure.storage.blob.generate_blob_sas
    :return: String of the SAS URL
    """
    # Layout: https://<account>.blob.core.windows.net/<container>/<blob>?<token>
    return 'https://{}.blob.core.windows.net/{}/{}?{}'.format(
        account_name, container_name, blob_name, sas_token)
def get_layers(data_raw, width, height):
    """Split raw image data into layers of ``width * height`` entries.

    Returns a list where each entry is one layer (a slice of data_raw).
    A trailing partial layer, if any, is kept as-is.
    """
    layer_size = width * height
    return [data_raw[offset:offset + layer_size]
            for offset in range(0, len(data_raw), layer_size)]
def is_job(job):
    """Check whether a Jenkins item is a real job (not a folder or view).

    :param job: Jenkins job representation as a dictionary (must contain
        a "_class" key).
    :type job: dict
    :returns: True when the "_class" string mentions neither "view" nor
        "folder" (case-insensitive).
    :rtype: bool
    """
    job_class = job["_class"].lower()
    return "view" not in job_class and "folder" not in job_class
def seven_seg(a: bool, b: bool, c: bool, d: bool, e: bool, f: bool, g: bool) -> str:
    """Render a 7-segment display as three text lines.

    A fully-lit display looks like::

         _
        |_|
        |_|

    Segment layout of the booleans::

         a
        fgb
        edc

    Unlit left/middle segments render as spaces; an unlit right segment is
    simply omitted (trailing whitespace is not padded).
    """
    top_row = (" _" if a else "") + "\n"
    middle_row = ("|" if f else " ") + ("_" if g else " ") + ("|" if b else "") + "\n"
    bottom_row = ("|" if e else " ") + ("_" if d else " ") + ("|" if c else "") + "\n"
    return top_row + middle_row + bottom_row
def is_referable(json_subschema):
    """Return True if the subschema describes a referable object, or an
    array of referable objects.

    A subschema is "referable" when its ``oneOf`` mixes a CURIE alternative
    (a ``$ref`` ending in ``/CURIE``) with at least one non-CURIE
    alternative. Arrays recurse into their ``items``.
    """
    if "oneOf" in json_subschema:
        # Schema is a oneOf of a CURIE type and at least one non-CURIE type.
        refs = [option.get("$ref") for option in json_subschema["oneOf"]]
        has_curie = any(r is not None and r.endswith("/CURIE") for r in refs)
        # Bug fix: an option without any "$ref" counts as a non-CURIE
        # alternative; the previous code called None.endswith and crashed.
        has_non_curie = any(r is None or not r.endswith("/CURIE") for r in refs)
        return has_curie and has_non_curie
    if "type" in json_subschema:
        if json_subschema["type"] == "array":
            # An array of referable types is itself referable.
            return is_referable(json_subschema["items"])
    return False
def parse_item(item, typ=None, strip=' '):
    """Parse a single string item into int, float, or a given type.

    Strips ``strip`` characters first, then tries each candidate type in
    order (int then float by default, or just ``typ`` when given); falls
    back to returning the stripped string unchanged.
    """
    item = item.strip(strip)
    candidates = (int, float) if typ is None else (typ,)
    for caster in candidates:
        try:
            return caster(item)
        except ValueError:
            pass
    return item
def indent(string: str, distance="\t", split="\n"):
    """Indent every line of ``string`` by ``distance``.

    Lines are delimited by ``split``; ``distance`` is prepended to each.
    """
    prefix = f"{distance}"
    separator = f"{split}{distance}"
    return prefix + separator.join(string.split(split))
def script_from_saved_model(saved_model_dir, output_file, input_arrays, output_arrays):
  """Generates a script for saved model to convert from TF to TF Lite.

  Args:
    saved_model_dir: path to the SavedModel directory (interpolated into the
      script as a string literal).
    output_file: path the generated script will write the .tflite model to.
    input_arrays: input array names passed to the TFLite converter.
    output_arrays: output array names passed to the TFLite converter.

  Returns:
    A Python source string that, when executed, performs the conversion.
  """
  # The returned text is a standalone script; {placeholders} are filled in
  # via str.format below.
  return u"""# --- Python code ---
import tensorflow as tf

lite = tf.compat.v1.lite

saved_model_dir = '{saved_model_dir}'
output_file = '{output_file}'
converter = lite.TFLiteConverter.from_saved_model(
    saved_model_dir,
    input_arrays={input_arrays},
    output_arrays={output_arrays})
tflite_model = converter.convert()
with tf.io.gfile.GFile(output_file, 'wb') as f:
  f.write(tflite_model)
print('Write file: %s' % output_file)
""".format(
      saved_model_dir=saved_model_dir,
      output_file=output_file,
      input_arrays=input_arrays,
      output_arrays=output_arrays)
def dedup_list(l):
    """Remove duplicates from ``l`` while preserving first-seen order.

    Assumes the entries are hashable.
    """
    # dict preserves insertion order, so fromkeys de-duplicates in order.
    return list(dict.fromkeys(l))
def parse_response(response, detail):
    """Parse API response data.

    With ``detail`` True the raw response is returned unchanged (the Servers
    API includes extra data such as timestamps). Otherwise, list responses
    yield the 'attributes' of each item in 'data'; single-object responses
    yield their 'attributes' directly.

    Args:
        response (dict): A request response object.
        detail (bool): Include additional data from the raw response.
    """
    if detail:
        return response
    if response['object'] == 'list':
        return [item.get('attributes') for item in response.get('data')]
    return response.get('attributes')
def get_phred_query(sample_id, gt_ll, genotype, prefix=" and ", invert=False):
    """Build a phred-scale genotype-likelihood SQL clause.

    By default tests ``<`` since a low phred-scale value means high
    confidence for that genotype; ``invert`` flips the comparison.

    >>> get_phred_query(2, 22, "het")
    ' and gt_phred_ll_het[1] < 22'
    >>> get_phred_query(2, 22, "het", prefix="")
    'gt_phred_ll_het[1] < 22'
    >>> get_phred_query(2, 22, "het", prefix="", invert=True)
    'gt_phred_ll_het[1] > 22'
    """
    assert genotype in ("het", "homref", "homalt")
    if not gt_ll:
        return ""
    # A subject object may be passed instead of a bare id.
    if hasattr(sample_id, "sample_id"):
        sample_id = sample_id.sample_id
    comparison = ">" if invert else "<"
    # Array index is 0-based in the query, hence sample_id - 1.
    clause = "gt_phred_ll_{0}[{1}] {2} {3}".format(
        genotype, sample_id - 1, comparison, gt_ll)
    return prefix + clause
def state_to_databases(state, get_state):
    """Convert the state to a dict of connectable database clusters.

    The dict is:

    {"databases": {
        "databaseA": {
            "master": "10.0.0.19",
            "nodes": {
                "10.0.0.9": { ...extra node data... },
                "10.0.0.10": { ...extra node data... },
                "10.0.0.99": { ...extra node data... }}},
        "databaseB": {
            "master": "10.0.0.19",
            "nodes": {
                "10.0.0.19": { ...extra node data... },
                "10.0.0.20": { ...extra node data... }}},
    }}

    if get_state=True, then node data will be the contents of the state-
    znodes. If get_state=False, the node data will be the contents of the
    conn-znodes.
    """
    databases = {}
    for node, v in state.items():
        # Keys look like '<cluster>_<rest>'; entries without an underscore
        # do not belong to any cluster and are skipped.
        parts = node.split('_', 1)
        if len(parts) == 1:
            continue
        cluster_name, node = parts
        # 'state_<ip>' entries are used only when get_state is requested.
        if node.startswith('state_') and get_state:
            _, ip4 = node.split('_', 1)
            database = databases.setdefault(cluster_name, dict(nodes={}))
            # Each IP should appear at most once per cluster.
            assert ip4 not in database['nodes']
            database['nodes'][ip4] = v
        # 'conn_<ip>' entries are used only when connection data is requested.
        if node.startswith('conn_') and not get_state:
            _, ip4 = node.split('_', 1)
            database = databases.setdefault(cluster_name, dict(nodes={}))
            assert ip4 not in database['nodes']
            database['nodes'][ip4] = v
        # '<cluster>_master' records the cluster's master address.
        if node == 'master':
            cluster = databases.setdefault(cluster_name, dict(nodes={}))
            cluster['master'] = v
    return databases
def _path_names(xpath): """Parses the xpath names. This takes an input string and converts it to a list of gNMI Path names. Those are later turned into a gNMI Path Class object for use in the Get/SetRequests. Args: xpath: (str) xpath formatted path. Returns: list of gNMI path names. """ if not xpath or xpath == '/': # A blank xpath was provided at CLI. return [] return xpath.strip().strip('/').split('/') # Remove leading and trailing '/'.
def compute_R(state, turn):
    """Return the immediate-reward array for a tic-tac-toe board state.

    Occupied squares score -1. Each empty square starts at 0 and gains +1
    for each player (X and O) who could still win through it — i.e. every
    empty square is reachable by both, plus squares already marked count
    toward that player's winnable set.

    NOTE(review): ``turn`` is unused here; kept for interface compatibility.
    """
    x_squares = set()
    o_squares = set()
    open_squares = set()
    rewards = []
    for idx, mark in enumerate(state):
        if mark is None:
            rewards.append(0)
            open_squares.add(idx)
        else:
            rewards.append(-1)
            if mark == 1:
                x_squares.add(idx)
            else:
                o_squares.add(idx)
    winnable_by_x = x_squares | open_squares
    winnable_by_o = o_squares | open_squares
    for idx, score in enumerate(rewards):
        if score == -1:
            continue
        if idx in winnable_by_x:
            rewards[idx] += 1
        if idx in winnable_by_o:
            rewards[idx] += 1
    return rewards
def invmod(a, b):
    """Find the multiplicative inverse of ``a`` modulo ``b``.

    Uses the extended Euclidean algorithm.

    :return: An int ``x`` such that ``a * x % b == 1``.
    :raises ValueError: if ``a`` has no inverse modulo ``b`` (gcd != 1).
        (Previously this case silently returned None.)
    """
    b = abs(b)
    if a < 0:
        # Normalize a into [0, b).
        a = b - (-a % b)
    x, nx = 0, 1
    r, nr = b, a % b
    while nr:
        q = r // nr
        x, nx = nx, x - q * nx
        r, nr = nr, r - q * nr
    if r != 1:
        raise ValueError('%d has no inverse modulo %d' % (a, b))
    return x + b if x < 0 else x
def get_tool_info_from_test_id(test_id):
    """Extract the tool ID and tool version from a functional test ID.

    Test IDs come in the form
    ``test_tool_number (functional.test_toolbox.TestForTool_toolshed_url/repos/owner/repository_name/tool_id/tool_version)``;
    the last two '/'-separated components are the tool ID and version.
    """
    parts = test_id.replace(')', '').split('/')
    return parts[-2], parts[-1]
def intersection(lst1, lst2):
    """Find hashes common to both lists and their locations in each document.

    Similarity is sim(A,B) = |intersection of hash sets| / min(|set1|, |set2|).

    \param lst1: list of (hash, document ID, character position) tuples
    \param lst2: list of (hash, document ID, character position) tuples
    \return l3: list of (common hash, (doc ID1, location1), (doc ID2, location2))
    \return sim: similarity measure

    NOTE(review): when a hash appears multiple times in a list, only its
    last location survives the dict build — confirm this is acceptable.
    """
    hashes1 = [entry[0] for entry in lst1]
    hashes2 = [entry[0] for entry in lst2]
    locations1 = {entry[0]: entry[1:] for entry in lst1}
    locations2 = {entry[0]: entry[1:] for entry in lst2}
    common = list(set(hashes1) & set(hashes2))
    l3 = [(h, locations1[h], locations2[h]) for h in common]
    sim = len(l3) / min(len(set(hashes1)), len(set(hashes2)))
    return l3, sim
def sum_period_review_assignment_stats(raw_aggregation):
    """Compute statistics from aggregated review request data for one
    aggregation point.

    ``raw_aggregation`` unpacks to (state counts, late-state counts, result
    counts, closure-days samples, closure-days sample count).
    """
    (state_dict, late_state_dict, result_dict,
     closure_days, closure_days_count) = raw_aggregation

    def count_states(source, states):
        # Sum counts for the given states, treating missing states as 0.
        return sum(source.get(s, 0) for s in states)

    res = {
        "state": state_dict,
        "result": result_dict,
        "open": count_states(state_dict, ("assigned", "accepted")),
        "completed": count_states(state_dict, ("completed", "part-completed")),
        "not_completed": sum(
            state_dict.get(s, 0) for s in state_dict
            if s in ("rejected", "withdrawn", "overtaken", "no-response")),
        "open_late": count_states(late_state_dict, ("assigned", "accepted")),
        "completed_late": count_states(late_state_dict, ("completed", "part-completed")),
    }
    res["open_in_time"] = res["open"] - res["open_late"]
    res["completed_in_time"] = res["completed"] - res["completed_late"]
    if closure_days:
        res["average_assignment_to_closure_days"] = (
            float(sum(closure_days)) / (closure_days_count or 1))
    else:
        res["average_assignment_to_closure_days"] = None
    return res
def get_target_id(target):
    """Extract the resource ID from an event target.

    :param target: a resource model (e.g. `Ticket` or `WikiPage`)
    :return: the resource ID as a string (or the page name for wiki pages)
    """
    # Common Trac resource: numeric/primary id.
    if hasattr(target, 'id'):
        return str(target.id)
    # Wiki page special case: identified by name.
    if hasattr(target, 'name'):
        return target.name
    # Last resort: just stringify.
    return str(target)
def extract_pkid_from_uuid(pkid_or_uuid):
    """Strip any brace encapsulation from an AXL uuid, if present.

    No validation of the uuid format is performed; a string without braces
    is returned unchanged.

    :param pkid_or_uuid: (str) pkid or uuid
    :return: (str) pkid with stripped encapsulation
    """
    # Single-pass removal of both brace characters.
    return pkid_or_uuid.translate(str.maketrans('', '', '{}'))
def reldev(y1, y2):
    """Calculate the relative percent deviation between LCDM and non-LCDM
    outputs, as a percentage of ``y1``."""
    delta = y2 - y1
    return 100.0 * delta / y1
def is_valid_chunk_id(id):
    """Check whether ``id`` is a valid FOURCC chunk identifier.

    Valid means: a str of length 1-4 whose characters are all printable
    ASCII (between ' ' and '~').

    Arguments:
        id (FOURCC)

    Returns:
        true if valid; otherwise false
    """
    assert isinstance(id, str), \
        'id is of type %s, must be str: %r' % (type(id), id)
    if not 0 < len(id) <= 4:
        return False
    # Every character must be in the printable ASCII range.
    return min(id) >= ' ' and max(id) <= '~'
def create_inventory(items):
    """Create an inventory dictionary from a list of items.

    Each item becomes a key; the value is the number of occurrences.

    :param items: list - list of items to create an inventory from.
    :return: dict - the inventory dictionary.
    """
    inventory = {}
    for item in items:
        inventory[item] = inventory.get(item, 0) + 1
    return inventory
def dense_alter_layout(attrs, inputs, tinfos, out_type): """Change dense layout. Parameters ---------- attrs : tvm.ir.Attrs Attributes of current convolution inputs : tvm.relay.Expr Grouped input symbols tinfos : list Input shape and dtype out_type: type The output type Note ---- Unlike other TOPI functions, this function operates on both graph level and operator level. """ # not to change by default return None
def and_(x, y):
    """Implementation of `and` (`&`).

    Calls ``x.__and__(y)`` directly. Note this differs subtly from ``x & y``:
    the operator form would also try ``y.__rand__(x)`` when ``x.__and__``
    returns NotImplemented, whereas this returns NotImplemented as-is.
    """
    return x.__and__(y)
def is_prime(num):
    """Return True if ``num`` is prime.

    Trial division up to sqrt(num) — the previous version tested every
    integer below num, which is O(n) instead of O(sqrt(n)).
    """
    if num < 2:
        return False
    if num < 4:
        # 2 and 3 are prime.
        return True
    if num % 2 == 0:
        return False
    # Only odd candidates up to the square root can divide num.
    for candidate in range(3, int(num ** 0.5) + 1, 2):
        if num % candidate == 0:
            return False
    return True
def RPL_TRACEHANDSHAKE(sender, receipient, message):
    """Reply Code 202.

    NOTE(review): ``receipient`` (sic) is accepted but unused; kept for
    interface compatibility with the other reply-code handlers.
    """
    return "<{}>: {}".format(sender, message)
def is_point_in_range(p, p_min=None, p_max=None) -> bool:
    """Check whether a point lies between two optional border points.

    Args:
        p: tuple (x, y)
        p_min (optional): min border point
        p_max (optional): max border point

    Returns:
        True if p_max >= p >= p_min (component-wise), treating a missing
        border as unbounded.
    """
    if p_min is None and p_max is None:
        return True
    x, y = p
    if p_min and (x < p_min[0] or y < p_min[1]):
        return False
    if p_max and (x > p_max[0] or y > p_max[1]):
        return False
    return True
def get_innovation_check_flags(estimator_status: dict) -> dict:
    """Decode the innovation_check_flags bitmask into per-check fail flags.

    Bit layout (bit N set means that observation has been rejected):
      0 velocity · 1 horizontal position · 2 vertical position ·
      3/4/5 X/Y/Z magnetometer · 6 yaw · 7 airspeed ·
      8 synthetic sideslip · 9 height above ground ·
      10/11 X/Y optical flow

    :param estimator_status: dict with an 'innovation_check_flags' int entry
    :return: dict mapping '<check>_innov_fail' to 1 (rejected) or 0
    """
    # Order matches the bit positions documented above.
    check_names = ('vel', 'posh', 'posv', 'magx', 'magy', 'magz',
                   'yaw', 'tas', 'sli', 'hagl', 'ofx', 'ofy')
    flags = estimator_status['innovation_check_flags']
    return {
        '{}_innov_fail'.format(name): int(bool(flags & (1 << bit)))
        for bit, name in enumerate(check_names)
    }
def normalize_query_result(result, sort=True):
    """Post-process a query result into a simple nested list.

    :param result: A QueryResult object (iterable of indexable rows).
    :param sort: if True (default) rows will be sorted.
    :return: A list of lists of RDF values.
    """
    normalized = [[row[idx] for idx in range(len(row))] for row in result]
    if sort:
        return sorted(normalized)
    return normalized
def _make_path(keys, value, ext, version=None): """Generate a path to a lookup table or metadata""" if isinstance(keys, (list, tuple)): keys = '/'.join(keys) version_str = '' if version: version_str = '.{0}'.format(version) path = '{keys}/{value}{version}{ext}'.format( keys=keys, value=value, version=version_str, ext=ext ) return path
def unzip(ls, nout):
    """Unzip a list of lists into ``nout`` outputs.

    An empty input yields ``nout`` empty tuples; otherwise the transpose of
    the input is returned (``nout`` is unused in that case).
    """
    transposed = list(zip(*ls))
    return transposed if transposed else [()] * nout
def _get_ios_env_from_target(ios_sdk): """Return a value for the -sdk flag based on the target (device/simulator).""" if ios_sdk == "device": return "iphoneos" elif ios_sdk == "simulator": return "iphonesimulator" else: raise ValueError("Unrecognized ios_sdk: %s" % ios_sdk)
def safe_digest(hash_function):
    # type: (function) -> str
    """Return the hexdigest of ``hash_function`` if it is set.

    :param hash_function: hash function (may be None)
    :rtype: str
    :return: hex digest, or None when no hash function was given
    """
    if hash_function is None:
        return None
    return hash_function.hexdigest()
def merge(default, config):
    """Override the ``default`` dict with values from ``config``.

    Entries whose value is falsy or the literal string 'prompt' are
    ignored; neither input dict is mutated.
    """
    merged = default.copy()
    overrides = {key: val for key, val in config.items()
                 if val and val != 'prompt'}
    merged.update(overrides)
    return merged
def bt_adjusting(bt_value: float, epsilon: float, delta_finite_stat: int = 0) -> float:
    """Adjust a Bell value for the probability of error due to finite stats.

    Args:
        bt_value: Bell value.
        epsilon: How close the output string is to that of a perfect
            distribution.
        delta_finite_stat: Set to zero to assume no finite statistical
            effects.

    Returns:
        Adjusted Bell value.
    """
    denominator = 8 * (0.5 - epsilon) ** 3
    return (bt_value + delta_finite_stat) / denominator
def transform(order_data_dict: dict):
    """#### Transform task

    A simple Transform task which takes in the collection of order data and
    computes the total order value.
    """
    return {"total_order_value": sum(order_data_dict.values())}
def py2tex(codestr):
    """Convert a python code string to LaTeX.

    Rewrites "'" to a transpose superscript, drops '*', collapses '.^' to
    '^', and wraps the result in '$' math delimiters.
    """
    tex = codestr.replace("'", "^T").replace("*", "").replace(".^", "^")
    return '$' + tex + '$'
def fortran_value(value):
    """Get a Python value from a Fortran-type variable string.

    Args:
        value (str): Value read from Fortran-type input.

    Returns:
        value (bool, str, int, float): value in Python format.

    Raises:
        ValueError: when the value cannot be interpreted.
    """
    # Bug fix: the '.false.' key previously lacked its trailing dot, so
    # Fortran false literals fell through and raised ValueError.
    bools = {".true.": True, ".false.": False}
    str_separators = "'" '"'
    if value in bools:
        value = bools[value]
    elif value.strip('+-').isdigit():
        value = int(value)
    elif value[0] in str_separators:
        # Quoted string: strip the quote characters.
        value = value.strip(str_separators)
    else:
        try:
            # Fortran doubles use 'd' where Python expects 'e'.
            value = float(value.replace('d', 'e'))
        except ValueError:
            raise ValueError(f'{value} is incorrect')
    return value
def pred_pt_in_bbox(pt, bbox) -> bool:
    """Check whether a point lies inside a bbox on the same z plane.

    :param pt: [x, y, z]
    :param bbox: [z, xmin, ymin, xmax, ymax] — min bounds inclusive, max
        bounds exclusive.
    """
    same_plane = pt[2] == bbox[0]
    inside_x = bbox[1] <= pt[0] < bbox[3]
    inside_y = bbox[2] <= pt[1] < bbox[4]
    return bool(same_plane and inside_x and inside_y)
def print_num(n):
    """Print a number, formatted as int when it has no fractional part."""
    if float(n).is_integer():
        print(int(n))
    else:
        print(n)
def memoryToString(kmem, unit=None):
    """Return an amount of memory (in kilobytes) as a human-friendly string.

    ``unit`` forces "K" or "M" output; otherwise the unit is chosen by
    magnitude (K below 1 MiB, M below 1 GiB, else G with one decimal).
    """
    k = 1024
    if unit == "K" or (not unit and kmem < k):
        return "%dK" % kmem
    if unit == "M" or (not unit and kmem < k * k):
        return "%dM" % (kmem // k)
    return "%.01fG" % (kmem / (k * k))
def get_db_url_mysql(config):
    """Build a MySQL connection string from configuration parameters.

    A 'DB_URL_TESTING' entry, when present, short-circuits and is returned
    verbatim.
    """
    if 'DB_URL_TESTING' in config:
        return config['DB_URL_TESTING']
    template = 'mysql+mysqlconnector://{}:{}@{}/{}'
    return template.format(config['DB_USER'], config['DB_PASS'],
                           config['DB_HOST'], config['DB_NAME'])
def determine_edge_label_by_layertype(layer, layertype):
    """Define the graph edge label based on the Caffe layer type.

    Data layers show the batch size; Convolution/InnerProduct layers show
    their output count; anything else gets an empty quoted label.
    """
    if layertype == 'Data':
        return 'Batch ' + str(layer.data_param.batch_size)
    if layertype == 'Convolution':
        return str(layer.convolution_param.num_output)
    if layertype == 'InnerProduct':
        return str(layer.inner_product_param.num_output)
    return '""'
def get_clicked_pos(pos, rows, width):
    """Map a mouse pixel position onto a grid cell.

    Inputs: mouse position, number of rows, and the width of the entire
    grid. Outputs: (row, col) of the cell containing the mouse.

    NOTE(review): the first element of ``pos`` is mapped to the row — this
    assumes pos is (y, x) or a square grid; confirm against the caller.
    """
    gap = width // rows
    y, x = pos
    return y // gap, x // gap
def build_arg_dict(arg_list):
    """Build a dictionary from a 'key=value' argument listing.

    Splits on the FIRST '=' only, so values may themselves contain '='
    (the previous version truncated such values).

    Raises ValueError for an entry with no '=' at all.
    """
    arg_dict = {}
    for arg in arg_list:
        key, value = arg.split('=', 1)
        arg_dict[key] = value
    return arg_dict
def to_insert(table, d):
    """Generate an insert statement using the given table and dictionary.

    Args:
        table (str): table name
        d (dict): dictionary with column names as keys and values as values.

    Returns:
        tuple of statement and arguments

    >>> to_insert('doc.foobar', {'name': 'Marvin'})
    ('insert into doc.foobar ("name") values ($1)', ['Marvin'])
    """
    column_names = ['"{}"'.format(key) for key in d]
    arguments = list(d.values())
    placeholders = ', '.join('${}'.format(i + 1) for i in range(len(column_names)))
    stmt = 'insert into {} ({}) values ({})'.format(
        table, ', '.join(column_names), placeholders)
    return (stmt, arguments)
def sbg_survival_rate(x, a, b):
    """Probability that a customer alive at time x-1 is still alive at
    time x (shifted-beta-geometric model with parameters a, b)."""
    numerator = b + x - 1
    denominator = a + b + x - 1
    return numerator / denominator
def build_unicode(hyp):
    """Join the symbols of a hypothesis into a ';;'-separated string.

    Parameters
    ----------
    hyp : dict
        {'segmentation': [[0, 3], [1, 2]],
         'symbols': [{'symbol': ID, 'probability': 0.12}],
         'geometry': {'symbol': index, 'bottom': None or dict,
                      'subscript': None or dict, 'right': None or dict,
                      'superscript': None or dict, 'top': None or dict},
         'probability': 0.123}
    """
    return ";;".join(entry["symbol"] for entry in hyp["symbols"])
def generate_bond_indices(natoms):
    """Return the bond indices of an interatomic distance matrix in
    row-wise order:
    [[0,1], [0,2], [1,2], [0,3], [1,3], [2,3], ..., [natoms-1, natoms]]

    Parameters
    ----------
    natoms : int
        The number of atoms.

    Returns
    -------
    list
        A list of [i, j] index pairs (i < j), each the subscripts of one
        interatomic distance, e.g. r_01, r_12.
    """
    # For each column j, pair it with every smaller row index i.
    return [[i, j] for j in range(1, natoms) for i in range(j)]
def index(seq):
    """Build an index for a sequence of items.

    Assumes the items are unique.

    @param seq the sequence to index
    @returns a dictionary from item to position in the sequence
    """
    return {item: position for position, item in enumerate(seq)}
def data_reverse(data):
    """Reverse the order of the 8-bit segments in a bit array.

    Segments are taken from the END of the array 8 at a time, so when the
    length is not a multiple of 8 the shorter leading remainder comes last.

    :param data: an array of ones and zeros.
    :return: a new list with the 8-bit segments in reversed order.
    """
    reversed_bits = []
    remaining = list(data)
    while remaining:
        reversed_bits.extend(remaining[-8:])
        del remaining[-8:]
    return reversed_bits
def replace_smart_quotes(text: str, encoding: str = 'utf8') -> str:
    """Replace smart single and double quotes with straight ones.

    Bug fix: the previous version also replaced b'\\xe2\\x80\\x93' and
    b'\\xe2\\x80\\x94' — which are the UTF-8 encodings of EN DASH (U+2013)
    and EM DASH (U+2014), not quotes — turning dashes into '"'. Only the
    actual quote code points are converted now.
    """
    encoded = text.encode(encoding)
    # U+2018 / U+2019 (left/right single quotation mark) -> '
    for single_suffix in (b'\x98', b'\x99'):
        encoded = encoded.replace(b'\xe2\x80' + single_suffix, b"'")
    # U+201C / U+201D (left/right double quotation mark) -> "
    for double_suffix in (b'\x9c', b'\x9d'):
        encoded = encoded.replace(b'\xe2\x80' + double_suffix, b'"')
    return encoded.decode(encoding)
def tile_to_quadkey(tile_x, tile_y, level):
    """Convert Bing Maps tile coordinates to a QuadKey string.

    :param tile_x: The x axis coordinate of the tile at `level` level of detail
    :param tile_y: The y axis coordinate of the tile at `level` level of detail
    :param level: The level of detail of the Bing Map
    :return: A quadkey string of length `level`
    """
    digits = []
    for level_idx in range(level, 0, -1):
        mask = 1 << (level_idx - 1)
        # x contributes bit 0 of the digit, y contributes bit 1.
        digit = 0
        if tile_x & mask:
            digit += 1
        if tile_y & mask:
            digit += 2
        digits.append(str(digit))
    return ''.join(digits)
def vi_compress(sequence):
    """Compress a sequence into two lists:

    Vertices - the original data with no repeating values
    Indices  - indices into the vertex list used to rebuild the original data
    """
    values = []
    indices = []
    for item in sequence:
        try:
            position = values.index(item)
        except ValueError:
            # First occurrence: append and point at the new slot.
            position = len(values)
            values.append(item)
        indices.append(position)
    return values, indices
def cluster_same_freq(pairs_dic):
    """Group pairs into clusters sharing the same frequency.

    Fixes: removed an unused local (the pre-computed frequency set) and
    uses setdefault instead of a manual membership check.

    >>> T = [ list("ABC"), list("ABCABC") ]
    >>> cluster_same_freq( get_successor_pairs_by_freq(T) )
    {3: [('A', 'B'), ('A', 'C'), ('B', 'C')], 1: [('B', 'A'), ('C', 'A'), ('C', 'B')]}
    """
    groups = {}
    for pair, freq in pairs_dic.items():
        groups.setdefault(freq, []).append(pair)
    return groups
def _parse_reflectivity(line, lines): """Parse Energy [eV] reflect_xx reflect_zz""" split_line = line.split() energy = float(split_line[0]) reflect_xx = float(split_line[1]) reflect_zz = float(split_line[2]) return {"energy": energy, "reflect_xx": reflect_xx, "reflect_zz": reflect_zz}
def calc_nchk(n, k):
    """Calculate n choose k via the multiplicative formula.

    Intermediate values are floats (matching the original implementation),
    truncated to int at the end.
    """
    result = 1
    for step in range(1, k + 1):
        result = result * (n - k + step) / step
    return int(result)
def filter_by_pref(list, pref):
    """Filter strings by prefix.

    :param list: iterable of strings (NOTE(review): shadows the builtin
        ``list``; name kept for interface compatibility)
    :param pref: prefix to match, or None to return the input unchanged
    :return: the matching entries
    """
    if pref is None:
        return list
    return [entry for entry in list if entry.startswith(pref)]
def domain_str_to_labels(domain_name):
    """Return a list of domain name labels, in reverse-DNS order.

    A trailing dot (fully-qualified form) is ignored.
    """
    return domain_name.rstrip(".").split(".")[::-1]
def get_list_hashes(lst):
    """Hash every 4-element window of the list (LZ haystack).

    For LZ to beat a worst-case raw dump you need to encode at least 4
    bytes, hence the 4-wide windows (the final windows near the end are
    shorter).

    Parameters
    ----------
    lst : list
        Input list to form hashes from.

    Returns
    -------
    list
        (position, hash) pairs, one per starting position.
    """
    assert len(lst) > 0, 'Empty list in list hashes'
    return [(start, hash(tuple(lst[start:start + 4])))
            for start in range(len(lst))]
def bytes2human(n):
    """Convert a byte count to a human-readable string (e.g. '2.0K')."""
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    thresholds = {sym: 1 << ((i + 1) * 10) for i, sym in enumerate(symbols)}
    # Largest unit first; first threshold that fits wins.
    for sym in reversed(symbols):
        if n >= thresholds[sym]:
            return '%.1f%s' % (n / thresholds[sym], sym)
    return "%sB" % n
def third_rule(board: list) -> bool:
    """Check that each same-color block of cells contains the digits 1-9
    without repetition.

    Returns True if this rule of the game is followed.

    >>> third_rule(["**** ****","***1 ****","** 3****","* 4 1****",\
    " 9 5 "," 6 83 *","3 1 **"," 8 2***"," 2 ****"])
    True
    """
    uniqueness = True
    # One color block per iteration; each block is an L-shaped run of cells
    # traversed as a vertical leg followed by a horizontal leg.
    for row in range(0, 5):
        digit_seen = []
        # Vertical leg: walk down one column of the block.
        for column in range(4 - row, 9 - row):
            cell = board[column][row]
            # if (8 - row) != (row + column) and cell.isdigit():
            if cell.isdigit() and cell in digit_seen:
                # Repeated digit inside the same block breaks the rule.
                uniqueness = False
            else:
                if cell in ('*', ' '):
                    # Filler / empty cells don't count toward uniqueness.
                    continue
                else:
                    digit_seen.append(cell)
        # Horizontal leg: walk across the block's bottom row.
        for column in range(row + 1, row + 5):
            cell = board[8 - row][column]
            if cell.isdigit() and cell in digit_seen:
                uniqueness = False
            else:
                if cell in ('*', ' '):
                    continue
                else:
                    digit_seen.append(cell)
    return uniqueness
def get_crate_from_line(line):
    """Extract the crate name from a Rust ``use`` statement.

    Returns None for lines that are not ``use`` statements and for
    ``use crate...`` (crate-local) imports; otherwise strips the leading
    ``use `` and the trailing character (the semicolon).
    """
    is_use_stmt = line.startswith("use ")
    if not is_use_stmt or line.startswith("use crate"):
        # will handle crate-local imports better later
        return None
    return line[len("use "):-1]
def get_address_security(master_host, master_port):
    """Build the TLS address string for the master node."""
    return f"tls://{master_host}:{master_port}"
def apply_to_list(f, ll, if_empty):
    """Apply *f* to *ll* when it is non-empty, otherwise return *if_empty*."""
    return f(ll) if ll else if_empty
def sanitize_bvals(bvals, target_bvals=(0, 1000, 2000, 3000)):
    """Snap each b-value to its closest target b-value, in place.

    Removes small acquisition variation in b-values by replacing every
    entry of *bvals* with the nearest member of *target_bvals*.

    Parameters
    ----------
    bvals : list
        B-values to sanitize; mutated in place.
    target_bvals : sequence of numbers, optional
        Allowed b-values.  Default changed from a mutable list to an
        immutable tuple (safer default; same values, same behavior).

    Returns
    -------
    list
        The same *bvals* object, now containing only target values.
    """
    for idx, bval in enumerate(bvals):
        bvals[idx] = min(target_bvals, key=lambda t: abs(t - bval))
    return bvals
def column_number_to_letter(number):
    """
    Converts integer to letter

    :param number: Integer that represents a column (1-based)
    :return: String that represents the column in spreadsheet/Excel styling
    """
    # Repeated divmod in base 26; the "- 1" makes the alphabet 1-based
    # (1 -> 'A', 26 -> 'Z', 27 -> 'AA').
    letters = []
    while number > 0:
        number, remainder = divmod(number - 1, 26)
        letters.append(chr(ord('A') + remainder))
    return ''.join(reversed(letters))
def validate_enum_arg(arg_name, arg_value, choices, nullable=False):
    """
    Validate the value of a enumeration argument.

    Args:
        arg_name: Name of the argument.
        arg_value: Value of the argument.
        choices: Valid choices of the argument value.
        nullable: Whether or not the argument can be None?

    Returns:
        The validated argument value.

    Raises:
        ValueError: If `arg_value` is not valid.
    """
    choices = tuple(choices)
    null_is_allowed = nullable and arg_value is None
    if not null_is_allowed and arg_value not in choices:
        raise ValueError('Invalid value for argument `{}`: expected to be one '
                         'of {!r}, but got {!r}.'.
                         format(arg_name, choices, arg_value))
    return arg_value
def wafv2_custom_body_response_content(content):
    """Validate WAFv2 custom-body response content.

    Accepts between 1 and 10240 characters; returns the content unchanged.
    Raises ValueError for empty or oversized content.
    """
    # Empty/None must be checked first so a None argument raises
    # ValueError rather than TypeError from len().
    if not content:
        raise ValueError("Content must not be empty")
    if len(content) <= 10240:
        return content
    raise ValueError("Content maximum length must not exceed 10240")
def discretizer(cont_var, disc_min, disc_max, step_size):
    """
    Round the continuous parameters from Borg (defined [0, 1]) to the
    rounded parameters of the simulation.

    :param cont_var: float
        Continuous variable in [0, 1]
    :param disc_min: numeric
        Minimum of the discretized parameter
    :param disc_max: numeric
        Maximum of the discretized parameter
    :param step_size: numeric
        Interval between discretizations (int or float; the original
        only accepted float because it called ``step_size.is_integer()``,
        which raises AttributeError on int)
    :return: numeric
        Discretized value; an ``int`` when the step size is a whole
        number, otherwise a ``float``
    """
    # Map the [0, 1] variable onto the target range.
    dis = (disc_max - disc_min) * cont_var
    # Round to the nearest multiple of the step size.
    n_steps = round(dis / step_size)
    value = disc_min + step_size * n_steps
    # Whole-number steps yield an int, fractional steps a float
    # (matches the original float-only behavior).
    if float(step_size).is_integer():
        return int(value)
    return value
def create_probs_from_genotypes(genotype):
    """Create genotype probabilities from an additive genotype code.

    Codes 0/1/2 map to certain calls; -1 encodes a lower-than-normal
    confidence call with mass spread across all three genotypes.
    Any other code yields None.
    """
    probabilities = {
        0: (1, 0, 0),
        1: (0, 1, 0),
        2: (0, 0, 1),
        -1: (0.8, 0.1, 0.1),  # lower than normal probabilities
    }
    return probabilities.get(genotype)
def get_agg_sent(neg_probs, pos_probs):
    """Calculate an aggregate sentiment from the provided probabilities.

    Args:
        neg_probs: Sequence of negative-sentiment probabilities.
        pos_probs: Sequence of positive-sentiment probabilities;
            must be the same length as ``neg_probs``.

    Returns:
        Tuple ``(label, avg_prob)`` where label is -1 (negative),
        0 (neutral, |avg| <= 0.025) or 1 (positive), and
        ``avg_prob = (sum(pos) - sum(neg)) / len(pos)``.

    Raises:
        ValueError: If either sequence is empty/None, the lengths differ,
            or the averaged probability falls outside [-1, 1].
            (ValueError subclasses Exception, so callers that caught the
            old generic Exception keep working; the old message
            "neg_probs is None" was also wrong for empty lists.)
    """
    if not neg_probs:
        raise ValueError("neg_probs is empty or None")
    if not pos_probs:
        raise ValueError("pos_probs is empty or None")
    if len(neg_probs) != len(pos_probs):
        raise ValueError("length of neg_probs and pos_probs must match")
    avg_prob = (sum(pos_probs) - sum(neg_probs)) / len(pos_probs)
    if -1.0 <= avg_prob < -0.025:
        return (-1, avg_prob)
    if -0.025 <= avg_prob <= 0.025:
        return (0, avg_prob)
    if 0.025 < avg_prob <= 1.0:
        return (1, avg_prob)
    raise ValueError("avg_prob not within [-1,1]")
def add_corner_pt_to_dic(pt1, pt2, pt3, pt4, vec_x, vec_y, dic):
    """
    Fill in missing corner points of a measurement grid.

    For each of the four points pt1..pt4 that is absent from *dic*, a
    value is synthesized as the average of two neighbouring measured
    values, then stored in *dic*.

    :param pt1, pt2, pt3, pt4: (x, y) tuples used as keys into *dic*.
        NOTE(review): the branch logic suggests pt1..pt4 are the four
        corners of a cell in counter-clockwise/clockwise order -- confirm
        against the caller.
    :param vec_x: sorted-ish list of grid x coordinates (indexed with
        ``.index()`` below, so values must appear exactly).
    :param vec_y: list of grid y coordinates.
    :param dic: dict mapping (x, y) -> measured value; mutated in place.
    :return: the same *dic*, with any missing corner values filled in.
    """
    # Grid extents, used to detect when a point lies on the outer border.
    min_x = min(vec_x)
    max_x = max(vec_x)
    min_y = min(vec_y)
    max_y = max(vec_y)
    for (i, pt) in enumerate([pt1, pt2, pt3, pt4]):
        # EAFP: only synthesize a value when the point is not measured.
        try:
            dic[pt]
        except KeyError:
            # print pt1, pt2, pt3, pt4
            if i == 0:
                # pt1 missing.
                if pt1[0] == min_x and pt1[1] == min_y:
                    # Outer corner of the grid: average the two adjacent
                    # corner points of this cell.
                    dic[pt1] = (dic[pt4] + dic[pt2])/2
                elif pt1[0] == min_x:
                    # On the left border: average pt2 and the grid point
                    # one step back in y.
                    idy = vec_y.index(pt1[1])
                    dic[pt1] = (dic[pt2] + dic[(pt1[0], vec_y[idy-1])])/2
                else:
                    # Interior/bottom: average pt2 and the grid point one
                    # step back in x.
                    idx = vec_x.index(pt1[0])
                    dic[pt1] = (dic[pt2] + dic[(vec_x[idx-1], pt1[1])])/2
            elif i == 1:
                # pt2 missing (mirrored logic on the max_x side).
                if pt2[0] == max_x and pt2[1] == min_y:
                    dic[pt2] = (dic[pt1] + dic[pt3])/2
                elif pt2[0] == max_x:
                    idy = vec_y.index(pt2[1])
                    dic[pt2] = (dic[pt1] + dic[(pt2[0], vec_y[idy-1])])/2
                else:
                    idx = vec_x.index(pt2[0])
                    dic[pt2] = (dic[pt1] + dic[(vec_x[idx+1], pt2[1])])/2
            elif i == 2:
                # pt3 missing.
                if pt3[0] == max_x and pt3[1] == max_y:
                    dic[pt3] = (dic[pt4] + dic[pt2])/2
                elif pt3[0] == max_x:
                    # NOTE(review): same fallback as the corner case; the
                    # commented-out lines below suggest a y-stepped variant
                    # was intended but abandoned.
                    dic[pt3] = (dic[pt4] + dic[pt2])/2
                    # idy = vec_y.index(pt3[1]
                    # dic[pt3] = (dic[pt4] + dic[(pt3[0], vec_y(idy+1))])/2
                else:
                    idx = vec_x.index(pt3[0])
                    dic[pt3] = (dic[pt4] + dic[(vec_x[idx+1], pt3[1])])/2
            elif i == 3:
                # pt4 missing.
                if pt4[0] == min_x and pt4[1] == max_y:
                    dic[pt4] = (dic[pt1] + dic[pt3])/2
                elif pt4[0] == min_x:
                    # NOTE(review): same fallback as the corner case; see
                    # the abandoned variant below.
                    dic[pt4] = (dic[pt1] + dic[pt3])/2
                    # idy = vec_y.index(pt4[1])
                    # dic[pt4] = (dic[pt3] + dic[(pt4[0], vec_y[idy+1])])/2
                else:
                    idx = vec_x.index(pt4[0])
                    dic[pt4] = (dic[pt3] + dic[(vec_x[idx-1], pt4[1])])/2
    return dic
def speak(text: str) -> str:
    """Wrap *text* in an SSML ``<speak/>`` tag.

    @param text: content to wrap
    @return: SSML string
    """
    return '<speak>' + text + '</speak>'
def s(amount):
    """Return the correct pluralization suffix for *amount* items."""
    return '' if amount == 1 else 's'
def normalizeEntities(formattedEntities):
    """
    Normalizes the provider's entity types to match the ones used in our
    evaluation.  For this provider the mapping is an identity (its type
    names already match), but the table is kept so other providers can
    diverge.  The list is modified in place and returned.

    Arguments:
        formattedEntities {List} -- List of recognized named entities and
            their types (dicts with a 'type' key).

    Returns:
        List -- The same list, with entity types normalized.
    """
    type_map = {
        "Person": "Person",
        "Location": "Location",
        "Organization": "Organization",
        "Event": "Event",
        "Product": "Product",
    }
    for entity in formattedEntities:
        # Unknown types pass through unchanged, as before.
        entity['type'] = type_map.get(entity['type'], entity['type'])
    return formattedEntities
def print_objects_format(object_set, text):
    """
    Return a printable version of the variables in object_set with the
    header given with text.
    """
    listing = ", ".join(object_set)
    return f"{text} (total {len(object_set)}):\n\t{listing}\n"
def default_timeout(variables):
    """Return the configured page timeout, falling back to 10.

    The two-argument ``.get`` is deliberate: a stored explicit value
    (even a falsy one) is returned as-is; only a missing key yields 10.
    """
    return variables.get("default_timeout", 10)
def omit_if_false(value):
    """Return *value* unchanged when truthy, otherwise None."""
    return value or None
def gen_output_filenames(direction, rotation_angle, flip):
    """Generate output name components from direction, rotation and flip codes.

    A zero code selects the full batch of names for that axis; a non-zero
    code selects the single matching name (unknown codes yield ['']).
    Returns (proj_names, rot_names, flip_names).
    """
    direction_names = {1: 'XOY+', 2: 'XOY-', 3: 'YOZ+',
                       4: 'YOZ-', 5: 'ZOX+', 6: 'ZOX-'}
    rotation_names = {1: '_r0', 2: '_r90', 3: '_r180', 4: '_r270'}
    flip_codes = {1: '_OO', 2: '_ud', 3: '_lr'}

    if direction != 0:
        proj_names = [direction_names.get(direction, '')]
    else:
        proj_names = ['XOY+', 'XOY-', 'YOZ+', 'YOZ-', 'ZOX+', 'ZOX-']

    if rotation_angle != 0:
        rot_names = [rotation_names.get(rotation_angle, '')]
    else:
        rot_names = ['_r0', '_r90', '_r180', '_r270']

    if flip != 0:
        flip_names = [flip_codes.get(flip, '')]
    else:
        # '_lr' is deliberately excluded from the batch set
        # (flip_names = ['_OO', '_ud', '_lr'] in an earlier revision).
        flip_names = ['_OO', '_ud']

    return proj_names, rot_names, flip_names
def _floyd_warshall(graph): """ Calculate the shortest path between two tokens using the Floyd Warshall algorithm where the graph is the dependency graph """ dist = {} pred = {} for u in graph: dist[u] = {} pred[u] = {} for v in graph: dist[u][v] = float('inf') pred[u][v] = -1 dist[u][u] = 0 for neighbor in graph[u]: dist[u][neighbor] = graph[u][neighbor] pred[u][neighbor] = u for t in graph: # given dist u to v, check if path u - t - v is shorter for u in graph: for v in graph: newdist = dist[u][t] + dist[t][v] if newdist < dist[u][v]: dist[u][v] = newdist pred[u][v] = pred[t][v] # route new path through t return dist, pred
def keff_line_parse(keff_line):
    """parses through the ana_keff line in .res file

    Parameters
    ----------
    keff_line: str
        string from .res file listing IMP_KEFF

    Returns
    -------
    tuple of (str, str)
        (mean IMP_KEFF, std deviation of IMP_KEFF) -- both returned as
        strings, not floats.
    """
    # Discard everything before the '=' sign; the value follows it.
    start = keff_line.find('=')
    new_keff_line = keff_line[start:]
    # The mean and its std deviation sit inside square brackets.
    # NOTE(review): the fixed offsets below assume the .res format puts
    # exactly two characters between '[' and the first digit and one
    # before ']' -- confirm against the actual file format.
    start = new_keff_line.find('[')
    end = new_keff_line.find(']')
    # +3 and -1 is to get rid of leading and trailing whitespace
    keff_sd = new_keff_line[start + 3:end - 1]
    # Assumes exactly one space separates the two values.
    (keff, sd) = keff_sd.split(' ')
    return (keff, sd)
def likes(list_of_names: list) -> str:
    """
    >>> likes([])
    'no one likes this'
    >>> likes(["Python"])
    'Python likes this'
    >>> likes(["Python", "JavaScript", "SQL"])
    'Python, JavaScript and SQL like this'
    >>> likes(["Python", "JavaScript", "SQL", "JAVA", "PHP", "Ruby"])
    'Python, JavaScript and 4 others like this'
    """
    count = len(list_of_names)
    if count == 0:
        return "no one likes this"
    if count == 1:
        return f"{list_of_names[0]} likes this"
    first, second = list_of_names[0], list_of_names[1]
    if count == 2:
        return f"{first} and {second} like this"
    if count == 3:
        return f"{first}, {second} and {list_of_names[2]} like this"
    return f"{first}, {second} and {count - 2} others like this"
def splitStr(val, sep):
    """Split *val* (coerced to str) on *sep* and strip each piece."""
    return list(map(str.strip, str(val).split(sep)))
def split_fn_ext(filename):
    """
    Return tuple of (directory, filename stem, extension).

    The directory part keeps its trailing '/' ('' when there is none).
    NOTE: for a basename with no '.', ``rpartition`` puts the whole name
    into the *extension* slot and the stem is '' -- this quirk is
    preserved for backward compatibility.  A leading '/' on an absolute
    path rooted at '/' is likewise dropped, as before.

    (Removed a dead ``filename.rpartition('.')`` whose result was
    immediately overwritten.)
    """
    dir_part = filename.rpartition('/')
    name_part = dir_part[2].rpartition('.')
    directory = dir_part[0] + '/' if dir_part[0] else ''
    return (directory, name_part[0], name_part[2])