content
stringlengths
42
6.51k
def binary_string(number: int) -> str:
    """Render *number* as binary digits, without Python's '0b' prefix.

    :param number: the integer to convert
    :return: the binary digit string
    :rtype: str

    .. doctest:: python

        >>> binary_string(200)
        '11001000'
        >>> binary_string(10)
        '1010'
    """
    prefixed = bin(number)
    return prefixed[2:]
def zeros(shape: tuple) -> list:
    """Return a matrix (list of lists) of zeros with the given shape.

    (Old docstring said "metrics"; it is a matrix.)

    :param shape: a ``(rows, cols)`` pair
    :return: ``shape[0]`` row lists, each containing ``shape[1]`` zeros
    """
    rows, cols = shape[0], shape[1]
    return [[0] * cols for _ in range(rows)]
def reextract_table_filename(table_file_name: str) -> str:
    """Derive the re-extract table name from a ``.tbl`` file name.

    :param table_file_name: original table file name
    :return: name with ``.tbl`` replaced by ``.reextract.tbl``
    """
    old_suffix, new_suffix = '.tbl', '.reextract.tbl'
    return table_file_name.replace(old_suffix, new_suffix)
def deep_tuple(x):
    """Recursively convert a (possibly nested) list into nested tuples."""
    if not isinstance(x, list):
        return x
    return tuple(deep_tuple(item) for item in x)
def horizon_float(k0, plus_or_minus):
    """Return the float k index of the face adjacent to layer ``k0``.

    E.g. ``horizon_float(3, '+')`` is 3.5, the face between layers 3 and 4.

    :param k0: layer index (numeric)
    :param plus_or_minus: '+' for the upper face, '-' for the lower face
    :return: k0 +/- 0.5 as a float
    :raises ValueError: for any other ``plus_or_minus`` value
    """
    if plus_or_minus == '+':
        return float(k0) + 0.5
    if plus_or_minus == '-':
        return float(k0) - 0.5
    # was `assert False`, which is silently skipped under `python -O`
    raise ValueError("plus_or_minus must be '+' or '-', got %r" % (plus_or_minus,))
def check_kwargs(keys, list_of_valid_params, exception):
    """Validate that every key is allowed for a given endpoint.

    When ``list_of_valid_params`` is falsy there is nothing to check.

    :raises exception: for the first key not in the valid list
    :return: True when all keys pass
    """
    if not list_of_valid_params:
        return True
    for candidate in keys:
        if candidate not in list_of_valid_params:
            raise exception('{} is not a valid parameter.'.format(candidate))
    return True
def pad_decr(ids):
    """Drop trailing padding (ID 0) and decrement every remaining id by 1."""
    if len(ids) < 1:
        return list(ids)
    if not any(ids):
        # nothing but padding
        return []
    end = len(ids)
    while not ids[end - 1]:
        end -= 1
    return [token - 1 for token in ids[:end]]
def parser_private_data_indicator_Descriptor(data, i, length, end):
    """\
    parser_private_data_indicator_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).

    This descriptor is not parsed at the moment; the raw payload is
    returned as-is:
    { "type": "private_data_indicator", "contents" : unparsed_descriptor_contents }

    (Defined in ISO 13818-1 specification)
    """
    payload = data[i + 2:end]
    return {"type": "private_data_indicator", "contents": payload}
def get_source_uuids(*sources) -> str:
    """Return the ``dataset.file_uuid`` values of the given objects.

    Args:
        *sources: Objects whose ``dataset.file_uuid`` attributes are read
            (objects lacking them are skipped).

    Returns:
        str: Unique UUIDs joined by ", ", in order of first appearance.
        (The previous ``set()``-based dedup made the order random.)
    """
    uuids = [source.dataset.file_uuid for source in sources
             if hasattr(source, 'dataset') and hasattr(source.dataset, 'file_uuid')]
    # dict.fromkeys dedups while preserving insertion order
    unique_uuids = list(dict.fromkeys(uuids))
    return ', '.join(unique_uuids)
def parse_date(datestring, languagecode):
    """Split 'yyyy-mm-dd' into (year, month-abbreviation, day).

    Unknown language codes fall back to English; month numbers without a
    translation are returned unchanged.
    """
    month_names = {
        'en': {'01': 'Jan', '02': 'Feb', '03': 'Mar', '04': 'Apr',
               '05': 'May', '06': 'Jun', '07': 'Jul', '08': 'Aug',
               '09': 'Sep', '10': 'Oct', '11': 'Nov', '12': 'Dec'},
        'nl': {'01': 'Jan', '02': 'Feb', '03': 'Mar', '04': 'Apr',
               '05': 'Mei', '06': 'Jun', '07': 'Jul', '08': 'Aug',
               '09': 'Sep', '10': 'Okt', '11': 'Nov', '12': 'Dec'},
    }
    year, month, day = datestring.split('-')
    table = month_names.get(languagecode, month_names['en'])
    return year, table.get(month, month), day
def addhttp(url, s=False):
    """Ensure *url* carries an http(s) scheme.

    URLs already starting with ``http://`` or ``https://`` are returned
    unchanged; otherwise ``https://`` (when ``s`` is truthy) or
    ``http://`` is prepended.  (The old ternary prepended ``https://``
    in both branches, making ``s`` useless.)

    :param url: the URL to normalise
    :param s: prefer the secure scheme when a prefix must be added
    """
    if url.startswith(('http://', 'https://')):
        return url
    return ('https://' if s else 'http://') + url
def gradient_colors(nb_colors, color_start=None, color_end=None):
    """Produce a linear color gradient of ``nb_colors`` RGB triples.

    :param nb_colors: number of colors to generate (>= 1)
    :param color_start: first color; defaults to red ``[1, 0, 0]``
    :param color_end: last color; defaults to blue ``[0, 0, 1]``
    :return: list of ``[r, g, b]`` lists interpolated start -> end
    """
    if color_start is None:
        color_start = [1, 0, 0]
    if color_end is None:
        color_end = [0, 0, 1]
    gradient = [color_start]
    # A single color means nothing to interpolate: just the start color.
    # (The old comments wrongly claimed black->white defaults and a
    # black return for nb_colors == 1.)
    if nb_colors == 1:
        return gradient
    # Sample each channel at t = i / (nb_colors - 1), i = 1..nb_colors-1
    for i in range(1, nb_colors):
        t = float(i) / (nb_colors - 1)
        gradient.append([color_start[c] + t * (color_end[c] - color_start[c])
                         for c in range(3)])
    return gradient
def make_ternary_dict(x, y):
    """Create a dictionary of location pairs to their values.

    :param x: list of 2- or 3-element position lists (only the first two
        coordinates form the key)
    :param y: 1-D list of values aligned with ``x``
    :returns: dict -- the dictionary for the ternary heatmap data function
    :raises: AttributeError, KeyError
    """
    return {(point[0], point[1]): value for point, value in zip(x, y)}
def _get_text_recursively(text_entry) -> str:
    """Collect all text contained in an arbitrarily nested object.

    Parameters
    ----------
    text_entry : any
        A string, a list, a dict (whose values are recursed into),
        or None.

    Returns
    -------
    str
        All strings found, joined with single spaces ('' for None).
    """
    if text_entry is None:
        return ''
    # isinstance instead of `type(...) ==` — the idiomatic check, and it
    # also accepts str subclasses
    if isinstance(text_entry, str):
        return text_entry
    items = text_entry if isinstance(text_entry, list) else list(text_entry.values())
    return ' '.join(_get_text_recursively(item) for item in items)
def composite_colors(first, second):
    """Alpha-composite *first* over *second*.

    Parameters
    ----------
    first : tuple
        The rgba tuple of the top color; floats in 0.0 - 1.0.
    second : tuple
        The rgba tuple of the bottom color, same format.

    Returns
    -------
    result : tuple
        The composited rgba color tuple.
    """
    r1, g1, b1, a1 = first
    r2, g2, b2, a2 = second
    # weight of the bottom color after the top layer is applied
    weight = a2 * (1.0 - a1)
    channels = tuple(top * a1 + bottom * weight
                     for top, bottom in zip((r1, g1, b1), (r2, g2, b2)))
    return channels + (a1 + weight,)
def set_debug(amount=1):
    """
    Set the current global debug verbosity level.

    Parameters:
        amount (int): the new level

    Returns:
        int: the new level
    """
    global DEBUG
    DEBUG = amount
    return amount
def gen_pred_text(pred_texts, img_info):
    """Attach predicted texts to their image-info records, in place.

    Args:
        pred_texts(list): predicted text per image
        img_info(list): image info dicts, aligned with ``pred_texts``

    Returns:
        list(dict): the same ``img_info`` with each ann text filled in
    """
    for index, record in enumerate(img_info):
        record['img_info']['ann']['text'] = pred_texts[index]
    return img_info
def TrimExtraIndent(text_block):
    """Trim a uniform amount of whitespace off of each line in a string.

    The minimum indent over all non-blank lines is computed and removed
    from every line, leaving the block with no shared indentation.

    Args:
      text_block: a multiline string

    Returns:
      text_block with the common whitespace indent of each line removed.
    """
    lines = text_block.split('\n')

    def leading_ws(line):
        # count whitespace characters before the first non-space char
        total = 0
        for ch in line:
            if not ch.isspace():
                break
            total += 1
        return total

    # blank lines do not participate in the minimum
    common = min(leading_ws(line) for line in lines if line)
    return '\n'.join(line[common:] for line in lines)
def lon180_2lon360(lon):
    """
    Convert longitude from the [-180, 180] convention to [0, 360).

    :param lon: longitude in degrees (anything supporting ``%``)
    :return: equivalent longitude modulo 360
    """
    return lon % 360
def djoin(*args):
    """'dotless' join, for nicer paths.

    Leading '' and '.' components are skipped before joining; if every
    component is trivial, '.' itself is returned.
    """
    from os.path import join
    meaningful = list(args)
    while meaningful and meaningful[0] in ('', '.'):
        meaningful.pop(0)
    if not meaningful:
        return '.'
    return join(*meaningful)
def join_strings(strings, level):
    """
    Join *strings* with a level-dependent separator.

    :type strings: collections.Iterable[unicode or str]
    :param strings: strings to join
    :type level: int
    :param level: deep level; ``None`` selects ', ', anything else ','
    :rtype: unicode
    :return: joined string
    """
    separator = u', ' if level is None else u','
    return separator.join(strings)
def add_rollup(raw, tag):
    """Append ' R' to every performance-report line for *tag* in *raw*.

    A qualifying line starts with '[ PERFORMANCE ] ' followed by the tag
    and a space, and is left alone if it already ends with ' R'.
    """
    marker = '[ PERFORMANCE ] '
    updated = []
    for line in raw.split('\n'):
        pieces = line.split(marker)
        is_perf = (len(pieces) == 2 and pieces[0] == ''
                   and pieces[1].startswith(tag + ' '))
        if is_perf and not line.endswith(' R'):
            line += ' R'
        updated.append(line)
    return '\n'.join(updated)
def height2width(bbox, height):
    """Scale *height* by the bbox aspect ratio to get the matching width.

    :param bbox: (x1, y1, x2, y2) bounding box
    :param height: target height
    :return: int width preserving the bbox's width/height ratio
    """
    x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
    aspect = (x2 - x1) / (y2 - y1)
    return int(height * aspect)
def color_string(guess: str, target: str) -> str:
    """Return per-letter Wordle feedback for *guess* against *target*.

    'g' = right letter in the right spot, 'y' = letter occurs elsewhere
    in target, 'b' = letter absent.
    """
    colors = []
    for pos, letter in enumerate(guess):
        if target[pos] == letter:
            colors.append("g")
        elif letter in target:
            colors.append("y")
        else:
            colors.append("b")
    return "".join(colors)
def HeadingStr(heading):
    """Return a compass-point abbreviation for a heading in degrees.

    :param heading: heading in degrees, or None
    :return: one of N/NE/E/SE/S/SW/W/NW, or "?" when heading is None
    """
    if heading is None:  # was `!= None`; identity test is the idiom
        return "?"
    if heading < 22.5 or heading >= 337.5:
        return "N"
    # remaining sectors each span 45 degrees starting at 22.5
    sectors = ("NE", "E", "SE", "S", "SW", "W", "NW")
    return sectors[int((heading - 22.5) // 45.0)]
def sets_to_clusters(sets, idx_to_name):
    """Convert a partition given as sets into a per-object label vector.

    ``idx_to_name[i]`` must appear in exactly one of ``sets``; the
    returned list holds, for each i, the index of that containing set.
    """
    assert len(idx_to_name) == sum(len(block) for block in sets)
    labels = []
    for i in range(len(idx_to_name)):
        name = idx_to_name[i]
        owners = [label for label, block in enumerate(sets) if name in block]
        assert len(owners) == 1
        labels.append(owners[0])
    return labels
def sfbool(string):
    """
    Parse a string into a boolean value.

    The string "true", regardless of case, parses to True; every other
    string parses to False.

    :raises ValueError: if anything other than a string is passed.
        (Previously a non-str raised AttributeError from ``.lower()``,
        contradicting this documented contract.)
    """
    if not isinstance(string, str):
        raise ValueError('sfbool() expects a string, got %r' % (string,))
    return string.lower() == 'true'
def is_valid(s):
    """Check whether the bracket string *s* is balanced and well-nested.

    :type s: str
    :rtype: bool
    """
    closer_for = {"(": ")", "{": "}", "[": "]"}
    expected = []
    for ch in s:
        if ch in closer_for:
            expected.append(closer_for[ch])
        elif not expected or expected.pop() != ch:
            return False
    return not expected
def formatter(data, headers):
    """Dump the dict's own string form into its "body" field.

    The body is the str() of the dict as it looked before assignment;
    ``headers`` is accepted for interface compatibility but unused.
    """
    data["body"] = str(data)
    return data
def apply_list_offset(lst, offset):
    """
    :param lst: list of values
    :param offset: value added to each element
    :return: a new list with the offset applied to every element
    """
    return [value + offset for value in lst]
def samesign(a, b):
    """
    Check if two numbers have the same sign.

    Parameters
    ----------
    a: `float`
    b: `float`

    Returns
    -------
    `bool`
        True if a and b are both positive or both negative; zero never
        matches anything, since a * b == 0 then.
    """
    return a * b > 0
def permutate(array: list, permutation: list):
    """Reorder *array* in place by swapping according to *permutation*.

    Args:
        array: the elements to reorder (mutated in place)
        permutation: target positions; entry ``i`` triggers a swap of
            ``array[i - 1]`` with the element at the current cursor

    Returns:
        The same ``array`` object after the swaps.

    NOTE(review): ``i - 1`` suggests 1-based permutation entries while
    ``_counter == i`` compares against a 0-based cursor — confirm the
    intended permutation convention with callers.
    """
    _swapped_array = []  # cursor positions that already took part in a swap
    _counter = 0         # current 0-based cursor into `array`
    for i in permutation:
        # skip fixed points and entries targeting already-swapped slots
        if _counter == i or i in _swapped_array:
            _counter += 1
            continue
        _temp = i-1                  # permutation entry -> 0-based index
        _swap = array[_temp]
        _sub = array[_counter]
        array[_temp] = _sub          # swap array[_temp] <-> array[_counter]
        array[_counter] = _swap
        _swapped_array.append(_counter)
        _counter += 1
    return array
def format_str_value(value, format_str):
    """Format *value* with a format spec such as '02d'.

    An empty/None spec falls back to plain ``str(value)``.
    """
    if not format_str:
        return str(value)
    template = "{0:" + format_str + "}"
    return template.format(value)
def nth_percentile(values, n=0.95):
    """Return the element of *values* closest to the nth percentile.

    For example, nth_percentile([4, 2, 1, 5], .75) returns 5.
    (The old docstring's example answer of 4 did not match the code.)

    :param values: non-empty sequence of comparable values
    :param n: percentile as a fraction in [0, 1]
    :return: the selected element
    """
    sorted_vals = sorted(values)
    # clamp so n == 1.0 returns the maximum instead of IndexError
    ind = min(int(len(sorted_vals) * n), len(sorted_vals) - 1)
    return sorted_vals[ind]
def mock_purge_success(url, request):
    """Mock a successful CCU purge request (HTTP 201 response payload).

    ``url`` and ``request`` are accepted for the mock interface but not
    used; the payload is fixed.
    """
    purge_id = "1234-456-7890"
    body = {
        "estimatedSeconds": 420,
        "progressUri": "/ccu/v2/purges/" + purge_id,
        "purgeId": purge_id,
        "supportId": "123456789",
        "httpStatus": 201,
        "detail": "Request accepted.",
        "pingAfterSeconds": 420,
    }
    return {
        'status_code': 201,
        'content-type': 'application/json',
        'server': 'Apache',
        'content-location': '/ccu/v2/purges/' + purge_id,
        'content': body,
    }
def create_cubes(n):
    """Return the cubes of 0..n-1 as a list."""
    return [value ** 3 for value in range(n)]
def _make_diamond_gradient(X, Y, angle):
    """Generates index map for diamond gradients.

    The (X, Y) coordinates are rotated by *angle* degrees and the L1
    norm of the rotated coordinates is returned, giving diamond-shaped
    level sets.
    """
    import numpy as np
    theta = np.radians(angle % 360)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotated_x = cos_t * X - sin_t * Y
    rotated_y = sin_t * X + cos_t * Y
    return np.abs(rotated_x) + np.abs(rotated_y)
def lerp_vector(v1, v2, pct):
    """Linearly interpolate *pct* of the way from 3-vector v1 to v2."""
    remainder = 1 - pct
    return (v1[0] * remainder + v2[0] * pct,
            v1[1] * remainder + v2[1] * pct,
            v1[2] * remainder + v2[2] * pct)
def apply_building_mapping(mapdict, label):
    """Bin *label* into a building category using a mapping dict.

    :param mapdict: category -> {'labels': [...]} mapping (YAML-derived)
    :param label: label to categorise
    :return: matching category name, or "house" when none matches
    """
    for category, spec in mapdict.items():
        if label in spec['labels']:
            return category
    return "house"
def get_history_text(workflow_count):
    """
    Build the email-body text for closed-workflow counts.

    :param workflow_count: dict of close status -> run count
    :return: newline-prefixed "status: count" lines in sorted status
        order, or None when the dict is empty
    """
    lines = ["\n" + status + ": " + str(workflow_count[status])
             for status in sorted(workflow_count)]
    history_text = "".join(lines)
    return history_text if history_text else None
def noNewLine(text):
    """Delete all '\\n' and '\\r' characters in a string.

    :param text: the input string (parameter renamed from ``str``,
        which shadowed the builtin)
    :return: the string without newlines or carriage returns
    """
    return text.replace('\n', '').replace('\r', '')
def green(string: str) -> str:
    """Wrap *string* in ANSI green colour codes.

    Args:
        string (str): Input string

    Returns:
        str: Green string
    """
    return "".join(("\033[92m", string, "\033[0m"))
def middle_atom(rotors, k):
    """
    Locate atom *k* inside a rotor and return suitable z-matrix neighbors.

    Middle positions (indices 1 and 2 of a rotor) are checked in a first
    pass, end positions (indices 0 and 3) in a second, matching the
    original preference order.

    :return: (1, [neighbor_a, neighbor_b]) when found, else (0, [])
    """
    # middle positions first
    for rot in rotors:
        if rot[1] == k:
            return 1, [rot[0], rot[2]]
        if rot[2] == k:
            return 1, [rot[1], rot[3]]
    # then rotor ends
    for rot in rotors:
        if rot[0] == k:
            return 1, [rot[1], rot[2]]
        if rot[3] == k:
            return 1, [rot[2], rot[1]]
    return 0, []
def build_simc_file(talent_string, covenant_string, profile_name):
    """Return the simc profile path built from talent/covenant strings.

    Falsy talent or covenant components are simply omitted from the
    path, e.g. profiles/<talent>/<covenant>/<profile>.simc down to
    profiles/<profile>.simc.
    """
    components = [part for part in (talent_string, covenant_string) if part]
    components.append(profile_name)
    return "profiles/{0}.simc".format("/".join(components))
def is_wanted_dir(path, files):
    """
    Check if the directory matches required criteria.

    - It must be in the 'target' directory
    - It should contain files
    - its path should contain one of: doc/pod, lib/perl, pan
    """
    wanted_fragments = ("doc/pod", "lib/perl", "pan")
    has_fragment = any(fragment in path for fragment in wanted_fragments)
    return 'target' in path and len(files) > 0 and has_fragment
def p_a1(npsyns, ninputs):
    """
    Probability of selecting one input given ninputs and npsyns attempts.

    This uses the intersection operator.

    @param npsyns: The number of proximal synapses.
    @param ninputs: The number of inputs.

    @return: The computed probability.
    """
    numerator = npsyns + 1.0
    return numerator / ninputs
def get_gpu_share_cfg(k8s_conf):
    """
    Returns GPU share enablement choice.

    :return: the configured 'enable_gpu_share' value when truthy,
             otherwise None (matching the old implicit-None fallthrough)
    """
    return k8s_conf.get('enable_gpu_share') or None
def _slowXTextDecode(s, errors=None):
    """
    Decode the xtext-encoded string C{s}.

    '+HH' escapes become the character with hex code HH; malformed
    escapes are kept verbatim.  Returns (decoded, len(s)).  ``errors``
    is accepted for codec-interface compatibility but unused.
    """
    decoded = []
    pos = 0
    total = len(s)
    while pos < total:
        if s[pos] == '+':
            escape = s[pos:pos + 3]
            try:
                decoded.append(chr(int(escape[1:], 16)))
            except ValueError:
                # not two hex digits: keep the raw escape text
                decoded.append(escape)
            pos += 3
        else:
            decoded.append(s[pos])
            pos += 1
    return (''.join(decoded), total)
def cost(dic, PhoneCost, EmailCost, OverbondCost):
    """Sum the weighted connection costs over all edges in *dic*.

    Each edge value is a 3-vector aligned with
    (OverbondCost, PhoneCost, EmailCost).  Only one endpoint records
    each connection, so no halving is needed.
    """
    price = [OverbondCost, PhoneCost, EmailCost]
    return float(sum(
        weight * rate
        for neighbours in dic.values()        # every vertex in the clique
        for vector in neighbours.values()     # every neighbour of that vertex
        for weight, rate in zip(vector, price)
    ))
def process_base64(img_string: str):
    """
    Convert a base64 image string to a UTF-8 byte string.

    Example: "data:image/png;base64,iVBORw0KGgoAAAANSUhE..."
    becomes b'iVBORw0KGgoAAAANSUhE...'
    """
    if img_string.startswith("data"):
        # keep only the payload after the final comma of the data URI
        img_string = img_string.rsplit(",", 1)[-1]
    return img_string.encode("utf-8")
def getNewRecordsFromEventObject(event=None):
    """
    Extract (bucketName, objectKey) tuples from an S3 event object.

    :raises ValueError: when the event is missing or has no Records
    :return: list of (bucket name, object key) tuples
    """
    if not event or not event["Records"]:
        raise ValueError("no Records to process")
    return [(record['s3']['bucket']['name'], record['s3']['object']['key'])
            for record in event['Records']]
def _parse_integrator(int_method):
    """parse the integrator method to pass to C

    Unknown methods map to 0 (non-C integration).
    """
    codes = {
        'rk4_c': 1,
        'rk6_c': 2,
        'symplec4_c': 3,
        'symplec6_c': 4,
        'dopr54_c': 5,
    }
    return codes.get(int_method.lower(), 0)
def get_class(module: str, *attrs):
    """cls = get_class("module", "class / static function", "class static function")

    Imports *module* and walks down the given attribute chain.
    """
    from importlib import import_module
    target = import_module(module)
    for attr_name in attrs:
        target = getattr(target, attr_name)
    return target
def convert_to_bool(text):
    """
    Convert a few common variations of "true" and "false" to boolean.

    Ints and numeric strings go through ``bool(int(text))``; otherwise
    the words true/yes/false/no are recognised in any case.

    :param text: value to interpret
    :return: boolean
    :raises ValueError: when the value cannot be interpreted
    """
    try:
        return bool(int(text))
    except (TypeError, ValueError):  # was a bare except hiding real errors
        pass
    normalized = str(text).lower()
    if normalized in ("true", "yes"):
        return True
    if normalized in ("false", "no"):
        return False
    # NOTE: the old `text == 0` / `text == 1` checks were unreachable:
    # `text` is a str here and numeric input already returned above.
    raise ValueError("cannot convert {!r} to bool".format(text))
def concat(l):
    """concat([[1,2], [3]]) => [1,2,3]

    Concats a list of lists into a flat list.
    """
    return [element for sub in l for element in sub]
def getPdbOccupancy(a):
    """Return the occupancy field of a PDB atom line (columns 61-67).

    :param a: a PDB record line
    :return: the occupancy as float, or 0.0 when missing/unparsable
    """
    try:
        return float(a[60:67])
    except (ValueError, TypeError):  # narrowed from a bare except
        return 0.0
def intersection(actual, predicted):
    """
    Compute the intersection between ground-truth and predicted lists.

    :param actual: list containing ground truths
    :param predicted: list containing predictions
    :return: set of elements common to both lists
    """
    return set(actual) & set(predicted)
def _get_link(name, link, data=None):
    """
    Build an RFC 5988 Link header field for *link* with rel *name*.

    Extra key="value" pairs from *data* (if any) are appended after the
    link value and rel field, per
    https://tools.ietf.org/html/rfc5988#section-5
    """
    base = '<{0}>; rel="{1}"'.format(link, name)
    if data is None:
        return base
    extras = '; '.join('{0}="{1}"'.format(key, value)
                       for key, value in data.items())
    if extras:
        return '{0}; {1}'.format(base, extras)
    return base
def lineFunc(x, m, k):
    """Evaluate the line y = m*x + k.

    Arguments:
        x: [float] Independent variable.
        m: [float] Slope.
        k: [float] Intercept.

    Return:
        y: [float] Line evaluation.
    """
    y = m * x + k
    return y
def state2feature(key):
    """
    :returns: the feature string from the given key
    :rtype: str

    Everything up to and including the first quote plus the two
    following characters is dropped; keys without a quote come back
    unchanged.
    """
    quote_pos = key.find("'")
    return key if quote_pos < 0 else key[quote_pos + 3:]
def parsable(instring):
    """
    Return an easily parsable list of strings from an input string.

    Tabs become spaces, internal whitespace runs collapse to one space,
    blank lines and lines starting with "@@" or "$" are dropped, and
    TABLE_VALUE ... TABLE_END runs are folded onto a single line.
    """
    lines = instring
    lines = lines.replace('\t', ' ')
    lines = lines.strip()  # strips trailing and leading whitespace
    splitlines = lines.split('\n')  # split by newlines
    # separates everything by just one whitespace character
    splitlines = [' '.join(k.split()) for k in splitlines if k != '']
    # drop comment/control lines
    splitlines = [l for l in splitlines if not (l.startswith("@@") or l.startswith("$"))]
    # Concatenate table values to table end on to one line
    # very hacky, cannot handle cases where things are in an unexpected order or abbreviated.
    new_splitlines = []
    table_values_line = ''   # accumulator for the folded table line
    capture_values = False   # True while between TABLE_VALUE and TABLE_END
    for line in splitlines:
        if line.startswith('TABLE_VALUE'):
            # start (or continue) folding a table block
            capture_values = True
            table_values_line = ''.join([table_values_line, line])
        elif line.startswith('TABLE_END'):
            # close the table block and flush the accumulated line
            capture_values = False
            table_values_line = ' '.join([table_values_line, line])
            new_splitlines.append(table_values_line)
            table_values_line = ''
        elif capture_values:
            # interior table rows are comma-appended to the same line
            table_values_line = ', '.join([table_values_line, line])
        else:
            new_splitlines.append(line)
    splitlines = new_splitlines
    return splitlines
def get_type_str(json):
    """
    Recreate the type-description string from a type dict.

    The 'type' value comes first; 'typedef' entries recurse, and every
    other value is wrapped in literal braces.

    :param json: input json
    :return: json string.
    """
    pieces = []
    if json.get('type') is not None:
        pieces.append(json['type'])
    for key, val in json.items():
        if key == 'type':
            continue
        if key == 'typedef':
            pieces.append(get_type_str(val))
        elif isinstance(val, (list, dict)):
            inner = ','.join(str(item) for item in val)
            pieces.append(' {} {} {}'.format('{', inner, '}'))
        else:
            pieces.append(' {} {} {}'.format('{', val, '}'))
    return ''.join(pieces)
def getJ1939ProtocolString(protocol = 1, Baud = "Auto", Channel = None, SampleLocation = 95, SJW = 1,
                           PROP_SEG = 1, PHASE_SEG1 = 2, PHASE_SEG2 = 1, TSEG1 = 2, TSEG2 = 1,
                           SampleTimes = 1) -> bytes:
    """
    Generate the fpchProtocol string for the ClientConnect function.

    The default values above were made up on the spot and shouldn't
    necessarily be used.  IDSize is always set to 29 where relevant,
    since that is its only valid value.  A Channel argument is also
    accepted and appended when given.

    Examples:
    - protocol1 = J1939.getProtocolString(protocol = 1, Baud = "Auto")
    - protocol2 = J1939.getProtocolString(protocol = 3, Baud = 500, SampleLocation = 75, SJW = 3, IDSize = 29)
    """
    chan_arg = "" if Channel is None else ",Channel={}".format(Channel)
    if protocol == 1:
        spec = "J1939:Baud={}".format(Baud)
    elif protocol == 2:
        spec = "J1939"
    elif protocol == 3:
        spec = "J1939:Baud={},SampleLocation={},SJW={},IDSize=29".format(
            Baud, SampleLocation, SJW)
    elif protocol == 4:
        spec = "J1939:Baud={},PROP_SEG={},PHASE_SEG1={},PHASE_SEG2={},SJW={},IDSize=29".format(
            Baud, PROP_SEG, PHASE_SEG1, PHASE_SEG2, SJW)
    elif protocol == 5:
        spec = "J1939:Baud={},TSEG1={},TSEG2={},SampleTimes={},SJW={},IDSize=29".format(
            Baud, TSEG1, TSEG2, SampleTimes, SJW)
    else:
        # unknown protocol numbers fall back to the bare string,
        # with no channel suffix (matching the original)
        return b"J1939"
    return bytes(spec + chan_arg, 'utf-8')
def min_(string):
    """
    Return *string* with every underscore removed.

    Parameters
    ----------
    string : str
        A string containing underscores

    Returns
    -------
    str
        The same text without underscores
    """
    return ''.join(string.split('_'))
def compare(sequenceA, sequenceB, gaps=False):
    """Compare two aligned sequences and return the percent identity.

    If *gaps* is True a shared gap ("-" in both sequences) counts as
    identical; otherwise shared-gap columns are excluded from the
    denominator.

    Args:
        sequenceA (str): The first sequence.
        sequenceB (str): The second sequence.
        gaps (bool): Should shared gaps count as identical? (Default: False)

    Returns:
        float: percent identity.
    """
    # The original asserted len(A) == len(A), which could never fail.
    assert len(sequenceA) == len(sequenceB), "Sequence lengths do not match"
    length = len(sequenceA)
    identical = 0
    gap_length = 0
    for pos in range(length):
        shared_gap = sequenceA[pos] == "-" and sequenceB[pos] == "-"
        if shared_gap and not gaps:
            gap_length += 1
            continue
        if sequenceA[pos] == sequenceB[pos]:
            identical += 1
    if gap_length == length:
        # every column is a shared gap: nothing comparable
        # (replaces the old `length += 1` divide-by-zero dodge, which
        # also produced 0.0)
        return 0.0
    return identical / (length - gap_length) * 100
def _underline(string, character="-"):
    """Build a header-underline row: one *character* per char of *string*."""
    return "".join(character for _ in string)
def get_model_name(name, batch_size, learning_rate, epoch):
    """
    Generate a checkpoint path encoding the hyperparameter values.

    Args:
        name: base model name
        batch_size: training batch size
        learning_rate: optimiser learning rate
        epoch: epoch number
        (the old docstring described a single ``config`` object this
        function never took)

    Returns:
        path: "./checkpoints/model_<name>_bs<bs>_lr<lr>_epoch<epoch>"
    """
    return "./checkpoints/model_{0}_bs{1}_lr{2}_epoch{3}".format(
        name, batch_size, learning_rate, epoch)
def count_significant_bits(input_x: int) -> int:
    """
    Count the significant bits of an integer, ignoring any negative
    sign, leading zeroes, and trailing zeroes.

    For example, -0b000110010000 has 5 significant bits.
    """
    magnitude = abs(input_x)
    if magnitude == 0:
        return 0
    # (m & -m) isolates the lowest set bit; its bit_length - 1 is the
    # number of trailing zero bits
    trailing_zeros = (magnitude & -magnitude).bit_length() - 1
    return magnitude.bit_length() - trailing_zeros
def format_fing(fingerprint):
    """
    Format a fingerprint by capitalizing it and adding spaces every four
    characters.

    >>> format_fing('abc123def456ghi789jkl012mno345pqr678stu9')
    'ABC1 23DE F456 GHI7 89JK L012 MNO3 45PQ R678 STU9'

    @type fingerprint: C{string} or C{buffer}
    @param fingerprint: The 40-character fingerprint.

    @rtype: C{string}
    @return: The capitalized fingerprint with spaces every four characters.
    """
    text = str(fingerprint)
    chunks = (text[start:start + 4] for start in range(0, 40, 4))
    return " ".join(chunks).upper()
def convert_to_list(data):
    """Encode a dict of parameters as a URL query string.

    (The old docstring claimed a "2d list" input; the function has
    always consumed a dict.)  Values are NOT URL-escaped.

    :param data: mapping of parameter names to values
    :return: "?key=value&key=value..." string
    """
    return "?" + "&".join("{}={}".format(key, value)
                          for key, value in data.items())
def sln(cost, salvage, life):
    """
    Return the straight-line depreciation of an asset for one period:
    (cost - salvage) / life.
    """
    depreciable = float(cost) - float(salvage)
    return depreciable / float(life)
def host_match(cursor, name, topic=None):
    """ Check for matches with supplied values
     0 - no matches
    -1 - error
     n - number of matching rows (should be only 1)"""
    try:
        if topic is None:
            query = '''select count(*) from host_activity where host=? and topic is NULL'''
            params = (name,)
        else:
            query = '''select count(*) from host_activity where host=? and topic=?'''
            params = (name, topic,)
        return cursor.execute(query, params).fetchone()[0]
    except:
        # any DB failure is reported as -1, per the documented contract
        return -1
def capitalize(item):
    """Return a copy of the string with its first character capitalized
    and the rest lowercased.

    Thin wrapper around :py:meth:`str.capitalize`.
    """
    result = item.capitalize()
    return result
def generate_cat_num_to_artifacts_dict(artifacts_summary=None, artifacts_details=None, append=False):
    """Make a dict of cat nums to artifacts from a dictionary of artifacts.

    The artifacts_summary parameter is the result of calling
    extract_all_of_artifacts_dir.  The artifacts_details parameter is
    the result of calling extract_appendix_b.  When ``append`` is True
    the raw artifact records themselves are stored; otherwise only the
    structural fields (page numbers, zone info) are filled in.
    """
    all_artifacts = {}
    if artifacts_details:
        # First pass: seed entries from the appendix-B detail records.
        for art_category in artifacts_details.values():
            for artifact in art_category['artifacts']:
                cat_no = artifact['Catalog No.']
                if cat_no not in all_artifacts:
                    all_artifacts[cat_no] = {
                        "details": [],
                        "detailsFieldOrder": art_category["fields"],
                        "appendixBPageNum": art_category['pageNum'],
                        "zoneNum": None,
                        "parentExcPage": None,
                        "summary": None
                    }
                if append:
                    all_artifacts[cat_no]['details'].append(artifact)
    if artifacts_summary:
        # Second pass: merge in the per-excavation summary records.
        for exc_element in artifacts_summary.values():
            for zone in exc_element['zones']:
                for artifact in zone['artifacts']:
                    cat_no = artifact['Cat. No.']
                    if cat_no not in all_artifacts:
                        # summary-only artifact: no detail fields known
                        all_artifacts[cat_no] = {
                            "details": None,
                            "detailsFieldOrder": None,
                            "appendixBPageNum": None
                        }
                    if 'More' in artifact:
                        if artifact['More']:
                            # e.g. ".../page12.html" -> "12"
                            appendix_b_page_num = artifact['More'].split('/page')[1].split('.')[0]
                            if (all_artifacts[cat_no]['appendixBPageNum'] and
                                    all_artifacts[cat_no]['appendixBPageNum'] != appendix_b_page_num):
                                # conflicting page numbers are silently ignored
                                pass  # print("Discrepancy found for " + cat_no)
                            else:
                                all_artifacts[cat_no]['appendixBPageNum'] = appendix_b_page_num
                    summary = None
                    if append:
                        summary = artifact
                    all_artifacts[cat_no].update({
                        "summary": summary,
                        "zoneNum": exc_element['zones'].index(zone),
                        "parentExcPage": exc_element['parentExcPage']
                    })
    return all_artifacts
def FormatYesNo(value):
    """Returns "Yes" if *value* is truthy, "No" otherwise.

    Reverses the transformation defined by :py:func:`ParseYesNo`."""
    return u'Yes' if value else u'No'
def arr(num=0):
    """Return the list [0, 1, ..., num-1]."""
    return list(range(num))
def _validate_name_index_duplication(params):
    """
    Validate the entries for duplicated names.

    (Despite the original docstring, only the 'Name' field is checked,
    not indices.)  Replaces the old O(n^2) pairwise scan with a single
    seen-set pass.

    :param params: Ansible list of dict
    :return: "" when all names are unique, otherwise
        "duplicate name <name>" for the first repeated name encountered.
    """
    seen = set()
    for entry in params:
        name = entry['Name']
        if name in seen:
            return "duplicate name {0}".format(name)
        seen.add(name)
    return ""
def get_current_sentence(target, mini):
    """
    Find the boundaries of the segment around position *mini*.

    The nearest newline and space on either side of ``mini`` are
    located; the closer left boundary picks the start and the closer
    right boundary the end.

    :param target: the full text being scanned
    :param mini: character index to center on
    :return: (start, end) indices into ``target``

    NOTE(review): the asymmetric offsets (+1 past a newline, +2 past a
    space) look deliberate but are undocumented — confirm the intent.
    """
    lfstart = target.rfind("\n", 0, mini)   # newline before mini (-1 if none)
    lfend = target.find("\n", mini)         # newline at/after mini
    if lfend < 0:
        lfend = len(target)                 # no newline: run to the end
    spstart = target.rfind(" ", 0, mini)    # space before mini (-1 if none)
    spend = target.find(" ", mini)          # space at/after mini
    if spend < 0:
        spend = len(target)
    if lfstart >= spstart:
        # newline is the nearer (or only) left boundary
        start = lfstart + 1
        if start < 0:
            start = 0                       # defensive; rfind >= -1 so start >= 0
    else:
        start = spstart + 2
    # the end is whichever right boundary comes first
    if spend < lfend:
        end = spend
    else:
        end = lfend
    return (start, end)
def date_parser(dates):
    """
    Return just the date portion (first 10 characters) of each datetime
    string.

    Example
    -------
    Input: ['2019-11-29 12:50:54', '2019-11-29 12:46:53']
    Output: ['2019-11-29', '2019-11-29']
    """
    return [stamp[:10] for stamp in dates]
def set_overlap(source_set, target_set):
    """Compute the overlap score between a source and a target set.

    It is the size of the intersection of the two sets divided by the
    size of the target set, so the result is always within [0, 1].
    """
    shared = target_set.intersection(source_set)
    overlap = len(shared) / float(len(target_set))
    assert 0. <= overlap <= 1.
    return overlap
def getjov_gravity(Rp, Mp):
    """Surface gravity in cgs from radius and mass in Jovian units.

    Args:
        Rp: radius in units of the Jovian radius
        Mp: mass in units of the Jovian mass

    Returns:
        gravity (cm/s2)

    Note:
        Equals const.G * (Mp*const.MJ) / (Rp*const.RJ)**2 with the
        constant prefactor precomputed below.
    """
    GRAVITY_1MJ_1RJ = 2478.57730044555  # cm/s2 for Mp = Rp = 1
    return GRAVITY_1MJ_1RJ * Mp / Rp ** 2
def shorten_str(s: str, maxlen: int = 32):
    """
    Truncate *s* to at most *maxlen* characters, replacing the tail
    with '...' when truncation happens.
    """
    if len(s) <= maxlen:
        return s
    return s[:maxlen - 3] + "..."
def build_reverse_mappings_from_nidb(nidb):
    """Builds IP reverse mappings (subnet, loopback, interface) from NIDB"""
    rev_map = {
        "subnets": {},
        "loopbacks": {},
        "infra_interfaces": {},
    }
    for node in nidb:
        # broadcast-domain pseudo-nodes map their subnet; real nodes
        # map their loopback address
        if node.broadcast_domain:
            rev_map["subnets"][str(node.ipv4_subnet)] = node
        else:
            rev_map["loopbacks"][str(node.loopback)] = node
        for interface in node.physical_interfaces:
            rev_map["infra_interfaces"][str(interface.ipv4_address)] = interface
    return rev_map
def _extract_options(line_part):
    """Return k/v options found in the part of the line.

    The part of the line looks like k=v,k=v,k=2 .  Chunks without '='
    are skipped; only the first two '='-separated fields of a pair are
    used, and both are whitespace-stripped.
    """
    options = {}
    for chunk in line_part.split(","):
        if "=" not in chunk:
            continue
        fields = chunk.split("=")
        options[fields[0].strip()] = fields[1].strip()
    return options
def _reverse64(b):
    """Converts (a,b) from big to little endian to be consistent with secp256k1.

    Each 32-byte half of the 64-byte input is reversed independently.
    """
    first_half = b[:32][::-1]
    second_half = b[32:][::-1]
    return first_half + second_half
def filter_yellow_square(words: tuple, letter: str, offset: int) -> tuple:
    """
    Keep words containing *letter* somewhere other than column *offset*.

    For example `filter_yellow_square(words, 'e', 3)` returns all words
    that *do* contain at least one 'e', as long as column 3 is not 'e'.
    """
    survivors = []
    for word in words:
        if letter in word and word[offset] != letter:
            survivors.append(word)
    return tuple(survivors)
def sort_dict_snapshots(dict_snapshots, times):
    """
    Order snapshot entries by the given time sequence.

    :param dict_snapshots: mapping of time -> snapshot
    :param times: times in the desired order (each must be a key)
    :return: new dict whose keys follow the order of *times*
    """
    ordered = {}
    for t in times:
        ordered[t] = dict_snapshots[t]
    return ordered
def _total_size(shape_values):
    """Given list of tensor shape values, returns total size.

    If shape_values contains tensor values (results of array_ops.shape),
    the product is a scalar tensor; otherwise it is a plain integer.
    """
    product = 1
    for dim in shape_values:
        product = product * dim
    return product
def find_locked_exits(room, stuff):
    """
    Collect the exits of *room* that require a key.

    (The previous docstring claimed this returned usable, non-hidden
    exits — the opposite of what the code does.  ``stuff`` is accepted
    but never consulted: holding the key does not remove an exit from
    the result, and hidden exits are not filtered — TODO confirm the
    intended contract with callers.)

    :param room: room dict with an 'exits' list
    :param stuff: the player's inventory (currently unused)
    :return: list of exits carrying a "required_key" field
    """
    locked = []
    for room_exit in room['exits']:
        if "required_key" in room_exit:
            locked.append(room_exit)
    return locked
def command_result_processor_parameter_extra(parameter_values):
    """
    Command result message processor if received extra parameter(s).

    Parameters
    ----------
    parameter_values : `list` of `str`
        Extra parameters.

    Returns
    -------
    message : `str`
    """
    # join() also copes with an empty sequence, where the old manual
    # index loop raised IndexError
    listing = ', '.join(repr(value) for value in parameter_values)
    return 'Extra parameters: ' + listing + '.\n'
def append_write(filename="", text=""):
    """
    Append *text* to *filename* (UTF-8) and return the number of
    characters written.
    """
    with open(filename, "a", encoding="utf-8") as handle:
        written = handle.write(text)
    return written
def __get_taxa(rank):
    """
    Return set of taxids with abundance > 0

    >>> __get_taxa(query_rank)
    {123}

    :param rank: mapping of taxid -> abundance for a specific rank
    :return: set of taxids whose abundance is positive
    """
    return {taxid for taxid, abundance in rank.items() if abundance > 0}
def dot_bracket(pair_list, len_seq):
    """Render base pairs as dot-bracket notation of length *len_seq*."""
    symbols = ["."] * len_seq
    for opening, closing in pair_list:
        symbols[opening] = "("
        symbols[closing] = ")"
    return "".join(symbols)
def part1(instructions):
    """
    Count the jumps needed to escape the instruction list; each offset
    is incremented by one after it is used.  Mutates *instructions*.

    >>> part1([0, 3, 0, 1, -3])
    5
    """
    position = 0
    steps = 0
    upper = len(instructions)
    while 0 <= position < upper:
        offset = instructions[position]
        instructions[position] = offset + 1
        position += offset
        steps += 1
    return steps
def decode_integer(value):
    """Decode a binary-digit string and return the integer as a string."""
    as_int = int(value, 2)
    return str(as_int)
def _make_nested_padding(pad_shape, pad_token):
    """Create nested lists of pad_token of shape pad_shape.

    Built innermost dimension first.  Note that ``list * n`` repeats
    references, so the inner lists are shared objects.
    """
    nested = [pad_token]
    for size in reversed(pad_shape):
        nested = [nested * size]
    return nested[0]
def brewery_location(brewery):
    """Takes an untappd brewery response and returns a location string.

    City, state, and country are included only when present and
    non-empty, joined with ", ".
    """
    loc = brewery["location"]
    parts = [loc.get("brewery_city"),
             loc.get("brewery_state"),
             brewery.get("country_name")]
    return ', '.join(part for part in parts if part)
def any_value_except(mapping, excluded_keys):
    """Return a value from *mapping* whose key is not in *excluded_keys*.

    Raises StopIteration if every key is excluded."""
    candidates = (mapping[key] for key in mapping if key not in excluded_keys)
    return next(candidates)
def get_reference_output_files(reference_files_dict: dict, file_type: str) -> list:
    """
    Return output files of a given type from a reference-files dict.

    Args:
        reference_files_dict: validated dict of reference-file entries
        file_type: a file type string, e.g. vcf, fasta

    Returns:
        list of 'output_file' values whose entry matches *file_type*
    """
    return [entry['output_file']
            for entry in reference_files_dict.values()
            if entry['file_type'] == file_type]
def dec_to_bin_slow(n):
    """
    Manually build the binary representation of a decimal number.

    Parameters
    ----------
    n: int
        Non-negative number in base 10

    Raises
    ------
    ValueError
        If n is negative.
    """
    if n < 0:
        raise ValueError
    if n == 0:
        return '0'
    digits = []
    while n > 0:
        digits.append(str(n & 1))
        n >>= 1
    return ''.join(reversed(digits))