content
stringlengths
42
6.51k
def is_url(url: str) -> bool:
    """Return True when *url* looks like a tweet (Twitter status) URL."""
    lowered = url.lower()
    return all(marker in lowered for marker in ("twitter.com/", "/status/"))
def is_special_file(path):
    """
    Return True when the final path component marks a special/virtual file.

    A special file is identified by a ``::`` token in its basename.

    @type path: str
    @rtype: boolean
    """
    basename = path.rpartition("/")[2]
    return "::" in basename
def create_bom(merged_manifest, approved_list, denied_list):
    """Create a BOM from a merged manifest plus resolution lists.

    For packages present in the approved or denied list, the
    'copyright_notices', 'interaction_types' and 'resolution' values are
    copied over; all other packages get those keys with empty values.
    Version numbers are normalized to strings.

    Args:
        merged_manifest: dict of all packages used by a project.
        approved_list: dict of approved packages.
        denied_list: dict of denied packages (overrides approved on clash).

    Returns:
        Dict representing the BOM (shallow copy of merged_manifest; the
        nested per-package dicts are updated in place).
    """
    # Denied entries win over approved ones, matching the original merge order.
    resolved = {**approved_list, **denied_list}
    bom = merged_manifest.copy()
    for name, entry in bom.items():
        # Standardize all version numbers to strings.
        entry["version"] = str(entry["version"])
        fields = {
            "copyright_notices": "",
            "interaction_types": [],
            "resolution": "",
        }
        match = resolved.get(name)
        if match is not None:
            fields = {field: match[field] for field in fields}
        entry.update(fields)
    return bom
def getNetworkResources(networkDict):
    """Return the 'NetworkResources' entry of a network dictionary."""
    return networkDict['NetworkResources']
def add_multiple(*arrs):
    """Add an arbitrary number of arguments together, like np.add(x, y)
    generalized to n operands.

    Accumulates right-to-left (the last argument is the innermost addend),
    which matters for non-commutative '+' such as string concatenation.
    """
    if len(arrs) <= 1:
        return arrs[0]
    return add_multiple(*arrs[1:]) + arrs[0]
def sumOutputSerializeSizes(outputs):
    """
    Sum the serialized sizes of the supplied transaction outputs.

    Args:
        outputs list(TxOut): Transaction outputs.

    Returns:
        int: Estimated size of the byte-encoded transaction outputs.
    """
    return sum(tx_out.serializeSize() for tx_out in outputs)
def coq_scope(s):
    """Return the Coq command (newline-terminated) that opens scope *s*."""
    return f"Open Scope {s!s}.\n"
def _ensure_wrappability(fn): """Make sure `fn` can be wrapped cleanly by functools.wraps. Adapted from gin-config/gin/config.py """ # Handle "builtin_function_or_method", "wrapped_descriptor", and # "method-wrapper" types. unwrappable_types = (type(sum), type(object.__init__), type(object.__call__)) if isinstance(fn, unwrappable_types): # pylint: disable=unnecessary-lambda wrappable_fn = lambda *args, **kwargs: fn(*args, **kwargs) wrappable_fn.__name__ = fn.__name__ wrappable_fn.__doc__ = fn.__doc__ wrappable_fn.__module__ = '' # These types have no __module__, sigh. wrappable_fn.__wrapped__ = fn return wrappable_fn # Otherwise we're good to go... return fn
def list_to_dict(lst, separator='='):
    """
    Convert a list of ["k=v"] strings to a dictionary {k: v}.

    Splits each item on the *first* occurrence of `separator` only, so
    values may themselves contain the separator (e.g. "k=v=w" -> {"k": "v=w"}).
    Previously the unbounded split made such items raise ValueError.

    :param lst: iterable of "key<separator>value" strings
    :param separator: the key/value delimiter (default "=")
    :return: dict mapping keys to values
    """
    return dict(item.split(separator, 1) for item in lst)
def get_mean(a, b):
    """
    Return the midpoint (arithmetic mean) of two values.

    Inputs:
    - a, b: floats
    Outputs:
    - the coordinate of the midpoint: float
    """
    return (a + b) / 2
def as_dms(f_degrees):
    """Format a floating-point number of degrees as degrees, minutes and
    seconds (truncated, not rounded), e.g. 10.25 -> 10°15'00"."""
    whole_degrees = int(f_degrees)
    frac = f_degrees - whole_degrees
    minutes_float = frac * 60
    whole_minutes = int(minutes_float)
    seconds_float = (minutes_float - whole_minutes) * 60
    whole_seconds = int(seconds_float)
    return u"%d\u00b0%02d'%02d\"" % (whole_degrees, whole_minutes, whole_seconds)
def select_items_by_bits(lst, i):
    """
    Select items from lst indexed by the set bits of i.

    :param lst: A list.
    :param i: A non-negative integer whose most significant bit is at a
        position lesser than len(lst).
    :return: A list containing all lst[k] where (i & (1 << k)) == 1.
    """
    selected = []
    position = 0
    while i:
        if i & 1:
            selected.append(lst[position])
        i >>= 1
        position += 1
    return selected
def find_in_data(ohw_data, name):
    """
    Search the OpenHardwareMonitor data recursively for a node by name.

    Fixes two related bugs in the previous version: a non-matching leaf
    (node with no children) fell off the end and returned None instead of
    -1, and the recursive caller's ``result != -1`` test then treated that
    None as a successful find, aborting the search after the first leaf
    subtree.

    :param ohw_data: OpenHardwareMonitor data node (dict with 'Text' and
        'Children' keys), or -1
    :param name: Name of node to search for
    :returns: The found node, or -1 if no node was found
    :raises Exception: if ohw_data is -1 (a previous lookup failed)
    """
    if ohw_data == -1:
        raise Exception('Couldn\'t find value ' + name + '!')
    if ohw_data['Text'] == name:
        # The node we are looking for is this one.
        return ohw_data
    for child in ohw_data['Children']:
        result = find_in_data(child, name)
        if result != -1:
            return result
    # Nothing was found in this node or any of its children.
    return -1
def str_clean(input: str, **kwargs) -> str:
    """
    Cleans and encodes a string for template replacement.

    Keyword Arguments:
        replace_dual (bool, optional): Replace ``::`` with ``.``.
            Default ``True``.

    Returns:
        str: the cleaned string, stripped of surrounding whitespace.
    """
    # NOTE(review): parameter name `input` shadows the builtin; kept as-is
    # because renaming it would break keyword callers.
    _replace_dual = bool(kwargs.get('replace_dual', True))
    result = input
    if _replace_dual:
        result = result.replace("::", ".")
    # Escape literal backslash-n / backslash-r sequences by quadrupling the
    # backslashes, presumably for a template engine that unescapes twice —
    # TODO confirm against the consuming template.
    result = result.replace('\\n', '\\\\\\\\n').replace('\\r', '\\\\\\\\r')
    # Escape double quotes so the result can be embedded in a quoted string.
    result = result.replace('"', '\\"')
    return result.strip()
def check_movie_in_tweet(movie, full_text):
    """Check whether a movie was (credibly) mentioned in the tweet text.

    Short titles need a higher score to count, to reduce false positives:
    titles of <= 2 words require score >= 10.0, titles of <= 3 words
    require score >= 9. (Translated from Portuguese.)
    """
    if movie['title'] not in full_text:
        return False
    words = movie['words_count']
    score = movie['score']
    if words <= 2 and score < 10.0:
        return False
    if words <= 3 and score < 9:
        return False
    return True
def extract_encoding_and_rate_from_wav_header(wav_header):
    """
    Attempt to extract encoding and sample rate from a possible WAV header.

    Args:
        wav_header (bytes): Possible WAV file header.

    Returns:
        Union[bool, str]: False, or a truthy value which may be the
            encoding name (True for an unrecognized format tag).
        int: Sample rate if the WAV header parsed successfully, else 0.
    """
    if wav_header[:4] != b'RIFF' or wav_header[8:12] != b'WAVE':
        return False, 0
    # Format tag at bytes 20-21 (little-endian); unknown tags map to True,
    # which is always truthy.
    known_encodings = {
        b'\x01\x00': 'pcm_s16le',
        b'\x06\x00': 'a-law',
        b'\x07\x00': 'mu-law',
    }
    wav_encoding = known_encodings.get(wav_header[20:22], True)
    sample_rate = int.from_bytes(wav_header[24:28], byteorder='little')
    return wav_encoding, sample_rate
def get_slash_type(path) -> str:
    """
    Return the slash style used by a path: '\\' if any backslash appears,
    otherwise '/'.
    """
    return '\\' if '\\' in path else '/'
def get_loopbacks(yaml):
    """Return the names of all loopback interfaces in the config, [] if none."""
    if "loopbacks" not in yaml:
        return []
    return list(yaml["loopbacks"].keys())
def sum_even_fibonacci_numbers(limit):
    """
    Sum the even-valued Fibonacci terms strictly below *limit*
    (sequence taken as 1, 2, 3, 5, 8, ...).

    :param limit: exclusive upper bound on terms considered
    :return: sum of the even terms below limit
    """
    total = 0
    a, b = 1, 2
    while a < limit:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total
def get_display_text_for_event(event):
    """
    Map a collection-exercise event name to its user-facing display text.

    Unrecognized event names are returned unchanged.

    :param event: A name of an event from a collection exercise
    :type event: str
    :return: A version of that event that can be displayed to the user
    """
    display_names = {
        "mps": "MPS (Main print selection)",
        "go_live": "Go live",
        "return_by": "Return by",
        "reminder": "First reminder",
        "reminder2": "Second reminder",
        "reminder3": "Third reminder",
        "nudge_email_0": "First nudge email",
        "nudge_email_1": "Second nudge email",
        "nudge_email_2": "Third nudge email",
        "nudge_email_3": "Fourth nudge email",
        "nudge_email_4": "Fifth nudge email",
        "exercise_end": "Exercise end",
    }
    return display_names.get(event, event)
def fmttime(time):
    """
    Format a time elapsed in seconds as a human-readable string,
    e.g. "1h 1m 1.0s". Units beyond days (whole weeks) are discarded,
    matching the original behavior.

    INPUTS
    time : input time elapsed [seconds]
    """
    seconds = time % 60.
    time = (time - seconds) / 60.
    result = f"{seconds}s"
    if not time:
        return result

    minutes = int(time % 60.)
    time = (time - minutes) / 60.
    result = f"{minutes}m {result}"
    if not time:
        return result

    hours = int(time % 24.)
    time = (time - hours) / 24.
    result = f"{hours}h {result}"
    if not time:
        return result

    days = int(time % 7.)
    time = (time - days) / 7.
    return f"{days}d {result}"
def cycle_list_next(vlist, current_val):
    """Return the element following *current_val* in *vlist*, wrapping
    around to the beginning at the end of the list."""
    position = vlist.index(current_val)
    return vlist[(position + 1) % len(vlist)]
def calculate_sleep_factor(scans_per_read, ljm_scan_backlog):
    """Calculate how much to sleep based on how far behind the stream is.

    @para scans_per_read: The number of scans returned by an eStreamRead call
    @type scans_per_read: int
    @para ljm_scan_backlog: The number of backlogged scans in the LJM buffer
    @type ljm_scan_backlog: int
    @return: A factor that the normal sleep time should be multiplied by
    @type: float
    """
    DECREASE_TOTAL = 0.9
    backlog_fraction = float(ljm_scan_backlog) / scans_per_read
    # Skip sleeping entirely when the backlog is large.
    if backlog_fraction > DECREASE_TOTAL:
        return 0.0
    return (1 - backlog_fraction) * DECREASE_TOTAL
def mapl(function, array):
    """
    Map a function over a list, returning a list (Python-2-style map).

    :param function: Function to be mapped
    :param array: List of values
    :return: List of the function's return values

    f = function
    array = [x0, x1, x2, ... xn]
    [f(x0), f(x1), f(x2) ... f(xn)] = mapl(f, array)

    Example:
    mapl(lambda x: x**2, [1, 2, 3, 4, 5, 6]) -> [1, 4, 9, 16, 25, 36]
    """
    return [function(value) for value in array]
def average_index(n1, n2, v1=None, v2=None, vr=None):
    """Compute volume-weighted average refractive index.

    Fixes an operator-precedence bug: the old guard ``if v1 or v2 is None``
    raised ValueError whenever v1 was truthy (and missed the case where
    only v1 was None). The intended check is that both volumes are given.

    Args:
        n1, n2 (float/ndarray): refractive index of material 1 and 2
        v1, v2 (float): volume of materials 1 and 2
        vr (float): volume ratio v1/(v1+v2), used instead of v1 and v2

    Returns:
        The volume-weighted average index.

    Raises:
        ValueError: if neither vr nor both v1 and v2 are supplied.
    """
    if vr is None:
        if v1 is None or v2 is None:
            raise ValueError("Please supply volumes v1, v2 "
                             "or volume ratio vr")
        n = (v1 * n1 + v2 * n2) / (v1 + v2)
    else:
        # vr is the fraction v1/(v1+v2), so the weights are vr and 1-vr.
        n = vr * n1 + (1 - vr) * n2
    return n
def get_app_icon_name(app_id: str) -> str:
    """Build the icon resource name corresponding to an app id."""
    prefix = "icon_"
    return f"{prefix}{app_id}"
def count_total(P, x, y):
    """
    Return the total of the slice [x..y] using a prefix-sum array.

    Args:
        P: prefix-sum array (P[i] is the sum of the first i elements)
        x: start index of the slice, 0 <= x
        y: end index of the slice (inclusive), x <= y < n

    Returns:
        Sum of the underlying elements from index x through y.
    """
    return P[y + 1] - P[x]
def record_mandatory_paths(database, include_path_candidate, header):
    """Record an include-path candidate as mandatory for *header*.

    A mandatory include path is detected when the source file location
    differs from the included header file's location. Both the path and
    the "mandatory" tag are added only once (idempotent).
    """
    entry = database[header]
    if include_path_candidate not in entry["include_paths"]:
        entry["include_paths"].append(include_path_candidate)
    if "mandatory" not in entry["path_types"]:
        entry["path_types"].append("mandatory")
    return database
def my_cmp(a, b):
    """
    Python 3 replacement for the Python 2 builtin cmp.

    Returns -1/0/1 for comparable non-None operands. If either operand is
    None, returns True; if the comparison raises, prints the error and
    returns False (both quirks preserved from the original).
    """
    if a is None or b is None:
        return True
    try:
        return (a > b) - (a < b)
    except ValueError as e:
        print(f"ValueError: {e}")
    except TypeError as e:
        print(f"TypeError: {e}")
    return False
def suppress_classes(label, list_to_suppress):
    """Return 'O' when *label* is in the suppression list, else the label."""
    return 'O' if label in list_to_suppress else label
def annotated_frames(scribbles_data):
    """
    Find which frames contain a scribble.

    # Arguments
        scribbles_data (dict): Scribbles in the default format.

    # Returns
        list: Indices of the frames that contain at least one scribble.
    """
    return [idx for idx, frame in enumerate(scribbles_data['scribbles']) if frame]
def module_to_str(module):
    """Convert a Python module to a task name (its last dotted component)."""
    return module.__name__.rsplit(".", 1)[-1]
def calculate_p_EE(R_EE, p_total, N_total, N_in):
    """
    Calculate in-cluster/out-of-cluster connection probabilities for a
    clustered network from R_EE and the total connection probability.

    :param R_EE: ratio of in-cluster to out-of-cluster connection probability
    :param p_total: overall connection probability over the whole network
    :param N_total: total number of neurons
    :param N_in: number of neurons inside a cluster
    :return: [p_inside, p_out]
    """
    p_out = (p_total * N_total) / (N_in * (R_EE - 1) + N_total)
    p_inside = p_out * R_EE
    return [p_inside, p_out]
def iterable_to_string(iterable, separator="\n"):
    """
    Join the string form of each item in *iterable* with *separator*.

    Uses ``map(str, ...)`` directly instead of the previous redundant
    ``lambda x: str(x)`` wrapper.

    :param iterable: the iterable
    :type iterable: Any
    :param separator: the separator
    :type separator: str
    :return: the joined string
    :rtype: str
    """
    return separator.join(map(str, iterable))
def get_mask_from_alignment(al):
    """
    Return the mask line of a single alignment: the second line of its
    string form, with '|' characters replaced by '+'. Other characters
    (gaps, spaces) are left unchanged.
    """
    match_line = str(al).split("\n")[1]
    return match_line.replace("|", "+")
def flipcoords(xcoord, ycoord, axis):
    """
    Flip the coordinates over a specific axis, to a different quadrant.

    Fixes a bug in the 'x'-axis branch: for negative y values the old code
    returned the coordinates in swapped order ("y, x" instead of "x, y").
    The per-sign branches (``x - x - x``, ``x + abs(x) * 2``) all reduce to
    simple negation, so they are collapsed.

    :type xcoord: integer
    :param xcoord: The x coordinate to flip
    :type ycoord: integer
    :param ycoord: The y coordinate to flip
    :type axis: string
    :param axis: The axis to flip across. Could be 'x' or 'y'
    :return: "x, y" string of the flipped coordinates
    :raises ValueError: if axis is neither 'x' nor 'y'
    """
    axis = axis.lower()
    if axis == 'y':
        # Flipping across the y axis negates x.
        return str(-xcoord) + ', ' + str(ycoord)
    elif axis == 'x':
        # Flipping across the x axis negates y.
        return str(xcoord) + ', ' + str(-ycoord)
    raise ValueError("Invalid axis. Neither x nor y was specified.")
def getSight(m):
    """Return the length of a monster's line of sight."""
    return m["sight"]
def parseChoices(optString):
    """
    Parse the option string, our "recipe". Example:
    "time=Q4 2014 dimensions=[time,age,industry] geo=K000000001 obs=obs_col"

    Fixes two bugs in the previous version: the geo= branch assigned its
    split result to `obs` (so geo was always None), and both obs= and geo=
    left the raw ``str.split`` list in place instead of extracting the
    value token.

    Returns:
        (time, geo, obs, dimensions) — time keeps its original raw form
        (everything after "time=" with other options stripped out); geo and
        obs are single whitespace-delimited tokens; dimensions is a list of
        names. time/geo/obs are None when absent; dimensions= is mandatory.
    """
    time = None  # assumed.
    geo = None   # ...
    obs = None   # .
    dimensions = []

    # Look for optional "time=" in optString
    if "time=" in optString:
        # if "time=" appears more than once in the options string they're doing it wrong
        assert len(optString.split("time=")) == 2, "You can only specifiy the time (with time=) once."
        # Time may be in two parts (e.g Q1 2014) so isolate it by eliminating
        # the other options (which don't include whitespace delimiters).
        tString = optString
        tString = [x for x in tString.split("obs=") if "time=" in x][0]
        tString = [x for x in tString.split("dimensions=") if "time=" in x][0]
        tString = [x for x in tString.split("geo=") if "time=" in x][0]
        time = tString[5:]  # get rid of "time="

    # look for optional obs= in optString
    if "obs=" in optString:
        obsParts = optString.split("obs=")
        assert len(obsParts) == 2, "You can only specify the obs column (with obs=) once"
        obs = obsParts[1].split(" ")[0].strip()

    # look for optional geo= in optString
    if "geo=" in optString:
        geoParts = optString.split("geo=")
        assert len(geoParts) == 2, "You can only specify the geo column (with geo=) once"
        geo = geoParts[1].split(" ")[0].strip()

    # Finds MANDATORY dimensions= in optString
    assert "dimensions=" in optString, "You MUST provide the required dimensions when trying to convert to V4"
    # Find the sub-string. Assertion for stupidity.
    dimText = [x for x in optString.split(" ") if "dimensions=" in x]
    assert len(dimText) == 1, "You should only be specifiying 'dimensions=' once!"
    # Build the list of wanted dimensions
    dimText = dimText[0].replace("dimensions=", "").replace("[", "").replace("]", "")
    dimText = dimText.split(" ")[0].strip()
    dimensions = dimText.split(",")

    return time, geo, obs, dimensions
def simpleMsgResp(content: str) -> dict:
    """
    Wrap *content* in a simple response dict:
    { message: content, len: len(content) }

    :param content: str - the message to be inserted
    :return: dict containing the message and its length
    :rtype: dict
    """
    return dict(message=content, len=len(content))
def extract_annotations(node, category=None):
    """Recursively collect nodes carrying an 'annotation' mark of the given
    category (any category when *category* is None). A node is appended once
    per matching mark, in document order."""
    found = []
    for mark in node.get('marks', []):
        is_match = mark['type'] == 'annotation' and (
            category is None or mark['attrs']['category'] == category)
        if is_match:
            found.append(node)
    for child in node.get('content', []):
        found.extend(extract_annotations(child, category))
    return found
def recurring(strg):
    """Return the first character of *strg* that occurs more than once,
    or None if every character is unique.

    The previous version claimed O(n) but the ``c in strg[idx + 1:]`` test
    (with its slice copy) made it O(n^2); counting occurrences first makes
    it genuinely O(n).
    """
    counts = {}
    for ch in strg:
        counts[ch] = counts.get(ch, 0) + 1
    for ch in strg:
        if counts[ch] > 1:
            return ch
    return None
def _static_hasattr(value, attr): """Returns whether this value has the given attribute, ignoring __getattr__ overrides.""" try: object.__getattribute__(value, attr) except AttributeError: return False else: return True
def bytearray_to_hexstring(ba):
    """Return the lowercase hex representation of a byte sequence
    (two hex digits per byte), e.g. b'\\x00\\xff' -> '00ff'.

    Replaces the hand-rolled per-byte hex()/zero-pad loop with the
    builtin ``bytes.hex()``, which produces identical output.
    """
    return bytes(ba).hex()
def _all_the_same(items): """ Checks whether all values in a list are the same. """ return all(items[0] == item for item in items)
def sanitize_dict(d):
    """Return a copy of *d* with all falsy values removed; the value under
    the "synonyms" key (if kept) is returned sorted."""
    cleaned = {}
    for key, value in d.items():
        if not value:
            continue
        cleaned[key] = sorted(value) if key == "synonyms" else value
    return cleaned
def merge(left, right):
    """Merge two sorted lists into a single sorted list.

    On equal elements the right-hand element is taken first (strict '<'
    comparison on the left), matching the original behavior.
    """
    merged = []
    i = 0
    j = 0
    left_len, right_len = len(left), len(right)
    while i < left_len and j < right_len:
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # Append whatever remains of either half (at most one is non-empty).
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def rtd10(raw_value):
    """Convert a platinum RTD raw reading to degrees C.

    The conversion is simply ``0.1 * raw_value``; the unit string "degC"
    is returned alongside the value.
    """
    celsius = float(raw_value) / 10.0
    return (celsius, "degC")
def host_match(c_host, bw_host):
    """
    Check whether a cookie host `c_host` matches a black/white-list entry
    `bw_host`: exact match, or suffix match when the entry starts with '.'.
    """
    if c_host == bw_host:
        return True
    return bw_host.startswith('.') and c_host.endswith(bw_host)
def human_format(num):
    """
    :param num: A number to print in a nice readable way.
    :return: A string representing the number with a metric suffix
        (e.g. 1000 -> '1.00K').
    """
    suffixes = ['', 'K', 'M', 'G', 'T', 'P']
    order = 0
    while abs(num) >= 1000:
        num /= 1000.0
        order += 1
    return '%.2f%s' % (num, suffixes[order])
def task_wrapper(args):
    """
    Helper for multiprocessing.Pool.imap_unordered.

    The first element of *args* is a callable; the remaining elements are
    passed to it. A None result is coerced to True so the pool always
    yields a truthy completion marker.
    """
    func, *func_args = args
    result = func(*func_args)
    return True if result is None else result
def get_average_mos(solution, ignore_non_served=False):
    """Return the average MOS of a solution.

    Users lacking a "mos" entry contribute nothing to the sum; when
    ignore_non_served is True the average is taken over served users only
    (those that get some video), otherwise over all users.
    """
    mos_total = 0
    served_count = 0
    for user in solution["users"]:
        if "mos" in user:
            mos_total += user["mos"]
            served_count += 1
    denominator = served_count if ignore_non_served else len(solution["users"])
    return mos_total / denominator
def get_participant(encounter):
    """
    Extract the (first) participant's individual reference from an
    encounter resource.

    Returns a dict with 'id' (last path segment of the reference),
    'reference' and 'display' when the first participant has an
    'individual'; returns {} when the encounter has no participants.
    # NOTE(review): when a participant exists but lacks 'individual',
    # the function falls through and returns None implicitly — confirm
    # whether callers handle that case.

    :param encounter: FHIR-style encounter dict
    :return: participant summary dict, or {}
    """
    if "participant" in encounter:
        id = ""
        if 'individual' in encounter['participant'][0]:
            # The id is the resource-local part of "Type/id".
            id_split = encounter['participant'][0]['individual']['reference'].split("/")
            id = id_split[1]
            return {'id': id,
                    'reference': encounter['participant'][0]['individual']['reference'],
                    'display': encounter['participant'][0]['individual']['display']}
    else:
        # print("encounter:", encounter)
        return {}
def check_clause(clause, implication_graph):
    """Check one clause against the current assignments.

    Returns False when the clause is fully assigned and unsatisfied
    (contradiction); returns the (variable, value) pair obtained by unit
    resolution when exactly one literal is unassigned and the rest are
    falsified; returns None otherwise (satisfied, or >= 2 unassigned).

    Each literal is a (variable, value) pair; implication_graph maps an
    assigned variable to a tuple whose second element is its value.
    """
    unassigned_count = 0
    satisfied = False
    unit = None
    for var, val in clause:
        if var in implication_graph:
            if val == implication_graph[var][1]:
                satisfied = True
        else:
            if unassigned_count == 0:
                unit = (var, val)
                unassigned_count = 1
            else:
                # Two or more unassigned literals: nothing to conclude.
                return None
    if unassigned_count == 1 and not satisfied:
        return unit
    if not satisfied:
        return False
    return None
def _get_topic_memcache_key(topic_id, version=None): """Returns a memcache key for the topic. Args: topic_id: str. ID of the topic. version: int. The version of the topic. Returns: str. The memcache key of the topic. """ if version: return 'topic-version:%s:%s' % (topic_id, version) else: return 'topic:%s' % topic_id
def get_maximum_overview_level(width, height, minsize=256):
    """
    Calculate the maximum overview level of a dataset at which the
    smallest overview is smaller than or equal to `minsize`.

    Attributes
    ----------
    width : int
        Width of the dataset.
    height : int
        Height of the dataset.
    minsize : int (default: 256)
        Minimum overview size.

    Returns
    -------
    overview_level: int
        overview level.
    """
    level = 0
    factor = 1
    while min(width // factor, height // factor) > minsize:
        factor *= 2
        level += 1
    return level
def cball(callback, iterable):
    """Return True if `callback` is truthy for all elements of the iterable
    (or if the iterable is empty).

    Delegates to the builtin ``all`` with a generator expression instead of
    re-implementing the short-circuiting loop by hand.

    :param callback: callable
    :param iterable: Sequence
    :returns: bool
    """
    return all(callback(v) for v in iterable)
def isInt(x):
    """
    Return True when x is an integer (note: bools are ints in Python).

    >>> isInt(1)
    True
    >>> isInt('Hello')
    False
    >>> isInt(3.14)
    False
    """
    return isinstance(x, int)
def get_sum(limit=1000):
    """Return the sum of all multiples of 3 or 5 below *limit*.

    Rewritten with the builtin ``sum`` over a generator; the previous
    version accumulated into a local named ``sum``, shadowing the builtin.

    :param limit: exclusive upper bound (default 1000)
    :return: int sum of qualifying numbers
    """
    return sum(n for n in range(1, limit) if n % 3 == 0 or n % 5 == 0)
def field2DME(data, data_catelog):
    """Convert common field names to DME field names.

    Looks up each common name in data_catelog under its collection type,
    falling back to the 'Sample' catalog section when the collection-type
    lookup raises KeyError (sample dict is keyed by sample_id rather than
    collection type).

    Returns a dictionary of converted names suitable for archiving.
    """
    converted = {}
    for collection_type, metadict in data.items():
        converted.setdefault(collection_type, {})
        for common_name, user_value in metadict.items():
            try:
                dme_name = data_catelog[collection_type][common_name][0]
            except KeyError:
                dme_name = data_catelog['Sample'][common_name][0]
            converted[collection_type][dme_name] = user_value
    return converted
def sub(value, arg):
    """Subtract *arg* from *value* (template-filter style helper)."""
    return value - arg
def extract_turls(indata):
    """
    Extract TURLs from indata for direct-i/o files (status 'remote_io').

    :param indata: list of FileSpec.
    :return: comma-separated list of turls (string).
    """
    remote_turls = [spec.turl for spec in indata if spec.status == 'remote_io']
    return ",".join(remote_turls)
def order_match(str1, str2):
    """Return the percentage of str1's characters that appear in str2 in
    order (greedy subsequence match); 0 when str1 is longer than str2."""
    if len(str1) > len(str2):
        return 0
    matched = 0
    for ch in str2:
        if matched < len(str1) and str1[matched] == ch:
            matched += 1
    return 100 * (matched / len(str1))
def get_all_subsets(some_list):
    """Return all subsets of some_list, of every size from 0 to len(some_list).

    Strategy: recursively compute the subsets of the tail; each such
    partial subset appears both as-is and with the head element appended.
    """
    if not some_list:
        # The only subset of the empty list is the empty list.
        return [[]]
    head, tail = some_list[0], some_list[1:]
    subsets = []
    for partial in get_all_subsets(tail):
        subsets.append(partial)
        subsets.append(partial + [head])
    return subsets
def thread(read=20, write=20):
    """Return the kwargs for creating the DynamoDB Thread table
    (ForumName HASH key, Subject RANGE key)."""
    attribute_definitions = [
        {'AttributeName': 'ForumName', 'AttributeType': 'S'},
        {'AttributeName': 'Subject', 'AttributeType': 'S'},
    ]
    key_schema = [
        {'AttributeName': 'ForumName', 'KeyType': 'HASH'},
        {'AttributeName': 'Subject', 'KeyType': 'RANGE'},
    ]
    return {
        'AttributeDefinitions': attribute_definitions,
        'TableName': 'Thread',
        'KeySchema': key_schema,
        'ProvisionedThroughput': {
            'ReadCapacityUnits': read,
            'WriteCapacityUnits': write,
        },
    }
def is_true(value):
    """Convert a GET-parameter string to bool: empty, "false" and "0"
    (case-insensitive) are False, everything else is True."""
    if not value:
        return False
    return value.lower() not in ("false", "0")
def conv_out_shape(in_shape, out_fms, p, k, s):
    """
    Compute the NHWC output shape of a 2D convolution (square kernel).

    @param in_shape: The input shape [batch, height, width, channels].
    @param out_fms: The number of feature maps in the output.
    @param p: The padding type (either 'SAME' or 'VALID').
    @param k: The side length of the kernel.
    @param s: The stride.
    @return The shape of the output after convolution.
    """
    if p == 'SAME':
        pad = k // 2
    elif p == 'VALID':
        pad = 0
    else:
        raise ValueError('p must be "SAME" or "VALID".')
    h, w = in_shape[1], in_shape[2]
    out_h = int(((h + (2 * pad) - k) / s) + 1)
    out_w = int(((w + (2 * pad) - k) / s) + 1)
    return [in_shape[0], out_h, out_w, out_fms]
def column_name_list(columns):
    """
    Build a comma-separated list of column names.

    :param columns: The list of columns (objects with a .name attribute).
    :returns: A comma-separated list of column names ('' for empty/None).
    """
    if not columns:
        return ''
    return ', '.join(col.name for col in columns)
def praat_string(text):
    """Format *text* as a Praat string literal: wrapped in double quotes,
    with embedded double quotes doubled.

    Parameters
    ----------
    text : str
        String to be formatted for Praat.

    Returns
    -------
    str
    """
    escaped = text.replace('"', '""')
    return '"{}"'.format(escaped)
def draw_on_pattern(shape, pattern):
    """
    Draw a shape on a pattern, returning a new pattern grid.

    Each shape cell is a (y, x, colour) triple; cells outside the grid are
    ignored. The previous version used ``pattern.copy()``, a *shallow*
    copy, so drawing mutated the caller's rows — rows are now copied
    individually so the input pattern is left untouched.

    >>> draw_on_pattern([(0, 0, 1), (0, 1, 3), (1, 1, 8)],
    ...                 [[0, 0, 0], [0, 0, 0]])
    [[1, 3, 0], [0, 8, 0]]
    """
    y_size = len(pattern)
    x_size = len(pattern[0])
    # Copy each row so the caller's pattern is not mutated.
    new_pattern = [row[:] for row in pattern]
    for y, x, colour in shape:
        if 0 <= y < y_size and 0 <= x < x_size:
            new_pattern[y][x] = colour
    return new_pattern
def get_unique_words(documents):
    """
    Return every unique word across the documents, lowercased, in first-seen
    order.

    :param documents: list of string documents containing words
    :return: list of all unique lowercase string words across the documents
    """
    all_words = []
    for document in documents:
        all_words.extend(document.split(' '))
    lowered = [word.lower() for word in all_words]
    # dict.fromkeys preserves insertion order while deduplicating.
    return list(dict.fromkeys(lowered))
def metadata_keys_complete(result_keys) -> bool:
    """
    Test helper: check that a query-result dict carries exactly the four
    expected metadata keys.

    :param result_keys: keys of a database-specific dictionary returned by
        a query, corresponding to metadata categories
    :type result_keys: dict_keys
    """
    expected = {'symbol', 'UniProt ID', 'sequence', 'RefSeq ID'}
    return set(result_keys) == expected
def import_from_path(path):
    """
    Import an attribute (class, function, ...) dynamically from its dotted
    path.

    :param path: dotted path "package.module.attr"
    :type path: string
    :return: the named attribute of the imported module
    :rtype: object
    :raises ImportError: when the attribute does not exist on the module
    """
    module_name, attr_name = path.rsplit('.', 1)
    module = __import__(module_name, fromlist=[attr_name])
    try:
        return getattr(module, attr_name)
    except AttributeError:
        raise ImportError('Unable to import %s' % path)
def get_url_attrs(url, attr_name):
    """Build an HTML-tag attribute dict with the url stored under *attr_name*.

    *url* is either a plain string or a dict of attributes containing the
    key "url"; that key is renamed to *attr_name* in the result.
    """
    if isinstance(url, str):
        url = {"url": url}
    attrs = dict(url)
    attrs[attr_name] = attrs.pop("url")
    return attrs
def convert_distance(val, old_scale="meter", new_scale="centimeter"): """ Convert from a length scale to another one among meter, centimeter, inch, feet, and mile. Parameters ---------- val: float or int Value of the length to be converted expressed in the original scale. old_scale: str Original scale from which the length value will be converted. Supported scales are Meter ['Meter', 'meter', 'm'], Centimeter ['Centimeter', 'centimeter', 'vm'], Inch ['Inch', 'inch', 'in'], Feet ['Feet', 'feet', 'ft'] or Mile ['Mile', 'mile', 'mil']. new_scale: str New scale from which the length value will be converted. Supported scales are Meter ['Meter', 'meter', 'm'], Centimeter ['Centimeter', 'centimeter', 'cm'], Inch ['Inch', 'inch', 'in'], Feet ['Feet', 'feet', 'ft'] or Mile ['Mile', 'mile', 'mil']. Raises ------- NotImplementedError if either of the scales are not one of the requested ones. Returns ------- res: float Value of the converted length expressed in the new scale. """ # Convert from 'old_scale' to Meter if old_scale.lower() in ['centimeter', 'cm']: temp = val / 100.0 elif old_scale.lower() in ['meter', 'm']: temp = val elif old_scale.lower() in ['inch', 'in']: temp = val / 39.37008 elif old_scale.lower() in ['feet', 'ft']: temp = val / 3.28084 elif old_scale.lower() in ['mile', 'mil']: temp = 1609.344 * val else: raise AttributeError( f'{old_scale} is unsupported. m, cm, ft, in and mile are supported') # and from Meter to 'new_scale' if new_scale.lower() in ['centimeter', 'cm']: result = 100*temp elif new_scale.lower() in ['meter', 'm']: result = temp elif new_scale.lower() in ['inch', 'in']: result= 39.37008*temp elif new_scale.lower() in ['feet', 'ft']: result=3.28084*temp elif new_scale.lower() in ['mile', 'mil']: result=temp/1609.344 else: raise AttributeError( f'{new_scale} is unsupported. m, cm, ft, in and mile are supported') return result
def serialize_umd(analysis, type):
    """Serialize a UMD analysis dict into a JSON-API-style payload; missing
    analysis fields become None."""
    # (output attribute name, source key in the analysis dict)
    field_map = [
        ('loss', 'loss'),
        ('gain', 'gain'),
        ('treeExtent', 'treeExtent'),
        ('treeExtent2010', 'treeExtent2010'),
        ('areaHa', 'area_ha'),
        ('loss_start_year', 'loss_start_year'),
        ('loss_end_year', 'loss_end_year'),
    ]
    attributes = {out_name: analysis.get(src_key, None)
                  for out_name, src_key in field_map}
    return {
        'id': None,
        'type': type,
        'attributes': attributes,
    }
def input_data(list_images):
    """Validate and pass through a batch of images.

    Images are always NHWC (presumably — TODO confirm layout with callers).
    Uses ``isinstance`` instead of the previous ``type(...) == list`` check,
    which also accepts list subclasses.
    """
    assert isinstance(list_images, list)
    return list_images
def ExpandIncome(e00200, pencon_p, pencon_s, e00300, e00400, e00600,
                 e00700, e00800, e00900, e01100, e01200, e01400, e01500,
                 e02000, e02100, p22250, p23250, cmbtp, ptax_was,
                 benefit_value_total, expanded_income):
    """
    Calculates expanded_income as the sum of its component income types.

    The trailing `expanded_income` parameter is the output variable being
    recomputed; its incoming value is ignored and overwritten
    # (presumably a Tax-Calculator-style calc-function convention —
    # confirm against the framework that calls this).
    """
    expanded_income = (
        e00200 +  # wage and salary income net of DC pension contributions
        pencon_p +  # tax-advantaged DC pension contributions for taxpayer
        pencon_s +  # tax-advantaged DC pension contributions for spouse
        e00300 +  # taxable interest income
        e00400 +  # non-taxable interest income
        e00600 +  # dividends
        e00700 +  # state and local income tax refunds
        e00800 +  # alimony received
        e00900 +  # Sch C business net income/loss
        e01100 +  # capital gain distributions not reported on Sch D
        e01200 +  # Form 4797 other net gain/loss
        e01400 +  # taxable IRA distributions
        e01500 +  # total pension & annuity income (including DB-plan benefits)
        e02000 +  # Sch E total rental, ..., partnership, S-corp income/loss
        e02100 +  # Sch F farm net income/loss
        p22250 +  # Sch D: net short-term capital gain/loss
        p23250 +  # Sch D: net long-term capital gain/loss
        cmbtp +  # other AMT taxable income items from Form 6251
        0.5 * ptax_was +  # employer share of FICA taxes on wages/salaries
        benefit_value_total  # consumption value of all benefits received;
        # see the BenefitPrograms function in this file for details on
        # exactly how the benefit_value_total variable is computed
    )
    return expanded_income
def forwardTransform(transform, phi, actVal):
    """!
    Forward-transform a configuration and compute the Jacobian.

    With no transform, the inputs are passed through unchanged with a zero
    Jacobian term; otherwise the call is delegated to transform.forward.
    """
    if transform is None:
        # Identity transform: nothing to do.
        return phi, actVal, 0
    return transform.forward(phi, actVal)
def GetColumn(data, index):
    """Extracts the given column from the dataset.

    data: sequence of rows
    index: which column

    Returns: map from int year to float datum (value divided by 10.0);
    rows whose year or datum cannot be parsed are skipped.
    """
    column = {}
    for row in data:
        try:
            column[int(row[0])] = float(row[index]) / 10.0
        except ValueError:
            # skip header rows / unparseable entries
            pass
    return column
def bad_a_order(a, b):
    """
    A partial order which is not a lattice.

    Relates only elements of {'a', 'b'} to elements of {'c', 'd'}.
    """
    lower = a in ('a', 'b')
    upper = b in ('c', 'd')
    return lower and upper
def imager_view(request):
    """Display Imager Detail.

    The request argument is accepted for the view signature but unused.
    """
    return {'message': "Hello"}
def bounding_box(points):
    """
    Compute the axis-aligned bounding box of a set of (x, y) points.

    :param points: coordinates list
    :return: [(min_x, min_y), (max_x, max_y)]
    """
    xs = [point[0] for point in points]
    ys = [point[1] for point in points]
    return [(min(xs), min(ys)), (max(xs), max(ys))]
def set_data_format(array_values):
    """
    Check and set the corresponding format for each value.

    Integer-like strings are normalized; other text is wrapped in
    single quotes (double quotes are converted, empties become "''").

    :param list[list[str]] array_values: list of values
    :return: list[list[str]]: array formatted
    """
    formatted_data = []
    for row in array_values:
        formatted_row = []
        for value in row:
            try:
                # integer-like text passes through normalized
                value = str(int(value))
            except ValueError:
                if not value:
                    value = "''"
                elif type(value) is str:  # exact-type check, as elsewhere
                    if value.startswith("\""):
                        # convert surrounding double quotes to single quotes
                        value = "'" + value[1:-1] + "'"
                    elif not value.startswith("'"):
                        value = "'" + value + "'"
            formatted_row.append(value)
        formatted_data.append(formatted_row)
    return formatted_data
def _create_simple_json_dict(image_anns): """Makes a list of annotations in simple_json format.""" simple_json_anns = [] for ann in image_anns: ann_dict = {} ann_dict["bbox"] = [ann["x_min"], ann["y_min"], ann["width"] + ann["x_min"], ann["height"] + ann["y_min"]] ann_dict["classname"] = ann["category"] if "score" in ann.keys(): ann_dict["confidence"] = ann["score"] simple_json_anns.append(ann_dict) return simple_json_anns
def to_single_data(input):
    """Convert an input to a single bcbio data/world object.

    Handles both single sample cases (CWL) and all sample cases
    (standard bcbio): a one-element list/tuple is unwrapped, a dict is
    returned as-is, anything else trips the assertion.
    """
    if isinstance(input, (list, tuple)) and len(input) == 1:
        return input[0]
    assert isinstance(input, dict), input
    return input
def get_key(iroot, num_states):
    """
    Get energy key for the state of interest.

    Args:
        iroot (int): state of interest
        num_states (int): total number of states
    Returns:
        key (str): plain "energy" when there is only one state,
        otherwise "energy_<iroot>"
    """
    single_state = (iroot == 0 and num_states == 1)
    if single_state:
        return "energy"
    return f"energy_{iroot}"
def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token. Original version was obtained from here: https://github.com/huggingface/transformers/blob/23c6998bf46e43092fc59543ea7795074a720f08/src/transformers/data/processors/squad.py#L38 """ best_score = None best_span_index = None for span_index, doc_span in enumerate(doc_spans): end = doc_span["start"] + doc_span["length"] - 1 if position < doc_span["start"]: continue if position > end: continue num_left_context = position - doc_span["start"] num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"] if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index
def collinear(coordinates):
    """Return True if three points are (approximately) collinear.

    Args:
        coordinates: sequence of three (x, y) points A, B, C.

    Returns:
        bool: True when the points lie on one line (within epsilon).

    Uses the cross product of AB and AC instead of comparing slopes,
    so vertical segments (equal x coordinates) are handled correctly
    rather than raising ZeroDivisionError.
    """
    A, B, C = coordinates
    epsilon = 1e-10
    # cross product of vectors AB and AC; zero iff the points are collinear
    cross = (B[1] - A[1]) * (C[0] - A[0]) - (C[1] - A[1]) * (B[0] - A[0])
    return abs(cross) < epsilon
def most_expensive_menu_item(restaurant):
    """
    Determine the most expensive item on a restaurant's menu.

    Parameters:
        restaurant (dict): A dictionary with three lists: items, prices,
            and cals.

    Returns:
        str: The name of the most expensive item (first one on ties).

    Raises:
        ValueError: if the prices list is empty.

    Fixes the original's failure (TypeError from a None index) when the
    menu is empty or when every price is <= 0.
    """
    prices = restaurant['prices']
    if not prices:
        raise ValueError("restaurant has no priced menu items")
    # argmax over indices; max() keeps the first maximum, matching the
    # original strict-greater-than comparison on ties
    best_index = max(range(len(prices)), key=prices.__getitem__)
    return restaurant['items'][best_index]
def check_script(script):
    """
    Checks if a given string is a script (hash160) (or at least if it is
    formatted as if it is).

    :param script: Script to be checked (hex str, expected 40 hex chars
        = 20 bytes).
    :type script: hex str
    :return: True if the script matches the format, raise exception
        otherwise.
    :rtype: bool
    """
    if not isinstance(script, str):
        raise Exception("Wrong script format.")
    # 20 bytes == 40 hex characters; integer division keeps the reported
    # byte length an int (the original's true division produced floats)
    if len(script) != 40:
        raise Exception("Wrong signature length " + str(len(script) // 2))
    return True
def get_dict_field(blob, field_name):
    """Extract a field from remote-id author data.

    Returns None when blob is falsy or not a dict, or when the field is
    absent.
    """
    if isinstance(blob, dict) and blob:
        return blob.get(field_name)
    return None
def is_power_of_2(val):
    """Return True if an integer is a positive power of 2.

    Adds the positivity guard the original omitted (it returned True for
    0), so the function is now safe for any integer input.
    """
    # a power of two has exactly one set bit, so val & (val - 1) == 0
    return val > 0 and not val & (val - 1)
def bottom_row(matrix):
    """
    Return the last (bottom) row of a matrix.

    Returns a tuple (immutable).
    """
    last_row = matrix[-1]
    return tuple(last_row)
def _parse_csv_item_opts(entry): """Parse the _opts field in a SB Extended CSV item.""" # Accepting even slightly weirdly formatted entries: entry = entry.strip() if len(entry) == 0: return {} opts = {} for opt in entry.split(" "): opt_name, opt_val = opt.split(":") opts[opt_name] = opt_val return opts
def missing_formulas(dataset):
    """Return True when any relevant exchange lacks a formula.

    Used to check if datasets which are indicated for combined
    production can be used as such; only environment and technosphere
    exchange types are considered.
    """
    RELEVANT_TYPES = {'from environment', 'to environment', 'from technosphere'}
    # De Morgan: "not all have a formula" == "some lack a formula"
    return any(not exc.get('formula')
               for exc in dataset['exchanges']
               if exc['type'] in RELEVANT_TYPES)
def _some1(predicate, iterable): """Alternative implementation of :func:`some`.""" return any(map(predicate, iterable))
def xy(x: int, y: int) -> int:
    """Convert board coordinates [x, y], each in 0-8, to a flat 0-80
    row-major array index (9 columns per row).
    """
    return x + 9 * y
def _sorter(data): """ Return a tree of tuples (type, items sequence) for each items in a nested data structure composed of mappings and sequences. Used as a sorting key. """ seqtypes = list, tuple maptypes = dict, dict coltypes = seqtypes + maptypes if isinstance(data, maptypes): new_data = [] for k, v in data.items(): if isinstance(v, coltypes): v = _sorter(v) new_data.append((k, v)) return repr(tuple(sorted(new_data))) elif isinstance(data, seqtypes): new_data = [] for v in data: if isinstance(v, coltypes): v = _sorter(v) new_data.append(v) return repr(tuple(sorted(new_data))) else: return repr(data)
def get_p_key(episode_info):
    """
    Create the primary key field by concatenating episode information.

    :param episode_info: Dictionary of a single episode with
        'show_stub', 'season', and 'episode' keys.
    :return: key like "<show_stub>S<season>E<episode>".
    """
    show = episode_info["show_stub"]
    season = episode_info["season"]
    episode = episode_info["episode"]
    return "{}S{}E{}".format(show, season, episode)
def inversao(num):
    """
    Reverse the decimal digits of a non-negative integer.

    :param num: number to be reversed (non-positive inputs return 0)
    """
    reversed_num = 0
    while num > 0:
        # peel off the last digit and append it to the result
        num, digit = divmod(num, 10)
        reversed_num = reversed_num * 10 + digit
    return reversed_num