content · stringlengths 39–14.9k | sha1 · stringlengths 40–40 | id · int64 0–710k
def rename_bands(img, in_band_names, out_band_names):
    """Renames image bands.

    Args:
        img (object): The image to be renamed.
        in_band_names (list): The list of input band names.
        out_band_names (list): The list of output band names.

    Returns:
        object: The output image with the renamed bands.
    """
    return img.select(in_band_names, out_band_names)
2cbf3652d3a46bae3de6d72971d95cefaff8416b
80,977
def _decode_message(output: bytes, encoding: str) -> str:
    """Converts bytes to string, stripping white spaces."""
    return output.decode(encoding).strip()
7861d16653bf3bc862b191778d38b6b595510a50
80,985
def lookup_field(line, column):
    """
    Lookup the value of a given (numbered) column in a given Line

    Returns None if either the column or the line is None
    """
    # During merge, it is not guaranteed that each input file to the
    # merge has a matching line...
    if not line:
        return None
    # ...or that the input line has a column of the given name...
    if column is None:
        return None
    # ...or that the line in question bothers to supply every required
    # column
    try:
        return line.row[column]
    except IndexError:
        return None
787acda39ecdc2bf2c4a1065c7879b2145ba0685
80,986
import csv


def load_precursors(path):
    """Loads precursors from file containing SMILES.

    Precursors must be in a `.csv` file, with the aldehyde in column 1
    and the amine in column 2.

    Args:
        path: Path to file containing SMILES.

    Returns:
        List[List[str]]: List of lists containing SMILES from file.
    """
    with open(path, "r") as f:
        # Load SMILES, skipping empty rows so the strings are not empty.
        reader = csv.reader(f, delimiter=",")
        return [row for row in reader if row]
6f8cebeb6a16808be76744a58f00a82f69b1ba91
80,987
def vdot(v1, v2):
    """
    Returns the dot product of v1 and v2
    """
    return (v1 * v2).sum()
cee622b68574b13390113c03cb3959875a8f4ea2
80,992
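A quick usage sketch for `vdot` above — any objects supporting elementwise `*` and `.sum()` work; NumPy arrays are assumed here:

import numpy as np

v1 = np.array([1.0, 2.0, 3.0])
v2 = np.array([4.0, 5.0, 6.0])
print(vdot(v1, v2))  # 32.0 = 1*4 + 2*5 + 3*6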
def load_auth_settings(config):
    """
    Load all the auth configuration settings into a dict

    auth configuration settings contain an auth_ prefix.

    Args:
        config (dict): the global config

    Returns:
        (dict): All the auth settings
    """
    auth_config = {}
    for key, value in config.items():
        if key.lower().startswith("auth_"):
            auth_config[key] = value
    return auth_config
1890c74c77307a31dc82d23b1eea1893e6acd13c
80,993
def filter_with_values(df, variable, values):
    """
    Filter dataset for rows containing a specific value in a variable column.

    Parameters
    ----------
    df : pandas DataFrame
        dataframe to be filtered
    variable : str
        name of variable to be used for filtering
    values : int/float or list
        value, or list of values, to filter for in variable column

    Returns
    -------
    filtered_df : pandas DataFrame
        dataframe that has been filtered for value in variable column
    """
    if isinstance(values, (int, float)):
        values = [values]  # Coerce to list when single value, for isin()
    filtered_df = df.loc[df[variable].isin(values)]
    return filtered_df
2d430ab747aff506696fa5d1bb8b49e771c3a807
80,994
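A short usage sketch for `filter_with_values` above; the toy DataFrame is made up:

import pandas as pd

df = pd.DataFrame({"year": [2019, 2020, 2021, 2022], "count": [4, 8, 15, 16]})
print(filter_with_values(df, "year", 2020))           # single value is coerced to a list
print(filter_with_values(df, "year", [2019, 2022]))   # list of values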
from typing import Dict
from typing import Set
from typing import Any


def axis_keys(D: Dict, axis: int) -> Set[Any]:
    """Return set of keys at given axis.

    Parameters
    -------------------------------
    D: Dict
        Dictionary to determine keys of.
    axis: int
        Depth of keys.

    Returns
    -------------------------------
    The set of keys at given axis
    """
    return set.union(*[
        axis_keys(d, axis - 1)
        for d in D.values()
    ]) if axis else set(D.keys())
b44dac5a9265e917c76a6d60f26b6bbd4958d733
80,995
import inspect


def is_overridden(method_name, obj, parent=None) -> bool:
    """Check if `method_name` from parent is overridden in `obj`.

    Args:
        method_name (str): Name of the method.
        obj: Instance or class that potentially overrode the method.
        parent: parent class with which to compare. If None, traverse the MRO
            for the first parent that has the method.

    Raises RuntimeError if `parent` is not a parent class and if `parent`
    doesn't have the method. Or, if `parent` was None, that none of the
    potential parents had the method.
    """

    def get_mro(cls):
        try:
            return inspect.getmro(cls)
        except AttributeError:
            return inspect.getmro(cls.__class__)

    def first_parent_with_method(fn, mro_list):
        for cls in mro_list[::-1]:
            if hasattr(cls, fn):
                return cls
        return None

    if not hasattr(obj, method_name):
        return False

    try:
        instance_attr = getattr(obj, method_name)
    except AttributeError:
        return False

    mro = get_mro(obj)[1:]  # All parent classes in order, self excluded
    parent = parent if parent is not None else first_parent_with_method(method_name, mro)

    if parent not in mro:
        raise RuntimeError(f"`{obj}` has no parent that defined method `{method_name}`.")
    if not hasattr(parent, method_name):
        raise RuntimeError(f"Parent `{parent}` does not have method `{method_name}`")

    super_attr = getattr(parent, method_name)
    return instance_attr.__code__ is not super_attr.__code__
1b767bc45e82ef4b60733a108a55dd2bd9bddb80
80,996
from typing import Dict
from typing import List


def _flatten_dictionary(source_dict: Dict, target_dict: Dict, key: str, flatten_arrays: bool) -> Dict:
    """Private function to flatten a dictionary representing a JSON

    Args:
        source_dict: A Python dictionary containing a JSON representation
        target_dict: The target dictionary to contain the flattened JSON
        key: If a recursive call, the key of the parent object
        flatten_arrays: A flag indicating whether arrays should be flattened
            as well. Only the first element of the array would be picked

    Returns:
        A dictionary with the representation of the flattened JSON
    """
    for k, v in source_dict.items():
        dict_key = k if len(key.strip()) == 0 else key + "." + k
        if isinstance(v, dict):
            for nested_k, nested_v in _flatten_dictionary(v, target_dict, dict_key, flatten_arrays).items():
                target_dict[nested_k] = nested_v
        else:
            value_is_list = isinstance(v, List)
            if dict_key in target_dict:
                if flatten_arrays:
                    target_dict[dict_key] = v[0] if value_is_list else v
                else:
                    if value_is_list:
                        target_dict[dict_key].extend(v)
                    else:
                        target_dict[dict_key].append(v)
            else:
                if flatten_arrays:
                    target_dict[dict_key] = v[0] if value_is_list else v
                else:
                    target_dict[dict_key] = v
    return target_dict
04ab22d41f8e2dff82b84dd89687686bb07c46e4
80,998
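A small usage sketch for `_flatten_dictionary` above; the nested dict is invented to show both `flatten_arrays` modes:

nested = {"a": {"b": 1, "c": [2, 3]}, "d": "x"}
print(_flatten_dictionary(nested, {}, "", flatten_arrays=True))   # {'a.b': 1, 'a.c': 2, 'd': 'x'}
print(_flatten_dictionary(nested, {}, "", flatten_arrays=False))  # {'a.b': 1, 'a.c': [2, 3], 'd': 'x'}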
from typing import Optional


def format_str(value: Optional[str], default_value: str = '?') -> str:
    """Format an output string. Simply puts single quotes around the value if
    it contains a space character. If the given argument is None the default
    value is returned.

    Parameters
    ----------
    value: string
        Given input value. Expected to be of type string
    default_value: string
        Default value that is returned in case value is None

    Returns
    -------
    string
    """
    if value is None:
        return default_value
    if ' ' in str(value):
        return '\'' + str(value) + '\''
    else:
        return str(value)
7da84ae5e291a9ec86842b8d2df9d8e2519a5ce4
81,001
def merge_dicts(dict1, dict2):
    """Merge two dictionaries whose values are lists; values sharing a key are concatenated."""
    for key, value in dict2.items():
        dict1.setdefault(key, []).extend(value)
    return dict1
aa69aa1a20fa763d4c027b7989766462bdace402
81,002
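A usage sketch for `merge_dicts` above — note the values must be lists, and `dict1` is mutated in place:

d1 = {"a": [1], "b": [2]}
d2 = {"b": [3], "c": [4]}
print(merge_dicts(d1, d2))  # {'a': [1], 'b': [2, 3], 'c': [4]}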
def combine_dicts(dict1, dict2, which_values_are_key=1):
    """
    Suppose you have two dicts with a common key that you want to 'factor
    out' to form a new dict where one of the parent's values is the new key
    and the values are lists of the other dict's values.

    There is probably a concise name for this operation, but to be clear I'll
    give the concrete example from this application: Suppose you have been
    reading through a flat text file. Reading separate lines at separate
    times, you discover a bunch of mappings from tag => score and
    tag => sequence. The tag is just an incidental common key though -- we
    care literally not at all about S_000001 for our purposes. What we really
    want and need is to be able to get 'all scores for a certain sequence.'
    """
    if which_values_are_key == 2:
        return combine_dicts(dict2, dict1, which_values_are_key=1)
    combined = {}
    for tag in dict2.keys():
        if dict1[tag] in combined:
            combined[dict1[tag]].append(dict2[tag])
        else:
            combined[dict1[tag]] = [dict2[tag]]
    return combined
78a1826981109a6abb7e67f31c90bcdf1810f06f
81,004
def combinedJunctionDist(dist_0, dist_1):
    """Computes the combined genomic distance of two splice junction ends
    from the closest annotated junctions. In essence, it is finding the
    size indel that could have created the discrepancy between the
    reference and transcript junctions.

    Examples ('|' character represents end of exon):

        Reference:    ----->|          |<-----
        Transcript:     ----->|      |<-----
            dist_0 = -2, dist_1 = +2, combined dist = 4

        Reference:    ----->|          |<-----
        Transcript:   ----->|        |<-----
            dist_0 = 0, dist_1 = +2, combined dist = 2

        Reference:    ----->|          |<-----
        Transcript:    ----->|            |<-----
            dist_0 = +1, dist_1 = +4, combined dist = 3
    """
    # If dist_0 and dist_1 have different signs, the combined distance is
    # the sum of their absolute values
    if dist_0 * dist_1 <= 0:
        combined_dist = abs(dist_0) + abs(dist_1)
    else:
        combined_dist = abs(abs(dist_0) - abs(dist_1))
    return combined_dist
af2a0b0d880f29afb097215a55dfef8426c57556
81,005
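Worked calls of `combinedJunctionDist` above, matching the docstring examples:

print(combinedJunctionDist(-2, 2))  # 4: opposite signs, so the distances add
print(combinedJunctionDist(0, 2))   # 2
print(combinedJunctionDist(1, 4))   # 3: same sign, so the distances partially cancel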
def function_wrapper(func, arg, kwarg, x):
    """
    Wrapper function for Python functions.

    To be used with partial:
        `obj = partial(function_wrapper, func, arg, kwarg)`
    This allows then calling obj with only the non-masked parameters:
        `fx = obj(x)`
    which translates to:
        `fx = func(x, *arg, **kwarg)`

    Parameters
    ----------
    func : callable
        Python function to be called `func(x, *arg, **kwarg)`
    arg : iterable
        Arguments passed to `func`
    kwarg : dictionary
        Keyword arguments passed to `func`

    Returns
    -------
    float
        Output value calculated by `func`
    """
    return func(x, *arg, **kwarg)
099a4b2b9f2dc66d855f32c5c0f0ec6342a71ff6
81,007
import networkx as nx


def forest_str(graph, use_labels=True, sources=None, write=None):
    """
    Creates a nice utf8 representation of a directed forest

    Parameters
    ----------
    graph : nx.DiGraph | nx.Graph
        Graph to represent (must be a tree, forest, or the empty graph)

    use_labels : bool
        If True will use the "label" attribute of a node to display if it
        exists otherwise it will use the node value itself. Defaults to True.

    sources : List
        Mainly relevant for undirected forests, specifies which nodes to list
        first. If unspecified the root nodes of each tree will be used for
        directed forests; for undirected forests this defaults to the nodes
        with the smallest degree.

    write : callable
        Function to use to write to, if None new lines are appended to a list
        and returned. If set to the `print` function, lines will be written
        to stdout as they are generated. If specified, this function will
        return None. Defaults to None.

    Returns
    -------
    str | None :
        utf8 representation of the tree / forest

    Example
    -------
    >>> import networkx as nx
    >>> graph = nx.balanced_tree(r=2, h=3, create_using=nx.DiGraph)
    >>> print(forest_str(graph))
    ╙── 0
        ├─╼ 2
        │   ├─╼ 6
        │   │   ├─╼ 14
        │   │   └─╼ 13
        │   └─╼ 5
        │       ├─╼ 12
        │       └─╼ 11
        └─╼ 1
            ├─╼ 4
            │   ├─╼ 10
            │   └─╼ 9
            └─╼ 3
                ├─╼ 8
                └─╼ 7

    >>> graph = nx.balanced_tree(r=1, h=2, create_using=nx.Graph)
    >>> print(nx.forest_str(graph))
    ╟── 1
    ╎   ├── 2
    ╎   └── 0
    """
    printbuf = []
    if write is None:
        _write = printbuf.append
    else:
        _write = write

    if len(graph.nodes) == 0:
        _write("╙")
    else:
        if not nx.is_forest(graph):
            raise nx.NetworkXNotImplemented("input must be a forest or the empty graph")

        is_directed = graph.is_directed()
        succ = graph.succ if is_directed else graph.adj

        if sources is None:
            if is_directed:
                # use real source nodes for directed trees
                sources = [n for n in graph.nodes if graph.in_degree[n] == 0]
            else:
                # use arbitrary sources for undirected trees
                sources = sorted(graph.nodes, key=lambda n: graph.degree[n])

        seen = set()
        stack = []
        for idx, node in enumerate(sources):
            islast_next = idx == 0
            stack.append((node, "", islast_next))

        while stack:
            node, indent, islast = stack.pop()
            if node in seen:
                continue
            seen.add(node)

            # Notes on available box and arrow characters
            # https://en.wikipedia.org/wiki/Box-drawing_character
            # https://stackoverflow.com/questions/2701192/triangle-arrow
            if not indent:
                # Top level items (i.e. trees in the forest) get different
                # glyphs to indicate they are not actually connected
                if islast:
                    this_prefix = indent + "╙── "
                    next_prefix = indent + "    "
                else:
                    this_prefix = indent + "╟── "
                    next_prefix = indent + "╎   "
            else:
                # For individual forests distinguish between directed and
                # undirected cases
                if is_directed:
                    if islast:
                        this_prefix = indent + "└─╼ "
                        next_prefix = indent + "    "
                    else:
                        this_prefix = indent + "├─╼ "
                        next_prefix = indent + "│   "
                else:
                    if islast:
                        this_prefix = indent + "└── "
                        next_prefix = indent + "    "
                    else:
                        this_prefix = indent + "├── "
                        next_prefix = indent + "│   "

            if use_labels:
                label = graph.nodes[node].get("label", node)
            else:
                label = node

            _write(this_prefix + str(label))

            children = [child for child in succ[node] if child not in seen]
            for idx, child in enumerate(children, start=1):
                islast_next = idx <= 1
                try_frame = (child, next_prefix, islast_next)
                stack.append(try_frame)

    if write is None:
        # Only return a string if the custom write function was not specified
        return "\n".join(printbuf)
15cbd5c82013323c5202bd735d887862336cfc0c
81,010
import json


def read_config(filename):
    """Read the JSON configuration file.

    Parameters
    ----------
    filename : str
        Path of the configuration file

    Returns
    -------
    dict
        Dataset glossary
    """
    with open(filename) as fobj:
        return json.load(fobj)
09d509f8062c073724da29b63c105a3c80404b72
81,011
def rotate_tuple(t, k):
    """
    >>> rotate_tuple((1, 2, 3), 0)
    (1, 2, 3)
    >>> rotate_tuple((1, 2, 3), 2)
    (3, 1, 2)
    """
    return t[k:] + t[0:k]
0de68624ea00ec681aa9e60647343ba63c328e96
81,012
def count_subset_sum_dp(arr, total):
    """Count subsets given sum by bottom-up dynamic programming.

    Time complexity: O(nm).
    Space complexity: O(nm).
    """
    n = len(arr) - 1
    T = [[0] * (total + 1) for _ in range(n + 1)]
    for a in range(n + 1):
        T[a][0] = 1
    for a in range(n + 1):
        for t in range(1, total + 1):
            if t < arr[a]:
                T[a][t] = T[a - 1][t]
            else:
                T[a][t] = T[a - 1][t] + T[a - 1][t - arr[a]]
    return T[-1][-1]
fbcd618152b65f0e456a8f5ade8680e9a08d1d4e
81,014
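A quick check of `count_subset_sum_dp` above. Note that when `a == 0` the recurrence reads `T[-1]` (the last row), which at that point is still all zeros except `T[-1][0] == 1`, so it doubles as the empty-prefix base case:

print(count_subset_sum_dp([2, 3, 5], 5))  # 2 -- the subsets are {2, 3} and {5}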
def get_neighbor(r, c):
    """Return the neighbors assuming no diag movement"""
    return [(r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)]
5deba30deb07979b39d330ef1891e877be284357
81,019
import math


def _compute_dcg(s, k):
    """
    A function to compute dcg

    :param s: sequence of ground truth in the rank order to use for
        calculating dcg
    :param k: top k at which to evaluate ndcg
    :return: dcg for this ordering of ground truth
    :rtype: numeric
    """
    dcg = 0.0
    for i in range(min(k, len(s))):
        dcg += (math.pow(2, s[i]) - 1) / math.log(i + 2, 2)
    return dcg
6732c5ea7bbee643e9c20968c43232fc6bb695cd
81,022
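A worked call of `_compute_dcg` above: with relevance scores [3, 2], the first term is (2**3 - 1)/log2(2) = 7 and the second is (2**2 - 1)/log2(3) ≈ 1.893:

print(_compute_dcg([3, 2], k=2))  # ≈ 8.893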
from pathlib import Path


def getHtml(fileName):
    """
    Read an HTML file and return its contents.

    Parameters
    ----------
    fileName : str
        Name of the file to read

    Returns
    -------
    html : str
        HTML source string
    """
    file = Path(Path.cwd()).joinpath('html').joinpath(fileName)
    html = file.read_text(encoding='utf-8')
    return html
e492e1275c4b4cb7df4a426170d1cef80a1d0857
81,027
import torch


def one_hot(targets, num_classes, dtype=torch.float32, device=None):
    """
    Encode the targets (a tensor of integers representing a class)
    as one hot encoding.

    Support target as N-dimensional data (e.g., 3D segmentation map).

    Equivalent to torch.nn.functional.one_hot for backward compatibility
    with pytorch 1.0

    Args:
        num_classes: the total number of classes
        targets: a N-dimensional integral tensor (e.g., 1D for
            classification, 2D for 2D segmentation map...)
        dtype: the type of the output tensor
        device: the device of the one-hot encoded tensor. If `None`,
            use the target's device

    Returns:
        a one hot encoding of a N-dimensional integral tensor
    """
    if device is None:
        device = targets.device

    nb_samples = len(targets)
    if len(targets.shape) == 1:
        # 1D target (e.g., classification); note the original condition
        # (`== 2`) would have built a 2D output for 2D targets, breaking
        # the scatter below.
        encoded_shape = (nb_samples, num_classes)
    else:
        # N-d target (e.g., segmentation map)
        encoded_shape = tuple([nb_samples, num_classes] + list(targets.shape[1:]))

    with torch.no_grad():
        encoded_target = torch.zeros(encoded_shape, dtype=dtype, device=device)
        encoded_target.scatter_(1, targets.unsqueeze(1), 1)
    return encoded_target
8a8b83b13965dc2cda429d4ac7f2e63f48164198
81,029
def pretty_date(dt):
    """
    Formats a datetime instance, which can be None, in the local TZ
    """
    if not dt:
        return "<unk date>"
    return dt.astimezone().strftime("%Y-%m-%d %I:%M%p %Z")
f372178122c82f53f0802da04d54eaaf1444e961
81,030
def slice_data(data, info, split):
    """Slice data according to the instances belonging to each split."""
    if split is None:
        return data
    elif split == 'train':
        return data[:info['train_len']]
    elif split == 'val':
        train_n = info['train_len']
        val_n = train_n + info['val_len']
        return data[train_n:val_n]
    elif split == 'test':
        val_n = info['train_len'] + info['val_len']
        test_n = val_n + info['test_len']
        return data[val_n:test_n]
7708e448521965cb76ec90788a0fe0a0f41c7491
81,031
def get_row_col(mouse_x, mouse_y):
    """
    Converts an x, y screen position into a row, col value.
    """
    # Note: the top row is row=0 (bottom row=2), left col is col=0 (right col=2)
    spacing_x = 86 + 8
    spacing_y = 98 + 5
    top_y = 50
    left_x = 50
    return (mouse_y - top_y) // spacing_y, (mouse_x - left_x) // spacing_x
25c97f69dc2b98a4c1655975b62c85dd27e5e0b5
81,037
def _get_bc_count(sample_name, bc_count, sample_run):
    """Retrieve barcode count for a sample

    :param sample_name: sample name
    :param bc_count: parsed option passed to application
    :param sample_run: sample run object

    :returns: barcode count or None
    """
    if isinstance(bc_count, dict):
        if sample_name in bc_count:
            return bc_count[sample_name]
        else:
            return bc_count.get("default", sample_run.get("bc_count", -1))
    else:
        return bc_count
bd9e958eaadb7104805fbb4e5b0e42c1568fe940
81,043
def groupby_asdf(df, grouped, idx=0):
    """
    Returns pandas DataFrame as a subset from df with given index idx
    in the grouped

    Parameters
    ----------
    df: pandas DataFrame
    grouped: pandas GroupBy object based on df
    idx: group index in grouped

    Returns
    -------
    pandas DataFrame as a subset from df in the grouped with a given index idx
    """
    start = grouped.groups[idx][0]
    end = grouped.groups[idx][-1]
    return df[start:end]
22d0660c2ff43eed294f6a185a11be2745ae4c0b
81,046
import re


def parse(str_input):
    """
    Parse a string in CEF format and return a dict with the header values
    and the extension data.
    """
    # Create the empty dict we'll return later
    values = dict()

    # This regex separates the string into the CEF header and the extension
    # data. Once we do this, it's easier to use other regexes to parse each
    # part.
    header_re = r'(.*(?<!\\)\|){,7}(.*)'
    res = re.search(header_re, str_input)
    if res:
        header = res.group(1)
        extension = res.group(2)

        # Split the header on the "|" char. Uses a negative lookbehind
        # assertion to ensure we don't accidentally split on escaped chars,
        # though.
        spl = re.split(r'(?<!\\)\|', header)

        # Since these values are set by their position in the header, it's
        # easy to know which is which.
        values["DeviceVendor"] = spl[1]
        values["DeviceProduct"] = spl[2]
        values["DeviceVersion"] = spl[3]
        values["DeviceEventClassID"] = spl[4]
        values["DeviceName"] = spl[5]
        if len(spl) > 6:
            values["DeviceSeverity"] = spl[6]

        # The first value is actually the CEF version, formatted like
        # "CEF:#". Ignore anything before that (like a date from a syslog
        # message). We then split on the colon and use the second value as
        # the version number.
        cef_start = spl[0].find('CEF')
        if cef_start == -1:
            return None
        (cef, version) = spl[0][cef_start:].split(':')
        values["CEFVersion"] = version

        # The ugly, gnarly regex here finds a single key=value pair,
        # taking into account multiple whitespaces, escaped '=' and '|'
        # chars. It returns an iterator of tuples.
        spl = re.findall(r'([^=\s]+)=((?:[\\]=|[^=])+)(?:\s|$)', extension)

        for i in spl:
            # Split the tuples and put them into the dictionary
            values[i[0]] = i[1]

        # Process custom field labels. Iterate over copies of the key lists
        # so we can delete entries without a "dict changed size" RuntimeError.
        for key in list(values.keys()):
            # If the key string ends with Label, replace it in the
            # appropriate custom field
            if key[-5:] == "Label":
                customlabel = key[:-5]
                # Find the corresponding customfield and replace with the label
                for customfield in list(values.keys()):
                    if customfield == customlabel:
                        values[values[key]] = values[customfield]
                        del values[customfield]
                        del values[key]

    # Now we're done!
    return values
d61ed71557f626c7dd3aaa2f67955164b1aacdd2
81,047
import importlib


def get_imported_module(module_name):
    """import module and return imported module"""
    return importlib.import_module(module_name)
15872bd8c3ba843de13851267e7f9c1be872f9a5
81,050
def _are_values_different(old_value, new_value):
    """Checks if the two values are different whilst treating a blank string
    the same as a None."""
    old_value = old_value if old_value != '' else None
    new_value = new_value if new_value != '' else None
    return old_value != new_value
16b47fd8fa95f58b8235990fd63fd1a088d2ac64
81,052
def levenshtein(s: str, t: str) -> int:
    """Levenshtein distance algorithm, implementation by Sten Helmquist.

    Copied from https://davejingtian.org/2015/05/02/python-levenshtein-distance-choose-python-package-wisely/
    """
    # degenerate cases
    if s == t:
        return 0
    if len(s) == 0:
        return len(t)
    if len(t) == 0:
        return len(s)

    # create two work vectors of integer distances
    v0 = []
    v1 = []

    # initialize v0 (the previous row of distances)
    # this row is A[0][i]: edit distance for an empty s
    # the distance is just the number of characters to delete from t
    for i in range(len(t) + 1):
        v0.append(i)
        v1.append(0)

    for i in range(len(s)):
        # calculate v1 (current row distances) from the previous row v0
        # first element of v1 is A[i+1][0]
        # edit distance is delete (i+1) chars from s to match empty t
        v1[0] = i + 1
        # use formula to fill in the rest of the row
        for j in range(len(t)):
            cost = 0 if s[i] == t[j] else 1
            v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
        # copy v1 (current row) to v0 (previous row) for next iteration
        for j in range(len(t) + 1):
            v0[j] = v1[j]

    return v1[len(t)]
4e1d192756808c4b9e562fb60b87fca74a4f4b68
81,055
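A quick sanity check on `levenshtein` above with the classic example (substitute k→s and e→i, then insert g):

print(levenshtein("kitten", "sitting"))  # 3
print(levenshtein("flaw", "lawn"))       # 2 (delete f, append n)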
def test_lyrics(lyrics):
    """
    Test lyrics downloaded to detect the licensing restriction string:
    'We are not in a position to display these lyrics due to licensing
    restrictions. Sorry for the inconvinience.'
    Also test lyrics by looking for multiple new line characters.
    Returns booleans accordingly
    """
    if not lyrics:
        return False
    # The misspelling 'inconvinience' is intentional: it matches the
    # upstream text verbatim.
    license_str1 = 'We are not in a position to display these lyrics due to licensing restrictions. Sorry for the inconvinience.'
    license_str2 = 'display these lyrics due to licensing restrictions'
    license_str3 = 'We are not in a position to display these lyrics due to licensing restrictions.\nSorry for the inconvinience.'
    # If any of the license strings is found in the downloaded lyrics, or
    # they contain fewer than 4 new line characters
    if (license_str1 in lyrics or license_str2 in lyrics
            or license_str3 in lyrics or lyrics.count('\n') < 4):
        return False
    return True
366720a2e09d2c9210123e8173cdb7b02ca9854e
81,057
import torch


def _transform_sample(
        data: torch.Tensor, normalize: bool, transpose: bool
) -> torch.Tensor:
    """
    Optionally normalize and transpose a torch.Tensor

    :param data: Input sample
    :type data: torch.Tensor
    :param normalize: To normalize sample or not
    :type normalize: bool
    :param transpose: to transpose sample or not
    :type transpose: bool
    :return: Modified sample
    :rtype: torch.Tensor
    """
    if transpose:
        data = data.T
    if normalize:
        data = torch.nn.functional.normalize(data)
    return data
1d95b62bec596643f2110dab7dd792fe84a3c117
81,059
from typing import Dict


def _init_stats() -> Dict:
    """Create a statistics data structure.

    :return: A new statistics data structure.
    """
    return {
        'count_scrubbed': 0,
        'count_not_found': 0,
        'count_partial': 0,
    }
71eec43bc1ffcaa65235a0e0bfaf71131a1bbf3d
81,060
def backwards(page_or_revision, data):
    """Map from three-option tag_filtering to boolean and_filtering.

    If tag_filtering was "all" (include pages that match all topic tags),
    set and_filtering to True. Otherwise, set it to False. The backwards
    field state doesn't support the tag_filtering "none" case.
    """
    data["and_filtering"] = data.pop("tag_filtering", None) == "all"
    return data
81350dbc0eaf34594953d0fa09a4ef9078f1d9cc
81,070
def buildLine(mesh, matrix, vertex_colors, uv_textures, face_index, idx, verti):
    """
    Returns a list containing a vertex (3 tuple of floats), normal (3 tuple
    of floats), vertex color (3 tuple of floats) and uv (2 tuple of floats).
    """
    line_data = []
    # Append vertex
    line_data.append((matrix * mesh.vertices[verti].co)[:])
    # Vertex normal
    line_data.append((matrix.to_quaternion() * mesh.vertices[verti].normal)[:])
    # Vertex color
    if vertex_colors is None:
        line_data.append([0.5, 0.5, 0.5])
    else:
        line_data.append(getattr(vertex_colors[face_index], 'color%d' % (idx + 1))[:])
    # UV
    if uv_textures is None:
        line_data.append([0, 0])
    else:
        line_data.append(uv_textures[face_index].uv_raw[idx * 2:idx * 2 + 2])
    return line_data
ad71547e605ef1beb5b6ec7665824764d7a76207
81,077
def validate_handler_overrides(process_class, handler_overrides, ctx):  # pylint: disable=inconsistent-return-statements,unused-argument
    """Validator for the `handler_overrides` input port of the `BaseRestartWorkChain`.

    The `handler_overrides` should be a dictionary where keys are strings
    that are the name of a process handler, i.e. an instance method of the
    `process_class` that has been decorated with the `process_handler`
    decorator. The values should be boolean.

    .. note:: the normal signature of a port validator is `(value, ctx)` but
        since for the validation here we need a reference to the process
        class, we add it and the class is bound to the method in the port
        declaration in the `define` method.

    :param process_class: the `BaseRestartWorkChain` (sub) class
    :param handler_overrides: the input `Dict` node
    :param ctx: the `PortNamespace` in which the port is embedded
    """
    if not handler_overrides:
        return

    for handler, override in handler_overrides.get_dict().items():
        if not isinstance(handler, str):
            return 'The key `{}` is not a string.'.format(handler)

        if not process_class.is_process_handler(handler):
            return 'The key `{}` is not a process handler of {}'.format(handler, process_class)

        if not isinstance(override, bool):
            return 'The value of key `{}` is not a boolean.'.format(handler)
de1d5530d6cad2f967fcbb7343cb918615a26185
81,090
from typing import List
from typing import Optional


def find_object(objects: List[object], **filters) -> Optional[object]:
    """
    Pass a list of objects and filters as kwargs to get the first matching
    record. If no filters are passed, return the first object in the list.

    Args:
        objects: list of objects to iterate and filter from
        **filters: kwargs to be used for filtering as key:value pairs

    Returns:
        Found record obj or None

    Examples:
        >>> class A:
        ...     def __init__(self, var1, var2, var3=False):
        ...         self.var1, self.var2, self.var3 = var1, var2, var3
        ...     def __repr__(self):
        ...         return f'{self.var1}-{self.var2}'
        ...
        >>> find_object([A('test', 1), A('test2', 2)], var1='test', var2=1)
        test-1
        >>> find_object([A('test', 1), A('test2', 2)], var1='test', var2=2) is None
        True
        >>> find_object([{}])
        {}
    """
    for rec in objects:
        if all([getattr(rec, key) == value for key, value in filters.items()]):
            return rec
4664d4c6a2a7ba53171b4bc043a177cb4369c99e
81,109
from functools import reduce


def product(arr):
    """Return the product of a sequence of elements of <arr>."""
    # Seed with 1 so an empty sequence yields the empty product instead of
    # raising TypeError.
    return reduce(lambda a, b: a * b, arr, 1)
e752c06909ee06ae7f9ca216db89c679f937180f
81,110
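A quick check of `product` above; the second call relies on the initializer added in this version:

print(product([5, 3, 7]))  # 105
print(product([]))         # 1, the empty product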
def get_frequencies(column):
    """
    The purpose of this function is to get the count for how many times a
    value appears in a given column.

    Attributes:
        - column(list): a list of values in a column which will be checked
          for values and frequencies.

    Returns:
        - values, counts (tuple of lists): a tuple containing the values
          within a column and their associated frequencies.
    """
    values = []
    counts = []
    column.sort()  # note: sorts the input list in place
    for value in column:
        if value not in values:
            values.append(value)
            counts.append(1)
        else:
            counts[-1] += 1
    return values, counts
fe805af74ef41dba16aae277a907fbd57ba9a40e
81,115
def count_refs(pileup_base_string):
    """
    Given a pileup string from samtools mpileup output, count the number of
    reads that support the reference allele in that position.
    """
    ref_count = 0
    i = 0
    while i < len(pileup_base_string):
        if pileup_base_string[i] in [".", ","]:
            ref_count += 1
        # if the current character is '^', then the next
        # character will be a PHRED quality, so skip it.
        elif pileup_base_string[i] in ["^"]:
            i += 1
        i += 1
        # for other things, like insertions, deletions, and
        # ends of reads, there will not be any '.' or ','
        # characters to confuse us, so we don't need to
        # actually parse these out since all we care about
        # is the number of ref alleles
    return ref_count
abed60338e5fe45b6fd5ec7153f171dd0f40dace
81,120
def get_linear_lattice_interpolation_fn(lattice_sizes, monotonicities,
                                        output_min, output_max):
    """Returns function which does lattice interpolation.

    Returned function matches lattice_layer.LinearInitializer with
    corresponding parameters.

    Args:
        lattice_sizes: list or tuple of integers which represents lattice
            sizes.
        monotonicities: monotonicity constraints.
        output_min: minimum output of linear function.
        output_max: maximum output of linear function.

    Returns:
        Function which takes d-dimension point and performs lattice
        interpolation assuming lattice weights are such that lattice
        represents linear function with given output_min and output_max. All
        monotonic dimensions of this linear function contribute with the same
        weight regardless of the number of vertices per dimension. All non
        monotonic dimensions have weight 0.0.
    """

    def linear_interpolation_fn(x):
        """Linear along monotonic dims and 0.0 along non monotonic."""
        result = output_min
        num_monotonic_dims = len(monotonicities) - monotonicities.count(0)
        if num_monotonic_dims == 0:
            local_monotonicities = [1] * len(lattice_sizes)
            num_monotonic_dims = len(lattice_sizes)
        else:
            local_monotonicities = monotonicities

        weight = (output_max - output_min) / num_monotonic_dims
        for i in range(len(x)):
            if local_monotonicities[i]:
                result += x[i] * weight / (lattice_sizes[i] - 1.0)
        return result

    return linear_interpolation_fn
032f945931530b530aa25333f77b0172cf89c629
81,121
def s3_key_to_link(region, bucket, key):
    """generates public link of s3 object"""
    return "https://s3-{0}.amazonaws.com/{1}/{2}".format(region, bucket, key)
b2c2ed298123dc0516b7052e4c790041d4cf02c4
81,122
def sign_non_zero(x):
    """
    returns the sign of the input variable as 1 or -1

    arbitrarily sign_non_zero(0) = 1
    """
    return 2 * (x >= 0) - 1
4c28bee24bab6e9cbe912998960286e714dfa2bc
81,126
def activedb(dbdata, active):
    """Reduce database to active entries

    Parameters
    ----------
    dbdata : List[Dict[]]
        List of database entries
    active : List
        List of stems in blacklist directory

    Returns
    -------
    List[Dict[active entries database schema]]
    """
    out = [e for e in dbdata if e['ip'] in active]
    return out
51df79179b6ad50a8fd25a06f2697b3efb7efbbb
81,127
import random


def take_sample(population, k):
    """Returns a list of k randomly chosen elements from population."""
    result, n, k = [], k, min(k, len(population))
    result = random.sample(population, k)
    while len(result) < n:
        result += random.sample(population, min(k, n - len(result)))
    return result
46f30c9db46bac912d756db4a9ce3f87b8dceaa5
81,132
def get_interpolation_range(sidelen, n, i):
    """
    Finds the range of indices for interpolation in Robinson Projection

    Input
        sidelen: the number of items on both sides of i, including i in the left
        n: the total number of items
        i: the index of the largest item smaller than the value

    Output
        ileft: the left index of the value (inclusive)
        iright: the right index of the value (noninclusive)
    """
    if i < sidelen:
        ileft = max([0, i - sidelen + 1])
    else:
        ileft = i - sidelen + 1

    if i >= n - sidelen:
        iright = min(n, i + sidelen + 1)
    else:
        iright = i + sidelen + 1

    return ileft, iright
c217615d892bb1490824486dec405341c14d3c64
81,137
def format_as_code_block(text_to_wrap: str) -> str:
    """
    Wrap the text in a JIRA code block.

    Args:
        text_to_wrap: The text to wrap.

    Returns:
        A JIRA formatted code block.
    """
    return "".join(["{code:java}", "{}".format(text_to_wrap), "{code}"])
53dc980acabbf6c9fde0d16841d66433196f8a38
81,138
def fixupResultForD978(resultStr, output_d978fa):
    """
    For programs that require the strict output format of dump978 or
    dump978-fa, this routine will edit the result string and return a new
    string with the desired output.

    All strings contain the message itself. The 'rs=' reed-solomon errors
    are printed only if there are any. Only a single total number is
    printed. Since FISB-978 stops looking for FISB errors if it doesn't
    have to process all the packets, only the errors actually found are
    used.

    If this is dump978-fa format, the time and rssi values will always be
    included. For original dump-978, these are always missing.

    Args:
        resultStr (str): Usual FISB-978 result string.
        output_d978fa (bool): True if the output is to be in dump978-fa
            format. Otherwise will be in dump978 format.

    Returns:
        str: Edited result string in proper format.
    """
    parts = resultStr.split(';')

    # Put reed-solomon only if there are any errors.
    # For FIS-B, stop counting for amounts over 90, these are message parts
    # that don't need to be checked.
    rsStr = parts[1].split('/')[1]
    if ':' in rsStr:
        rsSplit = rsStr.split(':')
        totalErrs = 0
        for i in rsSplit:
            x = int(i)
            if x > 90:
                break
            totalErrs += x
    else:
        totalErrs = int(rsStr)

    # Start building result string.
    newResultStr = parts[0]
    if totalErrs > 0:
        newResultStr += ';rs=' + str(totalErrs)

    # Add d978-fa only items
    if output_d978fa:
        rssiStr = parts[2].split('/')[1]
        tiStr = parts[3][2:]
        newResultStr += ';rssi=' + rssiStr + ';t=' + tiStr

    # Always add ';' to end
    newResultStr += ';'

    return newResultStr
45609dc9223649284750ca9d889532e202edcd9f
81,139
import json


def insert_organization(cursor, info):
    """Register organization information.

    Args:
        cursor (mysql.connector.cursor): cursor
        info (Dict): organization information in JSON form

    Returns:
        int: organization_id
    """
    # Execute the insert
    cursor.execute(
        'INSERT INTO organization ( organization_name, additional_information )'
        ' VALUES ( %(organization_name)s, %(additional_information)s )',
        {
            'organization_name': info['organization_name'],
            'additional_information': json.dumps(info['additional_information'])
        }
    )
    # Return the ID of the row just inserted
    return cursor.lastrowid
092279b329831ddd5700fe6fcb0cb26aaafd552e
81,140
def isRXCY(s):
    """
    >>> isRXCY('RRCC233')
    False
    >>> isRXCY('R22C22')
    True
    """
    try:
        idx0 = s.index('R')
        idx1 = s.index('C')
    except ValueError:
        return False
    if idx0 != -1 and idx1 != -1:
        return any([str(i) in s[idx0 + 1:idx1] for i in range(10)])
    return False
367388cc9257d37a83de2a26f849debd414ee9a8
81,141
def apply_mapping(mask, mapping):
    """Applies the mapping to obtain a frequency aligned mask.

    Args:
        mask: Permuted mask with shape (K, F, ...).
        mapping: Reverse mapping with shape (K, F).

    >>> np.random.seed(0)
    >>> K, F, T = 3, 5, 6
    >>> reference_mask = np.zeros((K, F, T), dtype=np.int8)
    >>> def viz_mask(mask: np.ndarray):
    ...     mask = np.einsum('KFT->FKT', mask).astype(str).tolist()
    ...     for mask_f in mask:
    ...         print(' '.join([' '.join(m) for m in mask_f]))
    >>> reference_mask[0, :, :2] = 1
    >>> reference_mask[1, :, 2:4] = 1
    >>> reference_mask[2, :, 4:] = 1
    >>> viz_mask(reference_mask)
    1 1 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 1
    1 1 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 1
    1 1 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 1
    1 1 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 1
    1 1 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 1
    >>> mapping = sample_random_mapping(K, F)
    >>> mask = apply_mapping(reference_mask, mapping)
    >>> viz_mask(mask)
    0 0 0 0 1 1 0 0 1 1 0 0 1 1 0 0 0 0
    0 0 0 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0
    1 1 0 0 0 0 0 0 0 0 1 1 0 0 1 1 0 0
    0 0 0 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0
    0 0 0 0 1 1 0 0 1 1 0 0 1 1 0 0 0 0

    Test against a loopy implementation of apply mapping

    >>> def apply_mapping_loopy(mask, mapping):
    ...     _, F = mapping.shape
    ...     aligned_mask = np.zeros_like(mask)
    ...     for f in range(F):
    ...         aligned_mask[:, f, :] = mask[mapping[:, f], f, :]
    ...     return aligned_mask
    >>> mask = apply_mapping_loopy(reference_mask, mapping)
    >>> viz_mask(mask)
    0 0 0 0 1 1 0 0 1 1 0 0 1 1 0 0 0 0
    0 0 0 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0
    1 1 0 0 0 0 0 0 0 0 1 1 0 0 1 1 0 0
    0 0 0 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0
    0 0 0 0 1 1 0 0 1 1 0 0 1 1 0 0 0 0
    """
    K, F = mapping.shape
    assert K < 20, (K, mapping.shape)
    assert mask.shape[:2] == mapping.shape, (mask.shape, mapping.shape)
    return mask[mapping, range(F)]
7dc3192a97354902750eb36357f5d02b2a081274
81,142
def values_from_syscalls(line):
    """
    Utility method to obtain the system call count values from the relative
    line in the final analysis output text file.

    :param line: string from log file
    :return: int corresponding to system call frequency
    """
    return int(line.strip().split('\t')[1])
f7c7e9f57c5fc799b61fc37aa5484e0a07fbb04f
81,143
def _calculate_total_dispatchable_mw(case):
    """
    Calculates the total available MW for dispatch.

    This is equal to the total remaining capacity of all in-service,
    non-swing generators.

    :param case: The :class:`Case` that is synchronized with the current
        loaded case.
    :type case: :class:`Case`
    :return: The total remaining dispatchable capacity in the case, in MW.
    :rtype: float
    """
    total_dispatchable_mw_max = 0
    swing = case.swing
    for machine in case.machines:
        if machine.status and machine.has_capacity:
            if machine.bus_number != swing.number:
                total_dispatchable_mw_max += machine.max_mw_output
    return total_dispatchable_mw_max
7f26723e186c67f797bddc2e7c11b4313437261f
81,145
def _check_shape_contain_zero(shp):
    """Check whether shape contains 0"""
    if isinstance(shp, int):
        return shp == 0
    if isinstance(shp, (list, tuple)):
        for s in shp:
            if s == 0:
                return True
    return False
235fc26c0a7e1a3db5f7d1c986b5e5ef9526864a
81,146
import math


def ray_param(v_int: float, theta: float) -> float:
    """
    Calculates the ray parameter P.

    :param v_int: Interval velocity
    :param theta: Angle of incidence (deg)
    :return: p: Ray parameter
    """
    # Cast inputs to floats
    theta = float(theta)
    v = float(v_int)
    p = math.sin(math.radians(theta)) / v  # ray parameter calculation
    return p
857f5c38e8591f126f257c11b71787013ae790e0
81,152
def get_searchpath(golib, mode):
    """Returns the search path for the given mode"""
    # The attribute name must match the one assigned in emit_library_actions
    return getattr(golib, mode + "_searchpath")
514edf6858e87ba38a17800d5609819aa68da28a
81,155
def intorNone(x):
    """Return int(x) if x is not None."""
    return int(x) if x is not None else x
9d2897e053c38950eed3b5928674a422f22f0ead
81,156
def tagseq_to_entityseq(tags: list) -> list:
    """Convert tags format:
        ["B-LOC", "I-LOC", "O", "B-PER"] -> [(0, 2, "LOC"), (3, 4, "PER")]
    """
    entity_seq = []
    tag_name = ""
    start, end = 0, 0
    for index, tag in enumerate(tags):
        if tag.startswith("B-"):
            if tag_name != "":
                end = index
                entity_seq.append((start, end, tag_name))
            tag_name = tag[2:]
            start = index
        elif tag.startswith("I-"):
            if tag_name == "" or tag_name == tag[2:]:
                continue
            else:
                end = index
                entity_seq.append((start, end, tag_name))
                tag_name = ""
        else:  # "O"
            if tag_name == "":
                continue
            else:
                end = index
                entity_seq.append((start, end, tag_name))
                tag_name = ""
    # Flush an entity still open at the end of the sequence; without this,
    # a trailing entity (the "B-PER" in the docstring example) is dropped.
    if tag_name != "":
        entity_seq.append((start, len(tags), tag_name))
    return entity_seq
1fb1ce7ff7266a961ba84188bcbf9df2c92ba650
81,158
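With the trailing-entity flush added above, the docstring example for `tagseq_to_entityseq` now round-trips as documented:

tags = ["B-LOC", "I-LOC", "O", "B-PER"]
print(tagseq_to_entityseq(tags))  # [(0, 2, 'LOC'), (3, 4, 'PER')]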
from collections import Counter


def filter_counts(list_of_elements, minimum):
    """
    Filter out elements in a list that are not observed a minimum of times

    :param list_of_elements: a list of for example positions
    :param minimum: the minimum number of times a value must be observed
    :type list_of_elements: list
    :type minimum: int

    :returns: a dictionary of value:observation key value pairs
    """
    counts = Counter(list_of_elements)
    lookup = {}
    for k, v in counts.items():
        if v >= minimum:
            lookup[k] = v
    return lookup
c630f223ec506190e7727fd3a13a08ec3e36ada4
81,165
def create_burger(conn, burger):
    """
    Create a new burger within the table

    :param conn:
    :param burger:
    :return burger_id:
    """
    sql_cmd = ''' INSERT INTO burgers(name, availability)
                  VALUES(?,?) '''
    cursor = conn.cursor()
    cursor.execute(sql_cmd, burger)
    return cursor.lastrowid
112bda69a2b17bf9e5880b782b6b71982e59d4bd
81,172
def shift_by(l, n):
    """Convenience function to shift a list by a number of steps.

    If ``n`` is positive it shifts to the left.

    Args:
        l (list): list to be shifted
        n (int): size of the shift
    """
    return l[n:] + l[:n]
35c8af04b302cafda92b979fa998b5f740d0655d
81,176
def get_data_mod(data_type, data_name):
    """Get the pythondata-{}-{} module or raise a useful error message."""
    imp = "import pythondata_{}_{} as dm".format(data_type, data_name)
    try:
        l = {}
        exec(imp, {}, l)
        dm = l['dm']
        return dm
    except ImportError as e:
        raise ImportError("""\
pythondata-{dt}-{dn} module not installed! Unable to use {dn} {dt}.
{e}

You can install this by running;
 pip3 install git+https://github.com/litex-hub/pythondata-{dt}-{dn}.git
""".format(dt=data_type, dn=data_name, e=e))
9462d619007665f9e6e5bf0dae609003ef7e3d42
81,178
def read_clustering(filename):
    """
    Read the clustering in

    :param filename: The cluster file to read
    :returns: A list of lists of active site numbers
    """
    clusters = []
    cluster = []
    with open(filename, 'r') as fp:
        for line in fp:
            line = line.strip()
            if line == '':
                continue
            if line.startswith('---'):
                continue
            if line.startswith('Cluster'):
                if len(cluster) > 0:
                    clusters.append(cluster)
                cluster = []
            else:
                cluster.append(int(line))
    if len(cluster) > 0:
        clusters.append(cluster)
    return clusters
23cc0c9040cc45dadd05d3aba39ab385ec4a493f
81,183
def huffman_decoding(encoded_str, root):
    """Huffman decoding algorithm

    Args:
        encoded_str: encoded string
        root: root Node for Huffman Binary Tree

    Returns:
        decoded_str: string decoded from the sequence
    """
    decoded = []
    node = root
    for e in encoded_str:
        if e == '1':
            node = node.get_right_child()
        elif e == '0':
            node = node.get_left_child()
        if node.is_leaf():
            decoded.append(node.get_letter())
            node = root
    decoded = ''.join([e for e in decoded])
    return decoded
f8f6e9c897206b8db65d27652ece7754edec0d1d
81,187
import hashlib
import pathlib
from typing import Literal


def _check_file_ready(file: pathlib.Path, md5_checksum: str,
                      open_mode: Literal["r", "rb"]) -> bool:
    """Checks if the file is ready to use (exists and has the right MD5 checksum)

    Args:
        file: path to the file one wants to check
        md5_checksum: expected MD5 checksum
        open_mode: how the file should be opened

    Returns:
        true, if the file exists and has the right checksum
    """
    if not file.exists():
        return False
    with open(file, open_mode) as f:
        checksum = hashlib.md5(f.read()).hexdigest()
    return checksum == md5_checksum
5711f64aa10fc73e6dc0829c2a948a667e08f768
81,190
import logging


def validateHeartRate(input, patientDict):
    """
    Validates patient hr input, checking that fields are proper and exist

    :returns: -1 if not successful, 1 if successful
    """
    if not isinstance(input, dict):
        logging.error("input not type dict")
        return -1
    if "patient_id" not in input.keys():
        logging.error("missing patient id")
        return -1
    if "heart_rate" not in input.keys():
        logging.error("missing heart rate")
        return -1
    if input["patient_id"] not in patientDict.keys():
        logging.error("patient not initialized")
        return -1
    try:
        if float(input["heart_rate"]) < 0:
            logging.error("invalid hr")
            return -1
    except (ValueError, TypeError):
        logging.error("non numeric hr")
        return -1
    return 1
c7d78250bbd6a2121dff1f784099aa5ef7127541
81,192
def types_args_one_port(port, formatter):
    """
    Extract from a port:
      1. a list of "type arg"s
      2. a list of args

    :param port:
    :param formatter: a string formatter with {0} and/or {1} where {0} is
        the port name, and {1} is an arg ID.
    :return:
    """
    types_args = []
    args = []
    for i in range(len(port.argtypes)):
        arg = formatter.format(port.name, i)
        args.append(arg)
        types_args.append("%s %s" % (port.argtypes[i], arg))
    return types_args, args
c10a379aab76ff2101f4353c5421ede62c1473cb
81,199
def double_center(mat):
    """Double center a 2d array.

    Args:
        mat (ndarray): 2d numpy array

    Returns:
        mat (ndarray): double-centered version of input
    """
    if len(mat.shape) != 2:
        raise ValueError("Array should be 2d")
    # keepdims ensures that row/column means are not incorrectly broadcast
    # during subtraction
    row_mean = mat.mean(axis=0, keepdims=True)
    col_mean = mat.mean(axis=1, keepdims=True)
    grand_mean = mat.mean()
    return mat - row_mean - col_mean + grand_mean
3ee3106c2c1ee94055c3dd9e0a9ac2adbdbcfee7
81,201
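A quick check of `double_center` above: after double centering, every row and column of the result sums to (numerically) zero:

import numpy as np

mat = np.array([[1.0, 2.0], [3.0, 5.0]])
centered = double_center(mat)
print(np.allclose(centered.sum(axis=0), 0))  # True
print(np.allclose(centered.sum(axis=1), 0))  # True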
def _cross2d(x, y):
    """Cross product in 2D."""
    return x[:, :, 0] * y[:, :, 1] - x[:, :, 1] * y[:, :, 0]
e6a26cc6aee267f562d3493eb8309035388bf7b7
81,202
def get_file_subset(path_gen, max_number):
    """
    Get a subset of files from a generator

    :param path_gen: pathlib.Path.glob
    :param max_number: int
    :return: list of pathlib.Path objects
    """
    filelist = []
    while len(filelist) < max_number:
        try:
            next_path = next(path_gen)
            if next_path.is_file():
                filelist.append(next_path)
        except StopIteration:
            break
    return filelist
207fb58c54a9a8382ecf1f67e519d017dfd59464
81,206
from typing import Callable
from typing import Tuple
from typing import Any


def recurse_eval(path: str, data: dict, fn: Callable) -> Tuple[str, Any]:
    """
    Given a `path` such as `a.b.0.split(' ')` this function traverses the
    `data` dictionary using the path, stopping whenever a key cannot be
    found in the `data`. `fn` is then applied to the extracted data and the
    result is returned along with the part of the path which was traversed.

    In the following example, `a.b.0` is identified as the path to return
    since `.split()` is not an item in `data`.

    >>> recurse_eval(
    ...     path="a.b.0.split(' ')",
    ...     data={"a": {"b": [{"$eval": "'hey ' * 2"}]}},
    ...     fn=lambda node, _: eval(node["$eval"]) if "$eval" in node else node
    ... )
    ('a.b.0', 'hey hey ')

    Parameters
    ----------
    path
        The path to fetch from in the data
    data
        Dictionary which should be traversed using the path
    fn
        function to call with the fetched data as parameter

    Returns
    -------
    Tuple[str, Any]
        The path and the value after applying the `fn`
    """
    tmp = data
    current_path = []
    path = path.replace("[", ".[")
    for key in path.split("."):
        original_key = key
        if "[" in key:
            key = key.replace("[", "").replace("]", "").replace('"', "")
        try:
            tmp = tmp[key]
            current_path.append(original_key)
        except TypeError:
            try:
                tmp = tmp[int(key)]
                current_path.append(original_key)
            except ValueError:
                break
        except Exception:
            # Any other lookup failure (e.g. KeyError) ends the traversal.
            break
    return ".".join(current_path).replace(".[", "["), fn(tmp, data)
636661654f245869b045a19cf1750d73514e9a28
81,208
from typing import List
from pathlib import Path


def do_single_check(command: str, relevant_files: List[Path],
                    command_check_mapping) -> bool:
    """Do a single check requested by a command."""
    check = dict(command_check_mapping)[command]
    if command == 'format':
        success = check(relevant_files, False)
    else:
        success = check(relevant_files)
    if not success:
        print('ERROR: %s failed, see errors above.' % check.__name__)
    return success
f02c5befefe6b8bbecf95509d1d0c813ad67544a
81,209
def format_oauth(oauth_token: str) -> str:
    """
    Format an oauth token for IRC auth.
    """
    return oauth_token if oauth_token.startswith('oauth:') else f'oauth:{oauth_token}'
ae4b2d22530c16f13b4ec4a0140afd0a134d26f9
81,214
def setup_none(lvl):
    """Set up default, empty smoother."""
    def smoother(A, x, b):
        pass
    return smoother
9c4c91b6c6dd98436932442d595b8fb06e83de5e
81,215
import unicodedata


def normalize(tag):
    """Normalize a single tag: remove non valid chars, lower case all."""
    tag_stripped = tag.strip()
    value = unicodedata.normalize("NFKD", tag_stripped.lower())
    value = value.encode('ascii', 'ignore').decode('utf-8')
    return value
bad11bc517d971bf71e0a4cef9df5cc2a388f8cb
81,217
def formula_has_multi_any_bfunc(formula, bfunc_set):
    """Returns True if the total number of times any of the provided basis
    functions appear in the formula is more than once per normalization.

    :formula: str
    :bfunc_set: list of str
    :returns: bool
    """
    equivalents = formula.count("+") + 1
    instances = 0
    for b in bfunc_set:
        instances += formula.count(b)
    return instances / equivalents > 1
aef4fd2cabf41d1eaa3c1004a7d0f0ae9a054047
81,221
def groups_balanced(arg):
    """
    Match [, {, and ( for balance

    >>> groups_balanced("(a) and (b)")
    True
    >>> groups_balanced("((a) and (b))")
    True
    >>> groups_balanced("((a) and (b)")
    False
    >>> groups_balanced(" [a] and [b] ")
    True
    >>> groups_balanced("((a) and [(b)])")
    True
    >>> groups_balanced("((a) and [(b))]")
    False
    """
    arg = arg.strip()
    open_list = ["(", "[", "{"]
    close_list = [")", "]", "}"]
    stack = []
    for i in arg:
        if i in open_list:
            stack.append(i)
        elif i in close_list:
            pos = close_list.index(i)
            if (len(stack) > 0) and (open_list[pos] == stack[len(stack) - 1]):
                stack.pop()
            else:
                return False
    return len(stack) == 0
cef90251e5dfe9f3be17af062d6042df6505bb0c
81,223
from io import StringIO
import traceback


def error2str(e):
    """returns the formatted stacktrace of the exception `e`.

    :param BaseException e: an exception to format into str
    :rtype: str
    """
    out = StringIO()
    traceback.print_exception(None, e, e.__traceback__, file=out)
    out.seek(0)
    return out.read()
7de136bae98078fe317e3d1c78043046b2c3a14e
81,228
def get_azfs_url(storage_account, container, blob=''):
    """Returns the url in the form of
    https://account.blob.core.windows.net/container/blob-name
    """
    return 'https://' + storage_account + '.blob.core.windows.net/' + \
        container + '/' + blob
dedc8fb665385165a4bf03329f17aa946bba8cd6
81,229
import re


def regex_strip(text, remove=None):
    """
    Takes a string and performs the same operation as str.strip()

    This function defaults to removing whitespace, but if the optional
    remove argument is supplied, the remove value is removed from the text

    :param str text: source string
    :param str remove: optional value to remove from source string
    """
    if not remove:
        return re.compile(r'^\s*|\s*$').sub('', text)
    else:
        return re.compile(f'^({remove})*|({remove})*$').sub('', text)
778f93466a025c1e9a48eca66141c348b114d82e
81,231
def task_catalog_get(task, task_id, is_consumer):
    """
    Get an object from the task catalog

    @param task: The Celery Task
    @param task_id: The Celery Task ID
    @param is_consumer: Boolean
    @return: scope
    """
    catalog = getattr(task, '_instana_scopes', None)
    if catalog is None:
        return None
    key = (task_id, is_consumer)
    return catalog.get(key, None)
2d2d93647676cfe661faf43fbc9c9a2e217992b2
81,233
import ipaddress


def ip_address(value):
    """Validate that the value is an IP address"""
    value = str(value)
    ipaddress.ip_address(value)
    return value
4187ba28d34a0edc67291be65c1811b8b685516f
81,239
def read_in_list(list_in):
    """Convert list file into list object"""
    file_list = [line.rstrip('\n') for line in open(list_in)]
    return file_list
c1de17a3b79f251744d1fc0be745414ef12d20b4
81,240
def categorize(df):
    """
    Category Combinations

    Labels
    ------
    - M = majority winner
    - P = plurality winner
    - C = condorcet winner
    - U = utility winner

    Categories
    ----------
    - MU  = Has majority utility winner
    - M   = Has majority winner that is not utility winner.
    -
    - CPU = Has condorcet, utility, plurality winner
    - CU  = Has condorcet, utility winner that is not plurality winner
    - CP  = Has condorcet, plurality winner that is not utility winner
    - C   = Has condorcet winner who is not plurality and utility winner
    -
    - NC  = Has no Condorcet winner
    """
    iM = df['output.candidate.winner_majority']
    iP = df['output.candidate.winner_plurality']
    iC = df['output.candidate.winner_condorcet']
    iU = df['output.candidate.winner_utility']

    df = df.copy()
    df.loc[:, 'categories'] = 'No category'

    maj = iM > -1
    no_maj = ~maj

    MU = (iM == iU)
    M = maj & (iM != iU)

    CPU = no_maj & (iC == iP) & (iC == iU)
    CP = no_maj & (iC == iP) & (iC != iU)
    CU = no_maj & (iC == iU) & (iC != iP)
    C = (iC > -1) & (iC != iP) & (iC != iU)
    PU = no_maj & (iP == iU) & (iP != iC)
    NC = (iC == -1)

    df.loc[MU, 'categories'] = 'MU'
    df.loc[M, 'categories'] = 'M'
    df.loc[CPU, 'categories'] = 'CPU'
    df.loc[CP, 'categories'] = 'CP'
    df.loc[CU, 'categories'] = 'CU'
    df.loc[C, 'categories'] = 'C'
    df.loc[PU, 'categories'] = 'PU'
    df.loc[NC, 'categories'] = 'nc'
    return df
6c89905cfbe0b354f3e70a4e2dcd967ee8c37470
81,242
from typing import Sequence


def non_english_resource_types() -> Sequence[str]:
    """All the non-English resource types."""
    return ["ulb", "reg", "tn", "tq", "tw"]
8c40f07b222445c13b90c2c491ef9755e20d4430
81,243
def isinstance_or_subclass(inst, cls):
    """Utility for safely checking isinstance and issubclass."""
    if isinstance(inst, cls):
        return True
    try:
        if issubclass(inst, cls):
            return True
    except TypeError:
        pass
    return False
2c0ce1d0702fc3a29d72048e7445a3cbcadf858a
81,249
def rescale_value(value, current_limits, new_limits):
    """
    Given a value and the limits, rescales the value to the new limits

    input:
        value : float variable containing the value
        current_limits : a tuple containing the lower and upper limits
            of the value
        new_limits : a tuple containing the desired lower and upper limits.
    """
    old_range = current_limits[1] - current_limits[0]
    new_range = new_limits[1] - new_limits[0]
    return (value - current_limits[0]) / old_range * new_range \
        + new_limits[0]
341aeca7af515e1628e034e56a947fba1044b6cf
81,252
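A worked example of `rescale_value` above: mapping 5 from the range (0, 10) into (0, 100) gives (5 - 0)/10 * 100 + 0 = 50:

print(rescale_value(5, (0, 10), (0, 100)))  # 50.0
print(rescale_value(0.5, (0, 1), (-1, 1)))  # 0.0, the midpoint maps to the midpoint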
def power(V, R):
    """Electrical power Watts. V in volts, R in ohms"""
    return V**2 / R
4cd394f81f3b650920d9c369567acf2eaf3d5ff0
81,253
def is_valid_kubernetes_resource_name(name: str) -> bool:
    """
    Returns a truthy value indicating whether name meets the kubernetes
    naming constraints, as outlined in
    https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    This implementation is based on https://tools.ietf.org/html/rfc1123:
    - contains no more than 253 characters
    - contain only lowercase alphanumeric characters, '-' or '.'
    - start with an alphanumeric character
    - end with an alphanumeric character
    """
    if name is None or (len(name) == 0) or (len(name) > 253) \
            or not name[0].isalnum() or not name[-1].isalnum():
        return False

    for char in name:
        if char.isdigit():
            pass
        elif char.isalpha():
            if not char.islower():
                return False
        elif char not in ["-", "."]:
            return False

    return True
132a7293191542869991b03f77971709d925b285
81,255
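A few spot checks on `is_valid_kubernetes_resource_name` above:

print(is_valid_kubernetes_resource_name("my-app.v2"))  # True
print(is_valid_kubernetes_resource_name("My-App"))     # False, uppercase not allowed
print(is_valid_kubernetes_resource_name("-my-app"))    # False, must start alphanumeric
print(is_valid_kubernetes_resource_name("a" * 254))    # False, longer than 253 chars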
def get_windows(src):
    """
    Return a list of all windows composing the entire raster
    """
    return [win for _, win in src.block_windows(1)]
6c469f1cd18e808731f14f19a8557345860cc721
81,256
from typing import List


def flatten_list(
        x: List[list]
) -> list:
    """
    Transform a list of lists into a single flat list.

    Args:
        x: A nested list

    Returns:
        A flat list
    """
    return [v for sublist in x for v in sublist]
af523b82747915cb501dc2c5fec275f9af85ceac
81,257
def read_from_add_translation_dict(address, tdict):
    """map an original address to x, y coords using the dictionary of
    unique addresses"""
    x, y = tdict[address]
    return x, y
6a00aeac9bab7514033b644e512b5e21a076166c
81,258
import colorsys


def scale_lightness(rgb, scale_l):
    """
    Scales lightness of color `rgb`

    https://stackoverflow.com/a/60562502/3413239
    """
    # convert rgb to hls
    h, l, s = colorsys.rgb_to_hls(*rgb)
    # manipulate h, l, s values and return as rgb
    return colorsys.hls_to_rgb(h, min(1, l * scale_l), s=s)
8a4ab3380c387c368085ace8b6fe8b1072abe01d
81,261
def simple_tokenizer(text: str) -> list[list[str]]:
    """
    Given whitespace/newline tokenized text, return the list of sentences
    (where each sentence is made of tokens) using whitespaces and newlines
    """
    doc: list[list[str]] = []
    if text:
        for s in text.split("\n"):
            doc.append(s.split(" "))
    return doc
dc6c65d3fa1a9a60ef5dc4c3bffabc482c4d3f8e
81,263
def _name_as_key(name):
    """Uppercase, stripped, no repeated interior whitespace."""
    name = name.upper().strip()
    # Collapse runs of spaces down to single spaces.
    while '  ' in name:
        name = name.replace('  ', ' ')
    return name
58836d4f26ce83cb9eb0bc597626bc1d4fcaa7fa
81,264
def mean(items):
    """Returns the mean of the given items.

    Example::

        >>> mean([5, 3, 7, 1, 9])
        5.0
    """
    if not items:
        return 0.0
    return sum(items) / len(items)
621ccf666b4690818bf87787e6b7b7b3bf8bfaf0
81,266
from typing import Any


def is_empty(data: Any) -> bool:
    """Checks if argument is empty.

    Args:
        data (Any): To check if empty

    Returns:
        bool: Returns bool indicating if empty
    """
    if data is None or data == '' or data == 'null':
        return True
    return False
015c109dbfa2bc72d21213d526db6a553b27aa19
81,269
def from_augmented(aug_B):
    """Convert the augmented matrix back into transformation matrix +
    translation vector."""
    return aug_B[:-1, :-1], aug_B[-1, :-1]
2f066c5736ba438f7c9688a23a82d08ce9377d51
81,270