content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def van_der_corput(n, base=2):
    """Return the n-th element of the van der Corput sequence in the given base.

    The value is obtained by reflecting the base-`base` digits of ``n``
    about the radix point: the k-th extracted digit contributes
    ``digit / base**k``.
    """
    value = 0.0
    scale = 1.0
    while n:
        scale *= base
        n, digit = divmod(n, base)
        value += digit / scale
    return value
fe5f3aca53f740ab6476235e3b08a2619f2dfbe9
81,276
import torch


def get_current_targets(task_id, hnet):
    """Compute hypernetwork outputs for all tasks :math:`j < \\text{task\\_id}`.

    Every output is computed with gradients disabled and stored as a
    detached clone, so the returned tensors are disconnected from the
    autograd graph. The hypernetwork is temporarily switched to eval mode
    (e.g. to disable training stochasticities such as dropout) and its
    previous mode is restored before returning.

    Note, if these targets don't change during training, it would be more
    memory efficient to store the hypernetwork weights instead; recomputing
    here trades compute for memory.

    Args:
        task_id: The ID of the current task.
        hnet: The hypernetwork holding the weights used to produce targets.

    Returns:
        An empty list if `task_id` is 0; otherwise one entry per previous
        task, each a list of detached, cloned tensors. These targets can be
        passed to :func:`calc_fix_target_reg` while training the new task.
    """
    was_training = hnet.training
    hnet.eval()

    targets = []
    with torch.no_grad():
        for prev_task in range(task_id):
            outputs = hnet.forward(task_id=prev_task)
            targets.append([t.detach().clone() for t in outputs])

    hnet.train(mode=was_training)
    return targets
311078cc5c882923f3b550f3deda2c5250dedfad
81,277
def multipole_label(T, L):
    """Get multipole label.

    T = 0 (electric), 1 (magnetic); L = 0, 1, 2, ... (order).
    Orders 0-3 map to the letters D, Q, O, H; higher orders get a
    generic " (L = ...)" suffix.
    """
    prefix = ('e', 'm')[T]
    if L <= 3:
        suffix = ('D', 'Q', 'O', 'H')[L]
    else:
        suffix = " (L = {L})".format(L=L)
    return prefix + suffix
c7a261873c75fc6a808da6e7ad77806aaf49b403
81,278
def emd_function_value(pixel_group, base):
    """Calculate and return the f value of the given pixel group.

    The f value is a weighted sum of the pixel values (weight = 1-based
    position in the group) taken modulo the chosen base.
    """
    total = 0
    for position, pixel in enumerate(pixel_group, start=1):
        total = (total + pixel * position) % base
    return total
d4ec4819911283a0ffd55c0060c51e8819b8c638
81,281
def summarize_stocks(transactions_dict):
    """Calculate the total number of shares per company symbol.

    Iterates over the transaction records (each with 'symbol' and
    'quantity' keys) and accumulates the quantity for every symbol.
    """
    totals = {}
    for transaction in transactions_dict:
        symbol = transaction["symbol"]
        totals[symbol] = totals.get(symbol, 0) + transaction["quantity"]
    return totals
1db0e8bccaa22652dacd6e7f19df296b3c1797ed
81,284
import copy def _prune_keys(in_dict, *keys): """remove key(s) and their values if they exist in the dict.""" dict_ = copy.deepcopy(in_dict) for key in keys: if dict_.get(key): dict_.pop(key) return dict_
cd759f7aba8c3305c1d66dafbb448223e6a23835
81,286
def get_crop(bbox):
    """Get square crop coordinates and side length for a bounding box.

    Args:
        bbox: (x1, y1, x2, y2) coordinates for the bounding box.

    Returns:
        (x1, y1, side_length): a square crop covering the box, centered on
        it and clamped so the origin never goes negative.
    """
    left, top, right, bottom = bbox
    width = right - left + 1
    height = bottom - top + 1
    # Square side that covers the larger dimension.
    side_length = max(right - left, bottom - top) + 1
    # Shift each origin by half of the slack so the content sits centered,
    # clamping at coordinate 0.
    left = max(0, left - (side_length - width) // 2)
    top = max(0, top - (side_length - height) // 2)
    return left, top, side_length
bdb0154e785d250eae1b6093c49f8a6d6f50b8e1
81,290
import re


def camel_case_transformer(s):
    """Convert a camelCase string into space-separated, title-cased words.

    E.g. input: 'camelCase' output: 'Camel Case'.

    The known initialisms 'id', 'cve' and 'arn' are returned fully
    uppercased (e.g. 'cve' -> 'CVE').

    Note: the previous version bound its intermediate result to the name
    ``str``, shadowing the builtin; renamed to avoid that.
    """
    # Insert a space at every lower->upper case boundary.
    spaced = re.sub('([a-z])([A-Z])', r'\g<1> \g<2>', s)
    if spaced in ('id', 'cve', 'arn'):
        return spaced.upper()
    return spaced.title()
a0bde68e9b1ea892c98b675ffb14981314268f67
81,291
from typing import Tuple


def uint64_to_two_uint32(num: int) -> Tuple[int, int]:
    """Convert a uint64 into its high and low uint32 halves.

    Args:
        num: A non-negative integer representable as an unsigned 64-bit int.

    Returns:
        (high, low) where high holds the upper 32 bits and low the lower 32.

    Raises:
        ValueError: If num is negative or does not fit in 64 bits.
    """
    # Validate the range up front; bit operations then replace the old
    # round-trip through a 64-character binary string.
    if num < 0 or num >= 1 << 64:
        raise ValueError(
            f"Number is not positive or can be represented as a uint64: {num}")
    return num >> 32, num & 0xFFFFFFFF
bf9ae9dc37bec755176db9db9abfc7c34a1bbd6b
81,294
from datetime import datetime

def from_iso_date(str):
    """Convert an ISO8601 to a datetime.

    Args:
        str(string): The ISO8601 formatted string to convert

    Returns:
        A :class:`datetime` object representing the given time
    """
    # NOTE(review): the parameter name shadows the builtin ``str``; renaming
    # it would break keyword callers, so it is kept as-is.
    # Try the variant with fractional seconds first; fall back to whole
    # seconds. Only the trailing-'Z' ISO8601 variants are accepted — any
    # other shape lets the second strptime's ValueError propagate.
    try:
        return datetime.strptime(str, "%Y-%m-%dT%H:%M:%S.%fZ")
    except ValueError:
        return datetime.strptime(str, "%Y-%m-%dT%H:%M:%SZ")
f1247e5860380582d96c9b56d5eaa9612f77cae5
81,297
def GetGuestName(sHostGuestStepName):
    """Get the guest name from the given formal HOST_GUEST_StepSize name,
    e.g. GEDKUY_ethene_S0-4.

    [sHostGuestStepName]: string, formal sHostGuestStepName name.
    <retval>: string, guest name.
    """
    first = sHostGuestStepName.find("_")
    if first == -1:
        raise Exception(f"Failed finding first score in {sHostGuestStepName}")
    second = sHostGuestStepName.find("_", first + 1)
    if second == -1:
        raise Exception(f"Failed finding second score in {sHostGuestStepName}")
    # The guest name sits between the first and second underscores.
    return sHostGuestStepName[first + 1:second]
2af049f370e584dba9db32c8b4d8016786aadf73
81,304
def asn1_length(n):
    """Return a string representing a field length in ASN.1 format.

    Lengths 0..127 use the short form: a single byte holding the value.
    Longer lengths use the long form: a lead byte with the high bit set
    whose low 7 bits give the number of following length octets, followed
    by the big-endian length octets themselves.

    Note: the old `n < 0x7f` test wrongly pushed 127 into the long-form
    path, and the long form was missing its 0x80|count lead byte.
    """
    assert n >= 0
    if n <= 0x7f:
        return chr(n)
    r = ""
    while n > 0:
        r = chr(n & 0xff) + r
        n >>= 8
    return chr(0x80 | len(r)) + r
8d6d555089d823cb39bcdc3fe728c1b1fab01cff
81,307
def construct_node_config(*, name: str, remote_fs: str = '/tmp', executors: int = 2) -> dict:
    """Build a node configuration dict ready to use with nodes.create().

    Args:
        name: Node name
        remote_fs: Remote node root directory
        executors: Number of node executors

    Returns:
        dict: ready-to-use configuration for nodes.create()
    """
    config = {
        'name': name,
        'nodeDescription': '',
        'numExecutors': executors,
        'remoteFS': remote_fs,
        'labelString': '',
    }
    # Launch agents over JNLP and never retire the node automatically.
    config['launcher'] = {'stapler-class': 'hudson.slaves.JNLPLauncher'}
    config['retentionStrategy'] = {
        'stapler-class': 'hudson.slaves.RetentionStrategy$Always',
    }
    config['nodeProperties'] = {'stapler-class-bag': 'true'}
    return config
2c7255d380c4c080f829dcef613cd772620a0299
81,313
def _copy_channel(inst, ch_name, new_ch_name):
    """Add a copy of a channel specified by ch_name.

    Input data can be in the form of Raw, Epochs or Evoked.
    The instance object is modified inplace.

    Parameters
    ----------
    inst : instance of Raw | Epochs | Evoked
        Data containing the EEG channels
    ch_name : str
        Name of the channel to copy.
    new_ch_name : str
        Name given to the copy of the channel.

    Returns
    -------
    inst : instance of Raw | Epochs | Evoked
        The data with a copy of a given channel.
    """
    # Work on a single-channel copy so the source data stays untouched
    # until the renamed duplicate is appended back onto `inst`.
    duplicate = inst.copy().pick_channels([ch_name])
    duplicate.rename_channels({ch_name: new_ch_name})
    inst.add_channels([duplicate], force_update_info=True)
    return inst
e4bcbdcf88969d1830fbaa0f9e0c3aee3b3cd318
81,317
import string


def next_digit_offset(s, start_offset=0):
    """Given an optional start offset, find the offset of the next digit
    character in string s, or -1.
    """
    limit = len(s)
    offset = start_offset
    while offset < limit:
        # string.digits restricts the match to ASCII '0'-'9'.
        if s[offset] in string.digits:
            return offset
        offset += 1
    return -1
102988c248eef8296d228b4ff86feeddcdafe834
81,321
from typing import Any


def coalesce(default: Any, *args: Any) -> Any:
    """
    Returns the first non-None value. If no non-None args exist, then return
    default.
    """
    # The literal strings 'None' and "'None'" are treated as missing too.
    sentinels = (None, "'None'", 'None')
    return next((arg for arg in args if arg not in sentinels), default)
a975f0bcc4b2613051fdb90187004cee79a3f365
81,322
def is_sequence(obj):
    """
    Determines whether the given value is a sequence.

    Sets, lists, tuples, bytes, dicts, and strings are treated as sequence.

    :param obj: The value to test.
    :returns: ``True`` if value is a sequence; ``False`` otherwise.
    """
    # Anything list() can consume counts as a sequence; non-iterables
    # raise TypeError.
    try:
        list(obj)
    except TypeError:
        return False
    return True
b17bf2c93a69e3209fc76f6b00a38e40243ab600
81,325
def harmonic(matches):
    """Calculate harmonic series sum up to the integer 'matches'

    >>> harmonic(0)
    0.0
    >>> harmonic(2)
    1.5
    """
    # sum() over 1/k accumulates in the same left-to-right order as the
    # original loop; float() covers the empty (matches == 0) case.
    return float(sum(1 / k for k in range(1, matches + 1)))
f5308ffd045664aacc93799fc7d83b92365122a0
81,326
def cbrt(x):
    """cubic root, this cubic root routine handles negative arguments"""
    if x == 0.0:
        return 0
    # pow() with a negative base and fractional exponent would produce a
    # complex result, so negative inputs are reflected through the origin.
    sign = 1.0 if x > 0.0 else -1.0
    return sign * pow(abs(x), 1. / 3.)
06dbfcdcfbfe3d11d8b2d6ebc7f7043b66735511
81,327
from typing import List


def load_list(filename: str) -> List[str]:
    """Read a wordlist from disk.

    Args:
        filename: Path of the file to read.

    Returns:
        The file's lines with surrounding whitespace (including the
        trailing newline) stripped.
    """
    with open(filename, "r") as fp:
        # Iterating the file object streams line by line; readlines()
        # needlessly materialized the whole file as a second list first.
        return [line.strip() for line in fp]
c03b9a621e4475d07ba943c11fb6b38784da6fda
81,331
def obj_is_multiple(obj_multiple_instances: str) -> bool:
    """
    Maps the xml value of an Object's "MultipleInstances" to a boolean that
    represents whether the object can be instanced. This is useful to
    optimize memory usage of the generated code.

    :param obj_multiple_instances: XML attribute value, e.g. "Multiple".
    :return: True iff the value equals "Multiple" (case-insensitively).
    """
    # The comparison already yields a bool, so the previous
    # `True if ... else False` wrapper (and the constant
    # `"Multiple".upper()` call per invocation) was redundant.
    return obj_multiple_instances.upper() == "MULTIPLE"
3ecfb0f7571d313b00d977ccde92052669f14049
81,332
from functools import reduce


def union(sets):
    """Get the union of all input sets.

    Args:
        sets: An iterable of sets (or of arbitrary iterables, which are
            converted to sets first).

    Returns:
        A set containing every element present in any input; an empty set
        when `sets` is empty (the old version raised TypeError there).
    """
    # isinstance replaces the unidiomatic `type(i) == type(set())` check,
    # and the explicit set() initializer makes the empty input well-defined.
    normalized = (s if isinstance(s, set) else set(s) for s in sets)
    return reduce(set.union, normalized, set())
3995546d91fcbd81fdeb2634c4d982806bf3f085
81,333
def get_year_and_month(key):
    """Returns year and month from manifest key"""
    # The date component (YYYYMM...) is the second-to-last path segment.
    date_part = key.split('/')[-2]
    return date_part[:4], date_part[4:6]
375e39d1f54f15427219366833cc5bdc7246f77a
81,341
from typing import Dict
import json


def load_cccv_accuracy_file(cccv_file: str) -> Dict:
    """
    Function to load cccv accuracies from phendb format.

    :param cccv_file: The CCCV results file.
    :return: A Dict of CCCV results in the internal CCCV results format,
        keyed by completeness, then contamination.
    """
    results = {}
    with open(cccv_file) as fin:
        rows = json.load(fin)
    for row in rows:
        # NOTE(review): this relies on each row's key order being
        # (score_mean, score_sd, conta, comple) — confirm the producer
        # guarantees that ordering.
        score_mean, score_sd, conta, comple = row.values()
        results.setdefault(comple, {})[conta] = {
            'score_mean': score_mean,
            'score_sd': score_sd,
        }
    return results
f8f9fd1108b05bdd74c0df3de2880682737236c7
81,343
def linear(x, m, b):
    """linear function, to be used for fitting parameters"""
    # y = m*x + b (slope m, intercept b)
    return m * x + b
86d5e5b80b8af5c3081f00a1b25d6aa3e55ddf23
81,350
import torch


def persistent_entropy(D, **kwargs):
    """Calculate persistent entropy of a persistence diagram.

    Parameters
    ----------
    D : `torch.tensor`
        Persistence diagram, assumed to be in shape `(n, 2)`, where each
        row holds the creation and destruction value of one topological
        feature.

    Returns
    -------
    Persistent entropy of `D`.
    """
    lifetimes = torch.diff(D)
    # Infinite bars are dropped; lifetimes enter as absolute values.
    lifetimes = lifetimes[torch.isfinite(lifetimes)].abs()
    probabilities = lifetimes / lifetimes.sum()

    # A zero probability must contribute zero (not NaN) to the entropy,
    # so log2 is only evaluated where p > 0.
    mask = probabilities > 0
    log_p = torch.zeros_like(probabilities)
    log_p[mask] = torch.log2(probabilities[mask])
    return -(probabilities * log_p).sum()
4436baadda1d7375838a98c5e0a22f57eec44bc7
81,352
def have_matching_types(a, b, type_or_types):
    """True if a and b are instances of the same type and that type is one
    of type_or_types.
    """
    # a must belong to the allowed set, and b must match a's concrete type.
    return isinstance(a, type_or_types) and isinstance(b, type(a))
3a9388d4c55365c6513f34dbefa27d1b0700ceca
81,357
import requests


def get_pages(start, end, address):
    """
    Downloads the content from pages {start}-{end} and stores the html of
    the page in the array pages.

    :param start: First page to get.
    :param end: Last page to get.
    :param address: The address from which to get the content.
    :return: pages
    """
    pages = []
    # Pages are requested one by one via a `page` query parameter;
    # both endpoints of the range are inclusive.
    for page_number in range(start, end + 1):
        response = requests.get(address, params={'page': page_number})
        pages.append(response.text)
    return pages
e38b9d8db5ac23c5ac56924161d2c55f0529d46a
81,359
import time

def date_to_millis(d):
    """Converts a datetime object to the number of milliseconds since the unix
    epoch."""
    # NOTE(review): time.mktime() interprets the (naive) time tuple in the
    # process's *local* timezone, so the result is "milliseconds since the
    # unix epoch" only when d represents local time — confirm callers don't
    # pass UTC datetimes. Sub-second precision is discarded by timetuple().
    return int(time.mktime(d.timetuple())) * 1000
46be131f35c64af8e2e70523ac18543319980704
81,360
def dict_failed_keys(table):
    """Returns all failed keys of the dict match comparison result table."""
    # Each table row is (_, key, result, _, _); keep truthy keys whose
    # comparison result is 'Failed', sorted for a deterministic order.
    failed = [key for _, key, result, _, _ in table
              if key and result == 'Failed']
    return tuple(sorted(failed))
58a8707d08c6e944f4a2ea5752e423297c02d84f
81,362
def r2_score(truth, pred, mean_dims, dims=None, w=1.0):
    """R2 score for xarray objects"""
    if dims is None:
        dims = mean_dims
    baseline = truth.mean(mean_dims)
    # 1 - SSE/SST, with optional per-point weights w.
    sse = ((truth - pred) ** 2 * w).sum(dims)
    sst = ((truth - baseline) ** 2 * w).sum(dims)
    return 1 - sse / sst
9601a6e2681be6cd748a83b3bfbffa2f07c0a752
81,364
def escape_newline(s: str) -> str:
    """Replaces each new line character (\\n) in the input with \\\\n"""
    # Splitting on '\n' and rejoining with the escaped form is equivalent
    # to str.replace('\n', '\\n').
    return '\\n'.join(s.split('\n'))
4867efd3e6edbef44509de50f0e598c7defd0b47
81,366
def usage(err=''):
    """ Prints the Usage() statement for the program """
    # Assemble the message from its lines and join once at the end.
    lines = [
        '%s\n' % err,
        ' Default usage is to load a specific .rpt file in current working directory.\n',
        ' use the -t option to indicate the sf.com ID of the parent Tech Campaign\n',
        ' the -c dir switch will load all rpt file in the current working directory. \n',
        ' \n',
        ' qor2sf -c load -f <filename-in-cwd> -t a0Q3000000000K1 \n',
        ' NOTE: the filename must have an extension of .rpt or .gold\n',
        ' or\n',
        ' qor2sf -c dir -t a0Q3000000000K1 \n',
        ' NOTE: only filename with an extension of .rpt or .gold will be processed\n',
        ' ',
    ]
    return ''.join(lines)
6cb3d1448a6290327c06ebb277a5efcc1a6d3147
81,367
def bernoulli(p, u):
    """
    Generates a Bernoulli rnd variate.
    :param p: (float) success probability. Must be 0.0 < p < 1.0.
    :param u: (float) rnd number in (0,1).
    :return: (float) the Bernoulli(p) rnd variate.
    """
    # Success iff u falls into the top p-sized portion of (0,1).
    if u < (1 - p):
        return 0
    return 1
7d114c1f103c70d5e8bc350fe646d8746bfda771
81,369
def get_session_type(data_store, project_id, brain_id, session_id):
    """Get the cached session type (immutable) of a session.

    Args:
        data_store: data_store.DataStore to read from if the session is not
            cached.
        project_id: Project ID associated with the requested session.
        brain_id: Brain ID associated with the requested session.
        session_id: Session ID associated with the requested session.

    Returns:
        session_pb2.SessionType enum.
    """
    session = data_store.read_by_proto_ids(
        project_id=project_id, brain_id=brain_id, session_id=session_id)
    return session.session_type
211246fda2a30295b0d98a7d325f39ac9154a300
81,370
def _earliest_run_start( prev1_start, prev1_haplo, curr1_haplo, prev2_start, prev2_haplo, curr2_haplo): """ find the earliest "prev start" such that the "prev" and "curr" haplos match """ earliest_start = None if prev1_haplo == curr1_haplo: earliest_start = prev1_start if prev2_haplo == curr2_haplo: if earliest_start is None or prev2_start < prev1_start: earliest_start = prev2_start return earliest_start
0fa7e8998968cad79e6223d7151343814cf4f882
81,371
def format_day(dt, fmt=None):
    """
    Returns the formatted date truncated to the day.
    The format is optional and defaults to the first registered date format.
    """
    # Falsy fmt (None or empty string) falls back to the ISO day format.
    chosen = fmt or "%Y-%m-%d"
    return dt.strftime(chosen)
abafb0862d0a373e9b964055cba3ce7e0484d495
81,372
def find_variants(tile):
    """Find all flipped/rotated variants of a tile.

    The tile is given as its four edge strings [top, right, bottom, left];
    eight orientation variants are returned in the same edge order.
    """
    top, right, bottom, left = tile[0], tile[1], tile[2], tile[3]
    # Reversed copy of each edge.
    top_r = top[::-1]
    right_r = right[::-1]
    bottom_r = bottom[::-1]
    left_r = left[::-1]
    return [
        [top, right, bottom, left],            # orig
        [bottom, right_r, top, left_r],        # flip-x
        [top_r, left, bottom_r, right],        # flip-y
        [bottom_r, left_r, top_r, right_r],    # flip-xy, or rotate-180
        [right, bottom_r, left, top_r],        # rotate-left
        [left, bottom, right, top],            # rotate-left, then flip-x
        [right_r, top_r, left_r, bottom_r],    # rotate-left, then flip-y
        [left_r, top, right_r, bottom],        # rotate-right
    ]
7b86855b35dd036c1189967a36c204a38cfd5fc9
81,373
def binary_search(arr, k):
    """
    Implement nonrecursive binary search
    :param arr: an array sorted in ascending order
    :param k: seach key K
    :return: an index of array element that is equal to k, or -1
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if arr[mid] == k:
            return mid
        if arr[mid] < k:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
60a8e907d8fc2931958e04ce7d64f733b5d2a6e4
81,374
def cov(df, groupby_columns: list, value_column: str):
    """
    Calculates the coefficient of variation for data grouped by specific
    columns

    Args:
        df: pandas dataframe
        groupby_columns: list of column names to group the data by
        value_column: string name of column containing the values for which
            you want cov calculated
    Returns:
        result: a pandas df with grouped statistics for count, mean,
        population standard deviation, and cov
    """
    # Population standard deviation (ddof=0); the function's __name__
    # ('std_p') becomes the aggregated column label.
    def std_p(x):
        return x.std(ddof=0)

    subset = df.copy()[groupby_columns + [value_column]]
    result = subset.groupby(groupby_columns).agg(['count', 'mean', std_p])
    result = result.droplevel(level=0, axis=1)
    result['cov'] = result['std_p'] / result['mean']
    return result
881cce82ff323e0295731901bafeaeac5e47c0bc
81,379
from typing import List
import gc


def get_child_ids(obj: object) -> List[int]:
    """
    Return children of the provided object using gc.get_referents

    :param obj: The object
    :return: List of object ids
    """
    # map(id, ...) yields one id per referent reported by the GC.
    return list(map(id, gc.get_referents(obj)))
09e205293ada98080a23196ad74d8e0c65fad1b6
81,383
from typing import List
from typing import Dict
import json


def load_examples(fpath: str) -> List[Dict]:
    """Load the preprocessed examples.

    The file is expected to hold one JSON document per line (JSONL).
    """
    with open(fpath, 'r') as fp:
        return [json.loads(line) for line in fp]
5e6bd07b1456dd1f9a4a23335378ec3569a3e54e
81,385
def transform_key_to_list(key):
    """
    Convert key to list. Takes each 2 digit value and put it in list.
    :param key: Input key (hex string)
    :return: Key as integer list format
    """
    # Each pair of hex digits becomes one integer byte value.
    return [int(key[pos:pos + 2], 16) for pos in range(0, len(key), 2)]
3091ca5025b4be9b871ef22a4c7e6f0c64a9a6ce
81,391
import re


def get_active_port(flows):
    """Find the active port name based on knowledge that down stream traffic
    is only output to the 'nic' interface.

    Args:
        flows (dict): Open flow details of the mux, result returned from
            function get_flows

    Returns:
        string or None: Name of the active port or None if something is
        wrong.
    """
    # Mux-side input ports are named like "mux-*"/"muxy-*"; the first
    # input port that is not mux-side is the active one.
    mux_prefix = re.compile(r"(m|mu|mux|muxy)-")
    return next((port for port in flows if not mux_prefix.match(port)), None)
cfee2c86e0d22de37cf206249a91e565d328b875
81,394
def calc_activity(X, gamma):
    """
    Returns the value of the activity of any species given the concentration
    of that species in mol fraction, X, and the activity coefficient gamma.

    Parameters
    ----------
    X: float
        Concentration of the given species in mol fraction
    gamma: float
        Activity coefficient of the given species

    Returns
    -------
    float
        Activity of the given species
    """
    # Activity = concentration times activity coefficient.
    activity = X * gamma
    return activity
98819a93bdb5f5eae5f7aa0544b37e9d3c60e802
81,397
def existence_search(client, flat_name, names, value):
    """Create a search for existence or non existence of a field, if the
    value is * or -*.

    Returns a (query, description) tuple, or None when `value` is not one
    of the recognized sentinels ('', '*', '-*').
    """
    label = ':'.join(names)
    if value == '':
        return (client.field(flat_name).exists(), '%s exists' % (label, ))
    if value == '*':
        return (client.field(flat_name).nonempty(), '%s is non-empty' % (label, ))
    if value == '-*':
        return (client.field(flat_name).empty(), '%s exists but is empty' % (label, ))
    return None
4ba929a484d6717cb9379dd318ec8891d7a56148
81,398
def wrap_python_args_with_string(args):
    """Wrap argument values with string quotes.

    Args:
        args: list like ["--foo", "3", "--bar", False]

    Returns:
        list of string: like ["--foo", "'3'", "--bar", "'False'"]
    """
    # str() first: the docstring promises non-string values (e.g. False),
    # but the old code called .startswith directly on them and crashed
    # with AttributeError.
    result = []
    for value in args:
        text = str(value)
        if text.startswith("--"):
            result.append(text)
        else:
            result.append("'{}'".format(text))
    return result
387f2e400a1eaa218cd35d6aa7adc780bbbd8752
81,401
def remove_emoticons(string):
    """Remove emoticons from a string and return it.

    Postional arguments:
    string -- string to remove emoticons from.

    Returns:
    String without emoticons.

    >>> remove_emoticons("applause and laughter ;)")
    'applause and laughter '
    """
    # Strip each known emoticon in turn.
    for emoticon in (':D', ':)', ':/', ':p', ';)'):
        string = string.replace(emoticon, "")
    return string
cb9a4bbbf3d0332aefab1bde5089ef4aa06b3b00
81,407
def fill_kwargs(kwargs):
    """Give the kwargs dict default options."""
    # Caller-supplied entries override the defaults.
    return {
        "strandedness": None,
        "overlap": True,
        "how": None,
        "invert": None,
        "new_pos": None,
        "suffixes": ["_a", "_b"],
        "suffix": "_b",
        "sparse": {"self": False, "other": False},
        **kwargs,
    }
b6337fba83cff3430633f10e447a8796c6411804
81,410
def build_url(name_account):
    """Build the account URL from an account name.

    Accounts of the form "<user>-<code>" map to a 4usee.com path; plain
    names map to a 4yousee.com.br subdomain.

    The original doctests printed bare URLs (no quotes), so they could
    never pass under doctest; fixed to show the repr, and the repeated
    split('-') calls are collapsed into one.

    >>> build_url('alfareiza-3385E2')
    'http://4usee.com/alfareiza/3385E2'
    >>> build_url('tutorial')
    'http://tutorial.4yousee.com.br'
    """
    if '-' in name_account:
        parts = name_account.split('-')
        return f"http://4usee.com/{parts[0]}/{parts[1]}"
    return f"http://{name_account}.4yousee.com.br"
fa6694c5ace43590ad0a06955e674deec805a021
81,411
import re


def parse_url(url):
    """Tokenize the url to get username and repository name.

    It parses the url and returns following properties
    - username
    - name (refers to repository name)

    Args:
        url: A repository URL

    Returns:
        A tuple of username and name if successful

    Raises:
        ValueError: An error occured parsing invalid github repository URL
    """
    pattern = r'^github.com/[\w-]+/[\w-]+/?$'
    if not re.match(pattern, url):
        raise ValueError('The given repository URL is not valid')
    # The regex guarantees at least three path segments.
    *_, username, name = url.split('/')
    return username, name
2905b8ad9ae6bc0709c4fae641cee3d53e2a2ab3
81,412
from typing import List
import re


def list_filter(list_var: List[str], filter_string: str,
                exact_match: bool = False) -> List[str]:
    """
    From the given list, return the list with filtered values.

    Args:
        list_var (list): list of strings
        filter_string (str): string which will be used to filter
        exact_match (bool, optional): filter should be based on exact match
            or partial match. default is partial.

    Returns:
        List with filtered values
    """
    # Exact matches get priority even when exact_match is set to False.
    matches = [item for item in list_var if item == filter_string]
    if not exact_match:
        # The old regex ".*<escaped>.*" search is just substring
        # containment; it was also recomputing the exact-match list twice.
        matches.extend(item for item in list_var
                       if filter_string in item and item not in matches)
    return matches
a97f19baf7e44132a4fec196f0296b59b9f38b83
81,414
from typing import List
from typing import Optional
import glob


def make_video_table(
    data_dir: str,
    youtube_ids: List[str],
    ground_truth_labels: List[str],
    predictions: Optional[List[str]] = None,
) -> str:
    """
    Make an HTML table where each cell contains a video and metadata.

    Inputs:
        data_dir: root directory holding the video files
        youtube_ids: list of strings of YouTube ids, one for each video to
            display; these videos should be part of the Kinetics dataset
        ground_truth_labels: list of strings of ground truth labels, one
            for each video
        predictions: [optional] list of strings of model predictions, one
            for each video
    Outputs:
        video_html: HTML that builds a table; to be rendered with
        IPython.display.HTML

    Example:
        from IPython.display import HTML
        HTML(make_video_table(DATA_DIR, YOUTUBE_IDS, TRUE_LABELS_STR))
    """
    VIDEOS_PER_ROW = 4
    WIDTH = 210
    HEIGHT = WIDTH * 2 // 3

    # For videos to display properly, the path must be relative to the
    # notebook dir. The old code wrapped this in a bare try/except around
    # str.find (which never raises), and find() returning -1 silently
    # truncated the path to its last character; only strip the prefix when
    # a "data" component actually exists.
    anchor = data_dir.find("data")
    if anchor != -1:
        data_dir = data_dir[anchor:]

    # One clip per id; raises IndexError if a clip is missing on disk.
    filepaths = [glob.glob(f"{data_dir}/*/{youtube_id}_*.mp4")[0]
                 for youtube_id in youtube_ids]

    video_html = ["<table><tr>"]
    i = 0
    while i < len(filepaths):
        prediction_par = ""
        if predictions is not None:
            # Wrong predictions are rendered in red.
            color = "black" if predictions[i] == ground_truth_labels[i] else "red"
            prediction_par = f"<p style='color:{color};'>{predictions[i]}</p>"
        video_html.append(
            f"""
        <td><h2>{i}</h2><p>{ground_truth_labels[i]}</p><video width="{WIDTH}" height="{HEIGHT}" controls>
        <source src="{filepaths[i]}" type="video/mp4">
      </video>{prediction_par}</td>"""
        )
        i += 1
        if i % VIDEOS_PER_ROW == 0:
            video_html.append("</tr><tr>")
    video_html.append("</tr></table>")
    return "".join(video_html)
4a5bc47cc43ad260ad4e94d918ae0568c1d04bf9
81,415
import uuid


def glossary_name(request) -> str:
    """Returns a suitable glossary name to be used in the test"""
    # Combine the test's name with a fresh UUID so concurrent runs never
    # collide on glossary names.
    unique_suffix = uuid.uuid1()
    return f"deepl-python-test-glossary: {request.node.name} {unique_suffix}"
8897e0c936cbd977c01740d73f54c1e11fa17c1f
81,419
def filter_method(func):
    """Marks a method for registration as template filter."""
    # Decorator: tag the function so a registry can discover it later.
    setattr(func, 'is_filter', True)
    return func
18c20004501a7243110555e17657c801f4f48d39
81,421
def diagnose(message, command, status, stdout, stderr):
    """Constructs a detailed failure message based on arguments."""
    parts = [message + '\n']
    parts.append('COMMAND: %s\n' % ' '.join(command))
    if status == -1:
        parts.append('EXIT STATUS: %d (command timed out)\n' % status)
    else:
        parts.append('EXIT STATUS: %d\n' % status)

    # A stream that is None or consists only of newlines is reported as
    # "[none]"; otherwise it is shown with a single trailing newline.
    for label, stream in (('STANDARD OUTPUT:', stdout),
                          ('STANDARD ERROR:', stderr)):
        parts.append(label)
        if (stream is None) or (len(stream.rstrip('\n')) == 0):
            parts.append(' [none]\n')
        else:
            parts.append('\n' + stream.rstrip('\n') + '\n')
    return ''.join(parts)
9e39fd130a7d80aae7e7c1dcb0e5d9942719b165
81,429
def get_weakest_bot(bots):
    """Returns the weakest bot out of a list of bots."""
    assert len(bots) != 0
    # Bots have at most 50 hp, so 51 acts as "+infinity" for the scan;
    # ties keep the earliest bot (strict less-than).
    weakest = None
    lowest_hp = 51
    for candidate in bots:
        if candidate.hp < lowest_hp:
            lowest_hp = candidate.hp
            weakest = candidate
    return weakest
3e35a06d183ea6ad0189996f4a09e9193fbbe42e
81,434
def magmom_lane_corrector(magmom_list: list, true_ratio: int) -> list:
    """
    Args:
        magmom_list (list) - list of magnetic atoms from uncorrected INCAR
            file
        true_ratio (int) - ratio of uncorrected number atoms to the right
            number

    Returns:
        new_magmom_list (list) - list with corrected configuration for the
            afm cell

    Examples:
        magmom_lane_corrector([1, 1, 1, 0, 0], 2)
        >>> ([1, 1, 1, -1, -1, -1, 0, 0, 0, 0])

        magmom_lane_corrector([2, 0], 2)
        >>> ([2, -2, 0, 0])
    """
    # Replicate the magnetic (truthy) and nonmagnetic (falsy) moments.
    magnetic = [m for m in magmom_list if m] * true_ratio
    nonmagnetic = [m for m in magmom_list if not m] * true_ratio
    # Negate the second half of the magnetic moments to build the
    # antiferromagnetic configuration, then append the zeros.
    half = len(magnetic) // 2
    flipped_tail = [-m for m in magnetic[half:]]
    return magnetic[:half] + flipped_tail + nonmagnetic
23692da3cd0d1e26cde0f0fb6e26e75d1ef8bd75
81,435
import re


def remove_trailing_whitespace(ansible_src):
    """ Removes trailing whitespace in an Ansible script """
    # Multiline mode anchors $ at each line end, so every line is trimmed.
    trailing_blanks = re.compile(r'[ \t]+$', re.M)
    return trailing_blanks.sub('', ansible_src)
9ed75dbf0c06abb5567afbc2796878284ce4504d
81,436
def get_table(conn, table):
    """
    get table data from database
    :param conn The sql database connection
    :param table The name of the table
    :return table data as fetchall()
    """
    # NOTE(review): `table` is interpolated directly into the SQL string.
    # Identifiers cannot be bound as parameters, so callers must ensure the
    # table name is trusted/validated — otherwise this is SQL-injectable.
    with conn:
        cur = conn.cursor()
        cur.execute("SELECT * FROM " + table)
        # NOTE(review): commit() after a plain SELECT looks redundant on
        # most drivers — confirm before removing.
        conn.commit()
        return cur.fetchall()
837f05ac1bbcb1503654265f3f2b712700b36add
81,440
def get_value(table, row, col):
    """Get value in given row and column"""
    # Rows are <tr> elements; cells within a row are <td> elements.
    cells = table.find_all('tr')[row].find_all('td')
    return cells[col].string
4d1c96dc239654fb3df57b4f12618b00bfe869d3
81,442
import types


def is_function(item):
    """
    Takes item object, returns True if it is a function.
    """
    # types.FunctionType covers functions created with `def` or `lambda`;
    # builtins are a different type and do not match.
    item_is_function = isinstance(item, types.FunctionType)
    return item_is_function
ccfe96d9a3cd5bc2106f393154cc3879fecbfa00
81,451
def class_filter(dataset, classes, is_superclass=False, proportion=1.0):
    """
    Handles filtering of (super)classes for use with tf.Dataset.

    Arguments:
        dataset: An instance of the Dataset class.
        classes: A list of classes (or superclasses).
        is_superclass: A flag indicating whether or not the "classes" param
            consists of superclasses.
        proportion: A float indicating the proportion of classes to
            retrieve.

    Returns:
        The filtered list of classes (empty when `classes` is empty).

    Raises:
        ValueError: If the classes cannot be filtered.
    """
    output_classes = []
    # `except Exception` (instead of the old bare `except`) no longer
    # swallows KeyboardInterrupt/SystemExit, and chaining preserves the
    # original cause for debugging.
    try:
        if classes and is_superclass:
            output_classes = dataset.get_classes_by_superclass(classes, proportion)
        elif classes and not is_superclass:
            output_classes = [int(x) for x in classes]
    except Exception as exc:
        raise ValueError('Failed to filter classes.') from exc
    return output_classes
6aee69302c6f53f574fece881e6e254bae77bcde
81,455
import math


def COL_DIST2(col1, col2):
    """
    Computes 2-distance between two RGB vectors,
    i.e. = sqrt(r^2 + g^2 + b^2)
    """
    dr = col1[0] - col2[0]
    dg = col1[1] - col2[1]
    db = col1[2] - col2[2]
    return math.sqrt(dr * dr + dg * dg + db * db)
88f9d83361937703e07c0a0bc258987cbe5bd276
81,456
def ij_to_list_index(i, j, n):
    """
    Map upper-triangular matrix coordinates to a flat list index.

    Assuming a symmetric n x n matrix M whose upper triangle (including
    the diagonal) was raveled row-major into a list, return the list index
    corresponding to entry (i, j).

    :param i: row index of the matrix M
    :param j: column index of matrix M; must satisfy j >= i
    :param n: total number of rows / columns of M
    :return: The correct index into the raveled list
    """
    assert j >= i, "We only consider the upper triangular part..."
    # Row k contributes (n - k - 1) entries before row k+1 starts; the
    # closed form sum_{k<i}(n - k - 1) = i*n - i*(i+1)//2 replaces the
    # previous O(i) loop.
    return i * n - i * (i + 1) // 2 + j
d2d1d9533abe2a8c07a2fd32d9721360c012289c
81,466
def get_version(raw):
    """
    This will parse out the version string from the given list of lines.

    If no version string can be found a ValueError will be raised.
    """
    marker = "# MODULE:"
    for line in raw:
        if line.startswith(marker):
            # Everything after the first ':' (stripped) is the version.
            return line.split(":", 1)[1].strip()
    raise ValueError("Could not determine the version")
cddccd1905f7a75eb1122b300cccebb71782304f
81,476
def effective_number(Z, orders):
    """Effective number of species.

    The effective number of species is the number of equiprobable species
    that would yield the same diversity as a given distribution. As it is
    a monotonic transformation, this function can also be used to
    transform confidence intervals on Z.

    Parameters
    ----------
    Z : np.ndarray of floats, [0, 1]
        Generalized Simpson's entropies
    orders : np.ndarray of integers
        Order of the generalized Simpson's entropy. Must match the orders
        used in the calculation of Z's.

    Returns
    -------
    D : float
        Effective number
    """
    denominator = 1 - Z ** (1 / orders)
    return 1 / denominator
7c5c65bb92a30aeade25452624aa139cc2085c6c
81,479
def spline_interpolate(s, t, p1, p2, p3, p4):
    """Cubic hermite spline interpolation.

    Arguments:
    s -- tension parameter, usually should be calculated as (1-t)/2
    t -- time parameter between 0.0 and 1.0, representing distance along
         the spline from p2 to p3.
    p1,p2,p3,p4 -- Four control points necessary for interpolation.
    """
    # Cubic hermite spline, re-arranged and optimised: w1..w4 are the
    # blending weights applied to the four control points.
    t_sq = t ** 2
    t_cu = t ** 3
    w1 = s * (-t_cu + 2 * t_sq - t)
    w4 = s * (t_sq - t_cu)
    w2 = w4 + (2 * t_cu - 3 * t_sq + 1)
    w3 = s * (t_cu - 2 * t_sq + t) + (-2 * t_cu + 3 * t_sq)
    x = w1 * p1[0] + w2 * p2[0] + w3 * p3[0] - w4 * p4[0]
    y = w1 * p1[1] + w2 * p2[1] + w3 * p3[1] - w4 * p4[1]
    return (x, y)
0f2dea82d60c38779b079afb4b10e6daed1e2e9f
81,480
def subSamplerConnector2D(size_in, size_out, weights, delays):
    """Build subsampler connections for use with a FromList Connector.

    The output is a connectivity matrix which will subsample the input
    population.

    size_in = size of the input population (2D = size_in x size_in)
    size_out = size of the sampled population
    weights = averaging weight value (each connection will have this value),
        must be float
    delays = averaging delay value (each connection will have this value)

    Returns a list of (source, target, weight, delay) tuples.
    """
    connections = []
    # `//` keeps the index arithmetic integral: this was Python 2 code,
    # and under Python 3 the old `/` produced float neuron indices.
    step = size_in // size_out
    for i in range(size_in):
        row_out = i // step
        for j in range(size_in):
            col_out = j // step
            connections.append(
                (i * size_in + j, row_out * size_out + col_out, weights, delays))
    return connections
611d3cd020104fc2f191cb10ce8780988a71de98
81,486
from typing import Dict
from typing import List


def avoid_walls(my_head: Dict[str, int], board_width: int, board_height: int,
                possible_moves: List[str]) -> List[str]:
    """
    my_head: Dictionary of x/y coordinates of the Battlesnake head.
            e.g. {"x": 0, "y": 0}
    board_width: Width of the board (x-axis)
    board_height: Height of the board (y-axis)
    possible_moves: List of strings. Moves to pick from.
            e.g. ["up", "down", "left", "right"]
    return: The list of remaining possible_moves with wall-bound
            directions removed (list is mutated in place).
    """
    x, y = my_head["x"], my_head["y"]
    blocked = []
    # At most one horizontal and one vertical direction can hit a wall.
    if x == 0:
        blocked.append("left")
    elif x == board_width - 1:
        blocked.append("right")
    if y == 0:
        blocked.append("down")
    elif y == board_height - 1:
        blocked.append("up")
    for direction in blocked:
        possible_moves.remove(direction)
    return possible_moves
7eff7fce17fec91dcaebbd6ac155d8158a9295af
81,489
def PyUnicode_Concat(space, w_left, w_right):
    """Concat two strings giving a new Unicode string."""
    w_result = space.add(w_left, w_right)
    return w_result
c2afe0fa41fa4921c03148b551de95e11b83abc1
81,495
def list_mean(l):
    """Compute the average of a list.

    Parameters:
        l (list): list of numbers

    Returns:
        mean (float): arithmetic mean, or None for an empty list
    """
    if not l:
        return None
    return sum(l) / len(l)
4b257a2398aa6d2f88af6bdd6aa10a3f944aa61d
81,502
import shlex def _parse_tags(tags: str): """Takes a string of whitespace seperated tags and assigns them to a dict. Expects each tag to be of the format 'tag_name:tag_value' Args: tags (str): The tags to parse Returns: dict: The parsed tags """ return dict(item.split(":") for item in shlex.split(tags))
ce907265c5a1706af6a79860abe4453878b3456e
81,503
def surrogate_escape_string(input_string, source_character_set):
    """
    Escapes a given input string using the provided source character set,
    using the `surrogateescape` codec error handler: bytes that are invalid
    UTF-8 come back as lone surrogate code points.
    """
    raw_bytes = input_string.encode(source_character_set, "surrogateescape")
    return raw_bytes.decode("utf-8", "surrogateescape")
0447d97a110935f41a0fc0ece665fb9e95af47c6
81,504
def _get_title(property_name, schema, property_section=None): """ Get the title for the property name from the WRA Data Model Schema. Optionally, you can send the section of the schema where the property should be found. This avoids finding the wrong property name when the name is not unique. If the property name is not found it will return itself. :param property_name: The property name to find. :type property_name: str :param schema: The WRA Data Model Schema. :type schema: dict :param property_section: The section in the schema where the property can be found. This avoids the case where the property_name is not unique in the schema. :type property_section: str or None :return: The title as stated in the schema. :rtype: str """ # search through definitions first if schema.get('definitions') is not None: if property_name in schema.get('definitions').keys(): return schema.get('definitions').get(property_name).get('title') # search through properties if schema.get('properties') is not None: # is property_name in the main properties if property_name in schema.get('properties').keys() and property_section is None: return schema.get('properties').get(property_name).get('title') # is property_section part of the main properties if property_section in schema.get('properties').keys(): property_type = schema.get('properties').get(property_section).get('type') if property_type is not None and 'array' in property_type: # move down into an array result = _get_title(property_name, schema.get('properties').get(property_section)['items']) if result != property_name: return result elif property_type is not None and 'object' in property_type: # move down into an object result = _get_title(property_name, schema.get('properties').get(property_section)) if result != property_name: return result # don't recognise either property_name or property_section. 
# loop through each property to find an array or object to move down to for k, v in schema.get('properties').items(): if v.get('type') is not None and 'array' in v['type']: # move down into an array result = _get_title(property_name, v['items'], property_section) if result != property_name: return result elif v.get('type') is not None and 'object' in v['type']: # move down into an object result = _get_title(property_name, v, property_section) if result != property_name: return result # can't find the property_name in the schema, return itself return property_name
f64cf9a3efd4b5a6a7f49cfd326d223afbde7673
81,506
def get_sleepiest_minute(guard_sleep):
    """For a guard's sleep record, return the minute he was most often asleep.

    returns: (most_slept_minute, times_slept_that_minute)
    Ties keep the first minute seen; an empty record yields (0, 0).
    """
    best_minute, best_count = 0, 0
    for minute, count in guard_sleep.items():
        # Strict ">" preserves the earliest minute on ties.
        if count > best_count:
            best_minute, best_count = minute, count
    return best_minute, best_count
8574f0eae4fc39e2366a9e7473ffad4a62901f3a
81,511
def seq_to_str(seq_sol):
    """Join a sequence solution (e.g.: ['r2']) into its string form (e.g.: 'r2')."""
    separator = ' '
    return separator.join(seq_sol)
8d398b7c14dd546b411000d39d539266f515c049
81,514
import string


def tokenize(txt, translator=None):
    """Tokenize text: lower-case it, strip punctuation, and split on whitespace.

    Args:
        txt: text to be tokenized; str
        translator: translation table mapping punctuation away, as built by
            str.maketrans(); a falsy value means "use the default table"
    """
    # Any falsy translator (None, empty dict) falls back to the default.
    table = translator or str.maketrans("", "", string.punctuation)
    return str(txt).lower().translate(table).strip().split()
129484b4984feb4e28124b541cb692a94968a610
81,524
def factorial(n: int) -> int:
    """Iterative implementation of the factorial algorithm.

    factorial(0) = 1
    factorial(1) = 1

    :param n: non-negative integer
    :raises ValueError: if n is negative (previously this silently returned 1)
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
9b5e64735b526905f5617c7cc04e0b34d70f3930
81,527
import re
import collections


def most_common_word(paragraph: str, banned: list) -> str:
    """Return most common word except for banned words"""
    # Normalise: non-word characters become spaces, then lower-case and split.
    tokens = re.sub(r"[^\w]", " ", paragraph).lower().split()
    counts = collections.Counter(word for word in tokens if word not in banned)
    # most_common(1) resolves ties the same way a stable descending sort would.
    return counts.most_common(1)[0][0]
74c48d3a9023874fc03e0e26617bc155b5f4ebb3
81,529
def slice_clusters_idx(rm_pd, z_bins):
    """
    Returns all clusters from the DataFrame of clusters between a range of
    specified redshift limits.

    Parameters
    -----------
    rm_pd : `pd.DataFrame`
        DataFrame of cluster centers with ['RA','DEC','Z_LAMBDA'] information.

    z_bins : `numpy.ndarray`
        Array of the left and right bin edges of the different redshift bins.

    Returns
    -----------
    cluster_idx_arr : `list`
        List of indices of the cluster centers for each of the different
        redshift bins.
    """
    cluster_idx_arr = []
    for z_low, z_high in z_bins:
        # Half-open bin: [z_low, z_high)
        in_bin = (rm_pd['Z_LAMBDA'] >= z_low) & (rm_pd['Z_LAMBDA'] < z_high)
        cluster_idx_arr.append(rm_pd.loc[in_bin].index.values)
    return cluster_idx_arr
c81ebc69da3a35d0316a78b42499a1d5b8159b08
81,531
from pathlib import Path
import gzip
import json


def read_json(filename) -> dict:
    """Read a JSON file and return its contents as a dict.

    Accepts a str or pathlib.Path ending in .json (plain) or .json.gz
    (gzip-compressed UTF-8).

    :raises IOError: if the filename has any other extension.
    """
    if isinstance(filename, Path):
        filename = str(filename)
    if filename.endswith(".json.gz"):
        # "rt" mode decompresses and decodes UTF-8 in one step.
        with gzip.open(filename, "rt", encoding="utf-8") as file:
            return json.load(file)
    elif filename.endswith(".json"):
        with open(filename, "r") as file:
            return json.load(file)
    else:
        # BUG FIX: the message previously said ".gz or .json.gz".
        raise IOError("File format must be .json or .json.gz")
f6d8e95590b60f839771281fe3ae8ceaec0fc824
81,533
def GetTheater( soup ):
    """Given a BeautifulSoup object corresponding to a table row containing
    theater name and showtimes, extract the theater name.

    Used in GetTheatersAndTimes.
    """
    # The theater name lives in a "link" span inside the "mid b" column.
    mid_column = soup.find("td", {"class": "mid b"})
    name_span = mid_column.find("span", {"class": "link"})
    return name_span.getText()
30ed86cda4ac4064a549c6916db4661f2affdb32
81,535
def divide(items, fraction = 0.5):
    """Split a sequence into two parts at round(fraction * len(items))."""
    split_at = int(round(len(items) * fraction))
    head, tail = items[:split_at], items[split_at:]
    return (head, tail)
56d1b99728a7c12d47f6d5144c8ee95fcb113834
81,536
def MockIterIfs(arg):
    """Mock out pynetlinux.Interface.Iterifs, return no interfaces."""
    interfaces = []
    return interfaces
6d8baafe6bf571b72f72f665eee4c12defa939da
81,537
from typing import List


def convert_comma_separated_str_to_list(input_str: str, trim: bool = True) -> List[str]:
    """Convert a comma separated string into a list of its non-empty parts.

    With trim=True (default) each part is whitespace-stripped before the
    emptiness check; with trim=False parts are kept verbatim and only
    empty strings are dropped.
    """
    text = input_str.strip() if trim else input_str
    if not text:
        return []
    pieces = text.split(",")
    if trim:
        pieces = [piece.strip() for piece in pieces]
    return [piece for piece in pieces if piece]
bab83ade3b5bbef4b8dafaed9e949260fb1a93f1
81,539
import json


def transform_plural(value):
    """
    Transform the plural data represented in the CSV as a JSON string into a
    Python object. Falsy input (empty string, None) yields an empty list.
    """
    return json.loads(value) if value else []
6d53d4c1fd4d3820c2a6ad65509b94c2a4fcc392
81,541
import pytz
from datetime import datetime


def UTCSecondToLocalDatetime(utcsec, timezone="Europe/Berlin"):
    """
    Convert a unix timestamp (UTC seconds) into a timezone-aware local datetime.

    Parameters
    ----------
    utcsec: int
      Time in utc seconds (unix time).
    timezone: string
      Time zone string compatible with pytz format.

    Returns
    -------
    dt: Datetime object corresponding to local time.

    Notes
    -----
    Adapted from a stackoverflow answer.

    To list all available time zones:
    >> import pytz
    >> pytz.all_timezones

    To print the returned datetime object in a certain format:
    >> from pyik.time_conversion import UTCSecondToLocalDatetime
    >> dt = UTCSecondToLocalDatetime(1484314559)
    >> dt.strftime("%d/%m/%Y, %H:%M:%S")
    """
    target_tz = pytz.timezone(timezone)
    # Interpret the timestamp as UTC, then shift into the target zone.
    aware_utc = datetime.utcfromtimestamp(utcsec).replace(tzinfo=pytz.utc)
    # normalize() fixes DST boundary artefacts from astimezone().
    return target_tz.normalize(aware_utc.astimezone(target_tz))
8c28e8bec4097ef4e8edb06ca4a34138a82751c2
81,542
def parse_story_file(content):
    """
    Remove article highlights and unnecessary white characters:
    keep only the text before the first "@highlight" marker, collapse
    lines, and drop blank ones.
    """
    body = content.split("@highlight")[0]
    stripped_lines = (line.strip() for line in body.split("\n"))
    return " ".join(line for line in stripped_lines if line)
350774af9ba5341f8869039fb99018be951365af
81,545
def grid_rows_reversed(grid: list[list[int]]) -> list[list[int]]:
    """Return grid with each row reversed"""
    return [list(reversed(row)) for row in grid]
de1c68c83c157316453592a349f8a33f83580198
81,548
import operator
from functools import reduce


def tupsum(*args):
    """Adds any number of tuples together as if they were vectors."""
    def pairwise_sum(left, right):
        # Vector addition is only defined for equal dimensions.
        if len(left) != len(right):
            raise ValueError('Tuples must be of same length to be summed')
        return tuple(operator.add(a, b) for a, b in zip(left, right))

    return reduce(pairwise_sum, args)
cd7aa38c09706417f2b839eb8b9cb86e76ec9a4d
81,551
def is_edges_simplified(edges_gdf):
    """
    The function checks the presence of possible duplicate geometries in the
    edges_gdf GeoDataFrame.

    Parameters
    ----------
    edges_gdf: LineString GeoDataFrame
        street segments; a 'code' column is added as a side effect

    Returns
    -------
    simplified: boolean
        whether the edges of the network are simplified or not
    """
    # Build a canonical undirected edge code "low-high" so that (u, v) and
    # (v, u) produce the same code.
    # BUG FIX: the original used chained assignment (df['code'][mask] = ...),
    # which raises SettingWithCopyWarning and silently fails to write under
    # pandas copy-on-write; use .loc instead.
    edges_gdf['code'] = None
    forward = edges_gdf['v'] >= edges_gdf['u']
    edges_gdf.loc[forward, 'code'] = (edges_gdf.loc[forward, 'u'].astype(str) + "-" +
                                      edges_gdf.loc[forward, 'v'].astype(str))
    edges_gdf.loc[~forward, 'code'] = (edges_gdf.loc[~forward, 'v'].astype(str) + "-" +
                                       edges_gdf.loc[~forward, 'u'].astype(str))
    # Any code appearing more than once means a duplicate (unsimplified) edge.
    counts = edges_gdf['code'].value_counts()
    return not bool((counts > 1).any())
77a9cb3d4e2d00b61f304d0fce9201d914c5cfcb
81,555
import json


def deserialize(string):
    """Reconstitutes datastructure from a string.

    Args:
      string: A serialized data structure

    Returns:
      Data structure represented by string parameter
    """
    data = json.loads(string)
    return data
96ad64167a2339cbb3c271dfed0790c3d64b1e6d
81,556
def _resize_plot_inches(plot, width_change=0, height_change=0): """ Accepts a matplotlib figure or axes object and resizes it (in inches). Returns the original object. Parameters ---------- plot: matplotlib.Figure() or matplotlib.Axes() The matplotlib Figure/Axes object to be resized. width_change: `float` The amount of change to be added on to original width. Use negative values for reducing figure dimensions. height_change `float` The amount of change to be added on to original height. Use negative values for reducing figure dimensions. Returns ------- plot: matplotlib.Figure() or matplotlib.Axes() The matplotlib Figure/Axes object after being resized. """ try: orig_size = plot.figure.get_size_inches() except AttributeError: orig_size = plot.get_size_inches() new_size = (orig_size[0] + width_change, orig_size[1] + height_change, ) try: plot.figure.set_size_inches(new_size, forward=True) except AttributeError: plot.set_size_inches(new_size) return plot
2032fbc514094fa022da87a6f1586a9d4a7fc215
81,559
def space_tokenizer(sequence):
    """
    Splits sequence based on spaces.

    Returns None for a falsy input (empty string / None); consecutive
    spaces yield empty-string tokens, matching str.split(' ').
    """
    if not sequence:
        return None
    return sequence.split(' ')
539809996bfe16faebad8abd29bcec46e88a8283
81,562
from typing import Any


def merge_dictionaries(
    target_dict: dict[Any, Any], *source_dicts: dict[Any, Any]
) -> dict[Any, Any]:
    """
    Recursively merge each of the ``source_dicts`` into ``target_dict``
    in-place; dict values merge recursively, anything else overwrites.
    Returns ``target_dict`` for convenience.
    """
    for src in source_dicts:
        for key, value in src.items():
            if isinstance(value, dict):
                # Recurse into (or create) the nested dict at this key.
                merge_dictionaries(target_dict.setdefault(key, {}), value)
            else:
                target_dict[key] = value
    return target_dict
6fc5eba59c3148b3fc15a2caf4bfae7a148e8922
81,564
def findPassingDistances ( distances, threshold ):
    """
    This function takes a list of distances and a threshold and returns all
    indices of distances which pass the threshold, or -1 if no distance is
    below the threshold.

    Parameters
    ----------
    list : distances
        The list of the distances to be searched. Each entry is indexable;
        the comparison uses entry[0].

    float : threshold
        The threshold below which any passing distance needs to be.

    Returns
    -------
    list : indices
        The indices of all passing distances or -1 if no distance is below
        the threshold.
    """
    passing = [index for index, entry in enumerate(distances) if entry[0] < threshold]
    # Legacy contract: -1 (not an empty list) signals "nothing passed".
    return passing if passing else -1
3ab5070fa8f24dc4abec5541524728230dcd43be
81,568
from typing import Any
import unicodedata


def print_len(elements: Any) -> int:
    """Return length of text in characters, excluding Mark characters.

    This is meant to approximate width of text in a monospaced font, so that
    a plaintext table has a decent chance of lining up the columns correctly.

    >>> print_len('abc')
    3

    'é' counts as one display character, whether it is represented as a
    single precombined Unicode character or as a letter character followed
    by an diacritic character.

    >>> print_len('abb\N{LATIN SMALL LETTER E WITH ACUTE}')
    4
    >>> print_len('abbe\N{COMBINING ACUTE ACCENT}')
    4
    """
    text = str(elements)
    # Unicode general categories for marks all begin with 'M' (Mn/Mc/Me).
    return len([ch for ch in text if not unicodedata.category(ch).startswith('M')])
89dc0b26ea87328a114314a7f9d5e340af497161
81,569
def find_net_from_node(node, in_dict, in_keys):
    """
    Return the net name to which the given node is attached.
    If it doesn't exist, return None.
    """
    # First key (in in_keys order) whose node list contains the node.
    return next((net for net in in_keys if node in in_dict[net]), None)
7d5eb4016d6a12ba12a343bbf02c37d4029e65f2
81,575
def local2global(local_coord, start, end, strand):
    """Return global coordinate within a region from a local coordinate.

    On the forward strand (strand == 1) the coordinate is offset from
    start; on any other strand it is measured back from end.
    """
    return start + local_coord if strand == 1 else end - local_coord
79683968f8d68068ed66c897c8c29f631138757b
81,584