content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import re


def underscore_2_space(string: str) -> str:
    """Return *string* with every underscore replaced by a space."""
    # str.replace is simpler and faster than a regex for a single
    # literal character; the original used re.sub('[_]', ' ', ...).
    return string.replace('_', ' ')
a4ef9b19cda662ec4714cab58e1f502f668b6aaa
701,183
from typing import Iterator
from typing import Callable
from typing import Optional
import multiprocessing
import importlib


def import_entry_point() -> Iterator[Callable[[str], Optional[int]]]:
    """
    Yields a function that imports a module in a separate Python process
    and returns the exit code.

    The "spawn" start method gives the child a fresh interpreter, so the
    import runs without inheriting this process's module state.
    """
    context = multiprocessing.get_context("spawn")

    def do_import(module: str) -> Optional[int]:
        # Run importlib.import_module(module) in a child process and
        # report its exit status (None only if still alive after join).
        process = context.Process(target=importlib.import_module, args=(module,))
        process.start()
        process.join()
        return process.exitcode

    yield do_import
07b5cb8620cee039351cc894707eb8fe3c457daf
701,184
def preprocess_inputs(batched_data, max_sub_l, max_vcpt_l, max_vid_l, device="cuda:0"):
    """clip and move to target device

    Truncates each variable-length context ("sub", "vcpt", "vid") to its
    configured maximum length, moves all model inputs plus the answer
    labels to *device*, and returns the question ids untouched.

    :param batched_data: batch object; each listed attribute holds a
        (tensor, lengths) pair — assumed from the indexing below, TODO
        confirm against the dataloader
    :param max_sub_l: max subtitle sequence length
    :param max_vcpt_l: max visual-concept sequence length
    :param max_vid_l: max video-feature sequence length
    :param device: target device string for tensors
    :return: (model_in_list, target_data, qid_data)
    """
    max_len_dict = {"sub": max_sub_l, "vcpt": max_vcpt_l, "vid": max_vid_l}
    text_keys = ["q", "a0", "a1", "a2", "a3", "a4", "sub", "vcpt"]
    label_key = "answer_idx"
    qid_key = "qid"
    vid_feat_key = "vid"
    model_in_list = []
    for k in text_keys + [vid_feat_key]:
        v = getattr(batched_data, k)
        if k in max_len_dict:
            # Clip sequences longer than the configured maximum and
            # clamp the recorded lengths to stay consistent (min=1 so
            # no sequence is reported as empty).
            ctx, ctx_l = v
            max_l = min(ctx.size(1), max_len_dict[k])
            if ctx.size(1) > max_l:
                ctx_l = ctx_l.clamp(min=1, max=max_l)
                ctx = ctx[:, :max_l]
            model_in_list.extend([ctx.to(device), ctx_l.to(device)])
        else:
            # Question/answer keys are not clipped, only moved.
            model_in_list.extend([v[0].to(device), v[1].to(device)])
    target_data = getattr(batched_data, label_key)
    target_data = target_data.to(device)
    qid_data = getattr(batched_data, qid_key)
    return model_in_list, target_data, qid_data
a453e68f6b799ef3215befeacf8d93704ecc3fcc
701,185
import torch
from typing import Optional
from typing import Union
from typing import Tuple


def wmean(
    x: torch.Tensor,
    weight: Optional[torch.Tensor] = None,
    dim: Union[int, Tuple[int]] = -2,
    keepdim: bool = True,
    eps: float = 1e-9,
) -> torch.Tensor:
    """
    Finds the mean of the input tensor across the specified dimension.
    If the `weight` argument is provided, computes weighted mean.
    Args:
        x: tensor of shape `(*, D)`, where D is assumed to be spatial;
        weights: if given, non-negative tensor of shape `(*,)`. It must be
            broadcastable to `x.shape[:-1]`. Note that the weights for
            the last (spatial) dimension are assumed same;
        dim: dimension(s) in `x` to average over;
        keepdim: tells whether to keep the resulting singleton dimension.
        eps: minimum clamping value in the denominator.
    Returns:
        the mean tensor:
        * if `weights` is None => `mean(x, dim)`,
        * otherwise => `sum(x*w, dim) / max{sum(w, dim), eps}`.
    """
    args = {"dim": dim, "keepdim": keepdim}
    if weight is None:
        return x.mean(**args)
    # Compare trailing dims of x (excluding the last, spatial dim) with
    # the trailing dims of weight: each aligned pair must be equal or 1
    # for broadcasting to be valid.
    if any(
        xd != wd and xd != 1 and wd != 1
        for xd, wd in zip(x.shape[-2::-1], weight.shape[::-1])
    ):
        raise ValueError("wmean: weights are not compatible with the tensor")
    # clamp keeps the denominator away from zero for all-zero weights.
    return (x * weight[..., None]).sum(**args) / weight[..., None].sum(**args).clamp(
        eps
    )
db742eb5d899b190609a8e40cd9e4a65f52a45cd
701,186
def openREADME():
    """Return the contents of README.rst as a string.

    README.rst is UTF-8 encoded; on Python 3 the encoding is passed
    explicitly because sys.getfilesystemencoding() may be 'ascii'.
    On Python 2, open() has no ``encoding`` argument and raises
    TypeError, in which case we fall back to a plain open().
    """
    try:
        f = open("README.rst", encoding="utf-8")
    except TypeError:  # Python 2: open() takes no encoding argument
        f = open("README.rst")
    # ``with`` guarantees the handle is closed even if read() fails;
    # the original bare ``except:`` also masked real I/O errors.
    with f:
        return f.read()
d42e07721f40d7681a9a8a22787a4ba9ac78ec8d
701,187
def ts(nodes, topo_order):  # topo must be a list of names
    """Order *nodes* according to *topo_order*.

    :param nodes: Nodes to be ordered
    :param topo_order: Order to arrange nodes
    :return: Ordered nodes (indices)
    """
    wanted = set(nodes)
    ordered = []
    for name in topo_order:
        if name in wanted:
            ordered.append(name)
    return ordered
741c7b3ac34c9f5beb6dc57ebf1539a27cc2b91b
701,188
import os
import json


def load_fold_indices(path):
    """Load the start and end indices of the test set for every fold.

    Reads ``dataset_fold_indices.json`` from *path*.
    """
    filename = os.path.join(path, 'dataset_fold_indices.json')
    with open(filename, 'r') as handle:
        parsed = json.load(handle)
    # NOTE(review): this returns a pretty-printed JSON *string* of
    # parsed['short'], not the parsed data — confirm callers expect a
    # string rather than the decoded indices.
    return json.dumps(parsed['short'], indent=4)
9408eef31d38c11a63faa544a59d6ef7fef8e7ce
701,189
import requests
import json


def _get_topics_by_token(push_token):
    """
    Look up the FCM topics (tags) a device push token is subscribed to.

    :param push_token: required
    :return: topics, to which this token is subscribed (tags)
    """
    # Placeholders — must be replaced with real credentials / URL.
    firebase_server_key = "SERVER_KEY"
    firebase_info_url = "FIREBASE_INFO_URL"  # "https://iid.googleapis.com/iid/info/"
    auth_key = "key=%s" % firebase_server_key
    headers = {
        "Authorization": auth_key
    }
    # details=true asks the IID endpoint to include topic subscriptions.
    url = firebase_info_url + push_token + '?details=true'
    response = requests.get(url, headers=headers)
    response_dict = json.loads(response.text)
    # Returns a dict keys view of the topic names.
    return response_dict['rel']['topics'].keys()
a1265c6ab177caec92c3c550eebae3ecd35887c6
701,190
def return_slice(axis, index):
    """Prepares a slice tuple to use for extracting a slice for rendering

    Args:
        axis (str): One of "x", "y" or "z"
        index (int): The index of the slice to fetch

    Returns:
        tuple: can be used to extract a slice (None for unknown axis)
    """
    everything = slice(None)
    lookup = {
        "x": (everything, everything, index),
        "y": (everything, index, everything),
        "z": (index, everything, everything),
    }
    return lookup.get(axis)
ac6db30fc12509062efa4481d1f7b2fdaff8149b
701,191
import os
import re
import fnmatch


def load_files_from_dir(dir, pattern=None):
    """Return the file names found in *dir*.

    If *pattern* is given, return the names matching that fnmatch-style
    pattern; otherwise return the names ending in ".txt".

    Keyword arguments:
    dir -- directory to list
    pattern -- optional fnmatch pattern (e.g. "*.csv"); if it is not a
               string/bytes-like object, an error message is printed and
               None is returned
    """
    files_in_path = os.listdir(dir)
    if pattern is None:
        # endswith() checks the real extension; the old unanchored regex
        # re.search(".txt", file) also matched names such as "notes_txt"
        # because "." is a regex wildcard and the match could occur
        # anywhere in the name.
        docs_names = [name for name in files_in_path if name.endswith(".txt")]
    else:
        try:
            docs_names = fnmatch.filter(files_in_path, pattern)
        except TypeError:
            print("Error! pattern should be a string or bytes like object. Returning None")
            docs_names = None
    return docs_names
2b7ad778421598247975a2f37722efae3fd3f718
701,192
def get(*args, **kwargs):
    """Decorates a test to issue a GET request to the application.

    This is sugar for ``@open(method='GET')``.  Arguments are the
    same as to :class:`~werkzeug.test.EnvironBuilder`.  Typical usage::

        @frontend.test
        @get('/')
        def index(response):
            assert 'Welcome!' in response.data
    """
    # Force the HTTP method regardless of what the caller passed.
    return open(*args, **dict(kwargs, method='GET'))
c6322bd1340f5fe919ba25d70823be52ec368363
701,193
import argparse
import sys


def FunctionExitAction(func):
    """Get an argparse.Action that runs the provided function, and exits.

    Args:
      func: func, the function to execute.

    Returns:
      argparse.Action, the action to use.
    """
    class ExitAction(argparse.Action):
        """Zero-argument action: call ``func``, then exit successfully."""

        def __init__(self, **kwargs):
            # The option consumes no command-line values.
            kwargs['nargs'] = 0
            super().__init__(**kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            func()
            sys.exit(0)

    return ExitAction
a6b292ed2491189e14e36df1ef7fb4d38d0102e2
701,196
def _count_dot_semicolumn(value): """Count the number of `.` and `:` in the given string.""" return sum([1 for c in value if c in [".", ":"]])
57ee0c88d31ed62168e562191bb1dd4ebb3de859
701,197
import re


def FixIP(pattern):
    """If a stand alone IP, fix it so RE does not go off the rails.

    When *pattern* is exactly a dotted-quad IP address, escape the dots
    so they match literal periods instead of "any character".
    """
    # Bug fix: re.search(pattern, string) takes the regex FIRST; the
    # original call had the arguments swapped, so the IP test never
    # matched and addresses were returned unescaped.
    if re.search(r"^([0-9]{1,3}\.){3}[0-9]{1,3}$", pattern):
        # If IP, make sure "." is not interpreted as a regexp "." instead of a period separator
        pattern = pattern.replace(".", r"\.")
    return pattern
6cddfc3afda7f4c00ec7167a2a468791d8cc6632
701,198
def grant_staff_access(actor, user, is_staff):
    """Grant staff access to a user via an actor.

    Sets ``user.is_staff`` to *is_staff*, persists the user via
    ``user.save()``, and returns the same user object.
    """
    user.is_staff = is_staff
    user.save()
    return user
18624d0c9968e0e495235c4684d243650183dae0
701,199
import os


def parse_options(option_name: str) -> dict:
    """Parse a Kakoune map option and return a str-to-str dict."""
    raw = os.environ[f"kak_opt_{option_name}"]
    result = {}
    for entry in raw.split():
        # Split on the first '=' only; values may themselves contain '='.
        parts = entry.split('=', 1)
        result[parts[0]] = parts[1]
    return result
94e38b2cb0d1887036c0d0c778974067a45f4ae0
701,200
import re


def normalize_string(string, able=None):
    """
    Normalize selected character classes in *string*.

    Parameters
    ----------
    string: str
    able: list[str] or None, default None
        Which normalizations to apply, any of "space", "hyphen",
        "amp", "quot", "lt", "gt".  None applies none.

    Returns
    -------
    str
    """
    if able is None:
        return string
    # (name, pattern, replacement) applied in this fixed order.
    rules = (
        ("space", r"[\t\u2028\u2029\u00a0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000]+", " "),
        ("hyphen", r"[\u2010\u002d\u2011\u2043\u2212]+", "-"),
        ("amp", r"&amp;", "&"),
        ("quot", r"&quot;", "'"),
        ("lt", r"&lt;", "<"),
        ("gt", r"&gt;", ">"),
    )
    for name, pattern, repl in rules:
        if name in able:
            string = re.sub(pattern, repl, string)
    return string
eb50298d476fba2b1a313afb3051233c1b16e4b5
701,201
def average_above_zero(tab):
    """
    Brief: computes the average of the positive values in *tab*.

    Arg: a list of numeric values; at least one must be positive,
         else an error is raised.
    Return: the computed average of the positive values, as a float.
    Raise: ValueError if no positive value is found
           ValueError if input tab is not a list
    """
    if not isinstance(tab, list):
        raise ValueError("Expected a list as input")
    # The original also tracked the max value but never returned it
    # (it returned return_vals[0] only), so that dead code is dropped
    # and the docstring now matches the actual return value.
    positives = [float(v) for v in tab if v > 0]
    if not positives:
        raise ValueError("No positive value found")
    return sum(positives) / len(positives)
307846cdd75d8e415c6a7d819ffc0d5f7bc70da6
701,202
import click
import traceback


def handle_exception(e: Exception, verbose: bool) -> int:
    """
    Handle exception from a scan command.

    :param e: the caught exception
    :param verbose: when True, print the full traceback before wrapping
    :return: 0 when the user aborted (click Abort); otherwise the
        function raises instead of returning
    :raises click.ClickException: for any non-Abort error, so click can
        render the message and exit non-zero
    """
    if isinstance(e, click.exceptions.Abort):
        # A user abort (Ctrl-C) is not an error: exit code 0.
        return 0
    elif isinstance(e, click.ClickException):
        # Already a click error — let click display it as-is.
        raise e
    else:
        if verbose:
            traceback.print_exc()
        # Wrap unknown errors so click prints them cleanly.
        raise click.ClickException(str(e))
6f295f1c260d8ca92ac1e04ec504177a711c200e
701,203
def calc_montage_horizontal(border_size, *frames):
    """Return total[], pos1[], pos2[], ... for a horizontal montage.

    Each frame is a [width, height] pair.  Frames are laid out left to
    right with *border_size* pixels around and between them, and each
    frame is vertically centered.

    Usage example:
    >>> calc_montage_horizontal(1, [2,1], [3,2])
    ([8, 4], [1, 1], [4, 1])
    """
    num_frames = len(frames)
    # Bug fix: there are num_frames + 1 borders across the width (one
    # before each frame plus one after the last).  The original wrote
    # ``border_size * num_frames + 1``, which only coincides with the
    # correct value when border_size == 1 (as in the doctest).
    total_width = sum(f[0] for f in frames) + border_size * (num_frames + 1)
    max_height = max(f[1] for f in frames)
    total_height = max_height + (2 * border_size)

    x = border_size
    pos_list = []
    for f in frames:
        # Center each frame vertically within the tallest frame.
        y = border_size + (max_height - f[1]) // 2
        pos_list.append([x, y])
        x += f[0] + border_size

    result = [[total_width, total_height]]
    result.extend(pos_list)
    return tuple(result)
8fe5de84d9b1bff9950690ec99f63e174f2f0d22
701,204
def confused(total, max_part, threshold):
    """Determine whether it is too complex to become a cluster.

    If a data set has several (<threshold) sub parts, this method uses
    the total count of the data set, the count of the max sub set and
    the min cluster threshold to decide whether it is too complex to
    become a cluster.

    Args:
        total (int): Total count of a data set.
        max_part (int): The max part of the data set.
        threshold (int): The min threshold of a cluster.

    Returns:
        bool: Too complex return True, otherwise False.
    """
    if total < threshold:
        return False
    remainder = total - max_part
    # Both halves large enough on their own => definitely too complex.
    if min(max_part, remainder) >= threshold:
        return True
    # Otherwise complex only when the split is nearly balanced.
    return abs(max_part - remainder) < threshold - 1
eb674774a8792b4e06d810738fb46f50589b7815
701,206
def predicted_retention(alpha, beta, t):
    """
    Generate the retention probability r at period t, the probability of
    customer to be active at the end of period t-1 who are still active
    at the end of period t.  Implementing the formula in equation (8).

    Raises:
        ValueError: if t is not positive.
    """
    # assert is stripped when Python runs with -O, so validate
    # explicitly instead of using an assertion for input checking.
    if t <= 0:
        raise ValueError("period t should be positive")
    return (beta + t) / (alpha + beta + t)
0bc94878e93b65711fc0f520e2de898cd113b91f
701,207
def is_palindrome(s):
    """
    Determine whether or not given string is valid palindrome

    Non-alphanumeric characters are ignored and comparison is
    case-insensitive.

    :param s: given string
    :type s: str
    :return: whether or not given string is valid palindrome
    :rtype: bool
    """
    left, right = 0, len(s) - 1
    # Two pointers converge from both ends, skipping anything that is
    # not alphanumeric.
    while left < right:
        if not s[left].isalnum():
            left += 1
        elif not s[right].isalnum():
            right -= 1
        elif s[left].lower() != s[right].lower():
            return False
        else:
            left += 1
            right -= 1
    return True
a9d700a2e7907e551cb5060f61de5c829ab77291
701,208
def range_to_level_window(min_value, max_value):
    """Convert min/max value range to level/window parameters.

    The window is the range width; the level is its midpoint.
    """
    window = max_value - min_value
    level = min_value + window / 2
    return (level, window)
0a388ff48a29f0daff20a7cdfd200f8330a32a15
701,209
def type_(printer, ast):
    """Prints "[const|meta|...] type"."""
    # Each prefix keeps its trailing space so the join with the type id
    # is seamless (no space when there are no prefixes).
    prefix_text = ''.join(f'{prefix} ' for prefix in ast["prefixes"])
    return prefix_text + printer.ast_to_string(ast["typeId"])
cdf03cfb3ff0a00fa973aa4eaf7a32cb9b822191
701,210
import numpy


def is_same_transform(matrix0, matrix1):
    """Return True if two matrices perform same transformation.

    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
    True
    >>> is_same_transform(numpy.identity(4), random_rotation_matrix())
    False
    """
    def normalized(m):
        # Copy and scale so the homogeneous term m[3, 3] equals 1.
        m = numpy.array(m, dtype=numpy.float64, copy=True)
        return m / m[3, 3]

    return numpy.allclose(normalized(matrix0), normalized(matrix1))
5b2d9eeffb314a54f1cb41d6e58f2aedce0e78de
701,212
def nrWords(text):
    """Return the number of words in *text*.

    Words are separated by spaces or the punctuation marks , ; ? ! .
    (translated from the original Romanian docstring).
    """
    # str.translate maps every separator to a space in one C-level pass;
    # the original chained five .replace() calls and shadowed the
    # builtin name ``list`` with its intermediate variable.
    separators = str.maketrans({c: ' ' for c in ',.;!?'})
    return len(text.translate(separators).split())
5703481391d3a4043df070e8f224a136e4023fe1
701,213
def _RIPPER_growphase_prune_metric(rule, pos_pruneset, neg_pruneset): """ RIPPER/IREP* prune metric. Returns the prune value of a candidate Rule. Cohen's formula is (p-n) / (p+n). Unclear from the paper how they handle divzero (where p+n=0), so I Laplaced it. Weka's solution was to modify the formula to (p+1)/(p+n+2), but the (non-NaN) values I got appeared closer to those of the original formula. """ # I imagine Weka's is 1/2 because that's closer to a 50-50 class distribution? p = rule.num_covered(pos_pruneset) n = rule.num_covered(neg_pruneset) return (p - n + 1) / (p + n + 1)
5f6d4fefa0a1b06658309fcc882bd889791648d8
701,214
def rid(num):
    """Translate from id to rid.

    The rid is simply the id shifted down by one.
    """
    return num - 1
9080d1d2fa9329ee5678800c2d55f3228d0ffb79
701,215
from typing import Tuple


def stack_fixture(stack_function_fixture) -> Tuple[str, str]:
    """
    Fixture that creates a dummy stack with dummy resources.

    :return: Tuple, where first element is stack name, and second element is stack id.
    """
    make_stack = stack_function_fixture
    return make_stack()
379d4f4202bcf197d855efe72ca84c7c1a319acd
701,216
def make_decorator(func):
    """
    Wraps a test decorator so as to properly replicate metadata
    of the decorated function, including nose's additional stuff
    (namely, setup and teardown).
    """
    def decorate(newfunc):
        # nose may have stashed the original name separately.
        if hasattr(func, 'compat_func_name'):
            name = func.compat_func_name
        else:
            name = func.__name__
        # NOTE: this aliases (does not copy) func.__dict__, so later
        # attribute writes on newfunc are visible on func as well.
        newfunc.__dict__ = func.__dict__
        newfunc.__doc__ = func.__doc__
        newfunc.__module__ = func.__module__
        if not hasattr(newfunc, 'compat_co_firstlineno'):
            newfunc.compat_co_firstlineno = func.__code__.co_firstlineno
        try:
            newfunc.__name__ = name
        except TypeError:
            # can't set func name in 2.3
            newfunc.compat_func_name = name
        return newfunc
    return decorate
bb76f9589abd0b31d008072664e470f16000e208
701,217
import logging


def edge_select_transform(data, edge):
    """
    Data selection and transformation between tasks

    Data transfer between tasks is handled by edges that define rich data
    mapping and selection criteria to match input of the next tasks to
    the output of the previous task.

    Supported transformation options:
    - data_select: a list of data dictionary keys to select as input for
      the next tasks.  All output selected as input by default.
    - data_mapping: dictionary to map (rename) key/value pairs.  Useful
      when the input/output data types between tasks are the same but the
      names differ.
    - nested data: specific selection of nested data is supported by
      definition of dot separated key names (path to nested data).

    .. warning:: data mapping may result in duplicated keys and resulting
                 (potentially unwanted) value replacement.

    :param data:    output of previous task
    :type data:     :py:dict
    :param edge:    edge connecting tasks
    :type edge:     :graphit:GraphAxis, :py:dict

    :return:        curated output
    :rtype:         :py:dict
    """
    mapper = edge.get('data_mapping', {})
    select = edge.get('data_select', data.keys())

    def recursive_key_search(keys, search_data):
        # Walk one key at a time into nested dictionaries; None when the
        # path does not exist.
        if keys[0] in search_data:
            search_data = search_data[keys[0]]
        else:
            return None
        if len(keys) > 1:
            return recursive_key_search(keys[1:], search_data)
        return search_data

    transformed_data = {}
    for key in select:
        # search recursively for dot separated keys
        value = recursive_key_search(key.split('.'), data)
        if value is None:
            # logging.warning replaces the deprecated logging.warn alias.
            logging.warning('Data selection: parameter {0} not in output of task'.format(key))
            continue
        mapped_key = mapper.get(key, key)
        transformed_data[mapped_key.split('.')[-1]] = value
    return transformed_data
6f092fd9558b6aa0262f7aacc94e77bac125b0c2
701,218
def add_allocation_config(config, cache):
    """
    config["components"] maps Allocation name that can be found in csv
    to allocation_id

    :param config: configuration dict to extend
    :param cache: provides get_xml_tree("allocation")
    :return: the same config dict, with "components" filled in
    """
    allocation_root = cache.get_xml_tree("allocation")
    config["components"] = {
        "AllocationDegreeImpl:{}".format(node.get("entityName")): node.get("id")
        for node in allocation_root.findall("allocationContexts_Allocation")
    }
    return config
63df5aaf0b70dbdb17da8a706480be4baa7570d0
701,219
def postvar(sum2, n, a, b):
    """
    Return ``(0.5 * sum2 + b) / (n / 2 + a - 1)``.

    Parameters
    ----------
    sum2 : sum of squares term
    n : sample count
    a : prior parameter
    b : prior parameter

    Returns
    -------
    out : the computed ratio
    """
    numerator = 0.5 * sum2 + b
    denominator = n / 2.0 + a - 1.0
    return numerator / denominator
be89d743f79771c78ca45b98df4ac8f6a39ac971
701,220
def argmax(l, f=None):
    """http://stackoverflow.com/questions/5098580/implementing-argmax-in-python

    Return the index of the largest element (ties -> first occurrence);
    when *f* is given, compare f(element) instead of the element itself.
    """
    values = [f(item) for item in l] if f else l
    best_index, _ = max(enumerate(values), key=lambda pair: pair[1])
    return best_index
51e480cc579b5be37266fecd44359a4984c9ac6e
701,221
def IsEnabled(test, possible_browser):
    """Returns True iff |test| is enabled given the |possible_browser|.

    Use to respect the @Enabled / @Disabled decorators.

    Args:
      test: A function or class that may contain _disabled_strings and/or
          _enabled_strings attributes.
      possible_browser: A PossibleBrowser to check whether |test| may run
          against.
    """
    # Attributes a @Disabled/@Enabled string can be matched against.
    platform_attributes = [a.lower() for a in [
        possible_browser.browser_type,
        possible_browser.platform.GetOSName(),
        possible_browser.platform.GetOSVersionName(),
    ]]
    if possible_browser.supports_tab_control:
        platform_attributes.append('has tabs')

    # Best-effort display name for the skip messages below.
    if hasattr(test, '__name__'):
        name = test.__name__
    elif hasattr(test, '__class__'):
        name = test.__class__.__name__
    else:
        name = str(test)

    if hasattr(test, '_disabled_strings'):
        disabled_strings = test._disabled_strings
        if not disabled_strings:
            return False  # No arguments to @Disabled means always disable.
        # Any matching disabled string vetoes the test.
        for disabled_string in disabled_strings:
            if disabled_string in platform_attributes:
                print (
                    'Skipping %s because it is disabled for %s. '
                    'You are running %s.' % (name,
                                             ' and '.join(disabled_strings),
                                             ' '.join(platform_attributes)))
                return False

    if hasattr(test, '_enabled_strings'):
        enabled_strings = test._enabled_strings
        if not enabled_strings:
            return True  # No arguments to @Enabled means always enable.
        # At least one enabled string must match, otherwise skip.
        for enabled_string in enabled_strings:
            if enabled_string in platform_attributes:
                return True
        print (
            'Skipping %s because it is only enabled for %s. '
            'You are running %s.' % (name,
                                     ' or '.join(enabled_strings),
                                     ' '.join(platform_attributes)))
        return False

    # No decorators present: enabled by default.
    return True
a51835a29cdc0b6909986af7a698cc9d40213f39
701,222
def flatten_list(a_list):
    """Given a list of sequences, return a flattened list

    >>> flatten_list([['a', 'b', 'c'], ['e', 'f', 'j']])
    ['a', 'b', 'c', 'e', 'f', 'j']

    >>> flatten_list([['aaaa', 'bbbb'], 'b', 'cc'])
    ['aaaa', 'bbbb', 'b', 'cc']
    """
    # isinstance check to ensure we're not iterating over characters
    # in a string.
    # Bug fix: the original used the ``cond and a or b`` idiom, which
    # breaks when ``list(item)`` is empty (falsy): an empty sublist was
    # kept as an element ([[]]) instead of being flattened away.
    flat = []
    for item in a_list:
        if isinstance(item, (list, tuple)):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
49baca675cfa59a4cf7b813bc10c12f16d0960bd
701,223
def up(user_version: int) -> str:
    """Return minimal up script SQL for testing."""
    script = f"""
    BEGIN TRANSACTION;
    PRAGMA user_version = {user_version};
    CREATE TABLE Test (key PRIMARY KEY);
    COMMIT;
    """
    return script
2cd7f9204bddeea94d392f40dc91fcfcb9464632
701,224
import re
import collections


def get_noun_term_freq(df, option='N'):
    """
    Returns noun morphemes and frequency
    - input : dataframe, {option : compound noun decomposition flag, default : N}
    - output : list of tuples(morpheme, frequency)
    """
    noun_tags = ('NNG', 'NNP')
    terms = []
    for _, row in df.iterrows():
        if row['tag'] not in noun_tags:
            continue
        if row['type'] == 'Compound' and option != 'N':
            # Decompose compound nouns: '+' joins parts, '/*' is markup.
            expression = row['expression'].replace('+', ' ').replace('/*', '')
            terms.extend(expression.split(' '))
        else:
            terms.append(row['surface'])
    counts = collections.Counter(terms)
    return sorted(counts.items(), key=lambda pair: pair[1], reverse=True)
33d711207955e457aab8d83757eb2fea642e07d7
701,226
def getRecipients(test_data):
    """ Returns, as a string, the email addresses of the student's parents for the given test """
    emails = [
        test_data[key]
        for key in ("MotherEmail", "FatherEmail")
        if test_data[key] != ""
    ]
    # Some entries for mother/father email are identical.
    if len(emails) == 2 and emails[0] == emails[1]:
        return emails[0]
    return ", ".join(emails)
d4b7896a0a4293601031463240c85001ce8430de
701,227
def get_doc_id_filter(doc_id_set):
    """
    This function returns a filter which removes any documents not in
    doc_id_set.  The filter does not alter record_dict; it returns True
    or False indicating whether the document should be kept.

    Parameters
    ----------
    doc_id_set : set

    Returns
    -------
    doc_id_filter : function

    Note
    ----
    It is recommended that this filter be used before any others in
    order to minimize unnecessary computations.
    """
    def doc_id_filter(record_dict):
        return record_dict['doc_id'] in doc_id_set
    return doc_id_filter
7a62a6cf8a290947174183c97fdfc398781d5b19
701,229
def ldrpc_source_address(line):
    """Calculate the absolute source address of a PC-relative load.

    'line' is a line returned by disassembly_lines(). If it doesn't look
    like the right kind of instruction, returns None.
    """
    # Example:  ldr r2, [pc, #900] ; (0x000034b4)
    is_pc_relative_ldr = line.op == 'ldr' and line.args.find('[pc') > 0
    if not is_pc_relative_ldr:
        return None
    # The disassembler puts the resolved address in the comment.
    return int(line.comment.strip(';( )'), 0)
de9ffb9a3e7f1b6a66c6f6727c6d4e37fc42a4b7
701,230
def class_to_json(obj):
    """
    function that returns dictionary description with
    simple data structure for JSON serialization of an object
    """
    # vars(obj) is the builtin spelling of obj.__dict__.
    return vars(obj)
d3b044a62d1c6142959923f0963e438675ef7bbf
701,231
def string(text, is_return_null=False):
    """
    SQL string-literal helper (docstring translated from Chinese).

    Wraps *text* in single quotes for use when generating SQL
    statements.

    :param text: the value to quote
    :param is_return_null: whether to return null — when True and the
        string is empty/None, return "null"; otherwise return "''"
    :return: the quoted SQL fragment
    """
    # NOTE(review): values are interpolated directly into SQL text;
    # prefer parameterized queries for any untrusted input.
    if text is not None and text != '':
        return "'" + str(text) + "'"
    if not is_return_null:
        return "''"
    return "null"
e02ac1f120e79985fb15de05c667dfd248476ff1
701,232
def get_swap_targets(node):
    """ All pairs of columns that do not have the same types in every cell """
    pairs = []
    n_col = node.table.n_col
    for i in range(n_col):
        col_i = node.table.get_col(i)
        for j in range(i + 1, n_col):
            col_j = node.table.get_col(j)
            # A pair qualifies as soon as any aligned cells disagree on type.
            if any(not ci.t == cj.t for ci, cj in zip(col_i, col_j)):
                pairs.append((i, j))
    return pairs
372d2fed3c2e1544f20f69a3166b5773f954736e
701,233
import functools import operator def product (nums): """ get the product of numbers in an array. """ return functools.reduce(operator.mul, nums, 1)
801b3e9c3fe9229eaf8ad58fe73a9a1e63506b41
701,234
def many_constants():
    """Generate Python code which includes >256 constants."""
    lines = [f'a = {i}\n' for i in range(300)]
    return "".join(lines)
8b06dac948ebf7467824221161161d8d8cacef1d
701,235
import distutils.sysconfig as sysconfig
import os


def python_stdlib_dirs():
    """
    Returns (<stdlib-dir>, <sitepkg-dir>, <global-sitepkg-dir>).

    This exists because sysconfig.get_python_lib(standard_lib=True)
    returns something surprising when running a virtualenv python (the
    path to the global python standard lib directory, rather than the
    virtualenv standard lib directory), whereas
    sysconfig.get_python_lib(standard_lib=False) returns a path to the
    local site-packages directory.

    When processing the standard lib directory (global), we should
    ignore the global site-packages directory, not just the local one
    (which wouldn't get processed anyway).

    NOTE(review): distutils was removed in Python 3.12; this will need
    to migrate to the stdlib ``sysconfig`` paths API to stay runnable.
    """
    sitepkg_dir = sysconfig.get_python_lib(standard_lib=False)
    stdlib_dir = sysconfig.get_python_lib(standard_lib=True)
    # The global site-packages lives under the stdlib directory.
    return (stdlib_dir, sitepkg_dir, os.path.join(stdlib_dir, 'site-packages'))
15d3c5a91e94348ffbc76b7f36c556619bc3ae56
701,236
import re


def remove_parentheses(s: str) -> str:
    """Returns the string without the parts in parentheses."""
    # Repeatedly strip innermost (...) groups until a fixed point is
    # reached, so nested parentheses are fully removed.
    while True:
        stripped = re.sub(r"\([^(]*?\)", "", s)
        if stripped == s:
            return s
        s = stripped
a8a4f9cfe536997be1277d16682f8bb65b833e3d
701,237
def get_mapindex(res, index):
    """Get the index of the atom in the original molecule

    Parameters
    ----------
    res : prolif.residue.Residue
        The residue in the protein or ligand
    index : int
        The index of the atom in the :class:`~prolif.residue.Residue`

    Returns
    -------
    mapindex : int
        The index of the atom in the :class:`~prolif.molecule.Molecule`
    """
    atom = res.GetAtomWithIdx(index)
    return atom.GetUnsignedProp("mapindex")
21c1724a5da26cc98857ef3dbaba6806ca653ef8
701,238
def get_combos(itr1, itr2):
    """Returns all the combinations of elements between two iterables."""
    combos = []
    for first in itr1:
        for second in itr2:
            combos.append([first, second])
    return combos
5a57a1237cc73692abd077360f19916c9f366900
701,239
from typing import Tuple import os def _get_image_info_from_path(image_path: str) -> Tuple[str, str]: """Gets image info including sequence id and image id. Image path is in the format of '.../split/sequence_id/image_id.png', where `sequence_id` refers to the id of the video sequence, and `image_id` is the id of the image in the video sequence. Args: image_path: Absolute path of the image. Returns: sequence_id, and image_id as strings. """ sequence_id = image_path.split('/')[-2] image_id = os.path.splitext(os.path.basename(image_path))[0] return sequence_id, image_id
4c386e05601a5069388a559e57b493a13b7286b2
701,240
from typing import Any
import numbers


def is_num(a: Any) -> bool:
    """Checks if the specified Python object is a number (int, float, long, etc)."""
    numeric = isinstance(a, numbers.Number)
    return numeric
437aed69142ea7a3e15eac27aa4c7d9026c8de8e
701,241
import time


def strftime(dt, *args, **kwargs):
    """Version of datetime.strftime that always uses the C locale.

    This is because date strings are used internally in the database,
    and should not be localized.
    """
    formatter = getattr(dt, 'strftime', None)
    if formatter is None:
        # *dt* is a format string / struct-time style argument.
        return time.strftime(dt, *args, **kwargs)
    return formatter(*args, **kwargs)
c4fd0f707915e0e26a1a59ab8c5c72f2d22ce8f0
701,242
def _get_config_kwargs(**kwargs): """Get the subset of kwargs which pertain the config object""" valid_config_kwargs = ["websockets", "cipher", "proxy_options", "keep_alive"] config_kwargs = {} for kwarg in kwargs: if kwarg in valid_config_kwargs: config_kwargs[kwarg] = kwargs[kwarg] return config_kwargs
3e01d1df4b8bddc1c53dfcd8007269894b963eaf
701,243
import unicodedata
import re
from bs4 import BeautifulSoup


def _process_text(text):
    """
    Pre-process Text

    Normalizes unicode, collapses whitespace/break-line combinations
    left over from HTML parsing, upper-cases everything, and rewrites
    the various "ITEM ..." section headers found in 10-K filings into a
    canonical "ITEM n. " form so later section-splitting can key on it.
    """
    text = unicodedata.normalize("NFKD", text)  # Normalize
    text = '\n'.join(text.splitlines())  # Let python take care of unicode break lines

    # Take care of breaklines & whitespaces combinations due to beautifulsoup parsing
    text = re.sub(r'[ ]+\n', '\n', text)
    text = re.sub(r'\n[ ]+', '\n', text)
    text = re.sub(r'\n+', '\n', text)

    # Reformat item headers
    text = text.replace('\n.\n', '.\n')  # Move Period to beginning
    text = text.replace('\nI\nTEM', '\nITEM')
    text = text.replace('\nITEM\n', '\nITEM ')
    text = text.replace('\nITEM ', '\nITEM ')
    text = text.replace(':\n', '.\n')

    # Math symbols for clearer looks
    text = text.replace('$\n', '$')
    text = text.replace('\n%', '%')

    # Reformat
    text = text.replace('\n', '\n\n')  # Reformat by additional breakline

    soup = BeautifulSoup(text, "html.parser")
    text = soup.get_text("\n")

    # Convert to upper
    text = text.upper()  # Convert to upper

    # NOTE(review): this removes EVERY period in the document, including
    # the ones the replacements above just inserted — confirm intended.
    text = re.sub(r'(\.)', '', text)
    # remove empty lines
    text = re.sub(r'(\n\s)', '', text)

    # make the ITEM 1A end with a dot:
    text = text.replace("ITEM 1A", " ITEM 1A. ")
    text = re.sub(r'(ITEM\s1A)', 'ITEM 1A. ', text)

    # Some 10ks number their items with "ONE", "TWO" etc, replace that so we get consistency
    text = text.replace("ITEM ONE", " ITEM 1. ")
    text = text.replace("ITEM TWO", " ITEM 2. ")

    # Some 10ks number the items with roman numbers
    # NOTE(review): replacing "ITEM I" before "ITEM II"/"ITEM III" also
    # rewrites the prefix of those longer headers — confirm intended.
    text = text.replace("ITEM I", "ITEM 1.")
    text = text.replace("ITEM II", "ITEM 2.")
    text = text.replace("ITEM III", "ITEM 3.")

    # make the ITEM 1 and ITEM 2 standard, meaning with a "."
    text = re.sub("(ITEM\s1\W)", " ITEM 1. ", text)
    text = re.sub("(ITEM\s2\W)", " ITEM 2. ", text)
    # some 10ks group together ITEM 1 and 2, therefore: change the keyword to something different
    text = re.sub("(ITEM\s3\W)", " ITEM 3. ", text)
    text = re.sub("(ITEMS\s1\W)", " ITEM XX. ", text)
    return text
e593a8450d92d8c59c8ae20c6889b678c78b886d
701,244
import hashlib


def create_hash_key(index: str, params: str) -> str:
    """
    Build a cache key (docstring translated from Russian).

    :param index: the Elasticsearch index name
    :param params: the query parameters string
    :return: key in the form "<index>:<md5-of-params>"
    """
    digest = hashlib.md5(params.encode()).hexdigest()
    return ":".join((index, digest))
1762d3d089cd26c6c7e9423fd37e782b61be1235
701,245
import os


def check_paths(import_path, export_path):
    """
    Inspects for file errors in import and export paths.

    Args:
        import_path (string): path to input file.
        export_path (string): path to output file.

    Returns:
        None.

    Raises:
        FileNotFoundError: file does not exist on input path.
        FileExistsError: file exists on output path.
    """
    # The input file must exist ...
    if not os.path.isfile(import_path):
        raise FileNotFoundError(
            "File not found in \"input\" directory.\n"
            "Please confirm and run again."
        )
    # ... and the output file must not, to avoid clobbering.
    if os.path.isfile(export_path):
        raise FileExistsError(
            "File already exists in \"output\" directory."
            "\nPlease back up, remove, and run again."
        )
    return None
c65fcc16643a5a40417a2312a5d7820f67d2a543
701,246
import hashlib


def HashFileContents(filename):
    """Return the hash (sha1) of the contents of a file.

    Args:
      filename: Filename to read.

    Returns:
      The sha1 hex digest of the file contents.
    """
    hasher = hashlib.sha1()
    # ``with`` closes the handle even on error, replacing the manual
    # try/finally; reading in 4096-byte chunks keeps memory bounded.
    with open(filename, 'rb') as fh:
        for chunk in iter(lambda: fh.read(4096), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
930ecd76dac76c09a17cddeeb52229189098553a
701,248
def get_moyenne(points_3D, indice):
    """Compute the mean of one coordinate over the first 17 3-D points.

    (Docstring translated from French.)  indice = 0 for x, 1 for y
    (the vertical), 2 for z (the depth).  Falsy entries (missing
    points) are skipped.

    Returns the integer mean, or None when no point is available.
    """
    total = 0
    count = 0
    # Only the first 17 points are considered (presumably pose
    # keypoints — TODO confirm).  Slicing guards against shorter lists,
    # where the original hard-coded range(17) raised IndexError.
    for point in points_3D[:17]:
        if point:
            total += point[indice]
            count += 1
    if count == 0:
        return None
    return int(total / count)
739548f93b2361e5f7e442dd0504e6b619ded1f9
701,249
def code_to_md(source, metadata, language):
    """
    Represent a code cell with given source and metadata as a md cell

    :param source:
    :param metadata:
    :param language:
    :return:
    """
    options = []
    if language:
        options.append(language)
    # The name is kept even when empty, matching the metadata contract.
    if 'name' in metadata:
        options.append(metadata['name'])
    header = '```{}'.format(' '.join(options))
    return [header, *source, '```']
aa51b274d4aacd1bc5dce5bf9ee638429f0f9370
701,250
def get_schema(is_ipv6, octet):
    """Get the template with word slots"""
    new_line = '\n'
    period = '.'
    space = ' '
    non_words = [new_line, period, space]
    if is_ipv6:
        words = [octet, octet, 'and', octet, octet, new_line,
                 octet, octet, octet, octet, octet, octet, octet, period, new_line,
                 octet, octet, octet, octet, octet, period, new_line]
    else:
        words = ['The', octet, octet, octet, new_line,
                 octet, 'in the', octet, octet, period, new_line,
                 octet, octet, period, new_line]
    # Join the words with spaces: every word gets a space before it except
    # the first word, punctuation/newlines, and a word right after a newline.
    schema = [words[0]]
    for previous, current in zip(words, words[1:]):
        if current not in non_words and previous != new_line:
            schema.append(space)
        schema.append(current)
    return schema
039e8a623be8e37acea7f2a4373a3b5f9bdaf53c
701,251
def impute_features(feature_df):
    """Impute missing values in the feature frame.

    - ``smoothed_ssn``: forward fill (sunspot number is monthly, so the
      last known value carries forward for the rest of the month)
    - remaining (solar wind) columns: linear interpolation
    """
    feature_df.smoothed_ssn = feature_df.smoothed_ssn.ffill()
    return feature_df.interpolate()
3b61e29b30a011f86a8f18f649b77e72f3f87878
701,252
import random

def rand_5() -> int:
    """
    Generates a single, randomly generated 5-digit number.

    Returns:
        The generated 5-digit number (uniform in [10000, 99999]).
    """
    # Bug fix: randrange(100000) could return values with fewer than five
    # digits (e.g. 7), contradicting the documented contract. Constrain
    # the lower bound so the result always has exactly five digits.
    return random.randrange(10000, 100000)
b6abb4ecbd4397548e35c386a4e653df205282ea
701,254
import re
import click

def validate_memory(_ctx, _param, value):
    """Validate memory string."""
    # None means the option was not supplied — nothing to validate.
    if value is None:
        return None
    # Require digits followed by a single K/M/G unit suffix at the end.
    pattern = r'\d+[KkMmGg]$'
    if re.search(pattern, value) is None:
        raise click.BadParameter('Memory format: nnn[K|M|G].')
    return value
105035f338138ff47b6505868c8bf9d2bf37fa91
701,256
import argparse

def cmd_arguments(argv=None):
    """Parse command-line arguments.

    Args:
        argv: optional list of argument strings; when None (the default,
            preserving the original behavior) ``sys.argv[1:]`` is used.

    Returns:
        argparse.Namespace with an ``image`` attribute (or None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--image",
                        help="Full image path can be optionally supplied.")
    # Passing argv explicitly makes the function testable and reusable.
    return parser.parse_args(argv)
ce64792de04ab054959380c5089e47277343bce3
701,257
def flags(flags):
    """Hard-code KZC flags"""
    # Returns a configurator closure: it clears the test's `way` and
    # appends the captured flags to its argument list.
    def f(test, way):
        test.way = None
        test.args += flags
    return f
4257fd66abc036af0d9ef9f971fcc834c735460a
701,258
def indexColumnPivot(Z, minOrMax, tableaux, _colPivot): # search the value that'll come to base (index column)
    """Search the index of the pivot column, i.e. the variable entering the base.

    Arguments:
        Z {[list]} -- [coefficients of the objective (Z) row]
        minOrMax {[int]} -- [1 for maximisation, 2 for minimisation]
        tableaux {[matrix]} -- [simplex tableau; element 0 of each row is
            assumed to hold the index of that row's base variable —
            TODO confirm against the caller]
        _colPivot {[int]} -- [last index of pivot column, excluded from selection]

    Returns:
        [int] -- [index of the chosen pivot column]
    """
    piv = 0
    if minOrMax == 1:
        # Maximisation branch: among negative Z entries, keep the one with
        # the largest absolute value (sentinel -9999 = "none found yet").
        maxI = -9999
        piv = 0
        j = 0
        for line in tableaux:
            i = 0
            if j < len(tableaux):
                for z_value in Z:
                    if i < len(Z)-1 and z_value < 0: # dont take result column
                        if i > 0: # dont take base column
                            if i != _colPivot: # skip the previous pivot column
                                if i == line[0]:
                                    break
                                if abs(z_value) > maxI:
                                    # verify if this index already is in base
                                    k = 1
                                    flag = True
                                    # NOTE(review): this inner loop re-binds `line`,
                                    # shadowing the outer loop variable — verify intended.
                                    for line in tableaux:
                                        if k < len(tableaux):
                                            if line[0] == i:
                                                flag = False
                                                break
                                        k+=1
                                    if flag:
                                        maxI = abs(z_value)
                                        piv = i
                    i+=1
            j += 1
    elif minOrMax == 2:
        # Minimisation branch: same scan, but the stored best (minI) keeps
        # the raw z_value while the comparison uses abs(z_value).
        minI = -9999
        piv = 0
        j = 0
        for line in tableaux:
            i = 0
            if j < len(tableaux):
                for z_value in Z:
                    if i < len(Z)-1 and z_value < 0: # dont take result column
                        if i > 0: # dont take base column
                            if i != _colPivot: # skip the previous pivot column
                                if i == line[0]:
                                    break
                                if abs(z_value) > minI:
                                    # verify if this index already is in base
                                    k = 1
                                    flag = True
                                    for line in tableaux:
                                        if k < len(tableaux):
                                            if line[0] == i:
                                                flag = False
                                                break
                                        k+=1
                                    if flag:
                                        minI = z_value
                                        piv = i
                    i+=1
            j += 1
    # Runtime message kept verbatim (Portuguese: "entering variable").
    print("Variavel que entra: X"+str(piv))
    return piv
b47ac8f8ba3661e193cf9b687d5e0ff6a4bd1d7e
701,259
def interval2string_m(i: float) -> str:
    """Convert a time interval to a string, or to 'NA' if it has no value.

    Arguments:
        i: a number of seconds

    Returns:
        the number of minutes (rounded to 2 decimals) as a string, or
        'NA' if *i* is not a float
    """
    # isinstance is the idiomatic type check (was: type(i) == float);
    # behavior is unchanged for plain floats, and ints/str still yield 'NA'.
    if isinstance(i, float):
        return str(round(i / 60.0, 2))
    return 'NA'
e7d710d10fb8a3393ca2fad4d99e7acc5f4b45eb
701,261
def tab(column) -> str:
    """Emulates the TAB command in BASIC: carriage return followed by the
    ANSI escape sequence that moves the cursor *column* cells forward."""
    return "\r" + "\33[" + str(column) + "C"
d57e0d6840c9f446b2832bbe725bc986aaabf13e
701,262
import codecs
import os
import ast

def get_version():
    """
    Get version without importing from elasticapm. This avoids any side effects
    from importing while installing and/or building the module

    Once Python 3.8 is the lowest supported version, we could consider
    hardcoding the version in setup.cfg instead. 3.8 comes with
    importlib.metadata, which makes it trivial to find the version of a
    package, making it unnecessary to have the version available in code.

    :return: a string, indicating the version
    """
    version_path = os.path.join("elasticapm", "version.py")
    # Bug fix: the original never closed the file handle; `with`
    # guarantees it is released even on early return or error.
    with codecs.open(version_path, encoding="utf-8") as version_file:
        for line in version_file:
            if line.startswith("__version__"):
                version_tuple = ast.literal_eval(line.split(" = ")[1])
                return ".".join(map(str, version_tuple))
    return "unknown"
c0ed312698377e705cfb547c39b69f1858fc490d
701,263
import binascii

def x(h):
    """Decode a hex string into the corresponding bytes."""
    # a2b_hex is binascii's documented alias of unhexlify.
    return binascii.a2b_hex(h.encode('utf8'))
0cbba146b22419f9e67f0a2ea46851a47826d4f8
701,264
def get_new_goal(current_pos, current_dest, home_pos, rally_point, batt_level, blocking_time=10, speed=5, energy_consumption=1):
    """
    Function for deciding the new goal.

    All the input coordinates are in the format [LAT, LON] (deg).

    returns a [int, int] list with [X, Y] coordinates of new destination
    """
    # TODO: decision logic is not implemented yet; this is a stub that
    # always yields an undetermined destination.
    return [None, None]
da8a22da63d518935a1d1cd2b9aa9515abc301e5
701,265
import os
import sys

def getfname(chapelfile, output, prefix):
    """Compute filename for output.

    Args:
        chapelfile: path to the input Chapel source file.
        output: 'rst' to build a .rst path under *prefix*; 'stdout' to
            return None (prefix is ignored with a warning).
        prefix: directory prepended to the generated filename ('rst' only).

    Returns:
        The .rst path for 'rst', None for 'stdout'; exits for anything else.
    """
    if output == 'rst':
        filename = os.path.split(chapelfile)[1]
        basename, _ = os.path.splitext(filename)
        return os.path.join(prefix, basename + '.rst')
    elif output == 'stdout':
        print('Warning: prefix ignored for stdout')
        return None
    else:
        # Bug fix: the original printed the literal '{0}' because the
        # placeholder was never formatted with the offending value.
        print('Error: output = {0} is invalid'.format(output))
        sys.exit(1)
15d48044546718055979a0bb2548e9267b8fb134
701,267
import typing

def cumprod(mod: int, a: typing.List[int]) -> typing.List[int]:
    """Cumulative product of *a* modulo *mod* (the input list is untouched)."""
    result = list(a)
    for idx in range(1, len(result)):
        result[idx] = result[idx] * result[idx - 1] % mod
    return result
62a99a916c5e09187f05b9a31096037b3c53eb85
701,268
def second(str_number):
    """Sum the digits that equal the digit halfway around the circular string.

    :param str_number: str of digits
    :return: int sum of matching digits
    """
    # Idiom fixes: len() instead of __len__(), no throwaway list copy,
    # and pure integer arithmetic for the circular offset (the original
    # used float division then int-truncation, which is equivalent).
    total = len(str_number)
    half = total // 2
    result = 0
    for key, digit in enumerate(str_number):
        if digit == str_number[(key + half) % total]:
            result += int(digit)
    return result
92ffd31c72f4c0f5bc09c244b2e03acaea2d45e3
701,269
import ipaddress

def is_valid_ipv4(address):
    """Check an IPv4 address for validity"""
    try:
        parsed = ipaddress.ip_address(address)
    except ValueError:
        return False
    if not isinstance(parsed, ipaddress.IPv4Address):
        return False
    # Emit at most one warning; the first matching category wins,
    # in the same priority order as before.
    categories = [
        (parsed.is_loopback, "loopback address"),
        (parsed.is_multicast, "multicast address"),
        (parsed.is_reserved, "reserved address"),
        (parsed.is_link_local, "link-local address"),
        (parsed.is_unspecified, "unspecified address"),
        (parsed.is_private, "private address"),
        (address.endswith((".0", ".255")), "potential broadcast address"),
    ]
    warning = next((label for flagged, label in categories if flagged), None)
    if warning is not None:
        print("*** Warning: {} is a {}".format(address, warning))
    return True
fd095d8903cd0a44bfd6a7eb02fa95217a60077a
701,270
def two_pts_to_line(pt1, pt2):
    """
    Create a line from two points in form of

    a1(x) + a2(y) = b
    """
    x1, y1 = (float(c) for c in pt1)
    x2, y2 = (float(c) for c in pt2)
    try:
        slope = (y2 - y1) / (x2 - x1)
    except ZeroDivisionError:
        # Vertical line: approximate with a very steep finite slope.
        slope = 1e5 * (y2 - y1)
    return -slope, 1., -slope * x1 + y1
d607008c41eaa052c0988a7ac66588b464aab8e0
701,271
import numpy as np

def yield_function(E,species='W'):
    """
    use: Y = yield_function(E,species='W')

    Modified Bohdansky formula for physical sputtering at normal incidence.
    See e.g. Y. Marandet et. al. PPCF 58 (2016) 114001.

    input:
        E: Energy of incoming projectiles (1xN np array)
        species: target species, 'Be' or 'W'. Default 'W'
    output:
        Y: Physical sputtering yield, same type and size as E
    """
    # Threshold energy in eV: Eth = EB/(gamma(1-gamma)), with EB the surface
    # binding energy and gamma = 4 M_T M_P/(M_T+M_P)^2 the maximum energy
    # fraction transferred per collision.
    Eth = {'Be':13.09,'W':209.37}
    # Thomas-Fermi energy in eV
    ETF = {'Be':282,'W':9925}
    # Yield factor
    Q = {'Be':0.11,'W':0.07}

    e_th = Eth[species]
    e_tf = ETF[species]
    q = Q[species]

    # Nuclear stopping cross section
    def Sn(x):
        return 3.441*np.sqrt(x)*np.log(x+2.718) / ( 1+6.335*np.sqrt(x) + x*(6.882*np.sqrt(x) - 1.708 ))

    Y = np.zeros(E.size)
    above = E >= e_th                 # below threshold the yield is zero
    ratio = e_th / E[above]
    Y[above] = q * Sn(E[above] / e_tf) * (1 - ratio**(2/3)) * (1 - ratio)**2
    return Y
71c562fa24960838ae58c9d7ed6ba37c3744e13c
701,272
from datetime import date

def get_current_day():
    """Return the current ISO weekday number (1 = Monday … 7 = Sunday)."""
    # isoweekday() is documented to equal isocalendar()[2].
    return date.today().isoweekday()
52558cf9c1cab283abe496845ce778eac4802980
701,273
from typing import List

def read_file_list(path: str) -> List[str]:
    """Read a UTF-8 file as a list of lines (each keeps its trailing newline)."""
    with open(path, 'r', encoding='utf-8') as handle:
        return handle.readlines()
5dbb082f9d228cd2ef227d51603a232508ee74dd
701,274
def semi_split(s):
    """
    Split 's' on semicolons, stripping surrounding whitespace from each piece.

    Returns a list of strings. (Previously returned a lazy ``map`` iterator,
    which could only be consumed once — surprising for a split helper; a list
    supports everything an iterator does, plus len/indexing/re-iteration.)
    """
    return [part.strip() for part in s.split(';')]
f81eff42e170c7b760a75d86d5d0d9326365ca8e
701,275
def sliding_window(sequence, window_size, step_size):
    """Converts a sequence into sliding windows. Returns a list of sequences.

    - sequence: a string containing only nucleotides.
    - window_size: size of each window (e.g. 500 bp).
    - step_size: offset between consecutive window starts (e.g. 250 bp).

    The final window is right-padded with 'N' up to window_size when the
    sequence runs out, and iteration stops there.
    """
    windows = []
    for begin in range(0, len(sequence), step_size):
        stop = begin + window_size
        if stop > len(sequence):
            pad = "N" * (window_size - (len(sequence) - begin))
            windows.append(sequence[begin:] + pad)
            break
        windows.append(sequence[begin:stop])
    return windows
3af3e5a0fb204fe019971494a8f89272fd5026d3
701,276
import curses

def get_next_player_move(state, gui):
    """Uses input to get the next move by a human.

    Reads keystrokes from the gui['inputbar'] curses window until the player
    enters a valid move (digits 0-2 separated by commas) and presses Enter.

    # assumes state has .player (str), .ndim (int), .xes and .oes
    # (lists of coordinate lists) — TODO confirm against caller
    """
    def is_valid_move(state, x):
        # Validate a candidate move string; returns (valid, error_message).
        if x == '':
            return False, '\n'
        s = x.split(',')
        if len(s) != state.ndim:
            return False, 'Not the correct number of dimensions.\n'
        s2 = [int(y) for y in s]
        if s2 in state.xes:
            return False, 'This space already has an X in it.\n'
        elif s2 in state.oes:
            return False, 'This space already has an O in it\n'
        return True, ''
    move = ''
    querystr = ('Where would you like to go Player ' + state.player + ' (give your move with 0,1,2 separated by commas)?')
    errorstr = '\n'
    curstr = ''
    is_valid = False
    while not is_valid:
        # Redraw the prompt plus the latest error and the in-progress input.
        gui['inputbar'].clear()
        gui['inputbar'].addstr(querystr + '\t' + errorstr + curstr)
        gui['inputbar'].refresh()
        nextchr = gui['inputbar'].getkey()
        # NOTE(review): getkey() returns a str, while curses.KEY_ENTER /
        # KEY_DC / KEY_BACKSPACE are ints — those comparisons likely never
        # match; the '\n'/'\r'/'\b'/'\x7f' string cases do the real work.
        if nextchr in ('\n', '\r', curses.KEY_ENTER):
            # Enter submits the accumulated input for validation below.
            move = curstr
        elif nextchr == 'q':
            # 'q' quits the whole program.
            exit()
        elif nextchr in ['0', '1', '2', ',']:
            curstr += nextchr
        elif nextchr in ['\b', '\x7f', curses.KEY_DC, curses.KEY_BACKSPACE]:
            # Backspace/delete removes the last typed character.
            curstr = curstr[:-1]
        is_valid, errorstr = is_valid_move(state, move)
    return [int(y) for y in move.split(',')]
4cbcbceeb58d94f84445402505d0b945e9000e1a
701,277
def reconstruct_path(goal, branch, waypoint_fn):
    """
    Reconstruct a path from the goal state and branch information.

    Walks predecessors via branch[waypoint_fn(node)] until a None
    predecessor marks the start, then returns the path start-to-goal.
    """
    path = [goal]
    node = goal
    while node is not None:
        node = branch[waypoint_fn(node)]
        path.append(node)
    path.pop()  # drop the trailing None sentinel
    path.reverse()
    return path
cdc99ebaa5b987785e86a8b277c25638af1a3e71
701,278
from typing import Type
from typing import Any

def type_instantiate(attr_type: Type, **kwargs) -> Any:
    """
    Instantiate a nominated type.

    Unwraps typing generics (e.g. Dict[str, int] -> dict) via __origin__
    before calling the concrete constructor with **kwargs.
    """
    concrete = attr_type
    while hasattr(concrete, "__origin__"):
        concrete = concrete.__origin__
    return concrete(**kwargs)
a42893824f52ae2bff583014f3e824634e6417e2
701,279
def get_fitness(cell_type,cell_neighbour_types,DELTA,game,game_params):
    """Return the fitness of a single cell: 1 + DELTA * game payoff."""
    payoff = game(cell_type, cell_neighbour_types, *game_params)
    return 1 + DELTA * payoff
bfac50ecbe64911c145a9e6cbc9e872add774cbf
701,280
import sys

def get_print_func(io):
    """Workaround so mocking of stdout works with both py2 and py3:
    pick the io method appropriate to the running interpreter."""
    if sys.version_info[0] == 2:
        return io.write
    return io.writelines
0f9ba032cd701d9bdbac8218b152a6109e0586b7
701,281
def verify_incall_state(log, ads, expected_status):
    """Verify phones are (or are not) in the in-call state.

    Args:
        log: Log object.
        ads: Array of Android Device Objects; every droid is checked.
        expected_status: True to require every phone in a call,
            False to require every phone out of a call.

    Returns:
        True when every device matches expected_status, else False
        (each mismatch is logged).
    """
    all_match = True
    for ad in ads:
        if ad.droid.telecomIsInCall() is not expected_status:
            log.error("Verify_incall_state: {} status:{}, expected:{}".format(
                ad.serial, ad.droid.telecomIsInCall(), expected_status))
            all_match = False
    return all_match
a20f88770e92a6f61ac5376e3a7827cf3c82c1f2
701,282
def getCompletedJobs(jobs):
    """Return the subset of jobs that carry a 'result' entry (i.e. completed)."""
    return [job for job in jobs if 'result' in job]
7193cae304fb0498f9fe50f0302e50699d7e96e3
701,283
def read_pergene_file(pergene_insertions_file,chrom):
    """Read the per-gene file: where each gene starts and ends in the genome.

    Parameters
    ----------
    pergene_insertions_file : str
        absolute path of the per-gene file location
    chrom : str
        Name of the chromosome (roman numeral) to extract information for

    Returns
    -------
    gene_position_dict : dict
        Maps gene name -> [chromosome, start, end] for every gene on the
        chromosome of interest.
    """
    with open(pergene_insertions_file) as handle:
        rows = handle.readlines()

    gene_position_dict = {}
    for row in rows[1:]:  # first row is the header
        fields = row.strip('\n').split('\t')
        if fields[1] != chrom:
            continue

        genename = fields[0]
        # Dict containing all genes within the chromosome, with start/end.
        gene_position_dict[genename] = [fields[1], int(fields[2]), int(fields[3])]

        inserts_field = fields[4].strip('[]')
        geneinserts_list = [int(ins) for ins in inserts_field.split(',')] if inserts_field != '' else []

        reads_field = fields[5].strip('[]')
        genereads_list = [int(read) for read in reads_field.split(',')] if reads_field != '' else []

        # Sanity check: each insertion should have a matching read count.
        if len(geneinserts_list) != len(genereads_list):
            print('WARNING: %s has different number of reads compared with the number of inserts' % genename)

    return gene_position_dict
e9629c5a9621784be6eb6efbd1e8793f2ac5b67e
701,284
import math

def gam_a98rgb(rgb):
    """Apply the a98-rgb transfer curve (exponent 256/563) to an array of
    linear-light values in 0.0-1.0, preserving the sign of each component."""
    return [math.copysign(abs(val) ** (256 / 563), val) for val in rgb]
6d2543dc28ac6533067ffe9b0a265ef093416f9e
701,285
import socket

def conflictBetweenIPv4AndIPv6():
    """
    Is there a conflict between binding an IPv6 and an IPv4 port? Return True
    if there is, False if there isn't.

    This is a temporary workaround until maybe Twisted starts setting
    C{IPPROTO_IPV6 / IPV6_V6ONLY} on IPv6 sockets.

    @return: C{True} if listening on IPv4 conflicts with listening on IPv6.
    """
    sock4 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock6 = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    try:
        sock4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock6.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Grab an OS-assigned port on IPv4, then probe the same port on IPv6.
        sock4.bind(("", 0))
        sock4.listen(1)
        usedport = sock4.getsockname()[1]
        try:
            sock6.bind(("::", usedport))
        except socket.error:
            return True
        return False
    finally:
        sock4.close()
        sock6.close()
bef744d6f066e3aafbe7644662d8d87e4b139abb
701,286
def get_frame_subframe(sfn_sf):
    """Split the packed sfn/sf bitfield into [frame, subframe].

    The frame number occupies bits 4 and above; the subframe number
    occupies the low 4 bits.
    """
    frame_bits = ~((1 << 4) - 1)
    subframe_bits = ~(((1 << 12) - 1) << 4)
    frame = (sfn_sf & frame_bits) >> 4
    subframe = sfn_sf & subframe_bits
    return [frame, subframe]
d623ef8c1fa97dfbd5b5ef2a99e52e045d5dd205
701,287
def covars_for_custom_models_simple_training_data_4elements():
    """
    Initial covars compatible with custom_models_simple_training_data_4elements.

    :return: covars (list of one (initial, lower, upper) tuple)
    """
    return [(0.0, -2.0, 2.0)]
f21cfa472c03811758dfb5196594f93f6e8247c6
701,288
import typing def _de_bruijn(k: int, n: int) -> typing.List[int]: """Generate a De Bruijn sequence. This is a piece of mathematical heavy machinery needed for O(1) ctz on integers of bounded size. The algorithm is adapted from chapter 7 of Frank Ruskey's "Combinatorial Generation". Args: k: The number of symbols in the alphabet. Must be > 1. n: The length of subsequences. Should be >= 0. Returns: The De Bruijn sequence, as a list of integer indices. >>> _de_bruijn(2, 3) [0, 0, 0, 1, 0, 1, 1, 1] >>> _de_bruijn(4, 2) [0, 0, 1, 0, 2, 0, 3, 1, 1, 2, 1, 3, 2, 2, 3, 3] >>> _de_bruijn(2, 5) [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1] """ a: typing.List[int] = [0] * k * n sequence: typing.List[int] = [] def gen(t: int, p: int): if t > n: if n % p == 0: sequence.extend(a[1:p + 1]) else: a[t] = a[t - p] gen(t + 1, p) for j in range(a[t - p] + 1, k): a[t] = j gen(t + 1, t) gen(1, 1) return sequence
bbb86a4f0a7ef03bbce29126cc56e4fb3e41e288
701,289
def identify_necessary_covariates(dependents, definitions):
    """Identify covariates necessary to compute `dependents`.

    This function can be used if only a specific subset of covariates is
    necessary and not all covariates.

    See also
    --------
    respy.likelihood._compute_x_beta_for_type_probability

    """
    pending = {dependents} if isinstance(dependents, str) else set(dependents)
    frontier = set(pending)
    # Breadth-first expansion: names with declared dependencies stay and
    # pull their dependencies in; names without them are dropped.
    while frontier:
        current_layer = list(frontier)
        frontier = set()
        for name in current_layer:
            if name in definitions and definitions[name]["depends_on"]:
                deps = definitions[name]["depends_on"]
                pending |= deps
                frontier |= deps
            else:
                pending.remove(name)
    return {name: definitions[name] for name in pending}
08f6db315a1c1281ef2ffae4efc1ccda78f3e95e
701,290
def template_used(response, template_name):
    """Asserts a given template was used (with caveats)

    First off, this is a gross simplification of what the Django
    assertTemplateUsed() TestCase method does. This does not work as a
    context manager and it doesn't handle a lot of the pseudo-response
    cases.

    However, it does work with Jinja2 templates provided that
    monkeypatch_render() has patched ``django.shortcuts.render`` to add
    the information required.

    Also, it's not tied to TestCase and uses fewer characters to invoke.

    :arg response: HttpResponse object
    :arg template_name: the template in question

    :returns: whether the template was used

    """
    # response.templates holds Template objects (take .name);
    # response.jinja_templates is already a list of strings.
    used = [tpl.name for tpl in getattr(response, "templates", [])]
    used.extend(getattr(response, "jinja_templates", []))
    return template_name in used
f8a878d27c5379b0b2f2fac2a08fbeee91607858
701,291
import click

def ensure_remote(ctx, param, value):
    """Ensure --remote is used when this option is given."""
    # pylint: disable=unused-argument
    if value:
        # (None, None) is the unset default for the remote option.
        remote_opt = ctx.params.get('remote')
        if remote_opt == (None, None):
            raise click.BadParameter('Invalid without "-r" / "--remote" option')
    return value
d22f8d73e4b5ec57cc18607918775778d09efc67
701,292
import os

def add_prefix_to_fname(original_fname, prefix):
    """Prepend ``prefix`` (joined with an underscore) to a path's basename.

    Examples:
        >>> add_prefix_to_fname("data.tif", "London")
        'London_data.tif'
    """
    directory, basename = os.path.split(original_fname)
    return os.path.join(directory, prefix + "_" + basename)
307385941159b38925cd935f381d5b337d345eeb
701,293