Columns:

    id_within_dataset     int64          1 .. 55.5k
    snippet               stringlengths  19 .. 14.2k
    tokens                listlengths    6 .. 1.63k
    nl                    stringlengths  6 .. 352
    split_within_dataset  stringclasses  1 value
    is_duplicated         bool           2 classes
7,691
def relabel_nodes(G, mapping, copy=True):
    if (not hasattr(mapping, '__getitem__')):
        m = dict(((n, mapping(n)) for n in G))
    else:
        m = mapping
    if copy:
        return _relabel_copy(G, m)
    else:
        return _relabel_inplace(G, m)
[ "def", "relabel_nodes", "(", "G", ",", "mapping", ",", "copy", "=", "True", ")", ":", "if", "(", "not", "hasattr", "(", "mapping", ",", "'__getitem__'", ")", ")", ":", "m", "=", "dict", "(", "(", "(", "n", ",", "mapping", "(", "n", ")", ")", "for", "n", "in", "G", ")", ")", "else", ":", "m", "=", "mapping", "if", "copy", ":", "return", "_relabel_copy", "(", "G", ",", "m", ")", "else", ":", "return", "_relabel_inplace", "(", "G", ",", "m", ")" ]
relabel the nodes of the graph g .
train
true
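The relabel_nodes snippet accepts either a dict-like mapping or a callable. A minimal usage sketch through the public networkx API, which matches the snippet above (assuming networkx is installed):

import networkx as nx

G = nx.path_graph(3)  # nodes 0, 1, 2
H = nx.relabel_nodes(G, {0: 'a', 1: 'b', 2: 'c'})  # dict mapping, returns a copy
print(sorted(H.nodes()))  # ['a', 'b', 'c']

nx.relabel_nodes(G, lambda n: n + 10, copy=False)  # callable mapping, in place
print(sorted(G.nodes()))  # [10, 11, 12]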
7,692
def _run_varnishadm(cmd, params=(), **kwargs):
    cmd = ['varnishadm', cmd]
    cmd.extend([param for param in params if (param is not None)])
    log.debug('Executing: {0}'.format(' '.join(cmd)))
    return __salt__['cmd.run_all'](cmd, python_shell=False, **kwargs)
[ "def", "_run_varnishadm", "(", "cmd", ",", "params", "=", "(", ")", ",", "**", "kwargs", ")", ":", "cmd", "=", "[", "'varnishadm'", ",", "cmd", "]", "cmd", ".", "extend", "(", "[", "param", "for", "param", "in", "params", "if", "(", "param", "is", "not", "None", ")", "]", ")", "log", ".", "debug", "(", "'Executing: {0}'", ".", "format", "(", "' '", ".", "join", "(", "cmd", ")", ")", ")", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ",", "**", "kwargs", ")" ]
execute varnishadm command return the output of the command cmd the command to run in varnishadm params any additional args to add to the command line kwargs additional options to pass to the salt cmd .
train
true
7,693
def stringdisplay(s):
    import re
    return map(str, re.split('[ ,]', s))
[ "def", "stringdisplay", "(", "s", ")", ":", "import", "re", "return", "map", "(", "str", ",", "re", ".", "split", "(", "'[ ,]'", ",", "s", ")", ")" ]
convert "d .
train
false
7,694
def lexicographic_product(G, H):
    GH = _init_product_graph(G, H)
    GH.add_nodes_from(_node_product(G, H))
    GH.add_edges_from(_edges_cross_nodes_and_nodes(G, H))
    GH.add_edges_from(_nodes_cross_edges(G, H))
    GH.name = (((('Lexicographic product(' + G.name) + ',') + H.name) + ')')
    return GH
[ "def", "lexicographic_product", "(", "G", ",", "H", ")", ":", "GH", "=", "_init_product_graph", "(", "G", ",", "H", ")", "GH", ".", "add_nodes_from", "(", "_node_product", "(", "G", ",", "H", ")", ")", "GH", ".", "add_edges_from", "(", "_edges_cross_nodes_and_nodes", "(", "G", ",", "H", ")", ")", "GH", ".", "add_edges_from", "(", "_nodes_cross_edges", "(", "G", ",", "H", ")", ")", "GH", ".", "name", "=", "(", "(", "(", "(", "'Lexicographic product('", "+", "G", ".", "name", ")", "+", "','", ")", "+", "H", ".", "name", ")", "+", "')'", ")", "return", "GH" ]
return the lexicographic product of g and h .
train
false
7,695
def remove_samples(path):
    RE_SAMPLE = re.compile(sample_match, re.I)
    for (root, _dirs, files) in os.walk(path):
        for file_ in files:
            if RE_SAMPLE.search(file_):
                path = os.path.join(root, file_)
                try:
                    logging.info('Removing unwanted sample file %s', path)
                    os.remove(path)
                except:
                    logging.error(T('Removing %s failed'), clip_path(path))
                    logging.info('Traceback: ', exc_info=True)
[ "def", "remove_samples", "(", "path", ")", ":", "RE_SAMPLE", "=", "re", ".", "compile", "(", "sample_match", ",", "re", ".", "I", ")", "for", "(", "root", ",", "_dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "file_", "in", "files", ":", "if", "RE_SAMPLE", ".", "search", "(", "file_", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "file_", ")", "try", ":", "logging", ".", "info", "(", "'Removing unwanted sample file %s'", ",", "path", ")", "os", ".", "remove", "(", "path", ")", "except", ":", "logging", ".", "error", "(", "T", "(", "'Removing %s failed'", ")", ",", "clip_path", "(", "path", ")", ")", "logging", ".", "info", "(", "'Traceback: '", ",", "exc_info", "=", "True", ")" ]
remove all files that match the sample pattern .
train
false
7,696
def add_dictval_to_list(adict, key, alist):
    if (key in adict):
        if isinstance(adict[key], six.string_types):
            alist.append(adict[key])
        else:
            alist.extend(adict[key])
[ "def", "add_dictval_to_list", "(", "adict", ",", "key", ",", "alist", ")", ":", "if", "(", "key", "in", "adict", ")", ":", "if", "isinstance", "(", "adict", "[", "key", "]", ",", "six", ".", "string_types", ")", ":", "alist", ".", "append", "(", "adict", "[", "key", "]", ")", "else", ":", "alist", ".", "extend", "(", "adict", "[", "key", "]", ")" ]
add a value from a dictionary to a list parameters adict : dictionary key : hashable alist : list list where value should be added .
train
false
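To show the append-versus-extend behavior described by the docstring above, here is a self-contained sketch; it substitutes str for six.string_types, which is an assumption valid on Python 3:

# stand-in for the snippet above, with six.string_types replaced by str (assumption)
def add_dictval_to_list(adict, key, alist):
    if key in adict:
        if isinstance(adict[key], str):
            alist.append(adict[key])
        else:
            alist.extend(adict[key])

out = []
add_dictval_to_list({'a': 'x'}, 'a', out)         # a string is appended whole
add_dictval_to_list({'b': ['y', 'z']}, 'b', out)  # a sequence is spliced in
add_dictval_to_list({}, 'missing', out)           # a missing key is a no-op
print(out)  # ['x', 'y', 'z']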
7,697
def _create_ccm_cipher(factory, **kwargs):
    try:
        key = key = kwargs.pop('key')
    except KeyError as e:
        raise TypeError(('Missing parameter: ' + str(e)))
    nonce = kwargs.pop('nonce', None)
    if (nonce is None):
        nonce = get_random_bytes(11)
    mac_len = kwargs.pop('mac_len', factory.block_size)
    msg_len = kwargs.pop('msg_len', None)
    assoc_len = kwargs.pop('assoc_len', None)
    cipher_params = dict(kwargs)
    return CcmMode(factory, key, nonce, mac_len, msg_len, assoc_len, cipher_params)
[ "def", "_create_ccm_cipher", "(", "factory", ",", "**", "kwargs", ")", ":", "try", ":", "key", "=", "key", "=", "kwargs", ".", "pop", "(", "'key'", ")", "except", "KeyError", "as", "e", ":", "raise", "TypeError", "(", "(", "'Missing parameter: '", "+", "str", "(", "e", ")", ")", ")", "nonce", "=", "kwargs", ".", "pop", "(", "'nonce'", ",", "None", ")", "if", "(", "nonce", "is", "None", ")", ":", "nonce", "=", "get_random_bytes", "(", "11", ")", "mac_len", "=", "kwargs", ".", "pop", "(", "'mac_len'", ",", "factory", ".", "block_size", ")", "msg_len", "=", "kwargs", ".", "pop", "(", "'msg_len'", ",", "None", ")", "assoc_len", "=", "kwargs", ".", "pop", "(", "'assoc_len'", ",", "None", ")", "cipher_params", "=", "dict", "(", "kwargs", ")", "return", "CcmMode", "(", "factory", ",", "key", ",", "nonce", ",", "mac_len", ",", "msg_len", ",", "assoc_len", ",", "cipher_params", ")" ]
create a new block cipher .
train
false
7,699
def security_group_get_by_project(context, project_id):
    return IMPL.security_group_get_by_project(context, project_id)
[ "def", "security_group_get_by_project", "(", "context", ",", "project_id", ")", ":", "return", "IMPL", ".", "security_group_get_by_project", "(", "context", ",", "project_id", ")" ]
get all security groups belonging to a project .
train
false
7,700
def equal_fields(matchdict, field):
    return equal((m[field] for m in matchdict.values()))
[ "def", "equal_fields", "(", "matchdict", ",", "field", ")", ":", "return", "equal", "(", "(", "m", "[", "field", "]", "for", "m", "in", "matchdict", ".", "values", "(", ")", ")", ")" ]
do all items in matchdict have the same value for field .
train
false
7,701
def rename_ep_file(cur_path, new_path, old_path_length=0):
    if ((old_path_length == 0) or (old_path_length > len(cur_path))):
        (cur_file_name, cur_file_ext) = os.path.splitext(cur_path)
    else:
        cur_file_ext = cur_path[old_path_length:]
        cur_file_name = cur_path[:old_path_length]
    if (cur_file_ext[1:] in subtitleExtensions):
        sublang = os.path.splitext(cur_file_name)[1][1:]
        from sickrage.core.searchers import subtitle_searcher
        if subtitle_searcher.isValidLanguage(sublang):
            cur_file_ext = ((u'.' + sublang) + cur_file_ext)
    new_path += cur_file_ext
    make_dirs(os.path.dirname(new_path))
    try:
        sickrage.srCore.srLogger.info((u'Renaming file from %s to %s' % (cur_path, new_path)))
        moveFile(cur_path, new_path)
    except (OSError, IOError) as e:
        sickrage.srCore.srLogger.error((u'Failed renaming %s to %s : %r' % (cur_path, new_path, e)))
        return False
    delete_empty_folders(os.path.dirname(cur_path))
    return True
[ "def", "rename_ep_file", "(", "cur_path", ",", "new_path", ",", "old_path_length", "=", "0", ")", ":", "if", "(", "(", "old_path_length", "==", "0", ")", "or", "(", "old_path_length", ">", "len", "(", "cur_path", ")", ")", ")", ":", "(", "cur_file_name", ",", "cur_file_ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "cur_path", ")", "else", ":", "cur_file_ext", "=", "cur_path", "[", "old_path_length", ":", "]", "cur_file_name", "=", "cur_path", "[", ":", "old_path_length", "]", "if", "(", "cur_file_ext", "[", "1", ":", "]", "in", "subtitleExtensions", ")", ":", "sublang", "=", "os", ".", "path", ".", "splitext", "(", "cur_file_name", ")", "[", "1", "]", "[", "1", ":", "]", "from", "sickrage", ".", "core", ".", "searchers", "import", "subtitle_searcher", "if", "subtitle_searcher", ".", "isValidLanguage", "(", "sublang", ")", ":", "cur_file_ext", "=", "(", "(", "u'.'", "+", "sublang", ")", "+", "cur_file_ext", ")", "new_path", "+=", "cur_file_ext", "make_dirs", "(", "os", ".", "path", ".", "dirname", "(", "new_path", ")", ")", "try", ":", "sickrage", ".", "srCore", ".", "srLogger", ".", "info", "(", "(", "u'Renaming file from %s to %s'", "%", "(", "cur_path", ",", "new_path", ")", ")", ")", "moveFile", "(", "cur_path", ",", "new_path", ")", "except", "(", "OSError", ",", "IOError", ")", "as", "e", ":", "sickrage", ".", "srCore", ".", "srLogger", ".", "error", "(", "(", "u'Failed renaming %s to %s : %r'", "%", "(", "cur_path", ",", "new_path", ",", "e", ")", ")", ")", "return", "False", "delete_empty_folders", "(", "os", ".", "path", ".", "dirname", "(", "cur_path", ")", ")", "return", "True" ]
creates all folders needed to move a file to its new location .
train
false
7,702
def fully_qualify_hdfs_path(path):
    if is_uri(path):
        return path
    elif path.startswith('/'):
        return ('hdfs://' + path)
    else:
        return ('hdfs:///user/%s/%s' % (getpass.getuser(), path))
[ "def", "fully_qualify_hdfs_path", "(", "path", ")", ":", "if", "is_uri", "(", "path", ")", ":", "return", "path", "elif", "path", ".", "startswith", "(", "'/'", ")", ":", "return", "(", "'hdfs://'", "+", "path", ")", "else", ":", "return", "(", "'hdfs:///user/%s/%s'", "%", "(", "getpass", ".", "getuser", "(", ")", ",", "path", ")", ")" ]
if path isn't an hdfs:// url, turn it into one .
train
false
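A runnable sketch of the path-qualification rules above; is_uri is a helper from the original codebase, so a simplified stand-in is used here (an assumption for illustration only):

import getpass

def is_uri(path):  # simplified stand-in for the original is_uri helper (assumption)
    return '://' in path

def fully_qualify_hdfs_path(path):
    if is_uri(path):
        return path
    elif path.startswith('/'):
        return 'hdfs://' + path
    else:
        return 'hdfs:///user/%s/%s' % (getpass.getuser(), path)

print(fully_qualify_hdfs_path('hdfs://host/data'))  # already a URI: unchanged
print(fully_qualify_hdfs_path('/data/input'))       # hdfs:///data/input
print(fully_qualify_hdfs_path('input'))             # hdfs:///user/<you>/input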
7,703
def desktop_name_dlgproc(hwnd, msg, wparam, lparam):
    if (msg in (win32con.WM_CLOSE, win32con.WM_DESTROY)):
        win32gui.DestroyWindow(hwnd)
    elif (msg == win32con.WM_COMMAND):
        if (wparam == win32con.IDOK):
            desktop_name = win32gui.GetDlgItemText(hwnd, 72)
            print 'new desktop name: ', desktop_name
            win32gui.DestroyWindow(hwnd)
            create_desktop(desktop_name)
        elif (wparam == win32con.IDCANCEL):
            win32gui.DestroyWindow(hwnd)
[ "def", "desktop_name_dlgproc", "(", "hwnd", ",", "msg", ",", "wparam", ",", "lparam", ")", ":", "if", "(", "msg", "in", "(", "win32con", ".", "WM_CLOSE", ",", "win32con", ".", "WM_DESTROY", ")", ")", ":", "win32gui", ".", "DestroyWindow", "(", "hwnd", ")", "elif", "(", "msg", "==", "win32con", ".", "WM_COMMAND", ")", ":", "if", "(", "wparam", "==", "win32con", ".", "IDOK", ")", ":", "desktop_name", "=", "win32gui", ".", "GetDlgItemText", "(", "hwnd", ",", "72", ")", "print", "'new desktop name: '", ",", "desktop_name", "win32gui", ".", "DestroyWindow", "(", "hwnd", ")", "create_desktop", "(", "desktop_name", ")", "elif", "(", "wparam", "==", "win32con", ".", "IDCANCEL", ")", ":", "win32gui", ".", "DestroyWindow", "(", "hwnd", ")" ]
handles messages from the desktop name dialog box .
train
false
7,706
def getDoubleForLetter(letter, splitLine):
    return getDoubleAfterFirstLetter(splitLine[getIndexOfStartingWithSecond(letter, splitLine)])
[ "def", "getDoubleForLetter", "(", "letter", ",", "splitLine", ")", ":", "return", "getDoubleAfterFirstLetter", "(", "splitLine", "[", "getIndexOfStartingWithSecond", "(", "letter", ",", "splitLine", ")", "]", ")" ]
get the double value of the word after the first occurence of the letter in the split line .
train
false
7,707
def advance_time_seconds(seconds):
    advance_time_delta(datetime.timedelta(0, seconds))
[ "def", "advance_time_seconds", "(", "seconds", ")", ":", "advance_time_delta", "(", "datetime", ".", "timedelta", "(", "0", ",", "seconds", ")", ")" ]
advance overridden time by seconds .
train
false
7,709
def next_workday(dt):
    dt += timedelta(days=1)
    while (dt.weekday() > 4):
        dt += timedelta(days=1)
    return dt
[ "def", "next_workday", "(", "dt", ")", ":", "dt", "+=", "timedelta", "(", "days", "=", "1", ")", "while", "(", "dt", ".", "weekday", "(", ")", ">", "4", ")", ":", "dt", "+=", "timedelta", "(", "days", "=", "1", ")", "return", "dt" ]
returns next weekday used for observances .
train
true
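next_workday is self-contained apart from timedelta; a quick check that a Friday rolls over the weekend to Monday:

from datetime import date, timedelta

def next_workday(dt):
    dt += timedelta(days=1)
    while dt.weekday() > 4:  # 5 = Saturday, 6 = Sunday
        dt += timedelta(days=1)
    return dt

print(next_workday(date(2021, 1, 1)))  # 2021-01-01 is a Friday -> 2021-01-04 (Monday)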
7,710
def gnr_graph(n, p, create_using=None, seed=None):
    if (create_using is None):
        create_using = nx.DiGraph()
    elif (not create_using.is_directed()):
        raise nx.NetworkXError('Directed Graph required in create_using')
    if (seed is not None):
        random.seed(seed)
    G = empty_graph(1, create_using)
    G.name = ('gnr_graph(%s,%s)' % (n, p))
    if (n == 1):
        return G
    for source in range(1, n):
        target = random.randrange(0, source)
        if ((random.random() < p) and (target != 0)):
            target = next(G.successors(target))
        G.add_edge(source, target)
    return G
[ "def", "gnr_graph", "(", "n", ",", "p", ",", "create_using", "=", "None", ",", "seed", "=", "None", ")", ":", "if", "(", "create_using", "is", "None", ")", ":", "create_using", "=", "nx", ".", "DiGraph", "(", ")", "elif", "(", "not", "create_using", ".", "is_directed", "(", ")", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "'Directed Graph required in create_using'", ")", "if", "(", "seed", "is", "not", "None", ")", ":", "random", ".", "seed", "(", "seed", ")", "G", "=", "empty_graph", "(", "1", ",", "create_using", ")", "G", ".", "name", "=", "(", "'gnr_graph(%s,%s)'", "%", "(", "n", ",", "p", ")", ")", "if", "(", "n", "==", "1", ")", ":", "return", "G", "for", "source", "in", "range", "(", "1", ",", "n", ")", ":", "target", "=", "random", ".", "randrange", "(", "0", ",", "source", ")", "if", "(", "(", "random", ".", "random", "(", ")", "<", "p", ")", "and", "(", "target", "!=", "0", ")", ")", ":", "target", "=", "next", "(", "G", ".", "successors", "(", "target", ")", ")", "G", ".", "add_edge", "(", "source", ",", "target", ")", "return", "G" ]
return the growing network with redirection digraph with n nodes and redirection probability p .
train
false
7,711
@tornado.gen.coroutine
def async_poll_for(filename, needles, timeout=5):
    if isinstance(needles, str):
        needles = [needles]
    start = time()
    needle = content = None
    while ((time() - start) < timeout):
        with open(filename) as f:
            content = f.read()
        for needle in needles:
            if (needle in content):
                raise tornado.gen.Return(True)
        (yield tornado_sleep(0.1))
    raise TimeoutException(('Timeout polling "%s" for "%s". Content: %s' % (filename, needle, content)))
[ "@", "tornado", ".", "gen", ".", "coroutine", "def", "async_poll_for", "(", "filename", ",", "needles", ",", "timeout", "=", "5", ")", ":", "if", "isinstance", "(", "needles", ",", "str", ")", ":", "needles", "=", "[", "needles", "]", "start", "=", "time", "(", ")", "needle", "=", "content", "=", "None", "while", "(", "(", "time", "(", ")", "-", "start", ")", "<", "timeout", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "for", "needle", "in", "needles", ":", "if", "(", "needle", "in", "content", ")", ":", "raise", "tornado", ".", "gen", ".", "Return", "(", "True", ")", "(", "yield", "tornado_sleep", "(", "0.1", ")", ")", "raise", "TimeoutException", "(", "(", "'Timeout polling \"%s\" for \"%s\". Content: %s'", "%", "(", "filename", ",", "needle", ",", "content", ")", ")", ")" ]
async version of poll_for .
train
false
7,712
def create_verifier_for_pss(signature, hash_method, public_key):
    mgf = padding.MGF1(hash_method)
    salt_length = padding.PSS.MAX_LENGTH
    return public_key.verifier(signature, padding.PSS(mgf=mgf, salt_length=salt_length), hash_method)
[ "def", "create_verifier_for_pss", "(", "signature", ",", "hash_method", ",", "public_key", ")", ":", "mgf", "=", "padding", ".", "MGF1", "(", "hash_method", ")", "salt_length", "=", "padding", ".", "PSS", ".", "MAX_LENGTH", "return", "public_key", ".", "verifier", "(", "signature", ",", "padding", ".", "PSS", "(", "mgf", "=", "mgf", ",", "salt_length", "=", "salt_length", ")", ",", "hash_method", ")" ]
create the verifier to use when the key type is rsa-pss .
train
false
7,713
def getCraftTypePluginModule(craftTypeName=''):
    if (craftTypeName == ''):
        craftTypeName = getCraftTypeName()
    profilePluginsDirectoryPath = getPluginsDirectoryPath()
    return archive.getModuleWithDirectoryPath(profilePluginsDirectoryPath, craftTypeName)
[ "def", "getCraftTypePluginModule", "(", "craftTypeName", "=", "''", ")", ":", "if", "(", "craftTypeName", "==", "''", ")", ":", "craftTypeName", "=", "getCraftTypeName", "(", ")", "profilePluginsDirectoryPath", "=", "getPluginsDirectoryPath", "(", ")", "return", "archive", ".", "getModuleWithDirectoryPath", "(", "profilePluginsDirectoryPath", ",", "craftTypeName", ")" ]
get the craft type plugin module .
train
false
7,714
def merge_clusters_chimeras(de_novo_clusters, chimeras):
    expanded_chimeras = []
    expanded_non_chimeras = []
    for curr_cluster in de_novo_clusters:
        curr_seq_ids = []
        matches_chimera = False
        for curr_seq in de_novo_clusters[curr_cluster]:
            if (curr_seq in chimeras):
                matches_chimera = True
            curr_seq_ids.append(curr_seq)
        if matches_chimera:
            expanded_chimeras += curr_seq_ids
        else:
            expanded_non_chimeras += curr_seq_ids
    return (expanded_chimeras, expanded_non_chimeras)
[ "def", "merge_clusters_chimeras", "(", "de_novo_clusters", ",", "chimeras", ")", ":", "expanded_chimeras", "=", "[", "]", "expanded_non_chimeras", "=", "[", "]", "for", "curr_cluster", "in", "de_novo_clusters", ":", "curr_seq_ids", "=", "[", "]", "matches_chimera", "=", "False", "for", "curr_seq", "in", "de_novo_clusters", "[", "curr_cluster", "]", ":", "if", "(", "curr_seq", "in", "chimeras", ")", ":", "matches_chimera", "=", "True", "curr_seq_ids", ".", "append", "(", "curr_seq", ")", "if", "matches_chimera", ":", "expanded_chimeras", "+=", "curr_seq_ids", "else", ":", "expanded_non_chimeras", "+=", "curr_seq_ids", "return", "(", "expanded_chimeras", ",", "expanded_non_chimeras", ")" ]
merges results of chimeras/clusters into list of chimeras .
train
false
7,717
def start_end_date_for_period(period, default_start_date=False, default_end_date=False):
    today = date.today()
    if (period == 'daily'):
        start_date = today
        end_date = start_date
    elif (period == 'weekly'):
        start_date = (today + relativedelta(weekday=MO((-1))))
        end_date = (start_date + timedelta(days=7))
    elif (period == 'monthly'):
        start_date = today.replace(day=1)
        end_date = (today + relativedelta(months=1, day=1, days=(-1)))
    elif (period == 'yearly'):
        start_date = today.replace(month=1, day=1)
        end_date = today.replace(month=12, day=31)
    else:
        start_date = default_start_date
        end_date = default_end_date
        return (start_date, end_date)
    return (fields.Datetime.to_string(start_date), fields.Datetime.to_string(end_date))
[ "def", "start_end_date_for_period", "(", "period", ",", "default_start_date", "=", "False", ",", "default_end_date", "=", "False", ")", ":", "today", "=", "date", ".", "today", "(", ")", "if", "(", "period", "==", "'daily'", ")", ":", "start_date", "=", "today", "end_date", "=", "start_date", "elif", "(", "period", "==", "'weekly'", ")", ":", "start_date", "=", "(", "today", "+", "relativedelta", "(", "weekday", "=", "MO", "(", "(", "-", "1", ")", ")", ")", ")", "end_date", "=", "(", "start_date", "+", "timedelta", "(", "days", "=", "7", ")", ")", "elif", "(", "period", "==", "'monthly'", ")", ":", "start_date", "=", "today", ".", "replace", "(", "day", "=", "1", ")", "end_date", "=", "(", "today", "+", "relativedelta", "(", "months", "=", "1", ",", "day", "=", "1", ",", "days", "=", "(", "-", "1", ")", ")", ")", "elif", "(", "period", "==", "'yearly'", ")", ":", "start_date", "=", "today", ".", "replace", "(", "month", "=", "1", ",", "day", "=", "1", ")", "end_date", "=", "today", ".", "replace", "(", "month", "=", "12", ",", "day", "=", "31", ")", "else", ":", "start_date", "=", "default_start_date", "end_date", "=", "default_end_date", "return", "(", "start_date", ",", "end_date", ")", "return", "(", "fields", ".", "Datetime", ".", "to_string", "(", "start_date", ")", ",", "fields", ".", "Datetime", ".", "to_string", "(", "end_date", ")", ")" ]
return the start and end date for a goal period based on today .
train
false
7,718
def has_external_plugins():
    return bool(config['plugins'])
[ "def", "has_external_plugins", "(", ")", ":", "return", "bool", "(", "config", "[", "'plugins'", "]", ")" ]
true if there are updateable plugins .
train
false
7,719
def write_int_matrix(fid, kind, mat):
    FIFFT_MATRIX = (1 << 30)
    FIFFT_MATRIX_INT = (FIFF.FIFFT_INT | FIFFT_MATRIX)
    data_size = ((4 * mat.size) + (4 * 3))
    fid.write(np.array(kind, dtype='>i4').tostring())
    fid.write(np.array(FIFFT_MATRIX_INT, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    fid.write(np.array(mat, dtype='>i4').tostring())
    dims = np.empty(3, dtype=np.int32)
    dims[0] = mat.shape[1]
    dims[1] = mat.shape[0]
    dims[2] = 2
    fid.write(np.array(dims, dtype='>i4').tostring())
    check_fiff_length(fid)
[ "def", "write_int_matrix", "(", "fid", ",", "kind", ",", "mat", ")", ":", "FIFFT_MATRIX", "=", "(", "1", "<<", "30", ")", "FIFFT_MATRIX_INT", "=", "(", "FIFF", ".", "FIFFT_INT", "|", "FIFFT_MATRIX", ")", "data_size", "=", "(", "(", "4", "*", "mat", ".", "size", ")", "+", "(", "4", "*", "3", ")", ")", "fid", ".", "write", "(", "np", ".", "array", "(", "kind", ",", "dtype", "=", "'>i4'", ")", ".", "tostring", "(", ")", ")", "fid", ".", "write", "(", "np", ".", "array", "(", "FIFFT_MATRIX_INT", ",", "dtype", "=", "'>i4'", ")", ".", "tostring", "(", ")", ")", "fid", ".", "write", "(", "np", ".", "array", "(", "data_size", ",", "dtype", "=", "'>i4'", ")", ".", "tostring", "(", ")", ")", "fid", ".", "write", "(", "np", ".", "array", "(", "FIFF", ".", "FIFFV_NEXT_SEQ", ",", "dtype", "=", "'>i4'", ")", ".", "tostring", "(", ")", ")", "fid", ".", "write", "(", "np", ".", "array", "(", "mat", ",", "dtype", "=", "'>i4'", ")", ".", "tostring", "(", ")", ")", "dims", "=", "np", ".", "empty", "(", "3", ",", "dtype", "=", "np", ".", "int32", ")", "dims", "[", "0", "]", "=", "mat", ".", "shape", "[", "1", "]", "dims", "[", "1", "]", "=", "mat", ".", "shape", "[", "0", "]", "dims", "[", "2", "]", "=", "2", "fid", ".", "write", "(", "np", ".", "array", "(", "dims", ",", "dtype", "=", "'>i4'", ")", ".", "tostring", "(", ")", ")", "check_fiff_length", "(", "fid", ")" ]
write integer 32 matrix tag .
train
false
7,720
def fitLine(pts):
    n = len(pts)
    if (n < 1):
        return ((0, 0), (0, 0))
    a = np.zeros(((n - 1), 2))
    for i in range((n - 1)):
        v = (pts[i] - pts[(i + 1)])
        a[i] = (v / np.linalg.norm(v))
    direction = np.mean(a[1:(-1)], axis=0)
    start = np.mean(pts[1:(-1)], axis=0)
    return (start, (start + direction))
[ "def", "fitLine", "(", "pts", ")", ":", "n", "=", "len", "(", "pts", ")", "if", "(", "n", "<", "1", ")", ":", "return", "(", "(", "0", ",", "0", ")", ",", "(", "0", ",", "0", ")", ")", "a", "=", "np", ".", "zeros", "(", "(", "(", "n", "-", "1", ")", ",", "2", ")", ")", "for", "i", "in", "range", "(", "(", "n", "-", "1", ")", ")", ":", "v", "=", "(", "pts", "[", "i", "]", "-", "pts", "[", "(", "i", "+", "1", ")", "]", ")", "a", "[", "i", "]", "=", "(", "v", "/", "np", ".", "linalg", ".", "norm", "(", "v", ")", ")", "direction", "=", "np", ".", "mean", "(", "a", "[", "1", ":", "(", "-", "1", ")", "]", ",", "axis", "=", "0", ")", "start", "=", "np", ".", "mean", "(", "pts", "[", "1", ":", "(", "-", "1", ")", "]", ",", "axis", "=", "0", ")", "return", "(", "start", ",", "(", "start", "+", "direction", ")", ")" ]
returns a start vector and direction vector assumes points segments that already form a somewhat smooth line .
train
false
7,722
def make_overload_attribute_template(typ, attr, overload_func, base=_OverloadAttributeTemplate):
    assert (isinstance(typ, types.Type) or issubclass(typ, types.Type))
    name = ('OverloadTemplate_%s_%s' % (typ, attr))
    dct = dict(key=typ, _attr=attr, _impl_cache={}, _overload_func=staticmethod(overload_func))
    return type(base)(name, (base,), dct)
[ "def", "make_overload_attribute_template", "(", "typ", ",", "attr", ",", "overload_func", ",", "base", "=", "_OverloadAttributeTemplate", ")", ":", "assert", "(", "isinstance", "(", "typ", ",", "types", ".", "Type", ")", "or", "issubclass", "(", "typ", ",", "types", ".", "Type", ")", ")", "name", "=", "(", "'OverloadTemplate_%s_%s'", "%", "(", "typ", ",", "attr", ")", ")", "dct", "=", "dict", "(", "key", "=", "typ", ",", "_attr", "=", "attr", ",", "_impl_cache", "=", "{", "}", ",", "_overload_func", "=", "staticmethod", "(", "overload_func", ")", ")", "return", "type", "(", "base", ")", "(", "name", ",", "(", "base", ",", ")", ",", "dct", ")" ]
make a template class for attribute *attr* of *typ* overloaded by *overload_func* .
train
false
7,723
def generate_instance_identity_document(instance):
    document = {u'devPayProductCodes': None,
                u'availabilityZone': instance.placement[u'AvailabilityZone'],
                u'privateIp': instance.private_ip_address,
                u'version': u'2010-8-31',
                u'region': instance.placement[u'AvailabilityZone'][:(-1)],
                u'instanceId': instance.id,
                u'billingProducts': None,
                u'instanceType': instance.instance_type,
                u'accountId': u'012345678910',
                u'pendingTime': u'2015-11-19T16:32:11Z',
                u'imageId': instance.image_id,
                u'kernelId': instance.kernel_id,
                u'ramdiskId': instance.ramdisk_id,
                u'architecture': instance.architecture}
    return document
[ "def", "generate_instance_identity_document", "(", "instance", ")", ":", "document", "=", "{", "u'devPayProductCodes'", ":", "None", ",", "u'availabilityZone'", ":", "instance", ".", "placement", "[", "u'AvailabilityZone'", "]", ",", "u'privateIp'", ":", "instance", ".", "private_ip_address", ",", "u'version'", ":", "u'2010-8-31'", ",", "u'region'", ":", "instance", ".", "placement", "[", "u'AvailabilityZone'", "]", "[", ":", "(", "-", "1", ")", "]", ",", "u'instanceId'", ":", "instance", ".", "id", ",", "u'billingProducts'", ":", "None", ",", "u'instanceType'", ":", "instance", ".", "instance_type", ",", "u'accountId'", ":", "u'012345678910'", ",", "u'pendingTime'", ":", "u'2015-11-19T16:32:11Z'", ",", "u'imageId'", ":", "instance", ".", "image_id", ",", "u'kernelId'", ":", "instance", ".", "kernel_id", ",", "u'ramdiskId'", ":", "instance", ".", "ramdisk_id", ",", "u'architecture'", ":", "instance", ".", "architecture", "}", "return", "document" ]
URL a json file that describes an instance .
train
false
7,724
def getInstanceState(inst, jellier):
    if hasattr(inst, '__getstate__'):
        state = inst.__getstate__()
    else:
        state = inst.__dict__
    sxp = jellier.prepare(inst)
    sxp.extend([qual(inst.__class__), jellier.jelly(state)])
    return jellier.preserve(inst, sxp)
[ "def", "getInstanceState", "(", "inst", ",", "jellier", ")", ":", "if", "hasattr", "(", "inst", ",", "'__getstate__'", ")", ":", "state", "=", "inst", ".", "__getstate__", "(", ")", "else", ":", "state", "=", "inst", ".", "__dict__", "sxp", "=", "jellier", ".", "prepare", "(", "inst", ")", "sxp", ".", "extend", "(", "[", "qual", "(", "inst", ".", "__class__", ")", ",", "jellier", ".", "jelly", "(", "state", ")", "]", ")", "return", "jellier", ".", "preserve", "(", "inst", ",", "sxp", ")" ]
utility method to default to normal state rules in serialization .
train
false
7,725
def is_git_repo():
    gitfolder = os.path.join(qutebrowser.basedir, os.path.pardir, '.git')
    return os.path.isdir(gitfolder)
[ "def", "is_git_repo", "(", ")", ":", "gitfolder", "=", "os", ".", "path", ".", "join", "(", "qutebrowser", ".", "basedir", ",", "os", ".", "path", ".", "pardir", ",", "'.git'", ")", "return", "os", ".", "path", ".", "isdir", "(", "gitfolder", ")" ]
does the current nipype module have a git folder .
train
false
7,726
def os2_yaml_formatter(table_dict):
    table_string = 'OS/2: {\n'
    for field in table_dict.keys():
        if (field == 'panose'):
            table_string = (((table_string + (' ' * 4)) + field) + ': {\n')
            panose_string = ''
            panose_dict = table_dict['panose'].__dict__
            for panose_field in panose_dict.keys():
                panose_string = (((((panose_string + (' ' * 8)) + panose_field[1:]) + ': ') + str(panose_dict[panose_field])) + ',\n')
            table_string = (((table_string + panose_string) + (' ' * 4)) + '}\n')
        else:
            table_string = (((((table_string + (' ' * 4)) + field) + ': ') + str(table_dict[field])) + ',\n')
    table_string = (table_string + '}\n\n')
    return table_string
[ "def", "os2_yaml_formatter", "(", "table_dict", ")", ":", "table_string", "=", "'OS/2: {\\n'", "for", "field", "in", "table_dict", ".", "keys", "(", ")", ":", "if", "(", "field", "==", "'panose'", ")", ":", "table_string", "=", "(", "(", "(", "table_string", "+", "(", "' '", "*", "4", ")", ")", "+", "field", ")", "+", "': {\\n'", ")", "panose_string", "=", "''", "panose_dict", "=", "table_dict", "[", "'panose'", "]", ".", "__dict__", "for", "panose_field", "in", "panose_dict", ".", "keys", "(", ")", ":", "panose_string", "=", "(", "(", "(", "(", "(", "panose_string", "+", "(", "' '", "*", "8", ")", ")", "+", "panose_field", "[", "1", ":", "]", ")", "+", "': '", ")", "+", "str", "(", "panose_dict", "[", "panose_field", "]", ")", ")", "+", "',\\n'", ")", "table_string", "=", "(", "(", "(", "table_string", "+", "panose_string", ")", "+", "(", "' '", "*", "4", ")", ")", "+", "'}\\n'", ")", "else", ":", "table_string", "=", "(", "(", "(", "(", "(", "table_string", "+", "(", "' '", "*", "4", ")", ")", "+", "field", ")", "+", "': '", ")", "+", "str", "(", "table_dict", "[", "field", "]", ")", ")", "+", "',\\n'", ")", "table_string", "=", "(", "table_string", "+", "'}\\n\\n'", ")", "return", "table_string" ]
formats the yaml table string for opentype os/2 tables .
train
false
7,727
@image_comparison(baseline_images=[u'interp_nearest_vs_none'], extensions=[u'pdf', u'svg'], remove_text=True)
def test_interp_nearest_vs_none():
    rcParams[u'savefig.dpi'] = 3
    X = np.array([[[218, 165, 32], [122, 103, 238]], [[127, 255, 0], [255, 99, 71]]], dtype=np.uint8)
    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax1.imshow(X, interpolation=u'none')
    ax1.set_title(u'interpolation none')
    ax2 = fig.add_subplot(122)
    ax2.imshow(X, interpolation=u'nearest')
    ax2.set_title(u'interpolation nearest')
[ "@", "image_comparison", "(", "baseline_images", "=", "[", "u'interp_nearest_vs_none'", "]", ",", "extensions", "=", "[", "u'pdf'", ",", "u'svg'", "]", ",", "remove_text", "=", "True", ")", "def", "test_interp_nearest_vs_none", "(", ")", ":", "rcParams", "[", "u'savefig.dpi'", "]", "=", "3", "X", "=", "np", ".", "array", "(", "[", "[", "[", "218", ",", "165", ",", "32", "]", ",", "[", "122", ",", "103", ",", "238", "]", "]", ",", "[", "[", "127", ",", "255", ",", "0", "]", ",", "[", "255", ",", "99", ",", "71", "]", "]", "]", ",", "dtype", "=", "np", ".", "uint8", ")", "fig", "=", "plt", ".", "figure", "(", ")", "ax1", "=", "fig", ".", "add_subplot", "(", "121", ")", "ax1", ".", "imshow", "(", "X", ",", "interpolation", "=", "u'none'", ")", "ax1", ".", "set_title", "(", "u'interpolation none'", ")", "ax2", "=", "fig", ".", "add_subplot", "(", "122", ")", "ax2", ".", "imshow", "(", "X", ",", "interpolation", "=", "u'nearest'", ")", "ax2", ".", "set_title", "(", "u'interpolation nearest'", ")" ]
test the effect of "nearest" and "none" interpolation .
train
false
7,728
def create_prereqs_cache_dir():
    try:
        os.makedirs(PREREQS_STATE_DIR)
    except OSError:
        if (not os.path.isdir(PREREQS_STATE_DIR)):
            raise
[ "def", "create_prereqs_cache_dir", "(", ")", ":", "try", ":", "os", ".", "makedirs", "(", "PREREQS_STATE_DIR", ")", "except", "OSError", ":", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "PREREQS_STATE_DIR", ")", ")", ":", "raise" ]
create the directory for storing the hashes .
train
false
7,730
def gethostname():
    import socket
    return socket.gethostname()
[ "def", "gethostname", "(", ")", ":", "import", "socket", "return", "socket", ".", "gethostname", "(", ")" ]
gethostname() -> string return the current host name .
train
false
7,731
def simple_format(s, keys, pattern=RE_FORMAT, expand=u'\\1'):
    if s:
        keys.setdefault(u'%', u'%')

        def resolve(match):
            key = match.expand(expand)
            try:
                resolver = keys[key]
            except KeyError:
                raise ValueError(UNKNOWN_SIMPLE_FORMAT_KEY.format(key, s))
            if isinstance(resolver, Callable):
                return resolver()
            return resolver

        return pattern.sub(resolve, s)
    return s
[ "def", "simple_format", "(", "s", ",", "keys", ",", "pattern", "=", "RE_FORMAT", ",", "expand", "=", "u'\\\\1'", ")", ":", "if", "s", ":", "keys", ".", "setdefault", "(", "u'%'", ",", "u'%'", ")", "def", "resolve", "(", "match", ")", ":", "key", "=", "match", ".", "expand", "(", "expand", ")", "try", ":", "resolver", "=", "keys", "[", "key", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "UNKNOWN_SIMPLE_FORMAT_KEY", ".", "format", "(", "key", ",", "s", ")", ")", "if", "isinstance", "(", "resolver", ",", "Callable", ")", ":", "return", "resolver", "(", ")", "return", "resolver", "return", "pattern", ".", "sub", "(", "resolve", ",", "s", ")", "return", "s" ]
format string .
train
false
7,732
def get_day_names(width='wide', context='format', locale=LC_TIME):
    return Locale.parse(locale).days[context][width]
[ "def", "get_day_names", "(", "width", "=", "'wide'", ",", "context", "=", "'format'", ",", "locale", "=", "LC_TIME", ")", ":", "return", "Locale", ".", "parse", "(", "locale", ")", ".", "days", "[", "context", "]", "[", "width", "]" ]
return the day names used by the locale for the specified format .
train
false
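get_day_names is part of babel's public API (babel.dates); a short usage sketch in the style of babel's documentation:

from babel.dates import get_day_names

print(get_day_names('wide', locale='en_US')[6])      # 'Sunday'
print(get_day_names('abbreviated', locale='es')[4])  # Friday abbreviated in Spanish, e.g. 'vie'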
7,733
def all_suffixes():
    return ((SOURCE_SUFFIXES + BYTECODE_SUFFIXES) + EXTENSION_SUFFIXES)
[ "def", "all_suffixes", "(", ")", ":", "return", "(", "(", "SOURCE_SUFFIXES", "+", "BYTECODE_SUFFIXES", ")", "+", "EXTENSION_SUFFIXES", ")" ]
returns a list of all recognized module suffixes for this process .
train
false
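This composes importlib's suffix lists; the public equivalent can be called directly, and its output varies by platform and Python version:

import importlib.machinery

print(importlib.machinery.all_suffixes())
# e.g. ['.py', '.pyc', '.cpython-311-x86_64-linux-gnu.so', '.abi3.so', '.so']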
7,734
def _conflict_bail(VC_err, version):
    conflict_tmpl = textwrap.dedent("\n        The required version of setuptools (>={version}) is not available,\n        and can't be installed while this script is running. Please\n        install a more recent version first, using\n        'easy_install -U setuptools'.\n\n        (Currently using {VC_err.args[0]!r})\n        ")
    msg = conflict_tmpl.format(**locals())
    sys.stderr.write(msg)
    sys.exit(2)
[ "def", "_conflict_bail", "(", "VC_err", ",", "version", ")", ":", "conflict_tmpl", "=", "textwrap", ".", "dedent", "(", "\"\\n The required version of setuptools (>={version}) is not available,\\n and can't be installed while this script is running. Please\\n install a more recent version first, using\\n 'easy_install -U setuptools'.\\n\\n (Currently using {VC_err.args[0]!r})\\n \"", ")", "msg", "=", "conflict_tmpl", ".", "format", "(", "**", "locals", "(", ")", ")", "sys", ".", "stderr", ".", "write", "(", "msg", ")", "sys", ".", "exit", "(", "2", ")" ]
setuptools was imported prior to invocation .
train
true
7,735
def as_dict(config):
    if isinstance(config, text_or_bytes):
        config = Parser().dict_from_file(config)
    elif hasattr(config, 'read'):
        config = Parser().dict_from_file(config)
    return config
[ "def", "as_dict", "(", "config", ")", ":", "if", "isinstance", "(", "config", ",", "text_or_bytes", ")", ":", "config", "=", "Parser", "(", ")", ".", "dict_from_file", "(", "config", ")", "elif", "hasattr", "(", "config", ",", "'read'", ")", ":", "config", "=", "Parser", "(", ")", ".", "dict_from_file", "(", "config", ")", "return", "config" ]
return a dict from config whether it is a dict, a file, or a filename .
train
false
7,736
def add_traits(base, names, trait_type=None):
    if (trait_type is None):
        trait_type = traits.Any
    undefined_traits = {}
    for key in names:
        base.add_trait(key, trait_type)
        undefined_traits[key] = Undefined
    base.trait_set(trait_change_notify=False, **undefined_traits)
    for key in names:
        _ = getattr(base, key)
    return base
[ "def", "add_traits", "(", "base", ",", "names", ",", "trait_type", "=", "None", ")", ":", "if", "(", "trait_type", "is", "None", ")", ":", "trait_type", "=", "traits", ".", "Any", "undefined_traits", "=", "{", "}", "for", "key", "in", "names", ":", "base", ".", "add_trait", "(", "key", ",", "trait_type", ")", "undefined_traits", "[", "key", "]", "=", "Undefined", "base", ".", "trait_set", "(", "trait_change_notify", "=", "False", ",", "**", "undefined_traits", ")", "for", "key", "in", "names", ":", "_", "=", "getattr", "(", "base", ",", "key", ")", "return", "base" ]
add traits to a traited class .
train
false
7,737
def convert_to_float(image, preserve_range):
    if preserve_range:
        image = image.astype(np.double)
    else:
        image = img_as_float(image)
    return image
[ "def", "convert_to_float", "(", "image", ",", "preserve_range", ")", ":", "if", "preserve_range", ":", "image", "=", "image", ".", "astype", "(", "np", ".", "double", ")", "else", ":", "image", "=", "img_as_float", "(", "image", ")", "return", "image" ]
convert input image to double image with the appropriate range .
train
false
7,740
def unicode_to_ascii(text):
    return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode()
[ "def", "unicode_to_ascii", "(", "text", ")", ":", "return", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "text", ")", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "decode", "(", ")" ]
transliterate unicode characters to ascii .
train
false
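unicode_to_ascii needs only the standard library; NFKD decomposes accented characters into a base character plus combining marks, and the ascii/ignore encode drops the marks:

import unicodedata

def unicode_to_ascii(text):
    return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode()

print(unicode_to_ascii('Café déjà vu'))  # Cafe deja vu
print(unicode_to_ascii('naïve façade'))  # naive facade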
7,741
def _error_traceback_html(exc_info, traceback):
    html = '\n    <html>\n    <head>\n    <title>ERROR: {error}</title>\n    </head>\n    <body style="font-family: sans-serif">\n    <h4>The Authomatic library encountered an error!</h4>\n    <h1>{error}</h1>\n    <pre>{traceback}</pre>\n    </body>\n    </html>\n    '
    return html.format(error=exc_info[1], traceback=traceback)
[ "def", "_error_traceback_html", "(", "exc_info", ",", "traceback", ")", ":", "html", "=", "'\\n <html>\\n <head>\\n <title>ERROR: {error}</title>\\n </head>\\n <body style=\"font-family: sans-serif\">\\n <h4>The Authomatic library encountered an error!</h4>\\n <h1>{error}</h1>\\n <pre>{traceback}</pre>\\n </body>\\n </html>\\n '", "return", "html", ".", "format", "(", "error", "=", "exc_info", "[", "1", "]", ",", "traceback", "=", "traceback", ")" ]
generates error traceback html .
train
false
7,742
def _get_G(k_params):
    I = np.eye(k_params)
    A = np.concatenate(((- I), (- I)), axis=1)
    B = np.concatenate((I, (- I)), axis=1)
    C = np.concatenate((A, B), axis=0)
    return matrix(C)
[ "def", "_get_G", "(", "k_params", ")", ":", "I", "=", "np", ".", "eye", "(", "k_params", ")", "A", "=", "np", ".", "concatenate", "(", "(", "(", "-", "I", ")", ",", "(", "-", "I", ")", ")", ",", "axis", "=", "1", ")", "B", "=", "np", ".", "concatenate", "(", "(", "I", ",", "(", "-", "I", ")", ")", ",", "axis", "=", "1", ")", "C", "=", "np", ".", "concatenate", "(", "(", "A", ",", "B", ")", ",", "axis", "=", "0", ")", "return", "matrix", "(", "C", ")" ]
the linear inequality constraint matrix .
train
false
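_get_G stacks the blocks [[-I, -I], [I, -I]]. Assuming the solver's variable is stacked as (beta, u), the two blocks encode -beta - u <= 0 and beta - u <= 0, i.e. |beta_i| <= u_i, the usual auxiliary-variable trick for L1 penalties. matrix here is cvxopt's; a numpy-only sketch of the same construction:

import numpy as np

def get_G(k_params):  # same blocks as above, returned as a plain ndarray
    I = np.eye(k_params)
    A = np.concatenate((-I, -I), axis=1)
    B = np.concatenate((I, -I), axis=1)
    return np.concatenate((A, B), axis=0)

G = get_G(2)
print(G.shape)  # (4, 4): 2*k_params rows of inequality constraints G x <= h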
7,745
def merge_asof(left, right, on=None, left_on=None, right_on=None,
               left_index=False, right_index=False, by=None,
               left_by=None, right_by=None, suffixes=('_x', '_y'),
               tolerance=None, allow_exact_matches=True, direction='backward'):
    op = _AsOfMerge(left, right, on=on, left_on=left_on, right_on=right_on,
                    left_index=left_index, right_index=right_index, by=by,
                    left_by=left_by, right_by=right_by, suffixes=suffixes,
                    how='asof', tolerance=tolerance,
                    allow_exact_matches=allow_exact_matches, direction=direction)
    return op.get_result()
[ "def", "merge_asof", "(", "left", ",", "right", ",", "on", "=", "None", ",", "left_on", "=", "None", ",", "right_on", "=", "None", ",", "left_index", "=", "False", ",", "right_index", "=", "False", ",", "by", "=", "None", ",", "left_by", "=", "None", ",", "right_by", "=", "None", ",", "suffixes", "=", "(", "'_x'", ",", "'_y'", ")", ",", "tolerance", "=", "None", ",", "allow_exact_matches", "=", "True", ",", "direction", "=", "'backward'", ")", ":", "op", "=", "_AsOfMerge", "(", "left", ",", "right", ",", "on", "=", "on", ",", "left_on", "=", "left_on", ",", "right_on", "=", "right_on", ",", "left_index", "=", "left_index", ",", "right_index", "=", "right_index", ",", "by", "=", "by", ",", "left_by", "=", "left_by", ",", "right_by", "=", "right_by", ",", "suffixes", "=", "suffixes", ",", "how", "=", "'asof'", ",", "tolerance", "=", "tolerance", ",", "allow_exact_matches", "=", "allow_exact_matches", ",", "direction", "=", "direction", ")", "return", "op", ".", "get_result", "(", ")" ]
perform an asof merge .
train
true
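merge_asof is pandas' public top-level API; a small sketch of the default backward asof join, where each left row picks the most recent right row at or before its key (both frames must be sorted on the key):

import pandas as pd

trades = pd.DataFrame({
    'time': pd.to_datetime(['2016-05-25 13:30:00.023', '2016-05-25 13:30:00.038']),
    'ticker': ['MSFT', 'MSFT'],
    'price': [51.95, 51.95]})
quotes = pd.DataFrame({
    'time': pd.to_datetime(['2016-05-25 13:30:00.023', '2016-05-25 13:30:00.030']),
    'ticker': ['MSFT', 'MSFT'],
    'bid': [51.95, 51.97]})

# each trade is matched with the latest quote at or before its timestamp
print(pd.merge_asof(trades, quotes, on='time', by='ticker'))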
7,746
def _same_mesh(vertices1, faces1, vertices2, faces2, tol=1e-10):
    triangles1 = vertices1[np.array(faces1)]
    triangles2 = vertices2[np.array(faces2)]
    triang1 = [np.concatenate(sorted(t, key=(lambda x: tuple(x)))) for t in triangles1]
    triang2 = [np.concatenate(sorted(t, key=(lambda x: tuple(x)))) for t in triangles2]
    triang1 = np.array(sorted([tuple(x) for x in triang1]))
    triang2 = np.array(sorted([tuple(x) for x in triang2]))
    return ((triang1.shape == triang2.shape) and np.allclose(triang1, triang2, 0, tol))
[ "def", "_same_mesh", "(", "vertices1", ",", "faces1", ",", "vertices2", ",", "faces2", ",", "tol", "=", "1e-10", ")", ":", "triangles1", "=", "vertices1", "[", "np", ".", "array", "(", "faces1", ")", "]", "triangles2", "=", "vertices2", "[", "np", ".", "array", "(", "faces2", ")", "]", "triang1", "=", "[", "np", ".", "concatenate", "(", "sorted", "(", "t", ",", "key", "=", "(", "lambda", "x", ":", "tuple", "(", "x", ")", ")", ")", ")", "for", "t", "in", "triangles1", "]", "triang2", "=", "[", "np", ".", "concatenate", "(", "sorted", "(", "t", ",", "key", "=", "(", "lambda", "x", ":", "tuple", "(", "x", ")", ")", ")", ")", "for", "t", "in", "triangles2", "]", "triang1", "=", "np", ".", "array", "(", "sorted", "(", "[", "tuple", "(", "x", ")", "for", "x", "in", "triang1", "]", ")", ")", "triang2", "=", "np", ".", "array", "(", "sorted", "(", "[", "tuple", "(", "x", ")", "for", "x", "in", "triang2", "]", ")", ")", "return", "(", "(", "triang1", ".", "shape", "==", "triang2", ".", "shape", ")", "and", "np", ".", "allclose", "(", "triang1", ",", "triang2", ",", "0", ",", "tol", ")", ")" ]
compare two meshes .
train
false
7,747
def is_user_context(context):
    if (not context):
        return False
    if context.is_admin:
        return False
    if ((not context.user_id) or (not context.project_id)):
        return False
    return True
[ "def", "is_user_context", "(", "context", ")", ":", "if", "(", "not", "context", ")", ":", "return", "False", "if", "context", ".", "is_admin", ":", "return", "False", "if", "(", "(", "not", "context", ".", "user_id", ")", "or", "(", "not", "context", ".", "project_id", ")", ")", ":", "return", "False", "return", "True" ]
indicates if the request context is a normal user .
train
false
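is_user_context only reads three attributes, so its behavior is easy to exercise with a lightweight stand-in object (a hypothetical context built with SimpleNamespace, not the real request-context class):

from types import SimpleNamespace

def is_user_context(context):
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True

ctx = SimpleNamespace(is_admin=False, user_id='u1', project_id='p1')
print(is_user_context(ctx))   # True
ctx.is_admin = True
print(is_user_context(ctx))   # False: admin contexts are not user contexts
print(is_user_context(None))  # False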
7,748
def GetOTP(user):
    return _ComputeOTP(_GetUserSecret(user), long((time.time() / _GRANULARITY)))
[ "def", "GetOTP", "(", "user", ")", ":", "return", "_ComputeOTP", "(", "_GetUserSecret", "(", "user", ")", ",", "long", "(", "(", "time", ".", "time", "(", ")", "/", "_GRANULARITY", ")", ")", ")" ]
gets a new otp for the specified user by looking up the users secret in the secrets database and using it to salt an md5 hash of time and username .
train
false
7,751
@receiver(post_save, sender=Release)
def promote_latest_release(sender, instance, **kwargs):
    if kwargs.get('raw', False):
        return
    if instance.is_latest:
        Release.objects.filter(version=instance.version).exclude(pk=instance.pk).update(is_latest=False)
[ "@", "receiver", "(", "post_save", ",", "sender", "=", "Release", ")", "def", "promote_latest_release", "(", "sender", ",", "instance", ",", "**", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'raw'", ",", "False", ")", ":", "return", "if", "instance", ".", "is_latest", ":", "Release", ".", "objects", ".", "filter", "(", "version", "=", "instance", ".", "version", ")", ".", "exclude", "(", "pk", "=", "instance", ".", "pk", ")", ".", "update", "(", "is_latest", "=", "False", ")" ]
promote this release to be the latest if this flag is set .
train
false
7,752
def test_adjust_gamma_greater_one():
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([[0, 0, 0, 0, 1, 1, 2, 3],
                         [4, 5, 6, 7, 9, 10, 12, 14],
                         [16, 18, 20, 22, 25, 27, 30, 33],
                         [36, 39, 42, 45, 49, 52, 56, 60],
                         [64, 68, 72, 76, 81, 85, 90, 95],
                         [100, 105, 110, 116, 121, 127, 132, 138],
                         [144, 150, 156, 163, 169, 176, 182, 189],
                         [196, 203, 211, 218, 225, 233, 241, 249]], dtype=np.uint8)
    result = exposure.adjust_gamma(image, 2)
    assert_array_equal(result, expected)
[ "def", "test_adjust_gamma_greater_one", "(", ")", ":", "image", "=", "np", ".", "arange", "(", "0", ",", "255", ",", "4", ",", "np", ".", "uint8", ")", ".", "reshape", "(", "(", "8", ",", "8", ")", ")", "expected", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", ",", "0", ",", "0", ",", "1", ",", "1", ",", "2", ",", "3", "]", ",", "[", "4", ",", "5", ",", "6", ",", "7", ",", "9", ",", "10", ",", "12", ",", "14", "]", ",", "[", "16", ",", "18", ",", "20", ",", "22", ",", "25", ",", "27", ",", "30", ",", "33", "]", ",", "[", "36", ",", "39", ",", "42", ",", "45", ",", "49", ",", "52", ",", "56", ",", "60", "]", ",", "[", "64", ",", "68", ",", "72", ",", "76", ",", "81", ",", "85", ",", "90", ",", "95", "]", ",", "[", "100", ",", "105", ",", "110", ",", "116", ",", "121", ",", "127", ",", "132", ",", "138", "]", ",", "[", "144", ",", "150", ",", "156", ",", "163", ",", "169", ",", "176", ",", "182", ",", "189", "]", ",", "[", "196", ",", "203", ",", "211", ",", "218", ",", "225", ",", "233", ",", "241", ",", "249", "]", "]", ",", "dtype", "=", "np", ".", "uint8", ")", "result", "=", "exposure", ".", "adjust_gamma", "(", "image", ",", "2", ")", "assert_array_equal", "(", "result", ",", "expected", ")" ]
verifying the output with expected results for gamma correction with gamma equal to two .
train
false
7,753
def create_profile_images(image_file, profile_image_names):
    storage = get_profile_image_storage()
    original = Image.open(image_file)
    image = _set_color_mode_to_rgb(original)
    image = _crop_image_to_square(image)
    for (size, name) in profile_image_names.items():
        scaled = _scale_image(image, size)
        exif = _get_corrected_exif(scaled, original)
        with closing(_create_image_file(scaled, exif)) as scaled_image_file:
            storage.save(name, scaled_image_file)
[ "def", "create_profile_images", "(", "image_file", ",", "profile_image_names", ")", ":", "storage", "=", "get_profile_image_storage", "(", ")", "original", "=", "Image", ".", "open", "(", "image_file", ")", "image", "=", "_set_color_mode_to_rgb", "(", "original", ")", "image", "=", "_crop_image_to_square", "(", "image", ")", "for", "(", "size", ",", "name", ")", "in", "profile_image_names", ".", "items", "(", ")", ":", "scaled", "=", "_scale_image", "(", "image", ",", "size", ")", "exif", "=", "_get_corrected_exif", "(", "scaled", ",", "original", ")", "with", "closing", "(", "_create_image_file", "(", "scaled", ",", "exif", ")", ")", "as", "scaled_image_file", ":", "storage", ".", "save", "(", "name", ",", "scaled_image_file", ")" ]
generates a set of image files based on image_file and stores them according to the sizes and filenames specified in profile_image_names .
train
false
7,754
def python_version_tuple():
    return tuple(_sys_version()[1].split('.'))
[ "def", "python_version_tuple", "(", ")", ":", "return", "tuple", "(", "_sys_version", "(", ")", "[", "1", "]", ".", "split", "(", "'.'", ")", ")" ]
returns the python version as tuple of strings .
train
false
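python_version_tuple is exposed by the standard platform module; note that it returns strings, not integers:

import platform

major, minor, patchlevel = platform.python_version_tuple()
print(major, minor, patchlevel)  # e.g. 3 11 4
print(int(major) >= 3)           # convert before comparing numerically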
7,755
def test_doc_string_contains_models():
    for kind in ('mean', 'apparent'):
        for model in SIDEREAL_TIME_MODELS[kind]:
            assert (model in Time.sidereal_time.__doc__)
[ "def", "test_doc_string_contains_models", "(", ")", ":", "for", "kind", "in", "(", "'mean'", ",", "'apparent'", ")", ":", "for", "model", "in", "SIDEREAL_TIME_MODELS", "[", "kind", "]", ":", "assert", "(", "model", "in", "Time", ".", "sidereal_time", ".", "__doc__", ")" ]
the doc string is formatted; this ensures this remains working .
train
false
7,756
@shared_task
def sleeping(i, **_):
    sleep(i)
[ "@", "shared_task", "def", "sleeping", "(", "i", ",", "**", "_", ")", ":", "sleep", "(", "i", ")" ]
task sleeping for i seconds .
train
false
7,758
def _decode_error(packet, packet_buff, offset):
    if (len(packet_buff) < (offset + 3)):
        raise InvalidPacketException((u'ERROR packet too small (<%s): %s' % ((offset + 3), hexlify(packet_buff))))
    (error_code,) = struct.unpack_from('!H', packet_buff, offset)
    (error_msg, idx) = _get_string(packet_buff, (offset + 2))
    if (not error_msg):
        raise InvalidPacketException((u'ERROR packet has empty error message: %s' % hexlify(packet_buff)))
    if (idx != len(packet_buff)):
        raise InvalidPacketException((u'Invalid ERROR packet: %s' % hexlify(packet_buff)))
    packet['error_code'] = error_code
    packet['error_msg'] = error_msg
    return packet
[ "def", "_decode_error", "(", "packet", ",", "packet_buff", ",", "offset", ")", ":", "if", "(", "len", "(", "packet_buff", ")", "<", "(", "offset", "+", "3", ")", ")", ":", "raise", "InvalidPacketException", "(", "(", "u'ERROR packet too small (<%s): %s'", "%", "(", "(", "offset", "+", "3", ")", ",", "hexlify", "(", "packet_buff", ")", ")", ")", ")", "(", "error_code", ",", ")", "=", "struct", ".", "unpack_from", "(", "'!H'", ",", "packet_buff", ",", "offset", ")", "(", "error_msg", ",", "idx", ")", "=", "_get_string", "(", "packet_buff", ",", "(", "offset", "+", "2", ")", ")", "if", "(", "not", "error_msg", ")", ":", "raise", "InvalidPacketException", "(", "(", "u'ERROR packet has empty error message: %s'", "%", "hexlify", "(", "packet_buff", ")", ")", ")", "if", "(", "idx", "!=", "len", "(", "packet_buff", ")", ")", ":", "raise", "InvalidPacketException", "(", "(", "u'Invalid ERROR packet: %s'", "%", "hexlify", "(", "packet_buff", ")", ")", ")", "packet", "[", "'error_code'", "]", "=", "error_code", "packet", "[", "'error_msg'", "]", "=", "error_msg", "return", "packet" ]
decodes a error packet .
train
false
7,761
def cpus():
    with settings(hide('running', 'stdout')):
        res = run('python -c "import multiprocessing; print(multiprocessing.cpu_count())"')
    return int(res)
[ "def", "cpus", "(", ")", ":", "with", "settings", "(", "hide", "(", "'running'", ",", "'stdout'", ")", ")", ":", "res", "=", "run", "(", "'python -c \"import multiprocessing; print(multiprocessing.cpu_count())\"'", ")", "return", "int", "(", "res", ")" ]
get the number of cpu cores .
train
false
7,763
def test_time():
    time_chart = TimeLine(truncate_label=1000)
    time_chart.add('times', [(time(1, 12, 29), 2), (time(21, 2, 29), 10), (time(12, 30, 59), 7)])
    q = time_chart.render_pyquery()
    assert (list(map((lambda t: t.split(' ')[0]), q('.axis.x text').map(texts))) ==
            ['02:46:40', '05:33:20', '08:20:00', '11:06:40', '13:53:20', '16:40:00', '19:26:40'])
[ "def", "test_time", "(", ")", ":", "time_chart", "=", "TimeLine", "(", "truncate_label", "=", "1000", ")", "time_chart", ".", "add", "(", "'times'", ",", "[", "(", "time", "(", "1", ",", "12", ",", "29", ")", ",", "2", ")", ",", "(", "time", "(", "21", ",", "2", ",", "29", ")", ",", "10", ")", ",", "(", "time", "(", "12", ",", "30", ",", "59", ")", ",", "7", ")", "]", ")", "q", "=", "time_chart", ".", "render_pyquery", "(", ")", "assert", "(", "list", "(", "map", "(", "(", "lambda", "t", ":", "t", ".", "split", "(", "' '", ")", "[", "0", "]", ")", ",", "q", "(", "'.axis.x text'", ")", ".", "map", "(", "texts", ")", ")", ")", "==", "[", "'02:46:40'", ",", "'05:33:20'", ",", "'08:20:00'", ",", "'11:06:40'", ",", "'13:53:20'", ",", "'16:40:00'", ",", "'19:26:40'", "]", ")" ]
test a simple timeline .
train
false
7,764
def webtest_submit(form, name=None, index=None, value=None, **args):
    fields = webtest_submit_fields(form, name, index=index, submit_value=value)
    if (form.method.upper() != 'GET'):
        args.setdefault('content_type', form.enctype)
    return form.response.goto(form.action, method=form.method, params=fields, **args)
[ "def", "webtest_submit", "(", "form", ",", "name", "=", "None", ",", "index", "=", "None", ",", "value", "=", "None", ",", "**", "args", ")", ":", "fields", "=", "webtest_submit_fields", "(", "form", ",", "name", ",", "index", "=", "index", ",", "submit_value", "=", "value", ")", "if", "(", "form", ".", "method", ".", "upper", "(", ")", "!=", "'GET'", ")", ":", "args", ".", "setdefault", "(", "'content_type'", ",", "form", ".", "enctype", ")", "return", "form", ".", "response", ".", "goto", "(", "form", ".", "action", ",", "method", "=", "form", ".", "method", ",", "params", "=", "fields", ",", "**", "args", ")" ]
backported version of webtest .
train
false
7,765
def image_member_delete(context, memb_id, session=None):
    session = (session or get_session())
    member_ref = _image_member_get(context, memb_id, session)
    _image_member_delete(context, member_ref, session)
[ "def", "image_member_delete", "(", "context", ",", "memb_id", ",", "session", "=", "None", ")", ":", "session", "=", "(", "session", "or", "get_session", "(", ")", ")", "member_ref", "=", "_image_member_get", "(", "context", ",", "memb_id", ",", "session", ")", "_image_member_delete", "(", "context", ",", "member_ref", ",", "session", ")" ]
delete an imagemember object .
train
false
7,766
def locate_unbalanced_end(unbalanced_end, pre_delete, post_delete):
    while 1:
        if (not unbalanced_end):
            break
        finding = unbalanced_end[(-1)]
        finding_name = finding.split()[0].strip('<>/')
        if (not pre_delete):
            break
        next = pre_delete[(-1)]
        if ((next is DEL_END) or (not next.startswith('</'))):
            break
        name = next.split()[0].strip('<>/')
        if ((name == 'ins') or (name == 'del')):
            break
        if (name == finding_name):
            unbalanced_end.pop()
            post_delete.insert(0, pre_delete.pop())
        else:
            break
[ "def", "locate_unbalanced_end", "(", "unbalanced_end", ",", "pre_delete", ",", "post_delete", ")", ":", "while", "1", ":", "if", "(", "not", "unbalanced_end", ")", ":", "break", "finding", "=", "unbalanced_end", "[", "(", "-", "1", ")", "]", "finding_name", "=", "finding", ".", "split", "(", ")", "[", "0", "]", ".", "strip", "(", "'<>/'", ")", "if", "(", "not", "pre_delete", ")", ":", "break", "next", "=", "pre_delete", "[", "(", "-", "1", ")", "]", "if", "(", "(", "next", "is", "DEL_END", ")", "or", "(", "not", "next", ".", "startswith", "(", "'</'", ")", ")", ")", ":", "break", "name", "=", "next", ".", "split", "(", ")", "[", "0", "]", ".", "strip", "(", "'<>/'", ")", "if", "(", "(", "name", "==", "'ins'", ")", "or", "(", "name", "==", "'del'", ")", ")", ":", "break", "if", "(", "name", "==", "finding_name", ")", ":", "unbalanced_end", ".", "pop", "(", ")", "post_delete", ".", "insert", "(", "0", ",", "pre_delete", ".", "pop", "(", ")", ")", "else", ":", "break" ]
like locate_unbalanced_start .
train
true
7,768
def _build_tree_structure(queryset):
    all_nodes = {}
    mptt_opts = queryset.model._mptt_meta
    items = queryset.order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr).values_list(u'pk', (u'%s_id' % mptt_opts.parent_attr))
    for (p_id, parent_id) in items:
        all_nodes.setdefault((str(parent_id) if parent_id else 0), []).append(p_id)
    return all_nodes
[ "def", "_build_tree_structure", "(", "queryset", ")", ":", "all_nodes", "=", "{", "}", "mptt_opts", "=", "queryset", ".", "model", ".", "_mptt_meta", "items", "=", "queryset", ".", "order_by", "(", "mptt_opts", ".", "tree_id_attr", ",", "mptt_opts", ".", "left_attr", ")", ".", "values_list", "(", "u'pk'", ",", "(", "u'%s_id'", "%", "mptt_opts", ".", "parent_attr", ")", ")", "for", "(", "p_id", ",", "parent_id", ")", "in", "items", ":", "all_nodes", ".", "setdefault", "(", "(", "str", "(", "parent_id", ")", "if", "parent_id", "else", "0", ")", ",", "[", "]", ")", ".", "append", "(", "p_id", ")", "return", "all_nodes" ]
build an in-memory representation of the item tree .
train
false
7,770
@pytest.mark.parametrize('os_name, qversion, expected', [
    ('linux', '5.2.1', True),
    ('linux', '5.4.1', True),
    ('nt', '5.2.1', False),
    ('nt', '5.3.0', True),
    ('nt', '5.4.1', True)])
def test_check_print_compat(os_name, qversion, expected, monkeypatch):
    monkeypatch.setattr('qutebrowser.utils.qtutils.os.name', os_name)
    monkeypatch.setattr('qutebrowser.utils.qtutils.qVersion', (lambda: qversion))
    assert (qtutils.check_print_compat() == expected)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'os_name, qversion, expected'", ",", "[", "(", "'linux'", ",", "'5.2.1'", ",", "True", ")", ",", "(", "'linux'", ",", "'5.4.1'", ",", "True", ")", ",", "(", "'nt'", ",", "'5.2.1'", ",", "False", ")", ",", "(", "'nt'", ",", "'5.3.0'", ",", "True", ")", ",", "(", "'nt'", ",", "'5.4.1'", ",", "True", ")", "]", ")", "def", "test_check_print_compat", "(", "os_name", ",", "qversion", ",", "expected", ",", "monkeypatch", ")", ":", "monkeypatch", ".", "setattr", "(", "'qutebrowser.utils.qtutils.os.name'", ",", "os_name", ")", "monkeypatch", ".", "setattr", "(", "'qutebrowser.utils.qtutils.qVersion'", ",", "(", "lambda", ":", "qversion", ")", ")", "assert", "(", "qtutils", ".", "check_print_compat", "(", ")", "==", "expected", ")" ]
test check_print_compat .
train
false
7,771
def lateral(selectable, name=None): return _interpret_as_from(selectable).lateral(name=name)
[ "def", "lateral", "(", "selectable", ",", "name", "=", "None", ")", ":", "return", "_interpret_as_from", "(", "selectable", ")", ".", "lateral", "(", "name", "=", "name", ")" ]
return a :class:`.Lateral` object .
train
false
7,772
def make_future_info(first_sid, root_symbols, years, notice_date_func, expiration_date_func, start_date_func, month_codes=None): if (month_codes is None): month_codes = CME_CODE_TO_MONTH year_strs = list(map(str, years)) years = [pd.Timestamp(s, tz='UTC') for s in year_strs] contract_suffix_to_beginning_of_month = tuple((((month_code + year_str[(-2):]), (year + MonthBegin(month_num))) for ((year, year_str), (month_code, month_num)) in product(zip(years, year_strs), iteritems(month_codes)))) contracts = [] parts = product(root_symbols, contract_suffix_to_beginning_of_month) for (sid, (root_sym, (suffix, month_begin))) in enumerate(parts, first_sid): contracts.append({'sid': sid, 'root_symbol': root_sym, 'symbol': (root_sym + suffix), 'start_date': start_date_func(month_begin), 'notice_date': notice_date_func(month_begin), 'expiration_date': expiration_date_func(month_begin), 'multiplier': 500, 'exchange': 'TEST', 'exchange_full': 'TEST FULL'}) return pd.DataFrame.from_records(contracts, index='sid')
[ "def", "make_future_info", "(", "first_sid", ",", "root_symbols", ",", "years", ",", "notice_date_func", ",", "expiration_date_func", ",", "start_date_func", ",", "month_codes", "=", "None", ")", ":", "if", "(", "month_codes", "is", "None", ")", ":", "month_codes", "=", "CME_CODE_TO_MONTH", "year_strs", "=", "list", "(", "map", "(", "str", ",", "years", ")", ")", "years", "=", "[", "pd", ".", "Timestamp", "(", "s", ",", "tz", "=", "'UTC'", ")", "for", "s", "in", "year_strs", "]", "contract_suffix_to_beginning_of_month", "=", "tuple", "(", "(", "(", "(", "month_code", "+", "year_str", "[", "(", "-", "2", ")", ":", "]", ")", ",", "(", "year", "+", "MonthBegin", "(", "month_num", ")", ")", ")", "for", "(", "(", "year", ",", "year_str", ")", ",", "(", "month_code", ",", "month_num", ")", ")", "in", "product", "(", "zip", "(", "years", ",", "year_strs", ")", ",", "iteritems", "(", "month_codes", ")", ")", ")", ")", "contracts", "=", "[", "]", "parts", "=", "product", "(", "root_symbols", ",", "contract_suffix_to_beginning_of_month", ")", "for", "(", "sid", ",", "(", "root_sym", ",", "(", "suffix", ",", "month_begin", ")", ")", ")", "in", "enumerate", "(", "parts", ",", "first_sid", ")", ":", "contracts", ".", "append", "(", "{", "'sid'", ":", "sid", ",", "'root_symbol'", ":", "root_sym", ",", "'symbol'", ":", "(", "root_sym", "+", "suffix", ")", ",", "'start_date'", ":", "start_date_func", "(", "month_begin", ")", ",", "'notice_date'", ":", "notice_date_func", "(", "month_begin", ")", ",", "'expiration_date'", ":", "expiration_date_func", "(", "month_begin", ")", ",", "'multiplier'", ":", "500", ",", "'exchange'", ":", "'TEST'", ",", "'exchange_full'", ":", "'TEST FULL'", "}", ")", "return", "pd", ".", "DataFrame", ".", "from_records", "(", "contracts", ",", "index", "=", "'sid'", ")" ]
create a dataframe representing futures for root_symbols during year .
train
true
7,773
@with_setup(step_runner_environ) def test_count_raised_exceptions_as_failing_steps(): try: f = Feature.from_string(FEATURE8) feature_result = f.run() scenario_result = feature_result.scenario_results[0] assert_equals(len(scenario_result.steps_failed), 1) finally: registry.clear()
[ "@", "with_setup", "(", "step_runner_environ", ")", "def", "test_count_raised_exceptions_as_failing_steps", "(", ")", ":", "try", ":", "f", "=", "Feature", ".", "from_string", "(", "FEATURE8", ")", "feature_result", "=", "f", ".", "run", "(", ")", "scenario_result", "=", "feature_result", ".", "scenario_results", "[", "0", "]", "assert_equals", "(", "len", "(", "scenario_result", ".", "steps_failed", ")", ",", "1", ")", "finally", ":", "registry", ".", "clear", "(", ")" ]
when a step definition raises an exception , it is counted as a failing step .
train
false
7,775
def format_doc(docstring, *args, **kwargs): def set_docstring(obj): if (docstring is None): doc = obj.__doc__ obj.__doc__ = None elif isinstance(docstring, six.string_types): doc = docstring else: doc = docstring.__doc__ if (not doc): raise ValueError('docstring must be a string or containing a docstring that is not empty.') kwargs['__doc__'] = (obj.__doc__ or '') if (six.PY2 and isinstance(obj, type)): obj = type(obj.__name__, (obj,), {'__doc__': doc.format(*args, **kwargs), '__module__': obj.__module__}) else: obj.__doc__ = doc.format(*args, **kwargs) return obj return set_docstring
[ "def", "format_doc", "(", "docstring", ",", "*", "args", ",", "**", "kwargs", ")", ":", "def", "set_docstring", "(", "obj", ")", ":", "if", "(", "docstring", "is", "None", ")", ":", "doc", "=", "obj", ".", "__doc__", "obj", ".", "__doc__", "=", "None", "elif", "isinstance", "(", "docstring", ",", "six", ".", "string_types", ")", ":", "doc", "=", "docstring", "else", ":", "doc", "=", "docstring", ".", "__doc__", "if", "(", "not", "doc", ")", ":", "raise", "ValueError", "(", "'docstring must be a string or containing a docstring that is not empty.'", ")", "kwargs", "[", "'__doc__'", "]", "=", "(", "obj", ".", "__doc__", "or", "''", ")", "if", "(", "six", ".", "PY2", "and", "isinstance", "(", "obj", ",", "type", ")", ")", ":", "obj", "=", "type", "(", "obj", ".", "__name__", ",", "(", "obj", ",", ")", ",", "{", "'__doc__'", ":", "doc", ".", "format", "(", "*", "args", ",", "**", "kwargs", ")", ",", "'__module__'", ":", "obj", ".", "__module__", "}", ")", "else", ":", "obj", ".", "__doc__", "=", "doc", ".", "format", "(", "*", "args", ",", "**", "kwargs", ")", "return", "obj", "return", "set_docstring" ]
replaces the docstring of the decorated object and then formats it .
train
false
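A minimal usage sketch, assuming format_doc is in scope; the template text and decorated function are illustrative. The original docstring is made available to the template as {__doc__}:

    @format_doc('Apply {action} to the input. {__doc__}', action='a no-op')
    def identity(x):
        'Returns its argument unchanged.'
        return x

    identity.__doc__  # 'Apply a no-op to the input. Returns its argument unchanged.'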
7,777
def transformVector3Blindly(tetragrid, vector3): x = getTransformedByList(tetragrid[0], vector3) y = getTransformedByList(tetragrid[1], vector3) z = getTransformedByList(tetragrid[2], vector3) vector3.x = x vector3.y = y vector3.z = z
[ "def", "transformVector3Blindly", "(", "tetragrid", ",", "vector3", ")", ":", "x", "=", "getTransformedByList", "(", "tetragrid", "[", "0", "]", ",", "vector3", ")", "y", "=", "getTransformedByList", "(", "tetragrid", "[", "1", "]", ",", "vector3", ")", "z", "=", "getTransformedByList", "(", "tetragrid", "[", "2", "]", ",", "vector3", ")", "vector3", ".", "x", "=", "x", "vector3", ".", "y", "=", "y", "vector3", ".", "z", "=", "z" ]
transform the vector3 by a tetragrid without checking to see if it exists .
train
false
7,778
def data_root(environ=None): return zipline_path(['data'], environ=environ)
[ "def", "data_root", "(", "environ", "=", "None", ")", ":", "return", "zipline_path", "(", "[", "'data'", "]", ",", "environ", "=", "environ", ")" ]
the root directory for zipline data files .
train
false
7,780
def _is_youtube_url(url): parsed_url = urlparse(url) netloc = parsed_url.netloc return (netloc in ['youtu.be', 'youtube.com', 'www.youtube.com'])
[ "def", "_is_youtube_url", "(", "url", ")", ":", "parsed_url", "=", "urlparse", "(", "url", ")", "netloc", "=", "parsed_url", ".", "netloc", "return", "(", "netloc", "in", "[", "'youtu.be'", ",", "'youtube.com'", ",", "'www.youtube.com'", "]", ")" ]
returns true if the url is to youtube .
train
false
7,781
def dump_files(): for file_path in FILES_LIST: log.info('PLS IMPLEMENT DUMP, want to dump %s', file_path)
[ "def", "dump_files", "(", ")", ":", "for", "file_path", "in", "FILES_LIST", ":", "log", ".", "info", "(", "'PLS IMPLEMENT DUMP, want to dump %s'", ",", "file_path", ")" ]
dump all the dropped files .
train
false
7,783
def psutil_phymem_usage(): import psutil try: percent = psutil.virtual_memory().percent except: percent = psutil.phymem_usage().percent return percent
[ "def", "psutil_phymem_usage", "(", ")", ":", "import", "psutil", "try", ":", "percent", "=", "psutil", ".", "virtual_memory", "(", ")", ".", "percent", "except", ":", "percent", "=", "psutil", ".", "phymem_usage", "(", ")", ".", "percent", "return", "percent" ]
return physical memory usage . requires the cross-platform psutil library .
train
true
7,784
def unconstrain_stationary_univariate(constrained): n = constrained.shape[0] y = np.zeros((n, n), dtype=constrained.dtype) y[(n - 1):] = (- constrained) for k in range((n - 1), 0, (-1)): for i in range(k): y[((k - 1), i)] = ((y[(k, i)] - (y[(k, k)] * y[(k, ((k - i) - 1))])) / (1 - (y[(k, k)] ** 2))) r = y.diagonal() x = (r / ((1 - (r ** 2)) ** 0.5)) return x
[ "def", "unconstrain_stationary_univariate", "(", "constrained", ")", ":", "n", "=", "constrained", ".", "shape", "[", "0", "]", "y", "=", "np", ".", "zeros", "(", "(", "n", ",", "n", ")", ",", "dtype", "=", "constrained", ".", "dtype", ")", "y", "[", "(", "n", "-", "1", ")", ":", "]", "=", "(", "-", "constrained", ")", "for", "k", "in", "range", "(", "(", "n", "-", "1", ")", ",", "0", ",", "(", "-", "1", ")", ")", ":", "for", "i", "in", "range", "(", "k", ")", ":", "y", "[", "(", "(", "k", "-", "1", ")", ",", "i", ")", "]", "=", "(", "(", "y", "[", "(", "k", ",", "i", ")", "]", "-", "(", "y", "[", "(", "k", ",", "k", ")", "]", "*", "y", "[", "(", "k", ",", "(", "(", "k", "-", "i", ")", "-", "1", ")", ")", "]", ")", ")", "/", "(", "1", "-", "(", "y", "[", "(", "k", ",", "k", ")", "]", "**", "2", ")", ")", ")", "r", "=", "y", ".", "diagonal", "(", ")", "x", "=", "(", "r", "/", "(", "(", "1", "-", "(", "r", "**", "2", ")", ")", "**", "0.5", ")", ")", "return", "x" ]
transform constrained parameters used in likelihood evaluation to unconstrained parameters used by the optimizer .
train
false
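A round-trip sketch, assuming statsmodels is installed; constrain_stationary_univariate in statsmodels.tsa.statespace.tools is the inverse transform:

    import numpy as np
    from statsmodels.tsa.statespace.tools import (
        constrain_stationary_univariate, unconstrain_stationary_univariate)

    unconstrained = np.array([1.5, -0.3])  # raw optimizer parameters
    constrained = constrain_stationary_univariate(unconstrained)  # stationary AR coefficients
    assert np.allclose(unconstrain_stationary_univariate(constrained), unconstrained)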
7,785
def perform_push(request, obj): return execute_locked(request, obj, _('All repositories were pushed.'), obj.do_push, request)
[ "def", "perform_push", "(", "request", ",", "obj", ")", ":", "return", "execute_locked", "(", "request", ",", "obj", ",", "_", "(", "'All repositories were pushed.'", ")", ",", "obj", ".", "do_push", ",", "request", ")" ]
helper function to do the repository push .
train
false
7,786
def params_from_strings(params, param_values, app, ignore_errors=False): rval = dict() param_values = (param_values or {}) for (key, value) in param_values.items(): value = json_fix(safe_loads(value)) if (key in params): value = params[key].value_from_basic(value, app, ignore_errors) rval[key] = value return rval
[ "def", "params_from_strings", "(", "params", ",", "param_values", ",", "app", ",", "ignore_errors", "=", "False", ")", ":", "rval", "=", "dict", "(", ")", "param_values", "=", "(", "param_values", "or", "{", "}", ")", "for", "(", "key", ",", "value", ")", "in", "param_values", ".", "items", "(", ")", ":", "value", "=", "json_fix", "(", "safe_loads", "(", "value", ")", ")", "if", "(", "key", "in", "params", ")", ":", "value", "=", "params", "[", "key", "]", ".", "value_from_basic", "(", "value", ",", "app", ",", "ignore_errors", ")", "rval", "[", "key", "]", "=", "value", "return", "rval" ]
convert a dictionary of strings as produced by params_to_strings back into parameter values .
train
false
7,788
def test_base_epochs(): raw = _get_data()[0] epochs = BaseEpochs(raw.info, None, np.ones((1, 3), int), event_id, tmin, tmax) assert_raises(NotImplementedError, epochs.get_data) assert_raises(ValueError, BaseEpochs, raw.info, None, np.ones((1, 3), float), event_id, tmin, tmax) assert_raises(ValueError, BaseEpochs, raw.info, None, np.ones((1, 3, 2), int), event_id, tmin, tmax)
[ "def", "test_base_epochs", "(", ")", ":", "raw", "=", "_get_data", "(", ")", "[", "0", "]", "epochs", "=", "BaseEpochs", "(", "raw", ".", "info", ",", "None", ",", "np", ".", "ones", "(", "(", "1", ",", "3", ")", ",", "int", ")", ",", "event_id", ",", "tmin", ",", "tmax", ")", "assert_raises", "(", "NotImplementedError", ",", "epochs", ".", "get_data", ")", "assert_raises", "(", "ValueError", ",", "BaseEpochs", ",", "raw", ".", "info", ",", "None", ",", "np", ".", "ones", "(", "(", "1", ",", "3", ")", ",", "float", ")", ",", "event_id", ",", "tmin", ",", "tmax", ")", "assert_raises", "(", "ValueError", ",", "BaseEpochs", ",", "raw", ".", "info", ",", "None", ",", "np", ".", "ones", "(", "(", "1", ",", "3", ",", "2", ")", ",", "int", ")", ",", "event_id", ",", "tmin", ",", "tmax", ")" ]
test base epochs class .
train
false
7,789
def clip_read(data): qual = data['quality_scores'] (left, right) = return_merged_clips(data) seq = data['bases'] qual = data['quality_scores'] new_seq = seq[(left - 1):right] new_qual = qual[(left - 1):right] return (new_seq, new_qual)
[ "def", "clip_read", "(", "data", ")", ":", "qual", "=", "data", "[", "'quality_scores'", "]", "(", "left", ",", "right", ")", "=", "return_merged_clips", "(", "data", ")", "seq", "=", "data", "[", "'bases'", "]", "qual", "=", "data", "[", "'quality_scores'", "]", "new_seq", "=", "seq", "[", "(", "left", "-", "1", ")", ":", "right", "]", "new_qual", "=", "qual", "[", "(", "left", "-", "1", ")", ":", "right", "]", "return", "(", "new_seq", ",", "new_qual", ")" ]
given the data for one read it returns clipped seq and qual .
train
false
7,790
def _translate_page_into(page, language, default=None): try: if (page.language == language): return page if (language is not None): translations = dict(((t.language, t) for t in page.available_translations())) if (language in translations): return translations[language] except AttributeError: pass if hasattr(default, u'__call__'): return default(page=page) return default
[ "def", "_translate_page_into", "(", "page", ",", "language", ",", "default", "=", "None", ")", ":", "try", ":", "if", "(", "page", ".", "language", "==", "language", ")", ":", "return", "page", "if", "(", "language", "is", "not", "None", ")", ":", "translations", "=", "dict", "(", "(", "(", "t", ".", "language", ",", "t", ")", "for", "t", "in", "page", ".", "available_translations", "(", ")", ")", ")", "if", "(", "language", "in", "translations", ")", ":", "return", "translations", "[", "language", "]", "except", "AttributeError", ":", "pass", "if", "hasattr", "(", "default", ",", "u'__call__'", ")", ":", "return", "default", "(", "page", "=", "page", ")", "return", "default" ]
return the translation for a given page .
train
true
7,792
def bspline_basis_set(d, knots, x): n_splines = ((len(knots) - d) - 1) return [bspline_basis(d, knots, i, x) for i in range(n_splines)]
[ "def", "bspline_basis_set", "(", "d", ",", "knots", ",", "x", ")", ":", "n_splines", "=", "(", "(", "len", "(", "knots", ")", "-", "d", ")", "-", "1", ")", "return", "[", "bspline_basis", "(", "d", ",", "knots", ",", "i", ",", "x", ")", "for", "i", "in", "range", "(", "n_splines", ")", "]" ]
return the len(knots)-d-1 b-splines at x of degree d with knots .
train
false
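A short usage sketch with sympy, where bspline_basis_set is importable from the top-level namespace:

    from sympy import bspline_basis_set, symbols

    x = symbols('x')
    splines = bspline_basis_set(2, range(5), x)  # degree 2, knots 0..4
    len(splines)  # 2, i.e. len(knots) - d - 1 == 5 - 2 - 1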
7,796
def MGF1(mgfSeed, maskLen, hash): T = b('') for counter in xrange(ceil_div(maskLen, hash.digest_size)): c = long_to_bytes(counter, 4) try: T = (T + hash.new((mgfSeed + c)).digest()) except AttributeError: T = (T + Hash_new(hash, (mgfSeed + c)).digest()) assert (len(T) >= maskLen) return T[:maskLen]
[ "def", "MGF1", "(", "mgfSeed", ",", "maskLen", ",", "hash", ")", ":", "T", "=", "b", "(", "''", ")", "for", "counter", "in", "xrange", "(", "ceil_div", "(", "maskLen", ",", "hash", ".", "digest_size", ")", ")", ":", "c", "=", "long_to_bytes", "(", "counter", ",", "4", ")", "try", ":", "T", "=", "(", "T", "+", "hash", ".", "new", "(", "(", "mgfSeed", "+", "c", ")", ")", ".", "digest", "(", ")", ")", "except", "AttributeError", ":", "T", "=", "(", "T", "+", "Hash_new", "(", "hash", ",", "(", "mgfSeed", "+", "c", ")", ")", ".", "digest", "(", ")", ")", "assert", "(", "len", "(", "T", ")", ">=", "maskLen", ")", "return", "T", "[", ":", "maskLen", "]" ]
mask generation function .
train
false
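A hedged usage sketch: any PyCrypto-style hash module exposing new() and digest_size should work as the hash argument:

    from Crypto.Hash import SHA256

    mask = MGF1(b'seed', 48, SHA256)  # two SHA-256 blocks generated, then truncated
    len(mask)  # 48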
7,797
def load_name_range(start, end, invalidate=False): q = Session.query(Person).filter(Person.name.between(('person %.2d' % start), ('person %.2d' % end))).options(cache_address_bits).options(FromCache('default', 'name_range')) q = q.options(RelationshipCache(Person.addresses, 'default')) if invalidate: q.invalidate() return q.all()
[ "def", "load_name_range", "(", "start", ",", "end", ",", "invalidate", "=", "False", ")", ":", "q", "=", "Session", ".", "query", "(", "Person", ")", ".", "filter", "(", "Person", ".", "name", ".", "between", "(", "(", "'person %.2d'", "%", "start", ")", ",", "(", "'person %.2d'", "%", "end", ")", ")", ")", ".", "options", "(", "cache_address_bits", ")", ".", "options", "(", "FromCache", "(", "'default'", ",", "'name_range'", ")", ")", "q", "=", "q", ".", "options", "(", "RelationshipCache", "(", "Person", ".", "addresses", ",", "'default'", ")", ")", "if", "invalidate", ":", "q", ".", "invalidate", "(", ")", "return", "q", ".", "all", "(", ")" ]
load person objects on a range of names .
train
false
7,798
def remote_repr(arg): return arg
[ "def", "remote_repr", "(", "arg", ")", ":", "return", "arg" ]
return the repr() rendering of the supplied arg .
train
false
7,799
def _verify_signature(message, signature, certs): for pem in certs: verifier = Verifier.from_string(pem, is_x509_cert=True) if verifier.verify(message, signature): return raise AppIdentityError('Invalid token signature')
[ "def", "_verify_signature", "(", "message", ",", "signature", ",", "certs", ")", ":", "for", "pem", "in", "certs", ":", "verifier", "=", "Verifier", ".", "from_string", "(", "pem", ",", "is_x509_cert", "=", "True", ")", "if", "verifier", ".", "verify", "(", "message", ",", "signature", ")", ":", "return", "raise", "AppIdentityError", "(", "'Invalid token signature'", ")" ]
verifies signed content using a list of certificates .
train
true
7,800
def is_valid_path(path, urlconf=None): try: resolve(path, urlconf) return True except Resolver404: return False
[ "def", "is_valid_path", "(", "path", ",", "urlconf", "=", "None", ")", ":", "try", ":", "resolve", "(", "path", ",", "urlconf", ")", "return", "True", "except", "Resolver404", ":", "return", "False" ]
returns true if the given path resolves against the default url resolver .
train
false
7,802
def strip_console_codes(output): if ('\x1b' not in output): return output old_word = '' return_str = '' index = 0 output = ('\x1b[m%s' % output) console_codes = '%G|\\[m|\\[[\\d;]+[HJnrm]' while (index < len(output)): tmp_index = 0 tmp_word = '' while ((len(re.findall('\x1b', tmp_word)) < 2) and ((index + tmp_index) < len(output))): tmp_word += output[(index + tmp_index)] tmp_index += 1 tmp_word = re.sub('\x1b', '', tmp_word) index += (len(tmp_word) + 1) if (tmp_word == old_word): continue try: special_code = re.findall(console_codes, tmp_word)[0] except IndexError: if ((index + tmp_index) < len(output)): raise ValueError(('%s is not included in the known console codes list %s' % (tmp_word, console_codes))) continue if (special_code == tmp_word): continue old_word = tmp_word return_str += tmp_word[len(special_code):] return return_str
[ "def", "strip_console_codes", "(", "output", ")", ":", "if", "(", "'\\x1b'", "not", "in", "output", ")", ":", "return", "output", "old_word", "=", "''", "return_str", "=", "''", "index", "=", "0", "output", "=", "(", "'\\x1b[m%s'", "%", "output", ")", "console_codes", "=", "'%G|\\\\[m|\\\\[[\\\\d;]+[HJnrm]'", "while", "(", "index", "<", "len", "(", "output", ")", ")", ":", "tmp_index", "=", "0", "tmp_word", "=", "''", "while", "(", "(", "len", "(", "re", ".", "findall", "(", "'\\x1b'", ",", "tmp_word", ")", ")", "<", "2", ")", "and", "(", "(", "index", "+", "tmp_index", ")", "<", "len", "(", "output", ")", ")", ")", ":", "tmp_word", "+=", "output", "[", "(", "index", "+", "tmp_index", ")", "]", "tmp_index", "+=", "1", "tmp_word", "=", "re", ".", "sub", "(", "'\\x1b'", ",", "''", ",", "tmp_word", ")", "index", "+=", "(", "len", "(", "tmp_word", ")", "+", "1", ")", "if", "(", "tmp_word", "==", "old_word", ")", ":", "continue", "try", ":", "special_code", "=", "re", ".", "findall", "(", "console_codes", ",", "tmp_word", ")", "[", "0", "]", "except", "IndexError", ":", "if", "(", "(", "index", "+", "tmp_index", ")", "<", "len", "(", "output", ")", ")", ":", "raise", "ValueError", "(", "(", "'%s is not included in the known console codes list %s'", "%", "(", "tmp_word", ",", "console_codes", ")", ")", ")", "continue", "if", "(", "special_code", "==", "tmp_word", ")", ":", "continue", "old_word", "=", "tmp_word", "return_str", "+=", "tmp_word", "[", "len", "(", "special_code", ")", ":", "]", "return", "return_str" ]
remove the linux console escape and control sequences from the console output .
train
false
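Illustrative calls with made-up input; a leading color-reset sequence is stripped while plain text passes through untouched:

    strip_console_codes('\x1b[mhello')  # -> 'hello'
    strip_console_codes('plain text')   # -> 'plain text' (no escape byte, early return)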
7,803
def join_room(room): socketio = flask.current_app.extensions['socketio'] socketio.server.enter_room(flask.request.sid, room, namespace=flask.request.namespace)
[ "def", "join_room", "(", "room", ")", ":", "socketio", "=", "flask", ".", "current_app", ".", "extensions", "[", "'socketio'", "]", "socketio", ".", "server", ".", "enter_room", "(", "flask", ".", "request", ".", "sid", ",", "room", ",", "namespace", "=", "flask", ".", "request", ".", "namespace", ")" ]
join a room .
train
false
7,804
def get_test_data_fps(): test_data = get_test_data() temp_dir = get_qiime_temp_dir() result = {} for (k, v) in test_data.items(): f = NamedTemporaryFile(prefix=k, dir=temp_dir) f.write('\n'.join(v)) f.flush() result[k] = (f.name, f) return result
[ "def", "get_test_data_fps", "(", ")", ":", "test_data", "=", "get_test_data", "(", ")", "temp_dir", "=", "get_qiime_temp_dir", "(", ")", "result", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "test_data", ".", "items", "(", ")", ":", "f", "=", "NamedTemporaryFile", "(", "prefix", "=", "k", ",", "dir", "=", "temp_dir", ")", "f", ".", "write", "(", "'\\n'", ".", "join", "(", "v", ")", ")", "f", ".", "flush", "(", ")", "result", "[", "k", "]", "=", "(", "f", ".", "name", ",", "f", ")", "return", "result" ]
returns test data as dict of filepaths .
train
false
7,805
def rsync_files(src, dst, args, logger=None, quiet=True): if (args is None): args = '' RSYNC_CMD = ("rsync -a %%s '%%s' %%s %s --exclude-from=/etc/cobbler/rsync.exclude" % args) if quiet: RSYNC_CMD += ' --quiet' else: RSYNC_CMD += ' --progress' if (not dst.endswith('/')): dst = ('%s/' % dst) if (not src.endswith('/')): src = ('%s/' % src) spacer = '' if ((not src.startswith('rsync://')) and (not src.startswith('/'))): spacer = ' -e "ssh" ' rsync_cmd = (RSYNC_CMD % (spacer, src, dst)) try: res = subprocess_call(logger, rsync_cmd) if (res != 0): die(logger, ("Failed to run the rsync command: '%s'" % rsync_cmd)) except: return False return True
[ "def", "rsync_files", "(", "src", ",", "dst", ",", "args", ",", "logger", "=", "None", ",", "quiet", "=", "True", ")", ":", "if", "(", "args", "is", "None", ")", ":", "args", "=", "''", "RSYNC_CMD", "=", "(", "\"rsync -a %%s '%%s' %%s %s --exclude-from=/etc/cobbler/rsync.exclude\"", "%", "args", ")", "if", "quiet", ":", "RSYNC_CMD", "+=", "' --quiet'", "else", ":", "RSYNC_CMD", "+=", "' --progress'", "if", "(", "not", "dst", ".", "endswith", "(", "'/'", ")", ")", ":", "dst", "=", "(", "'%s/'", "%", "dst", ")", "if", "(", "not", "src", ".", "endswith", "(", "'/'", ")", ")", ":", "src", "=", "(", "'%s/'", "%", "src", ")", "spacer", "=", "''", "if", "(", "(", "not", "src", ".", "startswith", "(", "'rsync://'", ")", ")", "and", "(", "not", "src", ".", "startswith", "(", "'/'", ")", ")", ")", ":", "spacer", "=", "' -e \"ssh\" '", "rsync_cmd", "=", "(", "RSYNC_CMD", "%", "(", "spacer", ",", "src", ",", "dst", ")", ")", "try", ":", "res", "=", "subprocess_call", "(", "logger", ",", "rsync_cmd", ")", "if", "(", "res", "!=", "0", ")", ":", "die", "(", "logger", ",", "(", "\"Failed to run the rsync command: '%s'\"", "%", "rsync_cmd", ")", ")", "except", ":", "return", "False", "return", "True" ]
sync files from src to dst .
train
false
7,806
def int2byte(i): return (chr(i) if PY2 else bytes((i,)))
[ "def", "int2byte", "(", "i", ")", ":", "return", "(", "chr", "(", "i", ")", "if", "PY2", "else", "bytes", "(", "(", "i", ",", ")", ")", ")" ]
converts an integer to a byte .
train
false
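A quick check of the Py2/Py3 split described above:

    int2byte(65)  # b'A' on Python 3; 'A' (a length-1 str) on Python 2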
7,808
def get_lumination_change_value(label): return _check_range_and_return('lumination_change', label, (-19), 19)
[ "def", "get_lumination_change_value", "(", "label", ")", ":", "return", "_check_range_and_return", "(", "'lumination_change'", ",", "label", ",", "(", "-", "19", ")", ",", "19", ")" ]
returns the value corresponding to a lumination change label int .
train
false
7,809
def _is_dunder(name): return ((name[:2] == name[(-2):] == '__') and (name[2:3] != '_') and (name[(-3):(-2)] != '_') and (len(name) > 4))
[ "def", "_is_dunder", "(", "name", ")", ":", "return", "(", "(", "name", "[", ":", "2", "]", "==", "name", "[", "(", "-", "2", ")", ":", "]", "==", "'__'", ")", "and", "(", "name", "[", "2", ":", "3", "]", "!=", "'_'", ")", "and", "(", "name", "[", "(", "-", "3", ")", ":", "(", "-", "2", ")", "]", "!=", "'_'", ")", "and", "(", "len", "(", "name", ")", ">", "4", ")", ")" ]
returns true if a __dunder__ name .
train
false
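A behavior sketch: a dunder name needs double underscores at both ends, a non-underscore character just inside each pair, and more than four characters overall:

    _is_dunder('__init__')  # True
    _is_dunder('__x__')     # True (length 5 > 4)
    _is_dunder('____')      # False (too short)
    _is_dunder('___x___')   # False (underscore just inside the leading pair)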
7,811
def ErrCheckBool(result, func, args): if (not result): raise WinError() return args
[ "def", "ErrCheckBool", "(", "result", ",", "func", ",", "args", ")", ":", "if", "(", "not", "result", ")", ":", "raise", "WinError", "(", ")", "return", "args" ]
errcheck function for windows functions that return a bool true on success .
train
false
7,812
def redo(): with RopeContext() as ctx: changes = ctx.project.history.tobe_redone if (changes is None): env.error('Nothing to redo!') return False if env.user_confirm(('Redo [%s]?' % str(changes))): progress = ProgressHandler(('Redo %s' % str(changes))) for c in ctx.project.history.redo(task_handle=progress.handle): reload_changes(c)
[ "def", "redo", "(", ")", ":", "with", "RopeContext", "(", ")", "as", "ctx", ":", "changes", "=", "ctx", ".", "project", ".", "history", ".", "tobe_redone", "if", "(", "changes", "is", "None", ")", ":", "env", ".", "error", "(", "'Nothing to redo!'", ")", "return", "False", "if", "env", ".", "user_confirm", "(", "(", "'Redo [%s]?'", "%", "str", "(", "changes", ")", ")", ")", ":", "progress", "=", "ProgressHandler", "(", "(", "'Redo %s'", "%", "str", "(", "changes", ")", ")", ")", "for", "c", "in", "ctx", ".", "project", ".", "history", ".", "redo", "(", "task_handle", "=", "progress", ".", "handle", ")", ":", "reload_changes", "(", "c", ")" ]
redo last changes .
train
false
7,813
def spawn_n(func, *args, **kwargs): _context = common_context.get_current() profiler_info = _serialize_profile_info() @functools.wraps(func) def context_wrapper(*args, **kwargs): if (_context is not None): _context.update_store() if (profiler_info and profiler): profiler.init(**profiler_info) func(*args, **kwargs) eventlet.spawn_n(context_wrapper, *args, **kwargs)
[ "def", "spawn_n", "(", "func", ",", "*", "args", ",", "**", "kwargs", ")", ":", "_context", "=", "common_context", ".", "get_current", "(", ")", "profiler_info", "=", "_serialize_profile_info", "(", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "context_wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "_context", "is", "not", "None", ")", ":", "_context", ".", "update_store", "(", ")", "if", "(", "profiler_info", "and", "profiler", ")", ":", "profiler", ".", "init", "(", "**", "profiler_info", ")", "func", "(", "*", "args", ",", "**", "kwargs", ")", "eventlet", ".", "spawn_n", "(", "context_wrapper", ",", "*", "args", ",", "**", "kwargs", ")" ]
passthrough method for eventlet.spawn_n .
train
false
7,814
def find_test_dir(start_dir=None): if (not start_dir): start_dir = '.' target = os.path.abspath(start_dir) while True: if (os.path.isdir(os.path.join(target, 'Bio')) and os.path.isdir(os.path.join(target, 'Tests'))): return os.path.abspath(os.path.join(target, 'Tests')) (new, tmp) = os.path.split(target) if (target == new): break target = new raise ValueError(('Not within Biopython source tree: %r' % os.path.abspath(start_dir)))
[ "def", "find_test_dir", "(", "start_dir", "=", "None", ")", ":", "if", "(", "not", "start_dir", ")", ":", "start_dir", "=", "'.'", "target", "=", "os", ".", "path", ".", "abspath", "(", "start_dir", ")", "while", "True", ":", "if", "(", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "target", ",", "'Bio'", ")", ")", "and", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "target", ",", "'Tests'", ")", ")", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "target", ",", "'Tests'", ")", ")", "(", "new", ",", "tmp", ")", "=", "os", ".", "path", ".", "split", "(", "target", ")", "if", "(", "target", "==", "new", ")", ":", "break", "target", "=", "new", "raise", "ValueError", "(", "(", "'Not within Biopython source tree: %r'", "%", "os", ".", "path", ".", "abspath", "(", "start_dir", ")", ")", ")" ]
finds the absolute path of biopythons tests directory .
train
false
7,815
def Probability(o): return (o / (o + 1))
[ "def", "Probability", "(", "o", ")", ":", "return", "(", "o", "/", "(", "o", "+", "1", ")", ")" ]
computes the probability corresponding to given odds .
train
false
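A worked check, using float odds so the division is exact on Python 2 as well; odds of 2:1 in favor correspond to probability 2/3:

    Probability(2.0)  # 0.666... == 2.0 / (2.0 + 1)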
7,819
def checkTorrentFinished(): logger.info('Checking if any torrents have finished seeding and can be removed') with postprocessor_lock: myDB = db.DBConnection() results = myDB.select('SELECT * from snatched WHERE Status="Seed_Processed"') for album in results: hash = album['FolderName'] albumid = album['AlbumID'] torrent_removed = False if (headphones.CONFIG.TORRENT_DOWNLOADER == 1): torrent_removed = transmission.removeTorrent(hash, True) else: torrent_removed = utorrent.removeTorrent(hash, True) if torrent_removed: myDB.action('DELETE from snatched WHERE status = "Seed_Processed" and AlbumID=?', [albumid]) logger.info('Checking finished torrents completed')
[ "def", "checkTorrentFinished", "(", ")", ":", "logger", ".", "info", "(", "'Checking if any torrents have finished seeding and can be removed'", ")", "with", "postprocessor_lock", ":", "myDB", "=", "db", ".", "DBConnection", "(", ")", "results", "=", "myDB", ".", "select", "(", "'SELECT * from snatched WHERE Status=\"Seed_Processed\"'", ")", "for", "album", "in", "results", ":", "hash", "=", "album", "[", "'FolderName'", "]", "albumid", "=", "album", "[", "'AlbumID'", "]", "torrent_removed", "=", "False", "if", "(", "headphones", ".", "CONFIG", ".", "TORRENT_DOWNLOADER", "==", "1", ")", ":", "torrent_removed", "=", "transmission", ".", "removeTorrent", "(", "hash", ",", "True", ")", "else", ":", "torrent_removed", "=", "utorrent", ".", "removeTorrent", "(", "hash", ",", "True", ")", "if", "torrent_removed", ":", "myDB", ".", "action", "(", "'DELETE from snatched WHERE status = \"Seed_Processed\" and AlbumID=?'", ",", "[", "albumid", "]", ")", "logger", ".", "info", "(", "'Checking finished torrents completed'", ")" ]
remove torrent + data if post processed and finished seeding .
train
false
7,820
def number_format(value, decimal_pos=None): return numberformat.format(value, get_format('DECIMAL_SEPARATOR'), decimal_pos, get_format('NUMBER_GROUPING'), get_format('THOUSAND_SEPARATOR'))
[ "def", "number_format", "(", "value", ",", "decimal_pos", "=", "None", ")", ":", "return", "numberformat", ".", "format", "(", "value", ",", "get_format", "(", "'DECIMAL_SEPARATOR'", ")", ",", "decimal_pos", ",", "get_format", "(", "'NUMBER_GROUPING'", ")", ",", "get_format", "(", "'THOUSAND_SEPARATOR'", ")", ")" ]
enforces 2 decimal places after a number if only one is given ; also formats comma separators every 3rd digit before the decimal place .
train
false
7,821
def make_libvirtError(error_class, msg, error_code=None, error_domain=None, error_message=None, error_level=None, str1=None, str2=None, str3=None, int1=None, int2=None): exc = error_class(msg) exc.err = (error_code, error_domain, error_message, error_level, str1, str2, str3, int1, int2) return exc
[ "def", "make_libvirtError", "(", "error_class", ",", "msg", ",", "error_code", "=", "None", ",", "error_domain", "=", "None", ",", "error_message", "=", "None", ",", "error_level", "=", "None", ",", "str1", "=", "None", ",", "str2", "=", "None", ",", "str3", "=", "None", ",", "int1", "=", "None", ",", "int2", "=", "None", ")", ":", "exc", "=", "error_class", "(", "msg", ")", "exc", ".", "err", "=", "(", "error_code", ",", "error_domain", ",", "error_message", ",", "error_level", ",", "str1", ",", "str2", ",", "str3", ",", "int1", ",", "int2", ")", "return", "exc" ]
convenience function for creating libvirterror exceptions which allow you to specify arguments in constructor without having to manipulate the err tuple directly .
train
false
7,822
@nodes_or_number(0) def wheel_graph(n, create_using=None): (n_name, nodes) = n if (n_name == 0): G = nx.empty_graph(0, create_using=create_using) G.name = 'wheel_graph(0)' return G G = star_graph(nodes, create_using) G.name = ('wheel_graph(%s)' % (n_name,)) if (len(G) > 2): G.add_edges_from(pairwise(nodes[1:])) G.add_edge(nodes[(-1)], nodes[1]) return G
[ "@", "nodes_or_number", "(", "0", ")", "def", "wheel_graph", "(", "n", ",", "create_using", "=", "None", ")", ":", "(", "n_name", ",", "nodes", ")", "=", "n", "if", "(", "n_name", "==", "0", ")", ":", "G", "=", "nx", ".", "empty_graph", "(", "0", ",", "create_using", "=", "create_using", ")", "G", ".", "name", "=", "'wheel_graph(0)'", "return", "G", "G", "=", "star_graph", "(", "nodes", ",", "create_using", ")", "G", ".", "name", "=", "(", "'wheel_graph(%s)'", "%", "(", "n_name", ",", ")", ")", "if", "(", "len", "(", "G", ")", ">", "2", ")", ":", "G", ".", "add_edges_from", "(", "pairwise", "(", "nodes", "[", "1", ":", "]", ")", ")", "G", ".", "add_edge", "(", "nodes", "[", "(", "-", "1", ")", "]", ",", "nodes", "[", "1", "]", ")", "return", "G" ]
return the wheel graph . the wheel graph consists of a hub node connected to a cycle of n-1 nodes .
train
false
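A usage sketch with networkx; the 5-node wheel has one hub, a 4-node rim cycle, and 4 spokes:

    import networkx as nx

    G = nx.wheel_graph(5)
    (G.number_of_nodes(), G.number_of_edges())  # (5, 8): 4 rim edges + 4 spokes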
7,824
def pre_init_hook_for_submodules(cr, model, field): env = api.Environment(cr, SUPERUSER_ID, {}) with cr.savepoint(): table = env[model]._table column_exists = table_has_column(cr, table, field) if column_exists: extract_query = ("\n SELECT id, '%(model)s', '%(model)s,' || id, 'db', %(field)s\n FROM %(table)s\n WHERE %(field)s IS NOT NULL\n " % {'table': table, 'field': field, 'model': model}) image_field = 'file_db_store' else: extract_query = ("\n SELECT\n res_id,\n res_model,\n CONCAT_WS(',', res_model, res_id),\n 'filestore',\n id\n FROM ir_attachment\n WHERE res_field='%(field)s' AND res_model='%(model)s'\n " % {'model': model, 'field': field}) image_field = 'attachment_id' cr.execute(('\n INSERT INTO base_multi_image_image (\n owner_id,\n owner_model,\n owner_ref_id,\n storage,\n %s\n )\n %s\n ' % (image_field, extract_query)))
[ "def", "pre_init_hook_for_submodules", "(", "cr", ",", "model", ",", "field", ")", ":", "env", "=", "api", ".", "Environment", "(", "cr", ",", "SUPERUSER_ID", ",", "{", "}", ")", "with", "cr", ".", "savepoint", "(", ")", ":", "table", "=", "env", "[", "model", "]", ".", "_table", "column_exists", "=", "table_has_column", "(", "cr", ",", "table", ",", "field", ")", "if", "column_exists", ":", "extract_query", "=", "(", "\"\\n SELECT id, '%(model)s', '%(model)s,' || id, 'db', %(field)s\\n FROM %(table)s\\n WHERE %(field)s IS NOT NULL\\n \"", "%", "{", "'table'", ":", "table", ",", "'field'", ":", "field", ",", "'model'", ":", "model", "}", ")", "image_field", "=", "'file_db_store'", "else", ":", "extract_query", "=", "(", "\"\\n SELECT\\n res_id,\\n res_model,\\n CONCAT_WS(',', res_model, res_id),\\n 'filestore',\\n id\\n FROM ir_attachment\\n WHERE res_field='%(field)s' AND res_model='%(model)s'\\n \"", "%", "{", "'model'", ":", "model", ",", "'field'", ":", "field", "}", ")", "image_field", "=", "'attachment_id'", "cr", ".", "execute", "(", "(", "'\\n INSERT INTO base_multi_image_image (\\n owner_id,\\n owner_model,\\n owner_ref_id,\\n storage,\\n %s\\n )\\n %s\\n '", "%", "(", "image_field", ",", "extract_query", ")", ")", ")" ]
moves images from single to multi mode .
train
false
7,825
def count_seqs_from_file(fasta_file, parser=parse_fasta): result = 0 lens = [] for record in parser(fasta_file): result += 1 lens.append(len(record[1])) if (result == 0): return (result, None, None) else: return (result, mean(lens), std(lens))
[ "def", "count_seqs_from_file", "(", "fasta_file", ",", "parser", "=", "parse_fasta", ")", ":", "result", "=", "0", "lens", "=", "[", "]", "for", "record", "in", "parser", "(", "fasta_file", ")", ":", "result", "+=", "1", "lens", ".", "append", "(", "len", "(", "record", "[", "1", "]", ")", ")", "if", "(", "result", "==", "0", ")", ":", "return", "(", "result", ",", "None", ",", "None", ")", "else", ":", "return", "(", "result", ",", "mean", "(", "lens", ")", ",", "std", "(", "lens", ")", ")" ]
return number of sequences in fasta_file . fasta_file : an open file object .
train
false
7,826
def get_digest(value): if (not isinstance(value, str)): return value value = value.lower() if (value == 'md5'): return md5 elif (value == 'sha1'): return sha1 elif (value == 'sha224'): return sha224 elif (value == 'sha256'): return sha256 elif (value == 'sha384'): return sha384 elif (value == 'sha512'): return sha512 else: raise ValueError(('Invalid digest algorithm: %s' % value))
[ "def", "get_digest", "(", "value", ")", ":", "if", "(", "not", "isinstance", "(", "value", ",", "str", ")", ")", ":", "return", "value", "value", "=", "value", ".", "lower", "(", ")", "if", "(", "value", "==", "'md5'", ")", ":", "return", "md5", "elif", "(", "value", "==", "'sha1'", ")", ":", "return", "sha1", "elif", "(", "value", "==", "'sha224'", ")", ":", "return", "sha224", "elif", "(", "value", "==", "'sha256'", ")", ":", "return", "sha256", "elif", "(", "value", "==", "'sha384'", ")", ":", "return", "sha384", "elif", "(", "value", "==", "'sha512'", ")", ":", "return", "sha512", "else", ":", "raise", "ValueError", "(", "(", "'Invalid digest algorithm: %s'", "%", "value", ")", ")" ]
return a hashlib digest algorithm from a string .
train
false
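A usage sketch, assuming the hashlib constructors (md5, sha1, sha256, ...) were imported as the bare names this function returns; the lookup is case-insensitive:

    digestmod = get_digest('SHA256')   # -> hashlib.sha256
    digestmod(b'abc').hexdigest()[:8]  # 'ba7816bf'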