Column summary (name, type, observed range):
id_within_dataset (int64): values 1 to 55.5k
snippet (string): lengths 19 to 14.2k
tokens (list): lengths 6 to 1.63k
nl (string): lengths 6 to 352
split_within_dataset (string): 1 value
is_duplicated (bool): 2 classes
2,701
def get_metric(name):
    try:
        return globals()[name]
    except:
        raise ValueError('Invalid metric function.')
[ "def", "get_metric", "(", "name", ")", ":", "try", ":", "return", "globals", "(", ")", "[", "name", "]", "except", ":", "raise", "ValueError", "(", "'Invalid metric function.'", ")" ]
return the current value for the named metric .
train
false
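For context, a minimal runnable sketch of the globals()-based lookup this row illustrates; the accuracy metric and the call are hypothetical, and the snippet's bare except is narrowed to KeyError here.

def accuracy(y_true, y_pred):  # hypothetical metric registered at module level
    return sum(int(a == b) for a, b in zip(y_true, y_pred)) / len(y_true)

def get_metric(name):
    try:
        return globals()[name]  # resolve the function by name in module scope
    except KeyError:
        raise ValueError('Invalid metric function.')

print(get_metric('accuracy')([1, 0, 1], [1, 1, 1]))  # 0.666...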
2,702
def path_to_key(datastore, path):
    key_parts = []
    path_parts = path.strip(u'/').split(u'/')
    for (n, x) in enumerate(path_parts):
        (name, ext) = x.rsplit('.', 1)
        key_parts.extend([ext, name])
    return datastore.key(*key_parts)
[ "def", "path_to_key", "(", "datastore", ",", "path", ")", ":", "key_parts", "=", "[", "]", "path_parts", "=", "path", ".", "strip", "(", "u'/'", ")", ".", "split", "(", "u'/'", ")", "for", "(", "n", ",", "x", ")", "in", "enumerate", "(", "path_parts", ")", ":", "(", "name", ",", "ext", ")", "=", "x", ".", "rsplit", "(", "'.'", ",", "1", ")", "key_parts", ".", "extend", "(", "[", "ext", ",", "name", "]", ")", "return", "datastore", ".", "key", "(", "*", "key_parts", ")" ]
translates a file system path to a datastore key .
train
false
2,703
@_api_version(1.21)
@_client_version('1.5.0')
def inspect_network(network_id):
    response = _client_wrapper('inspect_network', network_id)
    _clear_context()
    return response
[ "@", "_api_version", "(", "1.21", ")", "@", "_client_version", "(", "'1.5.0'", ")", "def", "inspect_network", "(", "network_id", ")", ":", "response", "=", "_client_wrapper", "(", "'inspect_network'", ",", "network_id", ")", "_clear_context", "(", ")", "return", "response" ]
inspect network network_id id of network cli example: .
train
false
2,704
@task
def gitrepos(branch=None, fork='sympy'):
    with cd('/home/vagrant'):
        if (not exists('sympy-cache.git')):
            error('Run fab vagrant prepare first')
    if (not branch):
        branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
    if (branch == 'master'):
        raise Exception('Cannot release from master')
    run('mkdir -p repos')
    with cd('/home/vagrant/repos'):
        run('git clone --reference ../sympy-cache.git https://github.com/{fork}/sympy.git'.format(fork=fork))
    with cd('/home/vagrant/repos/sympy'):
        run(('git checkout -t origin/%s' % branch))
[ "@", "task", "def", "gitrepos", "(", "branch", "=", "None", ",", "fork", "=", "'sympy'", ")", ":", "with", "cd", "(", "'/home/vagrant'", ")", ":", "if", "(", "not", "exists", "(", "'sympy-cache.git'", ")", ")", ":", "error", "(", "'Run fab vagrant prepare first'", ")", "if", "(", "not", "branch", ")", ":", "branch", "=", "local", "(", "'git rev-parse --abbrev-ref HEAD'", ",", "capture", "=", "True", ")", "if", "(", "branch", "==", "'master'", ")", ":", "raise", "Exception", "(", "'Cannot release from master'", ")", "run", "(", "'mkdir -p repos'", ")", "with", "cd", "(", "'/home/vagrant/repos'", ")", ":", "run", "(", "'git clone --reference ../sympy-cache.git https://github.com/{fork}/sympy.git'", ".", "format", "(", "fork", "=", "fork", ")", ")", "with", "cd", "(", "'/home/vagrant/repos/sympy'", ")", ":", "run", "(", "(", "'git checkout -t origin/%s'", "%", "branch", ")", ")" ]
clone the repo fab vagrant prepare (namely .
train
false
2,705
def _GetSchemaEntryForPropertyType(property_type):
    from google.appengine.ext import db
    _MODEL_TYPE_TO_SCHEMA_ENTRY = {
        db.StringProperty: (_schema_type.STRING, None),
        db.IntegerProperty: (_schema_type.INT32, None),
        db.BooleanProperty: (_schema_type.BOOLEAN, None),
        db.FloatProperty: (_schema_type.DOUBLE, None),
        db.TextProperty: (_schema_type.STRING, None),
    }
    return _MODEL_TYPE_TO_SCHEMA_ENTRY.get(property_type, (None, None))
[ "def", "_GetSchemaEntryForPropertyType", "(", "property_type", ")", ":", "from", "google", ".", "appengine", ".", "ext", "import", "db", "_MODEL_TYPE_TO_SCHEMA_ENTRY", "=", "{", "db", ".", "StringProperty", ":", "(", "_schema_type", ".", "STRING", ",", "None", ")", ",", "db", ".", "IntegerProperty", ":", "(", "_schema_type", ".", "INT32", ",", "None", ")", ",", "db", ".", "BooleanProperty", ":", "(", "_schema_type", ".", "BOOLEAN", ",", "None", ")", ",", "db", ".", "FloatProperty", ":", "(", "_schema_type", ".", "DOUBLE", ",", "None", ")", ",", "db", ".", "TextProperty", ":", "(", "_schema_type", ".", "STRING", ",", "None", ")", "}", "return", "_MODEL_TYPE_TO_SCHEMA_ENTRY", ".", "get", "(", "property_type", ",", "(", "None", ",", "None", ")", ")" ]
converts db .
train
false
2,706
def test_disabled_blocking_update(basedir, config_stub, download_stub, data_tmpdir, tmpdir, win_registry, caplog):
    config_stub.data = {'content': {'host-block-lists': generic_blocklists(tmpdir), 'host-blocking-enabled': False}}
    host_blocker = adblock.HostBlocker()
    host_blocker.adblock_update()
    while host_blocker._in_progress:
        current_download = host_blocker._in_progress[0]
        with caplog.at_level(logging.ERROR):
            current_download.finished.emit()
    host_blocker.read_hosts()
    for str_url in URLS_TO_CHECK:
        assert (not host_blocker.is_blocked(QUrl(str_url)))
[ "def", "test_disabled_blocking_update", "(", "basedir", ",", "config_stub", ",", "download_stub", ",", "data_tmpdir", ",", "tmpdir", ",", "win_registry", ",", "caplog", ")", ":", "config_stub", ".", "data", "=", "{", "'content'", ":", "{", "'host-block-lists'", ":", "generic_blocklists", "(", "tmpdir", ")", ",", "'host-blocking-enabled'", ":", "False", "}", "}", "host_blocker", "=", "adblock", ".", "HostBlocker", "(", ")", "host_blocker", ".", "adblock_update", "(", ")", "while", "host_blocker", ".", "_in_progress", ":", "current_download", "=", "host_blocker", ".", "_in_progress", "[", "0", "]", "with", "caplog", ".", "at_level", "(", "logging", ".", "ERROR", ")", ":", "current_download", ".", "finished", ".", "emit", "(", ")", "host_blocker", ".", "read_hosts", "(", ")", "for", "str_url", "in", "URLS_TO_CHECK", ":", "assert", "(", "not", "host_blocker", ".", "is_blocked", "(", "QUrl", "(", "str_url", ")", ")", ")" ]
ensure no url is blocked when host blocking is disabled .
train
false
2,707
def _isproperdist(X):
    X = np.asarray(X)
    if ((not np.allclose(np.sum(X), 1)) or (not np.all((X >= 0))) or (not np.all((X <= 1)))):
        return False
    else:
        return True
[ "def", "_isproperdist", "(", "X", ")", ":", "X", "=", "np", ".", "asarray", "(", "X", ")", "if", "(", "(", "not", "np", ".", "allclose", "(", "np", ".", "sum", "(", "X", ")", ",", "1", ")", ")", "or", "(", "not", "np", ".", "all", "(", "(", "X", ">=", "0", ")", ")", ")", "or", "(", "not", "np", ".", "all", "(", "(", "X", "<=", "1", ")", ")", ")", ")", ":", "return", "False", "else", ":", "return", "True" ]
checks to see if x is a proper probability distribution .
train
false
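A self-contained check of the rule this row encodes (sums to 1, every entry in [0, 1]); the input vectors are made up for illustration.

import numpy as np

def is_proper_dist(X):  # same logic as _isproperdist above
    X = np.asarray(X)
    return bool(np.allclose(np.sum(X), 1) and np.all(X >= 0) and np.all(X <= 1))

print(is_proper_dist([0.2, 0.3, 0.5]))   # True
print(is_proper_dist([0.7, 0.7, -0.4]))  # False: negative mass, despite summing to 1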
2,708
def job_complete(node, verbose=False):
    if ((node.PollPath is None) or node.istip()):
        raise JobError(('Attempting to merge tip: %s' % node.Name))
    if node.Processed:
        raise JobError(('Already processed node: %s' % node.Name))
    if os.path.exists(node.PollPath):
        node.EndTime = time()
        node.TotalTime = (node.EndTime - node.StartTime)
        node.ExitStatus = open(node.PollPath).read().strip()
        if (node.ExitStatus != '0'):
            raise JobError(('Node %s did not complete correctly!' % node.Name))
        if verbose:
            print ('finishing %s, %f seconds' % (node.Name, node.TotalTime))
        node.Processed = True
        return True
    else:
        return False
[ "def", "job_complete", "(", "node", ",", "verbose", "=", "False", ")", ":", "if", "(", "(", "node", ".", "PollPath", "is", "None", ")", "or", "node", ".", "istip", "(", ")", ")", ":", "raise", "JobError", "(", "(", "'Attempting to merge tip: %s'", "%", "node", ".", "Name", ")", ")", "if", "node", ".", "Processed", ":", "raise", "JobError", "(", "(", "'Already processed node: %s'", "%", "node", ".", "Name", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "node", ".", "PollPath", ")", ":", "node", ".", "EndTime", "=", "time", "(", ")", "node", ".", "TotalTime", "=", "(", "node", ".", "EndTime", "-", "node", ".", "StartTime", ")", "node", ".", "ExitStatus", "=", "open", "(", "node", ".", "PollPath", ")", ".", "read", "(", ")", ".", "strip", "(", ")", "if", "(", "node", ".", "ExitStatus", "!=", "'0'", ")", ":", "raise", "JobError", "(", "(", "'Node %s did not complete correctly!'", "%", "node", ".", "Name", ")", ")", "if", "verbose", ":", "print", "(", "'finishing %s, %f seconds'", "%", "(", "node", ".", "Name", ",", "node", ".", "TotalTime", ")", ")", "node", ".", "Processed", "=", "True", "return", "True", "else", ":", "return", "False" ]
check if the job is complete .
train
false
2,709
def dmp_ground_nth(f, N, u, K):
    v = u
    for n in N:
        if (n < 0):
            raise IndexError(('`n` must be non-negative, got %i' % n))
        elif (n >= len(f)):
            return K.zero
        else:
            d = dmp_degree(f, v)
            if (d == (- oo)):
                d = (-1)
            (f, v) = (f[(d - n)], (v - 1))
    return f
[ "def", "dmp_ground_nth", "(", "f", ",", "N", ",", "u", ",", "K", ")", ":", "v", "=", "u", "for", "n", "in", "N", ":", "if", "(", "n", "<", "0", ")", ":", "raise", "IndexError", "(", "(", "'`n` must be non-negative, got %i'", "%", "n", ")", ")", "elif", "(", "n", ">=", "len", "(", "f", ")", ")", ":", "return", "K", ".", "zero", "else", ":", "d", "=", "dmp_degree", "(", "f", ",", "v", ")", "if", "(", "d", "==", "(", "-", "oo", ")", ")", ":", "d", "=", "(", "-", "1", ")", "(", "f", ",", "v", ")", "=", "(", "f", "[", "(", "d", "-", "n", ")", "]", ",", "(", "v", "-", "1", ")", ")", "return", "f" ]
return the ground n-th coefficient of f in k[x] .
train
false
2,711
@pytest.mark.network
def test_uptodate_columns_flag(script, data):
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0', 'simple2==3.0')
    script.pip('install', '-e', 'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package')
    result = script.pip('list', '-f', data.find_links, '--no-index', '--uptodate', '--format=columns')
    assert ('Package' in result.stdout)
    assert ('Version' in result.stdout)
    assert ('Location' in result.stdout)
    assert ('pip-test-package (0.1.1,' not in result.stdout)
    assert ('pip-test-package 0.1.1' in result.stdout), str(result)
    assert ('simple2 3.0' in result.stdout), str(result)
[ "@", "pytest", ".", "mark", ".", "network", "def", "test_uptodate_columns_flag", "(", "script", ",", "data", ")", ":", "script", ".", "pip", "(", "'install'", ",", "'-f'", ",", "data", ".", "find_links", ",", "'--no-index'", ",", "'simple==1.0'", ",", "'simple2==3.0'", ")", "script", ".", "pip", "(", "'install'", ",", "'-e'", ",", "'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'", ")", "result", "=", "script", ".", "pip", "(", "'list'", ",", "'-f'", ",", "data", ".", "find_links", ",", "'--no-index'", ",", "'--uptodate'", ",", "'--format=columns'", ")", "assert", "(", "'Package'", "in", "result", ".", "stdout", ")", "assert", "(", "'Version'", "in", "result", ".", "stdout", ")", "assert", "(", "'Location'", "in", "result", ".", "stdout", ")", "assert", "(", "'pip-test-package (0.1.1,'", "not", "in", "result", ".", "stdout", ")", "assert", "(", "'pip-test-package 0.1.1'", "in", "result", ".", "stdout", ")", ",", "str", "(", "result", ")", "assert", "(", "'simple2 3.0'", "in", "result", ".", "stdout", ")", ",", "str", "(", "result", ")" ]
test the behavior of --uptodate --format=columns flag in the list command .
train
false
2,712
def register_command(command, callback, **kwargs):
    return get_parser().add_subparser(command, parent_defaults={u'cli_command_callback': callback}, **kwargs)
[ "def", "register_command", "(", "command", ",", "callback", ",", "**", "kwargs", ")", ":", "return", "get_parser", "(", ")", ".", "add_subparser", "(", "command", ",", "parent_defaults", "=", "{", "u'cli_command_callback'", ":", "callback", "}", ",", "**", "kwargs", ")" ]
register a callback function to be executed when flexget is launched with the given command .
train
false
2,713
def is_coroutine_function(func):
    return getattr(func, '__tornado_coroutine__', False)
[ "def", "is_coroutine_function", "(", "func", ")", ":", "return", "getattr", "(", "func", ",", "'__tornado_coroutine__'", ",", "False", ")" ]
return whether *func* is a coroutine function .
train
false
2,715
def SetFileProperty(output, source_name, property_name, values, sep):
    output.write('set_source_files_properties(')
    output.write(source_name)
    output.write(' PROPERTIES ')
    output.write(property_name)
    output.write(' "')
    for value in values:
        output.write(CMakeStringEscape(value))
        output.write(sep)
    output.write('")\n')
[ "def", "SetFileProperty", "(", "output", ",", "source_name", ",", "property_name", ",", "values", ",", "sep", ")", ":", "output", ".", "write", "(", "'set_source_files_properties('", ")", "output", ".", "write", "(", "source_name", ")", "output", ".", "write", "(", "' PROPERTIES '", ")", "output", ".", "write", "(", "property_name", ")", "output", ".", "write", "(", "' \"'", ")", "for", "value", "in", "values", ":", "output", ".", "write", "(", "CMakeStringEscape", "(", "value", ")", ")", "output", ".", "write", "(", "sep", ")", "output", ".", "write", "(", "'\")\\n'", ")" ]
given a set of source file .
train
false
2,716
def delete_topic_rule(ruleName, region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_topic_rule(ruleName=ruleName)
        return {'deleted': True}
    except ClientError as e:
        return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
[ "def", "delete_topic_rule", "(", "ruleName", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "conn", ".", "delete_topic_rule", "(", "ruleName", "=", "ruleName", ")", "return", "{", "'deleted'", ":", "True", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'deleted'", ":", "False", ",", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
given a rule name .
train
false
2,717
def cell_to_packed_rowcol(cell):
    (row, col, row_abs, col_abs) = cell_to_rowcol(cell)
    if (col >= MAX_COL):
        raise Exception(('Column %s greater than IV in formula' % cell))
    if (row >= MAX_ROW):
        raise Exception(('Row %s greater than %d in formula' % (cell, MAX_ROW)))
    col |= (int((not row_abs)) << 15)
    col |= (int((not col_abs)) << 14)
    return (row, col)
[ "def", "cell_to_packed_rowcol", "(", "cell", ")", ":", "(", "row", ",", "col", ",", "row_abs", ",", "col_abs", ")", "=", "cell_to_rowcol", "(", "cell", ")", "if", "(", "col", ">=", "MAX_COL", ")", ":", "raise", "Exception", "(", "(", "'Column %s greater than IV in formula'", "%", "cell", ")", ")", "if", "(", "row", ">=", "MAX_ROW", ")", ":", "raise", "Exception", "(", "(", "'Row %s greater than %d in formula'", "%", "(", "cell", ",", "MAX_ROW", ")", ")", ")", "col", "|=", "(", "int", "(", "(", "not", "row_abs", ")", ")", "<<", "15", ")", "col", "|=", "(", "int", "(", "(", "not", "col_abs", ")", ")", "<<", "14", ")", "return", "(", "row", ",", "col", ")" ]
pack row and column into the required 4 byte format .
train
false
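A worked example of the packing in this row: bit 15 of the column word flags a relative (non-absolute) row and bit 14 a relative column. The parsed values below are a hypothetical 'B3' reference.

row, col, row_abs, col_abs = 2, 1, False, False  # hypothetical parse of 'B3'
packed_col = col
packed_col |= int(not row_abs) << 15  # bit 15 set: row is relative
packed_col |= int(not col_abs) << 14  # bit 14 set: column is relative
print(hex(packed_col))  # 0xc001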
2,718
def test_iris():
    skip_if_no_data()
    data = iris.Iris()
    assert (data.X is not None)
    assert np.all((data.X != np.inf))
    assert np.all((data.X != np.nan))
[ "def", "test_iris", "(", ")", ":", "skip_if_no_data", "(", ")", "data", "=", "iris", ".", "Iris", "(", ")", "assert", "(", "data", ".", "X", "is", "not", "None", ")", "assert", "np", ".", "all", "(", "(", "data", ".", "X", "!=", "np", ".", "inf", ")", ")", "assert", "np", ".", "all", "(", "(", "data", ".", "X", "!=", "np", ".", "nan", ")", ")" ]
load iris dataset .
train
false
2,720
def jid_dir(jid, job_dir=None, hash_type='sha256'):
    if six.PY3:
        jhash = getattr(hashlib, hash_type)(jid.encode('utf-8')).hexdigest()
    else:
        jhash = getattr(hashlib, hash_type)(str(jid)).hexdigest()
    parts = []
    if (job_dir is not None):
        parts.append(job_dir)
    parts.extend([jhash[:2], jhash[2:]])
    return os.path.join(*parts)
[ "def", "jid_dir", "(", "jid", ",", "job_dir", "=", "None", ",", "hash_type", "=", "'sha256'", ")", ":", "if", "six", ".", "PY3", ":", "jhash", "=", "getattr", "(", "hashlib", ",", "hash_type", ")", "(", "jid", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "else", ":", "jhash", "=", "getattr", "(", "hashlib", ",", "hash_type", ")", "(", "str", "(", "jid", ")", ")", ".", "hexdigest", "(", ")", "parts", "=", "[", "]", "if", "(", "job_dir", "is", "not", "None", ")", ":", "parts", ".", "append", "(", "job_dir", ")", "parts", ".", "extend", "(", "[", "jhash", "[", ":", "2", "]", ",", "jhash", "[", "2", ":", "]", "]", ")", "return", "os", ".", "path", ".", "join", "(", "*", "parts", ")" ]
return the jid_dir for the given job id .
train
false
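A sketch of the two-level fan-out this row produces, assuming Python 3; the job id and base directory below are hypothetical.

import hashlib
import os.path

jid = '20170101123456789012'  # hypothetical Salt job id
jhash = hashlib.sha256(jid.encode('utf-8')).hexdigest()
print(os.path.join('jobs', jhash[:2], jhash[2:]))
# jobs/<2-char prefix>/<62-char rest>: the prefix keeps any single directory small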
2,721
@contextmanager
def track_memory_usage(metric, course_id):
    memory_types = ['rss', 'vms']
    process = psutil.Process()
    baseline_memory_info = process.get_memory_info()
    baseline_usages = [getattr(baseline_memory_info, memory_type) for memory_type in memory_types]
    (yield)
    for (memory_type, baseline_usage) in zip(memory_types, baseline_usages):
        total_memory_info = process.get_memory_info()
        total_usage = getattr(total_memory_info, memory_type)
        memory_used = (total_usage - baseline_usage)
        dog_stats_api.increment(((metric + '.') + memory_type), memory_used, tags=['course_id:{}'.format(course_id)])
[ "@", "contextmanager", "def", "track_memory_usage", "(", "metric", ",", "course_id", ")", ":", "memory_types", "=", "[", "'rss'", ",", "'vms'", "]", "process", "=", "psutil", ".", "Process", "(", ")", "baseline_memory_info", "=", "process", ".", "get_memory_info", "(", ")", "baseline_usages", "=", "[", "getattr", "(", "baseline_memory_info", ",", "memory_type", ")", "for", "memory_type", "in", "memory_types", "]", "(", "yield", ")", "for", "(", "memory_type", ",", "baseline_usage", ")", "in", "zip", "(", "memory_types", ",", "baseline_usages", ")", ":", "total_memory_info", "=", "process", ".", "get_memory_info", "(", ")", "total_usage", "=", "getattr", "(", "total_memory_info", ",", "memory_type", ")", "memory_used", "=", "(", "total_usage", "-", "baseline_usage", ")", "dog_stats_api", ".", "increment", "(", "(", "(", "metric", "+", "'.'", ")", "+", "memory_type", ")", ",", "memory_used", ",", "tags", "=", "[", "'course_id:{}'", ".", "format", "(", "course_id", ")", "]", ")" ]
context manager to track how much memory a given process uses .
train
false
2,723
def key_json(minion_id, pillar, pillar_key=None):
    key_data = __salt__['redis.get_key'](minion_id)
    if (not key_data):
        return {}
    data = json.loads(key_data)
    if (isinstance(data, dict) and (not pillar_key)):
        return data
    elif (not pillar_key):
        return {'redis_pillar': data}
    else:
        return {pillar_key: data}
[ "def", "key_json", "(", "minion_id", ",", "pillar", ",", "pillar_key", "=", "None", ")", ":", "key_data", "=", "__salt__", "[", "'redis.get_key'", "]", "(", "minion_id", ")", "if", "(", "not", "key_data", ")", ":", "return", "{", "}", "data", "=", "json", ".", "loads", "(", "key_data", ")", "if", "(", "isinstance", "(", "data", ",", "dict", ")", "and", "(", "not", "pillar_key", ")", ")", ":", "return", "data", "elif", "(", "not", "pillar_key", ")", ":", "return", "{", "'redis_pillar'", ":", "data", "}", "else", ":", "return", "{", "pillar_key", ":", "data", "}" ]
pulls a string from redis and deserializes it from json .
train
true
2,724
def p_command_goto(p):
    p[0] = ('GOTO', int(p[2]))
[ "def", "p_command_goto", "(", "p", ")", ":", "p", "[", "0", "]", "=", "(", "'GOTO'", ",", "int", "(", "p", "[", "2", "]", ")", ")" ]
command : goto integer .
train
false
2,725
def connect_directconnect(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    from boto.directconnect.layer1 import DirectConnectConnection
    return DirectConnectConnection(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, **kwargs)
[ "def", "connect_directconnect", "(", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "**", "kwargs", ")", ":", "from", "boto", ".", "directconnect", ".", "layer1", "import", "DirectConnectConnection", "return", "DirectConnectConnection", "(", "aws_access_key_id", "=", "aws_access_key_id", ",", "aws_secret_access_key", "=", "aws_secret_access_key", ",", "**", "kwargs", ")" ]
connect to aws directconnect :type aws_access_key_id: string .
train
false
2,726
def is_builtin_name(name):
    if (name.startswith(u'__') and name.endswith(u'__')):
        return (ALL_LOWER_CASE_RE.match(name[2:(-2)]) is not None)
    return False
[ "def", "is_builtin_name", "(", "name", ")", ":", "if", "(", "name", ".", "startswith", "(", "u'__'", ")", "and", "name", ".", "endswith", "(", "u'__'", ")", ")", ":", "return", "(", "ALL_LOWER_CASE_RE", ".", "match", "(", "name", "[", "2", ":", "(", "-", "2", ")", "]", ")", "is", "not", "None", ")", "return", "False" ]
for example .
train
true
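A runnable sketch of this row's dunder test; ALL_LOWER_CASE_RE is not shown in the snippet, so the regex below is an assumed stand-in.

import re
ALL_LOWER_CASE_RE = re.compile(r'[a-z]+(_[a-z]+)*$')  # assumed definition

def is_builtin_name(name):
    if name.startswith('__') and name.endswith('__'):
        return ALL_LOWER_CASE_RE.match(name[2:-2]) is not None
    return False

print(is_builtin_name('__init__'))    # True
print(is_builtin_name('__MyName__'))  # False: inner part is not all lower case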
2,727
def runtest(test, verbose, quiet, huntrleaks=False, use_resources=None, pgo=False):
    test_support.verbose = verbose
    if (use_resources is not None):
        test_support.use_resources = use_resources
    try:
        return runtest_inner(test, verbose, quiet, huntrleaks, pgo)
    finally:
        cleanup_test_droppings(test, verbose)
[ "def", "runtest", "(", "test", ",", "verbose", ",", "quiet", ",", "huntrleaks", "=", "False", ",", "use_resources", "=", "None", ",", "pgo", "=", "False", ")", ":", "test_support", ".", "verbose", "=", "verbose", "if", "(", "use_resources", "is", "not", "None", ")", ":", "test_support", ".", "use_resources", "=", "use_resources", "try", ":", "return", "runtest_inner", "(", "test", ",", "verbose", ",", "quiet", ",", "huntrleaks", ",", "pgo", ")", "finally", ":", "cleanup_test_droppings", "(", "test", ",", "verbose", ")" ]
run a single test .
train
false
2,728
@image_comparison(baseline_images=[u'EventCollection_plot__add_positions'])
def test__EventCollection__add_positions():
    (splt, coll, props) = generate_EventCollection_plot()
    new_positions = np.hstack([props[u'positions'], props[u'extra_positions'][0]])
    coll.add_positions(props[u'extra_positions'][0])
    np.testing.assert_array_equal(new_positions, coll.get_positions())
    check_segments(coll, new_positions, props[u'linelength'], props[u'lineoffset'], props[u'orientation'])
    splt.set_title(u'EventCollection: add_positions')
    splt.set_xlim((-1), 35)
[ "@", "image_comparison", "(", "baseline_images", "=", "[", "u'EventCollection_plot__add_positions'", "]", ")", "def", "test__EventCollection__add_positions", "(", ")", ":", "(", "splt", ",", "coll", ",", "props", ")", "=", "generate_EventCollection_plot", "(", ")", "new_positions", "=", "np", ".", "hstack", "(", "[", "props", "[", "u'positions'", "]", ",", "props", "[", "u'extra_positions'", "]", "[", "0", "]", "]", ")", "coll", ".", "add_positions", "(", "props", "[", "u'extra_positions'", "]", "[", "0", "]", ")", "np", ".", "testing", ".", "assert_array_equal", "(", "new_positions", ",", "coll", ".", "get_positions", "(", ")", ")", "check_segments", "(", "coll", ",", "new_positions", ",", "props", "[", "u'linelength'", "]", ",", "props", "[", "u'lineoffset'", "]", ",", "props", "[", "u'orientation'", "]", ")", "splt", ".", "set_title", "(", "u'EventCollection: add_positions'", ")", "splt", ".", "set_xlim", "(", "(", "-", "1", ")", ",", "35", ")" ]
check to make sure add_positions works properly .
train
false
2,731
def test_alias_args_commented():
    _ip.magic('alias commetarg echo this is %%s a commented out arg')
    with capture_output() as cap:
        _ip.run_cell('commetarg')
    nt.assert_equal(cap.stdout, 'this is %s a commented out arg')
[ "def", "test_alias_args_commented", "(", ")", ":", "_ip", ".", "magic", "(", "'alias commetarg echo this is %%s a commented out arg'", ")", "with", "capture_output", "(", ")", "as", "cap", ":", "_ip", ".", "run_cell", "(", "'commetarg'", ")", "nt", ".", "assert_equal", "(", "cap", ".", "stdout", ",", "'this is %s a commented out arg'", ")" ]
check that alias correctly ignores commented out args .
train
false
2,733
def multigammaln(a, d):
    a = np.asarray(a)
    if ((not np.isscalar(d)) or (np.floor(d) != d)):
        raise ValueError('d should be a positive integer (dimension)')
    if np.any((a <= (0.5 * (d - 1)))):
        raise ValueError(('condition a (%f) > 0.5 * (d-1) (%f) not met' % (a, (0.5 * (d - 1)))))
    res = (((d * (d - 1)) * 0.25) * np.log(np.pi))
    res += np.sum(loggam([(a - ((j - 1.0) / 2)) for j in range(1, (d + 1))]), axis=0)
    return res
[ "def", "multigammaln", "(", "a", ",", "d", ")", ":", "a", "=", "np", ".", "asarray", "(", "a", ")", "if", "(", "(", "not", "np", ".", "isscalar", "(", "d", ")", ")", "or", "(", "np", ".", "floor", "(", "d", ")", "!=", "d", ")", ")", ":", "raise", "ValueError", "(", "'d should be a positive integer (dimension)'", ")", "if", "np", ".", "any", "(", "(", "a", "<=", "(", "0.5", "*", "(", "d", "-", "1", ")", ")", ")", ")", ":", "raise", "ValueError", "(", "(", "'condition a (%f) > 0.5 * (d-1) (%f) not met'", "%", "(", "a", ",", "(", "0.5", "*", "(", "d", "-", "1", ")", ")", ")", ")", ")", "res", "=", "(", "(", "(", "d", "*", "(", "d", "-", "1", ")", ")", "*", "0.25", ")", "*", "np", ".", "log", "(", "np", ".", "pi", ")", ")", "res", "+=", "np", ".", "sum", "(", "loggam", "(", "[", "(", "a", "-", "(", "(", "j", "-", "1.0", ")", "/", "2", ")", ")", "for", "j", "in", "range", "(", "1", ",", "(", "d", "+", "1", ")", ")", "]", ")", ",", "axis", "=", "0", ")", "return", "res" ]
multivariate log gamma .
train
false
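A quick sanity check of this row's formula: for d = 1 the sum collapses to ln Γ(a), so SciPy's own multigammaln should agree with plain gammaln.

import numpy as np
from scipy.special import gammaln, multigammaln

a = 3.5
print(np.isclose(multigammaln(a, 1), gammaln(a)))  # True: d = 1 is the ordinary log-gamma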
2,734
def _psturng(q, r, v):
    if (q < 0.0):
        raise ValueError('q should be >= 0')
    opt_func = (lambda p, r, v: abs((_qsturng(p, r, v) - q)))
    if (v == 1):
        if (q < _qsturng(0.9, r, 1)):
            return 0.1
        elif (q > _qsturng(0.999, r, 1)):
            return 0.001
        return (1.0 - fminbound(opt_func, 0.9, 0.999, args=(r, v)))
    else:
        if (q < _qsturng(0.1, r, v)):
            return 0.9
        elif (q > _qsturng(0.999, r, v)):
            return 0.001
        return (1.0 - fminbound(opt_func, 0.1, 0.999, args=(r, v)))
[ "def", "_psturng", "(", "q", ",", "r", ",", "v", ")", ":", "if", "(", "q", "<", "0.0", ")", ":", "raise", "ValueError", "(", "'q should be >= 0'", ")", "opt_func", "=", "(", "lambda", "p", ",", "r", ",", "v", ":", "abs", "(", "(", "_qsturng", "(", "p", ",", "r", ",", "v", ")", "-", "q", ")", ")", ")", "if", "(", "v", "==", "1", ")", ":", "if", "(", "q", "<", "_qsturng", "(", "0.9", ",", "r", ",", "1", ")", ")", ":", "return", "0.1", "elif", "(", "q", ">", "_qsturng", "(", "0.999", ",", "r", ",", "1", ")", ")", ":", "return", "0.001", "return", "(", "1.0", "-", "fminbound", "(", "opt_func", ",", "0.9", ",", "0.999", ",", "args", "=", "(", "r", ",", "v", ")", ")", ")", "else", ":", "if", "(", "q", "<", "_qsturng", "(", "0.1", ",", "r", ",", "v", ")", ")", ":", "return", "0.9", "elif", "(", "q", ">", "_qsturng", "(", "0.999", ",", "r", ",", "v", ")", ")", ":", "return", "0.001", "return", "(", "1.0", "-", "fminbound", "(", "opt_func", ",", "0.1", ",", "0.999", ",", "args", "=", "(", "r", ",", "v", ")", ")", ")" ]
scalar version of psturng .
train
true
2,735
@deprecated(since='0.7', message='Use os.path.sep, instead.')
def file_separator():
    return FILE_SEPARATOR[_os.name]
[ "@", "deprecated", "(", "since", "=", "'0.7'", ",", "message", "=", "'Use os.path.sep, instead.'", ")", "def", "file_separator", "(", ")", ":", "return", "FILE_SEPARATOR", "[", "_os", ".", "name", "]" ]
get the file separator for the current operating system .
train
false
2,736
def validate_value_type(value, spec):
    if ('maxlen' in spec):
        return (len(value) <= spec['maxlen'])
    if (spec['base'] == 'string'):
        if ('enumeration' in spec):
            if (value not in spec['enumeration']):
                raise NotValid('value not in enumeration')
        else:
            return valid_string(value)
    elif (spec['base'] == 'list'):
        for val in [v.strip() for v in value.split(',')]:
            valid(spec['member'], val)
    else:
        return valid(spec['base'], value)
    return True
[ "def", "validate_value_type", "(", "value", ",", "spec", ")", ":", "if", "(", "'maxlen'", "in", "spec", ")", ":", "return", "(", "len", "(", "value", ")", "<=", "spec", "[", "'maxlen'", "]", ")", "if", "(", "spec", "[", "'base'", "]", "==", "'string'", ")", ":", "if", "(", "'enumeration'", "in", "spec", ")", ":", "if", "(", "value", "not", "in", "spec", "[", "'enumeration'", "]", ")", ":", "raise", "NotValid", "(", "'value not in enumeration'", ")", "else", ":", "return", "valid_string", "(", "value", ")", "elif", "(", "spec", "[", "'base'", "]", "==", "'list'", ")", ":", "for", "val", "in", "[", "v", ".", "strip", "(", ")", "for", "v", "in", "value", ".", "split", "(", "','", ")", "]", ":", "valid", "(", "spec", "[", "'member'", "]", ",", "val", ")", "else", ":", "return", "valid", "(", "spec", "[", "'base'", "]", ",", "value", ")", "return", "True" ]
c_value_type = {base: string .
train
true
2,737
def _absolute_url(is_secure, url_path):
    site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
    parts = (('https' if is_secure else 'http'), site_name, url_path, '', '', '')
    return urlparse.urlunparse(parts)
[ "def", "_absolute_url", "(", "is_secure", ",", "url_path", ")", ":", "site_name", "=", "configuration_helpers", ".", "get_value", "(", "'SITE_NAME'", ",", "settings", ".", "SITE_NAME", ")", "parts", "=", "(", "(", "'https'", "if", "is_secure", "else", "'http'", ")", ",", "site_name", ",", "url_path", ",", "''", ",", "''", ",", "''", ")", "return", "urlparse", ".", "urlunparse", "(", "parts", ")" ]
construct an absolute url back to the site .
train
false
2,738
def configure_typogrify(pelicanobj, mathjax_settings):
    if (not pelicanobj.settings.get('TYPOGRIFY', False)):
        return
    try:
        import typogrify
        from distutils.version import LooseVersion
        if (LooseVersion(typogrify.__version__) < LooseVersion('2.0.7')):
            raise TypeError('Incorrect version of Typogrify')
        from typogrify.filters import typogrify
        pelicanobj.settings['TYPOGRIFY_IGNORE_TAGS'].extend(['.math', 'script'])
    except (ImportError, TypeError) as e:
        pelicanobj.settings['TYPOGRIFY'] = False
        if isinstance(e, ImportError):
            print '\nTypogrify is not installed, so it is being ignored.\nIf you want to use it, please install via: pip install typogrify\n'
        if isinstance(e, TypeError):
            print '\nA more recent version of Typogrify is needed for the render_math module.\nPlease upgrade Typogrify to the latest version (anything equal or above version 2.0.7 is okay).\nTypogrify will be turned off due to this reason.\n'
[ "def", "configure_typogrify", "(", "pelicanobj", ",", "mathjax_settings", ")", ":", "if", "(", "not", "pelicanobj", ".", "settings", ".", "get", "(", "'TYPOGRIFY'", ",", "False", ")", ")", ":", "return", "try", ":", "import", "typogrify", "from", "distutils", ".", "version", "import", "LooseVersion", "if", "(", "LooseVersion", "(", "typogrify", ".", "__version__", ")", "<", "LooseVersion", "(", "'2.0.7'", ")", ")", ":", "raise", "TypeError", "(", "'Incorrect version of Typogrify'", ")", "from", "typogrify", ".", "filters", "import", "typogrify", "pelicanobj", ".", "settings", "[", "'TYPOGRIFY_IGNORE_TAGS'", "]", ".", "extend", "(", "[", "'.math'", ",", "'script'", "]", ")", "except", "(", "ImportError", ",", "TypeError", ")", "as", "e", ":", "pelicanobj", ".", "settings", "[", "'TYPOGRIFY'", "]", "=", "False", "if", "isinstance", "(", "e", ",", "ImportError", ")", ":", "print", "'\\nTypogrify is not installed, so it is being ignored.\\nIf you want to use it, please install via: pip install typogrify\\n'", "if", "isinstance", "(", "e", ",", "TypeError", ")", ":", "print", "'\\nA more recent version of Typogrify is needed for the render_math module.\\nPlease upgrade Typogrify to the latest version (anything equal or above version 2.0.7 is okay).\\nTypogrify will be turned off due to this reason.\\n'" ]
instructs typogrify to ignore math tags - which allows typogrify to play nicely with math related content .
train
true
2,739
def test_run_random():
    path = fs.relpath(join(abspath(dirname(__file__)), 'no_features', 'unexistent-folder'))
    runner = Runner(path, random=True)
    assert_equals(True, runner.random)
    with patch.object(random, 'shuffle') as pshuffle:
        runner.run()
        pshuffle.assert_called_once_with([])
[ "def", "test_run_random", "(", ")", ":", "path", "=", "fs", ".", "relpath", "(", "join", "(", "abspath", "(", "dirname", "(", "__file__", ")", ")", ",", "'no_features'", ",", "'unexistent-folder'", ")", ")", "runner", "=", "Runner", "(", "path", ",", "random", "=", "True", ")", "assert_equals", "(", "True", ",", "runner", ".", "random", ")", "with", "patch", ".", "object", "(", "random", ",", "'shuffle'", ")", "as", "pshuffle", ":", "runner", ".", "run", "(", ")", "pshuffle", ".", "assert_called_once_with", "(", "[", "]", ")" ]
randomise the feature order .
train
false
2,740
def toposort(dsk, dependencies=None):
    return _toposort(dsk, dependencies=dependencies)
[ "def", "toposort", "(", "dsk", ",", "dependencies", "=", "None", ")", ":", "return", "_toposort", "(", "dsk", ",", "dependencies", "=", "dependencies", ")" ]
sorts prereqs_d .
train
false
2,741
def normalDict(request_data):
    return dict(((k, v[0]) for (k, v) in request_data.iteritems()))
[ "def", "normalDict", "(", "request_data", ")", ":", "return", "dict", "(", "(", "(", "k", ",", "v", "[", "0", "]", ")", "for", "(", "k", ",", "v", ")", "in", "request_data", ".", "iteritems", "(", ")", ")", ")" ]
converts a django request mutlivaluedict into a standard python dict whose values are the first value from each of the multivaluedicts value lists .
train
false
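The same first-value collapse written for Python 3 (the snippet's iteritems is Python 2); the input dict below is a hypothetical stand-in for a Django MultiValueDict's contents.

request_data = {'q': ['python', 'django'], 'page': ['2']}  # hypothetical multi-value data
normal = {k: v[0] for k, v in request_data.items()}
print(normal)  # {'q': 'python', 'page': '2'}: only the first value of each list survives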
2,743
def xmlDoc_from_xml(response):
    utf8body = (body_as_utf8(response) or ' ')
    try:
        lxdoc = libxml2.readDoc(utf8body, response.url, 'utf-8', xml_parser_options)
    except TypeError:
        lxdoc = libxml2.readDoc(utf8body.replace('\x00', ''), response.url, 'utf-8', xml_parser_options)
    return lxdoc
[ "def", "xmlDoc_from_xml", "(", "response", ")", ":", "utf8body", "=", "(", "body_as_utf8", "(", "response", ")", "or", "' '", ")", "try", ":", "lxdoc", "=", "libxml2", ".", "readDoc", "(", "utf8body", ",", "response", ".", "url", ",", "'utf-8'", ",", "xml_parser_options", ")", "except", "TypeError", ":", "lxdoc", "=", "libxml2", ".", "readDoc", "(", "utf8body", ".", "replace", "(", "'\\x00'", ",", "''", ")", ",", "response", ".", "url", ",", "'utf-8'", ",", "xml_parser_options", ")", "return", "lxdoc" ]
return libxml2 doc for xmls .
train
false
2,744
def execute_cmd(cmd, from_async=False):
    for hook in frappe.get_hooks(u'override_whitelisted_methods', {}).get(cmd, []):
        cmd = hook
        break
    try:
        method = get_attr(cmd)
    except:
        frappe.respond_as_web_page(title=u'Invalid Method', html=u'Method not found', indicator_color=u'red', http_status_code=404)
        return
    if from_async:
        method = method.queue
    is_whitelisted(method)
    ret = frappe.call(method, **frappe.form_dict)
    if ret:
        frappe.response[u'message'] = ret
[ "def", "execute_cmd", "(", "cmd", ",", "from_async", "=", "False", ")", ":", "for", "hook", "in", "frappe", ".", "get_hooks", "(", "u'override_whitelisted_methods'", ",", "{", "}", ")", ".", "get", "(", "cmd", ",", "[", "]", ")", ":", "cmd", "=", "hook", "break", "try", ":", "method", "=", "get_attr", "(", "cmd", ")", "except", ":", "frappe", ".", "respond_as_web_page", "(", "title", "=", "u'Invalid Method'", ",", "html", "=", "u'Method not found'", ",", "indicator_color", "=", "u'red'", ",", "http_status_code", "=", "404", ")", "return", "if", "from_async", ":", "method", "=", "method", ".", "queue", "is_whitelisted", "(", "method", ")", "ret", "=", "frappe", ".", "call", "(", "method", ",", "**", "frappe", ".", "form_dict", ")", "if", "ret", ":", "frappe", ".", "response", "[", "u'message'", "]", "=", "ret" ]
execute a request as python module .
train
false
2,746
def page_not_found(request, template_name='404.html'):
    t = loader.get_template(template_name)
    return http.HttpResponseNotFound(t.render(RequestContext(request, {'request_path': request.path})))
[ "def", "page_not_found", "(", "request", ",", "template_name", "=", "'404.html'", ")", ":", "t", "=", "loader", ".", "get_template", "(", "template_name", ")", "return", "http", ".", "HttpResponseNotFound", "(", "t", ".", "render", "(", "RequestContext", "(", "request", ",", "{", "'request_path'", ":", "request", ".", "path", "}", ")", ")", ")" ]
default 404 handler .
train
false
2,747
def _check_bases(seq_string):
    seq_string = ''.join(seq_string.split()).upper()
    for c in '0123456789':
        seq_string = seq_string.replace(c, '')
    if (not set(seq_string).issubset(set('ABCDGHKMNRSTVWY'))):
        raise TypeError(('Invalid character found in %s' % repr(seq_string)))
    return (' ' + seq_string)
[ "def", "_check_bases", "(", "seq_string", ")", ":", "seq_string", "=", "''", ".", "join", "(", "seq_string", ".", "split", "(", ")", ")", ".", "upper", "(", ")", "for", "c", "in", "'0123456789'", ":", "seq_string", "=", "seq_string", ".", "replace", "(", "c", ",", "''", ")", "if", "(", "not", "set", "(", "seq_string", ")", ".", "issubset", "(", "set", "(", "'ABCDGHKMNRSTVWY'", ")", ")", ")", ":", "raise", "TypeError", "(", "(", "'Invalid character found in %s'", "%", "repr", "(", "seq_string", ")", ")", ")", "return", "(", "' '", "+", "seq_string", ")" ]
check characters in a string .
train
false
2,748
def dup_gf_factor(f, K):
    f = dup_convert(f, K, K.dom)
    (coeff, factors) = gf_factor(f, K.mod, K.dom)
    for (i, (f, k)) in enumerate(factors):
        factors[i] = (dup_convert(f, K.dom, K), k)
    return (K.convert(coeff, K.dom), factors)
[ "def", "dup_gf_factor", "(", "f", ",", "K", ")", ":", "f", "=", "dup_convert", "(", "f", ",", "K", ",", "K", ".", "dom", ")", "(", "coeff", ",", "factors", ")", "=", "gf_factor", "(", "f", ",", "K", ".", "mod", ",", "K", ".", "dom", ")", "for", "(", "i", ",", "(", "f", ",", "k", ")", ")", "in", "enumerate", "(", "factors", ")", ":", "factors", "[", "i", "]", "=", "(", "dup_convert", "(", "f", ",", "K", ".", "dom", ",", "K", ")", ",", "k", ")", "return", "(", "K", ".", "convert", "(", "coeff", ",", "K", ".", "dom", ")", ",", "factors", ")" ]
factor univariate polynomials over finite fields .
train
false
2,749
def build_schema_test(name, schema, schema_store, failing_instances, passing_instances):
    body = {'schema': schema, 'schema_store': schema_store, 'validator': getValidator(schema, schema_store), 'passing_instances': passing_instances, 'failing_instances': failing_instances}
    for error_type in failing_instances:
        for (i, inst) in enumerate(failing_instances[error_type]):
            def test(self, inst=inst, error_type=error_type):
                e = self.assertRaises(ValidationError, self.validator.validate, inst)
                self.assertEqual(e.validator, error_type)
            test.__name__ = ('test_fails_validation_%s_%d' % (error_type, i))
            body[test.__name__] = test
    for (i, inst) in enumerate(passing_instances):
        def test(self, inst=inst):
            self.validator.validate(inst)
        test.__name__ = ('test_passes_validation_%d' % (i,))
        body[test.__name__] = test
    return type(name, (TestCase, object), body)
[ "def", "build_schema_test", "(", "name", ",", "schema", ",", "schema_store", ",", "failing_instances", ",", "passing_instances", ")", ":", "body", "=", "{", "'schema'", ":", "schema", ",", "'schema_store'", ":", "schema_store", ",", "'validator'", ":", "getValidator", "(", "schema", ",", "schema_store", ")", ",", "'passing_instances'", ":", "passing_instances", ",", "'failing_instances'", ":", "failing_instances", "}", "for", "error_type", "in", "failing_instances", ":", "for", "(", "i", ",", "inst", ")", "in", "enumerate", "(", "failing_instances", "[", "error_type", "]", ")", ":", "def", "test", "(", "self", ",", "inst", "=", "inst", ",", "error_type", "=", "error_type", ")", ":", "e", "=", "self", ".", "assertRaises", "(", "ValidationError", ",", "self", ".", "validator", ".", "validate", ",", "inst", ")", "self", ".", "assertEqual", "(", "e", ".", "validator", ",", "error_type", ")", "test", ".", "__name__", "=", "(", "'test_fails_validation_%s_%d'", "%", "(", "error_type", ",", "i", ")", ")", "body", "[", "test", ".", "__name__", "]", "=", "test", "for", "(", "i", ",", "inst", ")", "in", "enumerate", "(", "passing_instances", ")", ":", "def", "test", "(", "self", ",", "inst", "=", "inst", ")", ":", "self", ".", "validator", ".", "validate", "(", "inst", ")", "test", ".", "__name__", "=", "(", "'test_passes_validation_%d'", "%", "(", "i", ",", ")", ")", "body", "[", "test", ".", "__name__", "]", "=", "test", "return", "type", "(", "name", ",", "(", "TestCase", ",", "object", ")", ",", "body", ")" ]
create test case verifying that various instances pass and fail verification with a given json schema .
train
false
2,751
def tune(scale, acc_rate):
    if (acc_rate < 0.001):
        scale *= 0.1
    elif (acc_rate < 0.05):
        scale *= 0.5
    elif (acc_rate < 0.2):
        scale *= 0.9
    elif (acc_rate > 0.95):
        scale *= 10.0
    elif (acc_rate > 0.75):
        scale *= 2.0
    elif (acc_rate > 0.5):
        scale *= 1.1
    return scale
[ "def", "tune", "(", "scale", ",", "acc_rate", ")", ":", "if", "(", "acc_rate", "<", "0.001", ")", ":", "scale", "*=", "0.1", "elif", "(", "acc_rate", "<", "0.05", ")", ":", "scale", "*=", "0.5", "elif", "(", "acc_rate", "<", "0.2", ")", ":", "scale", "*=", "0.9", "elif", "(", "acc_rate", ">", "0.95", ")", ":", "scale", "*=", "10.0", "elif", "(", "acc_rate", ">", "0.75", ")", ":", "scale", "*=", "2.0", "elif", "(", "acc_rate", ">", "0.5", ")", ":", "scale", "*=", "1.1", "return", "scale" ]
set attributes for the specified device cli example: .
train
false
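A short trace of this row's acceptance-rate feedback schedule, restated compactly; the starting scale and rates below are illustrative only.

def tune(scale, acc_rate):  # same schedule as the snippet above
    if acc_rate < 0.001: scale *= 0.1
    elif acc_rate < 0.05: scale *= 0.5
    elif acc_rate < 0.2:  scale *= 0.9
    elif acc_rate > 0.95: scale *= 10.0
    elif acc_rate > 0.75: scale *= 2.0
    elif acc_rate > 0.5:  scale *= 1.1
    return scale

print(tune(1.0, 0.02))  # 0.5: too few acceptances, shrink the proposal step
print(tune(1.0, 0.98))  # 10.0: almost everything accepted, widen aggressively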
2,752
def match_descriptors(descriptors1, descriptors2, metric=None, p=2, max_distance=np.inf, cross_check=True):
    if (descriptors1.shape[1] != descriptors2.shape[1]):
        raise ValueError('Descriptor length must equal.')
    if (metric is None):
        if np.issubdtype(descriptors1.dtype, np.bool):
            metric = 'hamming'
        else:
            metric = 'euclidean'
    distances = cdist(descriptors1, descriptors2, metric=metric, p=p)
    indices1 = np.arange(descriptors1.shape[0])
    indices2 = np.argmin(distances, axis=1)
    if cross_check:
        matches1 = np.argmin(distances, axis=0)
        mask = (indices1 == matches1[indices2])
        indices1 = indices1[mask]
        indices2 = indices2[mask]
    matches = np.column_stack((indices1, indices2))
    if (max_distance < np.inf):
        matches = matches[(distances[(indices1, indices2)] < max_distance)]
    return matches
[ "def", "match_descriptors", "(", "descriptors1", ",", "descriptors2", ",", "metric", "=", "None", ",", "p", "=", "2", ",", "max_distance", "=", "np", ".", "inf", ",", "cross_check", "=", "True", ")", ":", "if", "(", "descriptors1", ".", "shape", "[", "1", "]", "!=", "descriptors2", ".", "shape", "[", "1", "]", ")", ":", "raise", "ValueError", "(", "'Descriptor length must equal.'", ")", "if", "(", "metric", "is", "None", ")", ":", "if", "np", ".", "issubdtype", "(", "descriptors1", ".", "dtype", ",", "np", ".", "bool", ")", ":", "metric", "=", "'hamming'", "else", ":", "metric", "=", "'euclidean'", "distances", "=", "cdist", "(", "descriptors1", ",", "descriptors2", ",", "metric", "=", "metric", ",", "p", "=", "p", ")", "indices1", "=", "np", ".", "arange", "(", "descriptors1", ".", "shape", "[", "0", "]", ")", "indices2", "=", "np", ".", "argmin", "(", "distances", ",", "axis", "=", "1", ")", "if", "cross_check", ":", "matches1", "=", "np", ".", "argmin", "(", "distances", ",", "axis", "=", "0", ")", "mask", "=", "(", "indices1", "==", "matches1", "[", "indices2", "]", ")", "indices1", "=", "indices1", "[", "mask", "]", "indices2", "=", "indices2", "[", "mask", "]", "matches", "=", "np", ".", "column_stack", "(", "(", "indices1", ",", "indices2", ")", ")", "if", "(", "max_distance", "<", "np", ".", "inf", ")", ":", "matches", "=", "matches", "[", "(", "distances", "[", "(", "indices1", ",", "indices2", ")", "]", "<", "max_distance", ")", "]", "return", "matches" ]
brute-force matching of descriptors .
train
false
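A tiny numeric sketch of the cross-check step in this row, using plain NumPy and SciPy; the descriptor arrays are made up, and only the mutual best pair survives.

import numpy as np
from scipy.spatial.distance import cdist

d1 = np.array([[0.0, 0.0], [1.0, 1.0]])
d2 = np.array([[0.1, 0.0], [5.0, 5.0]])
dist = cdist(d1, d2, metric='euclidean')
fwd = np.argmin(dist, axis=1)   # best match in d2 for each row of d1
bwd = np.argmin(dist, axis=0)   # best match in d1 for each row of d2
mutual = np.arange(len(d1)) == bwd[fwd]  # cross-check: keep only mutual best pairs
print(np.column_stack((np.nonzero(mutual)[0], fwd[mutual])))  # [[0 0]] only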
2,753
def get_corrected_commands(command):
    corrected_commands = (
        corrected for rule in get_rules()
        if rule.is_match(command)
        for corrected in rule.get_corrected_commands(command))
    return organize_commands(corrected_commands)
[ "def", "get_corrected_commands", "(", "command", ")", ":", "corrected_commands", "=", "(", "corrected", "for", "rule", "in", "get_rules", "(", ")", "if", "rule", ".", "is_match", "(", "command", ")", "for", "corrected", "in", "rule", ".", "get_corrected_commands", "(", "command", ")", ")", "return", "organize_commands", "(", "corrected_commands", ")" ]
returns generator with sorted and unique corrected commands .
train
true
2,754
def mask_comments(input):
    search_re = re.compile('(.*?)(#)(.*)')
    return [search_re.sub(comment_replace, line) for line in input]
[ "def", "mask_comments", "(", "input", ")", ":", "search_re", "=", "re", ".", "compile", "(", "'(.*?)(#)(.*)'", ")", "return", "[", "search_re", ".", "sub", "(", "comment_replace", ",", "line", ")", "for", "line", "in", "input", "]" ]
mask the quoted strings so we skip braces inside quoted strings .
train
false
2,755
@pytest.mark.parametrize('parallel', [True, False])
def test_no_header(parallel, read_basic, read_no_header):
    with pytest.raises(ValueError):
        read_basic('A B C\n1 2 3\n4 5 6', header_start=None, data_start=0, parallel=parallel)
    t2 = read_no_header('A B C\n1 2 3\n4 5 6', parallel=parallel)
    expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('col1', 'col2', 'col3'))
    assert_table_equal(t2, expected)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'parallel'", ",", "[", "True", ",", "False", "]", ")", "def", "test_no_header", "(", "parallel", ",", "read_basic", ",", "read_no_header", ")", ":", "with", "pytest", ".", "raises", "(", "ValueError", ")", ":", "read_basic", "(", "'A B C\\n1 2 3\\n4 5 6'", ",", "header_start", "=", "None", ",", "data_start", "=", "0", ",", "parallel", "=", "parallel", ")", "t2", "=", "read_no_header", "(", "'A B C\\n1 2 3\\n4 5 6'", ",", "parallel", "=", "parallel", ")", "expected", "=", "Table", "(", "[", "[", "'A'", ",", "'1'", ",", "'4'", "]", ",", "[", "'B'", ",", "'2'", ",", "'5'", "]", ",", "[", "'C'", ",", "'3'", ",", "'6'", "]", "]", ",", "names", "=", "(", "'col1'", ",", "'col2'", ",", "'col3'", ")", ")", "assert_table_equal", "(", "t2", ",", "expected", ")" ]
the header should not be read when header_start=none .
train
false
2,756
def metadef_tag_delete_namespace_content(context, namespace_name, session=None):
    session = (session or get_session())
    return metadef_tag_api.delete_by_namespace_name(context, namespace_name, session)
[ "def", "metadef_tag_delete_namespace_content", "(", "context", ",", "namespace_name", ",", "session", "=", "None", ")", ":", "session", "=", "(", "session", "or", "get_session", "(", ")", ")", "return", "metadef_tag_api", ".", "delete_by_namespace_name", "(", "context", ",", "namespace_name", ",", "session", ")" ]
delete an tag or raise if namespace or tag doesnt exist .
train
false
2,758
def _rgb_to_hsv(rgbs):
    (rgbs, n_dim) = _check_color_dim(rgbs)
    hsvs = list()
    for rgb in rgbs:
        rgb = rgb[:3]
        idx = np.argmax(rgb)
        val = rgb[idx]
        c = (val - np.min(rgb))
        if (c == 0):
            hue = 0
            sat = 0
        else:
            if (idx == 0):
                hue = (((rgb[1] - rgb[2]) / c) % 6)
            elif (idx == 1):
                hue = (((rgb[2] - rgb[0]) / c) + 2)
            else:
                hue = (((rgb[0] - rgb[1]) / c) + 4)
            hue *= 60
            sat = (c / val)
        hsv = [hue, sat, val]
        hsvs.append(hsv)
    hsvs = np.array(hsvs, dtype=np.float32)
    if (n_dim == 4):
        hsvs = np.concatenate((hsvs, rgbs[:, 3]), axis=1)
    return hsvs
[ "def", "_rgb_to_hsv", "(", "rgbs", ")", ":", "(", "rgbs", ",", "n_dim", ")", "=", "_check_color_dim", "(", "rgbs", ")", "hsvs", "=", "list", "(", ")", "for", "rgb", "in", "rgbs", ":", "rgb", "=", "rgb", "[", ":", "3", "]", "idx", "=", "np", ".", "argmax", "(", "rgb", ")", "val", "=", "rgb", "[", "idx", "]", "c", "=", "(", "val", "-", "np", ".", "min", "(", "rgb", ")", ")", "if", "(", "c", "==", "0", ")", ":", "hue", "=", "0", "sat", "=", "0", "else", ":", "if", "(", "idx", "==", "0", ")", ":", "hue", "=", "(", "(", "(", "rgb", "[", "1", "]", "-", "rgb", "[", "2", "]", ")", "/", "c", ")", "%", "6", ")", "elif", "(", "idx", "==", "1", ")", ":", "hue", "=", "(", "(", "(", "rgb", "[", "2", "]", "-", "rgb", "[", "0", "]", ")", "/", "c", ")", "+", "2", ")", "else", ":", "hue", "=", "(", "(", "(", "rgb", "[", "0", "]", "-", "rgb", "[", "1", "]", ")", "/", "c", ")", "+", "4", ")", "hue", "*=", "60", "sat", "=", "(", "c", "/", "val", ")", "hsv", "=", "[", "hue", ",", "sat", ",", "val", "]", "hsvs", ".", "append", "(", "hsv", ")", "hsvs", "=", "np", ".", "array", "(", "hsvs", ",", "dtype", "=", "np", ".", "float32", ")", "if", "(", "n_dim", "==", "4", ")", ":", "hsvs", "=", "np", ".", "concatenate", "(", "(", "hsvs", ",", "rgbs", "[", ":", ",", "3", "]", ")", ",", "axis", "=", "1", ")", "return", "hsvs" ]
convert nx3 or nx4 rgb to hsv .
train
true
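A single-pixel check of this row's hue arithmetic for pure green: the max channel is index 1, so hue = ((b - r)/c + 2) * 60 = 120 degrees.

rgb = (0.0, 1.0, 0.0)     # pure green
c = max(rgb) - min(rgb)   # chroma = 1.0
hue = ((rgb[2] - rgb[0]) / c + 2) * 60
print(hue, c / max(rgb))  # 120.0 1.0  (hue in degrees, saturation)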
2,759
def split_at(it, split_value):
    def _chunk_iterator(first):
        v = first
        while (v != split_value):
            (yield v)
            v = it.next()
    while True:
        (yield _chunk_iterator(it.next()))
[ "def", "split_at", "(", "it", ",", "split_value", ")", ":", "def", "_chunk_iterator", "(", "first", ")", ":", "v", "=", "first", "while", "(", "v", "!=", "split_value", ")", ":", "(", "yield", "v", ")", "v", "=", "it", ".", "next", "(", ")", "while", "True", ":", "(", "yield", "_chunk_iterator", "(", "it", ".", "next", "(", ")", ")", ")" ]
splits an iterator c{it} at values of c{split_value} .
train
true
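A hedged Python 3 rendering of this row's chunking generator (the snippet's it.next() is Python 2, and PEP 479 forbids leaking StopIteration); like the original, it assumes each chunk is fully consumed before the next one is requested.

def split_at(it, split_value):
    it = iter(it)
    for first in it:                 # ends cleanly when the source is exhausted
        def chunk(v=first):          # default arg binds the chunk's first element
            yield v
            for x in it:
                if x == split_value:
                    return           # stop this chunk at the separator
                yield x
        yield chunk()

for chunk in split_at([1, 2, 0, 3, 4], 0):
    print(list(chunk))  # [1, 2] then [3, 4]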
2,761
def _get_meg_system(info):
    system = '306m'
    for ch in info['chs']:
        if (ch['kind'] == FIFF.FIFFV_MEG_CH):
            coil_type = (ch['coil_type'] & 65535)
            if (coil_type == FIFF.FIFFV_COIL_NM_122):
                system = '122m'
                break
            elif ((coil_type // 1000) == 3):
                system = '306m'
                break
            elif ((coil_type == FIFF.FIFFV_COIL_MAGNES_MAG) or (coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD)):
                nmag = np.sum([(c['kind'] == FIFF.FIFFV_MEG_CH) for c in info['chs']])
                system = ('Magnes_3600wh' if (nmag > 150) else 'Magnes_2500wh')
                break
            elif (coil_type == FIFF.FIFFV_COIL_CTF_GRAD):
                system = 'CTF_275'
                break
            elif (coil_type == FIFF.FIFFV_COIL_KIT_GRAD):
                system = 'KIT'
                break
            elif (coil_type == FIFF.FIFFV_COIL_BABY_GRAD):
                system = 'BabySQUID'
                break
    return system
[ "def", "_get_meg_system", "(", "info", ")", ":", "system", "=", "'306m'", "for", "ch", "in", "info", "[", "'chs'", "]", ":", "if", "(", "ch", "[", "'kind'", "]", "==", "FIFF", ".", "FIFFV_MEG_CH", ")", ":", "coil_type", "=", "(", "ch", "[", "'coil_type'", "]", "&", "65535", ")", "if", "(", "coil_type", "==", "FIFF", ".", "FIFFV_COIL_NM_122", ")", ":", "system", "=", "'122m'", "break", "elif", "(", "(", "coil_type", "//", "1000", ")", "==", "3", ")", ":", "system", "=", "'306m'", "break", "elif", "(", "(", "coil_type", "==", "FIFF", ".", "FIFFV_COIL_MAGNES_MAG", ")", "or", "(", "coil_type", "==", "FIFF", ".", "FIFFV_COIL_MAGNES_GRAD", ")", ")", ":", "nmag", "=", "np", ".", "sum", "(", "[", "(", "c", "[", "'kind'", "]", "==", "FIFF", ".", "FIFFV_MEG_CH", ")", "for", "c", "in", "info", "[", "'chs'", "]", "]", ")", "system", "=", "(", "'Magnes_3600wh'", "if", "(", "nmag", ">", "150", ")", "else", "'Magnes_2500wh'", ")", "break", "elif", "(", "coil_type", "==", "FIFF", ".", "FIFFV_COIL_CTF_GRAD", ")", ":", "system", "=", "'CTF_275'", "break", "elif", "(", "coil_type", "==", "FIFF", ".", "FIFFV_COIL_KIT_GRAD", ")", ":", "system", "=", "'KIT'", "break", "elif", "(", "coil_type", "==", "FIFF", ".", "FIFFV_COIL_BABY_GRAD", ")", ":", "system", "=", "'BabySQUID'", "break", "return", "system" ]
educated guess for the helmet type based on channels .
train
false
2,762
def get_quote_num(num, count, name):
    if num:
        num = int(num)
    if (count == 0):
        raise Exception('No quotes found for {}.'.format(name))
    if (num and (num < 0)):
        num = (((count + num) + 1) if ((num + count) > (-1)) else (count + 1))
    if (num and (num > count)):
        raise Exception('I only have {} quote{} for {}.'.format(count, ('s', '')[(count == 1)], name))
    if (num and (num == 0)):
        num = 1
    if (not num):
        num = random.randint(1, count)
    return num
[ "def", "get_quote_num", "(", "num", ",", "count", ",", "name", ")", ":", "if", "num", ":", "num", "=", "int", "(", "num", ")", "if", "(", "count", "==", "0", ")", ":", "raise", "Exception", "(", "'No quotes found for {}.'", ".", "format", "(", "name", ")", ")", "if", "(", "num", "and", "(", "num", "<", "0", ")", ")", ":", "num", "=", "(", "(", "(", "count", "+", "num", ")", "+", "1", ")", "if", "(", "(", "num", "+", "count", ")", ">", "(", "-", "1", ")", ")", "else", "(", "count", "+", "1", ")", ")", "if", "(", "num", "and", "(", "num", ">", "count", ")", ")", ":", "raise", "Exception", "(", "'I only have {} quote{} for {}.'", ".", "format", "(", "count", ",", "(", "'s'", ",", "''", ")", "[", "(", "count", "==", "1", ")", "]", ",", "name", ")", ")", "if", "(", "num", "and", "(", "num", "==", "0", ")", ")", ":", "num", "=", "1", "if", "(", "not", "num", ")", ":", "num", "=", "random", ".", "randint", "(", "1", ",", "count", ")", "return", "num" ]
returns the quote number to fetch from the db .
train
false
2,763
def getopt(args, shortopts, longopts=[]):
    opts = []
    if (type(longopts) == type('')):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    while (args and args[0].startswith('-') and (args[0] != '-')):
        if (args[0] == '--'):
            args = args[1:]
            break
        if args[0].startswith('--'):
            (opts, args) = do_longs(opts, args[0][2:], longopts, args[1:])
        else:
            (opts, args) = do_shorts(opts, args[0][1:], shortopts, args[1:])
    return (opts, args)
[ "def", "getopt", "(", "args", ",", "shortopts", ",", "longopts", "=", "[", "]", ")", ":", "opts", "=", "[", "]", "if", "(", "type", "(", "longopts", ")", "==", "type", "(", "''", ")", ")", ":", "longopts", "=", "[", "longopts", "]", "else", ":", "longopts", "=", "list", "(", "longopts", ")", "while", "(", "args", "and", "args", "[", "0", "]", ".", "startswith", "(", "'-'", ")", "and", "(", "args", "[", "0", "]", "!=", "'-'", ")", ")", ":", "if", "(", "args", "[", "0", "]", "==", "'--'", ")", ":", "args", "=", "args", "[", "1", ":", "]", "break", "if", "args", "[", "0", "]", ".", "startswith", "(", "'--'", ")", ":", "(", "opts", ",", "args", ")", "=", "do_longs", "(", "opts", ",", "args", "[", "0", "]", "[", "2", ":", "]", ",", "longopts", ",", "args", "[", "1", ":", "]", ")", "else", ":", "(", "opts", ",", "args", ")", "=", "do_shorts", "(", "opts", ",", "args", "[", "0", "]", "[", "1", ":", "]", ",", "shortopts", ",", "args", "[", "1", ":", "]", ")", "return", "(", "opts", ",", "args", ")" ]
getopt -> opts .
train
true
2,764
def remove_error_class(klass):
    if isinstance(klass, python.str_types):
        if (klass not in ERROR_CLASS_MAP):
            raise ValueError(('Code %s is not registered' % (klass,)))
    elif isinstance(klass, python.class_types):
        classes = ERROR_CLASS_MAP.values()
        if (klass not in classes):
            raise ValueError(('Class %s is not registered' % (klass,)))
        klass = ERROR_CLASS_MAP.keys()[classes.index(klass)]
    else:
        raise TypeError('Invalid type, expected class or string')
    del ERROR_CLASS_MAP[klass]
[ "def", "remove_error_class", "(", "klass", ")", ":", "if", "isinstance", "(", "klass", ",", "python", ".", "str_types", ")", ":", "if", "(", "klass", "not", "in", "ERROR_CLASS_MAP", ")", ":", "raise", "ValueError", "(", "(", "'Code %s is not registered'", "%", "(", "klass", ",", ")", ")", ")", "elif", "isinstance", "(", "klass", ",", "python", ".", "class_types", ")", ":", "classes", "=", "ERROR_CLASS_MAP", ".", "values", "(", ")", "if", "(", "klass", "not", "in", "classes", ")", ":", "raise", "ValueError", "(", "(", "'Class %s is not registered'", "%", "(", "klass", ",", ")", ")", ")", "klass", "=", "ERROR_CLASS_MAP", ".", "keys", "(", ")", "[", "classes", ".", "index", "(", "klass", ")", "]", "else", ":", "raise", "TypeError", "(", "'Invalid type, expected class or string'", ")", "del", "ERROR_CLASS_MAP", "[", "klass", "]" ]
removes a class from the l{error_class_map} .
train
true
2,765
def UpdateIncludeState(filename, include_state, io=codecs):
    headerfile = None
    try:
        headerfile = io.open(filename, 'r', 'utf8', 'replace')
    except IOError:
        return False
    linenum = 0
    for line in headerfile:
        linenum += 1
        clean_line = CleanseComments(line)
        match = _RE_PATTERN_INCLUDE.search(clean_line)
        if match:
            include = match.group(2)
            include_state.setdefault(include, ('%s:%d' % (filename, linenum)))
    return True
[ "def", "UpdateIncludeState", "(", "filename", ",", "include_state", ",", "io", "=", "codecs", ")", ":", "headerfile", "=", "None", "try", ":", "headerfile", "=", "io", ".", "open", "(", "filename", ",", "'r'", ",", "'utf8'", ",", "'replace'", ")", "except", "IOError", ":", "return", "False", "linenum", "=", "0", "for", "line", "in", "headerfile", ":", "linenum", "+=", "1", "clean_line", "=", "CleanseComments", "(", "line", ")", "match", "=", "_RE_PATTERN_INCLUDE", ".", "search", "(", "clean_line", ")", "if", "match", ":", "include", "=", "match", ".", "group", "(", "2", ")", "include_state", ".", "setdefault", "(", "include", ",", "(", "'%s:%d'", "%", "(", "filename", ",", "linenum", ")", ")", ")", "return", "True" ]
fill up the include_dict with new includes found from the file .
train
true
2,766
def sorted_score(scores):
    score_lst = [(scores[k], k) for k in scores]
    sort_lst = sorted(score_lst, reverse=True)
    return [(i[1], i[0]) for i in sort_lst]
[ "def", "sorted_score", "(", "scores", ")", ":", "score_lst", "=", "[", "(", "scores", "[", "k", "]", ",", "k", ")", "for", "k", "in", "scores", "]", "sort_lst", "=", "sorted", "(", "score_lst", ",", "reverse", "=", "True", ")", "return", "[", "(", "i", "[", "1", "]", ",", "i", "[", "0", "]", ")", "for", "i", "in", "sort_lst", "]" ]
sorts the scores from max to min, returning (key, score) pairs .
train
false
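A runnable sketch of the sorted_score record above, with a made-up scores dict (the dict is an assumption, not from the dataset):

def sorted_score(scores):
    # same logic as the record: sort (score, key) pairs descending, then swap back
    score_lst = [(scores[k], k) for k in scores]
    sort_lst = sorted(score_lst, reverse=True)
    return [(i[1], i[0]) for i in sort_lst]

print(sorted_score({'alice': 3, 'bob': 7, 'carol': 5}))
# [('bob', 7), ('carol', 5), ('alice', 3)]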
2,767
def _build_instance_metadata_url(url, version, path): return ('%s/%s/%s' % (url, version, path))
[ "def", "_build_instance_metadata_url", "(", "url", ",", "version", ",", "path", ")", ":", "return", "(", "'%s/%s/%s'", "%", "(", "url", ",", "version", ",", "path", ")", ")" ]
builds an ec2 metadata url for fetching information about an instance .
train
false
2,769
def get_current_theme_name(override=None): if (override and ((override in themes) or (override == '__common__'))): return override theme_name = request.args.get('theme', request.preferences.get_value('theme')) if (theme_name not in themes): theme_name = default_theme return theme_name
[ "def", "get_current_theme_name", "(", "override", "=", "None", ")", ":", "if", "(", "override", "and", "(", "(", "override", "in", "themes", ")", "or", "(", "override", "==", "'__common__'", ")", ")", ")", ":", "return", "override", "theme_name", "=", "request", ".", "args", ".", "get", "(", "'theme'", ",", "request", ".", "preferences", ".", "get_value", "(", "'theme'", ")", ")", "if", "(", "theme_name", "not", "in", "themes", ")", ":", "theme_name", "=", "default_theme", "return", "theme_name" ]
returns theme name .
train
true
2,770
@toolz.memoize def alias_it(s): if (hasattr(s, '_group_by_clause') and (s._group_by_clause is not None) and len(s._group_by_clause)): return s.alias(next(aliases)) else: return s
[ "@", "toolz", ".", "memoize", "def", "alias_it", "(", "s", ")", ":", "if", "(", "hasattr", "(", "s", ",", "'_group_by_clause'", ")", "and", "(", "s", ".", "_group_by_clause", "is", "not", "None", ")", "and", "len", "(", "s", ".", "_group_by_clause", ")", ")", ":", "return", "s", ".", "alias", "(", "next", "(", "aliases", ")", ")", "else", ":", "return", "s" ]
alias a selectable if it has a group by clause .
train
false
2,772
@require_context def volume_type_get_all(context, inactive=False, filters=None): filters = (filters or {}) read_deleted = ('yes' if inactive else 'no') rows = model_query(context, models.VolumeTypes, read_deleted=read_deleted).options(joinedload('extra_specs')).order_by('name').all() result = {} for row in rows: result[row['name']] = _dict_with_extra_specs(row) return result
[ "@", "require_context", "def", "volume_type_get_all", "(", "context", ",", "inactive", "=", "False", ",", "filters", "=", "None", ")", ":", "filters", "=", "(", "filters", "or", "{", "}", ")", "read_deleted", "=", "(", "'yes'", "if", "inactive", "else", "'no'", ")", "rows", "=", "model_query", "(", "context", ",", "models", ".", "VolumeTypes", ",", "read_deleted", "=", "read_deleted", ")", ".", "options", "(", "joinedload", "(", "'extra_specs'", ")", ")", ".", "order_by", "(", "'name'", ")", ".", "all", "(", ")", "result", "=", "{", "}", "for", "row", "in", "rows", ":", "result", "[", "row", "[", "'name'", "]", "]", "=", "_dict_with_extra_specs", "(", "row", ")", "return", "result" ]
get all volume types .
train
false
2,774
def check_acls_comm_obj(obj, profile): if obj.read_permission_public: return True if (obj.read_permission_reviewer and check_acls(profile, obj, 'reviewer')): return True if (obj.read_permission_senior_reviewer and check_acls(profile, obj, 'senior_reviewer')): return True if (obj.read_permission_mozilla_contact and check_acls(profile, obj, 'moz_contact')): return True if (obj.read_permission_staff and check_acls(profile, obj, 'admin')): return True return False
[ "def", "check_acls_comm_obj", "(", "obj", ",", "profile", ")", ":", "if", "obj", ".", "read_permission_public", ":", "return", "True", "if", "(", "obj", ".", "read_permission_reviewer", "and", "check_acls", "(", "profile", ",", "obj", ",", "'reviewer'", ")", ")", ":", "return", "True", "if", "(", "obj", ".", "read_permission_senior_reviewer", "and", "check_acls", "(", "profile", ",", "obj", ",", "'senior_reviewer'", ")", ")", ":", "return", "True", "if", "(", "obj", ".", "read_permission_mozilla_contact", "and", "check_acls", "(", "profile", ",", "obj", ",", "'moz_contact'", ")", ")", ":", "return", "True", "if", "(", "obj", ".", "read_permission_staff", "and", "check_acls", "(", "profile", ",", "obj", ",", "'admin'", ")", ")", ":", "return", "True", "return", "False" ]
cross-reference acls and note/thread permissions .
train
false
2,775
def mvsk2mc(args): (mu, sig2, sk, kur) = args cnt = ([None] * 4) cnt[0] = mu cnt[1] = sig2 cnt[2] = (sk * (sig2 ** 1.5)) cnt[3] = ((kur + 3.0) * (sig2 ** 2.0)) return tuple(cnt)
[ "def", "mvsk2mc", "(", "args", ")", ":", "(", "mu", ",", "sig2", ",", "sk", ",", "kur", ")", "=", "args", "cnt", "=", "(", "[", "None", "]", "*", "4", ")", "cnt", "[", "0", "]", "=", "mu", "cnt", "[", "1", "]", "=", "sig2", "cnt", "[", "2", "]", "=", "(", "sk", "*", "(", "sig2", "**", "1.5", ")", ")", "cnt", "[", "3", "]", "=", "(", "(", "kur", "+", "3.0", ")", "*", "(", "sig2", "**", "2.0", ")", ")", "return", "tuple", "(", "cnt", ")" ]
converts mean, variance, skew and kurtosis to central moments .
train
false
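A worked check of the mvsk2mc record above (the input values are made up): for variance 4.0 the third central moment is skew * 4.0 ** 1.5 and the fourth is (kurtosis + 3) * 4.0 ** 2.

def mvsk2mc(args):
    # mean, variance, skew, kurtosis -> first four central moments
    mu, sig2, sk, kur = args
    return (mu, sig2, sk * sig2 ** 1.5, (kur + 3.0) * sig2 ** 2.0)

print(mvsk2mc((0.0, 4.0, 0.5, 0.0)))
# (0.0, 4.0, 4.0, 48.0) -- excess kurtosis 0 maps back to a raw kurtosis of 3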
2,780
def _get_vif_instance(vif, cls, **kwargs): return cls(id=vif['id'], address=vif['address'], network=_nova_to_osvif_network(vif['network']), has_traffic_filtering=vif.is_neutron_filtering_enabled(), preserve_on_delete=vif['preserve_on_delete'], active=vif['active'], **kwargs)
[ "def", "_get_vif_instance", "(", "vif", ",", "cls", ",", "**", "kwargs", ")", ":", "return", "cls", "(", "id", "=", "vif", "[", "'id'", "]", ",", "address", "=", "vif", "[", "'address'", "]", ",", "network", "=", "_nova_to_osvif_network", "(", "vif", "[", "'network'", "]", ")", ",", "has_traffic_filtering", "=", "vif", ".", "is_neutron_filtering_enabled", "(", ")", ",", "preserve_on_delete", "=", "vif", "[", "'preserve_on_delete'", "]", ",", "active", "=", "vif", "[", "'active'", "]", ",", "**", "kwargs", ")" ]
instantiate an os-vif vif instance .
train
false
2,781
def extract_auth_sub_token_from_url(url, scopes_param_prefix='auth_sub_scopes', rsa_key=None): if isinstance(url, (str, unicode)): url = atom.url.parse_url(url) if ('token' not in url.params): return None scopes = [] if (scopes_param_prefix in url.params): scopes = url.params[scopes_param_prefix].split(' ') token_value = url.params['token'] if rsa_key: token = SecureAuthSubToken(rsa_key, scopes=scopes) else: token = AuthSubToken(scopes=scopes) token.set_token_string(token_value) return token
[ "def", "extract_auth_sub_token_from_url", "(", "url", ",", "scopes_param_prefix", "=", "'auth_sub_scopes'", ",", "rsa_key", "=", "None", ")", ":", "if", "isinstance", "(", "url", ",", "(", "str", ",", "unicode", ")", ")", ":", "url", "=", "atom", ".", "url", ".", "parse_url", "(", "url", ")", "if", "(", "'token'", "not", "in", "url", ".", "params", ")", ":", "return", "None", "scopes", "=", "[", "]", "if", "(", "scopes_param_prefix", "in", "url", ".", "params", ")", ":", "scopes", "=", "url", ".", "params", "[", "scopes_param_prefix", "]", ".", "split", "(", "' '", ")", "token_value", "=", "url", ".", "params", "[", "'token'", "]", "if", "rsa_key", ":", "token", "=", "SecureAuthSubToken", "(", "rsa_key", ",", "scopes", "=", "scopes", ")", "else", ":", "token", "=", "AuthSubToken", "(", "scopes", "=", "scopes", ")", "token", ".", "set_token_string", "(", "token_value", ")", "return", "token" ]
creates an authsubtoken and sets the token value and scopes from the url .
train
false
2,783
def _write_bem_surfaces_block(fid, surfs): for surf in surfs: start_block(fid, FIFF.FIFFB_BEM_SURF) write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma']) write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id']) write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame']) write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np']) write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri']) write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr']) write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES, (surf['tris'] + 1)) if (('nn' in surf) and (surf['nn'] is not None) and (len(surf['nn']) > 0)): write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn']) end_block(fid, FIFF.FIFFB_BEM_SURF)
[ "def", "_write_bem_surfaces_block", "(", "fid", ",", "surfs", ")", ":", "for", "surf", "in", "surfs", ":", "start_block", "(", "fid", ",", "FIFF", ".", "FIFFB_BEM_SURF", ")", "write_float", "(", "fid", ",", "FIFF", ".", "FIFF_BEM_SIGMA", ",", "surf", "[", "'sigma'", "]", ")", "write_int", "(", "fid", ",", "FIFF", ".", "FIFF_BEM_SURF_ID", ",", "surf", "[", "'id'", "]", ")", "write_int", "(", "fid", ",", "FIFF", ".", "FIFF_MNE_COORD_FRAME", ",", "surf", "[", "'coord_frame'", "]", ")", "write_int", "(", "fid", ",", "FIFF", ".", "FIFF_BEM_SURF_NNODE", ",", "surf", "[", "'np'", "]", ")", "write_int", "(", "fid", ",", "FIFF", ".", "FIFF_BEM_SURF_NTRI", ",", "surf", "[", "'ntri'", "]", ")", "write_float_matrix", "(", "fid", ",", "FIFF", ".", "FIFF_BEM_SURF_NODES", ",", "surf", "[", "'rr'", "]", ")", "write_int_matrix", "(", "fid", ",", "FIFF", ".", "FIFF_BEM_SURF_TRIANGLES", ",", "(", "surf", "[", "'tris'", "]", "+", "1", ")", ")", "if", "(", "(", "'nn'", "in", "surf", ")", "and", "(", "surf", "[", "'nn'", "]", "is", "not", "None", ")", "and", "(", "len", "(", "surf", "[", "'nn'", "]", ")", ">", "0", ")", ")", ":", "write_float_matrix", "(", "fid", ",", "FIFF", ".", "FIFF_BEM_SURF_NORMALS", ",", "surf", "[", "'nn'", "]", ")", "end_block", "(", "fid", ",", "FIFF", ".", "FIFFB_BEM_SURF", ")" ]
helper to actually write bem surfaces .
train
false
2,784
def deprecatedToUsefulText(name, deprecated): from twisted.python.deprecate import _getDeprecationWarningString version = versionToUsefulObject(deprecated[1]) if deprecated[2]: if isinstance(deprecated[2], ast.Keyword): replacement = deprecated[2].asList()[1].value else: replacement = deprecated[2].value else: replacement = None return (_getDeprecationWarningString(name, version, replacement=replacement) + '.')
[ "def", "deprecatedToUsefulText", "(", "name", ",", "deprecated", ")", ":", "from", "twisted", ".", "python", ".", "deprecate", "import", "_getDeprecationWarningString", "version", "=", "versionToUsefulObject", "(", "deprecated", "[", "1", "]", ")", "if", "deprecated", "[", "2", "]", ":", "if", "isinstance", "(", "deprecated", "[", "2", "]", ",", "ast", ".", "Keyword", ")", ":", "replacement", "=", "deprecated", "[", "2", "]", ".", "asList", "(", ")", "[", "1", "]", ".", "value", "else", ":", "replacement", "=", "deprecated", "[", "2", "]", ".", "value", "else", ":", "replacement", "=", "None", "return", "(", "_getDeprecationWarningString", "(", "name", ",", "version", ",", "replacement", "=", "replacement", ")", "+", "'.'", ")" ]
change a @deprecated declaration to a display string .
train
false
2,786
def filter_entity(entity_ref): if entity_ref: entity_ref.pop('dn', None) return entity_ref
[ "def", "filter_entity", "(", "entity_ref", ")", ":", "if", "entity_ref", ":", "entity_ref", ".", "pop", "(", "'dn'", ",", "None", ")", "return", "entity_ref" ]
filter out private items in an entity dict .
train
false
2,787
def information(title, message=None, details=None, informative_text=None): if (message is None): message = title mbox = QtWidgets.QMessageBox(active_window()) mbox.setStandardButtons(QtWidgets.QMessageBox.Close) mbox.setDefaultButton(QtWidgets.QMessageBox.Close) mbox.setWindowTitle(title) mbox.setWindowModality(Qt.WindowModal) mbox.setTextFormat(Qt.PlainText) mbox.setText(message) if informative_text: mbox.setInformativeText(informative_text) if details: mbox.setDetailedText(details) pixmap = icons.cola().pixmap(defs.large_icon) mbox.setIconPixmap(pixmap) mbox.exec_()
[ "def", "information", "(", "title", ",", "message", "=", "None", ",", "details", "=", "None", ",", "informative_text", "=", "None", ")", ":", "if", "(", "message", "is", "None", ")", ":", "message", "=", "title", "mbox", "=", "QtWidgets", ".", "QMessageBox", "(", "active_window", "(", ")", ")", "mbox", ".", "setStandardButtons", "(", "QtWidgets", ".", "QMessageBox", ".", "Close", ")", "mbox", ".", "setDefaultButton", "(", "QtWidgets", ".", "QMessageBox", ".", "Close", ")", "mbox", ".", "setWindowTitle", "(", "title", ")", "mbox", ".", "setWindowModality", "(", "Qt", ".", "WindowModal", ")", "mbox", ".", "setTextFormat", "(", "Qt", ".", "PlainText", ")", "mbox", ".", "setText", "(", "message", ")", "if", "informative_text", ":", "mbox", ".", "setInformativeText", "(", "informative_text", ")", "if", "details", ":", "mbox", ".", "setDetailedText", "(", "details", ")", "pixmap", "=", "icons", ".", "cola", "(", ")", ".", "pixmap", "(", "defs", ".", "large_icon", ")", "mbox", ".", "setIconPixmap", "(", "pixmap", ")", "mbox", ".", "exec_", "(", ")" ]
show information with the provided title and message .
train
false
2,788
@pytest.mark.django_db def test_update_comment(project0_nongnu, store0): db_unit = _update_translation(store0, 0, {'translator_comment': u'7amada'}) store0.sync() store_unit = store0.file.store.findid(db_unit.getid()) assert (db_unit.getnotes(origin='translator') == u'7amada') assert (db_unit.getnotes(origin='translator') == store_unit.getnotes(origin='translator')) po_file = factory.getobject(store0.file.path) assert (db_unit.getnotes(origin='translator') == po_file.findid(db_unit.getid()).getnotes(origin='translator'))
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_update_comment", "(", "project0_nongnu", ",", "store0", ")", ":", "db_unit", "=", "_update_translation", "(", "store0", ",", "0", ",", "{", "'translator_comment'", ":", "u'7amada'", "}", ")", "store0", ".", "sync", "(", ")", "store_unit", "=", "store0", ".", "file", ".", "store", ".", "findid", "(", "db_unit", ".", "getid", "(", ")", ")", "assert", "(", "db_unit", ".", "getnotes", "(", "origin", "=", "'translator'", ")", "==", "u'7amada'", ")", "assert", "(", "db_unit", ".", "getnotes", "(", "origin", "=", "'translator'", ")", "==", "store_unit", ".", "getnotes", "(", "origin", "=", "'translator'", ")", ")", "po_file", "=", "factory", ".", "getobject", "(", "store0", ".", "file", ".", "path", ")", "assert", "(", "db_unit", ".", "getnotes", "(", "origin", "=", "'translator'", ")", "==", "po_file", ".", "findid", "(", "db_unit", ".", "getid", "(", ")", ")", ".", "getnotes", "(", "origin", "=", "'translator'", ")", ")" ]
tests translator comments are stored and synced .
train
false
2,789
@profiler.trace def vpnservice_create(request, **kwargs): body = {'vpnservice': {'admin_state_up': kwargs['admin_state_up'], 'name': kwargs['name'], 'description': kwargs['description'], 'router_id': kwargs['router_id'], 'subnet_id': kwargs['subnet_id']}} vpnservice = neutronclient(request).create_vpnservice(body).get('vpnservice') return VPNService(vpnservice)
[ "@", "profiler", ".", "trace", "def", "vpnservice_create", "(", "request", ",", "**", "kwargs", ")", ":", "body", "=", "{", "'vpnservice'", ":", "{", "'admin_state_up'", ":", "kwargs", "[", "'admin_state_up'", "]", ",", "'name'", ":", "kwargs", "[", "'name'", "]", ",", "'description'", ":", "kwargs", "[", "'description'", "]", ",", "'router_id'", ":", "kwargs", "[", "'router_id'", "]", ",", "'subnet_id'", ":", "kwargs", "[", "'subnet_id'", "]", "}", "}", "vpnservice", "=", "neutronclient", "(", "request", ")", ".", "create_vpnservice", "(", "body", ")", ".", "get", "(", "'vpnservice'", ")", "return", "VPNService", "(", "vpnservice", ")" ]
create vpnservice .
train
false
2,790
def retrySQL(timeoutSec=(60 * 5), logger=None): if (logger is None): logger = logging.getLogger(__name__) def retryFilter(e, args, kwargs): if isinstance(e, (pymysql.InternalError, pymysql.OperationalError)): if (e.args and (e.args[0] in _ALL_RETRIABLE_ERROR_CODES)): return True elif isinstance(e, pymysql.Error): if (e.args and inspect.isclass(e.args[0]) and issubclass(e.args[0], socket_error)): return True return False retryExceptions = tuple([pymysql.InternalError, pymysql.OperationalError, pymysql.Error]) return make_retry_decorator(timeoutSec=timeoutSec, initialRetryDelaySec=0.1, maxRetryDelaySec=10, retryExceptions=retryExceptions, retryFilter=retryFilter, logger=logger)
[ "def", "retrySQL", "(", "timeoutSec", "=", "(", "60", "*", "5", ")", ",", "logger", "=", "None", ")", ":", "if", "(", "logger", "is", "None", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "def", "retryFilter", "(", "e", ",", "args", ",", "kwargs", ")", ":", "if", "isinstance", "(", "e", ",", "(", "pymysql", ".", "InternalError", ",", "pymysql", ".", "OperationalError", ")", ")", ":", "if", "(", "e", ".", "args", "and", "(", "e", ".", "args", "[", "0", "]", "in", "_ALL_RETRIABLE_ERROR_CODES", ")", ")", ":", "return", "True", "elif", "isinstance", "(", "e", ",", "pymysql", ".", "Error", ")", ":", "if", "(", "e", ".", "args", "and", "inspect", ".", "isclass", "(", "e", ".", "args", "[", "0", "]", ")", "and", "issubclass", "(", "e", ".", "args", "[", "0", "]", ",", "socket_error", ")", ")", ":", "return", "True", "return", "False", "retryExceptions", "=", "tuple", "(", "[", "pymysql", ".", "InternalError", ",", "pymysql", ".", "OperationalError", ",", "pymysql", ".", "Error", "]", ")", "return", "make_retry_decorator", "(", "timeoutSec", "=", "timeoutSec", ",", "initialRetryDelaySec", "=", "0.1", ",", "maxRetryDelaySec", "=", "10", ",", "retryExceptions", "=", "retryExceptions", ",", "retryFilter", "=", "retryFilter", ",", "logger", "=", "logger", ")" ]
return a closure suitable for use as a decorator for retrying a pymysql dao function on certain failures that warrant retries .
train
true
2,791
def corr_arma(k_vars, ar, ma): from scipy.linalg import toeplitz from statsmodels.tsa.arima_process import arma2ar ar = arma2ar(ar, ma, nobs=k_vars)[:k_vars] return toeplitz(ar)
[ "def", "corr_arma", "(", "k_vars", ",", "ar", ",", "ma", ")", ":", "from", "scipy", ".", "linalg", "import", "toeplitz", "from", "statsmodels", ".", "tsa", ".", "arima_process", "import", "arma2ar", "ar", "=", "arma2ar", "(", "ar", ",", "ma", ",", "nobs", "=", "k_vars", ")", "[", ":", "k_vars", "]", "return", "toeplitz", "(", "ar", ")" ]
create arma correlation matrix : converts the arma process to an autoregressive lag-polynomial with k_vars lags ; ar and ma might need to be switched for generating the residual process .
train
false
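The final toeplitz step in the corr_arma record is what turns the lag-polynomial coefficients into a correlation matrix; a standalone sketch with made-up autocorrelations:

from scipy.linalg import toeplitz

# first column holds the autocorrelations at lags 0, 1, 2;
# toeplitz() mirrors it into a symmetric correlation matrix
print(toeplitz([1.0, 0.5, 0.25]))
# [[1.    0.5   0.25]
#  [0.5   1.    0.5 ]
#  [0.25  0.5   1.  ]]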
2,792
def pushValue(value): getCurrentThreadData().valueStack.append(copy.deepcopy(value))
[ "def", "pushValue", "(", "value", ")", ":", "getCurrentThreadData", "(", ")", ".", "valueStack", ".", "append", "(", "copy", ".", "deepcopy", "(", "value", ")", ")" ]
push value to the stack .
train
false
2,793
def RequireTestImage(f): @functools.wraps(f) def Decorator(testinstance): image_path = os.path.join(testinstance.base_path, 'win7_trial_64bit.raw') if os.access(image_path, os.R_OK): return f(testinstance) else: return testinstance.skipTest('No win7_trial_64bit.raw memory image,skipping test. Download it here: goo.gl/19AJGl and put it in test_data.') return Decorator
[ "def", "RequireTestImage", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "Decorator", "(", "testinstance", ")", ":", "image_path", "=", "os", ".", "path", ".", "join", "(", "testinstance", ".", "base_path", ",", "'win7_trial_64bit.raw'", ")", "if", "os", ".", "access", "(", "image_path", ",", "os", ".", "R_OK", ")", ":", "return", "f", "(", "testinstance", ")", "else", ":", "return", "testinstance", ".", "skipTest", "(", "'No win7_trial_64bit.raw memory image,skipping test. Download it here: goo.gl/19AJGl and put it in test_data.'", ")", "return", "Decorator" ]
decorator that skips tests if we don't have the memory image .
train
false
2,796
def col_by_name(colname): col = 0 pow = 1 for i in xrange((len(colname) - 1), (-1), (-1)): ch = colname[i] col += (((ord(ch) - ord('A')) + 1) * pow) pow *= 26 return (col - 1)
[ "def", "col_by_name", "(", "colname", ")", ":", "col", "=", "0", "pow", "=", "1", "for", "i", "in", "xrange", "(", "(", "len", "(", "colname", ")", "-", "1", ")", ",", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", ":", "ch", "=", "colname", "[", "i", "]", "col", "+=", "(", "(", "(", "ord", "(", "ch", ")", "-", "ord", "(", "'A'", ")", ")", "+", "1", ")", "*", "pow", ")", "pow", "*=", "26", "return", "(", "col", "-", "1", ")" ]
converts a column name to a zero-based index : a -> 0 , z -> 25 , aa -> 26 .
train
false
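The col_by_name record above is base-26 column naming; a python 3 copy (xrange swapped for range, the only change) with a few spot checks:

def col_by_name(colname):
    col, pow_ = 0, 1
    for i in range(len(colname) - 1, -1, -1):
        col += (ord(colname[i]) - ord('A') + 1) * pow_
        pow_ *= 26
    return col - 1

assert col_by_name('A') == 0
assert col_by_name('Z') == 25
assert col_by_name('AA') == 26
assert col_by_name('AZ') == 51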
2,797
def get_all_elbs(region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: return [e for e in conn.get_all_load_balancers()] except boto.exception.BotoServerError as error: log.warning(error) return []
[ "def", "get_all_elbs", "(", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "try", ":", "return", "[", "e", "for", "e", "in", "conn", ".", "get_all_load_balancers", "(", ")", "]", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "error", ":", "log", ".", "warning", "(", "error", ")", "return", "[", "]" ]
return all load balancers associated with an account cli example: .
train
true
2,798
def errno_from_exception(e): if hasattr(e, 'errno'): return e.errno elif e.args: return e.args[0] else: return None
[ "def", "errno_from_exception", "(", "e", ")", ":", "if", "hasattr", "(", "e", ",", "'errno'", ")", ":", "return", "e", ".", "errno", "elif", "e", ".", "args", ":", "return", "e", ".", "args", "[", "0", "]", "else", ":", "return", "None" ]
provides the errno from an exception object .
train
true
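A sketch of the errno_from_exception record above; note the args[0] fallback can return a non-errno value, which is worth knowing when reusing this pattern (the exceptions below are made up, and the numeric ECONNRESET value is platform-dependent):

import errno

def errno_from_exception(e):
    if hasattr(e, 'errno'):
        return e.errno
    elif e.args:
        return e.args[0]
    return None

print(errno_from_exception(OSError(errno.ECONNRESET, 'reset')))  # ECONNRESET (104 on linux)
print(errno_from_exception(ValueError('not an errno')))          # 'not an errno' -- the caveat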
2,799
def fetch_token_header_payload(token): token = token.encode(u'utf-8') try: (signing_input, crypto_segment) = token.rsplit('.', 1) (header_segment, payload_segment) = signing_input.split('.', 1) except ValueError: raise jwt.DecodeError(u'Not enough segments') try: header = json.loads(jwt.utils.base64url_decode(header_segment).decode(u'utf-8')) except TypeError as e: current_app.logger.exception(e) raise jwt.DecodeError(u'Invalid header padding') try: payload = json.loads(jwt.utils.base64url_decode(payload_segment).decode(u'utf-8')) except TypeError as e: current_app.logger.exception(e) raise jwt.DecodeError(u'Invalid payload padding') return (header, payload)
[ "def", "fetch_token_header_payload", "(", "token", ")", ":", "token", "=", "token", ".", "encode", "(", "u'utf-8'", ")", "try", ":", "(", "signing_input", ",", "crypto_segment", ")", "=", "token", ".", "rsplit", "(", "'.'", ",", "1", ")", "(", "header_segment", ",", "payload_segment", ")", "=", "signing_input", ".", "split", "(", "'.'", ",", "1", ")", "except", "ValueError", ":", "raise", "jwt", ".", "DecodeError", "(", "u'Not enough segments'", ")", "try", ":", "header", "=", "json", ".", "loads", "(", "jwt", ".", "utils", ".", "base64url_decode", "(", "header_segment", ")", ".", "decode", "(", "u'utf-8'", ")", ")", "except", "TypeError", "as", "e", ":", "current_app", ".", "logger", ".", "exception", "(", "e", ")", "raise", "jwt", ".", "DecodeError", "(", "u'Invalid header padding'", ")", "try", ":", "payload", "=", "json", ".", "loads", "(", "jwt", ".", "utils", ".", "base64url_decode", "(", "payload_segment", ")", ".", "decode", "(", "u'utf-8'", ")", ")", "except", "TypeError", "as", "e", ":", "current_app", ".", "logger", ".", "exception", "(", "e", ")", "raise", "jwt", ".", "DecodeError", "(", "u'Invalid payload padding'", ")", "return", "(", "header", ",", "payload", ")" ]
fetch the header and payload out of the jwt token .
train
false
2,800
def split_by_position(linked_promotions, context): for linked_promotion in linked_promotions: promotion = linked_promotion.content_object if (not promotion): continue key = ('promotions_%s' % linked_promotion.position.lower()) if (key not in context): context[key] = [] context[key].append(promotion)
[ "def", "split_by_position", "(", "linked_promotions", ",", "context", ")", ":", "for", "linked_promotion", "in", "linked_promotions", ":", "promotion", "=", "linked_promotion", ".", "content_object", "if", "(", "not", "promotion", ")", ":", "continue", "key", "=", "(", "'promotions_%s'", "%", "linked_promotion", ".", "position", ".", "lower", "(", ")", ")", "if", "(", "key", "not", "in", "context", ")", ":", "context", "[", "key", "]", "=", "[", "]", "context", "[", "key", "]", ".", "append", "(", "promotion", ")" ]
split the list of promotions into separate lists .
train
false
2,801
def check_key_file(user, source, config='.ssh/authorized_keys', saltenv='base'): keyfile = __salt__['cp.cache_file'](source, saltenv) if (not keyfile): return {} s_keys = _validate_keys(keyfile) if (not s_keys): err = 'No keys detected in {0}. Is file properly formatted?'.format(source) log.error(err) __context__['ssh_auth.error'] = err return {} else: ret = {} for key in s_keys: ret[key] = check_key(user, key, s_keys[key]['enc'], s_keys[key]['comment'], s_keys[key]['options'], config) return ret
[ "def", "check_key_file", "(", "user", ",", "source", ",", "config", "=", "'.ssh/authorized_keys'", ",", "saltenv", "=", "'base'", ")", ":", "keyfile", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "source", ",", "saltenv", ")", "if", "(", "not", "keyfile", ")", ":", "return", "{", "}", "s_keys", "=", "_validate_keys", "(", "keyfile", ")", "if", "(", "not", "s_keys", ")", ":", "err", "=", "'No keys detected in {0}. Is file properly formatted?'", ".", "format", "(", "source", ")", "log", ".", "error", "(", "err", ")", "__context__", "[", "'ssh_auth.error'", "]", "=", "err", "return", "{", "}", "else", ":", "ret", "=", "{", "}", "for", "key", "in", "s_keys", ":", "ret", "[", "key", "]", "=", "check_key", "(", "user", ",", "key", ",", "s_keys", "[", "key", "]", "[", "'enc'", "]", ",", "s_keys", "[", "key", "]", "[", "'comment'", "]", ",", "s_keys", "[", "key", "]", "[", "'options'", "]", ",", "config", ")", "return", "ret" ]
check a keyfile from a source destination against the local keys and return the keys to change cli example: .
train
true
2,803
def _RegisterBoundsValidatorIfNeeded(parser, name, flag_values): if ((parser.lower_bound is not None) or (parser.upper_bound is not None)): def Checker(value): if ((value is not None) and parser.IsOutsideBounds(value)): message = ('%s is not %s' % (value, parser.syntactic_help)) raise gflags_validators.Error(message) return True RegisterValidator(name, Checker, flag_values=flag_values)
[ "def", "_RegisterBoundsValidatorIfNeeded", "(", "parser", ",", "name", ",", "flag_values", ")", ":", "if", "(", "(", "parser", ".", "lower_bound", "is", "not", "None", ")", "or", "(", "parser", ".", "upper_bound", "is", "not", "None", ")", ")", ":", "def", "Checker", "(", "value", ")", ":", "if", "(", "(", "value", "is", "not", "None", ")", "and", "parser", ".", "IsOutsideBounds", "(", "value", ")", ")", ":", "message", "=", "(", "'%s is not %s'", "%", "(", "value", ",", "parser", ".", "syntactic_help", ")", ")", "raise", "gflags_validators", ".", "Error", "(", "message", ")", "return", "True", "RegisterValidator", "(", "name", ",", "Checker", ",", "flag_values", "=", "flag_values", ")" ]
enforce lower and upper bounds for numeric flags .
train
false
2,804
def run_in_background(name, args, **kwargs): if is_running(name): wf().logger.info(u'Task `{0}` is already running'.format(name)) return argcache = _arg_cache(name) with open(argcache, u'wb') as file_obj: pickle.dump({u'args': args, u'kwargs': kwargs}, file_obj) wf().logger.debug(u'Command arguments cached to `{0}`'.format(argcache)) cmd = [u'/usr/bin/python', __file__, name] wf().logger.debug(u'Calling {0!r} ...'.format(cmd)) retcode = subprocess.call(cmd) if retcode: wf().logger.error(u'Failed to call task in background') else: wf().logger.debug(u'Executing task `{0}` in background...'.format(name)) return retcode
[ "def", "run_in_background", "(", "name", ",", "args", ",", "**", "kwargs", ")", ":", "if", "is_running", "(", "name", ")", ":", "wf", "(", ")", ".", "logger", ".", "info", "(", "u'Task `{0}` is already running'", ".", "format", "(", "name", ")", ")", "return", "argcache", "=", "_arg_cache", "(", "name", ")", "with", "open", "(", "argcache", ",", "u'wb'", ")", "as", "file_obj", ":", "pickle", ".", "dump", "(", "{", "u'args'", ":", "args", ",", "u'kwargs'", ":", "kwargs", "}", ",", "file_obj", ")", "wf", "(", ")", ".", "logger", ".", "debug", "(", "u'Command arguments cached to `{0}`'", ".", "format", "(", "argcache", ")", ")", "cmd", "=", "[", "u'/usr/bin/python'", ",", "__file__", ",", "name", "]", "wf", "(", ")", ".", "logger", ".", "debug", "(", "u'Calling {0!r} ...'", ".", "format", "(", "cmd", ")", ")", "retcode", "=", "subprocess", ".", "call", "(", "cmd", ")", "if", "retcode", ":", "wf", "(", ")", ".", "logger", ".", "error", "(", "u'Failed to call task in background'", ")", "else", ":", "wf", "(", ")", ".", "logger", ".", "debug", "(", "u'Executing task `{0}` in background...'", ".", "format", "(", "name", ")", ")", "return", "retcode" ]
pickle arguments to cache file and re-invoke this script to run the task in the background .
train
false
2,805
def find_additional_properties(instance, schema): properties = schema.get('properties', {}) patterns = '|'.join(schema.get('patternProperties', {})) for property in instance: if (property not in properties): if (patterns and re.search(patterns, property)): continue (yield property)
[ "def", "find_additional_properties", "(", "instance", ",", "schema", ")", ":", "properties", "=", "schema", ".", "get", "(", "'properties'", ",", "{", "}", ")", "patterns", "=", "'|'", ".", "join", "(", "schema", ".", "get", "(", "'patternProperties'", ",", "{", "}", ")", ")", "for", "property", "in", "instance", ":", "if", "(", "property", "not", "in", "properties", ")", ":", "if", "(", "patterns", "and", "re", ".", "search", "(", "patterns", ",", "property", ")", ")", ":", "continue", "(", "yield", "property", ")" ]
return the set of additional properties for the given instance .
train
true
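A runnable sketch of the find_additional_properties record above with a made-up json-schema fragment; properties named in the schema or matching a patternProperties regex are skipped:

import re

def find_additional_properties(instance, schema):
    properties = schema.get('properties', {})
    patterns = '|'.join(schema.get('patternProperties', {}))
    for prop in instance:
        if prop not in properties:
            if patterns and re.search(patterns, prop):
                continue
            yield prop

schema = {'properties': {'name': {}}, 'patternProperties': {'^x-': {}}}
instance = {'name': 'a', 'x-custom': 'b', 'extra': 'c'}
print(list(find_additional_properties(instance, schema)))  # ['extra']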
2,806
def get_task_by_resourceuri_and_taskId(component_type, resource_uri, task_id, ipaddr, port): task_uri_constant = urihelper.singletonURIHelperInstance.getUri(component_type, 'task') (s, h) = service_json_request(ipaddr, port, 'GET', task_uri_constant.format(resource_uri, task_id), None) if (not s): return None o = json_decode(s) return o
[ "def", "get_task_by_resourceuri_and_taskId", "(", "component_type", ",", "resource_uri", ",", "task_id", ",", "ipaddr", ",", "port", ")", ":", "task_uri_constant", "=", "urihelper", ".", "singletonURIHelperInstance", ".", "getUri", "(", "component_type", ",", "'task'", ")", "(", "s", ",", "h", ")", "=", "service_json_request", "(", "ipaddr", ",", "port", ",", "'GET'", ",", "task_uri_constant", ".", "format", "(", "resource_uri", ",", "task_id", ")", ",", "None", ")", "if", "(", "not", "s", ")", ":", "return", "None", "o", "=", "json_decode", "(", "s", ")", "return", "o" ]
returns the single task details .
train
false
2,807
def floatx(): return _FLOATX
[ "def", "floatx", "(", ")", ":", "return", "_FLOATX" ]
returns the default float type .
train
false
2,808
def addLoopByComplex(derivation, endMultiplier, loopLists, path, pointComplex, vertexes): loops = loopLists[(-1)] loop = [] loops.append(loop) for point in path: pointMinusBegin = (point - derivation.axisStart) dotVector3 = derivation.axisProjectiveSpace.getDotVector3(pointMinusBegin) dotVector3Complex = dotVector3.dropAxis() dotPointComplex = (pointComplex * dotVector3Complex) dotPoint = Vector3(dotPointComplex.real, dotPointComplex.imag, dotVector3.z) projectedVector3 = (derivation.axisProjectiveSpace.getVector3ByPoint(dotPoint) + derivation.axisStart) loop.append(projectedVector3)
[ "def", "addLoopByComplex", "(", "derivation", ",", "endMultiplier", ",", "loopLists", ",", "path", ",", "pointComplex", ",", "vertexes", ")", ":", "loops", "=", "loopLists", "[", "(", "-", "1", ")", "]", "loop", "=", "[", "]", "loops", ".", "append", "(", "loop", ")", "for", "point", "in", "path", ":", "pointMinusBegin", "=", "(", "point", "-", "derivation", ".", "axisStart", ")", "dotVector3", "=", "derivation", ".", "axisProjectiveSpace", ".", "getDotVector3", "(", "pointMinusBegin", ")", "dotVector3Complex", "=", "dotVector3", ".", "dropAxis", "(", ")", "dotPointComplex", "=", "(", "pointComplex", "*", "dotVector3Complex", ")", "dotPoint", "=", "Vector3", "(", "dotPointComplex", ".", "real", ",", "dotPointComplex", ".", "imag", ",", "dotVector3", ".", "z", ")", "projectedVector3", "=", "(", "derivation", ".", "axisProjectiveSpace", ".", "getVector3ByPoint", "(", "dotPoint", ")", "+", "derivation", ".", "axisStart", ")", "loop", ".", "append", "(", "projectedVector3", ")" ]
add an indexed loop to the vertexes .
train
false
2,812
def _apply_prediction(G, func, ebunch=None): if (ebunch is None): ebunch = nx.non_edges(G) return ((u, v, func(u, v)) for (u, v) in ebunch)
[ "def", "_apply_prediction", "(", "G", ",", "func", ",", "ebunch", "=", "None", ")", ":", "if", "(", "ebunch", "is", "None", ")", ":", "ebunch", "=", "nx", ".", "non_edges", "(", "G", ")", "return", "(", "(", "u", ",", "v", ",", "func", "(", "u", ",", "v", ")", ")", "for", "(", "u", ",", "v", ")", "in", "ebunch", ")" ]
applies the given function to each edge in the specified iterable of edges .
train
false
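A sketch of the _apply_prediction record above using networkx; the scoring function here (a common-neighbor count) is a made-up stand-in for a real link-prediction metric:

import networkx as nx

def _apply_prediction(G, func, ebunch=None):
    if ebunch is None:
        ebunch = nx.non_edges(G)
    return ((u, v, func(u, v)) for u, v in ebunch)

G = nx.path_graph(4)  # edges 0-1, 1-2, 2-3; non-edges (0,2), (0,3), (1,3)
score = lambda u, v: len(list(nx.common_neighbors(G, u, v)))
print(sorted(_apply_prediction(G, score)))
# [(0, 2, 1), (0, 3, 0), (1, 3, 1)]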
2,813
@pytest.mark.parametrize(u'table', [u'users', u'"users"']) def test_suggested_column_names_from_shadowed_visible_table(completer, complete_event, table): text = (u'SELECT FROM ' + table) position = len(u'SELECT ') result = set(completer.get_completions(Document(text=text, cursor_position=position), complete_event)) assert (set(result) == set(((testdata.columns(u'users') + testdata.functions()) + list((testdata.builtin_functions() + testdata.keywords())))))
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "u'table'", ",", "[", "u'users'", ",", "u'\"users\"'", "]", ")", "def", "test_suggested_column_names_from_shadowed_visible_table", "(", "completer", ",", "complete_event", ",", "table", ")", ":", "text", "=", "(", "u'SELECT FROM '", "+", "table", ")", "position", "=", "len", "(", "u'SELECT '", ")", "result", "=", "set", "(", "completer", ".", "get_completions", "(", "Document", "(", "text", "=", "text", ",", "cursor_position", "=", "position", ")", ",", "complete_event", ")", ")", "assert", "(", "set", "(", "result", ")", "==", "set", "(", "(", "(", "testdata", ".", "columns", "(", "u'users'", ")", "+", "testdata", ".", "functions", "(", ")", ")", "+", "list", "(", "(", "testdata", ".", "builtin_functions", "(", ")", "+", "testdata", ".", "keywords", "(", ")", ")", ")", ")", ")", ")" ]
suggest column and function names when selecting from table .
train
false
2,815
def getInradiusFirstByHeightWidth(defaultInradius, elementNode): demiheight = getFloatByPrefixBeginEnd(elementNode, 'demiheight', 'height', defaultInradius.imag) demiwidth = getFloatByPrefixBeginEnd(elementNode, 'demiwidth', 'width', defaultInradius.real) return getInradius(complex(demiwidth, demiheight), elementNode)
[ "def", "getInradiusFirstByHeightWidth", "(", "defaultInradius", ",", "elementNode", ")", ":", "demiheight", "=", "getFloatByPrefixBeginEnd", "(", "elementNode", ",", "'demiheight'", ",", "'height'", ",", "defaultInradius", ".", "imag", ")", "demiwidth", "=", "getFloatByPrefixBeginEnd", "(", "elementNode", ",", "'demiwidth'", ",", "'width'", ",", "defaultInradius", ".", "real", ")", "return", "getInradius", "(", "complex", "(", "demiwidth", ",", "demiheight", ")", ",", "elementNode", ")" ]
get inradius .
train
false
2,816
def safe(text): return _safe(text)
[ "def", "safe", "(", "text", ")", ":", "return", "_safe", "(", "text", ")" ]
marks the value as a string that should not be auto-escaped .
train
false
2,817
def _auto_configure_disk(session, vdi_ref, new_gb): if (new_gb == 0): LOG.debug('Skipping auto_config_disk as destination size is 0GB') return with vdi_attached(session, vdi_ref, read_only=False) as dev: partitions = _get_partitions(dev) if (len(partitions) != 1): reason = _('Disk must have only one partition.') raise exception.CannotResizeDisk(reason=reason) (num, start, old_sectors, fstype, name, flags) = partitions[0] if (fstype not in ('ext3', 'ext4')): reason = _('Disk contains a filesystem we are unable to resize: %s') raise exception.CannotResizeDisk(reason=(reason % fstype)) if (num != 1): reason = _('The only partition should be partition 1.') raise exception.CannotResizeDisk(reason=reason) new_sectors = ((new_gb * units.Gi) / SECTOR_SIZE) _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags)
[ "def", "_auto_configure_disk", "(", "session", ",", "vdi_ref", ",", "new_gb", ")", ":", "if", "(", "new_gb", "==", "0", ")", ":", "LOG", ".", "debug", "(", "'Skipping auto_config_disk as destination size is 0GB'", ")", "return", "with", "vdi_attached", "(", "session", ",", "vdi_ref", ",", "read_only", "=", "False", ")", "as", "dev", ":", "partitions", "=", "_get_partitions", "(", "dev", ")", "if", "(", "len", "(", "partitions", ")", "!=", "1", ")", ":", "reason", "=", "_", "(", "'Disk must have only one partition.'", ")", "raise", "exception", ".", "CannotResizeDisk", "(", "reason", "=", "reason", ")", "(", "num", ",", "start", ",", "old_sectors", ",", "fstype", ",", "name", ",", "flags", ")", "=", "partitions", "[", "0", "]", "if", "(", "fstype", "not", "in", "(", "'ext3'", ",", "'ext4'", ")", ")", ":", "reason", "=", "_", "(", "'Disk contains a filesystem we are unable to resize: %s'", ")", "raise", "exception", ".", "CannotResizeDisk", "(", "reason", "=", "(", "reason", "%", "fstype", ")", ")", "if", "(", "num", "!=", "1", ")", ":", "reason", "=", "_", "(", "'The only partition should be partition 1.'", ")", "raise", "exception", ".", "CannotResizeDisk", "(", "reason", "=", "reason", ")", "new_sectors", "=", "(", "(", "new_gb", "*", "units", ".", "Gi", ")", "/", "SECTOR_SIZE", ")", "_resize_part_and_fs", "(", "dev", ",", "start", ",", "old_sectors", ",", "new_sectors", ",", "flags", ")" ]
partition and resize fs to match the size specified by flavors .
train
false
2,818
def get_desktop_root(*append): return __get_root('desktop', *append)
[ "def", "get_desktop_root", "(", "*", "append", ")", ":", "return", "__get_root", "(", "'desktop'", ",", "*", "append", ")" ]
returns the directory for desktop .
train
false
2,819
def fft_freqs(n_fft, fs): return ((np.arange(0, ((n_fft // 2) + 1)) / float(n_fft)) * float(fs))
[ "def", "fft_freqs", "(", "n_fft", ",", "fs", ")", ":", "return", "(", "(", "np", ".", "arange", "(", "0", ",", "(", "(", "n_fft", "//", "2", ")", "+", "1", ")", ")", "/", "float", "(", "n_fft", ")", ")", "*", "float", "(", "fs", ")", ")" ]
return frequencies for dft , given n_fft points in the fft and sample rate fs .
train
true
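A sketch of the fft_freqs record above; for n_fft points at sample rate fs it returns the one-sided dft bin frequencies (equivalent, if memory serves, to numpy's rfftfreq):

import numpy as np

def fft_freqs(n_fft, fs):
    return np.arange(0, n_fft // 2 + 1) / float(n_fft) * float(fs)

print(fft_freqs(8, 1000.0))  # [  0. 125. 250. 375. 500.]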
2,821
def _get_world_to_view_matrix(scene): from mayavi.core.ui.mayavi_scene import MayaviScene from tvtk.pyface.tvtk_scene import TVTKScene if (not isinstance(scene, (MayaviScene, TVTKScene))): raise TypeError(('scene must be an instance of TVTKScene/MayaviScene, found type %s' % type(scene))) cam = scene.camera scene_size = tuple(scene.get_size()) clip_range = cam.clipping_range aspect_ratio = (float(scene_size[0]) / scene_size[1]) vtk_comb_trans_mat = cam.get_composite_projection_transform_matrix(aspect_ratio, clip_range[0], clip_range[1]) vtk_comb_trans_mat = vtk_comb_trans_mat.to_array() return vtk_comb_trans_mat
[ "def", "_get_world_to_view_matrix", "(", "scene", ")", ":", "from", "mayavi", ".", "core", ".", "ui", ".", "mayavi_scene", "import", "MayaviScene", "from", "tvtk", ".", "pyface", ".", "tvtk_scene", "import", "TVTKScene", "if", "(", "not", "isinstance", "(", "scene", ",", "(", "MayaviScene", ",", "TVTKScene", ")", ")", ")", ":", "raise", "TypeError", "(", "(", "'scene must be an instance of TVTKScene/MayaviScene, found type %s'", "%", "type", "(", "scene", ")", ")", ")", "cam", "=", "scene", ".", "camera", "scene_size", "=", "tuple", "(", "scene", ".", "get_size", "(", ")", ")", "clip_range", "=", "cam", ".", "clipping_range", "aspect_ratio", "=", "(", "float", "(", "scene_size", "[", "0", "]", ")", "/", "scene_size", "[", "1", "]", ")", "vtk_comb_trans_mat", "=", "cam", ".", "get_composite_projection_transform_matrix", "(", "aspect_ratio", ",", "clip_range", "[", "0", "]", ",", "clip_range", "[", "1", "]", ")", "vtk_comb_trans_mat", "=", "vtk_comb_trans_mat", ".", "to_array", "(", ")", "return", "vtk_comb_trans_mat" ]
return the 4x4 matrix to transform xyz space to the current view .
train
false
2,822
def blend_channels_linear_light(bottom_chan, top_chan): return numpy.clip(((bottom_chan[:, :] + (2 * top_chan[:, :])) - 1), 0, 1)
[ "def", "blend_channels_linear_light", "(", "bottom_chan", ",", "top_chan", ")", ":", "return", "numpy", ".", "clip", "(", "(", "(", "bottom_chan", "[", ":", ",", ":", "]", "+", "(", "2", "*", "top_chan", "[", ":", ",", ":", "]", ")", ")", "-", "1", ")", ",", "0", ",", "1", ")" ]
return combination of bottom and top channels .
train
false
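A worked sketch of the blend_channels_linear_light record above with made-up channel values; a top value of 0.5 leaves the bottom unchanged, while values above/below push toward white/black:

import numpy as np

def blend_channels_linear_light(bottom, top):
    return np.clip(bottom + 2 * top - 1, 0, 1)

bottom = np.array([[0.2, 0.5], [0.8, 1.0]])
top    = np.array([[0.5, 0.25], [0.9, 0.0]])
print(blend_channels_linear_light(bottom, top))
# [[0.2 0. ]
#  [1.  0. ]]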
2,823
def get_group_perms(user_or_group, obj): check = ObjectPermissionChecker(user_or_group) return check.get_group_perms(obj)
[ "def", "get_group_perms", "(", "user_or_group", ",", "obj", ")", ":", "check", "=", "ObjectPermissionChecker", "(", "user_or_group", ")", "return", "check", ".", "get_group_perms", "(", "obj", ")" ]
returns permissions for given user/group and object pair .
train
false
2,824
def assign_staff_role_to_ccx(ccx_locator, user, master_course_id): coach_role_on_master_course = CourseCcxCoachRole(master_course_id) if coach_role_on_master_course.has_user(user): role = CourseStaffRole(ccx_locator) if (not role.has_user(user)): with ccx_course(ccx_locator) as course: allow_access(course, user, 'staff', send_email=False)
[ "def", "assign_staff_role_to_ccx", "(", "ccx_locator", ",", "user", ",", "master_course_id", ")", ":", "coach_role_on_master_course", "=", "CourseCcxCoachRole", "(", "master_course_id", ")", "if", "coach_role_on_master_course", ".", "has_user", "(", "user", ")", ":", "role", "=", "CourseStaffRole", "(", "ccx_locator", ")", "if", "(", "not", "role", ".", "has_user", "(", "user", ")", ")", ":", "with", "ccx_course", "(", "ccx_locator", ")", "as", "course", ":", "allow_access", "(", "course", ",", "user", ",", "'staff'", ",", "send_email", "=", "False", ")" ]
check if the user has the ccx_coach role on the master course , then assign the staff role on the ccx only if it is not already assigned .
train
false
2,829
def send_user_export_to_admins(u): body = json.dumps(generate_user_export(u), default=encode_datetime) msg = django.core.mail.EmailMessage(subject=('User export for %d' % u.id), body=body, to=[email for (name, email) in django.conf.settings.ADMINS]) msg.send()
[ "def", "send_user_export_to_admins", "(", "u", ")", ":", "body", "=", "json", ".", "dumps", "(", "generate_user_export", "(", "u", ")", ",", "default", "=", "encode_datetime", ")", "msg", "=", "django", ".", "core", ".", "mail", ".", "EmailMessage", "(", "subject", "=", "(", "'User export for %d'", "%", "u", ".", "id", ")", ",", "body", "=", "body", ",", "to", "=", "[", "email", "for", "(", "name", ",", "email", ")", "in", "django", ".", "conf", ".", "settings", ".", "ADMINS", "]", ")", "msg", ".", "send", "(", ")" ]
you might want to call this function before deleting a user .
train
false
2,830
def _compute_multivariate_sample_acovf(endog, maxlag): endog = np.array(endog) if (endog.ndim == 1): endog = endog[:, np.newaxis] endog -= np.mean(endog, axis=0) (nobs, k_endog) = endog.shape sample_autocovariances = [] for s in range((maxlag + 1)): sample_autocovariances.append(np.zeros((k_endog, k_endog))) for t in range((nobs - s)): sample_autocovariances[s] += np.outer(endog[t], endog[(t + s)]) sample_autocovariances[s] /= nobs return sample_autocovariances
[ "def", "_compute_multivariate_sample_acovf", "(", "endog", ",", "maxlag", ")", ":", "endog", "=", "np", ".", "array", "(", "endog", ")", "if", "(", "endog", ".", "ndim", "==", "1", ")", ":", "endog", "=", "endog", "[", ":", ",", "np", ".", "newaxis", "]", "endog", "-=", "np", ".", "mean", "(", "endog", ",", "axis", "=", "0", ")", "(", "nobs", ",", "k_endog", ")", "=", "endog", ".", "shape", "sample_autocovariances", "=", "[", "]", "for", "s", "in", "range", "(", "(", "maxlag", "+", "1", ")", ")", ":", "sample_autocovariances", ".", "append", "(", "np", ".", "zeros", "(", "(", "k_endog", ",", "k_endog", ")", ")", ")", "for", "t", "in", "range", "(", "(", "nobs", "-", "s", ")", ")", ":", "sample_autocovariances", "[", "s", "]", "+=", "np", ".", "outer", "(", "endog", "[", "t", "]", ",", "endog", "[", "(", "t", "+", "s", ")", "]", ")", "sample_autocovariances", "[", "s", "]", "/=", "nobs", "return", "sample_autocovariances" ]
computes multivariate sample autocovariances of the sample data endog , up to lag maxlag .
train
false
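A quick sanity check for the record above (the random data is made up): the lag-0 sample autocovariance of standard white noise should be close to the identity matrix, and higher lags close to zero.

import numpy as np

def _compute_multivariate_sample_acovf(endog, maxlag):
    endog = np.array(endog, dtype=float)
    if endog.ndim == 1:
        endog = endog[:, np.newaxis]
    endog -= np.mean(endog, axis=0)
    nobs, k_endog = endog.shape
    acovs = []
    for s in range(maxlag + 1):
        acov = np.zeros((k_endog, k_endog))
        for t in range(nobs - s):
            acov += np.outer(endog[t], endog[t + s])
        acovs.append(acov / nobs)
    return acovs

x = np.random.default_rng(0).standard_normal((2000, 2))
acovs = _compute_multivariate_sample_acovf(x, 1)
print(np.round(acovs[0], 1))  # close to the 2x2 identity
print(np.round(acovs[1], 1))  # close to zeros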
2,831
def _index_list(key_or_list, direction=None): if (direction is not None): return [(key_or_list, direction)] else: if isinstance(key_or_list, string_type): return [(key_or_list, ASCENDING)] elif (not isinstance(key_or_list, (list, tuple))): raise TypeError('if no direction is specified, key_or_list must be an instance of list') return key_or_list
[ "def", "_index_list", "(", "key_or_list", ",", "direction", "=", "None", ")", ":", "if", "(", "direction", "is", "not", "None", ")", ":", "return", "[", "(", "key_or_list", ",", "direction", ")", "]", "else", ":", "if", "isinstance", "(", "key_or_list", ",", "string_type", ")", ":", "return", "[", "(", "key_or_list", ",", "ASCENDING", ")", "]", "elif", "(", "not", "isinstance", "(", "key_or_list", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "raise", "TypeError", "(", "'if no direction is specified, key_or_list must be an instance of list'", ")", "return", "key_or_list" ]
helper to generate a list of (key, direction) pairs .
train
true
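A standalone sketch of the _index_list record above; ASCENDING and string_type are assumed stand-ins for the pymongo-style constants the record imports:

ASCENDING = 1      # assumption: pymongo-style sort direction
string_type = str  # assumption: py2/py3 compat alias

def _index_list(key_or_list, direction=None):
    if direction is not None:
        return [(key_or_list, direction)]
    if isinstance(key_or_list, string_type):
        return [(key_or_list, ASCENDING)]
    if not isinstance(key_or_list, (list, tuple)):
        raise TypeError('if no direction is specified, '
                        'key_or_list must be an instance of list')
    return key_or_list

print(_index_list('name'))        # [('name', 1)]
print(_index_list('name', -1))    # [('name', -1)]
print(_index_list([('a', 1)]))    # passed through: [('a', 1)]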
2,832
def _check_str(text, testfunc): if ((text is not None) and (not testfunc(text))): warnings.warn(("String %s doesn't match the given regexp" % text), PhyloXMLWarning, stacklevel=2)
[ "def", "_check_str", "(", "text", ",", "testfunc", ")", ":", "if", "(", "(", "text", "is", "not", "None", ")", "and", "(", "not", "testfunc", "(", "text", ")", ")", ")", ":", "warnings", ".", "warn", "(", "(", "\"String %s doesn't match the given regexp\"", "%", "text", ")", ",", "PhyloXMLWarning", ",", "stacklevel", "=", "2", ")" ]
check a string using testfunc .
train
false
2,833
def track_sunset(offset=None): def track_sunset_decorator(action): 'Decorator to track sunset events.' event.track_sunset(HASS, functools.partial(action, HASS), offset) return action return track_sunset_decorator
[ "def", "track_sunset", "(", "offset", "=", "None", ")", ":", "def", "track_sunset_decorator", "(", "action", ")", ":", "event", ".", "track_sunset", "(", "HASS", ",", "functools", ".", "partial", "(", "action", ",", "HASS", ")", ",", "offset", ")", "return", "action", "return", "track_sunset_decorator" ]
decorator factory to track sunset events .
train
false