id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
17,114
def _get_ch_type(inst, ch_type): if (ch_type is None): for type_ in ['mag', 'grad', 'planar1', 'planar2', 'eeg']: if (type_ in inst): ch_type = type_ break else: raise RuntimeError('No plottable channel types found') return ch_type
[ "def", "_get_ch_type", "(", "inst", ",", "ch_type", ")", ":", "if", "(", "ch_type", "is", "None", ")", ":", "for", "type_", "in", "[", "'mag'", ",", "'grad'", ",", "'planar1'", ",", "'planar2'", ",", "'eeg'", "]", ":", "if", "(", "type_", "in", "inst", ")", ":", "ch_type", "=", "type_", "break", "else", ":", "raise", "RuntimeError", "(", "'No plottable channel types found'", ")", "return", "ch_type" ]
helper to choose a single channel type .
train
false
17,115
@control_command(args=[(u'task_name', text_t), (u'soft', float), (u'hard', float)], signature=u'<task_name> <soft_secs> [hard_secs]') def time_limit(state, task_name=None, hard=None, soft=None, **kwargs): try: task = state.app.tasks[task_name] except KeyError: logger.error(u'Change time limit attempt for unknown task %s', task_name, exc_info=True) return nok(u'unknown task') task.soft_time_limit = soft task.time_limit = hard logger.info(u'New time limits for tasks of type %s: soft=%s hard=%s', task_name, soft, hard) return ok(u'time limits set successfully')
[ "@", "control_command", "(", "args", "=", "[", "(", "u'task_name'", ",", "text_t", ")", ",", "(", "u'soft'", ",", "float", ")", ",", "(", "u'hard'", ",", "float", ")", "]", ",", "signature", "=", "u'<task_name> <soft_secs> [hard_secs]'", ")", "def", "time_limit", "(", "state", ",", "task_name", "=", "None", ",", "hard", "=", "None", ",", "soft", "=", "None", ",", "**", "kwargs", ")", ":", "try", ":", "task", "=", "state", ".", "app", ".", "tasks", "[", "task_name", "]", "except", "KeyError", ":", "logger", ".", "error", "(", "u'Change time limit attempt for unknown task %s'", ",", "task_name", ",", "exc_info", "=", "True", ")", "return", "nok", "(", "u'unknown task'", ")", "task", ".", "soft_time_limit", "=", "soft", "task", ".", "time_limit", "=", "hard", "logger", ".", "info", "(", "u'New time limits for tasks of type %s: soft=%s hard=%s'", ",", "task_name", ",", "soft", ",", "hard", ")", "return", "ok", "(", "u'time limits set successfully'", ")" ]
tell worker(s) to modify the time limit for task by type .
train
false
17,116
def test_import_submodule_global_unshadowed(pyi_builder): pyi_builder.test_source('\n # Assert that this submodule is unshadowed by this global variable.\n import sys\n from pyi_testmod_submodule_global_unshadowed import submodule\n assert type(submodule) == type(sys)\n ')
[ "def", "test_import_submodule_global_unshadowed", "(", "pyi_builder", ")", ":", "pyi_builder", ".", "test_source", "(", "'\\n # Assert that this submodule is unshadowed by this global variable.\\n import sys\\n from pyi_testmod_submodule_global_unshadowed import submodule\\n assert type(submodule) == type(sys)\\n '", ")" ]
functional test validating issue #1919 .
train
false
17,117
def stEnergyEntropy(frame, numOfShortBlocks=10): Eol = numpy.sum((frame ** 2)) L = len(frame) subWinLength = int(numpy.floor((L / numOfShortBlocks))) if (L != (subWinLength * numOfShortBlocks)): frame = frame[0:(subWinLength * numOfShortBlocks)] subWindows = frame.reshape(subWinLength, numOfShortBlocks, order='F').copy() s = (numpy.sum((subWindows ** 2), axis=0) / (Eol + eps)) Entropy = (- numpy.sum((s * numpy.log2((s + eps))))) return Entropy
[ "def", "stEnergyEntropy", "(", "frame", ",", "numOfShortBlocks", "=", "10", ")", ":", "Eol", "=", "numpy", ".", "sum", "(", "(", "frame", "**", "2", ")", ")", "L", "=", "len", "(", "frame", ")", "subWinLength", "=", "int", "(", "numpy", ".", "floor", "(", "(", "L", "/", "numOfShortBlocks", ")", ")", ")", "if", "(", "L", "!=", "(", "subWinLength", "*", "numOfShortBlocks", ")", ")", ":", "frame", "=", "frame", "[", "0", ":", "(", "subWinLength", "*", "numOfShortBlocks", ")", "]", "subWindows", "=", "frame", ".", "reshape", "(", "subWinLength", ",", "numOfShortBlocks", ",", "order", "=", "'F'", ")", ".", "copy", "(", ")", "s", "=", "(", "numpy", ".", "sum", "(", "(", "subWindows", "**", "2", ")", ",", "axis", "=", "0", ")", "/", "(", "Eol", "+", "eps", ")", ")", "Entropy", "=", "(", "-", "numpy", ".", "sum", "(", "(", "s", "*", "numpy", ".", "log2", "(", "(", "s", "+", "eps", ")", ")", ")", ")", ")", "return", "Entropy" ]
computes entropy of energy .
train
false
17,118
def _resp_content_type_property(): def getter(self): if ('content-type' in self.headers): return self.headers.get('content-type').split(';')[0] def setter(self, value): self.headers['content-type'] = value return property(getter, setter, doc='Retrieve and set the response Content-Type header')
[ "def", "_resp_content_type_property", "(", ")", ":", "def", "getter", "(", "self", ")", ":", "if", "(", "'content-type'", "in", "self", ".", "headers", ")", ":", "return", "self", ".", "headers", ".", "get", "(", "'content-type'", ")", ".", "split", "(", "';'", ")", "[", "0", "]", "def", "setter", "(", "self", ",", "value", ")", ":", "self", ".", "headers", "[", "'content-type'", "]", "=", "value", "return", "property", "(", "getter", ",", "setter", ",", "doc", "=", "'Retrieve and set the response Content-Type header'", ")" ]
set and retrieve response .
train
false
17,119
def find_skips_in_file(path): BUG_RE = re.compile('\\s*@.*skip_because\\(bug=[\\\'"](\\d+)[\\\'"]') DEF_RE = re.compile('\\s*def (\\w+)\\(') bug_found = False results = [] with open(path, 'rb') as content: lines = content.readlines() for (x, line) in enumerate(lines): if (not bug_found): res = BUG_RE.match(line) if res: bug_no = int(res.group(1)) debug('Found bug skip %s on line %d', bug_no, (x + 1)) bug_found = True else: res = DEF_RE.match(line) if res: method = res.group(1) debug('Found test method %s skips for bug %d', method, bug_no) results.append((method, bug_no)) bug_found = False return results
[ "def", "find_skips_in_file", "(", "path", ")", ":", "BUG_RE", "=", "re", ".", "compile", "(", "'\\\\s*@.*skip_because\\\\(bug=[\\\\\\'\"](\\\\d+)[\\\\\\'\"]'", ")", "DEF_RE", "=", "re", ".", "compile", "(", "'\\\\s*def (\\\\w+)\\\\('", ")", "bug_found", "=", "False", "results", "=", "[", "]", "with", "open", "(", "path", ",", "'rb'", ")", "as", "content", ":", "lines", "=", "content", ".", "readlines", "(", ")", "for", "(", "x", ",", "line", ")", "in", "enumerate", "(", "lines", ")", ":", "if", "(", "not", "bug_found", ")", ":", "res", "=", "BUG_RE", ".", "match", "(", "line", ")", "if", "res", ":", "bug_no", "=", "int", "(", "res", ".", "group", "(", "1", ")", ")", "debug", "(", "'Found bug skip %s on line %d'", ",", "bug_no", ",", "(", "x", "+", "1", ")", ")", "bug_found", "=", "True", "else", ":", "res", "=", "DEF_RE", ".", "match", "(", "line", ")", "if", "res", ":", "method", "=", "res", ".", "group", "(", "1", ")", "debug", "(", "'Found test method %s skips for bug %d'", ",", "method", ",", "bug_no", ")", "results", ".", "append", "(", "(", "method", ",", "bug_no", ")", ")", "bug_found", "=", "False", "return", "results" ]
return the skip tuples in a test file .
train
false
17,120
def read_model_metadata(source): if isinstance(source, compat.string_type): parts = compat.urlparse(source) if ((parts.scheme in ('', 'file')) and os.path.isdir(parts.path)): source = parts.path return read_model_metadata_bundle(source) elif ((len(parts.scheme) == 1) and os.path.isdir(source)): return read_model_metadata_bundle(source) else: return _json_from_url(source) else: return json.load(source)
[ "def", "read_model_metadata", "(", "source", ")", ":", "if", "isinstance", "(", "source", ",", "compat", ".", "string_type", ")", ":", "parts", "=", "compat", ".", "urlparse", "(", "source", ")", "if", "(", "(", "parts", ".", "scheme", "in", "(", "''", ",", "'file'", ")", ")", "and", "os", ".", "path", ".", "isdir", "(", "parts", ".", "path", ")", ")", ":", "source", "=", "parts", ".", "path", "return", "read_model_metadata_bundle", "(", "source", ")", "elif", "(", "(", "len", "(", "parts", ".", "scheme", ")", "==", "1", ")", "and", "os", ".", "path", ".", "isdir", "(", "source", ")", ")", ":", "return", "read_model_metadata_bundle", "(", "source", ")", "else", ":", "return", "_json_from_url", "(", "source", ")", "else", ":", "return", "json", ".", "load", "(", "source", ")" ]
reads a model description from source which can be a filename .
train
false
17,125
def DnsNameValid(dnsname): if ((not dnsname) or (not DNSNAME_RE.match(dnsname))): return False else: return True
[ "def", "DnsNameValid", "(", "dnsname", ")", ":", "if", "(", "(", "not", "dnsname", ")", "or", "(", "not", "DNSNAME_RE", ".", "match", "(", "dnsname", ")", ")", ")", ":", "return", "False", "else", ":", "return", "True" ]
tests whether a string is a valid dns name .
train
false
17,126
def test_existing_path_FileLinks_alt_formatter(): td = mkdtemp() tf1 = NamedTemporaryFile(dir=td) tf2 = NamedTemporaryFile(dir=td) def fake_formatter(dirname, fnames, included_suffixes): return ['hello', 'world'] fl = display.FileLinks(td, notebook_display_formatter=fake_formatter) actual = fl._repr_html_() actual = actual.split('\n') actual.sort() expected = ['hello', 'world'] expected.sort() nt.assert_equal(actual, expected)
[ "def", "test_existing_path_FileLinks_alt_formatter", "(", ")", ":", "td", "=", "mkdtemp", "(", ")", "tf1", "=", "NamedTemporaryFile", "(", "dir", "=", "td", ")", "tf2", "=", "NamedTemporaryFile", "(", "dir", "=", "td", ")", "def", "fake_formatter", "(", "dirname", ",", "fnames", ",", "included_suffixes", ")", ":", "return", "[", "'hello'", ",", "'world'", "]", "fl", "=", "display", ".", "FileLinks", "(", "td", ",", "notebook_display_formatter", "=", "fake_formatter", ")", "actual", "=", "fl", ".", "_repr_html_", "(", ")", "actual", "=", "actual", ".", "split", "(", "'\\n'", ")", "actual", ".", "sort", "(", ")", "expected", "=", "[", "'hello'", ",", "'world'", "]", "expected", ".", "sort", "(", ")", "nt", ".", "assert_equal", "(", "actual", ",", "expected", ")" ]
filelinks: calling _repr_html_ functions as expected w/ an alt formatter .
train
false
17,127
def randombytes(n): if is_py2: L = [chr(random.randrange(0, 256)) for i in range(n)] else: L = [chr(random.randrange(0, 256)).encode('utf-8') for i in range(n)] return ''.join(L)
[ "def", "randombytes", "(", "n", ")", ":", "if", "is_py2", ":", "L", "=", "[", "chr", "(", "random", ".", "randrange", "(", "0", ",", "256", ")", ")", "for", "i", "in", "range", "(", "n", ")", "]", "else", ":", "L", "=", "[", "chr", "(", "random", ".", "randrange", "(", "0", ",", "256", ")", ")", ".", "encode", "(", "'utf-8'", ")", "for", "i", "in", "range", "(", "n", ")", "]", "return", "''", ".", "join", "(", "L", ")" ]
return n random bytes .
train
false
17,129
@contextmanager def captured(disallow_stderr=True): import sys stdout = sys.stdout stderr = sys.stderr sys.stdout = outfile = StringIO() sys.stderr = errfile = StringIO() c = CapturedText() try: (yield c) finally: c.stdout = outfile.getvalue() c.stderr = strip_expected(errfile.getvalue()) sys.stdout = stdout sys.stderr = stderr if (disallow_stderr and c.stderr): raise Exception(('Got stderr output: %s' % c.stderr))
[ "@", "contextmanager", "def", "captured", "(", "disallow_stderr", "=", "True", ")", ":", "import", "sys", "stdout", "=", "sys", ".", "stdout", "stderr", "=", "sys", ".", "stderr", "sys", ".", "stdout", "=", "outfile", "=", "StringIO", "(", ")", "sys", ".", "stderr", "=", "errfile", "=", "StringIO", "(", ")", "c", "=", "CapturedText", "(", ")", "try", ":", "(", "yield", "c", ")", "finally", ":", "c", ".", "stdout", "=", "outfile", ".", "getvalue", "(", ")", "c", ".", "stderr", "=", "strip_expected", "(", "errfile", ".", "getvalue", "(", ")", ")", "sys", ".", "stdout", "=", "stdout", "sys", ".", "stderr", "=", "stderr", "if", "(", "disallow_stderr", "and", "c", ".", "stderr", ")", ":", "raise", "Exception", "(", "(", "'Got stderr output: %s'", "%", "c", ".", "stderr", ")", ")" ]
context manager to capture the printed output of the code in the with block bind the context manager to a variable using as and the result will be in the stdout property .
train
false
17,132
def user_config_files(): return filter(os.path.exists, map(os.path.expanduser, config_files))
[ "def", "user_config_files", "(", ")", ":", "return", "filter", "(", "os", ".", "path", ".", "exists", ",", "map", "(", "os", ".", "path", ".", "expanduser", ",", "config_files", ")", ")" ]
return path to any existing user config files .
train
true
17,133
def file_size(session, ds_browser, ds_path, file_name): client_factory = session.vim.client.factory search_spec = search_datastore_spec(client_factory, file_name) search_task = session._call_method(session.vim, 'SearchDatastore_Task', ds_browser, datastorePath=str(ds_path), searchSpec=search_spec) task_info = session._wait_for_task(search_task) if hasattr(task_info.result, 'file'): return task_info.result.file[0].fileSize
[ "def", "file_size", "(", "session", ",", "ds_browser", ",", "ds_path", ",", "file_name", ")", ":", "client_factory", "=", "session", ".", "vim", ".", "client", ".", "factory", "search_spec", "=", "search_datastore_spec", "(", "client_factory", ",", "file_name", ")", "search_task", "=", "session", ".", "_call_method", "(", "session", ".", "vim", ",", "'SearchDatastore_Task'", ",", "ds_browser", ",", "datastorePath", "=", "str", "(", "ds_path", ")", ",", "searchSpec", "=", "search_spec", ")", "task_info", "=", "session", ".", "_wait_for_task", "(", "search_task", ")", "if", "hasattr", "(", "task_info", ".", "result", ",", "'file'", ")", ":", "return", "task_info", ".", "result", ".", "file", "[", "0", "]", ".", "fileSize" ]
obtains the size of a given file .
train
false
17,135
def get_generated_by_for_biom_tables(): return ('QIIME ' + get_qiime_library_version())
[ "def", "get_generated_by_for_biom_tables", "(", ")", ":", "return", "(", "'QIIME '", "+", "get_qiime_library_version", "(", ")", ")" ]
returns a "generated by" string for use when creating biom tables returns str .
train
false
17,136
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs): from matplotlib import rcParams if ((not fig.axes) and (not fig.lines)): return dpi = fig.dpi if (fmt == 'retina'): dpi = (dpi * 2) fmt = 'png' kw = {'format': fmt, 'facecolor': fig.get_facecolor(), 'edgecolor': fig.get_edgecolor(), 'dpi': dpi, 'bbox_inches': bbox_inches} kw.update(kwargs) bytes_io = BytesIO() fig.canvas.print_figure(bytes_io, **kw) data = bytes_io.getvalue() if (fmt == 'svg'): data = data.decode('utf-8') return data
[ "def", "print_figure", "(", "fig", ",", "fmt", "=", "'png'", ",", "bbox_inches", "=", "'tight'", ",", "**", "kwargs", ")", ":", "from", "matplotlib", "import", "rcParams", "if", "(", "(", "not", "fig", ".", "axes", ")", "and", "(", "not", "fig", ".", "lines", ")", ")", ":", "return", "dpi", "=", "fig", ".", "dpi", "if", "(", "fmt", "==", "'retina'", ")", ":", "dpi", "=", "(", "dpi", "*", "2", ")", "fmt", "=", "'png'", "kw", "=", "{", "'format'", ":", "fmt", ",", "'facecolor'", ":", "fig", ".", "get_facecolor", "(", ")", ",", "'edgecolor'", ":", "fig", ".", "get_edgecolor", "(", ")", ",", "'dpi'", ":", "dpi", ",", "'bbox_inches'", ":", "bbox_inches", "}", "kw", ".", "update", "(", "kwargs", ")", "bytes_io", "=", "BytesIO", "(", ")", "fig", ".", "canvas", ".", "print_figure", "(", "bytes_io", ",", "**", "kw", ")", "data", "=", "bytes_io", ".", "getvalue", "(", ")", "if", "(", "fmt", "==", "'svg'", ")", ":", "data", "=", "data", ".", "decode", "(", "'utf-8'", ")", "return", "data" ]
print a figure to an image .
train
false
17,137
def base_msg_type(type_): if (type_ is None): return None if ('[' in type_): return type_[:type_.find('[')] return type_
[ "def", "base_msg_type", "(", "type_", ")", ":", "if", "(", "type_", "is", "None", ")", ":", "return", "None", "if", "(", "'['", "in", "type_", ")", ":", "return", "type_", "[", ":", "type_", ".", "find", "(", "'['", ")", "]", "return", "type_" ]
compute the base data type .
train
false
17,139
def output(): return s3_rest_controller()
[ "def", "output", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
print out via pretty print .
train
false
17,140
def clear_caches(): global FS_CACHE, MR_CACHE old = (FS_CACHE, MR_CACHE) (FS_CACHE, MR_CACHE) = (None, None) return old
[ "def", "clear_caches", "(", ")", ":", "global", "FS_CACHE", ",", "MR_CACHE", "old", "=", "(", "FS_CACHE", ",", "MR_CACHE", ")", "(", "FS_CACHE", ",", "MR_CACHE", ")", "=", "(", "None", ",", "None", ")", "return", "old" ]
jinja2 keeps internal caches for environments and lexers .
train
false
17,141
def sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False): return corpus_bleu([references], [hypothesis], weights, smoothing_function, auto_reweigh)
[ "def", "sentence_bleu", "(", "references", ",", "hypothesis", ",", "weights", "=", "(", "0.25", ",", "0.25", ",", "0.25", ",", "0.25", ")", ",", "smoothing_function", "=", "None", ",", "auto_reweigh", "=", "False", ")", ":", "return", "corpus_bleu", "(", "[", "references", "]", ",", "[", "hypothesis", "]", ",", "weights", ",", "smoothing_function", ",", "auto_reweigh", ")" ]
calculate bleu score from papineni .
train
false
17,142
def follow_url(parser, token): bits = token.split_contents() if (len(bits) != 2): raise TemplateSyntaxError('Accepted format {% follow_url [instance] %}') else: return DisplayActivityFollowUrl(bits[1])
[ "def", "follow_url", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "(", "len", "(", "bits", ")", "!=", "2", ")", ":", "raise", "TemplateSyntaxError", "(", "'Accepted format {% follow_url [instance] %}'", ")", "else", ":", "return", "DisplayActivityFollowUrl", "(", "bits", "[", "1", "]", ")" ]
renders the url of the follow view for a particular actor instance <a href="{% follow_url other_user %}"> {% if request .
train
false
17,143
def must_be_valid_project(func=None, retractions_valid=False): def must_be_valid_project_inner(func): @functools.wraps(func) def wrapped(*args, **kwargs): _inject_nodes(kwargs) if getattr(kwargs['node'], 'is_collection', True): raise HTTPError(http.NOT_FOUND) if ((not retractions_valid) and getattr(kwargs['node'].retraction, 'is_retracted', False)): raise HTTPError(http.BAD_REQUEST, data=dict(message_long='Viewing withdrawn registrations is not permitted')) else: return func(*args, **kwargs) return wrapped if func: return must_be_valid_project_inner(func) return must_be_valid_project_inner
[ "def", "must_be_valid_project", "(", "func", "=", "None", ",", "retractions_valid", "=", "False", ")", ":", "def", "must_be_valid_project_inner", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "**", "kwargs", ")", ":", "_inject_nodes", "(", "kwargs", ")", "if", "getattr", "(", "kwargs", "[", "'node'", "]", ",", "'is_collection'", ",", "True", ")", ":", "raise", "HTTPError", "(", "http", ".", "NOT_FOUND", ")", "if", "(", "(", "not", "retractions_valid", ")", "and", "getattr", "(", "kwargs", "[", "'node'", "]", ".", "retraction", ",", "'is_retracted'", ",", "False", ")", ")", ":", "raise", "HTTPError", "(", "http", ".", "BAD_REQUEST", ",", "data", "=", "dict", "(", "message_long", "=", "'Viewing withdrawn registrations is not permitted'", ")", ")", "else", ":", "return", "func", "(", "*", "args", ",", "**", "kwargs", ")", "return", "wrapped", "if", "func", ":", "return", "must_be_valid_project_inner", "(", "func", ")", "return", "must_be_valid_project_inner" ]
ensures permissions to retractions are never implicitly granted .
train
false
17,144
@cli_app.command('create-app') @click.option('--name', prompt='Your new app name', help='Your application name, directory will have this name') @click.option('--engine', prompt='Your engine type, SQLAlchemy or MongoEngine', type=click.Choice(['SQLAlchemy', 'MongoEngine']), default='SQLAlchemy', help='Write your engine type') def create_app(name, engine): try: if (engine.lower() == 'sqlalchemy'): url = urlopen(SQLA_REPO_URL) dirname = 'Flask-AppBuilder-Skeleton-master' elif (engine.lower() == 'mongoengine'): url = urlopen(MONGOENGIE_REPO_URL) dirname = 'Flask-AppBuilder-Skeleton-me-master' zipfile = ZipFile(BytesIO(url.read())) zipfile.extractall() os.rename(dirname, name) click.echo(click.style('Downloaded the skeleton app, good coding!', fg='green')) return True except Exception as e: click.echo(click.style('Something went wrong {0}'.format(e), fg='red')) if (engine.lower() == 'sqlalchemy'): click.echo(click.style('Try downloading from {0}'.format(SQLA_REPO_URL), fg='green')) elif (engine.lower() == 'mongoengine'): click.echo(click.style('Try downloading from {0}'.format(MONGOENGIE_REPO_URL), fg='green')) return False
[ "@", "cli_app", ".", "command", "(", "'create-app'", ")", "@", "click", ".", "option", "(", "'--name'", ",", "prompt", "=", "'Your new app name'", ",", "help", "=", "'Your application name, directory will have this name'", ")", "@", "click", ".", "option", "(", "'--engine'", ",", "prompt", "=", "'Your engine type, SQLAlchemy or MongoEngine'", ",", "type", "=", "click", ".", "Choice", "(", "[", "'SQLAlchemy'", ",", "'MongoEngine'", "]", ")", ",", "default", "=", "'SQLAlchemy'", ",", "help", "=", "'Write your engine type'", ")", "def", "create_app", "(", "name", ",", "engine", ")", ":", "try", ":", "if", "(", "engine", ".", "lower", "(", ")", "==", "'sqlalchemy'", ")", ":", "url", "=", "urlopen", "(", "SQLA_REPO_URL", ")", "dirname", "=", "'Flask-AppBuilder-Skeleton-master'", "elif", "(", "engine", ".", "lower", "(", ")", "==", "'mongoengine'", ")", ":", "url", "=", "urlopen", "(", "MONGOENGIE_REPO_URL", ")", "dirname", "=", "'Flask-AppBuilder-Skeleton-me-master'", "zipfile", "=", "ZipFile", "(", "BytesIO", "(", "url", ".", "read", "(", ")", ")", ")", "zipfile", ".", "extractall", "(", ")", "os", ".", "rename", "(", "dirname", ",", "name", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "'Downloaded the skeleton app, good coding!'", ",", "fg", "=", "'green'", ")", ")", "return", "True", "except", "Exception", "as", "e", ":", "click", ".", "echo", "(", "click", ".", "style", "(", "'Something went wrong {0}'", ".", "format", "(", "e", ")", ",", "fg", "=", "'red'", ")", ")", "if", "(", "engine", ".", "lower", "(", ")", "==", "'sqlalchemy'", ")", ":", "click", ".", "echo", "(", "click", ".", "style", "(", "'Try downloading from {0}'", ".", "format", "(", "SQLA_REPO_URL", ")", ",", "fg", "=", "'green'", ")", ")", "elif", "(", "engine", ".", "lower", "(", ")", "==", "'mongoengine'", ")", ":", "click", ".", "echo", "(", "click", ".", "style", "(", "'Try downloading from {0}'", ".", "format", "(", "MONGOENGIE_REPO_URL", ")", ",", "fg", "=", "'green'", ")", ")", "return", "False" ]
creates soap services application and distribute flask config into user con defined context for each method call .
train
false
17,145
def rewrap(text, width=None): if (width is None): width = 80 text = textwrap.dedent(text) def needs_wrapping(line): return (not line[0].isspace()) wrapped_text = '' for (do_wrap, lines) in itertools.groupby(text.splitlines(True), key=needs_wrapping): paragraph = ''.join(lines) if do_wrap: paragraph = textwrap.fill(paragraph, width) wrapped_text += paragraph return wrapped_text
[ "def", "rewrap", "(", "text", ",", "width", "=", "None", ")", ":", "if", "(", "width", "is", "None", ")", ":", "width", "=", "80", "text", "=", "textwrap", ".", "dedent", "(", "text", ")", "def", "needs_wrapping", "(", "line", ")", ":", "return", "(", "not", "line", "[", "0", "]", ".", "isspace", "(", ")", ")", "wrapped_text", "=", "''", "for", "(", "do_wrap", ",", "lines", ")", "in", "itertools", ".", "groupby", "(", "text", ".", "splitlines", "(", "True", ")", ",", "key", "=", "needs_wrapping", ")", ":", "paragraph", "=", "''", ".", "join", "(", "lines", ")", "if", "do_wrap", ":", "paragraph", "=", "textwrap", ".", "fill", "(", "paragraph", ",", "width", ")", "wrapped_text", "+=", "paragraph", "return", "wrapped_text" ]
rewrap text for output to the console .
train
true
17,146
def collect(resource_obj, list_kwargs, name_filter, before_str=None): result = [] request = resource_obj.list(**list_kwargs) while request: response = request.execute() result.extend(__filter_items(response.get('items', []), name_filter, before_str)) try: request = resource_obj.list_next(request, response) except AttributeError: request = None return result
[ "def", "collect", "(", "resource_obj", ",", "list_kwargs", ",", "name_filter", ",", "before_str", "=", "None", ")", ":", "result", "=", "[", "]", "request", "=", "resource_obj", ".", "list", "(", "**", "list_kwargs", ")", "while", "request", ":", "response", "=", "request", ".", "execute", "(", ")", "result", ".", "extend", "(", "__filter_items", "(", "response", ".", "get", "(", "'items'", ",", "[", "]", ")", ",", "name_filter", ",", "before_str", ")", ")", "try", ":", "request", "=", "resource_obj", ".", "list_next", "(", "request", ",", "response", ")", "except", "AttributeError", ":", "request", "=", "None", "return", "result" ]
instead of performing bulk delete .
train
false
17,147
def plugin_validate(plugin, context, data_dict, schema, action): if hasattr(plugin, 'validate'): result = plugin.validate(context, data_dict, schema, action) if (result is not None): return result return toolkit.navl_validate(data_dict, schema, context)
[ "def", "plugin_validate", "(", "plugin", ",", "context", ",", "data_dict", ",", "schema", ",", "action", ")", ":", "if", "hasattr", "(", "plugin", ",", "'validate'", ")", ":", "result", "=", "plugin", ".", "validate", "(", "context", ",", "data_dict", ",", "schema", ",", "action", ")", "if", "(", "result", "is", "not", "None", ")", ":", "return", "result", "return", "toolkit", ".", "navl_validate", "(", "data_dict", ",", "schema", ",", "context", ")" ]
backwards compatibility with 2 .
train
false
17,148
def serialize_type_with_template(oftype, accessor, types): template = Template(type_description(oftype, types)['template']) mapping = {'ARG': accessor} return template.substitute(mapping)
[ "def", "serialize_type_with_template", "(", "oftype", ",", "accessor", ",", "types", ")", ":", "template", "=", "Template", "(", "type_description", "(", "oftype", ",", "types", ")", "[", "'template'", "]", ")", "mapping", "=", "{", "'ARG'", ":", "accessor", "}", "return", "template", ".", "substitute", "(", "mapping", ")" ]
returns a serialization template for the given type with all placeholders replaced with the actual values .
train
false
17,149
@pytest.mark.parametrize('qversion, version, op, expected', [('5.4.0', '5.4.0', operator.ge, True), ('5.4.0', '5.4.0', operator.eq, True), ('5.4.0', '5.4', operator.eq, True), ('5.4.1', '5.4', operator.ge, True), ('5.3.2', '5.4', operator.ge, False), ('5.3.0', '5.3.2', operator.ge, False)]) def test_version_check(monkeypatch, qversion, version, op, expected): monkeypatch.setattr('qutebrowser.utils.qtutils.qVersion', (lambda : qversion)) assert (qtutils.version_check(version, op) == expected)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'qversion, version, op, expected'", ",", "[", "(", "'5.4.0'", ",", "'5.4.0'", ",", "operator", ".", "ge", ",", "True", ")", ",", "(", "'5.4.0'", ",", "'5.4.0'", ",", "operator", ".", "eq", ",", "True", ")", ",", "(", "'5.4.0'", ",", "'5.4'", ",", "operator", ".", "eq", ",", "True", ")", ",", "(", "'5.4.1'", ",", "'5.4'", ",", "operator", ".", "ge", ",", "True", ")", ",", "(", "'5.3.2'", ",", "'5.4'", ",", "operator", ".", "ge", ",", "False", ")", ",", "(", "'5.3.0'", ",", "'5.3.2'", ",", "operator", ".", "ge", ",", "False", ")", "]", ")", "def", "test_version_check", "(", "monkeypatch", ",", "qversion", ",", "version", ",", "op", ",", "expected", ")", ":", "monkeypatch", ".", "setattr", "(", "'qutebrowser.utils.qtutils.qVersion'", ",", "(", "lambda", ":", "qversion", ")", ")", "assert", "(", "qtutils", ".", "version_check", "(", "version", ",", "op", ")", "==", "expected", ")" ]
test for version_check() .
train
false
17,153
@pytest.fixture(scope='session') def qapp(qapp): qapp.setApplicationName('qute_test') return qapp
[ "@", "pytest", ".", "fixture", "(", "scope", "=", "'session'", ")", "def", "qapp", "(", "qapp", ")", ":", "qapp", ".", "setApplicationName", "(", "'qute_test'", ")", "return", "qapp" ]
change the name of the qapplication instance .
train
false
17,156
def _xml_to_dict(xmltree): if (sys.version_info < (2, 7)): children_len = len(xmltree.getchildren()) else: children_len = len(xmltree) if (children_len < 1): name = xmltree.tag if ('}' in name): comps = name.split('}') name = comps[1] return {name: xmltree.text} xmldict = {} for item in xmltree: name = item.tag if ('}' in name): comps = name.split('}') name = comps[1] if (name not in xmldict): if (sys.version_info < (2, 7)): children_len = len(item.getchildren()) else: children_len = len(item) if (children_len > 0): xmldict[name] = _xml_to_dict(item) else: xmldict[name] = item.text else: if (not isinstance(xmldict[name], list)): tempvar = xmldict[name] xmldict[name] = [] xmldict[name].append(tempvar) xmldict[name].append(_xml_to_dict(item)) return xmldict
[ "def", "_xml_to_dict", "(", "xmltree", ")", ":", "if", "(", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ")", ":", "children_len", "=", "len", "(", "xmltree", ".", "getchildren", "(", ")", ")", "else", ":", "children_len", "=", "len", "(", "xmltree", ")", "if", "(", "children_len", "<", "1", ")", ":", "name", "=", "xmltree", ".", "tag", "if", "(", "'}'", "in", "name", ")", ":", "comps", "=", "name", ".", "split", "(", "'}'", ")", "name", "=", "comps", "[", "1", "]", "return", "{", "name", ":", "xmltree", ".", "text", "}", "xmldict", "=", "{", "}", "for", "item", "in", "xmltree", ":", "name", "=", "item", ".", "tag", "if", "(", "'}'", "in", "name", ")", ":", "comps", "=", "name", ".", "split", "(", "'}'", ")", "name", "=", "comps", "[", "1", "]", "if", "(", "name", "not", "in", "xmldict", ")", ":", "if", "(", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ")", ":", "children_len", "=", "len", "(", "item", ".", "getchildren", "(", ")", ")", "else", ":", "children_len", "=", "len", "(", "item", ")", "if", "(", "children_len", ">", "0", ")", ":", "xmldict", "[", "name", "]", "=", "_xml_to_dict", "(", "item", ")", "else", ":", "xmldict", "[", "name", "]", "=", "item", ".", "text", "else", ":", "if", "(", "not", "isinstance", "(", "xmldict", "[", "name", "]", ",", "list", ")", ")", ":", "tempvar", "=", "xmldict", "[", "name", "]", "xmldict", "[", "name", "]", "=", "[", "]", "xmldict", "[", "name", "]", ".", "append", "(", "tempvar", ")", "xmldict", "[", "name", "]", ".", "append", "(", "_xml_to_dict", "(", "item", ")", ")", "return", "xmldict" ]
convert an xml tree into a dict .
train
true
17,157
def retry_effect_with_timeout(effect, timeout, retry_wait=timedelta(seconds=1), backoff=True, time=time.time): class State(object, ): end_time = None wait_time = None def should_retry(exc_info): if (State.end_time is None): State.end_time = (time() + timeout) if (time() >= State.end_time): return Effect(Constant(False)) else: retry_delay = State.wait_time.total_seconds() effect = Effect(Delay(retry_delay)).on(success=(lambda x: Effect(Constant(True)))) if backoff: State.wait_time *= 2 return effect State.wait_time = retry_wait return retry(effect, should_retry)
[ "def", "retry_effect_with_timeout", "(", "effect", ",", "timeout", ",", "retry_wait", "=", "timedelta", "(", "seconds", "=", "1", ")", ",", "backoff", "=", "True", ",", "time", "=", "time", ".", "time", ")", ":", "class", "State", "(", "object", ",", ")", ":", "end_time", "=", "None", "wait_time", "=", "None", "def", "should_retry", "(", "exc_info", ")", ":", "if", "(", "State", ".", "end_time", "is", "None", ")", ":", "State", ".", "end_time", "=", "(", "time", "(", ")", "+", "timeout", ")", "if", "(", "time", "(", ")", ">=", "State", ".", "end_time", ")", ":", "return", "Effect", "(", "Constant", "(", "False", ")", ")", "else", ":", "retry_delay", "=", "State", ".", "wait_time", ".", "total_seconds", "(", ")", "effect", "=", "Effect", "(", "Delay", "(", "retry_delay", ")", ")", ".", "on", "(", "success", "=", "(", "lambda", "x", ":", "Effect", "(", "Constant", "(", "True", ")", ")", ")", ")", "if", "backoff", ":", "State", ".", "wait_time", "*=", "2", "return", "effect", "State", ".", "wait_time", "=", "retry_wait", "return", "retry", "(", "effect", ",", "should_retry", ")" ]
if effect fails .
train
false
17,159
def isListLike(value): return isinstance(value, (list, tuple, set, BigArray))
[ "def", "isListLike", "(", "value", ")", ":", "return", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ",", "set", ",", "BigArray", ")", ")" ]
returns true if the given value is a list-like instance .
train
false
17,160
def make_rule_key(prefix, rule, group_id, cidr_ip): if isinstance(rule, dict): (proto, from_port, to_port) = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')] if ((proto not in ['icmp', 'tcp', 'udp']) and (from_port == (-1)) and (to_port == (-1))): from_port = 'none' to_port = 'none' else: (proto, from_port, to_port) = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')] key = ('%s-%s-%s-%s-%s-%s' % (prefix, proto, from_port, to_port, group_id, cidr_ip)) return key.lower().replace('-none', '-None')
[ "def", "make_rule_key", "(", "prefix", ",", "rule", ",", "group_id", ",", "cidr_ip", ")", ":", "if", "isinstance", "(", "rule", ",", "dict", ")", ":", "(", "proto", ",", "from_port", ",", "to_port", ")", "=", "[", "rule", ".", "get", "(", "x", ",", "None", ")", "for", "x", "in", "(", "'proto'", ",", "'from_port'", ",", "'to_port'", ")", "]", "if", "(", "(", "proto", "not", "in", "[", "'icmp'", ",", "'tcp'", ",", "'udp'", "]", ")", "and", "(", "from_port", "==", "(", "-", "1", ")", ")", "and", "(", "to_port", "==", "(", "-", "1", ")", ")", ")", ":", "from_port", "=", "'none'", "to_port", "=", "'none'", "else", ":", "(", "proto", ",", "from_port", ",", "to_port", ")", "=", "[", "getattr", "(", "rule", ",", "x", ",", "None", ")", "for", "x", "in", "(", "'ip_protocol'", ",", "'from_port'", ",", "'to_port'", ")", "]", "key", "=", "(", "'%s-%s-%s-%s-%s-%s'", "%", "(", "prefix", ",", "proto", ",", "from_port", ",", "to_port", ",", "group_id", ",", "cidr_ip", ")", ")", "return", "key", ".", "lower", "(", ")", ".", "replace", "(", "'-none'", ",", "'-None'", ")" ]
creates a unique key for an individual group rule .
train
false
17,161
def build_dataloader(config, be, frcn_rois_per_img): dl = DataLoader(config, be) dl = TypeCast(dl, index=0, dtype=np.float32) dl = BGRMeanSubtract(dl, index=0, pixel_mean=util.FRCN_PIXEL_MEANS) dl = ObjectLocalization(dl, frcn_rois_per_img=frcn_rois_per_img) return dl
[ "def", "build_dataloader", "(", "config", ",", "be", ",", "frcn_rois_per_img", ")", ":", "dl", "=", "DataLoader", "(", "config", ",", "be", ")", "dl", "=", "TypeCast", "(", "dl", ",", "index", "=", "0", ",", "dtype", "=", "np", ".", "float32", ")", "dl", "=", "BGRMeanSubtract", "(", "dl", ",", "index", "=", "0", ",", "pixel_mean", "=", "util", ".", "FRCN_PIXEL_MEANS", ")", "dl", "=", "ObjectLocalization", "(", "dl", ",", "frcn_rois_per_img", "=", "frcn_rois_per_img", ")", "return", "dl" ]
builds the dataloader for the faster-rcnn network using our aeon loader .
train
false
17,162
@app.route('/send-multiple/<draft_id>', methods=['DELETE']) def multi_send_finish(draft_id): account = g.namespace.account if (account.discriminator == 'easaccount'): raise InputError('Multiple send is not supported for this provider.') valid_public_id(draft_id) draft = get_sending_draft(draft_id, g.namespace.id, g.db_session) if (not draft.is_sending): raise InputError('Invalid draft, not part of a multi-send transaction') if (not isinstance(account, GenericAccount)): try: with writable_connection_pool(account.id).get() as crispin_client: remote_delete_sent(crispin_client, account.id, draft.message_id_header, delete_multiple=True) except Exception: log_exception(sys.exc_info(), draft_public_id=draft.public_id) update_draft_on_send(account, draft, g.db_session) schedule_action('save_sent_email', draft, draft.namespace.id, g.db_session) return g.encoder.jsonify(draft)
[ "@", "app", ".", "route", "(", "'/send-multiple/<draft_id>'", ",", "methods", "=", "[", "'DELETE'", "]", ")", "def", "multi_send_finish", "(", "draft_id", ")", ":", "account", "=", "g", ".", "namespace", ".", "account", "if", "(", "account", ".", "discriminator", "==", "'easaccount'", ")", ":", "raise", "InputError", "(", "'Multiple send is not supported for this provider.'", ")", "valid_public_id", "(", "draft_id", ")", "draft", "=", "get_sending_draft", "(", "draft_id", ",", "g", ".", "namespace", ".", "id", ",", "g", ".", "db_session", ")", "if", "(", "not", "draft", ".", "is_sending", ")", ":", "raise", "InputError", "(", "'Invalid draft, not part of a multi-send transaction'", ")", "if", "(", "not", "isinstance", "(", "account", ",", "GenericAccount", ")", ")", ":", "try", ":", "with", "writable_connection_pool", "(", "account", ".", "id", ")", ".", "get", "(", ")", "as", "crispin_client", ":", "remote_delete_sent", "(", "crispin_client", ",", "account", ".", "id", ",", "draft", ".", "message_id_header", ",", "delete_multiple", "=", "True", ")", "except", "Exception", ":", "log_exception", "(", "sys", ".", "exc_info", "(", ")", ",", "draft_public_id", "=", "draft", ".", "public_id", ")", "update_draft_on_send", "(", "account", ",", "draft", ",", "g", ".", "db_session", ")", "schedule_action", "(", "'save_sent_email'", ",", "draft", ",", "draft", ".", "namespace", ".", "id", ",", "g", ".", "db_session", ")", "return", "g", ".", "encoder", ".", "jsonify", "(", "draft", ")" ]
closes out a multi-send session by marking the sending draft as sent and moving it to the users sent folder .
train
false
17,163
@register.render_tag def set_page_permissions(context, token): page = context[token.split_contents()[1]] model = page.get_content_model() try: opts = model._meta except AttributeError: if (model is None): error = _(u'Could not load the model for the following page, was it removed?') obj = page else: error = _(u'An error occured with the following class. Does it subclass Page directly?') obj = model.__class__.__name__ raise ImproperlyConfigured((error + (u" '%s'" % obj))) perm_name = ((opts.app_label + u'.%s_') + opts.object_name.lower()) request = context[u'request'] setattr(page, u'perms', {}) for perm_type in (u'add', u'change', u'delete'): perm = request.user.has_perm((perm_name % perm_type)) perm = (perm and getattr(model, (u'can_%s' % perm_type))(request)) page.perms[perm_type] = perm return u''
[ "@", "register", ".", "render_tag", "def", "set_page_permissions", "(", "context", ",", "token", ")", ":", "page", "=", "context", "[", "token", ".", "split_contents", "(", ")", "[", "1", "]", "]", "model", "=", "page", ".", "get_content_model", "(", ")", "try", ":", "opts", "=", "model", ".", "_meta", "except", "AttributeError", ":", "if", "(", "model", "is", "None", ")", ":", "error", "=", "_", "(", "u'Could not load the model for the following page, was it removed?'", ")", "obj", "=", "page", "else", ":", "error", "=", "_", "(", "u'An error occured with the following class. Does it subclass Page directly?'", ")", "obj", "=", "model", ".", "__class__", ".", "__name__", "raise", "ImproperlyConfigured", "(", "(", "error", "+", "(", "u\" '%s'\"", "%", "obj", ")", ")", ")", "perm_name", "=", "(", "(", "opts", ".", "app_label", "+", "u'.%s_'", ")", "+", "opts", ".", "object_name", ".", "lower", "(", ")", ")", "request", "=", "context", "[", "u'request'", "]", "setattr", "(", "page", ",", "u'perms'", ",", "{", "}", ")", "for", "perm_type", "in", "(", "u'add'", ",", "u'change'", ",", "u'delete'", ")", ":", "perm", "=", "request", ".", "user", ".", "has_perm", "(", "(", "perm_name", "%", "perm_type", ")", ")", "perm", "=", "(", "perm", "and", "getattr", "(", "model", ",", "(", "u'can_%s'", "%", "perm_type", ")", ")", "(", "request", ")", ")", "page", ".", "perms", "[", "perm_type", "]", "=", "perm", "return", "u''" ]
assigns a permissions dict to the given page instance .
train
false
17,164
@require_role('admin') def group_del(request): group_ids = request.GET.get('id', '') group_id_list = group_ids.split(',') for group_id in group_id_list: AssetGroup.objects.filter(id=group_id).delete() return HttpResponse(u'\u5220\u9664\u6210\u529f')
[ "@", "require_role", "(", "'admin'", ")", "def", "group_del", "(", "request", ")", ":", "group_ids", "=", "request", ".", "GET", ".", "get", "(", "'id'", ",", "''", ")", "group_id_list", "=", "group_ids", ".", "split", "(", "','", ")", "for", "group_id", "in", "group_id_list", ":", "AssetGroup", ".", "objects", ".", "filter", "(", "id", "=", "group_id", ")", ".", "delete", "(", ")", "return", "HttpResponse", "(", "u'\\u5220\\u9664\\u6210\\u529f'", ")" ]
del a group .
train
false
17,165
def safestr(obj, encoding='utf-8'): if isinstance(obj, unicode): return obj.encode(encoding) elif isinstance(obj, str): return obj elif hasattr(obj, 'next'): return itertools.imap(safestr, obj) else: return str(obj)
[ "def", "safestr", "(", "obj", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "isinstance", "(", "obj", ",", "unicode", ")", ":", "return", "obj", ".", "encode", "(", "encoding", ")", "elif", "isinstance", "(", "obj", ",", "str", ")", ":", "return", "obj", "elif", "hasattr", "(", "obj", ",", "'next'", ")", ":", "return", "itertools", ".", "imap", "(", "safestr", ",", "obj", ")", "else", ":", "return", "str", "(", "obj", ")" ]
converts any given object to utf-8 encoded string .
train
true
17,166
def _cache_id(minion_id, cache_file): try: with salt.utils.fopen(cache_file, 'w') as idf: idf.write(minion_id) except (IOError, OSError) as exc: log.error('Could not cache minion ID: {0}'.format(exc))
[ "def", "_cache_id", "(", "minion_id", ",", "cache_file", ")", ":", "try", ":", "with", "salt", ".", "utils", ".", "fopen", "(", "cache_file", ",", "'w'", ")", "as", "idf", ":", "idf", ".", "write", "(", "minion_id", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "exc", ":", "log", ".", "error", "(", "'Could not cache minion ID: {0}'", ".", "format", "(", "exc", ")", ")" ]
helper function .
train
false
17,168
def method_name_for_op(op, commute=False): if commute: return _ops_to_commuted_methods[op] return _ops_to_methods[op]
[ "def", "method_name_for_op", "(", "op", ",", "commute", "=", "False", ")", ":", "if", "commute", ":", "return", "_ops_to_commuted_methods", "[", "op", "]", "return", "_ops_to_methods", "[", "op", "]" ]
get the name of the python magic method corresponding to op .
train
false
17,169
def memstr_to_bytes(text): kilo = 1024 units = dict(K=kilo, M=(kilo ** 2), G=(kilo ** 3)) try: size = int((units[text[(-1)]] * float(text[:(-1)]))) except (KeyError, ValueError): raise ValueError(("Invalid literal for size give: %s (type %s) should be alike '10G', '500M', '50K'." % (text, type(text)))) return size
[ "def", "memstr_to_bytes", "(", "text", ")", ":", "kilo", "=", "1024", "units", "=", "dict", "(", "K", "=", "kilo", ",", "M", "=", "(", "kilo", "**", "2", ")", ",", "G", "=", "(", "kilo", "**", "3", ")", ")", "try", ":", "size", "=", "int", "(", "(", "units", "[", "text", "[", "(", "-", "1", ")", "]", "]", "*", "float", "(", "text", "[", ":", "(", "-", "1", ")", "]", ")", ")", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "(", "\"Invalid literal for size give: %s (type %s) should be alike '10G', '500M', '50K'.\"", "%", "(", "text", ",", "type", "(", "text", ")", ")", ")", ")", "return", "size" ]
convert a memory text to its value in bytes .
train
false
17,170
def snippet_seq(name, offset=0, count=1024): path = os.path.join(test_file_dir, 'snippets', ('%s.txt' % name)) with open(path, 'rb') as f: text = f.read().decode('utf-8') snippets = text.split('\n\n') (start, end) = (offset, (offset + count)) return tuple(snippets[start:end])
[ "def", "snippet_seq", "(", "name", ",", "offset", "=", "0", ",", "count", "=", "1024", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "test_file_dir", ",", "'snippets'", ",", "(", "'%s.txt'", "%", "name", ")", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "text", "=", "f", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "snippets", "=", "text", ".", "split", "(", "'\\n\\n'", ")", "(", "start", ",", "end", ")", "=", "(", "offset", ",", "(", "offset", "+", "count", ")", ")", "return", "tuple", "(", "snippets", "[", "start", ":", "end", "]", ")" ]
return a tuple containing the unicode text snippets read from the snippet file having *name* .
train
false
17,171
def check_internet(): try: urlopen('https://github.com') except URLError as err: return False return True
[ "def", "check_internet", "(", ")", ":", "try", ":", "urlopen", "(", "'https://github.com'", ")", "except", "URLError", "as", "err", ":", "return", "False", "return", "True" ]
check if internet is available .
train
false
17,172
def Root(): share = Share(None, None, '', None) r = Owner('').makeInterface({'_name': ''}, share, '') share.root_interface = r return r
[ "def", "Root", "(", ")", ":", "share", "=", "Share", "(", "None", ",", "None", ",", "''", ",", "None", ")", "r", "=", "Owner", "(", "''", ")", ".", "makeInterface", "(", "{", "'_name'", ":", "''", "}", ",", "share", ",", "''", ")", "share", ".", "root_interface", "=", "r", "return", "r" ]
create a new guppy root object .
train
false
17,173
def test_feature_max_length_on_step_with_table(): feature = Feature.from_string(FEATURE5) assert_equals(feature.max_length, 83)
[ "def", "test_feature_max_length_on_step_with_table", "(", ")", ":", "feature", "=", "Feature", ".", "from_string", "(", "FEATURE5", ")", "assert_equals", "(", "feature", ".", "max_length", ",", "83", ")" ]
the max length of a feature considering when the table of some of the steps is longer than the remaining things .
train
false
17,174
def python_2_unicode_compatible(klass): if (not six.PY3): klass.__unicode__ = klass.__str__ klass.__str__ = (lambda self: self.__unicode__().encode(u'utf-8')) return klass
[ "def", "python_2_unicode_compatible", "(", "klass", ")", ":", "if", "(", "not", "six", ".", "PY3", ")", ":", "klass", ".", "__unicode__", "=", "klass", ".", "__str__", "klass", ".", "__str__", "=", "(", "lambda", "self", ":", "self", ".", "__unicode__", "(", ")", ".", "encode", "(", "u'utf-8'", ")", ")", "return", "klass" ]
a decorator that defines __unicode__ and __str__ methods under python 2 .
train
true
17,175
def random_wipe(devs): global PROGRESS PROGRESS = 0.0 threads = [] sentinels = [] for dev in devs: thread = RandomWorker(dev) thread.start() threads.append(thread) sentinel = RandomSentinel(worker=thread) sentinel.start() sentinels.append(sentinel) tobreak = False while (not tobreak): progress = 0 numthreads = len(threads) tobreak = True for thread in threads: tobreak &= (not thread.is_alive()) progress += thread.progress() progress /= numthreads PROGRESS = progress time.sleep(1)
[ "def", "random_wipe", "(", "devs", ")", ":", "global", "PROGRESS", "PROGRESS", "=", "0.0", "threads", "=", "[", "]", "sentinels", "=", "[", "]", "for", "dev", "in", "devs", ":", "thread", "=", "RandomWorker", "(", "dev", ")", "thread", ".", "start", "(", ")", "threads", ".", "append", "(", "thread", ")", "sentinel", "=", "RandomSentinel", "(", "worker", "=", "thread", ")", "sentinel", ".", "start", "(", ")", "sentinels", ".", "append", "(", "sentinel", ")", "tobreak", "=", "False", "while", "(", "not", "tobreak", ")", ":", "progress", "=", "0", "numthreads", "=", "len", "(", "threads", ")", "tobreak", "=", "True", "for", "thread", "in", "threads", ":", "tobreak", "&=", "(", "not", "thread", ".", "is_alive", "(", ")", ")", "progress", "+=", "thread", ".", "progress", "(", ")", "progress", "/=", "numthreads", "PROGRESS", "=", "progress", "time", ".", "sleep", "(", "1", ")" ]
concurrently wipe devs using /dev/random .
train
false
17,176
def gen_list_name(): for name in _LIST_NAMES: (yield name) counter = itertools.count(1000) while True: (yield (b(str(next(counter))) + '\x00'))
[ "def", "gen_list_name", "(", ")", ":", "for", "name", "in", "_LIST_NAMES", ":", "(", "yield", "name", ")", "counter", "=", "itertools", ".", "count", "(", "1000", ")", "while", "True", ":", "(", "yield", "(", "b", "(", "str", "(", "next", "(", "counter", ")", ")", ")", "+", "'\\x00'", ")", ")" ]
generate "keys" for encoded lists in the sequence b"0" .
train
true
17,178
def verify_snapshot_list(test): if (not (str(type(test)) == "<class 'curator.snapshotlist.SnapshotList'>")): raise TypeError('Not an SnapshotList object. Type: {0}.'.format(type(test)))
[ "def", "verify_snapshot_list", "(", "test", ")", ":", "if", "(", "not", "(", "str", "(", "type", "(", "test", ")", ")", "==", "\"<class 'curator.snapshotlist.SnapshotList'>\"", ")", ")", ":", "raise", "TypeError", "(", "'Not an SnapshotList object. Type: {0}.'", ".", "format", "(", "type", "(", "test", ")", ")", ")" ]
test if test is a proper :class:curator .
train
false
17,180
@login_required def delete_answer(request, question_id, answer_id): answer = get_object_or_404(Answer, pk=answer_id, question=question_id) if (not answer.allows_delete(request.user)): raise PermissionDenied if (request.method == 'GET'): return render(request, 'questions/confirm_answer_delete.html', {'answer': answer}) log.warning(('User %s is deleting answer with id=%s' % (request.user, answer.id))) answer.delete() statsd.incr('questions.delete_answer') return HttpResponseRedirect(reverse('questions.details', args=[question_id]))
[ "@", "login_required", "def", "delete_answer", "(", "request", ",", "question_id", ",", "answer_id", ")", ":", "answer", "=", "get_object_or_404", "(", "Answer", ",", "pk", "=", "answer_id", ",", "question", "=", "question_id", ")", "if", "(", "not", "answer", ".", "allows_delete", "(", "request", ".", "user", ")", ")", ":", "raise", "PermissionDenied", "if", "(", "request", ".", "method", "==", "'GET'", ")", ":", "return", "render", "(", "request", ",", "'questions/confirm_answer_delete.html'", ",", "{", "'answer'", ":", "answer", "}", ")", "log", ".", "warning", "(", "(", "'User %s is deleting answer with id=%s'", "%", "(", "request", ".", "user", ",", "answer", ".", "id", ")", ")", ")", "answer", ".", "delete", "(", ")", "statsd", ".", "incr", "(", "'questions.delete_answer'", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'questions.details'", ",", "args", "=", "[", "question_id", "]", ")", ")" ]
delete an answer .
train
false
17,181
def list_topic_rules(topic=None, ruleDisabled=None, region=None, key=None, keyid=None, profile=None): try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) kwargs = {} if (topic is not None): kwargs['topic'] = topic if (ruleDisabled is not None): kwargs['ruleDisabled'] = ruleDisabled rules = [] for ret in salt.utils.boto3.paged_call(conn.list_topic_rules, marker_flag='nextToken', marker_arg='nextToken', **kwargs): rules.extend(ret['rules']) if (not bool(rules)): log.warning('No rules found') return {'rules': rules} except ClientError as e: return {'error': salt.utils.boto3.get_error(e)}
[ "def", "list_topic_rules", "(", "topic", "=", "None", ",", "ruleDisabled", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "kwargs", "=", "{", "}", "if", "(", "topic", "is", "not", "None", ")", ":", "kwargs", "[", "'topic'", "]", "=", "topic", "if", "(", "ruleDisabled", "is", "not", "None", ")", ":", "kwargs", "[", "'ruleDisabled'", "]", "=", "ruleDisabled", "rules", "=", "[", "]", "for", "ret", "in", "salt", ".", "utils", ".", "boto3", ".", "paged_call", "(", "conn", ".", "list_topic_rules", ",", "marker_flag", "=", "'nextToken'", ",", "marker_arg", "=", "'nextToken'", ",", "**", "kwargs", ")", ":", "rules", ".", "extend", "(", "ret", "[", "'rules'", "]", ")", "if", "(", "not", "bool", "(", "rules", ")", ")", ":", "log", ".", "warning", "(", "'No rules found'", ")", "return", "{", "'rules'", ":", "rules", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
list all rules returns list of rules cli example: .
train
false
17,182
@pytest.mark.cmd def test_test_checks_noargs(): with pytest.raises(CommandError) as e: call_command('test_checks') assert ('Either --unit or a pair of --source and --target must be provided.' in str(e))
[ "@", "pytest", ".", "mark", ".", "cmd", "def", "test_test_checks_noargs", "(", ")", ":", "with", "pytest", ".", "raises", "(", "CommandError", ")", "as", "e", ":", "call_command", "(", "'test_checks'", ")", "assert", "(", "'Either --unit or a pair of --source and --target must be provided.'", "in", "str", "(", "e", ")", ")" ]
no args should fail wanting either --unit or --source .
train
false
17,183
def gf_compose(f, g, p, K): if (len(g) <= 1): return gf_strip([gf_eval(f, gf_LC(g, K), p, K)]) if (not f): return [] h = [f[0]] for c in f[1:]: h = gf_mul(h, g, p, K) h = gf_add_ground(h, c, p, K) return h
[ "def", "gf_compose", "(", "f", ",", "g", ",", "p", ",", "K", ")", ":", "if", "(", "len", "(", "g", ")", "<=", "1", ")", ":", "return", "gf_strip", "(", "[", "gf_eval", "(", "f", ",", "gf_LC", "(", "g", ",", "K", ")", ",", "p", ",", "K", ")", "]", ")", "if", "(", "not", "f", ")", ":", "return", "[", "]", "h", "=", "[", "f", "[", "0", "]", "]", "for", "c", "in", "f", "[", "1", ":", "]", ":", "h", "=", "gf_mul", "(", "h", ",", "g", ",", "p", ",", "K", ")", "h", "=", "gf_add_ground", "(", "h", ",", "c", ",", "p", ",", "K", ")", "return", "h" ]
compute polynomial composition f(g) in gf(p)[x] .
train
false
17,184
def print_info(): categories = {addrspace.BaseAddressSpace: 'Address Spaces', commands.Command: 'Plugins', obj.Profile: 'Profiles', scan.ScannerCheck: 'Scanner Checks'} for (c, n) in sorted(categories.items()): lower = (c == commands.Command) plugins = registry.get_plugin_classes(c, lower=lower) print '\n' print '{0}'.format(n) print ('-' * len(n)) result = [] max_length = 0 for (clsname, cls) in sorted(plugins.items()): try: doc = cls.__doc__.strip().splitlines()[0] except AttributeError: doc = 'No docs' result.append((clsname, doc)) max_length = max(len(clsname), max_length) for (name, doc) in result: print '{0:{2}} - {1:15}'.format(name, doc, max_length)
[ "def", "print_info", "(", ")", ":", "categories", "=", "{", "addrspace", ".", "BaseAddressSpace", ":", "'Address Spaces'", ",", "commands", ".", "Command", ":", "'Plugins'", ",", "obj", ".", "Profile", ":", "'Profiles'", ",", "scan", ".", "ScannerCheck", ":", "'Scanner Checks'", "}", "for", "(", "c", ",", "n", ")", "in", "sorted", "(", "categories", ".", "items", "(", ")", ")", ":", "lower", "=", "(", "c", "==", "commands", ".", "Command", ")", "plugins", "=", "registry", ".", "get_plugin_classes", "(", "c", ",", "lower", "=", "lower", ")", "print", "'\\n'", "print", "'{0}'", ".", "format", "(", "n", ")", "print", "(", "'-'", "*", "len", "(", "n", ")", ")", "result", "=", "[", "]", "max_length", "=", "0", "for", "(", "clsname", ",", "cls", ")", "in", "sorted", "(", "plugins", ".", "items", "(", ")", ")", ":", "try", ":", "doc", "=", "cls", ".", "__doc__", ".", "strip", "(", ")", ".", "splitlines", "(", ")", "[", "0", "]", "except", "AttributeError", ":", "doc", "=", "'No docs'", "result", ".", "append", "(", "(", "clsname", ",", "doc", ")", ")", "max_length", "=", "max", "(", "len", "(", "clsname", ")", ",", "max_length", ")", "for", "(", "name", ",", "doc", ")", "in", "result", ":", "print", "'{0:{2}} - {1:15}'", ".", "format", "(", "name", ",", "doc", ",", "max_length", ")" ]
returns the results .
train
false
17,185
@task @cmdopts([BOKCHOY_COVERAGERC, BOKCHOY_DEFAULT_STORE, BOKCHOY_DEFAULT_STORE_DEPR]) @timed def start_servers(options): coveragerc = options.get('coveragerc', Env.BOK_CHOY_COVERAGERC) def start_server(cmd, logfile, cwd=None): '\n Starts a single server.\n ' print cmd, logfile run_background_process(cmd, out_log=logfile, err_log=logfile, cwd=cwd) for (service, info) in Env.BOK_CHOY_SERVERS.iteritems(): address = '0.0.0.0:{}'.format(info['port']) cmd = 'DEFAULT_STORE={default_store} coverage run --rcfile={coveragerc} -m manage {service} --settings bok_choy runserver {address} --traceback --noreload'.format(default_store=options.default_store, coveragerc=coveragerc, service=service, address=address) start_server(cmd, info['log']) for (service, info) in Env.BOK_CHOY_STUBS.iteritems(): cmd = 'python -m stubs.start {service} {port} {config}'.format(service=service, port=info['port'], config=info.get('config', '')) start_server(cmd, info['log'], cwd=Env.BOK_CHOY_STUB_DIR)
[ "@", "task", "@", "cmdopts", "(", "[", "BOKCHOY_COVERAGERC", ",", "BOKCHOY_DEFAULT_STORE", ",", "BOKCHOY_DEFAULT_STORE_DEPR", "]", ")", "@", "timed", "def", "start_servers", "(", "options", ")", ":", "coveragerc", "=", "options", ".", "get", "(", "'coveragerc'", ",", "Env", ".", "BOK_CHOY_COVERAGERC", ")", "def", "start_server", "(", "cmd", ",", "logfile", ",", "cwd", "=", "None", ")", ":", "print", "cmd", ",", "logfile", "run_background_process", "(", "cmd", ",", "out_log", "=", "logfile", ",", "err_log", "=", "logfile", ",", "cwd", "=", "cwd", ")", "for", "(", "service", ",", "info", ")", "in", "Env", ".", "BOK_CHOY_SERVERS", ".", "iteritems", "(", ")", ":", "address", "=", "'0.0.0.0:{}'", ".", "format", "(", "info", "[", "'port'", "]", ")", "cmd", "=", "'DEFAULT_STORE={default_store} coverage run --rcfile={coveragerc} -m manage {service} --settings bok_choy runserver {address} --traceback --noreload'", ".", "format", "(", "default_store", "=", "options", ".", "default_store", ",", "coveragerc", "=", "coveragerc", ",", "service", "=", "service", ",", "address", "=", "address", ")", "start_server", "(", "cmd", ",", "info", "[", "'log'", "]", ")", "for", "(", "service", ",", "info", ")", "in", "Env", ".", "BOK_CHOY_STUBS", ".", "iteritems", "(", ")", ":", "cmd", "=", "'python -m stubs.start {service} {port} {config}'", ".", "format", "(", "service", "=", "service", ",", "port", "=", "info", "[", "'port'", "]", ",", "config", "=", "info", ".", "get", "(", "'config'", ",", "''", ")", ")", "start_server", "(", "cmd", ",", "info", "[", "'log'", "]", ",", "cwd", "=", "Env", ".", "BOK_CHOY_STUB_DIR", ")" ]
start the servers we will run tests on .
train
false
17,188
def show_bokehjs(bokehjs_action, develop=False): print() if develop: print('Installed Bokeh for DEVELOPMENT:') else: print('Installed Bokeh:') if (bokehjs_action in ['built', 'installed']): print((' - using %s built BokehJS from bokehjs/build\n' % (bright(yellow('NEWLY')) if (bokehjs_action == 'built') else bright(yellow('PREVIOUSLY'))))) else: print((" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow('PACKAGED')))) print()
[ "def", "show_bokehjs", "(", "bokehjs_action", ",", "develop", "=", "False", ")", ":", "print", "(", ")", "if", "develop", ":", "print", "(", "'Installed Bokeh for DEVELOPMENT:'", ")", "else", ":", "print", "(", "'Installed Bokeh:'", ")", "if", "(", "bokehjs_action", "in", "[", "'built'", ",", "'installed'", "]", ")", ":", "print", "(", "(", "' - using %s built BokehJS from bokehjs/build\\n'", "%", "(", "bright", "(", "yellow", "(", "'NEWLY'", ")", ")", "if", "(", "bokehjs_action", "==", "'built'", ")", "else", "bright", "(", "yellow", "(", "'PREVIOUSLY'", ")", ")", ")", ")", ")", "else", ":", "print", "(", "(", "\" - using %s BokehJS, located in 'bokeh.server.static'\\n\"", "%", "bright", "(", "yellow", "(", "'PACKAGED'", ")", ")", ")", ")", "print", "(", ")" ]
print a useful report after setuptools output describing where and how bokehjs is installed .
train
true
17,189
def add_extension(module, name, code): code = int(code) if (not (1 <= code <= 2147483647)): raise ValueError, 'code out of range' key = (module, name) if ((_extension_registry.get(key) == code) and (_inverted_registry.get(code) == key)): return if (key in _extension_registry): raise ValueError(('key %s is already registered with code %s' % (key, _extension_registry[key]))) if (code in _inverted_registry): raise ValueError(('code %s is already in use for key %s' % (code, _inverted_registry[code]))) _extension_registry[key] = code _inverted_registry[code] = key
[ "def", "add_extension", "(", "module", ",", "name", ",", "code", ")", ":", "code", "=", "int", "(", "code", ")", "if", "(", "not", "(", "1", "<=", "code", "<=", "2147483647", ")", ")", ":", "raise", "ValueError", ",", "'code out of range'", "key", "=", "(", "module", ",", "name", ")", "if", "(", "(", "_extension_registry", ".", "get", "(", "key", ")", "==", "code", ")", "and", "(", "_inverted_registry", ".", "get", "(", "code", ")", "==", "key", ")", ")", ":", "return", "if", "(", "key", "in", "_extension_registry", ")", ":", "raise", "ValueError", "(", "(", "'key %s is already registered with code %s'", "%", "(", "key", ",", "_extension_registry", "[", "key", "]", ")", ")", ")", "if", "(", "code", "in", "_inverted_registry", ")", ":", "raise", "ValueError", "(", "(", "'code %s is already in use for key %s'", "%", "(", "code", ",", "_inverted_registry", "[", "code", "]", ")", ")", ")", "_extension_registry", "[", "key", "]", "=", "code", "_inverted_registry", "[", "code", "]", "=", "key" ]
register an extension code .
train
true
17,190
def cling_wrap(package_name, dir_name, **kw): resource = Requirement.parse(package_name) return Cling(resource_filename(resource, dir_name), **kw)
[ "def", "cling_wrap", "(", "package_name", ",", "dir_name", ",", "**", "kw", ")", ":", "resource", "=", "Requirement", ".", "parse", "(", "package_name", ")", "return", "Cling", "(", "resource_filename", "(", "resource", ",", "dir_name", ")", ",", "**", "kw", ")" ]
return a cling that serves from the given package and dir_name .
train
true
17,192
def create_tracking_context(user): context_tracker = tracker.get_tracker().resolve_context() return {'lms_user_id': user.id, 'lms_client_id': context_tracker.get('client_id'), 'lms_ip': context_tracker.get('ip')}
[ "def", "create_tracking_context", "(", "user", ")", ":", "context_tracker", "=", "tracker", ".", "get_tracker", "(", ")", ".", "resolve_context", "(", ")", "return", "{", "'lms_user_id'", ":", "user", ".", "id", ",", "'lms_client_id'", ":", "context_tracker", ".", "get", "(", "'client_id'", ")", ",", "'lms_ip'", ":", "context_tracker", ".", "get", "(", "'ip'", ")", "}" ]
assembles attributes from user and request objects to be sent along in ecommerce api calls for tracking purposes .
train
false
17,193
def is_cygwingcc(): out_string = check_output(['gcc', '-dumpmachine']) return out_string.strip().endswith('cygwin')
[ "def", "is_cygwingcc", "(", ")", ":", "out_string", "=", "check_output", "(", "[", "'gcc'", ",", "'-dumpmachine'", "]", ")", "return", "out_string", ".", "strip", "(", ")", ".", "endswith", "(", "'cygwin'", ")" ]
try to determine if the gcc that would be used is from cygwin .
train
false
17,194
def _importSeceditConfig(infdata): try: _d = uuid.uuid4().hex _tSdbfile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), 'salt-secedit-import-{0}.sdb'.format(_d)) _tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), 'salt-secedit-config-{0}.inf'.format(_d)) _ret = __salt__['file.remove'](_tSdbfile) _ret = __salt__['file.remove'](_tInfFile) _ret = __salt__['file.touch'](_tInfFile) _ret = __salt__['file.append'](_tInfFile, infdata) _ret = __salt__['cmd.run']('secedit /configure /db {0} /cfg {1}'.format(_tSdbfile, _tInfFile)) _ret = __salt__['file.remove'](_tSdbfile) _ret = __salt__['file.remove'](_tInfFile) return True except Exception as e: log.debug('error occurred while trying to import secedit data') return False
[ "def", "_importSeceditConfig", "(", "infdata", ")", ":", "try", ":", "_d", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "_tSdbfile", "=", "'{0}\\\\{1}'", ".", "format", "(", "__salt__", "[", "'config.get'", "]", "(", "'cachedir'", ")", ",", "'salt-secedit-import-{0}.sdb'", ".", "format", "(", "_d", ")", ")", "_tInfFile", "=", "'{0}\\\\{1}'", ".", "format", "(", "__salt__", "[", "'config.get'", "]", "(", "'cachedir'", ")", ",", "'salt-secedit-config-{0}.inf'", ".", "format", "(", "_d", ")", ")", "_ret", "=", "__salt__", "[", "'file.remove'", "]", "(", "_tSdbfile", ")", "_ret", "=", "__salt__", "[", "'file.remove'", "]", "(", "_tInfFile", ")", "_ret", "=", "__salt__", "[", "'file.touch'", "]", "(", "_tInfFile", ")", "_ret", "=", "__salt__", "[", "'file.append'", "]", "(", "_tInfFile", ",", "infdata", ")", "_ret", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'secedit /configure /db {0} /cfg {1}'", ".", "format", "(", "_tSdbfile", ",", "_tInfFile", ")", ")", "_ret", "=", "__salt__", "[", "'file.remove'", "]", "(", "_tSdbfile", ")", "_ret", "=", "__salt__", "[", "'file.remove'", "]", "(", "_tInfFile", ")", "return", "True", "except", "Exception", "as", "e", ":", "log", ".", "debug", "(", "'error occurred while trying to import secedit data'", ")", "return", "False" ]
helper function to write data to a temp file/run secedit to import policy/cleanup .
train
false
17,195
def OVFOSNameID(flavor): version = OSVersion(flavor) arch = archFor(flavor) if ('ubuntu' in version): map = {'i386': ('Ubuntu', 93), 'x86_64': ('Ubuntu 64-bit', 94)} else: map = {'i386': ('Linux', 36), 'x86_64': ('Linux 64-bit', 101)} (osname, osid) = map[arch] return (osname, osid)
[ "def", "OVFOSNameID", "(", "flavor", ")", ":", "version", "=", "OSVersion", "(", "flavor", ")", "arch", "=", "archFor", "(", "flavor", ")", "if", "(", "'ubuntu'", "in", "version", ")", ":", "map", "=", "{", "'i386'", ":", "(", "'Ubuntu'", ",", "93", ")", ",", "'x86_64'", ":", "(", "'Ubuntu 64-bit'", ",", "94", ")", "}", "else", ":", "map", "=", "{", "'i386'", ":", "(", "'Linux'", ",", "36", ")", ",", "'x86_64'", ":", "(", "'Linux 64-bit'", ",", "101", ")", "}", "(", "osname", ",", "osid", ")", "=", "map", "[", "arch", "]", "return", "(", "osname", ",", "osid", ")" ]
return ovf-specified for flavor .
train
false
17,196
def _template_func(setup, func): def inner(_it, _timer, _func=func): setup() _t0 = _timer() for _i in _it: _func() _t1 = _timer() return (_t1 - _t0) return inner
[ "def", "_template_func", "(", "setup", ",", "func", ")", ":", "def", "inner", "(", "_it", ",", "_timer", ",", "_func", "=", "func", ")", ":", "setup", "(", ")", "_t0", "=", "_timer", "(", ")", "for", "_i", "in", "_it", ":", "_func", "(", ")", "_t1", "=", "_timer", "(", ")", "return", "(", "_t1", "-", "_t0", ")", "return", "inner" ]
create a timer function .
train
false
17,199
def track_comment_created_event(request, course, comment, commentable_id, followed): obj_type = ('comment' if comment.get('parent_id') else 'response') event_name = _EVENT_NAME_TEMPLATE.format(obj_type=obj_type, action_name='created') event_data = {'discussion': {'id': comment.thread_id}, 'commentable_id': commentable_id, 'options': {'followed': followed}} parent_id = comment.get('parent_id') if parent_id: event_data['response'] = {'id': parent_id} track_created_event(request, event_name, course, comment, event_data)
[ "def", "track_comment_created_event", "(", "request", ",", "course", ",", "comment", ",", "commentable_id", ",", "followed", ")", ":", "obj_type", "=", "(", "'comment'", "if", "comment", ".", "get", "(", "'parent_id'", ")", "else", "'response'", ")", "event_name", "=", "_EVENT_NAME_TEMPLATE", ".", "format", "(", "obj_type", "=", "obj_type", ",", "action_name", "=", "'created'", ")", "event_data", "=", "{", "'discussion'", ":", "{", "'id'", ":", "comment", ".", "thread_id", "}", ",", "'commentable_id'", ":", "commentable_id", ",", "'options'", ":", "{", "'followed'", ":", "followed", "}", "}", "parent_id", "=", "comment", ".", "get", "(", "'parent_id'", ")", "if", "parent_id", ":", "event_data", "[", "'response'", "]", "=", "{", "'id'", ":", "parent_id", "}", "track_created_event", "(", "request", ",", "event_name", ",", "course", ",", "comment", ",", "event_data", ")" ]
send analytics event for a newly created response or comment .
train
false
17,200
def select_ip_version(host, port): if ((':' in host) and hasattr(socket, 'AF_INET6')): return socket.AF_INET6 return socket.AF_INET
[ "def", "select_ip_version", "(", "host", ",", "port", ")", ":", "if", "(", "(", "':'", "in", "host", ")", "and", "hasattr", "(", "socket", ",", "'AF_INET6'", ")", ")", ":", "return", "socket", ".", "AF_INET6", "return", "socket", ".", "AF_INET" ]
returns af_inet4 or af_inet6 depending on where to connect to .
train
true
17,201
def is_informational(status): return ((100 <= status) and (status <= 199))
[ "def", "is_informational", "(", "status", ")", ":", "return", "(", "(", "100", "<=", "status", ")", "and", "(", "status", "<=", "199", ")", ")" ]
check if http status code is informational .
train
false
17,203
def stack_context(stack_name, create_res=True, convergence=False): def stack_delete(test_fn): @six.wraps(test_fn) def wrapped_test(test_case, *args, **kwargs): def create_stack(): ctx = getattr(test_case, 'ctx', None) if (ctx is not None): stack = setup_stack(stack_name, ctx, create_res, convergence) setattr(test_case, 'stack', stack) def delete_stack(): stack = getattr(test_case, 'stack', None) if ((stack is not None) and (stack.id is not None)): clean_up_stack(stack, delete_res=create_res) create_stack() try: test_fn(test_case, *args, **kwargs) except Exception: (exc_class, exc_val, exc_tb) = sys.exc_info() try: delete_stack() finally: six.reraise(exc_class, exc_val, exc_tb) else: delete_stack() return wrapped_test return stack_delete
[ "def", "stack_context", "(", "stack_name", ",", "create_res", "=", "True", ",", "convergence", "=", "False", ")", ":", "def", "stack_delete", "(", "test_fn", ")", ":", "@", "six", ".", "wraps", "(", "test_fn", ")", "def", "wrapped_test", "(", "test_case", ",", "*", "args", ",", "**", "kwargs", ")", ":", "def", "create_stack", "(", ")", ":", "ctx", "=", "getattr", "(", "test_case", ",", "'ctx'", ",", "None", ")", "if", "(", "ctx", "is", "not", "None", ")", ":", "stack", "=", "setup_stack", "(", "stack_name", ",", "ctx", ",", "create_res", ",", "convergence", ")", "setattr", "(", "test_case", ",", "'stack'", ",", "stack", ")", "def", "delete_stack", "(", ")", ":", "stack", "=", "getattr", "(", "test_case", ",", "'stack'", ",", "None", ")", "if", "(", "(", "stack", "is", "not", "None", ")", "and", "(", "stack", ".", "id", "is", "not", "None", ")", ")", ":", "clean_up_stack", "(", "stack", ",", "delete_res", "=", "create_res", ")", "create_stack", "(", ")", "try", ":", "test_fn", "(", "test_case", ",", "*", "args", ",", "**", "kwargs", ")", "except", "Exception", ":", "(", "exc_class", ",", "exc_val", ",", "exc_tb", ")", "=", "sys", ".", "exc_info", "(", ")", "try", ":", "delete_stack", "(", ")", "finally", ":", "six", ".", "reraise", "(", "exc_class", ",", "exc_val", ",", "exc_tb", ")", "else", ":", "delete_stack", "(", ")", "return", "wrapped_test", "return", "stack_delete" ]
decorator for creating and deleting stack .
train
false
17,204
@app.before_request def get_current_user(): if session.get('user'): g.user = session.get('user') return result = get_user_from_cookie(cookies=request.cookies, app_id=FB_APP_ID, app_secret=FB_APP_SECRET) if result: user = User.query.filter((User.id == result['uid'])).first() if (not user): graph = GraphAPI(result['access_token']) profile = graph.get_object('me') if ('link' not in profile): profile['link'] = '' user = User(id=str(profile['id']), name=profile['name'], profile_url=profile['link'], access_token=result['access_token']) db.session.add(user) elif (user.access_token != result['access_token']): user.access_token = result['access_token'] session['user'] = dict(name=user.name, profile_url=user.profile_url, id=user.id, access_token=user.access_token) db.session.commit() g.user = session.get('user', None)
[ "@", "app", ".", "before_request", "def", "get_current_user", "(", ")", ":", "if", "session", ".", "get", "(", "'user'", ")", ":", "g", ".", "user", "=", "session", ".", "get", "(", "'user'", ")", "return", "result", "=", "get_user_from_cookie", "(", "cookies", "=", "request", ".", "cookies", ",", "app_id", "=", "FB_APP_ID", ",", "app_secret", "=", "FB_APP_SECRET", ")", "if", "result", ":", "user", "=", "User", ".", "query", ".", "filter", "(", "(", "User", ".", "id", "==", "result", "[", "'uid'", "]", ")", ")", ".", "first", "(", ")", "if", "(", "not", "user", ")", ":", "graph", "=", "GraphAPI", "(", "result", "[", "'access_token'", "]", ")", "profile", "=", "graph", ".", "get_object", "(", "'me'", ")", "if", "(", "'link'", "not", "in", "profile", ")", ":", "profile", "[", "'link'", "]", "=", "''", "user", "=", "User", "(", "id", "=", "str", "(", "profile", "[", "'id'", "]", ")", ",", "name", "=", "profile", "[", "'name'", "]", ",", "profile_url", "=", "profile", "[", "'link'", "]", ",", "access_token", "=", "result", "[", "'access_token'", "]", ")", "db", ".", "session", ".", "add", "(", "user", ")", "elif", "(", "user", ".", "access_token", "!=", "result", "[", "'access_token'", "]", ")", ":", "user", ".", "access_token", "=", "result", "[", "'access_token'", "]", "session", "[", "'user'", "]", "=", "dict", "(", "name", "=", "user", ".", "name", ",", "profile_url", "=", "user", ".", "profile_url", ",", "id", "=", "user", ".", "id", ",", "access_token", "=", "user", ".", "access_token", ")", "db", ".", "session", ".", "commit", "(", ")", "g", ".", "user", "=", "session", ".", "get", "(", "'user'", ",", "None", ")" ]
return a token_user for the owner of this process .
train
true
17,205
def flush(from_test=False): cache = frappe.cache() check_email_limit([]) auto_commit = (not from_test) if frappe.are_emails_muted(): msgprint(_(u'Emails are muted')) from_test = True smtpserver = SMTPServer() make_cache_queue() for i in xrange(cache.llen(u'cache_email_queue')): email = cache.lpop(u'cache_email_queue') if (cint(frappe.defaults.get_defaults().get(u'hold_queue')) == 1): break if email: send_one(email, smtpserver, auto_commit, from_test=from_test)
[ "def", "flush", "(", "from_test", "=", "False", ")", ":", "cache", "=", "frappe", ".", "cache", "(", ")", "check_email_limit", "(", "[", "]", ")", "auto_commit", "=", "(", "not", "from_test", ")", "if", "frappe", ".", "are_emails_muted", "(", ")", ":", "msgprint", "(", "_", "(", "u'Emails are muted'", ")", ")", "from_test", "=", "True", "smtpserver", "=", "SMTPServer", "(", ")", "make_cache_queue", "(", ")", "for", "i", "in", "xrange", "(", "cache", ".", "llen", "(", "u'cache_email_queue'", ")", ")", ":", "email", "=", "cache", ".", "lpop", "(", "u'cache_email_queue'", ")", "if", "(", "cint", "(", "frappe", ".", "defaults", ".", "get_defaults", "(", ")", ".", "get", "(", "u'hold_queue'", ")", ")", "==", "1", ")", ":", "break", "if", "email", ":", "send_one", "(", "email", ",", "smtpserver", ",", "auto_commit", ",", "from_test", "=", "from_test", ")" ]
flush entries in the specified set .
train
false
17,206
def verify_ordering(test_class, page, expected_orderings): xblocks = page.xblocks blocks_checked = set() for expected_ordering in expected_orderings: for xblock in xblocks: parent = expected_ordering.keys()[0] if (xblock.name == parent): blocks_checked.add(parent) children = xblock.children expected_length = len(expected_ordering.get(parent)) test_class.assertEqual(expected_length, len(children), 'Number of children incorrect for group {0}. Expected {1} but got {2}.'.format(parent, expected_length, len(children))) for (idx, expected) in enumerate(expected_ordering.get(parent)): test_class.assertEqual(expected, children[idx].name) blocks_checked.add(expected) break test_class.assertEqual(len(blocks_checked), len(xblocks))
[ "def", "verify_ordering", "(", "test_class", ",", "page", ",", "expected_orderings", ")", ":", "xblocks", "=", "page", ".", "xblocks", "blocks_checked", "=", "set", "(", ")", "for", "expected_ordering", "in", "expected_orderings", ":", "for", "xblock", "in", "xblocks", ":", "parent", "=", "expected_ordering", ".", "keys", "(", ")", "[", "0", "]", "if", "(", "xblock", ".", "name", "==", "parent", ")", ":", "blocks_checked", ".", "add", "(", "parent", ")", "children", "=", "xblock", ".", "children", "expected_length", "=", "len", "(", "expected_ordering", ".", "get", "(", "parent", ")", ")", "test_class", ".", "assertEqual", "(", "expected_length", ",", "len", "(", "children", ")", ",", "'Number of children incorrect for group {0}. Expected {1} but got {2}.'", ".", "format", "(", "parent", ",", "expected_length", ",", "len", "(", "children", ")", ")", ")", "for", "(", "idx", ",", "expected", ")", "in", "enumerate", "(", "expected_ordering", ".", "get", "(", "parent", ")", ")", ":", "test_class", ".", "assertEqual", "(", "expected", ",", "children", "[", "idx", "]", ".", "name", ")", "blocks_checked", ".", "add", "(", "expected", ")", "break", "test_class", ".", "assertEqual", "(", "len", "(", "blocks_checked", ")", ",", "len", "(", "xblocks", ")", ")" ]
verifies the expected ordering of xblocks on the page .
train
false
17,207
def _remove_dataset_fields(content): content = yaml.safe_load(content) dataset = content['dataset'] for key in dataset: if (key not in _ok_to_log): dataset[key] = 'REMOVED' return yaml.safe_dump(content)
[ "def", "_remove_dataset_fields", "(", "content", ")", ":", "content", "=", "yaml", ".", "safe_load", "(", "content", ")", "dataset", "=", "content", "[", "'dataset'", "]", "for", "key", "in", "dataset", ":", "if", "(", "key", "not", "in", "_ok_to_log", ")", ":", "dataset", "[", "key", "]", "=", "'REMOVED'", "return", "yaml", ".", "safe_dump", "(", "content", ")" ]
remove non-whitelisted fields from dataset for logging .
train
false
17,209
def validate_no_unc(root, value, default): if (value and (not value.startswith('\\\\'))): return validate_notempty(root, value, default) else: return ((T('UNC path "%s" not allowed here') % value), None)
[ "def", "validate_no_unc", "(", "root", ",", "value", ",", "default", ")", ":", "if", "(", "value", "and", "(", "not", "value", ".", "startswith", "(", "'\\\\\\\\'", ")", ")", ")", ":", "return", "validate_notempty", "(", "root", ",", "value", ",", "default", ")", "else", ":", "return", "(", "(", "T", "(", "'UNC path \"%s\" not allowed here'", ")", "%", "value", ")", ",", "None", ")" ]
check if path isnt a unc path .
train
false
17,210
def knuth_bin_width(data, return_bins=False, quiet=True): from scipy import optimize knuthF = _KnuthF(data) (dx0, bins0) = freedman_bin_width(data, True) M = optimize.fmin(knuthF, len(bins0), disp=(not quiet))[0] bins = knuthF.bins(M) dx = (bins[1] - bins[0]) if return_bins: return (dx, bins) else: return dx
[ "def", "knuth_bin_width", "(", "data", ",", "return_bins", "=", "False", ",", "quiet", "=", "True", ")", ":", "from", "scipy", "import", "optimize", "knuthF", "=", "_KnuthF", "(", "data", ")", "(", "dx0", ",", "bins0", ")", "=", "freedman_bin_width", "(", "data", ",", "True", ")", "M", "=", "optimize", ".", "fmin", "(", "knuthF", ",", "len", "(", "bins0", ")", ",", "disp", "=", "(", "not", "quiet", ")", ")", "[", "0", "]", "bins", "=", "knuthF", ".", "bins", "(", "M", ")", "dx", "=", "(", "bins", "[", "1", "]", "-", "bins", "[", "0", "]", ")", "if", "return_bins", ":", "return", "(", "dx", ",", "bins", ")", "else", ":", "return", "dx" ]
return the optimal histogram bin width using knuths rule .
train
false
17,211
def has_profile(user): profile_model = get_profile_model() try: profile = user.get_profile() except AttributeError: related_name = profile_model._meta.get_field('user').related_query_name() profile = getattr(user, related_name, None) except profile_model.DoesNotExist: profile = None return bool(profile)
[ "def", "has_profile", "(", "user", ")", ":", "profile_model", "=", "get_profile_model", "(", ")", "try", ":", "profile", "=", "user", ".", "get_profile", "(", ")", "except", "AttributeError", ":", "related_name", "=", "profile_model", ".", "_meta", ".", "get_field", "(", "'user'", ")", ".", "related_query_name", "(", ")", "profile", "=", "getattr", "(", "user", ",", "related_name", ",", "None", ")", "except", "profile_model", ".", "DoesNotExist", ":", "profile", "=", "None", "return", "bool", "(", "profile", ")" ]
test utility function to check if user has profile .
train
false
17,212
def reject(match, include_accepted=False, include_denied=False): skey = get_key(__opts__) return skey.reject(match, include_accepted=include_accepted, include_denied=include_denied)
[ "def", "reject", "(", "match", ",", "include_accepted", "=", "False", ",", "include_denied", "=", "False", ")", ":", "skey", "=", "get_key", "(", "__opts__", ")", "return", "skey", ".", "reject", "(", "match", ",", "include_accepted", "=", "include_accepted", ",", "include_denied", "=", "include_denied", ")" ]
reject keys based on a glob match .
train
true
17,213
def _chunk_read(response, local_file, chunk_size=65536, initial_size=0): bytes_so_far = initial_size total_size = int(response.headers['Content-Length'].strip()) total_size += initial_size progress = ProgressBar(total_size, initial_value=bytes_so_far, max_chars=40, spinner=True, mesg='downloading') while True: chunk = response.read(chunk_size) bytes_so_far += len(chunk) if (not chunk): sys.stderr.write('\n') break _chunk_write(chunk, local_file, progress)
[ "def", "_chunk_read", "(", "response", ",", "local_file", ",", "chunk_size", "=", "65536", ",", "initial_size", "=", "0", ")", ":", "bytes_so_far", "=", "initial_size", "total_size", "=", "int", "(", "response", ".", "headers", "[", "'Content-Length'", "]", ".", "strip", "(", ")", ")", "total_size", "+=", "initial_size", "progress", "=", "ProgressBar", "(", "total_size", ",", "initial_value", "=", "bytes_so_far", ",", "max_chars", "=", "40", ",", "spinner", "=", "True", ",", "mesg", "=", "'downloading'", ")", "while", "True", ":", "chunk", "=", "response", ".", "read", "(", "chunk_size", ")", "bytes_so_far", "+=", "len", "(", "chunk", ")", "if", "(", "not", "chunk", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'\\n'", ")", "break", "_chunk_write", "(", "chunk", ",", "local_file", ",", "progress", ")" ]
download a file chunk by chunk and show advancement can also be used when resuming downloads over http .
train
true
17,215
def str(val): return format('%.12g', val)
[ "def", "str", "(", "val", ")", ":", "return", "format", "(", "'%.12g'", ",", "val", ")" ]
this is a bit evil - str gets mapped to a c-api function and is being redefined here .
train
false
17,216
def netmask_to_cidr(dq): if isinstance(dq, basestring): dq = IPAddr(dq) v = dq.toUnsigned(networkOrder=False) c = 0 while (v & 2147483648): c += 1 v <<= 1 v = (v & 4294967295) if (v != 0): raise RuntimeError(('Netmask %s is not CIDR-compatible' % (dq,))) return c
[ "def", "netmask_to_cidr", "(", "dq", ")", ":", "if", "isinstance", "(", "dq", ",", "basestring", ")", ":", "dq", "=", "IPAddr", "(", "dq", ")", "v", "=", "dq", ".", "toUnsigned", "(", "networkOrder", "=", "False", ")", "c", "=", "0", "while", "(", "v", "&", "2147483648", ")", ":", "c", "+=", "1", "v", "<<=", "1", "v", "=", "(", "v", "&", "4294967295", ")", "if", "(", "v", "!=", "0", ")", ":", "raise", "RuntimeError", "(", "(", "'Netmask %s is not CIDR-compatible'", "%", "(", "dq", ",", ")", ")", ")", "return", "c" ]
takes a netmask as either an ipaddr or a string .
train
false
17,217
def to_sensor_db_model(sensor_api_model=None): class_name = getattr(sensor_api_model, 'class_name', None) pack = getattr(sensor_api_model, 'pack', None) entry_point = get_sensor_entry_point(sensor_api_model) artifact_uri = getattr(sensor_api_model, 'artifact_uri', None) description = getattr(sensor_api_model, 'description', None) trigger_types = getattr(sensor_api_model, 'trigger_types', []) poll_interval = getattr(sensor_api_model, 'poll_interval', None) enabled = getattr(sensor_api_model, 'enabled', True) poll_interval = getattr(sensor_api_model, 'poll_interval', None) if (poll_interval and (poll_interval < MINIMUM_POLL_INTERVAL)): raise ValueError(('Minimum possible poll_interval is %s seconds' % MINIMUM_POLL_INTERVAL)) for trigger_type in trigger_types: trigger_type['pack'] = pack trigger_type_refs = create_trigger_types(trigger_types) return _create_sensor_type(pack=pack, name=class_name, description=description, artifact_uri=artifact_uri, entry_point=entry_point, trigger_types=trigger_type_refs, poll_interval=poll_interval, enabled=enabled)
[ "def", "to_sensor_db_model", "(", "sensor_api_model", "=", "None", ")", ":", "class_name", "=", "getattr", "(", "sensor_api_model", ",", "'class_name'", ",", "None", ")", "pack", "=", "getattr", "(", "sensor_api_model", ",", "'pack'", ",", "None", ")", "entry_point", "=", "get_sensor_entry_point", "(", "sensor_api_model", ")", "artifact_uri", "=", "getattr", "(", "sensor_api_model", ",", "'artifact_uri'", ",", "None", ")", "description", "=", "getattr", "(", "sensor_api_model", ",", "'description'", ",", "None", ")", "trigger_types", "=", "getattr", "(", "sensor_api_model", ",", "'trigger_types'", ",", "[", "]", ")", "poll_interval", "=", "getattr", "(", "sensor_api_model", ",", "'poll_interval'", ",", "None", ")", "enabled", "=", "getattr", "(", "sensor_api_model", ",", "'enabled'", ",", "True", ")", "poll_interval", "=", "getattr", "(", "sensor_api_model", ",", "'poll_interval'", ",", "None", ")", "if", "(", "poll_interval", "and", "(", "poll_interval", "<", "MINIMUM_POLL_INTERVAL", ")", ")", ":", "raise", "ValueError", "(", "(", "'Minimum possible poll_interval is %s seconds'", "%", "MINIMUM_POLL_INTERVAL", ")", ")", "for", "trigger_type", "in", "trigger_types", ":", "trigger_type", "[", "'pack'", "]", "=", "pack", "trigger_type_refs", "=", "create_trigger_types", "(", "trigger_types", ")", "return", "_create_sensor_type", "(", "pack", "=", "pack", ",", "name", "=", "class_name", ",", "description", "=", "description", ",", "artifact_uri", "=", "artifact_uri", ",", "entry_point", "=", "entry_point", ",", "trigger_types", "=", "trigger_type_refs", ",", "poll_interval", "=", "poll_interval", ",", "enabled", "=", "enabled", ")" ]
converts a sensortypeapi model to db model .
train
false
17,220
@task def release_render_lock(): render_lock.release()
[ "@", "task", "def", "release_render_lock", "(", ")", ":", "render_lock", ".", "release", "(", ")" ]
a task to release the render document lock .
train
false
17,221
def get_https_proxy(network_service='Ethernet'): if (__grains__['os'] == 'Windows'): return _get_proxy_windows(['https']) return _get_proxy_osx('getsecurewebproxy', network_service)
[ "def", "get_https_proxy", "(", "network_service", "=", "'Ethernet'", ")", ":", "if", "(", "__grains__", "[", "'os'", "]", "==", "'Windows'", ")", ":", "return", "_get_proxy_windows", "(", "[", "'https'", "]", ")", "return", "_get_proxy_osx", "(", "'getsecurewebproxy'", ",", "network_service", ")" ]
returns the current https proxy settings network_service the network service to apply the changes to .
train
false
17,222
def metric(l): global evals evals += 1 return (sum(l) < 3)
[ "def", "metric", "(", "l", ")", ":", "global", "evals", "evals", "+=", "1", "return", "(", "sum", "(", "l", ")", "<", "3", ")" ]
example metric for search .
train
false
17,223
def unify_instance(instance): newdict = dict() for (k, v) in six.iteritems(instance): if isinstance(v, datetime.datetime): v = v.replace(tzinfo=None) elif (k == 'fault'): continue elif (k == 'pci_devices'): continue newdict[k] = v return newdict
[ "def", "unify_instance", "(", "instance", ")", ":", "newdict", "=", "dict", "(", ")", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "instance", ")", ":", "if", "isinstance", "(", "v", ",", "datetime", ".", "datetime", ")", ":", "v", "=", "v", ".", "replace", "(", "tzinfo", "=", "None", ")", "elif", "(", "k", "==", "'fault'", ")", ":", "continue", "elif", "(", "k", "==", "'pci_devices'", ")", ":", "continue", "newdict", "[", "k", "]", "=", "v", "return", "newdict" ]
return a dict-like instance for both object-initiated and model-initiated sources that can reasonably be compared .
train
false
17,227
def getLinesWithoutRedundancy(duplicateWord, lines): oldDuplicationIndex = None for (lineIndex, line) in enumerate(lines): firstWord = gcodec.getFirstWordFromLine(line) if (firstWord == duplicateWord): if (oldDuplicationIndex == None): oldDuplicationIndex = lineIndex else: lines[oldDuplicationIndex] = line lines[lineIndex] = '' elif (firstWord.startswith('G') or (firstWord == 'M101') or (firstWord == 'M103')): oldDuplicationIndex = None return lines
[ "def", "getLinesWithoutRedundancy", "(", "duplicateWord", ",", "lines", ")", ":", "oldDuplicationIndex", "=", "None", "for", "(", "lineIndex", ",", "line", ")", "in", "enumerate", "(", "lines", ")", ":", "firstWord", "=", "gcodec", ".", "getFirstWordFromLine", "(", "line", ")", "if", "(", "firstWord", "==", "duplicateWord", ")", ":", "if", "(", "oldDuplicationIndex", "==", "None", ")", ":", "oldDuplicationIndex", "=", "lineIndex", "else", ":", "lines", "[", "oldDuplicationIndex", "]", "=", "line", "lines", "[", "lineIndex", "]", "=", "''", "elif", "(", "firstWord", ".", "startswith", "(", "'G'", ")", "or", "(", "firstWord", "==", "'M101'", ")", "or", "(", "firstWord", "==", "'M103'", ")", ")", ":", "oldDuplicationIndex", "=", "None", "return", "lines" ]
get gcode lines without redundant first words .
train
false
17,229
def build_sentence_encoder(tparams, options): opt_ret = dict() trng = RandomStreams(1234) x = tensor.matrix('x', dtype='int64') mask = tensor.matrix('x_mask', dtype='float32') n_timesteps = x.shape[0] n_samples = x.shape[1] emb = tparams['Wemb'][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']]) proj = get_layer(options['encoder'])[1](tparams, emb, None, options, prefix='encoder', mask=mask) sents = proj[0][(-1)] sents = l2norm(sents) return (trng, [x, mask], sents)
[ "def", "build_sentence_encoder", "(", "tparams", ",", "options", ")", ":", "opt_ret", "=", "dict", "(", ")", "trng", "=", "RandomStreams", "(", "1234", ")", "x", "=", "tensor", ".", "matrix", "(", "'x'", ",", "dtype", "=", "'int64'", ")", "mask", "=", "tensor", ".", "matrix", "(", "'x_mask'", ",", "dtype", "=", "'float32'", ")", "n_timesteps", "=", "x", ".", "shape", "[", "0", "]", "n_samples", "=", "x", ".", "shape", "[", "1", "]", "emb", "=", "tparams", "[", "'Wemb'", "]", "[", "x", ".", "flatten", "(", ")", "]", ".", "reshape", "(", "[", "n_timesteps", ",", "n_samples", ",", "options", "[", "'dim_word'", "]", "]", ")", "proj", "=", "get_layer", "(", "options", "[", "'encoder'", "]", ")", "[", "1", "]", "(", "tparams", ",", "emb", ",", "None", ",", "options", ",", "prefix", "=", "'encoder'", ",", "mask", "=", "mask", ")", "sents", "=", "proj", "[", "0", "]", "[", "(", "-", "1", ")", "]", "sents", "=", "l2norm", "(", "sents", ")", "return", "(", "trng", ",", "[", "x", ",", "mask", "]", ",", "sents", ")" ]
encoder only .
train
false
17,230
def dt_filter(reporter, search_string=' ', forceClear=True, quiet=True): if forceClear: if (not dt_filter(reporter, forceClear=False, quiet=quiet)): return False config = current.test_config browser = config.browser sleep_limit = 10 elem = browser.find_element_by_css_selector('label > input[type="text"]') elem.clear() elem.send_keys(search_string) time.sleep(1) waiting_elem = browser.find_element_by_id('datatable_processing') sleep_time = 0 while (waiting_elem.value_of_css_property('visibility') == 'visible'): time.sleep(1) sleep_time += 1 if (sleep_time > sleep_limit): if (not quiet): reporter(("DataTable filter didn't respond within %d seconds" % sleep_limit)) return False return True
[ "def", "dt_filter", "(", "reporter", ",", "search_string", "=", "' '", ",", "forceClear", "=", "True", ",", "quiet", "=", "True", ")", ":", "if", "forceClear", ":", "if", "(", "not", "dt_filter", "(", "reporter", ",", "forceClear", "=", "False", ",", "quiet", "=", "quiet", ")", ")", ":", "return", "False", "config", "=", "current", ".", "test_config", "browser", "=", "config", ".", "browser", "sleep_limit", "=", "10", "elem", "=", "browser", ".", "find_element_by_css_selector", "(", "'label > input[type=\"text\"]'", ")", "elem", ".", "clear", "(", ")", "elem", ".", "send_keys", "(", "search_string", ")", "time", ".", "sleep", "(", "1", ")", "waiting_elem", "=", "browser", ".", "find_element_by_id", "(", "'datatable_processing'", ")", "sleep_time", "=", "0", "while", "(", "waiting_elem", ".", "value_of_css_property", "(", "'visibility'", ")", "==", "'visible'", ")", ":", "time", ".", "sleep", "(", "1", ")", "sleep_time", "+=", "1", "if", "(", "sleep_time", ">", "sleep_limit", ")", ":", "if", "(", "not", "quiet", ")", ":", "reporter", "(", "(", "\"DataTable filter didn't respond within %d seconds\"", "%", "sleep_limit", ")", ")", "return", "False", "return", "True" ]
filter the datatable .
train
false
17,231
def filter_and_sort(file_list): file_list = [filename for filename in file_list if filename.endswith('.py')] def key(item): return len(item) file_list.sort(key=key) return file_list
[ "def", "filter_and_sort", "(", "file_list", ")", ":", "file_list", "=", "[", "filename", "for", "filename", "in", "file_list", "if", "filename", ".", "endswith", "(", "'.py'", ")", "]", "def", "key", "(", "item", ")", ":", "return", "len", "(", "item", ")", "file_list", ".", "sort", "(", "key", "=", "key", ")", "return", "file_list" ]
out of a list of file names .
train
false
17,232
def test_permutation_step_down_p(): try: try: from sklearn.feature_extraction.image import grid_to_graph except ImportError: from scikits.learn.feature_extraction.image import grid_to_graph except ImportError: return rng = np.random.RandomState(0) X = rng.randn(9, 2, 10) X[:, 0:2, 0:2] += 2 X[:, 1, 5:9] += 0.5 thresh = 2 (t, clusters, p, H0) = permutation_cluster_1samp_test(X, threshold=thresh, step_down_p=1.0) (t, clusters, p_old, H0) = permutation_cluster_1samp_test(X, threshold=thresh, step_down_p=0.0) assert_equal(np.sum((p_old < 0.05)), 1) (t, clusters, p_new, H0) = permutation_cluster_1samp_test(X, threshold=thresh, step_down_p=0.05) assert_equal(np.sum((p_new < 0.05)), 2) assert_true(np.all((p_old >= p_new)))
[ "def", "test_permutation_step_down_p", "(", ")", ":", "try", ":", "try", ":", "from", "sklearn", ".", "feature_extraction", ".", "image", "import", "grid_to_graph", "except", "ImportError", ":", "from", "scikits", ".", "learn", ".", "feature_extraction", ".", "image", "import", "grid_to_graph", "except", "ImportError", ":", "return", "rng", "=", "np", ".", "random", ".", "RandomState", "(", "0", ")", "X", "=", "rng", ".", "randn", "(", "9", ",", "2", ",", "10", ")", "X", "[", ":", ",", "0", ":", "2", ",", "0", ":", "2", "]", "+=", "2", "X", "[", ":", ",", "1", ",", "5", ":", "9", "]", "+=", "0.5", "thresh", "=", "2", "(", "t", ",", "clusters", ",", "p", ",", "H0", ")", "=", "permutation_cluster_1samp_test", "(", "X", ",", "threshold", "=", "thresh", ",", "step_down_p", "=", "1.0", ")", "(", "t", ",", "clusters", ",", "p_old", ",", "H0", ")", "=", "permutation_cluster_1samp_test", "(", "X", ",", "threshold", "=", "thresh", ",", "step_down_p", "=", "0.0", ")", "assert_equal", "(", "np", ".", "sum", "(", "(", "p_old", "<", "0.05", ")", ")", ",", "1", ")", "(", "t", ",", "clusters", ",", "p_new", ",", "H0", ")", "=", "permutation_cluster_1samp_test", "(", "X", ",", "threshold", "=", "thresh", ",", "step_down_p", "=", "0.05", ")", "assert_equal", "(", "np", ".", "sum", "(", "(", "p_new", "<", "0.05", ")", ")", ",", "2", ")", "assert_true", "(", "np", ".", "all", "(", "(", "p_old", ">=", "p_new", ")", ")", ")" ]
test cluster level permutations with step_down_p .
train
false
17,235
def solve_de(f, x, DE, order, g, k): sol = None syms = DE.free_symbols.difference({g, x}) if syms: RE = _transform_DE_RE(DE, g, k, order, syms) else: RE = hyper_re(DE, g, k) if (not RE.free_symbols.difference({k})): sol = _solve_hyper_RE(f, x, RE, g, k) if sol: return sol if syms: DE = _transform_explike_DE(DE, g, x, order, syms) if (not DE.free_symbols.difference({x})): sol = _solve_explike_DE(f, x, DE, g, k) if sol: return sol
[ "def", "solve_de", "(", "f", ",", "x", ",", "DE", ",", "order", ",", "g", ",", "k", ")", ":", "sol", "=", "None", "syms", "=", "DE", ".", "free_symbols", ".", "difference", "(", "{", "g", ",", "x", "}", ")", "if", "syms", ":", "RE", "=", "_transform_DE_RE", "(", "DE", ",", "g", ",", "k", ",", "order", ",", "syms", ")", "else", ":", "RE", "=", "hyper_re", "(", "DE", ",", "g", ",", "k", ")", "if", "(", "not", "RE", ".", "free_symbols", ".", "difference", "(", "{", "k", "}", ")", ")", ":", "sol", "=", "_solve_hyper_RE", "(", "f", ",", "x", ",", "RE", ",", "g", ",", "k", ")", "if", "sol", ":", "return", "sol", "if", "syms", ":", "DE", "=", "_transform_explike_DE", "(", "DE", ",", "g", ",", "x", ",", "order", ",", "syms", ")", "if", "(", "not", "DE", ".", "free_symbols", ".", "difference", "(", "{", "x", "}", ")", ")", ":", "sol", "=", "_solve_explike_DE", "(", "f", ",", "x", ",", "DE", ",", "g", ",", "k", ")", "if", "sol", ":", "return", "sol" ]
solves the de .
train
false
17,236
def _get_user_gnupghome(user): if (user == 'salt'): gnupghome = os.path.join(salt.syspaths.CONFIG_DIR, 'gpgkeys') else: gnupghome = os.path.join(_get_user_info(user)['home'], '.gnupg') return gnupghome
[ "def", "_get_user_gnupghome", "(", "user", ")", ":", "if", "(", "user", "==", "'salt'", ")", ":", "gnupghome", "=", "os", ".", "path", ".", "join", "(", "salt", ".", "syspaths", ".", "CONFIG_DIR", ",", "'gpgkeys'", ")", "else", ":", "gnupghome", "=", "os", ".", "path", ".", "join", "(", "_get_user_info", "(", "user", ")", "[", "'home'", "]", ",", "'.gnupg'", ")", "return", "gnupghome" ]
return default gnupg home directory path for a user .
train
false
17,237
def _AddClearExtensionMethod(cls): def ClearExtension(self, extension_handle): _VerifyExtensionHandle(self, extension_handle) if (extension_handle in self._fields): del self._fields[extension_handle] self._Modified() cls.ClearExtension = ClearExtension
[ "def", "_AddClearExtensionMethod", "(", "cls", ")", ":", "def", "ClearExtension", "(", "self", ",", "extension_handle", ")", ":", "_VerifyExtensionHandle", "(", "self", ",", "extension_handle", ")", "if", "(", "extension_handle", "in", "self", ".", "_fields", ")", ":", "del", "self", ".", "_fields", "[", "extension_handle", "]", "self", ".", "_Modified", "(", ")", "cls", ".", "ClearExtension", "=", "ClearExtension" ]
helper for _addmessagemethods() .
train
true
17,239
def _sync_close(f): _sync_flush(f) f.close()
[ "def", "_sync_close", "(", "f", ")", ":", "_sync_flush", "(", "f", ")", "f", ".", "close", "(", ")" ]
close file f .
train
false
17,240
@require_role('admin') def group_list(request): (header_title, path1, path2) = (u'\u67e5\u770b\u8d44\u4ea7\u7ec4', u'\u8d44\u4ea7\u7ba1\u7406', u'\u67e5\u770b\u8d44\u4ea7\u7ec4') keyword = request.GET.get('keyword', '') asset_group_list = AssetGroup.objects.all() group_id = request.GET.get('id') if group_id: asset_group_list = asset_group_list.filter(id=group_id) if keyword: asset_group_list = asset_group_list.filter((Q(name__contains=keyword) | Q(comment__contains=keyword))) (asset_group_list, p, asset_groups, page_range, current_page, show_first, show_end) = pages(asset_group_list, request) return my_render('jasset/group_list.html', locals(), request)
[ "@", "require_role", "(", "'admin'", ")", "def", "group_list", "(", "request", ")", ":", "(", "header_title", ",", "path1", ",", "path2", ")", "=", "(", "u'\\u67e5\\u770b\\u8d44\\u4ea7\\u7ec4'", ",", "u'\\u8d44\\u4ea7\\u7ba1\\u7406'", ",", "u'\\u67e5\\u770b\\u8d44\\u4ea7\\u7ec4'", ")", "keyword", "=", "request", ".", "GET", ".", "get", "(", "'keyword'", ",", "''", ")", "asset_group_list", "=", "AssetGroup", ".", "objects", ".", "all", "(", ")", "group_id", "=", "request", ".", "GET", ".", "get", "(", "'id'", ")", "if", "group_id", ":", "asset_group_list", "=", "asset_group_list", ".", "filter", "(", "id", "=", "group_id", ")", "if", "keyword", ":", "asset_group_list", "=", "asset_group_list", ".", "filter", "(", "(", "Q", "(", "name__contains", "=", "keyword", ")", "|", "Q", "(", "comment__contains", "=", "keyword", ")", ")", ")", "(", "asset_group_list", ",", "p", ",", "asset_groups", ",", "page_range", ",", "current_page", ",", "show_first", ",", "show_end", ")", "=", "pages", "(", "asset_group_list", ",", "request", ")", "return", "my_render", "(", "'jasset/group_list.html'", ",", "locals", "(", ")", ",", "request", ")" ]
list user group .
train
false
17,241
@frappe.whitelist() def bulk_update(docs): docs = json.loads(docs) failed_docs = [] for doc in docs: try: ddoc = {key: val for (key, val) in doc.iteritems() if (key not in [u'doctype', u'docname'])} doctype = doc[u'doctype'] docname = doc[u'docname'] doc = frappe.get_doc(doctype, docname) doc.update(ddoc) doc.save() except: failed_docs.append({u'doc': doc, u'exc': frappe.utils.get_traceback()}) return {u'failed_docs': failed_docs}
[ "@", "frappe", ".", "whitelist", "(", ")", "def", "bulk_update", "(", "docs", ")", ":", "docs", "=", "json", ".", "loads", "(", "docs", ")", "failed_docs", "=", "[", "]", "for", "doc", "in", "docs", ":", "try", ":", "ddoc", "=", "{", "key", ":", "val", "for", "(", "key", ",", "val", ")", "in", "doc", ".", "iteritems", "(", ")", "if", "(", "key", "not", "in", "[", "u'doctype'", ",", "u'docname'", "]", ")", "}", "doctype", "=", "doc", "[", "u'doctype'", "]", "docname", "=", "doc", "[", "u'docname'", "]", "doc", "=", "frappe", ".", "get_doc", "(", "doctype", ",", "docname", ")", "doc", ".", "update", "(", "ddoc", ")", "doc", ".", "save", "(", ")", "except", ":", "failed_docs", ".", "append", "(", "{", "u'doc'", ":", "doc", ",", "u'exc'", ":", "frappe", ".", "utils", ".", "get_traceback", "(", ")", "}", ")", "return", "{", "u'failed_docs'", ":", "failed_docs", "}" ]
bulk update documents .
train
false
17,243
def make_env_wiz(): w = _make_flat_wiz(make_envvar, sorted(builtins.__xonsh_env__._docs.keys())) return w
[ "def", "make_env_wiz", "(", ")", ":", "w", "=", "_make_flat_wiz", "(", "make_envvar", ",", "sorted", "(", "builtins", ".", "__xonsh_env__", ".", "_docs", ".", "keys", "(", ")", ")", ")", "return", "w" ]
makes an environment variable wizard .
train
false
17,244
def bokehjsdir(dev=False): dir1 = join(ROOT_DIR, '..', 'bokehjs', 'build') dir2 = join(serverdir(), 'static') if (dev and isdir(dir1)): return dir1 else: return dir2
[ "def", "bokehjsdir", "(", "dev", "=", "False", ")", ":", "dir1", "=", "join", "(", "ROOT_DIR", ",", "'..'", ",", "'bokehjs'", ",", "'build'", ")", "dir2", "=", "join", "(", "serverdir", "(", ")", ",", "'static'", ")", "if", "(", "dev", "and", "isdir", "(", "dir1", ")", ")", ":", "return", "dir1", "else", ":", "return", "dir2" ]
get the location of the bokehjs source files .
train
true
17,245
@cli.command('save') @click.option('--filename', default='processed-%04d.png', type=click.Path(), help='The format for the filename.', show_default=True) @processor def save_cmd(images, filename): for (idx, image) in enumerate(images): try: fn = (filename % (idx + 1)) click.echo(('Saving "%s" as "%s"' % (image.filename, fn))) (yield image.save(fn)) except Exception as e: click.echo(('Could not save image "%s": %s' % (image.filename, e)), err=True)
[ "@", "cli", ".", "command", "(", "'save'", ")", "@", "click", ".", "option", "(", "'--filename'", ",", "default", "=", "'processed-%04d.png'", ",", "type", "=", "click", ".", "Path", "(", ")", ",", "help", "=", "'The format for the filename.'", ",", "show_default", "=", "True", ")", "@", "processor", "def", "save_cmd", "(", "images", ",", "filename", ")", ":", "for", "(", "idx", ",", "image", ")", "in", "enumerate", "(", "images", ")", ":", "try", ":", "fn", "=", "(", "filename", "%", "(", "idx", "+", "1", ")", ")", "click", ".", "echo", "(", "(", "'Saving \"%s\" as \"%s\"'", "%", "(", "image", ".", "filename", ",", "fn", ")", ")", ")", "(", "yield", "image", ".", "save", "(", "fn", ")", ")", "except", "Exception", "as", "e", ":", "click", ".", "echo", "(", "(", "'Could not save image \"%s\": %s'", "%", "(", "image", ".", "filename", ",", "e", ")", ")", ",", "err", "=", "True", ")" ]
saves all processed images to a series of files .
train
false
17,246
def uuid(dev=None): try: if (dev is None): return list(os.walk('/sys/fs/bcache/'))[0][1][0] else: return os.path.basename(_bcsys(dev, 'cache')) except: return False
[ "def", "uuid", "(", "dev", "=", "None", ")", ":", "try", ":", "if", "(", "dev", "is", "None", ")", ":", "return", "list", "(", "os", ".", "walk", "(", "'/sys/fs/bcache/'", ")", ")", "[", "0", "]", "[", "1", "]", "[", "0", "]", "else", ":", "return", "os", ".", "path", ".", "basename", "(", "_bcsys", "(", "dev", ",", "'cache'", ")", ")", "except", ":", "return", "False" ]
generate unique id in uuid4 format .
train
false
17,247
def enrich_varname(varname): greek = 'alpha beta gamma delta epsilon varepsilon zeta eta theta vartheta iota kappa lambda mu nu xi pi rho sigma tau upsilon phi varphi chi psi omega'.split() greek += [x.capitalize() for x in greek] greek.append('hbar') greek.append('infty') if (varname in greek): return u'\\{letter}'.format(letter=varname) else: return varname.replace('_', '\\_')
[ "def", "enrich_varname", "(", "varname", ")", ":", "greek", "=", "'alpha beta gamma delta epsilon varepsilon zeta eta theta vartheta iota kappa lambda mu nu xi pi rho sigma tau upsilon phi varphi chi psi omega'", ".", "split", "(", ")", "greek", "+=", "[", "x", ".", "capitalize", "(", ")", "for", "x", "in", "greek", "]", "greek", ".", "append", "(", "'hbar'", ")", "greek", ".", "append", "(", "'infty'", ")", "if", "(", "varname", "in", "greek", ")", ":", "return", "u'\\\\{letter}'", ".", "format", "(", "letter", "=", "varname", ")", "else", ":", "return", "varname", ".", "replace", "(", "'_'", ",", "'\\\\_'", ")" ]
prepend a backslash if were given a greek character .
train
false
17,249
def Unify(l): seen = {} return [seen.setdefault(e, e) for e in l if (e not in seen)]
[ "def", "Unify", "(", "l", ")", ":", "seen", "=", "{", "}", "return", "[", "seen", ".", "setdefault", "(", "e", ",", "e", ")", "for", "e", "in", "l", "if", "(", "e", "not", "in", "seen", ")", "]" ]
removes duplicate elements from l .
train
false
17,250
def p4_has_command(cmd): real_cmd = p4_build_cmd(['help', cmd]) p = subprocess.Popen(real_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate() return (p.returncode == 0)
[ "def", "p4_has_command", "(", "cmd", ")", ":", "real_cmd", "=", "p4_build_cmd", "(", "[", "'help'", ",", "cmd", "]", ")", "p", "=", "subprocess", ".", "Popen", "(", "real_cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "p", ".", "communicate", "(", ")", "return", "(", "p", ".", "returncode", "==", "0", ")" ]
ask p4 for help on this command .
train
false