dataset columns:

  id_within_dataset      int64    values 1 to 55.5k
  snippet                string   lengths 19 to 14.2k
  tokens                 list     lengths 6 to 1.63k
  nl                     string   lengths 6 to 352
  split_within_dataset   string   1 class
  is_duplicated          bool     2 classes
45,844
def _install_custom_language():
    lang_file = resources.config_home(u'language')
    if not core.exists(lang_file):
        return
    try:
        lang = core.read(lang_file).strip()
    except:
        return
    if lang:
        compat.setenv(u'LANGUAGE', lang)
allow a custom language to be set in ~/ .
train
false
45,845
@step('I (.*) capturing of screenshots before and after each step$')
def configure_screenshots_for_all_steps(_step, action):
    action = action.strip()
    if action == 'enable':
        world.auto_capture_screenshots = True
    elif action == 'disable':
        world.auto_capture_screenshots = False
    else:
        raise ValueError('Parameter `action` should be one of "enable" or "disable".')
a step to be used in * .
train
false
45,846
def json2csv(fp, outfile, fields, encoding='utf8', errors='replace', gzip_compress=False):
    (writer, outf) = outf_writer_compat(outfile, encoding, errors, gzip_compress)
    writer.writerow(fields)
    for line in fp:
        tweet = json.loads(line)
        row = extract_fields(tweet, fields)
        writer.writerow(row)
    outf.close()
extract selected fields from a file of line-separated json tweets and write to a file in csv format .
train
false
45,847
@register.simple_tag(name='page_title_breadcrumbs', takes_context=True)
def page_title_breadcrumbs_tag(context, *crumbs):
    return configuration_helpers.page_title_breadcrumbs(*crumbs)
django template that creates breadcrumbs for page titles: {% page_title_breadcrumbs "specific" "less specific" general %} .
train
false
45,848
def s_random(value, min_length, max_length, num_mutations=25, fuzzable=True, step=None, name=None):
    random = primitives.random_data(value, min_length, max_length, num_mutations,
                                    fuzzable, step, name)
    blocks.CURRENT.push(random)
generate a random chunk of data while maintaining a copy of the original .
train
false
45,849
def shrink_tensor(x, w):
    return x[tuple([slice(w, -w)] * x.ndim)]
x : a theano tensortype variable
w : a theano integer scalar
returns y : a theano tensortype variable containing all but the borders of x .
train
false
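A quick demonstration with the function above in scope; NumPy stands in for Theano here purely for illustration, since the indexing is identical:

import numpy as np

a = np.arange(16).reshape(4, 4)
print(shrink_tensor(a, 1))  # strips a border of width 1 from every axis
# [[ 5  6]
#  [ 9 10]]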
45,850
def palette_color(r, g, b, palette, t_index):
    distances = [(r - _r) ** 2 + (g - _g) ** 2 + (b - _b) ** 2
                 for (_r, _g, _b) in palette]
    distances = map(sqrt, distances)
    if t_index is not None:
        distances = distances[:t_index] + distances[t_index + 1:]
    return distances.index(min(distances))
return best palette match index .
train
false
45,851
def init_source():
    codename = crypto_util.genrandomid()
    filesystem_id = crypto_util.hash_codename(codename)
    journalist_filename = crypto_util.display_id()
    source = db.Source(filesystem_id, journalist_filename)
    db.db_session.add(source)
    db.db_session.commit()
    os.mkdir(store.path(source.filesystem_id))
    crypto_util.genkeypair(source.filesystem_id, codename)
    return (source, codename)
initialize a source: create their database record .
train
false
45,852
def country_code_for_valid_region(region_code):
    metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None)
    if metadata is None:
        raise Exception('Invalid region code %s' % region_code)
    return metadata.country_code
returns the country calling code for a specific region .
train
true
45,853
def assoc(d, key, value, factory=dict):
    d2 = factory()
    d2[key] = value
    return merge(d, d2, factory=factory)
return a new dict with a new key-value pair; the new dict has d[key] set to value .
train
false
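This appears to be the toolz implementation of assoc; assuming that source, a usage sketch (the import is that assumption):

from toolz import assoc

d = {'x': 1}
print(assoc(d, 'y', 2))  # {'x': 1, 'y': 2}
print(d)                 # {'x': 1} -- the input dict is left unmodified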
45,854
def _get_yum_config_value(name):
    conf = _get_yum_config()
    if name in conf.keys():
        return conf.get(name)
    return None
look for a specific config variable and return its value .
train
true
45,856
def job_get_by_tag(tag):
    try:
        job = Job.objects.get(tag=tag)
        return job
    except Job.DoesNotExist:
        return None
return a job based on its tag .
train
false
45,857
def fast_cross_3d(x, y):
    assert x.ndim == 2
    assert y.ndim == 2
    assert x.shape[1] == 3
    assert y.shape[1] == 3
    assert (x.shape[0] == 1) or (y.shape[0] == 1) or (x.shape[0] == y.shape[0])
    if max([x.shape[0], y.shape[0]]) >= 500:
        return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
                     x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
                     x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
    else:
        return np.cross(x, y)
compute cross product between list of 3d vectors .
train
true
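With the function above in scope (and np bound to numpy), a quick check on two rows; below the 500-row threshold the call simply falls through to np.cross:

import numpy as np

x = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
y = np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
print(fast_cross_3d(x, y))
# [[0. 0. 1.]
#  [1. 0. 0.]]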
45,858
def add_to_date(date, years=0, months=0, days=0, hours=0, as_string=False, as_datetime=False):
    from dateutil.relativedelta import relativedelta
    if date == None:
        date = now_datetime()
    if hours:
        as_datetime = True
    if isinstance(date, basestring):
        as_string = True
        if u' ' in date:
            as_datetime = True
        date = parser.parse(date)
    date = date + relativedelta(years=years, months=months, days=days, hours=hours)
    if as_string:
        if as_datetime:
            return date.strftime(DATETIME_FORMAT)
        else:
            return date.strftime(DATE_FORMAT)
    else:
        return date
adds the given number of years, months, days and hours to the given date .
train
false
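The arithmetic itself is dateutil's relativedelta, which also handles month-end clamping; a minimal self-contained sketch of that core step (the helpers and format constants used above are not needed for it):

from datetime import datetime
from dateutil.relativedelta import relativedelta

# adding one month to Jan 31 clamps to the last day of February
print(datetime(2020, 1, 31) + relativedelta(months=1))  # 2020-02-29 00:00:00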
45,859
def sorted_dependencies(graph):
    graph = dict((key, set(value)) for (key, value) in graph.items())
    L = []
    S = set(parent for (parent, req) in graph.items() if not req)
    while S:
        n = S.pop()
        L.append(n)
        parents = [parent for (parent, req) in graph.items() if n in req]
        for parent in parents:
            graph[parent].remove(n)
            if not graph[parent]:
                S.add(parent)
    nonempty = [k for (k, v) in graph.items() if v]
    if nonempty:
        raise ArgumentError('Cyclic dependency of: %s' % ', '.join(nonempty))
    return L
return keys from the dependency graph ordered by dependency .
train
false
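This is Kahn's topological-sort algorithm; a usage sketch with the function above in scope (ArgumentError is only raised when the graph has a cycle):

# 'b' requires 'a'; 'c' requires both 'a' and 'b'
deps = {'a': [], 'b': ['a'], 'c': ['a', 'b']}
print(sorted_dependencies(deps))  # ['a', 'b', 'c']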
45,860
def _groupby_leading_idxs(shape):
    idxs = itertools.product(*[range(s) for s in shape])
    return itertools.groupby(idxs, lambda x: x[:-1])
group the indices for shape by the leading indices of shape .
train
false
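With the function above in scope, a small demonstration of how the index tuples are grouped by their leading coordinates:

import itertools

for lead, group in _groupby_leading_idxs((2, 3)):
    print(lead, list(group))
# (0,) [(0, 0), (0, 1), (0, 2)]
# (1,) [(1, 0), (1, 1), (1, 2)]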
45,861
def _advanced_component_types(show_unsupported):
    enabled_block_types = _filter_disabled_blocks(ADVANCED_COMPONENT_TYPES)
    if XBlockStudioConfigurationFlag.is_enabled():
        authorable_blocks = authorable_xblocks(allow_unsupported=show_unsupported)
        filtered_blocks = {}
        for block in authorable_blocks:
            if block.name in enabled_block_types:
                filtered_blocks[block.name] = block.support_level
        return filtered_blocks
    else:
        all_blocks = {}
        for block_name in enabled_block_types:
            all_blocks[block_name] = True
        return all_blocks
return advanced component types which can be created .
train
false
45,864
def check_install():
    if platform.system() == 'Darwin' and sys.executable != '/usr/bin/python':
        print('*' * 79)
        print(textwrap.fill("WARNING: You are not using the version of Python included with macOS. If you intend to use Voltron with the LLDB included with Xcode, or GDB installed with Homebrew, it will not work unless it is installed using the system's default Python. If you intend to use Voltron with a debugger installed by some other method, it may be safe to ignore this warning. See the following documentation for more detailed installation instructions: https://github.com/snare/voltron/wiki/Installation", 79))
        print('*' * 79)
    elif platform.system() == 'Linux':
        try:
            output = check_output(['gdb', '-batch', '-q', '--nx', '-ex',
                                   'pi print(sys.version_info.major)']).decode('utf-8')
            gdb_python = int(output)
            if gdb_python != sys.version_info.major:
                print('*' * 79)
                print(textwrap.fill('WARNING: You are installing Voltron using Python {0}.x and GDB is linked with Python {1}.x. GDB will not be able to load Voltron. Please install using Python {1} if you intend to use Voltron with the copy of GDB that is installed. See the following documentation for more detailed installation instructions: https://github.com/snare/voltron/wiki/Installation'.format(sys.version_info.major, gdb_python), 79))
                print('*' * 79)
        except:
            pass
try to detect the two most common installation errors: installing with a non-system python on macos, and installing with a python whose major version differs from the one gdb is linked against on linux .
train
true
45,867
def provider_respond(server, request, response, data):
    add_openid_simple_registration(request, response, data)
    add_openid_attribute_exchange(request, response, data)
    webresponse = server.encodeResponse(response)
    http_response = HttpResponse(webresponse.body)
    http_response.status_code = webresponse.code
    for (key, val) in webresponse.headers.iteritems():
        http_response[key] = val
    return http_response
respond to an openid request .
train
false
45,868
def getTimeSinceLastUpdate(IOType):
    global last_update_times
    current_time = time()
    last_time = last_update_times.get(IOType)
    if not last_time:
        time_since_update = 1
    else:
        time_since_update = current_time - last_time
    last_update_times[IOType] = current_time
    return time_since_update
return the elapsed time since last update .
train
true
45,870
def hardlinkFile(srcFile, destFile):
    try:
        link(srcFile, destFile)
        fixSetGroupID(destFile)
    except Exception as e:
        sickrage.srCore.srLogger.warning(
            u'Failed to create hardlink of %s at %s. Error: %r. Copying instead'
            % (srcFile, destFile, e))
        copyFile(srcFile, destFile)
create a hard-link between source and destination .
train
false
45,871
def as_dtype(nbtype):
    if isinstance(nbtype, (types.Complex, types.Integer, types.Float)):
        return np.dtype(str(nbtype))
    if nbtype is types.bool_:
        return np.dtype('?')
    if isinstance(nbtype, (types.NPDatetime, types.NPTimedelta)):
        letter = _as_dtype_letters[type(nbtype)]
        if nbtype.unit:
            return np.dtype('%s[%s]' % (letter, nbtype.unit))
        else:
            return np.dtype(letter)
    if isinstance(nbtype, (types.CharSeq, types.UnicodeCharSeq)):
        letter = _as_dtype_letters[type(nbtype)]
        return np.dtype('%s%d' % (letter, nbtype.count))
    if isinstance(nbtype, types.Record):
        return nbtype.dtype
    raise NotImplementedError('%r cannot be represented as a Numpy dtype' % (nbtype,))
return a numpy dtype instance corresponding to the given numba type .
train
false
45,873
def get_tiles_by_chunk(chunkcol, chunkrow):
    tilecol = chunkcol - (chunkcol % 2)
    tilerow = chunkrow - (chunkrow % 4)
    if chunkcol % 2 == 0:
        colrange = (tilecol - 2, tilecol)
    else:
        colrange = (tilecol,)
    if chunkrow % 4 == 0:
        rowrange = xrange(tilerow - 4, tilerow + 32 + 1, 4)
    else:
        rowrange = xrange(tilerow, tilerow + 32 + 1, 4)
    return product(colrange, rowrange)
for the given chunk, return an iterator over the tiles that contain it .
train
false
45,874
def _listconf(env_path):
    files = [os.path.join(env_path, name)
             for name in sorted(os.listdir(env_path))
             if name.endswith('.conf')]
    return files
list configuration files in a folder .
train
false
45,875
def to_host_port_tuple(host_port_str, default_port=80):
    uri = URIReference(scheme=None, authority=host_port_str, path=None,
                       query=None, fragment=None)
    host = uri.host.strip('[]')
    if not uri.port:
        port = default_port
    else:
        port = int(uri.port)
    return (host, port)
converts the given string containing a host and possibly a port to a tuple .
train
false
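Expected behavior as a sketch, with the function above in scope; that URIReference comes from the rfc3986 package is an assumption about the snippet's imports:

from rfc3986 import URIReference  # assumed source of URIReference

print(to_host_port_tuple('example.com:8080'))   # ('example.com', 8080)
print(to_host_port_tuple('[2001:db8::1]:443'))  # ('2001:db8::1', 443)
print(to_host_port_tuple('example.com'))        # ('example.com', 80) -- default port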
45,876
def _st_mask_from_s_inds(n_times, n_vertices, vertices, set_as=True):
    mask = np.zeros((n_times, n_vertices), dtype=bool)
    mask[:, vertices] = True
    mask = mask.ravel()
    if set_as is False:
        mask = np.logical_not(mask)
    return mask
compute mask to apply to a spatio-temporal connectivity matrix .
train
false
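With the function above in scope (np bound to numpy), a small sketch: vertex 1 of 3 is selected at each of 2 time points, and the flattened mask interleaves accordingly:

import numpy as np

print(_st_mask_from_s_inds(2, 3, [1]).astype(int))  # [0 1 0 0 1 0]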
45,877
def _get_provide_specs_from_apps(category):
    if category not in _provide_specs:
        provide_list = []
        for app_config in apps.get_app_configs():
            if not isinstance(app_config, AppConfig):
                continue
            spec_list = app_config.provides.get(category, ())
            if isinstance(spec_list, six.string_types):
                spec_list = (spec_list,)
            for spec in spec_list:
                if spec not in provide_list:
                    provide_list.append(spec)
        _provide_specs[category] = provide_list
    return _provide_specs[category]
load provide spec strings from installed shuup apps .
train
false
45,878
def proxy_connect(args):
    if args.proxy_host:
        if args.proxy_type.lower() == 'socks5' or not args.proxy_type:
            socks.set_default_proxy(socks.SOCKS5, args.proxy_host, int(args.proxy_port))
        elif args.proxy_type.lower() == 'http':
            socks.set_default_proxy(socks.HTTP, args.proxy_host, int(args.proxy_port))
        elif args.proxy_type.lower() == 'socks4':
            socks.set_default_proxy(socks.SOCKS4, args.proxy_host, int(args.proxy_port))
        else:
            printNicely(magenta('Sorry, wrong proxy type specified! Aborting...'))
            sys.exit()
        socket.socket = socks.socksocket
connect to specified proxy .
train
false
45,879
def fanout_cast(conf, context, topic, msg, **kwargs):
    _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
cast to all consumers of a topic .
train
false
45,880
def get_course_masquerade(user, course_key):
    masquerade_settings = getattr(user, 'masquerade_settings', {})
    return masquerade_settings.get(course_key, None)
returns the masquerade for the current user for the specified course .
train
false
45,881
def test_precision_recall_f1_score():
    y_real = np.array([['a', 'b', 'c'], ['a', 'b', 'e', 'f', 'g'], ['a', 'b']])
    y_pred = np.array([['a', 'b', 'c'], ['a', 'b', 'c', 'd'], ['e', 'f']])
    p, r, f = precision_recall_fscore(y_real, y_pred)
    assert_array_almost_equal(p, [1, 0.4, 0], 2)
    assert_array_almost_equal(r, [1.0, 0.5, 0], 2)
    assert_array_almost_equal(f, [1.0, 0.44, 0], 2)
    ps = precision_score(y_real, y_pred)
    assert_array_almost_equal(ps, 0.4666, 2)
    rs = recall_score(y_real, y_pred)
    assert_array_almost_equal(rs, 0.5, 2)
    fs = f1_score(y_real, y_pred)
    assert_array_almost_equal(fs, 0.48, 2)
test precision recall and f1 score .
train
false
45,882
def _contains_bad_names(file_names):
    return any([xml_unsafe.search(f) for f in file_names])
return true if the list of names contains a bad one .
train
false
45,885
@doctest_depends_on(modules=('lxml',))
def c2p(mml, simple=False):
    if not mml.startswith('<math'):
        mml = add_mathml_headers(mml)
    if simple:
        return apply_xsl(mml, 'mathml/data/simple_mmlctop.xsl')
    return apply_xsl(mml, 'mathml/data/mmlctop.xsl')
transforms a document in mathml content in one document in mathml presentation .
train
false
45,887
def test_nm2_fit_sample_half():
    ratio = 0.7
    nm2 = NearMiss(ratio=ratio, random_state=RND_SEED, version=VERSION_NEARMISS)
    X_resampled, y_resampled = nm2.fit_sample(X, Y)
    X_gt = np.array([[0.91464286, 1.61369212],
                     [-0.80809175, -1.09917302],
                     [-0.20497017, -0.26630228],
                     [-0.05903827, 0.10947647],
                     [0.03142011, 0.12323596],
                     [-0.60413357, 0.24628718],
                     [1.17737838, -0.2002118],
                     [0.50701028, -0.17636928],
                     [0.4960075, 0.86130762],
                     [0.45713638, 1.31069295],
                     [0.99272351, -0.11631728]])
    y_gt = np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
test fit and sample routines with a 0.7 sampling ratio .
train
false
45,888
def momentum(loss_or_grads, params, learning_rate, momentum=0.9):
    updates = sgd(loss_or_grads, params, learning_rate)
    return apply_momentum(updates, momentum=momentum)
stochastic gradient descent updates with momentum . generates update expressions of the form:
  velocity := momentum * velocity - learning_rate * gradient
  param := param + velocity
parameters: loss_or_grads : symbolic expression or list of expressions, a scalar loss expression .
train
false
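The composed update is the classic heavy-ball rule described above; a framework-free plain-Python sketch of just that rule on f(x) = x**2:

x, velocity, lr, mom = 5.0, 0.0, 0.1, 0.9
for _ in range(100):
    grad = 2 * x                          # gradient of x**2
    velocity = mom * velocity - lr * grad # velocity update
    x += velocity                         # parameter update
print(abs(x) < 0.1)  # True: x oscillates but settles near the minimum at 0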
45,889
def remove_featured_activity(activity_type, activity_id):
    featured_references = get_featured_activity_references()
    activity_reference_found = False
    new_activity_references = []
    for reference in featured_references:
        if reference.type != activity_type or reference.id != activity_id:
            new_activity_references.append(reference)
        else:
            activity_reference_found = True
    if activity_reference_found:
        logging.info('The %s with id %s was removed from the featured list.'
                     % (activity_type, activity_id))
        update_featured_activity_references(new_activity_references)
removes the specified activity reference from the list of featured activity references .
train
false
45,892
def is_public_group_type(context, group_type_id):
    group_type = db.group_type_get(context, group_type_id)
    return group_type['is_public']
return is_public boolean value of group type .
train
false
45,893
def _filter_data(lhs, rhs):
    def _has_all_columns(df):
        return np.isfinite(df.values).sum(1) == len(df.columns)

    rhs_valid = _has_all_columns(rhs)
    if not rhs_valid.all():
        pre_filtered_rhs = rhs[rhs_valid]
    else:
        pre_filtered_rhs = rhs
    index = lhs.index.union(rhs.index)
    if not index.equals(rhs.index) or not index.equals(lhs.index):
        rhs = rhs.reindex(index)
        lhs = lhs.reindex(index)
        rhs_valid = _has_all_columns(rhs)
    lhs_valid = _has_all_columns(lhs)
    valid = rhs_valid & lhs_valid
    if not valid.all():
        filt_index = rhs.index[valid]
        filtered_rhs = rhs.reindex(filt_index)
        filtered_lhs = lhs.reindex(filt_index)
    else:
        (filtered_rhs, filtered_lhs) = (rhs, lhs)
    return (filtered_lhs, filtered_rhs, pre_filtered_rhs, index, valid)
data filtering routine for dynamic VAR .
lhs : dataframe, original data
rhs : dataframe, lagged variables
returns .
train
false
45,894
@register.inclusion_tag('inclusion.html', takes_context=False)
def inclusion_explicit_no_context(arg):
    return {'result': 'inclusion_explicit_no_context - Expected result: %s' % arg}
expected inclusion_explicit_no_context __doc__ .
train
false
45,895
def load_img(path, grayscale=False, target_size=None):
    if pil_image is None:
        raise ImportError('Could not import PIL.Image. The use of `array_to_img` requires PIL.')
    img = pil_image.open(path)
    if grayscale:
        img = img.convert('L')
    else:
        img = img.convert('RGB')
    if target_size:
        img = img.resize((target_size[1], target_size[0]))
    return img
loads an image into pil format .
train
false
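A self-contained sketch with the function above in scope; pil_image is assumed to be the PIL.Image module, as the ImportError message suggests. Note the axis swap: target_size is (height, width), while PIL reports size as (width, height):

import PIL.Image as pil_image  # the module-level name the snippet expects

pil_image.new('RGB', (64, 48)).save('/tmp/demo.png')  # throwaway test image
print(load_img('/tmp/demo.png', target_size=(100, 200)).size)  # (200, 100)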
45,896
def to_subscription_key(uid, event):
    return u'{}_{}'.format(uid, event)
build the subscription primary key for the given guid and event .
train
false
45,897
def greedyPolicy(Ts, R, discountFactor, V):
    dim = len(V)
    numA = len(Ts)
    Vnext = V * discountFactor + R
    policy = zeros((dim, numA))
    for si in range(dim):
        actions = all_argmax([dot(T[si, :], Vnext) for T in Ts])
        for a in actions:
            policy[si, a] = 1.0 / len(actions)
    return (policy, collapsedTransitions(Ts, policy))
find the greedy policy .
train
false
45,898
def SOPform(variables, minterms, dontcares=None):
    variables = [sympify(v) for v in variables]
    if minterms == []:
        return false
    minterms = [list(i) for i in minterms]
    dontcares = [list(i) for i in (dontcares or [])]
    for d in dontcares:
        if d in minterms:
            raise ValueError('%s in minterms is also in dontcares' % d)
    old = None
    new = minterms + dontcares
    while new != old:
        old = new
        new = _simplified_pairs(old)
    essential = _rem_redundancy(new, minterms)
    return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
the sopform function uses simplified_pairs and a redundant group-eliminating algorithm to convert the list of all input combos that generate 1 into the smallest sum of products form .
train
false
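This matches sympy's SOPform; assuming that implementation, a usage sketch along the lines of sympy's documented example:

from sympy import symbols
from sympy.logic import SOPform

w, x, y, z = symbols('w x y z')
minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
print(SOPform([w, x, y, z], minterms, dontcares))  # (y & z) | (z & ~w)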
45,899
def _get_task_id_from_xmodule_args(xmodule_instance_args):
    return (xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID)
            if xmodule_instance_args is not None else UNKNOWN_TASK_ID)
gets task_id from xmodule_instance_args dict .
train
false
45,900
def ProfilesFeedFromString(xml_string):
    return atom.CreateClassFromXMLString(ProfilesFeed, xml_string)
converts an xml string into a profilesfeed object .
train
false
45,901
def getLargestInsetLoopFromLoopRegardless(loop, radius):
    global globalDecreasingRadiusMultipliers
    for decreasingRadiusMultiplier in globalDecreasingRadiusMultipliers:
        decreasingRadius = radius * decreasingRadiusMultiplier
        largestInsetLoop = getLargestInsetLoopFromLoop(loop, decreasingRadius)
        if len(largestInsetLoop) > 0:
            return largestInsetLoop
    print 'Warning, there should always be a largestInsetLoop in getLargestInsetLoopFromLoopRegardless in intercircle.'
    print loop
    return loop
get the largest inset loop from the loop .
train
false
45,903
def _contains_cycle(fgraph, orderings):
    outputs = fgraph.outputs
    assert isinstance(outputs, (tuple, list, deque))
    parent_counts = {}
    node_to_children = {}
    visitable = deque()
    for var in fgraph.variables:
        owner = var.owner
        if owner:
            node_to_children.setdefault(owner, []).append(var)
            parent_counts[var] = 1
        else:
            visitable.append(var)
            parent_counts[var] = 0
    for a_n in fgraph.apply_nodes:
        parents = list(a_n.inputs)
        parents.extend(orderings.get(a_n, []))
        if parents:
            for parent in parents:
                node_to_children.setdefault(parent, []).append(a_n)
            parent_counts[a_n] = len(parents)
        else:
            visitable.append(a_n)
            parent_counts[a_n] = 0
    visited = 0
    while visitable:
        node = visitable.popleft()
        visited += 1
        for client in node_to_children.get(node, []):
            parent_counts[client] -= 1
            if not parent_counts[client]:
                visitable.append(client)
    return visited != len(parent_counts)
function to check if the given graph contains a cycle . parameters: fgraph, the functiongraph to check for cycles .
train
false
45,904
def cyclen(n, iterable):
    return chain.from_iterable(repeat(tuple(iterable), n))
cyclen(n, iterable) -> iterator: repeats the elements of iterable n times .
train
false
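With the function above in scope, a quick demonstration (the input is materialized with tuple(), so it also works for one-shot iterators):

from itertools import chain, repeat

print(list(cyclen(3, 'ab')))  # ['a', 'b', 'a', 'b', 'a', 'b']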
45,906
def _computeDigestResponse(auth_map, password, method='GET', A1=None, **kwargs):
    params = auth_map
    algorithm = params.get('algorithm', MD5)
    H = DIGEST_AUTH_ENCODERS[algorithm]
    KD = lambda secret, data: H(secret + ':' + data)
    qop = params.get('qop', None)
    H_A2 = H(_A2(params, method, kwargs))
    if algorithm == MD5_SESS and A1 is not None:
        H_A1 = H(A1)
    else:
        H_A1 = H(_A1(params, password))
    if qop in ('auth', 'auth-int'):
        request = '%s:%s:%s:%s:%s' % (params['nonce'], params['nc'],
                                      params['cnonce'], params['qop'], H_A2)
    elif qop is None:
        request = '%s:%s' % (params['nonce'], H_A2)
    return KD(H_A1, request)
generates a response respecting the algorithm defined in rfc 2617 .
train
false
45,907
@asyncio.coroutine
def mock_setup_platform():
    return None
mock prepare_setup_platform .
train
false
45,908
def DFS_loop(digr):
    node_explored = set([])
    finishing_times = []
    for node in digr.nodes():
        if node not in node_explored:
            leader_node = node
            inner_DFS(digr, node, node_explored, finishing_times)
    return finishing_times
core dfs loop used to find strongly connected components in a directed graph .
train
false
45,909
def create_virtualenv(root):
    run_command(['virtualenv', '--python=/usr/bin/python2.7', '--quiet', root.path],
                added_env=dict(PYTHONDONTWRITEBYTECODE='1'))
    for module_name in virtualenv.REQUIRED_MODULES:
        py_base = root.descendant(['lib', 'python2.7', module_name])
        py = py_base.siblingExtension('.py')
        if py.exists() and py.islink():
            pyc = py_base.siblingExtension('.pyc')
            py_target = py.realpath()
            pyc_target = FilePath(py_target.splitext()[0]).siblingExtension('.pyc')
            if pyc.exists():
                pyc.remove()
            if pyc_target.exists():
                pyc_target.linkTo(pyc)
    return VirtualEnv(root=root)
create a virtualenv in root .
train
false
45,910
def parse_column(name, type_string, comment=None):
    type_string = type_string.lower()
    column = {'name': name, 'comment': comment or ''}
    (simple_type, inner) = _parse_type(type_string)
    column['type'] = simple_type
    if inner:
        column.update(_parse_complex(simple_type, inner))
    return column
returns a dictionary of a hive column's type metadata and any complex or nested type info .
train
false
45,911
def getPathCopy(path):
    pathCopy = []
    for point in path:
        pathCopy.append(point.copy())
    return pathCopy
get path copy .
train
false
45,912
def AlternatingGroup(n):
    if n in (1, 2):
        return PermutationGroup([Permutation([0])])
    a = list(range(n))
    (a[0], a[1], a[2]) = (a[1], a[2], a[0])
    gen1 = a
    if n % 2:
        a = list(range(1, n))
        a.append(0)
        gen2 = a
    else:
        a = list(range(2, n))
        a.append(1)
        a.insert(0, 0)
        gen2 = a
    gens = [gen1, gen2]
    if gen1 == gen2:
        gens = gens[:1]
    G = PermutationGroup([_af_new(a) for a in gens], dups=False)
    if n < 4:
        G._is_abelian = True
        G._is_nilpotent = True
    else:
        G._is_abelian = False
        G._is_nilpotent = False
    if n < 5:
        G._is_solvable = True
    else:
        G._is_solvable = False
    G._degree = n
    G._is_transitive = True
    G._is_alt = True
    return G
generates the alternating group on n elements as a permutation group .
train
false
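The snippet matches sympy's AlternatingGroup; a quick self-contained check of the properties it sets, assuming sympy is installed:

from sympy.combinatorics.named_groups import AlternatingGroup

# A_4 has 4!/2 = 12 elements; per the flags set above it is solvable but not abelian
G = AlternatingGroup(4)
assert G.order() == 12
assert G.is_solvable and not G.is_abelian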
45,913
def _quote_slashes(match): matched = match.group(0) if (matched == ';'): return ';;' elif (matched == '/'): return ';_' else: return matched
[ "def", "_quote_slashes", "(", "match", ")", ":", "matched", "=", "match", ".", "group", "(", "0", ")", "if", "(", "matched", "==", "';'", ")", ":", "return", "';;'", "elif", "(", "matched", "==", "'/'", ")", ":", "return", "';_'", "else", ":", "return", "matched" ]
helper function for quote_slashes .
train
false
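_quote_slashes is a substitution callback; a minimal sketch of how a caller might use it with re.sub (the character class here is an assumption about the real pattern, which is not shown):

import re

def _quote_slashes(match):
    matched = match.group(0)
    if matched == ';':
        return ';;'
    elif matched == '/':
        return ';_'
    return matched

# ';' doubles itself and '/' becomes ';_', so the result is slash-free
assert re.sub(r'[;/]', _quote_slashes, 'block/1;a') == 'block;_1;;a'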
45,914
def _wns_prepare_toast(data, **kwargs): root = ET.Element('toast') visual = ET.SubElement(root, 'visual') binding = ET.SubElement(visual, 'binding') binding.attrib['template'] = kwargs.pop('template', 'ToastText01') if ('text' in data): for (count, item) in enumerate(data['text'], start=1): elem = ET.SubElement(binding, 'text') elem.text = item elem.attrib['id'] = str(count) if ('image' in data): for (count, item) in enumerate(data['image'], start=1): elem = ET.SubElement(binding, 'img') elem.attrib['src'] = item elem.attrib['id'] = str(count) return ET.tostring(root)
[ "def", "_wns_prepare_toast", "(", "data", ",", "**", "kwargs", ")", ":", "root", "=", "ET", ".", "Element", "(", "'toast'", ")", "visual", "=", "ET", ".", "SubElement", "(", "root", ",", "'visual'", ")", "binding", "=", "ET", ".", "SubElement", "(", "visual", ",", "'binding'", ")", "binding", ".", "attrib", "[", "'template'", "]", "=", "kwargs", ".", "pop", "(", "'template'", ",", "'ToastText01'", ")", "if", "(", "'text'", "in", "data", ")", ":", "for", "(", "count", ",", "item", ")", "in", "enumerate", "(", "data", "[", "'text'", "]", ",", "start", "=", "1", ")", ":", "elem", "=", "ET", ".", "SubElement", "(", "binding", ",", "'text'", ")", "elem", ".", "text", "=", "item", "elem", ".", "attrib", "[", "'id'", "]", "=", "str", "(", "count", ")", "if", "(", "'image'", "in", "data", ")", ":", "for", "(", "count", ",", "item", ")", "in", "enumerate", "(", "data", "[", "'image'", "]", ",", "start", "=", "1", ")", ":", "elem", "=", "ET", ".", "SubElement", "(", "binding", ",", "'img'", ")", "elem", ".", "attrib", "[", "'src'", "]", "=", "item", "elem", ".", "attrib", "[", "'id'", "]", "=", "str", "(", "count", ")", "return", "ET", ".", "tostring", "(", "root", ")" ]
creates the xml tree for a toast notification .
train
false
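A hedged usage sketch for _wns_prepare_toast; the expected XML shape is read directly off the snippet, and the template keyword is the one the code pops:

# two text lines, default ToastText01 template
xml = _wns_prepare_toast({'text': ['Hello', 'World']})
# expected shape:
# <toast><visual><binding template="ToastText01">
#   <text id="1">Hello</text><text id="2">World</text>
# </binding></visual></toast>
xml2 = _wns_prepare_toast({'text': ['Hi']}, template='ToastText02')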
45,915
@testing.requires_testing_data def test_make_inverse_operator_diag(): evoked = _get_evoked() noise_cov = read_cov(fname_cov).as_diag() fwd_op = read_forward_solution(fname_fwd, surf_ori=True) inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=0.2, depth=0.8) _compare_io(inv_op) inverse_operator_diag = read_inverse_operator(fname_inv_meeg_diag) _compare_inverses_approx(inverse_operator_diag, inv_op, evoked, 0, 1.0) assert_true((compute_rank_inverse(inverse_operator_diag) == 360))
[ "@", "testing", ".", "requires_testing_data", "def", "test_make_inverse_operator_diag", "(", ")", ":", "evoked", "=", "_get_evoked", "(", ")", "noise_cov", "=", "read_cov", "(", "fname_cov", ")", ".", "as_diag", "(", ")", "fwd_op", "=", "read_forward_solution", "(", "fname_fwd", ",", "surf_ori", "=", "True", ")", "inv_op", "=", "make_inverse_operator", "(", "evoked", ".", "info", ",", "fwd_op", ",", "noise_cov", ",", "loose", "=", "0.2", ",", "depth", "=", "0.8", ")", "_compare_io", "(", "inv_op", ")", "inverse_operator_diag", "=", "read_inverse_operator", "(", "fname_inv_meeg_diag", ")", "_compare_inverses_approx", "(", "inverse_operator_diag", ",", "inv_op", ",", "evoked", ",", "0", ",", "1.0", ")", "assert_true", "(", "(", "compute_rank_inverse", "(", "inverse_operator_diag", ")", "==", "360", ")", ")" ]
test mne inverse computation with diagonal noise cov .
train
false
45,916
def start_zookeeper(zk_ips, keyname): logging.info('Starting ZooKeeper...') for ip in zk_ips: start_service_cmd = (START_SERVICE_SCRIPT + ZK_WATCH_NAME) try: utils.ssh(ip, keyname, start_service_cmd) except subprocess.CalledProcessError: message = 'Unable to start ZooKeeper on {}'.format(ip) logging.exception(message) raise ZKInternalException(message) logging.info('Waiting for ZooKeeper to be ready') zk_server_cmd = None for script in ZK_SERVER_CMD_LOCATIONS: if os.path.isfile(script): zk_server_cmd = script break if (zk_server_cmd is None): raise ZKInternalException('Unable to find zkServer.sh') status_cmd = '{} status'.format(zk_server_cmd) while (utils.ssh(zk_ips[0], keyname, status_cmd, method=subprocess.call) != 0): time.sleep(5) logging.info('Successfully started ZooKeeper.')
[ "def", "start_zookeeper", "(", "zk_ips", ",", "keyname", ")", ":", "logging", ".", "info", "(", "'Starting ZooKeeper...'", ")", "for", "ip", "in", "zk_ips", ":", "start_service_cmd", "=", "(", "START_SERVICE_SCRIPT", "+", "ZK_WATCH_NAME", ")", "try", ":", "utils", ".", "ssh", "(", "ip", ",", "keyname", ",", "start_service_cmd", ")", "except", "subprocess", ".", "CalledProcessError", ":", "message", "=", "'Unable to start ZooKeeper on {}'", ".", "format", "(", "ip", ")", "logging", ".", "exception", "(", "message", ")", "raise", "ZKInternalException", "(", "message", ")", "logging", ".", "info", "(", "'Waiting for ZooKeeper to be ready'", ")", "zk_server_cmd", "=", "None", "for", "script", "in", "ZK_SERVER_CMD_LOCATIONS", ":", "if", "os", ".", "path", ".", "isfile", "(", "script", ")", ":", "zk_server_cmd", "=", "script", "break", "if", "(", "zk_server_cmd", "is", "None", ")", ":", "raise", "ZKInternalException", "(", "'Unable to find zkServer.sh'", ")", "status_cmd", "=", "'{} status'", ".", "format", "(", "zk_server_cmd", ")", "while", "(", "utils", ".", "ssh", "(", "zk_ips", "[", "0", "]", ",", "keyname", ",", "status_cmd", ",", "method", "=", "subprocess", ".", "call", ")", "!=", "0", ")", ":", "time", ".", "sleep", "(", "5", ")", "logging", ".", "info", "(", "'Successfully started ZooKeeper.'", ")" ]
creates a monit configuration file and prompts monit to start zookeeper .
train
false
45,917
def set_peers(*peers, **options): test = options.pop('test', False) commit = options.pop('commit', True) return __salt__['net.load_template']('set_ntp_peers', peers=peers, test=test, commit=commit)
[ "def", "set_peers", "(", "*", "peers", ",", "**", "options", ")", ":", "test", "=", "options", ".", "pop", "(", "'test'", ",", "False", ")", "commit", "=", "options", ".", "pop", "(", "'commit'", ",", "True", ")", "return", "__salt__", "[", "'net.load_template'", "]", "(", "'set_ntp_peers'", ",", "peers", "=", "peers", ",", "test", "=", "test", ",", "commit", "=", "commit", ")" ]
configures a list of ntp peers on the device .
train
true
45,918
def test_existing_path_FileLinks(): td = mkdtemp() tf1 = NamedTemporaryFile(dir=td) tf2 = NamedTemporaryFile(dir=td) fl = display.FileLinks(td) actual = fl._repr_html_() actual = actual.split('\n') actual.sort() expected = [('%s/<br>' % td), ("&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>" % (tf2.name.replace('\\', '/'), split(tf2.name)[1])), ("&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>" % (tf1.name.replace('\\', '/'), split(tf1.name)[1]))] expected.sort() nt.assert_equal(actual, expected)
[ "def", "test_existing_path_FileLinks", "(", ")", ":", "td", "=", "mkdtemp", "(", ")", "tf1", "=", "NamedTemporaryFile", "(", "dir", "=", "td", ")", "tf2", "=", "NamedTemporaryFile", "(", "dir", "=", "td", ")", "fl", "=", "display", ".", "FileLinks", "(", "td", ")", "actual", "=", "fl", ".", "_repr_html_", "(", ")", "actual", "=", "actual", ".", "split", "(", "'\\n'", ")", "actual", ".", "sort", "(", ")", "expected", "=", "[", "(", "'%s/<br>'", "%", "td", ")", ",", "(", "\"&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>\"", "%", "(", "tf2", ".", "name", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", ",", "split", "(", "tf2", ".", "name", ")", "[", "1", "]", ")", ")", ",", "(", "\"&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>\"", "%", "(", "tf1", ".", "name", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", ",", "split", "(", "tf1", ".", "name", ")", "[", "1", "]", ")", ")", "]", "expected", ".", "sort", "(", ")", "nt", ".", "assert_equal", "(", "actual", ",", "expected", ")" ]
filelinks: calling _repr_html_ works as expected on an existing dir.
train
false
45,919
def coherent_state(n, alpha): return ((exp(((- (Abs(alpha) ** 2)) / 2)) * (alpha ** n)) / sqrt(factorial(n)))
[ "def", "coherent_state", "(", "n", ",", "alpha", ")", ":", "return", "(", "(", "exp", "(", "(", "(", "-", "(", "Abs", "(", "alpha", ")", "**", "2", ")", ")", "/", "2", ")", ")", "*", "(", "alpha", "**", "n", ")", ")", "/", "sqrt", "(", "factorial", "(", "n", ")", ")", ")" ]
returns <n|alpha> for the coherent states of 1d harmonic oscillator .
train
false
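A small sympy check of the normalization the formula implies; specializing to real alpha and substituting x for Abs(alpha)**2 keeps the sum in a form sympy evaluates directly:

from sympy import Symbol, Sum, exp, factorial, oo, simplify

n = Symbol('n', integer=True, nonnegative=True)
x = Symbol('x', positive=True)  # stands for Abs(alpha)**2

# sum over n of |<n|alpha>|**2 = exp(-x) * sum x**n/n! = 1
total = exp(-x) * Sum(x**n / factorial(n), (n, 0, oo)).doit()
assert simplify(total) == 1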
45,920
def make_generation_hash(x): return GenerationHash(hash_value=generation_hash(x))
[ "def", "make_generation_hash", "(", "x", ")", ":", "return", "GenerationHash", "(", "hash_value", "=", "generation_hash", "(", "x", ")", ")" ]
creates a generationhash for a given argument .
train
false
45,921
def from_record_like(rec, stream=0, gpu_data=None): return DeviceRecord(rec.dtype, stream=stream, gpu_data=gpu_data)
[ "def", "from_record_like", "(", "rec", ",", "stream", "=", "0", ",", "gpu_data", "=", "None", ")", ":", "return", "DeviceRecord", "(", "rec", ".", "dtype", ",", "stream", "=", "stream", ",", "gpu_data", "=", "gpu_data", ")" ]
create a devicerecord object that is like rec .
train
false
45,922
def _ancillaryDescriptor(fd): packed = struct.pack('i', fd) return [(socket.SOL_SOCKET, sendmsg.SCM_RIGHTS, packed)]
[ "def", "_ancillaryDescriptor", "(", "fd", ")", ":", "packed", "=", "struct", ".", "pack", "(", "'i'", ",", "fd", ")", "return", "[", "(", "socket", ".", "SOL_SOCKET", ",", "sendmsg", ".", "SCM_RIGHTS", ",", "packed", ")", "]" ]
pack an integer into an ancillary data structure suitable for use with sendmsg.
train
false
45,923
def graph6_to_data(string): v = [(ord(c) - 63) for c in string] if ((len(v) > 0) and ((min(v) < 0) or (max(v) > 63))): return None return v
[ "def", "graph6_to_data", "(", "string", ")", ":", "v", "=", "[", "(", "ord", "(", "c", ")", "-", "63", ")", "for", "c", "in", "string", "]", "if", "(", "(", "len", "(", "v", ")", ">", "0", ")", "and", "(", "(", "min", "(", "v", ")", "<", "0", ")", "or", "(", "max", "(", "v", ")", ">", "63", ")", ")", ")", ":", "return", "None", "return", "v" ]
convert graph6 character sequence to 6-bit integers .
train
false
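A self-contained sanity check of the decoding rule above: printable characters map to ord(c) - 63, and anything outside 0..63 rejects the whole string.

def graph6_to_data(string):
    v = [ord(c) - 63 for c in string]
    if len(v) > 0 and (min(v) < 0 or max(v) > 63):
        return None
    return v

assert graph6_to_data('D?{') == [5, 0, 60]   # 68-63, 63-63, 123-63
assert graph6_to_data('\x01') is None        # 1-63 < 0, so invalid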
45,924
def load_results(base): data_file = get_pdata_path(base, 1) try: with open(data_file, _PICK_LOAD) as stream: return pickle.load(stream) except Exception: return {}
[ "def", "load_results", "(", "base", ")", ":", "data_file", "=", "get_pdata_path", "(", "base", ",", "1", ")", "try", ":", "with", "open", "(", "data_file", ",", "_PICK_LOAD", ")", "as", "stream", ":", "return", "pickle", ".", "load", "(", "stream", ")", "except", "Exception", ":", "return", "{", "}" ]
try to unpickle and return data from the file if it exists and is not corrupted; return an empty dictionary if it doesn't exist.
train
true
45,925
def patch_thread(threading=True, _threading_local=True, Event=False, logging=True, existing_locks=True, _warnings=None): if threading: __import__('threading') patch_module('thread') if threading: threading = patch_module('threading') if Event: from gevent.event import Event patch_item(threading, 'Event', Event) if existing_locks: _patch_existing_locks(threading) if (logging and ('logging' in sys.modules)): logging = __import__('logging') patch_item(logging, '_lock', threading.RLock()) for wr in logging._handlerList: handler = (wr() if callable(wr) else wr) if (handler is None): continue if (not hasattr(handler, 'lock')): raise TypeError(('Unknown/unsupported handler %r' % handler)) handler.lock = threading.RLock() if _threading_local: _threading_local = __import__('_threading_local') from gevent.local import local patch_item(_threading_local, 'local', local) if (sys.version_info[:2] >= (3, 4)): threading = __import__('threading') greenlet = __import__('greenlet') if (threading.current_thread() == threading.main_thread()): main_thread = threading.main_thread() _greenlet = main_thread._greenlet = greenlet.getcurrent() from gevent.hub import sleep def join(timeout=None): if (threading.current_thread() is main_thread): raise RuntimeError('Cannot join current thread') if (_greenlet.dead or (not main_thread.is_alive())): return elif timeout: raise ValueError('Cannot use a timeout to join the main thread') else: while main_thread.is_alive(): sleep(0.01) main_thread.join = join oldid = main_thread.ident main_thread._ident = threading.get_ident() if (oldid in threading._active): threading._active[main_thread.ident] = threading._active[oldid] if (oldid != main_thread.ident): del threading._active[oldid] else: _queue_warning('Monkey-patching not on the main thread; threading.main_thread().join() will hang from a greenlet', _warnings)
[ "def", "patch_thread", "(", "threading", "=", "True", ",", "_threading_local", "=", "True", ",", "Event", "=", "False", ",", "logging", "=", "True", ",", "existing_locks", "=", "True", ",", "_warnings", "=", "None", ")", ":", "if", "threading", ":", "__import__", "(", "'threading'", ")", "patch_module", "(", "'thread'", ")", "if", "threading", ":", "threading", "=", "patch_module", "(", "'threading'", ")", "if", "Event", ":", "from", "gevent", ".", "event", "import", "Event", "patch_item", "(", "threading", ",", "'Event'", ",", "Event", ")", "if", "existing_locks", ":", "_patch_existing_locks", "(", "threading", ")", "if", "(", "logging", "and", "(", "'logging'", "in", "sys", ".", "modules", ")", ")", ":", "logging", "=", "__import__", "(", "'logging'", ")", "patch_item", "(", "logging", ",", "'_lock'", ",", "threading", ".", "RLock", "(", ")", ")", "for", "wr", "in", "logging", ".", "_handlerList", ":", "handler", "=", "(", "wr", "(", ")", "if", "callable", "(", "wr", ")", "else", "wr", ")", "if", "(", "handler", "is", "None", ")", ":", "continue", "if", "(", "not", "hasattr", "(", "handler", ",", "'lock'", ")", ")", ":", "raise", "TypeError", "(", "(", "'Unknown/unsupported handler %r'", "%", "handler", ")", ")", "handler", ".", "lock", "=", "threading", ".", "RLock", "(", ")", "if", "_threading_local", ":", "_threading_local", "=", "__import__", "(", "'_threading_local'", ")", "from", "gevent", ".", "local", "import", "local", "patch_item", "(", "_threading_local", ",", "'local'", ",", "local", ")", "if", "(", "sys", ".", "version_info", "[", ":", "2", "]", ">=", "(", "3", ",", "4", ")", ")", ":", "threading", "=", "__import__", "(", "'threading'", ")", "greenlet", "=", "__import__", "(", "'greenlet'", ")", "if", "(", "threading", ".", "current_thread", "(", ")", "==", "threading", ".", "main_thread", "(", ")", ")", ":", "main_thread", "=", "threading", ".", "main_thread", "(", ")", "_greenlet", "=", "main_thread", ".", "_greenlet", "=", "greenlet", ".", "getcurrent", "(", ")", "from", "gevent", ".", "hub", "import", "sleep", "def", "join", "(", "timeout", "=", "None", ")", ":", "if", "(", "threading", ".", "current_thread", "(", ")", "is", "main_thread", ")", ":", "raise", "RuntimeError", "(", "'Cannot join current thread'", ")", "if", "(", "_greenlet", ".", "dead", "or", "(", "not", "main_thread", ".", "is_alive", "(", ")", ")", ")", ":", "return", "elif", "timeout", ":", "raise", "ValueError", "(", "'Cannot use a timeout to join the main thread'", ")", "else", ":", "while", "main_thread", ".", "is_alive", "(", ")", ":", "sleep", "(", "0.01", ")", "main_thread", ".", "join", "=", "join", "oldid", "=", "main_thread", ".", "ident", "main_thread", ".", "_ident", "=", "threading", ".", "get_ident", "(", ")", "if", "(", "oldid", "in", "threading", ".", "_active", ")", ":", "threading", ".", "_active", "[", "main_thread", ".", "ident", "]", "=", "threading", ".", "_active", "[", "oldid", "]", "if", "(", "oldid", "!=", "main_thread", ".", "ident", ")", ":", "del", "threading", ".", "_active", "[", "oldid", "]", "else", ":", "_queue_warning", "(", "'Monkey-patching not on the main thread; threading.main_thread().join() will hang from a greenlet'", ",", "_warnings", ")" ]
replace the standard thread module to make it greenlet-based.
train
false
45,926
def _apply_assertion(expected, result): log.debug('Expected result: %s. Actual result: %s', expected, result) if isinstance(expected, bool): return (result is expected) elif isinstance(expected, dict): try: comparison = getattr(operator, expected['comparison']) except AttributeError: if (expected.get('comparison') == 'search'): comparison = re.search else: raise InvalidArgumentError('Comparison {0} is not a valid selection.'.format(expected.get('comparison'))) except KeyError: log.exception('The comparison dictionary provided is missing expected keys. Either "expected" or "comparison" are not present.') raise return comparison(expected['expected'], result) else: raise TypeError('Expected bool or dict but received {0}'.format(type(expected)))
[ "def", "_apply_assertion", "(", "expected", ",", "result", ")", ":", "log", ".", "debug", "(", "'Expected result: %s. Actual result: %s'", ",", "expected", ",", "result", ")", "if", "isinstance", "(", "expected", ",", "bool", ")", ":", "return", "(", "result", "is", "expected", ")", "elif", "isinstance", "(", "expected", ",", "dict", ")", ":", "try", ":", "comparison", "=", "getattr", "(", "operator", ",", "expected", "[", "'comparison'", "]", ")", "except", "AttributeError", ":", "if", "(", "expected", ".", "get", "(", "'comparison'", ")", "==", "'search'", ")", ":", "comparison", "=", "re", ".", "search", "else", ":", "raise", "InvalidArgumentError", "(", "'Comparison {0} is not a valid selection.'", ".", "format", "(", "expected", ".", "get", "(", "'comparison'", ")", ")", ")", "except", "KeyError", ":", "log", ".", "exception", "(", "'The comparison dictionary provided is missing expected keys. Either \"expected\" or \"comparison\" are not present.'", ")", "raise", "return", "comparison", "(", "expected", "[", "'expected'", "]", ",", "result", ")", "else", ":", "raise", "TypeError", "(", "'Expected bool or dict but received {0}'", ".", "format", "(", "type", "(", "expected", ")", ")", ")" ]
given the result of a method, check it against the expected value, which may be a bool or a comparison dictionary.
train
false
45,928
@pytest.mark.skipif("platform.python_implementation() == 'PyPy'") def test_always_copy_option(): tmp_virtualenv = tempfile.mkdtemp() ve_path = os.path.join(tmp_virtualenv, 'venv') try: virtualenv.create_environment(ve_path, symlink=False) for (root, dirs, files) in os.walk(tmp_virtualenv): for f in (files + dirs): full_name = os.path.join(root, f) assert (not os.path.islink(full_name)), ('%s should not be a symlink (to %s)' % (full_name, os.readlink(full_name))) finally: shutil.rmtree(tmp_virtualenv)
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "\"platform.python_implementation() == 'PyPy'\"", ")", "def", "test_always_copy_option", "(", ")", ":", "tmp_virtualenv", "=", "tempfile", ".", "mkdtemp", "(", ")", "ve_path", "=", "os", ".", "path", ".", "join", "(", "tmp_virtualenv", ",", "'venv'", ")", "try", ":", "virtualenv", ".", "create_environment", "(", "ve_path", ",", "symlink", "=", "False", ")", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "tmp_virtualenv", ")", ":", "for", "f", "in", "(", "files", "+", "dirs", ")", ":", "full_name", "=", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", "assert", "(", "not", "os", ".", "path", ".", "islink", "(", "full_name", ")", ")", ",", "(", "'%s should not be a symlink (to %s)'", "%", "(", "full_name", ",", "os", ".", "readlink", "(", "full_name", ")", ")", ")", "finally", ":", "shutil", ".", "rmtree", "(", "tmp_virtualenv", ")" ]
there should be no symlinks in the directory tree.
train
false
45,930
def _interpret_hadoop_jar_command_stderr(stderr, record_callback=None): def yield_lines(): try: for line in stderr: (yield to_string(line)) except IOError as e: if (e.errno == errno.EIO): return else: raise def pre_filter(line): return bool(_HADOOP_STREAMING_NON_LOG4J_LINE_RE.match(line)) def yield_records(): for record in _parse_hadoop_log4j_records(yield_lines(), pre_filter=pre_filter): if record_callback: record_callback(record) (yield record) result = _parse_step_syslog_from_log4j_records(yield_records()) _add_implied_job_id(result) for error in (result.get('errors') or ()): _add_implied_task_id(error) return result
[ "def", "_interpret_hadoop_jar_command_stderr", "(", "stderr", ",", "record_callback", "=", "None", ")", ":", "def", "yield_lines", "(", ")", ":", "try", ":", "for", "line", "in", "stderr", ":", "(", "yield", "to_string", "(", "line", ")", ")", "except", "IOError", "as", "e", ":", "if", "(", "e", ".", "errno", "==", "errno", ".", "EIO", ")", ":", "return", "else", ":", "raise", "def", "pre_filter", "(", "line", ")", ":", "return", "bool", "(", "_HADOOP_STREAMING_NON_LOG4J_LINE_RE", ".", "match", "(", "line", ")", ")", "def", "yield_records", "(", ")", ":", "for", "record", "in", "_parse_hadoop_log4j_records", "(", "yield_lines", "(", ")", ",", "pre_filter", "=", "pre_filter", ")", ":", "if", "record_callback", ":", "record_callback", "(", "record", ")", "(", "yield", "record", ")", "result", "=", "_parse_step_syslog_from_log4j_records", "(", "yield_records", "(", ")", ")", "_add_implied_job_id", "(", "result", ")", "for", "error", "in", "(", "result", ".", "get", "(", "'errors'", ")", "or", "(", ")", ")", ":", "_add_implied_task_id", "(", "error", ")", "return", "result" ]
parse stderr from the hadoop jar command .
train
false
45,931
def handle_uncaught_exception(request, resolver, exc_info): if settings.DEBUG_PROPAGATE_EXCEPTIONS: raise logger.error('Internal Server Error: %s', request.path, exc_info=exc_info, extra={'status_code': 500, 'request': request}) if settings.DEBUG: return debug.technical_500_response(request, *exc_info) (callback, param_dict) = resolver.resolve_error_handler(500) return callback(request, **param_dict)
[ "def", "handle_uncaught_exception", "(", "request", ",", "resolver", ",", "exc_info", ")", ":", "if", "settings", ".", "DEBUG_PROPAGATE_EXCEPTIONS", ":", "raise", "logger", ".", "error", "(", "'Internal Server Error: %s'", ",", "request", ".", "path", ",", "exc_info", "=", "exc_info", ",", "extra", "=", "{", "'status_code'", ":", "500", ",", "'request'", ":", "request", "}", ")", "if", "settings", ".", "DEBUG", ":", "return", "debug", ".", "technical_500_response", "(", "request", ",", "*", "exc_info", ")", "(", "callback", ",", "param_dict", ")", "=", "resolver", ".", "resolve_error_handler", "(", "500", ")", "return", "callback", "(", "request", ",", "**", "param_dict", ")" ]
processing for any otherwise uncaught exceptions .
train
false
45,932
def custom_tools(registry, xml_parent, data): base = 'com.cloudbees.jenkins.plugins.customtools' wrapper = XML.SubElement(xml_parent, (base + '.CustomToolInstallWrapper')) wrapper_tools = XML.SubElement(wrapper, 'selectedTools') tools = data.get('tools', []) tool_node = (base + '.CustomToolInstallWrapper_-SelectedTool') for tool in tools: tool_wrapper = XML.SubElement(wrapper_tools, tool_node) XML.SubElement(tool_wrapper, 'name').text = str(tool) opts = XML.SubElement(wrapper, 'multiconfigOptions') skip_install = str(data.get('skip-master-install', 'false')) XML.SubElement(opts, 'skipMasterInstallation').text = skip_install convert_home = str(data.get('convert-homes-to-upper', 'false')) XML.SubElement(wrapper, 'convertHomesToUppercase').text = convert_home
[ "def", "custom_tools", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "base", "=", "'com.cloudbees.jenkins.plugins.customtools'", "wrapper", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "(", "base", "+", "'.CustomToolInstallWrapper'", ")", ")", "wrapper_tools", "=", "XML", ".", "SubElement", "(", "wrapper", ",", "'selectedTools'", ")", "tools", "=", "data", ".", "get", "(", "'tools'", ",", "[", "]", ")", "tool_node", "=", "(", "base", "+", "'.CustomToolInstallWrapper_-SelectedTool'", ")", "for", "tool", "in", "tools", ":", "tool_wrapper", "=", "XML", ".", "SubElement", "(", "wrapper_tools", ",", "tool_node", ")", "XML", ".", "SubElement", "(", "tool_wrapper", ",", "'name'", ")", ".", "text", "=", "str", "(", "tool", ")", "opts", "=", "XML", ".", "SubElement", "(", "wrapper", ",", "'multiconfigOptions'", ")", "skip_install", "=", "str", "(", "data", ".", "get", "(", "'skip-master-install'", ",", "'false'", ")", ")", "XML", ".", "SubElement", "(", "opts", ",", "'skipMasterInstallation'", ")", ".", "text", "=", "skip_install", "convert_home", "=", "str", "(", "data", ".", "get", "(", "'convert-homes-to-upper'", ",", "'false'", ")", ")", "XML", ".", "SubElement", "(", "wrapper", ",", "'convertHomesToUppercase'", ")", ".", "text", "=", "convert_home" ]
yaml: custom-tools. requires the jenkins custom tools plugin.
train
false
45,933
def createHTTPFetcher(): if (pycurl is None): fetcher = Urllib2Fetcher() else: fetcher = CurlHTTPFetcher() return fetcher
[ "def", "createHTTPFetcher", "(", ")", ":", "if", "(", "pycurl", "is", "None", ")", ":", "fetcher", "=", "Urllib2Fetcher", "(", ")", "else", ":", "fetcher", "=", "CurlHTTPFetcher", "(", ")", "return", "fetcher" ]
create a default http fetcher instance; prefers curl to urllib2.
train
false
45,935
def assertNoTPDiffs(tps): if (len(tps) == 1): return if (len(tps) > 2): raise 'Not implemented for more than 2 TPs' same = fdrutils.tpDiff2(verbosity=VERBOSITY, *tps.values()) assert same return
[ "def", "assertNoTPDiffs", "(", "tps", ")", ":", "if", "(", "len", "(", "tps", ")", "==", "1", ")", ":", "return", "if", "(", "len", "(", "tps", ")", ">", "2", ")", ":", "raise", "'Not implemented for more than 2 TPs'", "same", "=", "fdrutils", ".", "tpDiff2", "(", "verbosity", "=", "VERBOSITY", ",", "*", "tps", ".", "values", "(", ")", ")", "assert", "same", "return" ]
check for diffs among the tp instances in the passed-in tps dict and raise an assert if any are detected. parameters: tps: dict of tp instances.
train
false
45,938
@login_required def echo_attributes(request, config_loader_path=None, template='djangosaml2/echo_attributes.html'): state = StateCache(request.session) conf = get_config(config_loader_path, request) client = Saml2Client(conf, state_cache=state, identity_cache=IdentityCache(request.session)) subject_id = _get_subject_id(request.session) identity = client.users.get_identity(subject_id, check_not_on_or_after=False) return render_to_response(template, {'attributes': identity[0]}, context_instance=RequestContext(request))
[ "@", "login_required", "def", "echo_attributes", "(", "request", ",", "config_loader_path", "=", "None", ",", "template", "=", "'djangosaml2/echo_attributes.html'", ")", ":", "state", "=", "StateCache", "(", "request", ".", "session", ")", "conf", "=", "get_config", "(", "config_loader_path", ",", "request", ")", "client", "=", "Saml2Client", "(", "conf", ",", "state_cache", "=", "state", ",", "identity_cache", "=", "IdentityCache", "(", "request", ".", "session", ")", ")", "subject_id", "=", "_get_subject_id", "(", "request", ".", "session", ")", "identity", "=", "client", ".", "users", ".", "get_identity", "(", "subject_id", ",", "check_not_on_or_after", "=", "False", ")", "return", "render_to_response", "(", "template", ",", "{", "'attributes'", ":", "identity", "[", "0", "]", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ")" ]
example view that echoes the saml attributes of a user.
train
false
45,939
def is_module_or_package(path): is_module = (osp.isfile(path) and (osp.splitext(path)[1] in ('.py', '.pyw'))) is_package = (osp.isdir(path) and osp.isfile(osp.join(path, '__init__.py'))) return (is_module or is_package)
[ "def", "is_module_or_package", "(", "path", ")", ":", "is_module", "=", "(", "osp", ".", "isfile", "(", "path", ")", "and", "(", "osp", ".", "splitext", "(", "path", ")", "[", "1", "]", "in", "(", "'.py'", ",", "'.pyw'", ")", ")", ")", "is_package", "=", "(", "osp", ".", "isdir", "(", "path", ")", "and", "osp", ".", "isfile", "(", "osp", ".", "join", "(", "path", ",", "'__init__.py'", ")", ")", ")", "return", "(", "is_module", "or", "is_package", ")" ]
return true if path is a python module/package .
train
true
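A quick runnable check of both branches; the temporary directory only counts as a package once __init__.py exists.

import os.path as osp
import tempfile

def is_module_or_package(path):
    is_module = osp.isfile(path) and osp.splitext(path)[1] in ('.py', '.pyw')
    is_package = osp.isdir(path) and osp.isfile(osp.join(path, '__init__.py'))
    return is_module or is_package

d = tempfile.mkdtemp()
open(osp.join(d, 'mod.py'), 'w').close()
assert is_module_or_package(osp.join(d, 'mod.py'))   # .py file -> module
assert not is_module_or_package(d)                   # no __init__.py yet
open(osp.join(d, '__init__.py'), 'w').close()
assert is_module_or_package(d)                       # now a package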
45,940
def dePem(s, name): prefix = ('-----BEGIN %s-----' % name) postfix = ('-----END %s-----' % name) start = s.find(prefix) if (start == (-1)): raise SyntaxError('Missing PEM prefix') end = s.find(postfix, (start + len(prefix))) if (end == (-1)): raise SyntaxError('Missing PEM postfix') s = s[(start + len(('-----BEGIN %s-----' % name))):end] retBytes = a2b_base64(s) return retBytes
[ "def", "dePem", "(", "s", ",", "name", ")", ":", "prefix", "=", "(", "'-----BEGIN %s-----'", "%", "name", ")", "postfix", "=", "(", "'-----END %s-----'", "%", "name", ")", "start", "=", "s", ".", "find", "(", "prefix", ")", "if", "(", "start", "==", "(", "-", "1", ")", ")", ":", "raise", "SyntaxError", "(", "'Missing PEM prefix'", ")", "end", "=", "s", ".", "find", "(", "postfix", ",", "(", "start", "+", "len", "(", "prefix", ")", ")", ")", "if", "(", "end", "==", "(", "-", "1", ")", ")", ":", "raise", "SyntaxError", "(", "'Missing PEM postfix'", ")", "s", "=", "s", "[", "(", "start", "+", "len", "(", "(", "'-----BEGIN %s-----'", "%", "name", ")", ")", ")", ":", "end", "]", "retBytes", "=", "a2b_base64", "(", "s", ")", "return", "retBytes" ]
decode a pem string into a bytearray of its payload .
train
false
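A usage sketch for dePem above; it assumes the snippet's a2b_base64 helper behaves like binascii.a2b_base64 (newlines inside the payload are ignored).

pem = ('junk before the armor\n'
       '-----BEGIN CERTIFICATE-----\n'
       'aGVsbG8=\n'
       '-----END CERTIFICATE-----\n')
# everything outside the matching BEGIN/END lines is discarded
assert dePem(pem, 'CERTIFICATE') == b'hello'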
45,941
def add_dir_to_list(dirlist, dir): if ((dir is not None) and os.path.isdir(dir) and (dir not in dirlist)): dirlist.insert(0, dir)
[ "def", "add_dir_to_list", "(", "dirlist", ",", "dir", ")", ":", "if", "(", "(", "dir", "is", "not", "None", ")", "and", "os", ".", "path", ".", "isdir", "(", "dir", ")", "and", "(", "dir", "not", "in", "dirlist", ")", ")", ":", "dirlist", ".", "insert", "(", "0", ",", "dir", ")" ]
add the directory dir to the list dirlist if 1) dir is not already in dirlist and 2) dir actually exists.
train
false
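The guard conditions are easy to exercise directly; a minimal runnable check:

import os
import tempfile

def add_dir_to_list(dirlist, dir):
    if dir is not None and os.path.isdir(dir) and dir not in dirlist:
        dirlist.insert(0, dir)

dirs = ['/usr/include']
d = tempfile.mkdtemp()
add_dir_to_list(dirs, d)      # prepended: exists and not yet listed
add_dir_to_list(dirs, d)      # no-op: already in the list
add_dir_to_list(dirs, None)   # no-op: None is skipped
assert dirs == [d, '/usr/include']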
45,942
def libvlc_audio_output_device_get(mp): f = (_Cfunctions.get('libvlc_audio_output_device_get', None) or _Cfunction('libvlc_audio_output_device_get', ((1,),), None, ctypes.c_char_p, MediaPlayer)) return f(mp)
[ "def", "libvlc_audio_output_device_get", "(", "mp", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_audio_output_device_get'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_audio_output_device_get'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "None", ",", "ctypes", ".", "c_char_p", ",", "MediaPlayer", ")", ")", "return", "f", "(", "mp", ")" ]
get the current audio output device identifier .
train
true
45,943
def delete_security_group_rule(security_group_rule_id, profile=None): conn = _auth(profile) return conn.delete_security_group_rule(security_group_rule_id)
[ "def", "delete_security_group_rule", "(", "security_group_rule_id", ",", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "delete_security_group_rule", "(", "security_group_rule_id", ")" ]
deletes the specified security group rule.
train
false
45,944
def _parse_optional(fh): optional = {'StartKernData': _parse_kern_pairs, 'StartComposites': _parse_composites} d = {'StartKernData': {}, 'StartComposites': {}} while 1: line = fh.readline() if (not line): break line = line.rstrip() if (len(line) == 0): continue key = line.split()[0] if (key in optional): d[key] = optional[key](fh) l = (d['StartKernData'], d['StartComposites']) return l
[ "def", "_parse_optional", "(", "fh", ")", ":", "optional", "=", "{", "'StartKernData'", ":", "_parse_kern_pairs", ",", "'StartComposites'", ":", "_parse_composites", "}", "d", "=", "{", "'StartKernData'", ":", "{", "}", ",", "'StartComposites'", ":", "{", "}", "}", "while", "1", ":", "line", "=", "fh", ".", "readline", "(", ")", "if", "(", "not", "line", ")", ":", "break", "line", "=", "line", ".", "rstrip", "(", ")", "if", "(", "len", "(", "line", ")", "==", "0", ")", ":", "continue", "key", "=", "line", ".", "split", "(", ")", "[", "0", "]", "if", "(", "key", "in", "optional", ")", ":", "d", "[", "key", "]", "=", "optional", "[", "key", "]", "(", "fh", ")", "l", "=", "(", "d", "[", "'StartKernData'", "]", ",", "d", "[", "'StartComposites'", "]", ")", "return", "l" ]
parse the optional fields for kern pair data and composites; the return value is a tuple of the return values from _parse_kern_pairs and _parse_composites.
train
false
45,945
def _get_project_base(config): script_location = config.get_main_option('script_location') return script_location.split(':')[0].split('.')[0]
[ "def", "_get_project_base", "(", "config", ")", ":", "script_location", "=", "config", ".", "get_main_option", "(", "'script_location'", ")", "return", "script_location", ".", "split", "(", "':'", ")", "[", "0", "]", ".", "split", "(", "'.'", ")", "[", "0", "]" ]
return the base python namespace name for a project .
train
false
45,946
def string_to_rgb(s): orig_s = s s = s.strip() if s.startswith('#'): s = s[1:] if (not (len(s) == 6)): raise ValueError(("String %s doesn't look like a hex string" % orig_s)) return (int(s[:2], 16), int(s[2:4], 16), int(s[4:], 16))
[ "def", "string_to_rgb", "(", "s", ")", ":", "orig_s", "=", "s", "s", "=", "s", ".", "strip", "(", ")", "if", "s", ".", "startswith", "(", "'#'", ")", ":", "s", "=", "s", "[", "1", ":", "]", "if", "(", "not", "(", "len", "(", "s", ")", "==", "6", ")", ")", ":", "raise", "ValueError", "(", "(", "\"String %s doesn't look like a hex string\"", "%", "orig_s", ")", ")", "return", "(", "int", "(", "s", "[", ":", "2", "]", ",", "16", ")", ",", "int", "(", "s", "[", "2", ":", "4", "]", ",", "16", ")", ",", "int", "(", "s", "[", "4", ":", "]", ",", "16", ")", ")" ]
converts hex string to rgb .
train
false
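Two sanity checks against the snippet above: the leading '#' and surrounding whitespace are optional, and a wrong-length string raises ValueError.

assert string_to_rgb('#ff8800') == (255, 136, 0)
assert string_to_rgb('  00ff00 ') == (0, 255, 0)
try:
    string_to_rgb('#fff')   # too short after stripping the '#'
except ValueError:
    pass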
45,948
def CDLKICKINGBYLENGTH(barDs, count): return call_talib_with_ohlc(barDs, count, talib.CDLKICKINGBYLENGTH)
[ "def", "CDLKICKINGBYLENGTH", "(", "barDs", ",", "count", ")", ":", "return", "call_talib_with_ohlc", "(", "barDs", ",", "count", ",", "talib", ".", "CDLKICKINGBYLENGTH", ")" ]
kicking - bull/bear determined by the longer marubozu .
train
false
45,950
def format_jid_instance_ext(jid, job): ret = format_job_instance(job) ret.update({'JID': jid, 'StartTime': jid_to_time(jid)}) return ret
[ "def", "format_jid_instance_ext", "(", "jid", ",", "job", ")", ":", "ret", "=", "format_job_instance", "(", "job", ")", "ret", ".", "update", "(", "{", "'JID'", ":", "jid", ",", "'StartTime'", ":", "jid_to_time", "(", "jid", ")", "}", ")", "return", "ret" ]
format the job instance correctly, with the jid and start time included.
train
true
45,951
def create_user_and_user_profile(email, username, name, country, password): user = User.objects.create_user(username, email, password) reg = Registration() reg.register(user) profile = UserProfile(user=user) profile.name = name profile.country = country profile.save() return user
[ "def", "create_user_and_user_profile", "(", "email", ",", "username", ",", "name", ",", "country", ",", "password", ")", ":", "user", "=", "User", ".", "objects", ".", "create_user", "(", "username", ",", "email", ",", "password", ")", "reg", "=", "Registration", "(", ")", "reg", ".", "register", "(", "user", ")", "profile", "=", "UserProfile", "(", "user", "=", "user", ")", "profile", ".", "name", "=", "name", "profile", ".", "country", "=", "country", "profile", ".", "save", "(", ")", "return", "user" ]
create a new user, register it, and create its user profile.
train
false
45,955
def _fetch_all_datastore_entities(): all_entities = [] for namespace in datastore.Query('__namespace__').Run(): namespace_name = namespace.key().name() for kind in datastore.Query('__kind__', namespace=namespace_name).Run(): all_entities.extend(datastore.Query(kind.key().name(), namespace=namespace_name).Run()) return all_entities
[ "def", "_fetch_all_datastore_entities", "(", ")", ":", "all_entities", "=", "[", "]", "for", "namespace", "in", "datastore", ".", "Query", "(", "'__namespace__'", ")", ".", "Run", "(", ")", ":", "namespace_name", "=", "namespace", ".", "key", "(", ")", ".", "name", "(", ")", "for", "kind", "in", "datastore", ".", "Query", "(", "'__kind__'", ",", "namespace", "=", "namespace_name", ")", ".", "Run", "(", ")", ":", "all_entities", ".", "extend", "(", "datastore", ".", "Query", "(", "kind", ".", "key", "(", ")", ".", "name", "(", ")", ",", "namespace", "=", "namespace_name", ")", ".", "Run", "(", ")", ")", "return", "all_entities" ]
returns all datastore entities from all namespaces as a list .
train
false
45,957
def execute_nb(src, dst, allow_errors=False, timeout=1000, kernel_name=''): import nbformat from nbconvert.preprocessors import ExecutePreprocessor with io.open(src, encoding='utf-8') as f: nb = nbformat.read(f, as_version=4) ep = ExecutePreprocessor(allow_errors=allow_errors, timeout=timeout, kernel_name=kernel_name) ep.preprocess(nb, resources={}) with io.open(dst, 'wt', encoding='utf-8') as f: nbformat.write(nb, f) return dst
[ "def", "execute_nb", "(", "src", ",", "dst", ",", "allow_errors", "=", "False", ",", "timeout", "=", "1000", ",", "kernel_name", "=", "''", ")", ":", "import", "nbformat", "from", "nbconvert", ".", "preprocessors", "import", "ExecutePreprocessor", "with", "io", ".", "open", "(", "src", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "nb", "=", "nbformat", ".", "read", "(", "f", ",", "as_version", "=", "4", ")", "ep", "=", "ExecutePreprocessor", "(", "allow_errors", "=", "allow_errors", ",", "timeout", "=", "timeout", ",", "kernel_name", "=", "kernel_name", ")", "ep", ".", "preprocess", "(", "nb", ",", "resources", "=", "{", "}", ")", "with", "io", ".", "open", "(", "dst", ",", "'wt'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "nbformat", ".", "write", "(", "nb", ",", "f", ")", "return", "dst" ]
execute the notebook in src and write the output to dst.
train
false
45,958
def show_config(): ret = {} cmd = 'cpan -J' out = __salt__['cmd.run'](cmd).splitlines() for line in out: if ('=>' not in line): continue comps = line.split('=>') key = comps[0].replace("'", '').strip() val = comps[1].replace("',", '').replace("'", '').strip() ret[key] = val return ret
[ "def", "show_config", "(", ")", ":", "ret", "=", "{", "}", "cmd", "=", "'cpan -J'", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", ".", "splitlines", "(", ")", "for", "line", "in", "out", ":", "if", "(", "'=>'", "not", "in", "line", ")", ":", "continue", "comps", "=", "line", ".", "split", "(", "'=>'", ")", "key", "=", "comps", "[", "0", "]", ".", "replace", "(", "\"'\"", ",", "''", ")", ".", "strip", "(", ")", "val", "=", "comps", "[", "1", "]", ".", "replace", "(", "\"',\"", ",", "''", ")", ".", "replace", "(", "\"'\"", ",", "''", ")", ".", "strip", "(", ")", "ret", "[", "key", "]", "=", "val", "return", "ret" ]
return a dict of cpan configuration values.
train
true
45,961
def aggregate_raters(data, n_cat=None): data = np.asarray(data) n_rows = data.shape[0] if (n_cat is None): (cat_uni, cat_int) = np.unique(data.ravel(), return_inverse=True) n_cat = len(cat_uni) data_ = cat_int.reshape(data.shape) else: cat_uni = np.arange(n_cat) data_ = data tt = np.zeros((n_rows, n_cat), int) for (idx, row) in enumerate(data_): ro = np.bincount(row) tt[idx, :len(ro)] = ro return (tt, cat_uni)
[ "def", "aggregate_raters", "(", "data", ",", "n_cat", "=", "None", ")", ":", "data", "=", "np", ".", "asarray", "(", "data", ")", "n_rows", "=", "data", ".", "shape", "[", "0", "]", "if", "(", "n_cat", "is", "None", ")", ":", "(", "cat_uni", ",", "cat_int", ")", "=", "np", ".", "unique", "(", "data", ".", "ravel", "(", ")", ",", "return_inverse", "=", "True", ")", "n_cat", "=", "len", "(", "cat_uni", ")", "data_", "=", "cat_int", ".", "reshape", "(", "data", ".", "shape", ")", "else", ":", "cat_uni", "=", "np", ".", "arange", "(", "n_cat", ")", "data_", "=", "data", "tt", "=", "np", ".", "zeros", "(", "(", "n_rows", ",", "n_cat", ")", ",", "int", ")", "for", "(", "idx", ",", "row", ")", "in", "enumerate", "(", "data_", ")", ":", "ro", "=", "np", ".", "bincount", "(", "row", ")", "tt", "[", "idx", ",", ":", "len", "(", "ro", ")", "]", "=", "ro", "return", "(", "tt", ",", "cat_uni", ")" ]
convert raw data with shape (subject, rater) into a (subject, category) table of counts; brings data into the correct format for fleiss_kappa. bincount will raise an exception if data cannot be converted to integer.
train
false
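The snippet matches statsmodels' aggregate_raters; a small worked example, assuming statsmodels is installed:

import numpy as np
from statsmodels.stats.inter_rater import aggregate_raters

# 3 subjects, 4 raters, categories {0, 1}
data = np.array([[0, 0, 1, 1],
                 [1, 1, 1, 1],
                 [0, 0, 0, 1]])
table, cats = aggregate_raters(data)
assert (cats == np.array([0, 1])).all()
assert (table == np.array([[2, 2],
                           [0, 4],
                           [3, 1]])).all()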
45,963
def _XmlEscape(value, attr=False): def replace(match): m = match.string[match.start():match.end()] if (attr and (m == "'")): return m return _xml_escape_map[m] return _xml_escape_re.sub(replace, value)
[ "def", "_XmlEscape", "(", "value", ",", "attr", "=", "False", ")", ":", "def", "replace", "(", "match", ")", ":", "m", "=", "match", ".", "string", "[", "match", ".", "start", "(", ")", ":", "match", ".", "end", "(", ")", "]", "if", "(", "attr", "and", "(", "m", "==", "\"'\"", ")", ")", ":", "return", "m", "return", "_xml_escape_map", "[", "m", "]", "return", "_xml_escape_re", ".", "sub", "(", "replace", ",", "value", ")" ]
escape a string for inclusion in xml .
train
false
45,964
@require_POST @ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) @require_level('staff') @require_finance_admin def list_financial_report_downloads(_request, course_id): course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id) report_store = ReportStore.from_config(config_name='FINANCIAL_REPORTS') response_payload = {'downloads': [dict(name=name, url=url, link=HTML('<a href="{}">{}</a>').format(HTML(url), Text(name))) for (name, url) in report_store.links_for(course_id)]} return JsonResponse(response_payload)
[ "@", "require_POST", "@", "ensure_csrf_cookie", "@", "cache_control", "(", "no_cache", "=", "True", ",", "no_store", "=", "True", ",", "must_revalidate", "=", "True", ")", "@", "require_level", "(", "'staff'", ")", "@", "require_finance_admin", "def", "list_financial_report_downloads", "(", "_request", ",", "course_id", ")", ":", "course_id", "=", "SlashSeparatedCourseKey", ".", "from_deprecated_string", "(", "course_id", ")", "report_store", "=", "ReportStore", ".", "from_config", "(", "config_name", "=", "'FINANCIAL_REPORTS'", ")", "response_payload", "=", "{", "'downloads'", ":", "[", "dict", "(", "name", "=", "name", ",", "url", "=", "url", ",", "link", "=", "HTML", "(", "'<a href=\"{}\">{}</a>'", ")", ".", "format", "(", "HTML", "(", "url", ")", ",", "Text", "(", "name", ")", ")", ")", "for", "(", "name", ",", "url", ")", "in", "report_store", ".", "links_for", "(", "course_id", ")", "]", "}", "return", "JsonResponse", "(", "response_payload", ")" ]
list financial report csv files that are available for download for this course.
train
false
45,965
def cyclic_pattern_search(buf): result = [] pattern = cyclic_pattern() p = re.compile((('[' + re.escape(to_binary_string(cyclic_pattern_charset()))) + ']{4,}')) found = p.finditer(buf) found = list(found) for m in found: s = buf[m.start():m.end()] i = pattern.find(s) k = 0 while ((i == (-1)) and (len(s) > 4)): s = s[1:] k += 1 i = pattern.find(s) if (i != (-1)): result += [((m.start() + k), len(s), i)] return result
[ "def", "cyclic_pattern_search", "(", "buf", ")", ":", "result", "=", "[", "]", "pattern", "=", "cyclic_pattern", "(", ")", "p", "=", "re", ".", "compile", "(", "(", "(", "'['", "+", "re", ".", "escape", "(", "to_binary_string", "(", "cyclic_pattern_charset", "(", ")", ")", ")", ")", "+", "']{4,}'", ")", ")", "found", "=", "p", ".", "finditer", "(", "buf", ")", "found", "=", "list", "(", "found", ")", "for", "m", "in", "found", ":", "s", "=", "buf", "[", "m", ".", "start", "(", ")", ":", "m", ".", "end", "(", ")", "]", "i", "=", "pattern", ".", "find", "(", "s", ")", "k", "=", "0", "while", "(", "(", "i", "==", "(", "-", "1", ")", ")", "and", "(", "len", "(", "s", ")", ">", "4", ")", ")", ":", "s", "=", "s", "[", "1", ":", "]", "k", "+=", "1", "i", "=", "pattern", ".", "find", "(", "s", ")", "if", "(", "i", "!=", "(", "-", "1", ")", ")", ":", "result", "+=", "[", "(", "(", "m", ".", "start", "(", ")", "+", "k", ")", ",", "len", "(", "s", ")", ",", "i", ")", "]", "return", "result" ]
search all cyclic pattern pieces in a buffer. args: - buf: buffer to search for. returns: - list of (buffer_offset, piece_length, pattern_offset) tuples.
train
false
45,969
@block_user_agents @login_required @check_readonly def revert_document(request, document_path, revision_id): (document_locale, document_slug, needs_redirect) = locale_and_slug_from_path(document_path, request) revision = get_object_or_404(Revision.objects.select_related('document'), pk=revision_id, document__slug=document_slug) if (not revision.document.allows_revision_by(request.user)): raise PermissionDenied if (request.method == 'GET'): return render(request, 'wiki/confirm_revision_revert.html', {'revision': revision, 'document': revision.document}) else: comment = request.POST.get('comment') document = revision.document old_revision_pk = revision.pk try: new_revision = document.revert(revision, request.user, comment) if (new_revision.pk != old_revision_pk): document.schedule_rendering('max-age=0') except IntegrityError: return render(request, 'wiki/confirm_revision_revert.html', {'revision': revision, 'document': revision.document, 'error': ugettext('Document already exists. Note: You cannot revert a document that has been moved until you delete its redirect.')}) return redirect('wiki.document_revisions', revision.document.slug)
[ "@", "block_user_agents", "@", "login_required", "@", "check_readonly", "def", "revert_document", "(", "request", ",", "document_path", ",", "revision_id", ")", ":", "(", "document_locale", ",", "document_slug", ",", "needs_redirect", ")", "=", "locale_and_slug_from_path", "(", "document_path", ",", "request", ")", "revision", "=", "get_object_or_404", "(", "Revision", ".", "objects", ".", "select_related", "(", "'document'", ")", ",", "pk", "=", "revision_id", ",", "document__slug", "=", "document_slug", ")", "if", "(", "not", "revision", ".", "document", ".", "allows_revision_by", "(", "request", ".", "user", ")", ")", ":", "raise", "PermissionDenied", "if", "(", "request", ".", "method", "==", "'GET'", ")", ":", "return", "render", "(", "request", ",", "'wiki/confirm_revision_revert.html'", ",", "{", "'revision'", ":", "revision", ",", "'document'", ":", "revision", ".", "document", "}", ")", "else", ":", "comment", "=", "request", ".", "POST", ".", "get", "(", "'comment'", ")", "document", "=", "revision", ".", "document", "old_revision_pk", "=", "revision", ".", "pk", "try", ":", "new_revision", "=", "document", ".", "revert", "(", "revision", ",", "request", ".", "user", ",", "comment", ")", "if", "(", "new_revision", ".", "pk", "!=", "old_revision_pk", ")", ":", "document", ".", "schedule_rendering", "(", "'max-age=0'", ")", "except", "IntegrityError", ":", "return", "render", "(", "request", ",", "'wiki/confirm_revision_revert.html'", ",", "{", "'revision'", ":", "revision", ",", "'document'", ":", "revision", ".", "document", ",", "'error'", ":", "ugettext", "(", "'Document already exists. Note: You cannot revert a document that has been moved until you delete its redirect.'", ")", "}", ")", "return", "redirect", "(", "'wiki.document_revisions'", ",", "revision", ".", "document", ".", "slug", ")" ]
revert document to a specific revision .
train
false
45,970
def initialize_global_variables(sess=None): assert (sess is not None) try: sess.run(tf.global_variables_initializer()) except: sess.run(tf.initialize_all_variables())
[ "def", "initialize_global_variables", "(", "sess", "=", "None", ")", ":", "assert", "(", "sess", "is", "not", "None", ")", "try", ":", "sess", ".", "run", "(", "tf", ".", "global_variables_initializer", "(", ")", ")", "except", ":", "sess", ".", "run", "(", "tf", ".", "initialize_all_variables", "(", ")", ")" ]
execute the global variables initializer on sess, falling back to initialize_all_variables on older tensorflow versions.
train
false
45,971
def DBSubjectLockTest(f): @functools.wraps(f) def Decorator(testinstance): if testinstance.TEST_DBSUBJECTLOCKS: return f(testinstance) else: return testinstance.skipTest('Tests that use locks are disabled for this data store.') return Decorator
[ "def", "DBSubjectLockTest", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "Decorator", "(", "testinstance", ")", ":", "if", "testinstance", ".", "TEST_DBSUBJECTLOCKS", ":", "return", "f", "(", "testinstance", ")", "else", ":", "return", "testinstance", ".", "skipTest", "(", "'Tests that use locks are disabled for this data store.'", ")", "return", "Decorator" ]
this indicates a test that uses locks .
train
false
45,973
def deepCopyContours(font, parent, component, offset, scale): for nested in component.components: deepCopyContours(font, parent, font[nested.baseGlyph], ((offset[0] + nested.offset[0]), (offset[1] + nested.offset[1])), ((scale[0] * nested.scale[0]), (scale[1] * nested.scale[1]))) if (component == parent): return for contour in component: contour = contour.copy() contour.scale(scale) contour.move(offset) parent.appendContour(contour)
[ "def", "deepCopyContours", "(", "font", ",", "parent", ",", "component", ",", "offset", ",", "scale", ")", ":", "for", "nested", "in", "component", ".", "components", ":", "deepCopyContours", "(", "font", ",", "parent", ",", "font", "[", "nested", ".", "baseGlyph", "]", ",", "(", "(", "offset", "[", "0", "]", "+", "nested", ".", "offset", "[", "0", "]", ")", ",", "(", "offset", "[", "1", "]", "+", "nested", ".", "offset", "[", "1", "]", ")", ")", ",", "(", "(", "scale", "[", "0", "]", "*", "nested", ".", "scale", "[", "0", "]", ")", ",", "(", "scale", "[", "1", "]", "*", "nested", ".", "scale", "[", "1", "]", ")", ")", ")", "if", "(", "component", "==", "parent", ")", ":", "return", "for", "contour", "in", "component", ":", "contour", "=", "contour", ".", "copy", "(", ")", "contour", ".", "scale", "(", "scale", ")", "contour", ".", "move", "(", "offset", ")", "parent", ".", "appendContour", "(", "contour", ")" ]
copy contours to parent from component .
train
false
45,974
def fixed_ip_update(context, address, values): return IMPL.fixed_ip_update(context, address, values)
[ "def", "fixed_ip_update", "(", "context", ",", "address", ",", "values", ")", ":", "return", "IMPL", ".", "fixed_ip_update", "(", "context", ",", "address", ",", "values", ")" ]
update a fixed ip from the values dictionary.
train
false
45,975
def suppress_output(fn): save_stdout = sys.stdout try: sys.stdout = DummyFile() fn() finally: sys.stdout = save_stdout
[ "def", "suppress_output", "(", "fn", ")", ":", "save_stdout", "=", "sys", ".", "stdout", "try", ":", "sys", ".", "stdout", "=", "DummyFile", "(", ")", "fn", "(", ")", "finally", ":", "sys", ".", "stdout", "=", "save_stdout" ]
suppresses the output of fn on sys.stdout.
train
false
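A runnable sketch of suppress_output; the DummyFile stand-in (a write-discarding object) is an assumption, since the original class is not shown.

import sys

class DummyFile(object):
    def write(self, x):
        pass  # discard everything

def suppress_output(fn):
    save_stdout = sys.stdout
    try:
        sys.stdout = DummyFile()
        fn()
    finally:
        sys.stdout = save_stdout

suppress_output(lambda: print('hidden'))   # nothing is printed
print('stdout restored')                   # visible again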