Dataset columns (name: dtype, observed range):
  id_within_dataset: int64, 1 to 55.5k
  snippet: string, lengths 19 to 14.2k
  tokens: list, lengths 6 to 1.63k
  nl: string, lengths 6 to 352
  split_within_dataset: string, 1 value
  is_duplicated: bool, 2 classes
45,158
def writeIntermediateFile(accountname):
    global _mbnames
    if (_mbnames.is_enabled() is not True):
        return
    _mbnames.writeIntermediateFile(accountname)
    if (_mbnames.get_incremental() is True):
        _mbnames.write()
[ "def", "writeIntermediateFile", "(", "accountname", ")", ":", "global", "_mbnames", "if", "(", "_mbnames", ".", "is_enabled", "(", ")", "is", "not", "True", ")", ":", "return", "_mbnames", ".", "writeIntermediateFile", "(", "accountname", ")", "if", "(", "_mbnames", ".", "get_incremental", "(", ")", "is", "True", ")", ":", "_mbnames", ".", "write", "(", ")" ]
write intermediate mbnames file .
train
false
45,159
def sanitize_choices(choices, choices_all):
    seen = set()
    others = [x for x in choices_all if (x not in choices)]
    res = []
    for s in choices:
        if (s in (list(choices_all) + ['*'])):
            if (not ((s in seen) or seen.add(s))):
                res.extend((list(others) if (s == '*') else [s]))
    return res
[ "def", "sanitize_choices", "(", "choices", ",", "choices_all", ")", ":", "seen", "=", "set", "(", ")", "others", "=", "[", "x", "for", "x", "in", "choices_all", "if", "(", "x", "not", "in", "choices", ")", "]", "res", "=", "[", "]", "for", "s", "in", "choices", ":", "if", "(", "s", "in", "(", "list", "(", "choices_all", ")", "+", "[", "'*'", "]", ")", ")", ":", "if", "(", "not", "(", "(", "s", "in", "seen", ")", "or", "seen", ".", "add", "(", "s", ")", ")", ")", ":", "res", ".", "extend", "(", "(", "list", "(", "others", ")", "if", "(", "s", "==", "'*'", ")", "else", "[", "s", "]", ")", ")", "return", "res" ]
clean up a stringlist configuration attribute: keep only choices elements present in choices_all .
train
false
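A quick usage sketch for the record above (hypothetical inputs, not part of the dataset): duplicates are dropped, order is kept, and '*' expands to the not-yet-chosen entries.

choices_all = ['eztv', 'rarbg', 'kat']
print(sanitize_choices(['kat', 'kat', '*'], choices_all))
# -> ['kat', 'eztv', 'rarbg']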
45,160
def parse_valid_host_port(host_port):
    try:
        try:
            (host, port) = netutils.parse_host_port(host_port)
        except Exception:
            raise ValueError((_('Host and port "%s" is not valid.') % host_port))
        if (not netutils.is_valid_port(port)):
            raise ValueError((_('Port "%s" is not valid.') % port))
        if (not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host) or is_valid_hostname(host) or is_valid_fqdn(host))):
            raise ValueError((_('Host "%s" is not valid.') % host))
    except Exception as ex:
        raise ValueError((_('%s Please specify a host:port pair, where host is an IPv4 address, IPv6 address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets separately from the port (i.e., "[fe80::a:b:c]:9876").') % ex))
    return (host, int(port))
[ "def", "parse_valid_host_port", "(", "host_port", ")", ":", "try", ":", "try", ":", "(", "host", ",", "port", ")", "=", "netutils", ".", "parse_host_port", "(", "host_port", ")", "except", "Exception", ":", "raise", "ValueError", "(", "(", "_", "(", "'Host and port \"%s\" is not valid.'", ")", "%", "host_port", ")", ")", "if", "(", "not", "netutils", ".", "is_valid_port", "(", "port", ")", ")", ":", "raise", "ValueError", "(", "(", "_", "(", "'Port \"%s\" is not valid.'", ")", "%", "port", ")", ")", "if", "(", "not", "(", "netutils", ".", "is_valid_ipv6", "(", "host", ")", "or", "netutils", ".", "is_valid_ipv4", "(", "host", ")", "or", "is_valid_hostname", "(", "host", ")", "or", "is_valid_fqdn", "(", "host", ")", ")", ")", ":", "raise", "ValueError", "(", "(", "_", "(", "'Host \"%s\" is not valid.'", ")", "%", "host", ")", ")", "except", "Exception", "as", "ex", ":", "raise", "ValueError", "(", "(", "_", "(", "'%s Please specify a host:port pair, where host is an IPv4 address, IPv6 address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets separately from the port (i.e., \"[fe80::a:b:c]:9876\").'", ")", "%", "ex", ")", ")", "return", "(", "host", ",", "int", "(", "port", ")", ")" ]
given a "host:port" string .
train
false
45,161
def run_expecting_error(*args):
    process = Popen((['flocker-volume'] + list(args)), stderr=PIPE)
    result = process.stderr.read()
    exit_code = process.wait()
    if (exit_code == 0):
        raise AssertionError('flocker-volume exited with code 0.')
    return result
[ "def", "run_expecting_error", "(", "*", "args", ")", ":", "process", "=", "Popen", "(", "(", "[", "'flocker-volume'", "]", "+", "list", "(", "args", ")", ")", ",", "stderr", "=", "PIPE", ")", "result", "=", "process", ".", "stderr", ".", "read", "(", ")", "exit_code", "=", "process", ".", "wait", "(", ")", "if", "(", "exit_code", "==", "0", ")", ":", "raise", "AssertionError", "(", "'flocker-volume exited with code 0.'", ")", "return", "result" ]
run flocker-volume with the given arguments .
train
false
45,162
def _rescale_layout(pos, scale=1):
    pos -= pos.min(axis=0)
    pos *= (scale / pos.max())
    return pos
[ "def", "_rescale_layout", "(", "pos", ",", "scale", "=", "1", ")", ":", "pos", "-=", "pos", ".", "min", "(", "axis", "=", "0", ")", "pos", "*=", "(", "scale", "/", "pos", ".", "max", "(", ")", ")", "return", "pos" ]
normalize the given coordinate list to the range [0 .
train
true
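A minimal sketch of the snippet above (hypothetical input; assumes pos is a NumPy array, since the code relies on array broadcasting):

import numpy as np

pos = np.array([[2.0, 4.0], [6.0, 8.0]])
print(_rescale_layout(pos, scale=1))
# the per-column minimum is subtracted first, then everything is
# divided by the global max, so the output is [[0. 0.] [1. 1.]]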
45,163
def path_to_resource(project, path, type=None):
    project_path = path_relative_to_project_root(project, path)
    if (project_path is None):
        project_path = rope.base.project._realpath(path)
        project = rope.base.project.get_no_project()
    if (type is None):
        return project.get_resource(project_path)
    if (type == 'file'):
        return project.get_file(project_path)
    if (type == 'folder'):
        return project.get_folder(project_path)
    return None
[ "def", "path_to_resource", "(", "project", ",", "path", ",", "type", "=", "None", ")", ":", "project_path", "=", "path_relative_to_project_root", "(", "project", ",", "path", ")", "if", "(", "project_path", "is", "None", ")", ":", "project_path", "=", "rope", ".", "base", ".", "project", ".", "_realpath", "(", "path", ")", "project", "=", "rope", ".", "base", ".", "project", ".", "get_no_project", "(", ")", "if", "(", "type", "is", "None", ")", ":", "return", "project", ".", "get_resource", "(", "project_path", ")", "if", "(", "type", "==", "'file'", ")", ":", "return", "project", ".", "get_file", "(", "project_path", ")", "if", "(", "type", "==", "'folder'", ")", ":", "return", "project", ".", "get_folder", "(", "project_path", ")", "return", "None" ]
get the resource at path you only need to specify type if path does not exist .
train
true
45,164
def _b64encode(s): return b2a_base64(s).strip()
[ "def", "_b64encode", "(", "s", ")", ":", "return", "b2a_base64", "(", "s", ")", ".", "strip", "(", ")" ]
encode a binary string as base64 with no trailing newline .
train
false
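A usage sketch (hypothetical value; the snippet assumes b2a_base64 from binascii is in scope):

from binascii import b2a_base64

print(_b64encode(b'flocker'))  # b'ZmxvY2tlcg==' -- .strip() removed the trailing newline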
45,165
def convert_behave_to_cucumber_json(behave_filename, cucumber_filename, encoding='UTF-8', pretty=True):
    dump_kwargs = {'encoding': encoding}
    if pretty:
        dump_kwargs.update(indent=2, sort_keys=True)
    with open(behave_filename, 'r') as behave_json:
        with open(cucumber_filename, 'w+') as output_file:
            cucumber_json = b2c.convert(json.load(behave_json, encoding))
            json.dump(cucumber_json, output_file, **dump_kwargs)
    return 0
[ "def", "convert_behave_to_cucumber_json", "(", "behave_filename", ",", "cucumber_filename", ",", "encoding", "=", "'UTF-8'", ",", "pretty", "=", "True", ")", ":", "dump_kwargs", "=", "{", "'encoding'", ":", "encoding", "}", "if", "pretty", ":", "dump_kwargs", ".", "update", "(", "indent", "=", "2", ",", "sort_keys", "=", "True", ")", "with", "open", "(", "behave_filename", ",", "'r'", ")", "as", "behave_json", ":", "with", "open", "(", "cucumber_filename", ",", "'w+'", ")", "as", "output_file", ":", "cucumber_json", "=", "b2c", ".", "convert", "(", "json", ".", "load", "(", "behave_json", ",", "encoding", ")", ")", "json", ".", "dump", "(", "cucumber_json", ",", "output_file", ",", "**", "dump_kwargs", ")", "return", "0" ]
convert behave json dialect into cucumber json dialect .
train
false
45,167
@memoize('get_step_states', time=60, timeout=60)
def get_step_states(emr_connection, jobflowid):
    ret = emr_connection.list_steps(jobflowid)
    steps = []
    steps.extend(ret.steps)
    while hasattr(ret, 'marker'):
        ret = emr_connection.list_steps(jobflowid, marker=ret.marker)
        steps.extend(ret.steps)
    ret = []
    for step in steps:
        start_str = step.status.timeline.creationdatetime
        ret.append((step.name, step.status.state, start_str))
    return ret
[ "@", "memoize", "(", "'get_step_states'", ",", "time", "=", "60", ",", "timeout", "=", "60", ")", "def", "get_step_states", "(", "emr_connection", ",", "jobflowid", ")", ":", "ret", "=", "emr_connection", ".", "list_steps", "(", "jobflowid", ")", "steps", "=", "[", "]", "steps", ".", "extend", "(", "ret", ".", "steps", ")", "while", "hasattr", "(", "ret", ",", "'marker'", ")", ":", "ret", "=", "emr_connection", ".", "list_steps", "(", "jobflowid", ",", "marker", "=", "ret", ".", "marker", ")", "steps", ".", "extend", "(", "ret", ".", "steps", ")", "ret", "=", "[", "]", "for", "step", "in", "steps", ":", "start_str", "=", "step", ".", "status", ".", "timeline", ".", "creationdatetime", "ret", ".", "append", "(", "(", "step", ".", "name", ",", "step", ".", "status", ".", "state", ",", "start_str", ")", ")", "return", "ret" ]
return the names and states of all steps in the jobflow .
train
false
45,168
@decorators.which('id')
def primary_group(name):
    return __salt__['cmd.run'](['id', '-g', '-n', name])
[ "@", "decorators", ".", "which", "(", "'id'", ")", "def", "primary_group", "(", "name", ")", ":", "return", "__salt__", "[", "'cmd.run'", "]", "(", "[", "'id'", ",", "'-g'", ",", "'-n'", ",", "name", "]", ")" ]
return the primary group of the named user .
train
false
45,169
@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP')
def test_identify_table_fail():
    table_in = ['<table id="foo"><tr><th>A</th></tr>', '<tr><td>B</td></tr></table>']
    with pytest.raises(core.InconsistentTableError) as err:
        Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'}, guess=False)
    assert str(err).endswith("ERROR: HTML table id 'bad_id' not found")
    with pytest.raises(core.InconsistentTableError) as err:
        Table.read(table_in, format='ascii.html', htmldict={'table_id': 3}, guess=False)
    assert str(err).endswith('ERROR: HTML table number 3 not found')
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "'not HAS_BEAUTIFUL_SOUP'", ")", "def", "test_identify_table_fail", "(", ")", ":", "table_in", "=", "[", "'<table id=\"foo\"><tr><th>A</th></tr>'", ",", "'<tr><td>B</td></tr></table>'", "]", "with", "pytest", ".", "raises", "(", "core", ".", "InconsistentTableError", ")", "as", "err", ":", "Table", ".", "read", "(", "table_in", ",", "format", "=", "'ascii.html'", ",", "htmldict", "=", "{", "'table_id'", ":", "'bad_id'", "}", ",", "guess", "=", "False", ")", "assert", "str", "(", "err", ")", ".", "endswith", "(", "\"ERROR: HTML table id 'bad_id' not found\"", ")", "with", "pytest", ".", "raises", "(", "core", ".", "InconsistentTableError", ")", "as", "err", ":", "Table", ".", "read", "(", "table_in", ",", "format", "=", "'ascii.html'", ",", "htmldict", "=", "{", "'table_id'", ":", "3", "}", ",", "guess", "=", "False", ")", "assert", "str", "(", "err", ")", ".", "endswith", "(", "'ERROR: HTML table number 3 not found'", ")" ]
raise an exception with an informative error message if table_id is not found .
train
false
45,170
def get_validated_options(options, warn=True):
    validated_options = {}
    for (opt, value) in iteritems(options):
        lower = opt.lower()
        try:
            validator = URI_VALIDATORS.get(lower, raise_config_error)
            value = validator(opt, value)
        except (ValueError, ConfigurationError) as exc:
            if warn:
                warnings.warn(str(exc))
            else:
                raise
        else:
            validated_options[lower] = value
    return validated_options
[ "def", "get_validated_options", "(", "options", ",", "warn", "=", "True", ")", ":", "validated_options", "=", "{", "}", "for", "(", "opt", ",", "value", ")", "in", "iteritems", "(", "options", ")", ":", "lower", "=", "opt", ".", "lower", "(", ")", "try", ":", "validator", "=", "URI_VALIDATORS", ".", "get", "(", "lower", ",", "raise_config_error", ")", "value", "=", "validator", "(", "opt", ",", "value", ")", "except", "(", "ValueError", ",", "ConfigurationError", ")", "as", "exc", ":", "if", "warn", ":", "warnings", ".", "warn", "(", "str", "(", "exc", ")", ")", "else", ":", "raise", "else", ":", "validated_options", "[", "lower", "]", "=", "value", "return", "validated_options" ]
validate each entry in options and raise a warning if it is not valid .
train
true
45,171
def encode_message(message):
    message.check_initialized()
    return json.dumps(message, cls=MessageJSONEncoder)
[ "def", "encode_message", "(", "message", ")", ":", "message", ".", "check_initialized", "(", ")", "return", "json", ".", "dumps", "(", "message", ",", "cls", "=", "MessageJSONEncoder", ")" ]
encode message instance to json string .
train
false
45,172
def experience():
    s3.filter = (FS('person_id$human_resource.type') == 1)
    return s3db.hrm_experience_controller()
[ "def", "experience", "(", ")", ":", "s3", ".", "filter", "=", "(", "FS", "(", "'person_id$human_resource.type'", ")", "==", "1", ")", "return", "s3db", ".", "hrm_experience_controller", "(", ")" ]
experience controller .
train
false
45,175
def _package_search(data_dict):
    context = {'model': model, 'session': model.Session, 'user': c.user, 'auth_user_obj': c.userobj}
    if (('sort' not in data_dict) or (not data_dict['sort'])):
        data_dict['sort'] = 'metadata_modified desc'
    if (('rows' not in data_dict) or (not data_dict['rows'])):
        data_dict['rows'] = ITEMS_LIMIT
    query = logic.get_action('package_search')(context, data_dict.copy())
    return (query['count'], query['results'])
[ "def", "_package_search", "(", "data_dict", ")", ":", "context", "=", "{", "'model'", ":", "model", ",", "'session'", ":", "model", ".", "Session", ",", "'user'", ":", "c", ".", "user", ",", "'auth_user_obj'", ":", "c", ".", "userobj", "}", "if", "(", "(", "'sort'", "not", "in", "data_dict", ")", "or", "(", "not", "data_dict", "[", "'sort'", "]", ")", ")", ":", "data_dict", "[", "'sort'", "]", "=", "'metadata_modified desc'", "if", "(", "(", "'rows'", "not", "in", "data_dict", ")", "or", "(", "not", "data_dict", "[", "'rows'", "]", ")", ")", ":", "data_dict", "[", "'rows'", "]", "=", "ITEMS_LIMIT", "query", "=", "logic", ".", "get_action", "(", "'package_search'", ")", "(", "context", ",", "data_dict", ".", "copy", "(", ")", ")", "return", "(", "query", "[", "'count'", "]", ",", "query", "[", "'results'", "]", ")" ]
helper method that wraps the package_search action .
train
false
45,176
def test_description_on_long_named_feature():
    feature = Feature.from_string(FEATURE3)
    assert_equals(feature.description, 'In order to describe my features\nI want to add description on them')
[ "def", "test_description_on_long_named_feature", "(", ")", ":", "feature", "=", "Feature", ".", "from_string", "(", "FEATURE3", ")", "assert_equals", "(", "feature", ".", "description", ",", "'In order to describe my features\\nI want to add description on them'", ")" ]
can parse the description on long named features .
train
false
45,177
def deunicode(p):
    if isinstance(p, unicode):
        if gUTF:
            return p.encode('utf-8')
        else:
            return p.encode(codepage, 'replace')
    elif isinstance(p, basestring):
        if gUTF:
            try:
                p.decode('utf-8')
                return p
            except:
                return p.decode(codepage).encode('utf-8')
        else:
            try:
                return p.decode('utf-8').encode(codepage, 'replace')
            except:
                return p
    else:
        return p
[ "def", "deunicode", "(", "p", ")", ":", "if", "isinstance", "(", "p", ",", "unicode", ")", ":", "if", "gUTF", ":", "return", "p", ".", "encode", "(", "'utf-8'", ")", "else", ":", "return", "p", ".", "encode", "(", "codepage", ",", "'replace'", ")", "elif", "isinstance", "(", "p", ",", "basestring", ")", ":", "if", "gUTF", ":", "try", ":", "p", ".", "decode", "(", "'utf-8'", ")", "return", "p", "except", ":", "return", "p", ".", "decode", "(", "codepage", ")", ".", "encode", "(", "'utf-8'", ")", "else", ":", "try", ":", "return", "p", ".", "decode", "(", "'utf-8'", ")", ".", "encode", "(", "codepage", ",", "'replace'", ")", "except", ":", "return", "p", "else", ":", "return", "p" ]
return the correct 8bit ascii encoding for the platform: latin-1 for windows/posix-non-utf and utf-8 for osx/posix-utf .
train
false
45,178
def test_language_has_first_of():
    lang = Language()
    assert_equals(lang.first_of_examples, 'Examples')
[ "def", "test_language_has_first_of", "(", ")", ":", "lang", "=", "Language", "(", ")", "assert_equals", "(", "lang", ".", "first_of_examples", ",", "'Examples'", ")" ]
language() can pick up first occurrence of a string .
train
false
45,179
def extract_field_data(field):
    if (not field.form.is_bound):
        res = field.form.initial.get(field.name, field.field.initial)
        if callable(res):
            return res()
        return res
    else:
        return field.data
[ "def", "extract_field_data", "(", "field", ")", ":", "if", "(", "not", "field", ".", "form", ".", "is_bound", ")", ":", "res", "=", "field", ".", "form", ".", "initial", ".", "get", "(", "field", ".", "name", ",", "field", ".", "field", ".", "initial", ")", "if", "callable", "(", "res", ")", ":", "return", "res", "(", ")", "return", "res", "else", ":", "return", "field", ".", "data" ]
given a form field .
train
false
45,181
def output_start_end_log(func):
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        'Wrap the method to add logging function.'
        def _output_start_end_log(*_args, **_kwargs):
            'Output the log of the start and the end of the method.'
            output_log(MSG.METHOD_START, method=func.__name__, config_group=self.configuration.config_group)
            ret = func(*_args, **_kwargs)
            output_log(MSG.METHOD_END, method=func.__name__, config_group=self.configuration.config_group)
            return ret
        return _output_start_end_log(self, *args, **kwargs)
    return wrap
[ "def", "output_start_end_log", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrap", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "def", "_output_start_end_log", "(", "*", "_args", ",", "**", "_kwargs", ")", ":", "output_log", "(", "MSG", ".", "METHOD_START", ",", "method", "=", "func", ".", "__name__", ",", "config_group", "=", "self", ".", "configuration", ".", "config_group", ")", "ret", "=", "func", "(", "*", "_args", ",", "**", "_kwargs", ")", "output_log", "(", "MSG", ".", "METHOD_END", ",", "method", "=", "func", ".", "__name__", ",", "config_group", "=", "self", ".", "configuration", ".", "config_group", ")", "return", "ret", "return", "_output_start_end_log", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", "return", "wrap" ]
output the log of the start and the end of the method .
train
false
45,182
def DatabaseDirectorySize(root_path, extension):
    directories = collections.deque([root_path])
    total_size = 0
    total_files = 0
    while directories:
        directory = directories.popleft()
        try:
            items = os.listdir(directory)
        except OSError:
            continue
        for comp in items:
            if (comp == constants.REBALANCE_DIRECTORY):
                continue
            path = os.path.join(directory, comp)
            try:
                statinfo = os.lstat(path)
                if stat.S_ISLNK(statinfo.st_mode):
                    continue
                if stat.S_ISDIR(statinfo.st_mode):
                    directories.append(path)
                elif stat.S_ISREG(statinfo.st_mode):
                    if comp.endswith(extension):
                        total_size += statinfo.st_size
                        total_files += 1
            except OSError:
                continue
    return (total_size, total_files)
[ "def", "DatabaseDirectorySize", "(", "root_path", ",", "extension", ")", ":", "directories", "=", "collections", ".", "deque", "(", "[", "root_path", "]", ")", "total_size", "=", "0", "total_files", "=", "0", "while", "directories", ":", "directory", "=", "directories", ".", "popleft", "(", ")", "try", ":", "items", "=", "os", ".", "listdir", "(", "directory", ")", "except", "OSError", ":", "continue", "for", "comp", "in", "items", ":", "if", "(", "comp", "==", "constants", ".", "REBALANCE_DIRECTORY", ")", ":", "continue", "path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "comp", ")", "try", ":", "statinfo", "=", "os", ".", "lstat", "(", "path", ")", "if", "stat", ".", "S_ISLNK", "(", "statinfo", ".", "st_mode", ")", ":", "continue", "if", "stat", ".", "S_ISDIR", "(", "statinfo", ".", "st_mode", ")", ":", "directories", ".", "append", "(", "path", ")", "elif", "stat", ".", "S_ISREG", "(", "statinfo", ".", "st_mode", ")", ":", "if", "comp", ".", "endswith", "(", "extension", ")", ":", "total_size", "+=", "statinfo", ".", "st_size", "total_files", "+=", "1", "except", "OSError", ":", "continue", "return", "(", "total_size", ",", "total_files", ")" ]
compute size and number of files of a file-based data store .
train
true
45,183
def init(mpstate): return SerialModule(mpstate)
[ "def", "init", "(", "mpstate", ")", ":", "return", "SerialModule", "(", "mpstate", ")" ]
initialise module .
train
false
45,184
def set_sys(layout):
    if salt.utils.which('localectl'):
        __salt__['cmd.run']('localectl set-keymap {0}'.format(layout))
    elif ('RedHat' in __grains__['os_family']):
        __salt__['file.sed']('/etc/sysconfig/keyboard', '^LAYOUT=.*', 'LAYOUT={0}'.format(layout))
    elif ('Debian' in __grains__['os_family']):
        __salt__['file.sed']('/etc/default/keyboard', '^XKBLAYOUT=.*', 'XKBLAYOUT={0}'.format(layout))
    elif ('Gentoo' in __grains__['os_family']):
        __salt__['file.sed']('/etc/conf.d/keymaps', '^keymap=.*', 'keymap={0}'.format(layout))
    return layout
[ "def", "set_sys", "(", "layout", ")", ":", "if", "salt", ".", "utils", ".", "which", "(", "'localectl'", ")", ":", "__salt__", "[", "'cmd.run'", "]", "(", "'localectl set-keymap {0}'", ".", "format", "(", "layout", ")", ")", "elif", "(", "'RedHat'", "in", "__grains__", "[", "'os_family'", "]", ")", ":", "__salt__", "[", "'file.sed'", "]", "(", "'/etc/sysconfig/keyboard'", ",", "'^LAYOUT=.*'", ",", "'LAYOUT={0}'", ".", "format", "(", "layout", ")", ")", "elif", "(", "'Debian'", "in", "__grains__", "[", "'os_family'", "]", ")", ":", "__salt__", "[", "'file.sed'", "]", "(", "'/etc/default/keyboard'", ",", "'^XKBLAYOUT=.*'", ",", "'XKBLAYOUT={0}'", ".", "format", "(", "layout", ")", ")", "elif", "(", "'Gentoo'", "in", "__grains__", "[", "'os_family'", "]", ")", ":", "__salt__", "[", "'file.sed'", "]", "(", "'/etc/conf.d/keymaps'", ",", "'^keymap=.*'", ",", "'keymap={0}'", ".", "format", "(", "layout", ")", ")", "return", "layout" ]
set current system keyboard setting cli example: .
train
true
45,185
def hex_encoding(t): return ('%' + '%'.join((hex(ord(c))[2:] for c in t)))
[ "def", "hex_encoding", "(", "t", ")", ":", "return", "(", "'%'", "+", "'%'", ".", "join", "(", "(", "hex", "(", "ord", "(", "c", ")", ")", "[", "2", ":", "]", "for", "c", "in", "t", ")", ")", ")" ]
hex encoding method .
train
false
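A worked example of the encoding above (hypothetical input): the leading '%' plus the '%' join puts one escape before every character's hex code.

print(hex_encoding('id'))  # -> '%69%64' (ord('i') == 0x69, ord('d') == 0x64)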
45,186
def format_elemwise(vars_):
    spacing = len(vars_)
    prod_size = ((spacing * vars_[0].size[0]), vars_[0].size[1])
    mat_size = ((spacing * vars_[0].size[0]), vars_[0].size[0])
    terms = []
    for (i, var) in enumerate(vars_):
        mat = get_spacing_matrix(mat_size, spacing, i)
        terms.append(lu.mul_expr(mat, var, prod_size))
    return [lu.create_geq(lu.sum_expr(terms))]
[ "def", "format_elemwise", "(", "vars_", ")", ":", "spacing", "=", "len", "(", "vars_", ")", "prod_size", "=", "(", "(", "spacing", "*", "vars_", "[", "0", "]", ".", "size", "[", "0", "]", ")", ",", "vars_", "[", "0", "]", ".", "size", "[", "1", "]", ")", "mat_size", "=", "(", "(", "spacing", "*", "vars_", "[", "0", "]", ".", "size", "[", "0", "]", ")", ",", "vars_", "[", "0", "]", ".", "size", "[", "0", "]", ")", "terms", "=", "[", "]", "for", "(", "i", ",", "var", ")", "in", "enumerate", "(", "vars_", ")", ":", "mat", "=", "get_spacing_matrix", "(", "mat_size", ",", "spacing", ",", "i", ")", "terms", ".", "append", "(", "lu", ".", "mul_expr", "(", "mat", ",", "var", ",", "prod_size", ")", ")", "return", "[", "lu", ".", "create_geq", "(", "lu", ".", "sum_expr", "(", "terms", ")", ")", "]" ]
formats all the elementwise cones for the solver .
train
false
45,188
def std_prop(prop, nobs): return np.sqrt(((prop * (1.0 - prop)) / nobs))
[ "def", "std_prop", "(", "prop", ",", "nobs", ")", ":", "return", "np", ".", "sqrt", "(", "(", "(", "prop", "*", "(", "1.0", "-", "prop", ")", ")", "/", "nobs", ")", ")" ]
standard error for the estimate of a proportion this is just np .
train
false
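A quick numeric check of the formula above (hypothetical values; assumes numpy is imported as np, as the snippet does):

print(std_prop(0.5, nobs=100))  # sqrt(0.5 * 0.5 / 100) = 0.05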
45,189
def ylim(*args, **kwargs):
    ax = gca()
    ret = ax.set_ylim(*args, **kwargs)
    draw_if_interactive()
    return ret
[ "def", "ylim", "(", "*", "args", ",", "**", "kwargs", ")", ":", "ax", "=", "gca", "(", ")", "ret", "=", "ax", ".", "set_ylim", "(", "*", "args", ",", "**", "kwargs", ")", "draw_if_interactive", "(", ")", "return", "ret" ]
set/get the ylimits of the current axes:: ymin .
train
false
45,190
def get_markup_filter():
    try:
        (markup_filter, markup_kwargs) = settings.POOTLE_MARKUP_FILTER
        if (markup_filter is None):
            return (None, 'unset')
        elif (markup_filter == 'textile'):
            import textile
        elif (markup_filter == 'markdown'):
            import markdown
        elif (markup_filter == 'restructuredtext'):
            import docutils
        else:
            return (None, '')
    except Exception:
        return (None, '')
    return (markup_filter, markup_kwargs)
[ "def", "get_markup_filter", "(", ")", ":", "try", ":", "(", "markup_filter", ",", "markup_kwargs", ")", "=", "settings", ".", "POOTLE_MARKUP_FILTER", "if", "(", "markup_filter", "is", "None", ")", ":", "return", "(", "None", ",", "'unset'", ")", "elif", "(", "markup_filter", "==", "'textile'", ")", ":", "import", "textile", "elif", "(", "markup_filter", "==", "'markdown'", ")", ":", "import", "markdown", "elif", "(", "markup_filter", "==", "'restructuredtext'", ")", ":", "import", "docutils", "else", ":", "return", "(", "None", ",", "''", ")", "except", "Exception", ":", "return", "(", "None", ",", "''", ")", "return", "(", "markup_filter", ",", "markup_kwargs", ")" ]
returns the configured filter as a tuple with name and args .
train
false
45,191
def packGlobal_tcpip_forward(peer):
    (host, port) = peer
    return (common.NS(host) + struct.pack('>L', port))
[ "def", "packGlobal_tcpip_forward", "(", "peer", ")", ":", "(", "host", ",", "port", ")", "=", "peer", "return", "(", "common", ".", "NS", "(", "host", ")", "+", "struct", ".", "pack", "(", "'>L'", ",", "port", ")", ")" ]
pack the data for tcpip forwarding .
train
false
45,192
def r2_score(pred, true, sample_weight=None, multioutput='uniform_average'): return R2_score(sample_weight=sample_weight, multioutput=multioutput)(pred, true)
[ "def", "r2_score", "(", "pred", ",", "true", ",", "sample_weight", "=", "None", ",", "multioutput", "=", "'uniform_average'", ")", ":", "return", "R2_score", "(", "sample_weight", "=", "sample_weight", ",", "multioutput", "=", "multioutput", ")", "(", "pred", ",", "true", ")" ]
computes r^2 regression score function .
train
false
45,193
def get_ref_to_resource(resource, is_list): return get_ref_to_doc(('webapi2.0-%s-resource' % get_resource_docname(resource, is_list)))
[ "def", "get_ref_to_resource", "(", "resource", ",", "is_list", ")", ":", "return", "get_ref_to_doc", "(", "(", "'webapi2.0-%s-resource'", "%", "get_resource_docname", "(", "resource", ",", "is_list", ")", ")", ")" ]
returns a node that links to a resources documentation .
train
false
45,194
def get_service_type(f): return getattr(f, 'service_type', None)
[ "def", "get_service_type", "(", "f", ")", ":", "return", "getattr", "(", "f", ",", "'service_type'", ",", "None", ")" ]
retrieves service type from function .
train
false
45,195
def js_del_alert(ident): return ('try {del_message("%s");} catch(err) {}\n' % ident)
[ "def", "js_del_alert", "(", "ident", ")", ":", "return", "(", "'try {del_message(\"%s\");} catch(err) {}\\n'", "%", "ident", ")" ]
this function returns a string containing js code to remove an alert message .
train
false
45,196
def _update_version_in_json_manifest(content, new_version_number):
    updated = json.loads(content)
    if ('version' in updated):
        updated['version'] = new_version_number
    return json.dumps(updated)
[ "def", "_update_version_in_json_manifest", "(", "content", ",", "new_version_number", ")", ":", "updated", "=", "json", ".", "loads", "(", "content", ")", "if", "(", "'version'", "in", "updated", ")", ":", "updated", "[", "'version'", "]", "=", "new_version_number", "return", "json", ".", "dumps", "(", "updated", ")" ]
change the version number in the json manifest file provided .
train
false
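A usage sketch (hypothetical manifest content; assumes json is imported, as the snippet requires):

manifest = '{"name": "demo", "version": "1.0.0"}'
print(_update_version_in_json_manifest(manifest, '2.0.0'))
# -> {"name": "demo", "version": "2.0.0"}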
45,199
def test_nvcc_bug():
    shape = (5, 4)
    aa = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a = aa[::None, ::(-1)]
    b = cuda_ndarray.CudaNdarray(aa)[::None, ::(-1)]
    c = copy.copy(b)
    d = copy.deepcopy(b)
    assert numpy.allclose(a, numpy.asarray(b))
    assert numpy.allclose(a, numpy.asarray(c))
    assert numpy.allclose(a, numpy.asarray(d))
    b += b
    assert numpy.allclose((a + a), numpy.asarray(b))
    assert numpy.allclose((a + a), numpy.asarray(c))
    assert numpy.allclose(a, numpy.asarray(d))
[ "def", "test_nvcc_bug", "(", ")", ":", "shape", "=", "(", "5", ",", "4", ")", "aa", "=", "theano", ".", "_asarray", "(", "numpy", ".", "random", ".", "rand", "(", "*", "shape", ")", ",", "dtype", "=", "'float32'", ")", "a", "=", "aa", "[", ":", ":", "None", ",", ":", ":", "(", "-", "1", ")", "]", "b", "=", "cuda_ndarray", ".", "CudaNdarray", "(", "aa", ")", "[", ":", ":", "None", ",", ":", ":", "(", "-", "1", ")", "]", "c", "=", "copy", ".", "copy", "(", "b", ")", "d", "=", "copy", ".", "deepcopy", "(", "b", ")", "assert", "numpy", ".", "allclose", "(", "a", ",", "numpy", ".", "asarray", "(", "b", ")", ")", "assert", "numpy", ".", "allclose", "(", "a", ",", "numpy", ".", "asarray", "(", "c", ")", ")", "assert", "numpy", ".", "allclose", "(", "a", ",", "numpy", ".", "asarray", "(", "d", ")", ")", "b", "+=", "b", "assert", "numpy", ".", "allclose", "(", "(", "a", "+", "a", ")", ",", "numpy", ".", "asarray", "(", "b", ")", ")", "assert", "numpy", ".", "allclose", "(", "(", "a", "+", "a", ")", ",", "numpy", ".", "asarray", "(", "c", ")", ")", "assert", "numpy", ".", "allclose", "(", "a", ",", "numpy", ".", "asarray", "(", "d", ")", ")" ]
the fct k_elemwise_unary_rowmajor_copy(used by cuda .
train
false
45,204
def yscale(*args, **kwargs):
    ax = gca()
    ret = ax.set_yscale(*args, **kwargs)
    draw_if_interactive()
    return ret
[ "def", "yscale", "(", "*", "args", ",", "**", "kwargs", ")", ":", "ax", "=", "gca", "(", ")", "ret", "=", "ax", ".", "set_yscale", "(", "*", "args", ",", "**", "kwargs", ")", "draw_if_interactive", "(", ")", "return", "ret" ]
call signature:: xscale set the scaling for the y-axis: %s different keywords may be accepted .
train
false
45,205
@pytest.fixture
def bookmarks(bookmark_manager_stub):
    bookmark_manager_stub.marks = collections.OrderedDict([
        ('https://github.com', 'GitHub'),
        ('https://python.org', 'Welcome to Python.org'),
        ('http://qutebrowser.org', 'qutebrowser | qutebrowser')])
    return bookmark_manager_stub
[ "@", "pytest", ".", "fixture", "def", "bookmarks", "(", "bookmark_manager_stub", ")", ":", "bookmark_manager_stub", ".", "marks", "=", "collections", ".", "OrderedDict", "(", "[", "(", "'https://github.com'", ",", "'GitHub'", ")", ",", "(", "'https://python.org'", ",", "'Welcome to Python.org'", ")", ",", "(", "'http://qutebrowser.org'", ",", "'qutebrowser | qutebrowser'", ")", "]", ")", "return", "bookmark_manager_stub" ]
pre-populate the bookmark-manager stub with some quickmarks .
train
false
45,206
def _bin_to_long(x): return int(binascii.hexlify(x), 16)
[ "def", "_bin_to_long", "(", "x", ")", ":", "return", "int", "(", "binascii", ".", "hexlify", "(", "x", ")", ",", "16", ")" ]
convert a binary string into a long integer this is a clever optimization for fast xor vector math .
train
false
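A sketch of the XOR optimization the docstring hints at (hypothetical bytes): converting to long integers lets one ^ replace a per-byte loop.

a = _bin_to_long(b'\x0f\x0f')  # 0x0f0f
b = _bin_to_long(b'\xff\x00')  # 0xff00
print(hex(a ^ b))              # -> 0xf00f, i.e. the bytes \xf0\x0f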
45,207
def is_list_view(path, method, view):
    if hasattr(view, 'action'):
        return (view.action == 'list')
    if (method.lower() != 'get'):
        return False
    path_components = path.strip('/').split('/')
    if (path_components and ('{' in path_components[(-1)])):
        return False
    return True
[ "def", "is_list_view", "(", "path", ",", "method", ",", "view", ")", ":", "if", "hasattr", "(", "view", ",", "'action'", ")", ":", "return", "(", "view", ".", "action", "==", "'list'", ")", "if", "(", "method", ".", "lower", "(", ")", "!=", "'get'", ")", ":", "return", "False", "path_components", "=", "path", ".", "strip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "if", "(", "path_components", "and", "(", "'{'", "in", "path_components", "[", "(", "-", "1", ")", "]", ")", ")", ":", "return", "False", "return", "True" ]
return true if the given path/method appears to represent a list view .
train
false
45,208
def test_with_settings_with_other_context_managers():
    env.testval1 = 'outer 1'
    prev_lcwd = env.lcwd
    def some_task():
        eq_(env.testval1, 'inner 1')
        ok_(env.lcwd.endswith('here'))
    decorated_task = decorators.with_settings(lcd('here'), testval1='inner 1')(some_task)
    decorated_task()
    ok_(env.testval1, 'outer 1')
    eq_(env.lcwd, prev_lcwd)
[ "def", "test_with_settings_with_other_context_managers", "(", ")", ":", "env", ".", "testval1", "=", "'outer 1'", "prev_lcwd", "=", "env", ".", "lcwd", "def", "some_task", "(", ")", ":", "eq_", "(", "env", ".", "testval1", ",", "'inner 1'", ")", "ok_", "(", "env", ".", "lcwd", ".", "endswith", "(", "'here'", ")", ")", "decorated_task", "=", "decorators", ".", "with_settings", "(", "lcd", "(", "'here'", ")", ",", "testval1", "=", "'inner 1'", ")", "(", "some_task", ")", "decorated_task", "(", ")", "ok_", "(", "env", ".", "testval1", ",", "'outer 1'", ")", "eq_", "(", "env", ".", "lcwd", ",", "prev_lcwd", ")" ]
with_settings() should take other context managers .
train
false
45,209
@gen.coroutine
def UpdateDevice(client, obj_store, user_id, device_id, request):
    device_dict = request['device_dict']
    if (device_dict.has_key('device_id') and (device_dict['device_id'] != device_id)):
        raise web.HTTPError(400, ('bad auth cookie; device id mismatch %d != %d' % (device_dict['device_id'], device_id)))
    request['user_id'] = user_id
    request['device_id'] = device_id
    (yield gen.Task(Operation.CreateAndExecute, client, user_id, device_id, 'Device.UpdateOperation', request))
    logging.info(('UPDATE DEVICE: user: %d, device: %d' % (user_id, device_id)))
    raise gen.Return({})
[ "@", "gen", ".", "coroutine", "def", "UpdateDevice", "(", "client", ",", "obj_store", ",", "user_id", ",", "device_id", ",", "request", ")", ":", "device_dict", "=", "request", "[", "'device_dict'", "]", "if", "(", "device_dict", ".", "has_key", "(", "'device_id'", ")", "and", "(", "device_dict", "[", "'device_id'", "]", "!=", "device_id", ")", ")", ":", "raise", "web", ".", "HTTPError", "(", "400", ",", "(", "'bad auth cookie; device id mismatch %d != %d'", "%", "(", "device_dict", "[", "'device_id'", "]", ",", "device_id", ")", ")", ")", "request", "[", "'user_id'", "]", "=", "user_id", "request", "[", "'device_id'", "]", "=", "device_id", "(", "yield", "gen", ".", "Task", "(", "Operation", ".", "CreateAndExecute", ",", "client", ",", "user_id", ",", "device_id", ",", "'Device.UpdateOperation'", ",", "request", ")", ")", "logging", ".", "info", "(", "(", "'UPDATE DEVICE: user: %d, device: %d'", "%", "(", "user_id", ",", "device_id", ")", ")", ")", "raise", "gen", ".", "Return", "(", "{", "}", ")" ]
updates the device metadata .
train
false
45,210
def extract_eliot_from_twisted_log(twisted_log_line):
    open_brace = twisted_log_line.find('{')
    close_brace = twisted_log_line.rfind('}')
    if ((open_brace == (-1)) or (close_brace == (-1))):
        return None
    candidate = twisted_log_line[open_brace:(close_brace + 1)]
    try:
        fields = json.loads(candidate)
    except (ValueError, TypeError):
        return None
    if {'task_uuid', 'timestamp'}.difference(fields):
        return None
    return candidate
[ "def", "extract_eliot_from_twisted_log", "(", "twisted_log_line", ")", ":", "open_brace", "=", "twisted_log_line", ".", "find", "(", "'{'", ")", "close_brace", "=", "twisted_log_line", ".", "rfind", "(", "'}'", ")", "if", "(", "(", "open_brace", "==", "(", "-", "1", ")", ")", "or", "(", "close_brace", "==", "(", "-", "1", ")", ")", ")", ":", "return", "None", "candidate", "=", "twisted_log_line", "[", "open_brace", ":", "(", "close_brace", "+", "1", ")", "]", "try", ":", "fields", "=", "json", ".", "loads", "(", "candidate", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "None", "if", "{", "'task_uuid'", ",", "'timestamp'", "}", ".", "difference", "(", "fields", ")", ":", "return", "None", "return", "candidate" ]
given a line from a twisted log message .
train
false
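A usage sketch (hypothetical log line; assumes json is imported, as the snippet requires):

line = '2016-01-01 [stdout] {"task_uuid": "abc", "timestamp": 1.0}'
print(extract_eliot_from_twisted_log(line))
# -> '{"task_uuid": "abc", "timestamp": 1.0}'
print(extract_eliot_from_twisted_log('no braces here'))  # -> None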
45,211
def _get_xdg_config_dir():
    path = os.environ.get(u'XDG_CONFIG_HOME')
    if (path is None):
        path = get_home()
        if (path is not None):
            path = os.path.join(path, u'.config')
    return path
[ "def", "_get_xdg_config_dir", "(", ")", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "u'XDG_CONFIG_HOME'", ")", "if", "(", "path", "is", "None", ")", ":", "path", "=", "get_home", "(", ")", "if", "(", "path", "is", "not", "None", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "u'.config'", ")", "return", "path" ]
returns the xdg configuration directory .
train
false
45,212
def kit2fiff():
    _check_mayavi_version()
    from ._backend import _check_backend
    _check_backend()
    from ._kit2fiff_gui import Kit2FiffFrame
    gui = Kit2FiffFrame()
    gui.configure_traits()
    return gui
[ "def", "kit2fiff", "(", ")", ":", "_check_mayavi_version", "(", ")", "from", ".", "_backend", "import", "_check_backend", "_check_backend", "(", ")", "from", ".", "_kit2fiff_gui", "import", "Kit2FiffFrame", "gui", "=", "Kit2FiffFrame", "(", ")", "gui", ".", "configure_traits", "(", ")", "return", "gui" ]
convert kit files to the fiff format .
train
false
45,214
def OpenFilename(filename, options={}):
    command = GetVimCommand(options.get(u'command', u'horizontal-split'), u'horizontal-split')
    size = (options.get(u'size', u'') if (command in [u'split', u'vsplit']) else u'')
    focus = options.get(u'focus', False)
    if ((not focus) and (command == u'tabedit')):
        previous_tab = GetIntValue(u'tabpagenr()')
    else:
        previous_tab = None
    try:
        vim.command(u'{0}{1} {2}'.format(size, command, filename))
    except vim.error as e:
        if (u'E325' not in str(e)):
            raise
        if (filename != GetCurrentBufferFilepath()):
            return
    except KeyboardInterrupt:
        return
    _SetUpLoadedBuffer(command, filename, options.get(u'fix', False), options.get(u'position', u'start'), options.get(u'watch', False))
    if (not focus):
        if (command == u'tabedit'):
            JumpToTab(previous_tab)
        if (command in [u'split', u'vsplit']):
            JumpToPreviousWindow()
[ "def", "OpenFilename", "(", "filename", ",", "options", "=", "{", "}", ")", ":", "command", "=", "GetVimCommand", "(", "options", ".", "get", "(", "u'command'", ",", "u'horizontal-split'", ")", ",", "u'horizontal-split'", ")", "size", "=", "(", "options", ".", "get", "(", "u'size'", ",", "u''", ")", "if", "(", "command", "in", "[", "u'split'", ",", "u'vsplit'", "]", ")", "else", "u''", ")", "focus", "=", "options", ".", "get", "(", "u'focus'", ",", "False", ")", "if", "(", "(", "not", "focus", ")", "and", "(", "command", "==", "u'tabedit'", ")", ")", ":", "previous_tab", "=", "GetIntValue", "(", "u'tabpagenr()'", ")", "else", ":", "previous_tab", "=", "None", "try", ":", "vim", ".", "command", "(", "u'{0}{1} {2}'", ".", "format", "(", "size", ",", "command", ",", "filename", ")", ")", "except", "vim", ".", "error", "as", "e", ":", "if", "(", "u'E325'", "not", "in", "str", "(", "e", ")", ")", ":", "raise", "if", "(", "filename", "!=", "GetCurrentBufferFilepath", "(", ")", ")", ":", "return", "except", "KeyboardInterrupt", ":", "return", "_SetUpLoadedBuffer", "(", "command", ",", "filename", ",", "options", ".", "get", "(", "u'fix'", ",", "False", ")", ",", "options", ".", "get", "(", "u'position'", ",", "u'start'", ")", ",", "options", ".", "get", "(", "u'watch'", ",", "False", ")", ")", "if", "(", "not", "focus", ")", ":", "if", "(", "command", "==", "u'tabedit'", ")", ":", "JumpToTab", "(", "previous_tab", ")", "if", "(", "command", "in", "[", "u'split'", ",", "u'vsplit'", "]", ")", ":", "JumpToPreviousWindow", "(", ")" ]
open a file in vim .
train
false
45,215
def do_with_python(python, cmdline, runas=None):
    if python:
        cmd = 'PYENV_VERSION={0} {1}'.format(python, cmdline)
    else:
        cmd = cmdline
    return do(cmd, runas=runas)
[ "def", "do_with_python", "(", "python", ",", "cmdline", ",", "runas", "=", "None", ")", ":", "if", "python", ":", "cmd", "=", "'PYENV_VERSION={0} {1}'", ".", "format", "(", "python", ",", "cmdline", ")", "else", ":", "cmd", "=", "cmdline", "return", "do", "(", "cmd", ",", "runas", "=", "runas", ")" ]
execute a python command with pyenvs shims using a specific python version .
train
true
45,216
def lexists(path):
    try:
        st = os.lstat(path)
    except OSError:
        return False
    return True
[ "def", "lexists", "(", "path", ")", ":", "try", ":", "st", "=", "os", ".", "lstat", "(", "path", ")", "except", "OSError", ":", "return", "False", "return", "True" ]
test whether a path exists .
train
false
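A sketch of how this differs from os.path.exists (hypothetical paths; creates and removes a dangling symlink):

import os

os.symlink('/nonexistent-target', '/tmp/dangling')
print(os.path.exists('/tmp/dangling'))  # False -- follows the link
print(lexists('/tmp/dangling'))         # True  -- lstat sees the link itself
os.remove('/tmp/dangling')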
45,218
def createParser(filename, real_filename=None, tags=None):
    if (not tags):
        tags = []
    stream = FileInputStream(filename, real_filename, tags=tags)
    return guessParser(stream)
[ "def", "createParser", "(", "filename", ",", "real_filename", "=", "None", ",", "tags", "=", "None", ")", ":", "if", "(", "not", "tags", ")", ":", "tags", "=", "[", "]", "stream", "=", "FileInputStream", "(", "filename", ",", "real_filename", ",", "tags", "=", "tags", ")", "return", "guessParser", "(", "stream", ")" ]
create a parser from a file or returns none on error .
train
false
45,219
@status('pyconfig.h.in regenerated', modal=True, info=str)
def regenerated_pyconfig_h_in(file_paths):
    if ('configure.ac' in file_paths):
        return ('yes' if ('pyconfig.h.in' in file_paths) else 'no')
    else:
        return 'not needed'
[ "@", "status", "(", "'pyconfig.h.in regenerated'", ",", "modal", "=", "True", ",", "info", "=", "str", ")", "def", "regenerated_pyconfig_h_in", "(", "file_paths", ")", ":", "if", "(", "'configure.ac'", "in", "file_paths", ")", ":", "return", "(", "'yes'", "if", "(", "'pyconfig.h.in'", "in", "file_paths", ")", "else", "'no'", ")", "else", ":", "return", "'not needed'" ]
check if pyconfig .
train
false
45,220
def renotify(thing, possible_recipients=None):
    from r2.lib import butler
    error_message = ('Unable to renotify thing of type: %r' % thing)
    notification_handler(thing, notify_function=butler.readd_mention_notification, error_message=error_message, possible_recipients=possible_recipients)
[ "def", "renotify", "(", "thing", ",", "possible_recipients", "=", "None", ")", ":", "from", "r2", ".", "lib", "import", "butler", "error_message", "=", "(", "'Unable to renotify thing of type: %r'", "%", "thing", ")", "notification_handler", "(", "thing", ",", "notify_function", "=", "butler", ".", "readd_mention_notification", ",", "error_message", "=", "error_message", ",", "possible_recipients", "=", "possible_recipients", ")" ]
given a thing .
train
false
45,222
def clear_site_cache(sender, **kwargs):
    instance = kwargs['instance']
    using = kwargs['using']
    try:
        del SITE_CACHE[instance.pk]
    except KeyError:
        pass
    try:
        del SITE_CACHE[Site.objects.using(using).get(pk=instance.pk).domain]
    except (KeyError, Site.DoesNotExist):
        pass
[ "def", "clear_site_cache", "(", "sender", ",", "**", "kwargs", ")", ":", "instance", "=", "kwargs", "[", "'instance'", "]", "using", "=", "kwargs", "[", "'using'", "]", "try", ":", "del", "SITE_CACHE", "[", "instance", ".", "pk", "]", "except", "KeyError", ":", "pass", "try", ":", "del", "SITE_CACHE", "[", "Site", ".", "objects", ".", "using", "(", "using", ")", ".", "get", "(", "pk", "=", "instance", ".", "pk", ")", ".", "domain", "]", "except", "(", "KeyError", ",", "Site", ".", "DoesNotExist", ")", ":", "pass" ]
clears the cache each time a site is saved or deleted .
train
false
45,224
def ode_Bernoulli(eq, func, order, match):
    x = func.args[0]
    f = func.func
    r = match
    C1 = get_numbered_constants(eq, num=1)
    t = exp(((1 - r[r['n']]) * Integral((r[r['b']] / r[r['a']]), x)))
    tt = ((r[r['n']] - 1) * Integral(((t * r[r['c']]) / r[r['a']]), x))
    return Eq(f(x), (((tt + C1) / t) ** (1 / (1 - r[r['n']]))))
[ "def", "ode_Bernoulli", "(", "eq", ",", "func", ",", "order", ",", "match", ")", ":", "x", "=", "func", ".", "args", "[", "0", "]", "f", "=", "func", ".", "func", "r", "=", "match", "C1", "=", "get_numbered_constants", "(", "eq", ",", "num", "=", "1", ")", "t", "=", "exp", "(", "(", "(", "1", "-", "r", "[", "r", "[", "'n'", "]", "]", ")", "*", "Integral", "(", "(", "r", "[", "r", "[", "'b'", "]", "]", "/", "r", "[", "r", "[", "'a'", "]", "]", ")", ",", "x", ")", ")", ")", "tt", "=", "(", "(", "r", "[", "r", "[", "'n'", "]", "]", "-", "1", ")", "*", "Integral", "(", "(", "(", "t", "*", "r", "[", "r", "[", "'c'", "]", "]", ")", "/", "r", "[", "r", "[", "'a'", "]", "]", ")", ",", "x", ")", ")", "return", "Eq", "(", "f", "(", "x", ")", ",", "(", "(", "(", "tt", "+", "C1", ")", "/", "t", ")", "**", "(", "1", "/", "(", "1", "-", "r", "[", "r", "[", "'n'", "]", "]", ")", ")", ")", ")" ]
solves bernoulli differential equations .
train
false
45,226
def _split_token_parts(blob): return [(urllib.unquote_plus(part) or None) for part in blob.split('|')]
[ "def", "_split_token_parts", "(", "blob", ")", ":", "return", "[", "(", "urllib", ".", "unquote_plus", "(", "part", ")", "or", "None", ")", "for", "part", "in", "blob", ".", "split", "(", "'|'", ")", "]" ]
extracts and unescapes fields from the provided binary string .
train
false
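A usage sketch (hypothetical token blob; the snippet is Python 2, where urllib provides unquote_plus):

print(_split_token_parts('user%40example.com|session+1|'))
# -> ['user@example.com', 'session 1', None] -- the empty trailing field becomes None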
45,227
@control_command()
def enable_events(state):
    dispatcher = state.consumer.event_dispatcher
    if (dispatcher.groups and (u'task' not in dispatcher.groups)):
        dispatcher.groups.add(u'task')
        logger.info(u'Events of group {task} enabled by remote.')
        return ok(u'task events enabled')
    return ok(u'task events already enabled')
[ "@", "control_command", "(", ")", "def", "enable_events", "(", "state", ")", ":", "dispatcher", "=", "state", ".", "consumer", ".", "event_dispatcher", "if", "(", "dispatcher", ".", "groups", "and", "(", "u'task'", "not", "in", "dispatcher", ".", "groups", ")", ")", ":", "dispatcher", ".", "groups", ".", "add", "(", "u'task'", ")", "logger", ".", "info", "(", "u'Events of group {task} enabled by remote.'", ")", "return", "ok", "(", "u'task events enabled'", ")", "return", "ok", "(", "u'task events already enabled'", ")" ]
tell worker(s) to send task-related events .
train
false
45,228
def test_spans_span_sent(doc):
    assert len(list(doc.sents))
    assert (doc[:2].sent.root.text == u'is')
    assert (doc[:2].sent.text == u'This is a sentence .')
    assert (doc[6:7].sent.root.left_edge.text == u'This')
[ "def", "test_spans_span_sent", "(", "doc", ")", ":", "assert", "len", "(", "list", "(", "doc", ".", "sents", ")", ")", "assert", "(", "doc", "[", ":", "2", "]", ".", "sent", ".", "root", ".", "text", "==", "u'is'", ")", "assert", "(", "doc", "[", ":", "2", "]", ".", "sent", ".", "text", "==", "u'This is a sentence .'", ")", "assert", "(", "doc", "[", "6", ":", "7", "]", ".", "sent", ".", "root", ".", "left_edge", ".", "text", "==", "u'This'", ")" ]
test span .
train
false
45,229
def ip_for_request(request):
    meta = request.META
    return meta.get(u'HTTP_X_FORWARDED_FOR', meta[u'REMOTE_ADDR']).split(u',')[0]
[ "def", "ip_for_request", "(", "request", ")", ":", "meta", "=", "request", ".", "META", "return", "meta", ".", "get", "(", "u'HTTP_X_FORWARDED_FOR'", ",", "meta", "[", "u'REMOTE_ADDR'", "]", ")", ".", "split", "(", "u','", ")", "[", "0", "]" ]
returns ip address for request - first checks http_x_forwarded_for header .
train
false
45,231
@gen.coroutine
def _QueryUsersForArchive(client, requesting_user_id, user_ids):
    user_friend_list = (yield gen.Task(User.QueryUsers, client, requesting_user_id, user_ids))
    user_dicts = (yield [gen.Task(user.MakeUserMetadataDict, client, requesting_user_id, forward_friend, reverse_friend) for (user, forward_friend, reverse_friend) in user_friend_list])
    response = {'users': user_dicts}
    raise gen.Return(response)
[ "@", "gen", ".", "coroutine", "def", "_QueryUsersForArchive", "(", "client", ",", "requesting_user_id", ",", "user_ids", ")", ":", "user_friend_list", "=", "(", "yield", "gen", ".", "Task", "(", "User", ".", "QueryUsers", ",", "client", ",", "requesting_user_id", ",", "user_ids", ")", ")", "user_dicts", "=", "(", "yield", "[", "gen", ".", "Task", "(", "user", ".", "MakeUserMetadataDict", ",", "client", ",", "requesting_user_id", ",", "forward_friend", ",", "reverse_friend", ")", "for", "(", "user", ",", "forward_friend", ",", "reverse_friend", ")", "in", "user_friend_list", "]", ")", "response", "=", "{", "'users'", ":", "user_dicts", "}", "raise", "gen", ".", "Return", "(", "response", ")" ]
queries users by user id .
train
false
45,232
def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
    (X_train, y_train) = _safe_split(estimator, X, y, train)
    (X_test, y_test) = _safe_split(estimator, X, y, test, train)
    return rfe._fit(X_train, y_train, (lambda estimator, features: _score(estimator, X_test[:, features], y_test, scorer))).scores_
[ "def", "_rfe_single_fit", "(", "rfe", ",", "estimator", ",", "X", ",", "y", ",", "train", ",", "test", ",", "scorer", ")", ":", "(", "X_train", ",", "y_train", ")", "=", "_safe_split", "(", "estimator", ",", "X", ",", "y", ",", "train", ")", "(", "X_test", ",", "y_test", ")", "=", "_safe_split", "(", "estimator", ",", "X", ",", "y", ",", "test", ",", "train", ")", "return", "rfe", ".", "_fit", "(", "X_train", ",", "y_train", ",", "(", "lambda", "estimator", ",", "features", ":", "_score", "(", "estimator", ",", "X_test", "[", ":", ",", "features", "]", ",", "y_test", ",", "scorer", ")", ")", ")", ".", "scores_" ]
return the score for a fit across one fold .
train
false
45,235
def ffs(c, s):
    for i in c:
        if (i in s):
            return i
[ "def", "ffs", "(", "c", ",", "s", ")", ":", "for", "i", "in", "c", ":", "if", "(", "i", "in", "s", ")", ":", "return", "i" ]
first from second goes through the first list .
train
false
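A quick illustration (hypothetical inputs): the first element of the first iterable that also appears in the second is returned; with no match the function falls through and returns None.

print(ffs('abc', 'zbc'))  # -> 'b'
print(ffs('xyz', 'abc'))  # -> None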
45,236
def wraps(wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=WRAPPER_UPDATES): return curry(update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated)
[ "def", "wraps", "(", "wrapped", ",", "assigned", "=", "WRAPPER_ASSIGNMENTS", ",", "updated", "=", "WRAPPER_UPDATES", ")", ":", "return", "curry", "(", "update_wrapper", ",", "wrapped", "=", "wrapped", ",", "assigned", "=", "assigned", ",", "updated", "=", "updated", ")" ]
decorator factory to apply update_wrapper() to a wrapper function returns a decorator that invokes update_wrapper() with the decorated function as the wrapper argument and the arguments to wraps() as the remaining arguments .
train
false
45,237
def _aggr_first(inList):
    for elem in inList:
        if (elem != SENTINEL_VALUE_FOR_MISSING_DATA):
            return elem
    return None
[ "def", "_aggr_first", "(", "inList", ")", ":", "for", "elem", "in", "inList", ":", "if", "(", "elem", "!=", "SENTINEL_VALUE_FOR_MISSING_DATA", ")", ":", "return", "elem", "return", "None" ]
returns first non-none element in the list .
train
false
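A usage sketch; the sentinel constant is assumed here, since its real value lives elsewhere in the source module:

SENTINEL_VALUE_FOR_MISSING_DATA = None  # assumed stand-in for the module's constant

print(_aggr_first([None, None, 7, 3]))  # -> 7
print(_aggr_first([None, None]))        # -> None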
45,238
@requires_segment_info
def last_pipe_status(pl, segment_info):
    last_pipe_status = segment_info[u'args'].last_pipe_status
    if any(last_pipe_status):
        return [{u'contents': str(status), u'highlight_groups': [(u'exit_fail' if status else u'exit_success')], u'draw_inner_divider': True} for status in last_pipe_status]
    else:
        return None
[ "@", "requires_segment_info", "def", "last_pipe_status", "(", "pl", ",", "segment_info", ")", ":", "last_pipe_status", "=", "segment_info", "[", "u'args'", "]", ".", "last_pipe_status", "if", "any", "(", "last_pipe_status", ")", ":", "return", "[", "{", "u'contents'", ":", "str", "(", "status", ")", ",", "u'highlight_groups'", ":", "[", "(", "u'exit_fail'", "if", "status", "else", "u'exit_success'", ")", "]", ",", "u'draw_inner_divider'", ":", "True", "}", "for", "status", "in", "last_pipe_status", "]", "else", ":", "return", "None" ]
return last pipe status .
train
false
45,239
def do_filesizeformat(value, binary=False):
    bytes = float(value)
    base = ((binary and 1024) or 1000)
    prefixes = [((binary and 'KiB') or 'kB'), ((binary and 'MiB') or 'MB'), ((binary and 'GiB') or 'GB'), ((binary and 'TiB') or 'TB'), ((binary and 'PiB') or 'PB'), ((binary and 'EiB') or 'EB'), ((binary and 'ZiB') or 'ZB'), ((binary and 'YiB') or 'YB')]
    if (bytes == 1):
        return '1 Byte'
    elif (bytes < base):
        return ('%d Bytes' % bytes)
    else:
        for (i, prefix) in enumerate(prefixes):
            unit = (base ** (i + 2))
            if (bytes < unit):
                return ('%.1f %s' % (((base * bytes) / unit), prefix))
        return ('%.1f %s' % (((base * bytes) / unit), prefix))
[ "def", "do_filesizeformat", "(", "value", ",", "binary", "=", "False", ")", ":", "bytes", "=", "float", "(", "value", ")", "base", "=", "(", "(", "binary", "and", "1024", ")", "or", "1000", ")", "prefixes", "=", "[", "(", "(", "binary", "and", "'KiB'", ")", "or", "'kB'", ")", ",", "(", "(", "binary", "and", "'MiB'", ")", "or", "'MB'", ")", ",", "(", "(", "binary", "and", "'GiB'", ")", "or", "'GB'", ")", ",", "(", "(", "binary", "and", "'TiB'", ")", "or", "'TB'", ")", ",", "(", "(", "binary", "and", "'PiB'", ")", "or", "'PB'", ")", ",", "(", "(", "binary", "and", "'EiB'", ")", "or", "'EB'", ")", ",", "(", "(", "binary", "and", "'ZiB'", ")", "or", "'ZB'", ")", ",", "(", "(", "binary", "and", "'YiB'", ")", "or", "'YB'", ")", "]", "if", "(", "bytes", "==", "1", ")", ":", "return", "'1 Byte'", "elif", "(", "bytes", "<", "base", ")", ":", "return", "(", "'%d Bytes'", "%", "bytes", ")", "else", ":", "for", "(", "i", ",", "prefix", ")", "in", "enumerate", "(", "prefixes", ")", ":", "unit", "=", "(", "base", "**", "(", "i", "+", "2", ")", ")", "if", "(", "bytes", "<", "unit", ")", ":", "return", "(", "'%.1f %s'", "%", "(", "(", "(", "base", "*", "bytes", ")", "/", "unit", ")", ",", "prefix", ")", ")", "return", "(", "'%.1f %s'", "%", "(", "(", "(", "base", "*", "bytes", ")", "/", "unit", ")", ",", "prefix", ")", ")" ]
format the value like a human-readable file size .
train
true
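A few sample outputs (hypothetical values): the base and prefix table switch between decimal and binary units depending on the binary flag.

print(do_filesizeformat(1))           # -> '1 Byte'
print(do_filesizeformat(1000))        # -> '1.0 kB'
print(do_filesizeformat(1024, True))  # -> '1.0 KiB'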
45,240
def _maybe_partial_time_string(index, indexer, kind):
    assert isinstance(index, pd.Index)
    if (not isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex))):
        return indexer
    if isinstance(indexer, slice):
        if isinstance(indexer.start, pd.compat.string_types):
            start = index._maybe_cast_slice_bound(indexer.start, 'left', kind)
        else:
            start = indexer.start
        if isinstance(indexer.stop, pd.compat.string_types):
            stop = index._maybe_cast_slice_bound(indexer.stop, 'right', kind)
        else:
            stop = indexer.stop
        return slice(start, stop)
    elif isinstance(indexer, pd.compat.string_types):
        start = index._maybe_cast_slice_bound(indexer, 'left', 'loc')
        stop = index._maybe_cast_slice_bound(indexer, 'right', 'loc')
        return slice(min(start, stop), max(start, stop))
    return indexer
[ "def", "_maybe_partial_time_string", "(", "index", ",", "indexer", ",", "kind", ")", ":", "assert", "isinstance", "(", "index", ",", "pd", ".", "Index", ")", "if", "(", "not", "isinstance", "(", "index", ",", "(", "pd", ".", "DatetimeIndex", ",", "pd", ".", "PeriodIndex", ")", ")", ")", ":", "return", "indexer", "if", "isinstance", "(", "indexer", ",", "slice", ")", ":", "if", "isinstance", "(", "indexer", ".", "start", ",", "pd", ".", "compat", ".", "string_types", ")", ":", "start", "=", "index", ".", "_maybe_cast_slice_bound", "(", "indexer", ".", "start", ",", "'left'", ",", "kind", ")", "else", ":", "start", "=", "indexer", ".", "start", "if", "isinstance", "(", "indexer", ".", "stop", ",", "pd", ".", "compat", ".", "string_types", ")", ":", "stop", "=", "index", ".", "_maybe_cast_slice_bound", "(", "indexer", ".", "stop", ",", "'right'", ",", "kind", ")", "else", ":", "stop", "=", "indexer", ".", "stop", "return", "slice", "(", "start", ",", "stop", ")", "elif", "isinstance", "(", "indexer", ",", "pd", ".", "compat", ".", "string_types", ")", ":", "start", "=", "index", ".", "_maybe_cast_slice_bound", "(", "indexer", ",", "'left'", ",", "'loc'", ")", "stop", "=", "index", ".", "_maybe_cast_slice_bound", "(", "indexer", ",", "'right'", ",", "'loc'", ")", "return", "slice", "(", "min", "(", "start", ",", "stop", ")", ",", "max", "(", "start", ",", "stop", ")", ")", "return", "indexer" ]
convert indexer for partial string selection if data has datetimeindex/periodindex .
train
false
45,241
def insort(col, element, get=(lambda x: x)):
    if (not col):
        col.insert(0, element)
        return col
    (lo, hi) = (0, len(col))
    while (lo < hi):
        mid = int(((hi + lo) / 2))
        if (get(col[mid]) > get(element)):
            hi = mid
        else:
            lo = (mid + 1)
    col.insert(lo, element)
    return col
[ "def", "insort", "(", "col", ",", "element", ",", "get", "=", "(", "lambda", "x", ":", "x", ")", ")", ":", "if", "(", "not", "col", ")", ":", "col", ".", "insert", "(", "0", ",", "element", ")", "return", "col", "(", "lo", ",", "hi", ")", "=", "(", "0", ",", "len", "(", "col", ")", ")", "while", "(", "lo", "<", "hi", ")", ":", "mid", "=", "int", "(", "(", "(", "hi", "+", "lo", ")", "/", "2", ")", ")", "if", "(", "get", "(", "col", "[", "mid", "]", ")", ">", "get", "(", "element", ")", ")", ":", "hi", "=", "mid", "else", ":", "lo", "=", "(", "mid", "+", "1", ")", "col", ".", "insert", "(", "lo", ",", "element", ")", "return", "col" ]
pythons bisect does not allow for a get/key so it can not be used on a list of dictionaries .
train
false
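A usage sketch showing the get/key the docstring mentions (hypothetical rows): bisection runs on the extracted key, so dictionaries stay sorted by it.

rows = [{'rank': 1}, {'rank': 5}]
insort(rows, {'rank': 3}, get=(lambda r: r['rank']))
print(rows)  # -> [{'rank': 1}, {'rank': 3}, {'rank': 5}]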
45,242
def _ParseOrMerge(text, message, allow_multiple_scalars):
    tokenizer = _Tokenizer(text)
    while (not tokenizer.AtEnd()):
        _MergeField(tokenizer, message, allow_multiple_scalars)
[ "def", "_ParseOrMerge", "(", "text", ",", "message", ",", "allow_multiple_scalars", ")", ":", "tokenizer", "=", "_Tokenizer", "(", "text", ")", "while", "(", "not", "tokenizer", ".", "AtEnd", "(", ")", ")", ":", "_MergeField", "(", "tokenizer", ",", "message", ",", "allow_multiple_scalars", ")" ]
converts an ascii representation of a protocol message into a message .
train
false
45,243
def _compute_residual(proj_op, B_orig, fwd_orig, Q): return (np.dot(proj_op, B_orig) - np.dot(np.dot(Q, fwd_orig), proj_op.T))
[ "def", "_compute_residual", "(", "proj_op", ",", "B_orig", ",", "fwd_orig", ",", "Q", ")", ":", "return", "(", "np", ".", "dot", "(", "proj_op", ",", "B_orig", ")", "-", "np", ".", "dot", "(", "np", ".", "dot", "(", "Q", ",", "fwd_orig", ")", ",", "proj_op", ".", "T", ")", ")" ]
compute the residual .
train
false
45,244
def select_descendant(cache, left, right): right = (always_in if (right is None) else frozenset(right)) for ancestor in left: for descendant in cache.iterdescendants(ancestor): if (descendant in right): (yield descendant)
[ "def", "select_descendant", "(", "cache", ",", "left", ",", "right", ")", ":", "right", "=", "(", "always_in", "if", "(", "right", "is", "None", ")", "else", "frozenset", "(", "right", ")", ")", "for", "ancestor", "in", "left", ":", "for", "descendant", "in", "cache", ".", "iterdescendants", "(", "ancestor", ")", ":", "if", "(", "descendant", "in", "right", ")", ":", "(", "yield", "descendant", ")" ]
select the descendants of nodes in left that also appear in right .
train
false
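A minimal sketch for the record above; TreeCache is a hypothetical stand-in (the real cache type is not part of the record) that provides only the iterdescendants method the function relies on.

class TreeCache(object):
    def __init__(self, children):
        self.children = children  # node -> list of direct children

    def iterdescendants(self, node):
        # Depth-first walk over all descendants of node.
        for child in self.children.get(node, []):
            yield child
            for grandchild in self.iterdescendants(child):
                yield grandchild

cache = TreeCache({'a': ['b', 'c'], 'b': ['d']})
# Descendants of 'a' are b, d, c (depth-first); only those in right survive:
assert list(select_descendant(cache, ['a'], ['c', 'd'])) == ['d', 'c']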
45,245
@pytest.mark.parametrize('fast_writer', [True, False]) def test_write_no_data_ipac(fast_writer): table = ascii.get_reader(Reader=ascii.Ipac) data = table.read('t/no_data_ipac.dat') for test_def in test_defs_no_data: check_write_table(test_def, data, fast_writer) check_write_table_via_table(test_def, data, fast_writer)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'fast_writer'", ",", "[", "True", ",", "False", "]", ")", "def", "test_write_no_data_ipac", "(", "fast_writer", ")", ":", "table", "=", "ascii", ".", "get_reader", "(", "Reader", "=", "ascii", ".", "Ipac", ")", "data", "=", "table", ".", "read", "(", "'t/no_data_ipac.dat'", ")", "for", "test_def", "in", "test_defs_no_data", ":", "check_write_table", "(", "test_def", ",", "data", ",", "fast_writer", ")", "check_write_table_via_table", "(", "test_def", ",", "data", ",", "fast_writer", ")" ]
write an ipac table that contains no data .
train
false
45,246
def p_expression_group(t): t[0] = t[2]
[ "def", "p_expression_group", "(", "t", ")", ":", "t", "[", "0", "]", "=", "t", "[", "2", "]" ]
expression : lparen expression rparen .
train
false
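Context note for the record above: in PLY (Python lex-yacc) the grammar production lives in the function docstring, which the snippet field has stripped; the description field is that docstring. With it restored, the rule would read roughly as below (uppercase token names LPAREN/RPAREN follow PLY convention and are an assumption about the lexer).

def p_expression_group(t):
    'expression : LPAREN expression RPAREN'
    # Propagate the inner expression's value; the parentheses add nothing.
    t[0] = t[2]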
45,247
def _validate_filetype(filetype): if (filetype not in _SELINUX_FILETYPES.keys()): raise SaltInvocationError('Invalid filetype given: {0}'.format(filetype)) return True
[ "def", "_validate_filetype", "(", "filetype", ")", ":", "if", "(", "filetype", "not", "in", "_SELINUX_FILETYPES", ".", "keys", "(", ")", ")", ":", "raise", "SaltInvocationError", "(", "'Invalid filetype given: {0}'", ".", "format", "(", "filetype", ")", ")", "return", "True" ]
checks if the given filetype is a valid selinux filetype specification .
train
false
45,249
def test_lighten(): assert (lighten('#800', 20) == '#e00') assert (lighten('#800', 0) == '#800') assert (lighten('#ffffff', 10) == '#ffffff') assert (lighten('#000000', 10) == '#1a1a1a') assert (lighten('#f3148a', 25) == '#f98dc6') assert (lighten('#121212', 1) == '#151515') assert (lighten('#999999', 100) == '#ffffff') assert (lighten('#1479ac', 8) == '#1893d1')
[ "def", "test_lighten", "(", ")", ":", "assert", "(", "lighten", "(", "'#800'", ",", "20", ")", "==", "'#e00'", ")", "assert", "(", "lighten", "(", "'#800'", ",", "0", ")", "==", "'#800'", ")", "assert", "(", "lighten", "(", "'#ffffff'", ",", "10", ")", "==", "'#ffffff'", ")", "assert", "(", "lighten", "(", "'#000000'", ",", "10", ")", "==", "'#1a1a1a'", ")", "assert", "(", "lighten", "(", "'#f3148a'", ",", "25", ")", "==", "'#f98dc6'", ")", "assert", "(", "lighten", "(", "'#121212'", ",", "1", ")", "==", "'#151515'", ")", "assert", "(", "lighten", "(", "'#999999'", ",", "100", ")", "==", "'#ffffff'", ")", "assert", "(", "lighten", "(", "'#1479ac'", ",", "8", ")", "==", "'#1893d1'", ")" ]
test lighten color function .
train
false
45,250
def h(w, x): neuron1_out = neuron(w[0:3], x) neuron2_out = neuron(w[3:6], x) return neuron(w[6:9], np.array([neuron1_out, neuron2_out]))
[ "def", "h", "(", "w", ",", "x", ")", ":", "neuron1_out", "=", "neuron", "(", "w", "[", "0", ":", "3", "]", ",", "x", ")", "neuron2_out", "=", "neuron", "(", "w", "[", "3", ":", "6", "]", ",", "x", ")", "return", "neuron", "(", "w", "[", "6", ":", "9", "]", ",", "np", ".", "array", "(", "[", "neuron1_out", ",", "neuron2_out", "]", ")", ")" ]
return the output from the three-neuron network with weights w and inputs x .
train
false
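A runnable sketch for the record above. The neuron helper is not part of the record; a logistic unit with a leading bias weight is assumed here, which matches the 3-weights-per-neuron layout (bias plus two inputs).

import numpy as np

def neuron(w, x):
    # Assumed definition (illustration only): sigmoid unit, w[0] is the bias.
    return 1.0 / (1.0 + np.exp(-(w[0] + np.dot(w[1:], x))))

w = np.zeros(9)          # weights 0-2: neuron 1, 3-5: neuron 2, 6-8: output neuron
x = np.array([0.5, -0.5])
assert h(w, x) == 0.5    # all-zero weights give sigmoid(0) at every unit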
45,252
def _need_branch_change(branch, local_branch): return ((branch is not None) and (branch != local_branch))
[ "def", "_need_branch_change", "(", "branch", ",", "local_branch", ")", ":", "return", "(", "(", "branch", "is", "not", "None", ")", "and", "(", "branch", "!=", "local_branch", ")", ")" ]
shorthand for telling when a new branch is needed .
train
false
45,253
@sudo_support def get_new_command(command): dest = command.script_parts[1].split(os.sep) if (dest[(-1)] == ''): dest = dest[:(-1)] if six.PY2: cwd = os.getcwdu() else: cwd = os.getcwd() for directory in dest: if (directory == '.'): continue elif (directory == '..'): cwd = os.path.split(cwd)[0] continue best_matches = get_close_matches(directory, _get_sub_dirs(cwd), cutoff=MAX_ALLOWED_DIFF) if best_matches: cwd = os.path.join(cwd, best_matches[0]) else: return cd_mkdir.get_new_command(command) return u'cd "{0}"'.format(cwd)
[ "@", "sudo_support", "def", "get_new_command", "(", "command", ")", ":", "dest", "=", "command", ".", "script_parts", "[", "1", "]", ".", "split", "(", "os", ".", "sep", ")", "if", "(", "dest", "[", "(", "-", "1", ")", "]", "==", "''", ")", ":", "dest", "=", "dest", "[", ":", "(", "-", "1", ")", "]", "if", "six", ".", "PY2", ":", "cwd", "=", "os", ".", "getcwdu", "(", ")", "else", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "for", "directory", "in", "dest", ":", "if", "(", "directory", "==", "'.'", ")", ":", "continue", "elif", "(", "directory", "==", "'..'", ")", ":", "cwd", "=", "os", ".", "path", ".", "split", "(", "cwd", ")", "[", "0", "]", "continue", "best_matches", "=", "get_close_matches", "(", "directory", ",", "_get_sub_dirs", "(", "cwd", ")", ",", "cutoff", "=", "MAX_ALLOWED_DIFF", ")", "if", "best_matches", ":", "cwd", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "best_matches", "[", "0", "]", ")", "else", ":", "return", "cd_mkdir", ".", "get_new_command", "(", "command", ")", "return", "u'cd \"{0}\"'", ".", "format", "(", "cwd", ")" ]
attempt to rebuild the path string by spellchecking the directories .
train
true
45,254
@celery.task def send_activation_token(user): token = make_token(user=user, operation='activate_account') send_email(subject=_('Account Activation'), recipients=[user.email], text_body=render_template('email/activate_account.txt', user=user, token=token), html_body=render_template('email/activate_account.html', user=user, token=token))
[ "@", "celery", ".", "task", "def", "send_activation_token", "(", "user", ")", ":", "token", "=", "make_token", "(", "user", "=", "user", ",", "operation", "=", "'activate_account'", ")", "send_email", "(", "subject", "=", "_", "(", "'Account Activation'", ")", ",", "recipients", "=", "[", "user", ".", "email", "]", ",", "text_body", "=", "render_template", "(", "'email/activate_account.txt'", ",", "user", "=", "user", ",", "token", "=", "token", ")", ",", "html_body", "=", "render_template", "(", "'email/activate_account.html'", ",", "user", "=", "user", ",", "token", "=", "token", ")", ")" ]
sends the activation token to the user's email address .
train
false
45,255
def to_npy_stack(dirname, x, axis=0): chunks = tuple(((c if (i == axis) else (sum(c),)) for (i, c) in enumerate(x.chunks))) xx = x.rechunk(chunks) if (not os.path.exists(dirname)): os.mkdir(dirname) meta = {'chunks': chunks, 'dtype': x.dtype, 'axis': axis} with open(os.path.join(dirname, 'info'), 'wb') as f: pickle.dump(meta, f) name = ('to-npy-stack-' + str(uuid.uuid1())) dsk = dict((((name, i), (np.save, os.path.join(dirname, ('%d.npy' % i)), key)) for (i, key) in enumerate(core.flatten(xx._keys())))) Array._get(merge(dsk, xx.dask), list(dsk))
[ "def", "to_npy_stack", "(", "dirname", ",", "x", ",", "axis", "=", "0", ")", ":", "chunks", "=", "tuple", "(", "(", "(", "c", "if", "(", "i", "==", "axis", ")", "else", "(", "sum", "(", "c", ")", ",", ")", ")", "for", "(", "i", ",", "c", ")", "in", "enumerate", "(", "x", ".", "chunks", ")", ")", ")", "xx", "=", "x", ".", "rechunk", "(", "chunks", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ")", ":", "os", ".", "mkdir", "(", "dirname", ")", "meta", "=", "{", "'chunks'", ":", "chunks", ",", "'dtype'", ":", "x", ".", "dtype", ",", "'axis'", ":", "axis", "}", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "'info'", ")", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "meta", ",", "f", ")", "name", "=", "(", "'to-npy-stack-'", "+", "str", "(", "uuid", ".", "uuid1", "(", ")", ")", ")", "dsk", "=", "dict", "(", "(", "(", "(", "name", ",", "i", ")", ",", "(", "np", ".", "save", ",", "os", ".", "path", ".", "join", "(", "dirname", ",", "(", "'%d.npy'", "%", "i", ")", ")", ",", "key", ")", ")", "for", "(", "i", ",", "key", ")", "in", "enumerate", "(", "core", ".", "flatten", "(", "xx", ".", "_keys", "(", ")", ")", ")", ")", ")", "Array", ".", "_get", "(", "merge", "(", "dsk", ",", "xx", ".", "dask", ")", ",", "list", "(", "dsk", ")", ")" ]
write dask array to a stack of .npy files .
train
false
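Usage sketch for the record above (requires dask and numpy; '/tmp/stack' is a placeholder path). dask also provides a from_npy_stack counterpart for loading the directory back.

import numpy as np
import dask.array as da

x = da.from_array(np.arange(100).reshape(10, 10), chunks=(5, 10))
to_npy_stack('/tmp/stack', x, axis=0)   # writes info, 0.npy and 1.npy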
45,257
def get_cluster(options, env): cluster_option = options['cluster'] if cluster_option: try: cluster = BenchmarkCluster.from_cluster_yaml(FilePath(cluster_option)) except IOError as e: usage(options, 'Cluster file {!r} not found.'.format(e.filename)) else: try: cluster = BenchmarkCluster.from_acceptance_test_env(env) except KeyError as e: usage(options, 'Environment variable {!r} not set.'.format(e.args[0])) except ValueError as e: usage(options, e.args[0]) except ValidationError as e: usage(options, e.message) return cluster
[ "def", "get_cluster", "(", "options", ",", "env", ")", ":", "cluster_option", "=", "options", "[", "'cluster'", "]", "if", "cluster_option", ":", "try", ":", "cluster", "=", "BenchmarkCluster", ".", "from_cluster_yaml", "(", "FilePath", "(", "cluster_option", ")", ")", "except", "IOError", "as", "e", ":", "usage", "(", "options", ",", "'Cluster file {!r} not found.'", ".", "format", "(", "e", ".", "filename", ")", ")", "else", ":", "try", ":", "cluster", "=", "BenchmarkCluster", ".", "from_acceptance_test_env", "(", "env", ")", "except", "KeyError", "as", "e", ":", "usage", "(", "options", ",", "'Environment variable {!r} not set.'", ".", "format", "(", "e", ".", "args", "[", "0", "]", ")", ")", "except", "ValueError", "as", "e", ":", "usage", "(", "options", ",", "e", ".", "args", "[", "0", "]", ")", "except", "ValidationError", "as", "e", ":", "usage", "(", "options", ",", "e", ".", "message", ")", "return", "cluster" ]
returns a benchmark cluster built from a cluster yaml file or from the acceptance-test environment .
train
false
45,258
def register_formats(formats): for (formatter_name, formatter_class_name) in formats: register_as(formatter_name, formatter_class_name)
[ "def", "register_formats", "(", "formats", ")", ":", "for", "(", "formatter_name", ",", "formatter_class_name", ")", "in", "formats", ":", "register_as", "(", "formatter_name", ",", "formatter_class_name", ")" ]
register many format items into the registry .
train
false
45,260
def buildDMG(): outdir = os.path.join(WORKDIR, 'diskimage') if os.path.exists(outdir): shutil.rmtree(outdir) imagepath = os.path.join(outdir, ('python-%s-macosx' % (getFullVersion(),))) if INCLUDE_TIMESTAMP: imagepath = (imagepath + ('%04d-%02d-%02d' % time.localtime()[:3])) imagepath = (imagepath + '.dmg') os.mkdir(outdir) runCommand(("hdiutil create -volname 'Universal MacPython %s' -srcfolder %s %s" % (getFullVersion(), shellQuote(os.path.join(WORKDIR, 'installer')), shellQuote(imagepath)))) return imagepath
[ "def", "buildDMG", "(", ")", ":", "outdir", "=", "os", ".", "path", ".", "join", "(", "WORKDIR", ",", "'diskimage'", ")", "if", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "shutil", ".", "rmtree", "(", "outdir", ")", "imagepath", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "(", "'python-%s-macosx'", "%", "(", "getFullVersion", "(", ")", ",", ")", ")", ")", "if", "INCLUDE_TIMESTAMP", ":", "imagepath", "=", "(", "imagepath", "+", "(", "'%04d-%02d-%02d'", "%", "time", ".", "localtime", "(", ")", "[", ":", "3", "]", ")", ")", "imagepath", "=", "(", "imagepath", "+", "'.dmg'", ")", "os", ".", "mkdir", "(", "outdir", ")", "runCommand", "(", "(", "\"hdiutil create -volname 'Universal MacPython %s' -srcfolder %s %s\"", "%", "(", "getFullVersion", "(", ")", ",", "shellQuote", "(", "os", ".", "path", ".", "join", "(", "WORKDIR", ",", "'installer'", ")", ")", ",", "shellQuote", "(", "imagepath", ")", ")", ")", ")", "return", "imagepath" ]
create dmg containing the rootdir .
train
false
45,261
def get_options_jsonschema(options): for (item, option) in enumerate(options): if (isinstance(option, dict) and option.get('text')): options[item] = option.get('text') value = {'enum': options} return value
[ "def", "get_options_jsonschema", "(", "options", ")", ":", "for", "(", "item", ",", "option", ")", "in", "enumerate", "(", "options", ")", ":", "if", "(", "isinstance", "(", "option", ",", "dict", ")", "and", "option", ".", "get", "(", "'text'", ")", ")", ":", "options", "[", "item", "]", "=", "option", ".", "get", "(", "'text'", ")", "value", "=", "{", "'enum'", ":", "options", "}", "return", "value" ]
returns multiple choice options for schema questions .
train
false
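Expected behaviour of the record above; note that the function rewrites dict entries of the options list in place.

opts = ['yes', {'text': 'no', 'value': 0}]
assert get_options_jsonschema(opts) == {'enum': ['yes', 'no']}
assert opts == ['yes', 'no']   # the input list was mutated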
45,262
def xsl_post_save(instance, sender, **kwargs): add_xsl_link(instance.resourcebase_ptr)
[ "def", "xsl_post_save", "(", "instance", ",", "sender", ",", "**", "kwargs", ")", ":", "add_xsl_link", "(", "instance", ".", "resourcebase_ptr", ")" ]
add a link to the enriched iso metadata .
train
false
45,264
def login_rate_limit_message(): current_limit = getattr(g, 'view_rate_limit', None) if (current_limit is not None): window_stats = limiter.limiter.get_window_stats(*current_limit) reset_time = datetime.utcfromtimestamp(window_stats[0]) timeout = (reset_time - datetime.utcnow()) return '{timeout}'.format(timeout=format_timedelta(timeout))
[ "def", "login_rate_limit_message", "(", ")", ":", "current_limit", "=", "getattr", "(", "g", ",", "'view_rate_limit'", ",", "None", ")", "if", "(", "current_limit", "is", "not", "None", ")", ":", "window_stats", "=", "limiter", ".", "limiter", ".", "get_window_stats", "(", "*", "current_limit", ")", "reset_time", "=", "datetime", ".", "utcfromtimestamp", "(", "window_stats", "[", "0", "]", ")", "timeout", "=", "(", "reset_time", "-", "datetime", ".", "utcnow", "(", ")", ")", "return", "'{timeout}'", ".", "format", "(", "timeout", "=", "format_timedelta", "(", "timeout", ")", ")" ]
display the amount of time left until the user can access the requested resource again .
train
false
45,265
def parse_int2(text): texts = [x for x in text.split(' ') if (x.strip() != '')] value = list(map(parse_int, texts)) if (len(value) < 1): raise Exception(('Invalid int2 format: %s' % text)) elif (len(value) == 1): return [value[0], value[0]] elif (len(value) > 2): raise Exception(('Too many values in %s: %s' % (text, str(value)))) return value
[ "def", "parse_int2", "(", "text", ")", ":", "texts", "=", "[", "x", "for", "x", "in", "text", ".", "split", "(", "' '", ")", "if", "(", "x", ".", "strip", "(", ")", "!=", "''", ")", "]", "value", "=", "list", "(", "map", "(", "parse_int", ",", "texts", ")", ")", "if", "(", "len", "(", "value", ")", "<", "1", ")", ":", "raise", "Exception", "(", "(", "'Invalid int2 format: %s'", "%", "text", ")", ")", "elif", "(", "len", "(", "value", ")", "==", "1", ")", ":", "return", "[", "value", "[", "0", "]", ",", "value", "[", "0", "]", "]", "elif", "(", "len", "(", "value", ")", ">", "2", ")", ":", "raise", "Exception", "(", "(", "'Too many values in %s: %s'", "%", "(", "text", ",", "str", "(", "value", ")", ")", ")", ")", "return", "value" ]
parse a string to a list of exactly 2 integers .
train
false
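Expected behaviour of the record above (parse_int is not shown in the record and is assumed to parse a single integer string).

assert parse_int2('3') == [3, 3]       # a single value is duplicated
assert parse_int2('3  5') == [3, 5]    # extra whitespace is tolerated
# parse_int2('1 2 3') raises Exception: too many values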
45,266
def get_profile_form(): from mezzanine.conf import settings try: return import_dotted_path(settings.ACCOUNTS_PROFILE_FORM_CLASS) except ImportError: raise ImproperlyConfigured((u'Value for ACCOUNTS_PROFILE_FORM_CLASS could not be imported: %s' % settings.ACCOUNTS_PROFILE_FORM_CLASS))
[ "def", "get_profile_form", "(", ")", ":", "from", "mezzanine", ".", "conf", "import", "settings", "try", ":", "return", "import_dotted_path", "(", "settings", ".", "ACCOUNTS_PROFILE_FORM_CLASS", ")", "except", "ImportError", ":", "raise", "ImproperlyConfigured", "(", "(", "u'Value for ACCOUNTS_PROFILE_FORM_CLASS could not be imported: %s'", "%", "settings", ".", "ACCOUNTS_PROFILE_FORM_CLASS", ")", ")" ]
returns the profile form defined by settings .
train
false
45,267
def _copy_lines_between_files(in_, fout, n=None, skip=0, mode='a', terminal_line=''): mtimesleep() if isinstance(in_, str): fin = open(in_, 'r') else: fin = in_ for i in xrange(skip): fin.readline() i = 0 lines = [] while ((n is None) or (i < n)): l = fin.readline() if ((terminal_line is not None) and (l == terminal_line)): break lines.append(l) i += 1 if isinstance(fout, str): fout = open(fout, mode) fout.write('\n'.join(lines)) fout.flush() if isinstance(in_, str): fin.close() time.sleep(Utils.DEFAULT_SHORT_INTERVAL) return fout
[ "def", "_copy_lines_between_files", "(", "in_", ",", "fout", ",", "n", "=", "None", ",", "skip", "=", "0", ",", "mode", "=", "'a'", ",", "terminal_line", "=", "''", ")", ":", "mtimesleep", "(", ")", "if", "isinstance", "(", "in_", ",", "str", ")", ":", "fin", "=", "open", "(", "in_", ",", "'r'", ")", "else", ":", "fin", "=", "in_", "for", "i", "in", "xrange", "(", "skip", ")", ":", "fin", ".", "readline", "(", ")", "i", "=", "0", "lines", "=", "[", "]", "while", "(", "(", "n", "is", "None", ")", "or", "(", "i", "<", "n", ")", ")", ":", "l", "=", "fin", ".", "readline", "(", ")", "if", "(", "(", "terminal_line", "is", "not", "None", ")", "and", "(", "l", "==", "terminal_line", ")", ")", ":", "break", "lines", ".", "append", "(", "l", ")", "i", "+=", "1", "if", "isinstance", "(", "fout", ",", "str", ")", ":", "fout", "=", "open", "(", "fout", ",", "mode", ")", "fout", ".", "write", "(", "'\\n'", ".", "join", "(", "lines", ")", ")", "fout", ".", "flush", "(", ")", "if", "isinstance", "(", "in_", ",", "str", ")", ":", "fin", ".", "close", "(", ")", "time", ".", "sleep", "(", "Utils", ".", "DEFAULT_SHORT_INTERVAL", ")", "return", "fout" ]
copy lines from one file to another and return the open fout .
train
false
45,268
def isDominated(wvalues1, wvalues2): not_equal = False for (self_wvalue, other_wvalue) in zip(wvalues1, wvalues2): if (self_wvalue > other_wvalue): return False elif (self_wvalue < other_wvalue): not_equal = True return not_equal
[ "def", "isDominated", "(", "wvalues1", ",", "wvalues2", ")", ":", "not_equal", "=", "False", "for", "(", "self_wvalue", ",", "other_wvalue", ")", "in", "zip", "(", "wvalues1", ",", "wvalues2", ")", ":", "if", "(", "self_wvalue", ">", "other_wvalue", ")", ":", "return", "False", "elif", "(", "self_wvalue", "<", "other_wvalue", ")", ":", "not_equal", "=", "True", "return", "not_equal" ]
returns whether or not *wvalues1* dominates *wvalues2* .
train
false
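Quick checks of the Pareto-dominance test above, on weighted fitness tuples where larger values are better.

assert isDominated((1.0, 2.0), (2.0, 2.0))        # strictly worse in one objective
assert not isDominated((2.0, 2.0), (2.0, 2.0))    # equal points do not dominate
assert not isDominated((3.0, 1.0), (2.0, 2.0))    # a trade-off is not dominated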
45,270
@register.inclusion_tag('zinnia/tags/dummy.html') def get_archives_entries_tree(template='zinnia/tags/entries_archives_tree.html'): return {'template': template, 'archives': Entry.published.datetimes('publication_date', 'day', order='ASC')}
[ "@", "register", ".", "inclusion_tag", "(", "'zinnia/tags/dummy.html'", ")", "def", "get_archives_entries_tree", "(", "template", "=", "'zinnia/tags/entries_archives_tree.html'", ")", ":", "return", "{", "'template'", ":", "template", ",", "'archives'", ":", "Entry", ".", "published", ".", "datetimes", "(", "'publication_date'", ",", "'day'", ",", "order", "=", "'ASC'", ")", "}" ]
return archives entries as a tree .
train
false
45,271
def _log_line_from_hadoop(line, level=None): log.log((level or logging.INFO), (' %s' % line))
[ "def", "_log_line_from_hadoop", "(", "line", ",", "level", "=", "None", ")", ":", "log", ".", "log", "(", "(", "level", "or", "logging", ".", "INFO", ")", ",", "(", "' %s'", "%", "line", ")", ")" ]
log <line> .
train
false
45,272
def build_control_amp_service(test_case, reactor=None): if (reactor is None): reactor = Clock() cluster_state = ClusterStateService(reactor) cluster_state.startService() test_case.addCleanup(cluster_state.stopService) persistence_service = ConfigurationPersistenceService(reactor, test_case.make_temporary_directory()) persistence_service.startService() test_case.addCleanup(persistence_service.stopService) return ControlAMPService(reactor, cluster_state, persistence_service, TCP4ServerEndpoint(MemoryReactor(), 1234), ClientContextFactory())
[ "def", "build_control_amp_service", "(", "test_case", ",", "reactor", "=", "None", ")", ":", "if", "(", "reactor", "is", "None", ")", ":", "reactor", "=", "Clock", "(", ")", "cluster_state", "=", "ClusterStateService", "(", "reactor", ")", "cluster_state", ".", "startService", "(", ")", "test_case", ".", "addCleanup", "(", "cluster_state", ".", "stopService", ")", "persistence_service", "=", "ConfigurationPersistenceService", "(", "reactor", ",", "test_case", ".", "make_temporary_directory", "(", ")", ")", "persistence_service", ".", "startService", "(", ")", "test_case", ".", "addCleanup", "(", "persistence_service", ".", "stopService", ")", "return", "ControlAMPService", "(", "reactor", ",", "cluster_state", ",", "persistence_service", ",", "TCP4ServerEndpoint", "(", "MemoryReactor", "(", ")", ",", "1234", ")", ",", "ClientContextFactory", "(", ")", ")" ]
create a new controlampservice .
train
false
45,273
def dataset_range(dataset, start, stop): if (dataset.X is None): return DenseDesignMatrix(X=None, y=None, view_converter=dataset.view_converter) X = dataset.X[start:stop, :].copy() if (dataset.y is None): y = None else: if (dataset.y.ndim == 2): y = dataset.y[start:stop, :].copy() else: y = dataset.y[start:stop].copy() assert (X.shape[0] == y.shape[0]) assert (X.shape[0] == (stop - start)) topo = dataset.get_topological_view(X) rval = DenseDesignMatrix(topo_view=topo, y=y) rval.adjust_for_viewer = dataset.adjust_for_viewer return rval
[ "def", "dataset_range", "(", "dataset", ",", "start", ",", "stop", ")", ":", "if", "(", "dataset", ".", "X", "is", "None", ")", ":", "return", "DenseDesignMatrix", "(", "X", "=", "None", ",", "y", "=", "None", ",", "view_converter", "=", "dataset", ".", "view_converter", ")", "X", "=", "dataset", ".", "X", "[", "start", ":", "stop", ",", ":", "]", ".", "copy", "(", ")", "if", "(", "dataset", ".", "y", "is", "None", ")", ":", "y", "=", "None", "else", ":", "if", "(", "dataset", ".", "y", ".", "ndim", "==", "2", ")", ":", "y", "=", "dataset", ".", "y", "[", "start", ":", "stop", ",", ":", "]", ".", "copy", "(", ")", "else", ":", "y", "=", "dataset", ".", "y", "[", "start", ":", "stop", "]", ".", "copy", "(", ")", "assert", "(", "X", ".", "shape", "[", "0", "]", "==", "y", ".", "shape", "[", "0", "]", ")", "assert", "(", "X", ".", "shape", "[", "0", "]", "==", "(", "stop", "-", "start", ")", ")", "topo", "=", "dataset", ".", "get_topological_view", "(", "X", ")", "rval", "=", "DenseDesignMatrix", "(", "topo_view", "=", "topo", ",", "y", "=", "y", ")", "rval", ".", "adjust_for_viewer", "=", "dataset", ".", "adjust_for_viewer", "return", "rval" ]
returns a new dataset formed by extracting a range of examples from an existing dataset .
train
false
45,275
def longify(listofints): if (sys.version_info.major < 3): return map(long, listofints) return listofints
[ "def", "longify", "(", "listofints", ")", ":", "if", "(", "sys", ".", "version_info", ".", "major", "<", "3", ")", ":", "return", "map", "(", "long", ",", "listofints", ")", "return", "listofints" ]
list of ints => list of longs .
train
false
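Behaviour sketch for the record above across the two major Python versions.

values = longify([1, 2])
# Python 2: [1L, 2L] (each int coerced to long)
# Python 3: the original [1, 2], returned unchanged, since int is already unbounded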
45,276
def parse_as_when(raw): when_classes = [TimeSpan, Time, DateSpan, Date] keys_for_type = {tuple(cls_.json_keys): cls_ for cls_ in when_classes} given_keys = tuple((set(raw.keys()) - set(['object']))) when_type = keys_for_type.get(given_keys) if (when_type is None): raise ValueError('When object had invalid keys.') return when_type.parse(raw)
[ "def", "parse_as_when", "(", "raw", ")", ":", "when_classes", "=", "[", "TimeSpan", ",", "Time", ",", "DateSpan", ",", "Date", "]", "keys_for_type", "=", "{", "tuple", "(", "cls_", ".", "json_keys", ")", ":", "cls_", "for", "cls_", "in", "when_classes", "}", "given_keys", "=", "tuple", "(", "(", "set", "(", "raw", ".", "keys", "(", ")", ")", "-", "set", "(", "[", "'object'", "]", ")", ")", ")", "when_type", "=", "keys_for_type", ".", "get", "(", "given_keys", ")", "if", "(", "when_type", "is", "None", ")", ":", "raise", "ValueError", "(", "'When object had invalid keys.'", ")", "return", "when_type", ".", "parse", "(", "raw", ")" ]
tries to parse a dictionary into the corresponding when object (timespan, time, datespan or date) .
train
false
45,277
def iostat(): io_stat = ['IO status:'] io_stat += utils.system_output('iostat', verbose=False).splitlines() io_stat += ['\n'] return io_stat
[ "def", "iostat", "(", ")", ":", "io_stat", "=", "[", "'IO status:'", "]", "io_stat", "+=", "utils", ".", "system_output", "(", "'iostat'", ",", "verbose", "=", "False", ")", ".", "splitlines", "(", ")", "io_stat", "+=", "[", "'\\n'", "]", "return", "io_stat" ]
gather and return io stats .
train
false
45,281
def test_inline_css(Chart): css = '{{ id }}text { fill: #bedead; }\n' config = Config() config.css.append(('inline:' + css)) chart = Chart(config) chart.add('/', [10, 1, 5]) svg = chart.render().decode('utf-8') assert ('#bedead' in svg)
[ "def", "test_inline_css", "(", "Chart", ")", ":", "css", "=", "'{{ id }}text { fill: #bedead; }\\n'", "config", "=", "Config", "(", ")", "config", ".", "css", ".", "append", "(", "(", "'inline:'", "+", "css", ")", ")", "chart", "=", "Chart", "(", "config", ")", "chart", ".", "add", "(", "'/'", ",", "[", "10", ",", "1", ",", "5", "]", ")", "svg", "=", "chart", ".", "render", "(", ")", ".", "decode", "(", "'utf-8'", ")", "assert", "(", "'#bedead'", "in", "svg", ")" ]
test inline css option .
train
false
45,282
def _get_footer_size(file_obj): file_obj.seek((-8), 2) tup = struct.unpack('<i', file_obj.read(4)) return tup[0]
[ "def", "_get_footer_size", "(", "file_obj", ")", ":", "file_obj", ".", "seek", "(", "(", "-", "8", ")", ",", "2", ")", "tup", "=", "struct", ".", "unpack", "(", "'<i'", ",", "file_obj", ".", "read", "(", "4", ")", ")", "return", "tup", "[", "0", "]" ]
read the footer size in bytes .
train
true
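Usage sketch; the assumption here is a Parquet-style file layout, where the last 8 bytes hold a little-endian int32 footer length followed by a 4-byte magic, and 'example.parquet' is only a placeholder filename.

with open('example.parquet', 'rb') as f:
    footer_len = _get_footer_size(f)   # bytes occupied by the footer metadata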
45,283
def check_no_fit_attributes_set_in_init(name, Estimator): estimator = Estimator() for attr in dir(estimator): if (attr.endswith('_') and (not attr.startswith('__'))): assert_false(hasattr(estimator, attr), "By convention, attributes ending with '_' are estimated from data in scikit-learn. Consequently they should not be initialized in the constructor of an estimator but in the fit method. Attribute {!r} was found in estimator {}".format(attr, name))
[ "def", "check_no_fit_attributes_set_in_init", "(", "name", ",", "Estimator", ")", ":", "estimator", "=", "Estimator", "(", ")", "for", "attr", "in", "dir", "(", "estimator", ")", ":", "if", "(", "attr", ".", "endswith", "(", "'_'", ")", "and", "(", "not", "attr", ".", "startswith", "(", "'__'", ")", ")", ")", ":", "assert_false", "(", "hasattr", "(", "estimator", ",", "attr", ")", ",", "\"By convention, attributes ending with '_' are estimated from data in scikit-learn. Consequently they should not be initialized in the constructor of an estimator but in the fit method. Attribute {!r} was found in estimator {}\"", ".", "format", "(", "attr", ",", "name", ")", ")" ]
check that the estimator sets no attribute ending with '_' in __init__ ; such attributes are estimated from data and should only be set by fit .
train
false
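A sketch of the convention the record above enforces; assert_false comes from sklearn's testing utilities, so the failure surfaces as an AssertionError.

class BadEstimator(object):
    def __init__(self):
        self.coef_ = None   # trailing underscore: should only appear after fit()

# Raises AssertionError: attribute 'coef_' was found in estimator BadEstimator.
check_no_fit_attributes_set_in_init('BadEstimator', BadEstimator)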
45,284
def generate_sub_moved_events(src_dir_path, dest_dir_path): for (root, directories, filenames) in os.walk(dest_dir_path): for directory in directories: full_path = os.path.join(root, directory) renamed_path = (full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None) (yield DirMovedEvent(renamed_path, full_path)) for filename in filenames: full_path = os.path.join(root, filename) renamed_path = (full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None) (yield FileMovedEvent(renamed_path, full_path))
[ "def", "generate_sub_moved_events", "(", "src_dir_path", ",", "dest_dir_path", ")", ":", "for", "(", "root", ",", "directories", ",", "filenames", ")", "in", "os", ".", "walk", "(", "dest_dir_path", ")", ":", "for", "directory", "in", "directories", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "directory", ")", "renamed_path", "=", "(", "full_path", ".", "replace", "(", "dest_dir_path", ",", "src_dir_path", ")", "if", "src_dir_path", "else", "None", ")", "(", "yield", "DirMovedEvent", "(", "renamed_path", ",", "full_path", ")", ")", "for", "filename", "in", "filenames", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "renamed_path", "=", "(", "full_path", ".", "replace", "(", "dest_dir_path", ",", "src_dir_path", ")", "if", "src_dir_path", "else", "None", ")", "(", "yield", "FileMovedEvent", "(", "renamed_path", ",", "full_path", ")", ")" ]
generates an event list of :class:dirmovedevent and :class:filemovedevent objects for all the files and directories within the given moved directory that were moved along with the directory .
train
false
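Usage sketch for the record above; DirMovedEvent and FileMovedEvent are watchdog's event classes, and the paths are placeholders.

# After a directory has been moved on disk, synthesize per-entry move events:
for event in generate_sub_moved_events('/old/photos', '/new/photos'):
    print(event.src_path, '->', event.dest_path)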
45,285
def _get_vif_name(vif): if (vif.get('devname', None) is not None): return vif['devname'] return ('nic' + vif['id'])[:model.NIC_NAME_LEN]
[ "def", "_get_vif_name", "(", "vif", ")", ":", "if", "(", "vif", ".", "get", "(", "'devname'", ",", "None", ")", "is", "not", "None", ")", ":", "return", "vif", "[", "'devname'", "]", "return", "(", "'nic'", "+", "vif", "[", "'id'", "]", ")", "[", ":", "model", ".", "NIC_NAME_LEN", "]" ]
get a vif device name .
train
false
45,287
def kendall_pval(tau, n): test_stat = (tau / (((2 * ((2 * n) + 5)) / float(((9 * n) * (n - 1)))) ** 0.5)) return normprob(test_stat, direction='two-sided')
[ "def", "kendall_pval", "(", "tau", ",", "n", ")", ":", "test_stat", "=", "(", "tau", "/", "(", "(", "(", "2", "*", "(", "(", "2", "*", "n", ")", "+", "5", ")", ")", "/", "float", "(", "(", "(", "9", "*", "n", ")", "*", "(", "n", "-", "1", ")", ")", ")", ")", "**", "0.5", ")", ")", "return", "normprob", "(", "test_stat", ",", "direction", "=", "'two-sided'", ")" ]
calculate the p-value for the passed tau and vector length n .
train
false
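Usage sketch; normprob is not shown in the record and is assumed to return a two-sided standard-normal tail probability. The statistic is the usual large-sample approximation tau / sqrt(2*(2n+5) / (9n(n-1))).

p = kendall_pval(0.5, 20)   # tau = 0.5 observed over n = 20 paired samples
# Smaller p gives stronger evidence against the null of tau = 0.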