id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
41,988
def conv1d_sc(input, filters, image_shape=None, filter_shape=None, border_mode='valid', subsample=(1,), filter_flip=True): if (border_mode not in ('valid', 0, (0,))): raise RuntimeError(('Unsupported border_mode for conv1d_sc: %s' % border_mode)) if (image_shape is None): image_shape_sc = None else: image_shape_sc = (image_shape[0], 1, image_shape[1], image_shape[2]) if (filter_shape is None): filter_shape_sc = None else: filter_shape_sc = (filter_shape[0], 1, filter_shape[1], filter_shape[2]) input_sc = input.dimshuffle(0, 'x', 1, 2) filters_sc = filters.dimshuffle(0, 'x', 1, 2)[:, :, ::(-1), :] conved = T.nnet.conv2d(input_sc, filters_sc, image_shape_sc, filter_shape_sc, subsample=(1, subsample[0]), filter_flip=filter_flip) return conved[:, :, 0, :]
[ "def", "conv1d_sc", "(", "input", ",", "filters", ",", "image_shape", "=", "None", ",", "filter_shape", "=", "None", ",", "border_mode", "=", "'valid'", ",", "subsample", "=", "(", "1", ",", ")", ",", "filter_flip", "=", "True", ")", ":", "if", "(", "border_mode", "not", "in", "(", "'valid'", ",", "0", ",", "(", "0", ",", ")", ")", ")", ":", "raise", "RuntimeError", "(", "(", "'Unsupported border_mode for conv1d_sc: %s'", "%", "border_mode", ")", ")", "if", "(", "image_shape", "is", "None", ")", ":", "image_shape_sc", "=", "None", "else", ":", "image_shape_sc", "=", "(", "image_shape", "[", "0", "]", ",", "1", ",", "image_shape", "[", "1", "]", ",", "image_shape", "[", "2", "]", ")", "if", "(", "filter_shape", "is", "None", ")", ":", "filter_shape_sc", "=", "None", "else", ":", "filter_shape_sc", "=", "(", "filter_shape", "[", "0", "]", ",", "1", ",", "filter_shape", "[", "1", "]", ",", "filter_shape", "[", "2", "]", ")", "input_sc", "=", "input", ".", "dimshuffle", "(", "0", ",", "'x'", ",", "1", ",", "2", ")", "filters_sc", "=", "filters", ".", "dimshuffle", "(", "0", ",", "'x'", ",", "1", ",", "2", ")", "[", ":", ",", ":", ",", ":", ":", "(", "-", "1", ")", ",", ":", "]", "conved", "=", "T", ".", "nnet", ".", "conv2d", "(", "input_sc", ",", "filters_sc", ",", "image_shape_sc", ",", "filter_shape_sc", ",", "subsample", "=", "(", "1", ",", "subsample", "[", "0", "]", ")", ",", "filter_flip", "=", "filter_flip", ")", "return", "conved", "[", ":", ",", ":", ",", "0", ",", ":", "]" ]
using conv2d with a single input channel .
train
false
41,989
@task def post_index(new_index, old_index, alias, index_name, settings): _print('Optimizing, updating settings and aliases.', alias) ES.indices.optimize(index=new_index) ES.indices.put_settings(index=new_index, body=settings) actions = [{'add': {'index': new_index, 'alias': alias}}] if old_index: actions.append({'remove': {'index': old_index, 'alias': alias}}) ES.indices.update_aliases(body=dict(actions=actions)) _print('Unflagging the database.', alias) Reindexing.unflag_reindexing(alias=alias) _print('Removing index {index}.'.format(index=old_index), alias) if (old_index and ES.indices.exists(index=old_index)): ES.indices.delete(index=old_index) alias_output = '' for indexer in INDEXERS: alias = ES_INDEXES[indexer.get_mapping_type_name()] alias_output += (unicode(ES.indices.get_aliases(index=alias)) + '\n') _print('Reindexation done. Current aliases configuration: {output}\n'.format(output=alias_output), alias)
[ "@", "task", "def", "post_index", "(", "new_index", ",", "old_index", ",", "alias", ",", "index_name", ",", "settings", ")", ":", "_print", "(", "'Optimizing, updating settings and aliases.'", ",", "alias", ")", "ES", ".", "indices", ".", "optimize", "(", "index", "=", "new_index", ")", "ES", ".", "indices", ".", "put_settings", "(", "index", "=", "new_index", ",", "body", "=", "settings", ")", "actions", "=", "[", "{", "'add'", ":", "{", "'index'", ":", "new_index", ",", "'alias'", ":", "alias", "}", "}", "]", "if", "old_index", ":", "actions", ".", "append", "(", "{", "'remove'", ":", "{", "'index'", ":", "old_index", ",", "'alias'", ":", "alias", "}", "}", ")", "ES", ".", "indices", ".", "update_aliases", "(", "body", "=", "dict", "(", "actions", "=", "actions", ")", ")", "_print", "(", "'Unflagging the database.'", ",", "alias", ")", "Reindexing", ".", "unflag_reindexing", "(", "alias", "=", "alias", ")", "_print", "(", "'Removing index {index}.'", ".", "format", "(", "index", "=", "old_index", ")", ",", "alias", ")", "if", "(", "old_index", "and", "ES", ".", "indices", ".", "exists", "(", "index", "=", "old_index", ")", ")", ":", "ES", ".", "indices", ".", "delete", "(", "index", "=", "old_index", ")", "alias_output", "=", "''", "for", "indexer", "in", "INDEXERS", ":", "alias", "=", "ES_INDEXES", "[", "indexer", ".", "get_mapping_type_name", "(", ")", "]", "alias_output", "+=", "(", "unicode", "(", "ES", ".", "indices", ".", "get_aliases", "(", "index", "=", "alias", ")", ")", "+", "'\\n'", ")", "_print", "(", "'Reindexation done. Current aliases configuration: {output}\\n'", ".", "format", "(", "output", "=", "alias_output", ")", ",", "alias", ")" ]
perform post-indexing tasks: * optimize .
train
false
41,990
def process_settings(pelicanobj): inline_settings = {} inline_settings['config'] = {'[]': ('', 'pelican-inline')} try: settings = pelicanobj.settings['MD_INLINE'] except: settings = None if isinstance(settings, dict): inline_settings['config'].update(settings) return inline_settings
[ "def", "process_settings", "(", "pelicanobj", ")", ":", "inline_settings", "=", "{", "}", "inline_settings", "[", "'config'", "]", "=", "{", "'[]'", ":", "(", "''", ",", "'pelican-inline'", ")", "}", "try", ":", "settings", "=", "pelicanobj", ".", "settings", "[", "'MD_INLINE'", "]", "except", ":", "settings", "=", "None", "if", "isinstance", "(", "settings", ",", "dict", ")", ":", "inline_settings", "[", "'config'", "]", ".", "update", "(", "settings", ")", "return", "inline_settings" ]
sets user specified settings .
train
true
41,991
def _reconstitute(config, full_path): try: renewal_candidate = storage.RenewableCert(full_path, config) except (errors.CertStorageError, IOError) as exc: logger.warning(exc) logger.warning('Renewal configuration file %s is broken. Skipping.', full_path) logger.debug('Traceback was:\n%s', traceback.format_exc()) return None if ('renewalparams' not in renewal_candidate.configuration): logger.warning('Renewal configuration file %s lacks renewalparams. Skipping.', full_path) return None renewalparams = renewal_candidate.configuration['renewalparams'] if ('authenticator' not in renewalparams): logger.warning('Renewal configuration file %s does not specify an authenticator. Skipping.', full_path) return None try: restore_required_config_elements(config, renewalparams) _restore_plugin_configs(config, renewalparams) except (ValueError, errors.Error) as error: logger.warning('An error occurred while parsing %s. The error was %s. Skipping the file.', full_path, error.message) logger.debug('Traceback was:\n%s', traceback.format_exc()) return None try: config.domains = [util.enforce_domain_sanity(d) for d in renewal_candidate.names()] except errors.ConfigurationError as error: logger.warning('Renewal configuration file %s references a cert that contains an invalid domain name. The problem was: %s. Skipping.', full_path, error) return None return renewal_candidate
[ "def", "_reconstitute", "(", "config", ",", "full_path", ")", ":", "try", ":", "renewal_candidate", "=", "storage", ".", "RenewableCert", "(", "full_path", ",", "config", ")", "except", "(", "errors", ".", "CertStorageError", ",", "IOError", ")", "as", "exc", ":", "logger", ".", "warning", "(", "exc", ")", "logger", ".", "warning", "(", "'Renewal configuration file %s is broken. Skipping.'", ",", "full_path", ")", "logger", ".", "debug", "(", "'Traceback was:\\n%s'", ",", "traceback", ".", "format_exc", "(", ")", ")", "return", "None", "if", "(", "'renewalparams'", "not", "in", "renewal_candidate", ".", "configuration", ")", ":", "logger", ".", "warning", "(", "'Renewal configuration file %s lacks renewalparams. Skipping.'", ",", "full_path", ")", "return", "None", "renewalparams", "=", "renewal_candidate", ".", "configuration", "[", "'renewalparams'", "]", "if", "(", "'authenticator'", "not", "in", "renewalparams", ")", ":", "logger", ".", "warning", "(", "'Renewal configuration file %s does not specify an authenticator. Skipping.'", ",", "full_path", ")", "return", "None", "try", ":", "restore_required_config_elements", "(", "config", ",", "renewalparams", ")", "_restore_plugin_configs", "(", "config", ",", "renewalparams", ")", "except", "(", "ValueError", ",", "errors", ".", "Error", ")", "as", "error", ":", "logger", ".", "warning", "(", "'An error occurred while parsing %s. The error was %s. Skipping the file.'", ",", "full_path", ",", "error", ".", "message", ")", "logger", ".", "debug", "(", "'Traceback was:\\n%s'", ",", "traceback", ".", "format_exc", "(", ")", ")", "return", "None", "try", ":", "config", ".", "domains", "=", "[", "util", ".", "enforce_domain_sanity", "(", "d", ")", "for", "d", "in", "renewal_candidate", ".", "names", "(", ")", "]", "except", "errors", ".", "ConfigurationError", "as", "error", ":", "logger", ".", "warning", "(", "'Renewal configuration file %s references a cert that contains an invalid domain name. The problem was: %s. 
Skipping.'", ",", "full_path", ",", "error", ")", "return", "None", "return", "renewal_candidate" ]
reconstitute an :class: .
train
false
41,992
def libvlc_media_player_set_rate(p_mi, rate): f = (_Cfunctions.get('libvlc_media_player_set_rate', None) or _Cfunction('libvlc_media_player_set_rate', ((1,), (1,)), None, ctypes.c_int, MediaPlayer, ctypes.c_float)) return f(p_mi, rate)
[ "def", "libvlc_media_player_set_rate", "(", "p_mi", ",", "rate", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_player_set_rate'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_player_set_rate'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "ctypes", ".", "c_int", ",", "MediaPlayer", ",", "ctypes", ".", "c_float", ")", ")", "return", "f", "(", "p_mi", ",", "rate", ")" ]
set movie play rate .
train
true
41,993
def _expand_one_key_dictionary(_dict): key = next(six.iterkeys(_dict)) value = _dict[key] return (key, value)
[ "def", "_expand_one_key_dictionary", "(", "_dict", ")", ":", "key", "=", "next", "(", "six", ".", "iterkeys", "(", "_dict", ")", ")", "value", "=", "_dict", "[", "key", "]", "return", "(", "key", ",", "value", ")" ]
returns the only one key and its value from a dictionary .
train
true
41,995
def method_mock(request, cls, method_name, **kwargs): _patch = patch.object(cls, method_name, **kwargs) request.addfinalizer(_patch.stop) return _patch.start()
[ "def", "method_mock", "(", "request", ",", "cls", ",", "method_name", ",", "**", "kwargs", ")", ":", "_patch", "=", "patch", ".", "object", "(", "cls", ",", "method_name", ",", "**", "kwargs", ")", "request", ".", "addfinalizer", "(", "_patch", ".", "stop", ")", "return", "_patch", ".", "start", "(", ")" ]
return a mock for method *method_name* on *cls* where the patch is reversed after pytest uses it .
train
false
41,996
def initialize_nick_templates(in_template, out_template): regex_string = fnmatch.translate(in_template) regex_string = (regex_string[:(-7)] + '(?:[\\n\\r]*?)\\Z(?ms)') regex_args = [match.group(2) for match in _RE_NICK_ARG.finditer(regex_string)] temp_args = [match.group(2) for match in _RE_NICK_TEMPLATE_ARG.finditer(out_template)] if (set(regex_args) != set(temp_args)): raise NickTemplateInvalid regex_string = _RE_NICK_SPACE.sub('\\s+', regex_string) regex_string = _RE_NICK_ARG.sub((lambda m: ('(?P<arg%s>.+?)' % m.group(2))), regex_string) template_string = _RE_NICK_TEMPLATE_ARG.sub((lambda m: ('{arg%s}' % m.group(2))), out_template) return (regex_string, template_string)
[ "def", "initialize_nick_templates", "(", "in_template", ",", "out_template", ")", ":", "regex_string", "=", "fnmatch", ".", "translate", "(", "in_template", ")", "regex_string", "=", "(", "regex_string", "[", ":", "(", "-", "7", ")", "]", "+", "'(?:[\\\\n\\\\r]*?)\\\\Z(?ms)'", ")", "regex_args", "=", "[", "match", ".", "group", "(", "2", ")", "for", "match", "in", "_RE_NICK_ARG", ".", "finditer", "(", "regex_string", ")", "]", "temp_args", "=", "[", "match", ".", "group", "(", "2", ")", "for", "match", "in", "_RE_NICK_TEMPLATE_ARG", ".", "finditer", "(", "out_template", ")", "]", "if", "(", "set", "(", "regex_args", ")", "!=", "set", "(", "temp_args", ")", ")", ":", "raise", "NickTemplateInvalid", "regex_string", "=", "_RE_NICK_SPACE", ".", "sub", "(", "'\\\\s+'", ",", "regex_string", ")", "regex_string", "=", "_RE_NICK_ARG", ".", "sub", "(", "(", "lambda", "m", ":", "(", "'(?P<arg%s>.+?)'", "%", "m", ".", "group", "(", "2", ")", ")", ")", ",", "regex_string", ")", "template_string", "=", "_RE_NICK_TEMPLATE_ARG", ".", "sub", "(", "(", "lambda", "m", ":", "(", "'{arg%s}'", "%", "m", ".", "group", "(", "2", ")", ")", ")", ",", "out_template", ")", "return", "(", "regex_string", ",", "template_string", ")" ]
initialize the nick templates for matching and remapping a string .
train
false
41,997
def method2jpg(output, mx, raw=False): buff = raw if (raw == False): buff = method2dot(mx) method2format(output, 'jpg', mx, buff)
[ "def", "method2jpg", "(", "output", ",", "mx", ",", "raw", "=", "False", ")", ":", "buff", "=", "raw", "if", "(", "raw", "==", "False", ")", ":", "buff", "=", "method2dot", "(", "mx", ")", "method2format", "(", "output", ",", "'jpg'", ",", "mx", ",", "buff", ")" ]
export method to a jpg file format .
train
true
41,998
def runinplace(command, infile): if (not isinstance(command, list)): command = shlex.split(command) tmpdir = None if ('%2' in command): tmpdir = tempfile.mkdtemp(prefix='nikola') tmpfname = os.path.join(tmpdir, os.path.basename(infile)) try: list_replace(command, '%1', infile) if tmpdir: list_replace(command, '%2', tmpfname) subprocess.check_call(command) if tmpdir: shutil.move(tmpfname, infile) finally: if tmpdir: shutil.rmtree(tmpdir)
[ "def", "runinplace", "(", "command", ",", "infile", ")", ":", "if", "(", "not", "isinstance", "(", "command", ",", "list", ")", ")", ":", "command", "=", "shlex", ".", "split", "(", "command", ")", "tmpdir", "=", "None", "if", "(", "'%2'", "in", "command", ")", ":", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'nikola'", ")", "tmpfname", "=", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "os", ".", "path", ".", "basename", "(", "infile", ")", ")", "try", ":", "list_replace", "(", "command", ",", "'%1'", ",", "infile", ")", "if", "tmpdir", ":", "list_replace", "(", "command", ",", "'%2'", ",", "tmpfname", ")", "subprocess", ".", "check_call", "(", "command", ")", "if", "tmpdir", ":", "shutil", ".", "move", "(", "tmpfname", ",", "infile", ")", "finally", ":", "if", "tmpdir", ":", "shutil", ".", "rmtree", "(", "tmpdir", ")" ]
run a command in-place on a file .
train
false
41,999
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get the repository constructor .
train
false
42,000
def get_bin_path(): return os.path.dirname(sys.executable)
[ "def", "get_bin_path", "(", ")", ":", "return", "os", ".", "path", ".", "dirname", "(", "sys", ".", "executable", ")" ]
get parent path of current python binary .
train
false
42,001
def _connect_nodes(graph, srcnode, destnode, connection_info): data = graph.get_edge_data(srcnode, destnode, default=None) if (not data): data = {u'connect': connection_info} graph.add_edges_from([(srcnode, destnode, data)]) else: data[u'connect'].extend(connection_info)
[ "def", "_connect_nodes", "(", "graph", ",", "srcnode", ",", "destnode", ",", "connection_info", ")", ":", "data", "=", "graph", ".", "get_edge_data", "(", "srcnode", ",", "destnode", ",", "default", "=", "None", ")", "if", "(", "not", "data", ")", ":", "data", "=", "{", "u'connect'", ":", "connection_info", "}", "graph", ".", "add_edges_from", "(", "[", "(", "srcnode", ",", "destnode", ",", "data", ")", "]", ")", "else", ":", "data", "[", "u'connect'", "]", ".", "extend", "(", "connection_info", ")" ]
add a connection between two nodes .
train
false
42,002
def super_float(text): if (text[(-1)] in SUFFIXES): return (float(text[:(-1)]) * SUFFIXES[text[(-1)]]) else: return float(text)
[ "def", "super_float", "(", "text", ")", ":", "if", "(", "text", "[", "(", "-", "1", ")", "]", "in", "SUFFIXES", ")", ":", "return", "(", "float", "(", "text", "[", ":", "(", "-", "1", ")", "]", ")", "*", "SUFFIXES", "[", "text", "[", "(", "-", "1", ")", "]", "]", ")", "else", ":", "return", "float", "(", "text", ")" ]
like float .
train
false
42,003
@command('file', optional=['input-filename', 'output-filename']) def file_command(options, input_filename=None, output_filename=None): with open_input_file(input_filename) as input_file: descriptor_content = input_file.read() if output_filename: output_file = open(output_filename, 'w') else: output_file = sys.stdout file_descriptor = protobuf.decode_message(descriptor.FileDescriptor, descriptor_content) generate_python.format_python_file(file_descriptor, output_file)
[ "@", "command", "(", "'file'", ",", "optional", "=", "[", "'input-filename'", ",", "'output-filename'", "]", ")", "def", "file_command", "(", "options", ",", "input_filename", "=", "None", ",", "output_filename", "=", "None", ")", ":", "with", "open_input_file", "(", "input_filename", ")", "as", "input_file", ":", "descriptor_content", "=", "input_file", ".", "read", "(", ")", "if", "output_filename", ":", "output_file", "=", "open", "(", "output_filename", ",", "'w'", ")", "else", ":", "output_file", "=", "sys", ".", "stdout", "file_descriptor", "=", "protobuf", ".", "decode_message", "(", "descriptor", ".", "FileDescriptor", ",", "descriptor_content", ")", "generate_python", ".", "format_python_file", "(", "file_descriptor", ",", "output_file", ")" ]
generate a single descriptor file to python .
train
false
42,004
def _stub_islink(path): return False
[ "def", "_stub_islink", "(", "path", ")", ":", "return", "False" ]
always return c{false} if the operating system does not support symlinks .
train
false
42,005
def associative(query): for row in query: (yield query.record(row))
[ "def", "associative", "(", "query", ")", ":", "for", "row", "in", "query", ":", "(", "yield", "query", ".", "record", "(", "row", ")", ")" ]
yields query rows as dictionaries of -items .
train
false
42,006
def available_features(image=None): return _get_components('Feature Name', 'Features', 'Disabled')
[ "def", "available_features", "(", "image", "=", "None", ")", ":", "return", "_get_components", "(", "'Feature Name'", ",", "'Features'", ",", "'Disabled'", ")" ]
list the features available on the system args: image : the path to the root directory of an offline windows image .
train
false
42,007
def exit_subprocess(): os._exit(0)
[ "def", "exit_subprocess", "(", ")", ":", "os", ".", "_exit", "(", "0", ")" ]
use os .
train
false
42,008
def _parseError(error, errorNamespace): condition = None text = None textLang = None appCondition = None for element in error.elements(): if (element.uri == errorNamespace): if (element.name == 'text'): text = _getText(element) textLang = element.getAttribute((NS_XML, 'lang')) else: condition = element.name else: appCondition = element return {'condition': condition, 'text': text, 'textLang': textLang, 'appCondition': appCondition}
[ "def", "_parseError", "(", "error", ",", "errorNamespace", ")", ":", "condition", "=", "None", "text", "=", "None", "textLang", "=", "None", "appCondition", "=", "None", "for", "element", "in", "error", ".", "elements", "(", ")", ":", "if", "(", "element", ".", "uri", "==", "errorNamespace", ")", ":", "if", "(", "element", ".", "name", "==", "'text'", ")", ":", "text", "=", "_getText", "(", "element", ")", "textLang", "=", "element", ".", "getAttribute", "(", "(", "NS_XML", ",", "'lang'", ")", ")", "else", ":", "condition", "=", "element", ".", "name", "else", ":", "appCondition", "=", "element", "return", "{", "'condition'", ":", "condition", ",", "'text'", ":", "text", ",", "'textLang'", ":", "textLang", ",", "'appCondition'", ":", "appCondition", "}" ]
parses an error element .
train
false
42,011
def _maybe_strip_i18n_prefix_and_normalize(number, possible_idd_prefix): if (len(number) == 0): return (CountryCodeSource.FROM_DEFAULT_COUNTRY, number) m = _PLUS_CHARS_PATTERN.match(number) if m: number = number[m.end():] return (CountryCodeSource.FROM_NUMBER_WITH_PLUS_SIGN, _normalize(number)) idd_pattern = re.compile(possible_idd_prefix) number = _normalize(number) (stripped, number) = _parse_prefix_as_idd(idd_pattern, number) if stripped: return (CountryCodeSource.FROM_NUMBER_WITH_IDD, number) else: return (CountryCodeSource.FROM_DEFAULT_COUNTRY, number)
[ "def", "_maybe_strip_i18n_prefix_and_normalize", "(", "number", ",", "possible_idd_prefix", ")", ":", "if", "(", "len", "(", "number", ")", "==", "0", ")", ":", "return", "(", "CountryCodeSource", ".", "FROM_DEFAULT_COUNTRY", ",", "number", ")", "m", "=", "_PLUS_CHARS_PATTERN", ".", "match", "(", "number", ")", "if", "m", ":", "number", "=", "number", "[", "m", ".", "end", "(", ")", ":", "]", "return", "(", "CountryCodeSource", ".", "FROM_NUMBER_WITH_PLUS_SIGN", ",", "_normalize", "(", "number", ")", ")", "idd_pattern", "=", "re", ".", "compile", "(", "possible_idd_prefix", ")", "number", "=", "_normalize", "(", "number", ")", "(", "stripped", ",", "number", ")", "=", "_parse_prefix_as_idd", "(", "idd_pattern", ",", "number", ")", "if", "stripped", ":", "return", "(", "CountryCodeSource", ".", "FROM_NUMBER_WITH_IDD", ",", "number", ")", "else", ":", "return", "(", "CountryCodeSource", ".", "FROM_DEFAULT_COUNTRY", ",", "number", ")" ]
strips any international prefix present in the number provided .
train
true
42,012
def checkSameHost(*urls): if (not urls): return None elif (len(urls) == 1): return True else: return all(((urlparse.urlparse((url or '')).netloc.split(':')[0] == urlparse.urlparse((urls[0] or '')).netloc.split(':')[0]) for url in urls[1:]))
[ "def", "checkSameHost", "(", "*", "urls", ")", ":", "if", "(", "not", "urls", ")", ":", "return", "None", "elif", "(", "len", "(", "urls", ")", "==", "1", ")", ":", "return", "True", "else", ":", "return", "all", "(", "(", "(", "urlparse", ".", "urlparse", "(", "(", "url", "or", "''", ")", ")", ".", "netloc", ".", "split", "(", "':'", ")", "[", "0", "]", "==", "urlparse", ".", "urlparse", "(", "(", "urls", "[", "0", "]", "or", "''", ")", ")", ".", "netloc", ".", "split", "(", "':'", ")", "[", "0", "]", ")", "for", "url", "in", "urls", "[", "1", ":", "]", ")", ")" ]
returns true if all provided urls share that same host .
train
false
42,013
def srs_double(f): return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
[ "def", "srs_double", "(", "f", ")", ":", "return", "double_output", "(", "f", ",", "[", "c_void_p", ",", "POINTER", "(", "c_int", ")", "]", ",", "errcheck", "=", "True", ")" ]
creates a function prototype for the osr routines that take the osrspatialreference object and .
train
false
42,018
def unhot(function): def wrapper(actual, predicted): if ((len(actual.shape) > 1) and (actual.shape[1] > 1)): actual = actual.argmax(axis=1) if ((len(predicted.shape) > 1) and (predicted.shape[1] > 1)): predicted = predicted.argmax(axis=1) return function(actual, predicted) return wrapper
[ "def", "unhot", "(", "function", ")", ":", "def", "wrapper", "(", "actual", ",", "predicted", ")", ":", "if", "(", "(", "len", "(", "actual", ".", "shape", ")", ">", "1", ")", "and", "(", "actual", ".", "shape", "[", "1", "]", ">", "1", ")", ")", ":", "actual", "=", "actual", ".", "argmax", "(", "axis", "=", "1", ")", "if", "(", "(", "len", "(", "predicted", ".", "shape", ")", ">", "1", ")", "and", "(", "predicted", ".", "shape", "[", "1", "]", ">", "1", ")", ")", ":", "predicted", "=", "predicted", ".", "argmax", "(", "axis", "=", "1", ")", "return", "function", "(", "actual", ",", "predicted", ")", "return", "wrapper" ]
convert one-hot representation into one column .
train
false
42,019
def password_validator(optdict, name, value): return optik_ext.check_password(None, name, value)
[ "def", "password_validator", "(", "optdict", ",", "name", ",", "value", ")", ":", "return", "optik_ext", ".", "check_password", "(", "None", ",", "name", ",", "value", ")" ]
validate and return a string for option of type password .
train
false
42,020
def key_filenames(): from fabric.state import env keys = env.key_filename if (isinstance(env.key_filename, basestring) or (env.key_filename is None)): keys = [keys] keys = filter(bool, keys) conf = ssh_config() if ('identityfile' in conf): keys.extend(conf['identityfile']) return map(os.path.expanduser, keys)
[ "def", "key_filenames", "(", ")", ":", "from", "fabric", ".", "state", "import", "env", "keys", "=", "env", ".", "key_filename", "if", "(", "isinstance", "(", "env", ".", "key_filename", ",", "basestring", ")", "or", "(", "env", ".", "key_filename", "is", "None", ")", ")", ":", "keys", "=", "[", "keys", "]", "keys", "=", "filter", "(", "bool", ",", "keys", ")", "conf", "=", "ssh_config", "(", ")", "if", "(", "'identityfile'", "in", "conf", ")", ":", "keys", ".", "extend", "(", "conf", "[", "'identityfile'", "]", ")", "return", "map", "(", "os", ".", "path", ".", "expanduser", ",", "keys", ")" ]
returns list of ssh key filenames for the current env .
train
false
42,021
def reset_metadata_version(): v = generate_hash() cache().set_value(u'metadata_version', v) return v
[ "def", "reset_metadata_version", "(", ")", ":", "v", "=", "generate_hash", "(", ")", "cache", "(", ")", ".", "set_value", "(", "u'metadata_version'", ",", "v", ")", "return", "v" ]
reset metadata_version (client build id) hash .
train
false
42,022
def object_name(name, parent): if ((not isinstance(name, basestring)) or (not isinstance(parent, basestring))): raise CX('Invalid input, name and parent must be strings') else: name = name.strip() parent = parent.strip() if ((name != '') and (parent != '') and (name == parent)): raise CX('Self parentage is not allowed') if (not RE_OBJECT_NAME.match(name)): raise CX(("Invalid characters in name: '%s'" % name)) return name
[ "def", "object_name", "(", "name", ",", "parent", ")", ":", "if", "(", "(", "not", "isinstance", "(", "name", ",", "basestring", ")", ")", "or", "(", "not", "isinstance", "(", "parent", ",", "basestring", ")", ")", ")", ":", "raise", "CX", "(", "'Invalid input, name and parent must be strings'", ")", "else", ":", "name", "=", "name", ".", "strip", "(", ")", "parent", "=", "parent", ".", "strip", "(", ")", "if", "(", "(", "name", "!=", "''", ")", "and", "(", "parent", "!=", "''", ")", "and", "(", "name", "==", "parent", ")", ")", ":", "raise", "CX", "(", "'Self parentage is not allowed'", ")", "if", "(", "not", "RE_OBJECT_NAME", ".", "match", "(", "name", ")", ")", ":", "raise", "CX", "(", "(", "\"Invalid characters in name: '%s'\"", "%", "name", ")", ")", "return", "name" ]
validate the object name .
train
false
42,023
def LogNormal(name, mean, std): return rv(name, LogNormalDistribution, (mean, std))
[ "def", "LogNormal", "(", "name", ",", "mean", ",", "std", ")", ":", "return", "rv", "(", "name", ",", "LogNormalDistribution", ",", "(", "mean", ",", "std", ")", ")" ]
create a continuous random variable with a log-normal distribution .
train
false
42,024
def do_action_on_many(action, resources, success_msg, error_msg): failure_flag = False for resource in resources: try: action(resource) print (success_msg % resource) except Exception as e: failure_flag = True print encodeutils.safe_encode(six.text_type(e)) if failure_flag: raise exceptions.CommandError(error_msg)
[ "def", "do_action_on_many", "(", "action", ",", "resources", ",", "success_msg", ",", "error_msg", ")", ":", "failure_flag", "=", "False", "for", "resource", "in", "resources", ":", "try", ":", "action", "(", "resource", ")", "print", "(", "success_msg", "%", "resource", ")", "except", "Exception", "as", "e", ":", "failure_flag", "=", "True", "print", "encodeutils", ".", "safe_encode", "(", "six", ".", "text_type", "(", "e", ")", ")", "if", "failure_flag", ":", "raise", "exceptions", ".", "CommandError", "(", "error_msg", ")" ]
helper to run an action on many resources .
train
false
42,025
def xmltoolkit62(): ENTITIES = {u'rsquo': u'\u2019', u'lsquo': u'\u2018'} parser = ET.XMLTreeBuilder() parser.entity.update(ENTITIES) parser.feed(XMLTOOLKIT62_DOC) t = parser.close() return t.find('.//paragraph').text
[ "def", "xmltoolkit62", "(", ")", ":", "ENTITIES", "=", "{", "u'rsquo'", ":", "u'\\u2019'", ",", "u'lsquo'", ":", "u'\\u2018'", "}", "parser", "=", "ET", ".", "XMLTreeBuilder", "(", ")", "parser", ".", "entity", ".", "update", "(", "ENTITIES", ")", "parser", ".", "feed", "(", "XMLTOOLKIT62_DOC", ")", "t", "=", "parser", ".", "close", "(", ")", "return", "t", ".", "find", "(", "'.//paragraph'", ")", ".", "text" ]
dont crash when using custom entities .
train
false
42,026
def test_saving_state_include_domain_exclude_entity(hass_recorder): hass = hass_recorder({'exclude': {'entities': 'test.recorder'}, 'include': {'domains': 'test'}}) states = _add_entities(hass, ['test.recorder', 'test2.recorder', 'test.ok']) assert (len(states) == 1) assert (hass.states.get('test.ok') == states[0]) assert (hass.states.get('test.ok').state == 'state2')
[ "def", "test_saving_state_include_domain_exclude_entity", "(", "hass_recorder", ")", ":", "hass", "=", "hass_recorder", "(", "{", "'exclude'", ":", "{", "'entities'", ":", "'test.recorder'", "}", ",", "'include'", ":", "{", "'domains'", ":", "'test'", "}", "}", ")", "states", "=", "_add_entities", "(", "hass", ",", "[", "'test.recorder'", ",", "'test2.recorder'", ",", "'test.ok'", "]", ")", "assert", "(", "len", "(", "states", ")", "==", "1", ")", "assert", "(", "hass", ".", "states", ".", "get", "(", "'test.ok'", ")", "==", "states", "[", "0", "]", ")", "assert", "(", "hass", ".", "states", ".", "get", "(", "'test.ok'", ")", ".", "state", "==", "'state2'", ")" ]
test saving and restoring a state .
train
false
42,027
def _interp(arr, v, _Series=pd.Series, _charmod=np.char.mod): if (len(arr) >= 145): return (_Series(arr) % v) return _charmod(arr, v)
[ "def", "_interp", "(", "arr", ",", "v", ",", "_Series", "=", "pd", ".", "Series", ",", "_charmod", "=", "np", ".", "char", ".", "mod", ")", ":", "if", "(", "len", "(", "arr", ")", ">=", "145", ")", ":", "return", "(", "_Series", "(", "arr", ")", "%", "v", ")", "return", "_charmod", "(", "arr", ",", "v", ")" ]
delegate to the most efficient string formatting technique based on the length of the array .
train
false
42,028
def _init_worker(counter): global _worker_id with counter.get_lock(): counter.value += 1 _worker_id = counter.value for alias in connections: connection = connections[alias] settings_dict = connection.creation.get_test_db_clone_settings(_worker_id) connection.settings_dict.update(settings_dict) connection.close()
[ "def", "_init_worker", "(", "counter", ")", ":", "global", "_worker_id", "with", "counter", ".", "get_lock", "(", ")", ":", "counter", ".", "value", "+=", "1", "_worker_id", "=", "counter", ".", "value", "for", "alias", "in", "connections", ":", "connection", "=", "connections", "[", "alias", "]", "settings_dict", "=", "connection", ".", "creation", ".", "get_test_db_clone_settings", "(", "_worker_id", ")", "connection", ".", "settings_dict", ".", "update", "(", "settings_dict", ")", "connection", ".", "close", "(", ")" ]
switch to databases dedicated to this worker .
train
false
42,029
def read_addr_range(start, end, addr_space): pagesize = 4096 while (start < end): page = addr_space.zread(start, pagesize) (yield page) start += pagesize
[ "def", "read_addr_range", "(", "start", ",", "end", ",", "addr_space", ")", ":", "pagesize", "=", "4096", "while", "(", "start", "<", "end", ")", ":", "page", "=", "addr_space", ".", "zread", "(", "start", ",", "pagesize", ")", "(", "yield", "page", ")", "start", "+=", "pagesize" ]
read a number of pages .
train
false
42,030
def get_current_user_for_models(): user = get_current_user() try: if (not user.is_authenticated): return None return user except Exception as e: logger.info(('Cant access is_authenticated method: %s' % e)) return None
[ "def", "get_current_user_for_models", "(", ")", ":", "user", "=", "get_current_user", "(", ")", "try", ":", "if", "(", "not", "user", ".", "is_authenticated", ")", ":", "return", "None", "return", "user", "except", "Exception", "as", "e", ":", "logger", ".", "info", "(", "(", "'Cant access is_authenticated method: %s'", "%", "e", ")", ")", "return", "None" ]
hackish but needed for running tests outside application context because flask test context is not well configured yet :return: user or none .
train
false
42,032
def get_schema_for_resource_parameters(parameters_schema, allow_additional_properties=False): def normalize(x): return {k: (v if v else SCHEMA_ANY_TYPE) for (k, v) in six.iteritems(x)} schema = {} properties = {} properties.update(normalize(parameters_schema)) if properties: schema['type'] = 'object' schema['properties'] = properties schema['additionalProperties'] = allow_additional_properties return schema
[ "def", "get_schema_for_resource_parameters", "(", "parameters_schema", ",", "allow_additional_properties", "=", "False", ")", ":", "def", "normalize", "(", "x", ")", ":", "return", "{", "k", ":", "(", "v", "if", "v", "else", "SCHEMA_ANY_TYPE", ")", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "x", ")", "}", "schema", "=", "{", "}", "properties", "=", "{", "}", "properties", ".", "update", "(", "normalize", "(", "parameters_schema", ")", ")", "if", "properties", ":", "schema", "[", "'type'", "]", "=", "'object'", "schema", "[", "'properties'", "]", "=", "properties", "schema", "[", "'additionalProperties'", "]", "=", "allow_additional_properties", "return", "schema" ]
dynamically construct json schema for the provided resource from the parameters metadata .
train
false
42,033
def list_replace(the_list, find, replacement): for (i, v) in enumerate(the_list): if (v == find): the_list[i] = replacement
[ "def", "list_replace", "(", "the_list", ",", "find", ",", "replacement", ")", ":", "for", "(", "i", ",", "v", ")", "in", "enumerate", "(", "the_list", ")", ":", "if", "(", "v", "==", "find", ")", ":", "the_list", "[", "i", "]", "=", "replacement" ]
replace all occurrences of find with replacement in the_list .
train
false
42,035
def servers_update_addresses(request, servers, all_tenants=False): neutron_enabled = base.is_service_enabled(request, 'network') if neutron_enabled: neutron.servers_update_addresses(request, servers, all_tenants)
[ "def", "servers_update_addresses", "(", "request", ",", "servers", ",", "all_tenants", "=", "False", ")", ":", "neutron_enabled", "=", "base", ".", "is_service_enabled", "(", "request", ",", "'network'", ")", "if", "neutron_enabled", ":", "neutron", ".", "servers_update_addresses", "(", "request", ",", "servers", ",", "all_tenants", ")" ]
retrieve servers networking information from neutron if enabled .
train
true
42,036
@testing.requires_testing_data def test_calculate_chpi_positions_on_chpi5_in_shorter_steps(): mf_quats = read_head_pos(chpi5_pos_fname) raw = read_raw_fif(chpi5_fif_fname, allow_maxshield='yes') raw = _decimate_chpi(raw.crop(0.0, 15.0).load_data(), decim=8) py_quats = _calculate_chpi_positions(raw, t_step_min=0.1, t_step_max=0.1, t_window=0.1, verbose='debug') _assert_quats(py_quats, mf_quats, dist_tol=0.001, angle_tol=0.6)
[ "@", "testing", ".", "requires_testing_data", "def", "test_calculate_chpi_positions_on_chpi5_in_shorter_steps", "(", ")", ":", "mf_quats", "=", "read_head_pos", "(", "chpi5_pos_fname", ")", "raw", "=", "read_raw_fif", "(", "chpi5_fif_fname", ",", "allow_maxshield", "=", "'yes'", ")", "raw", "=", "_decimate_chpi", "(", "raw", ".", "crop", "(", "0.0", ",", "15.0", ")", ".", "load_data", "(", ")", ",", "decim", "=", "8", ")", "py_quats", "=", "_calculate_chpi_positions", "(", "raw", ",", "t_step_min", "=", "0.1", ",", "t_step_max", "=", "0.1", ",", "t_window", "=", "0.1", ",", "verbose", "=", "'debug'", ")", "_assert_quats", "(", "py_quats", ",", "mf_quats", ",", "dist_tol", "=", "0.001", ",", "angle_tol", "=", "0.6", ")" ]
comparing estimated chpi positions with mf results .
train
false
42,037
def vector(name=None, dtype=None): if (dtype is None): dtype = config.floatX type = TensorType(dtype, (False,)) return type(name)
[ "def", "vector", "(", "name", "=", "None", ",", "dtype", "=", "None", ")", ":", "if", "(", "dtype", "is", "None", ")", ":", "dtype", "=", "config", ".", "floatX", "type", "=", "TensorType", "(", "dtype", ",", "(", "False", ",", ")", ")", "return", "type", "(", "name", ")" ]
return a symbolic vector variable .
train
false
42,038
def convert_directive_function(directive_fn): class FunctionalDirective(Directive, ): option_spec = getattr(directive_fn, 'options', None) has_content = getattr(directive_fn, 'content', False) _argument_spec = getattr(directive_fn, 'arguments', (0, 0, False)) (required_arguments, optional_arguments, final_argument_whitespace) = _argument_spec def run(self): return directive_fn(self.name, self.arguments, self.options, self.content, self.lineno, self.content_offset, self.block_text, self.state, self.state_machine) return FunctionalDirective
[ "def", "convert_directive_function", "(", "directive_fn", ")", ":", "class", "FunctionalDirective", "(", "Directive", ",", ")", ":", "option_spec", "=", "getattr", "(", "directive_fn", ",", "'options'", ",", "None", ")", "has_content", "=", "getattr", "(", "directive_fn", ",", "'content'", ",", "False", ")", "_argument_spec", "=", "getattr", "(", "directive_fn", ",", "'arguments'", ",", "(", "0", ",", "0", ",", "False", ")", ")", "(", "required_arguments", ",", "optional_arguments", ",", "final_argument_whitespace", ")", "=", "_argument_spec", "def", "run", "(", "self", ")", ":", "return", "directive_fn", "(", "self", ".", "name", ",", "self", ".", "arguments", ",", "self", ".", "options", ",", "self", ".", "content", ",", "self", ".", "lineno", ",", "self", ".", "content_offset", ",", "self", ".", "block_text", ",", "self", ".", "state", ",", "self", ".", "state_machine", ")", "return", "FunctionalDirective" ]
define & return a directive class generated from directive_fn .
train
false
42,039
def upload_image_stream_optimized(context, image_id, instance, session, vm, vmdk_size): LOG.debug('Uploading image %s', image_id, instance=instance) metadata = IMAGE_API.get(context, image_id) read_handle = rw_handles.VmdkReadHandle(session, session._host, session._port, vm, None, vmdk_size) image_metadata = {'disk_format': constants.DISK_FORMAT_VMDK, 'is_public': metadata['is_public'], 'name': metadata['name'], 'status': 'active', 'container_format': constants.CONTAINER_FORMAT_BARE, 'size': 0, 'properties': {'vmware_image_version': 1, 'vmware_disktype': 'streamOptimized', 'owner_id': instance.project_id}} updater = loopingcall.FixedIntervalLoopingCall(read_handle.update_progress) try: updater.start(interval=NFC_LEASE_UPDATE_PERIOD) IMAGE_API.update(context, image_id, image_metadata, data=read_handle) finally: updater.stop() read_handle.close() LOG.debug('Uploaded image %s to the Glance image server', image_id, instance=instance)
[ "def", "upload_image_stream_optimized", "(", "context", ",", "image_id", ",", "instance", ",", "session", ",", "vm", ",", "vmdk_size", ")", ":", "LOG", ".", "debug", "(", "'Uploading image %s'", ",", "image_id", ",", "instance", "=", "instance", ")", "metadata", "=", "IMAGE_API", ".", "get", "(", "context", ",", "image_id", ")", "read_handle", "=", "rw_handles", ".", "VmdkReadHandle", "(", "session", ",", "session", ".", "_host", ",", "session", ".", "_port", ",", "vm", ",", "None", ",", "vmdk_size", ")", "image_metadata", "=", "{", "'disk_format'", ":", "constants", ".", "DISK_FORMAT_VMDK", ",", "'is_public'", ":", "metadata", "[", "'is_public'", "]", ",", "'name'", ":", "metadata", "[", "'name'", "]", ",", "'status'", ":", "'active'", ",", "'container_format'", ":", "constants", ".", "CONTAINER_FORMAT_BARE", ",", "'size'", ":", "0", ",", "'properties'", ":", "{", "'vmware_image_version'", ":", "1", ",", "'vmware_disktype'", ":", "'streamOptimized'", ",", "'owner_id'", ":", "instance", ".", "project_id", "}", "}", "updater", "=", "loopingcall", ".", "FixedIntervalLoopingCall", "(", "read_handle", ".", "update_progress", ")", "try", ":", "updater", ".", "start", "(", "interval", "=", "NFC_LEASE_UPDATE_PERIOD", ")", "IMAGE_API", ".", "update", "(", "context", ",", "image_id", ",", "image_metadata", ",", "data", "=", "read_handle", ")", "finally", ":", "updater", ".", "stop", "(", ")", "read_handle", ".", "close", "(", ")", "LOG", ".", "debug", "(", "'Uploaded image %s to the Glance image server'", ",", "image_id", ",", "instance", "=", "instance", ")" ]
upload the snapshotted vm disk file to glance image server .
train
false
42,041
def verification_user(id, user): conferences = get_memcached(get_key('conferences')) if (user not in conferences[id]['users'].keys()): return False return True
[ "def", "verification_user", "(", "id", ",", "user", ")", ":", "conferences", "=", "get_memcached", "(", "get_key", "(", "'conferences'", ")", ")", "if", "(", "user", "not", "in", "conferences", "[", "id", "]", "[", "'users'", "]", ".", "keys", "(", ")", ")", ":", "return", "False", "return", "True" ]
verification user in conference return true if the user is present in conference .
train
false
42,042
def libvlc_media_player_set_media(p_mi, p_md): f = (_Cfunctions.get('libvlc_media_player_set_media', None) or _Cfunction('libvlc_media_player_set_media', ((1,), (1,)), None, None, MediaPlayer, Media)) return f(p_mi, p_md)
[ "def", "libvlc_media_player_set_media", "(", "p_mi", ",", "p_md", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_player_set_media'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_player_set_media'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "None", ",", "MediaPlayer", ",", "Media", ")", ")", "return", "f", "(", "p_mi", ",", "p_md", ")" ]
set the media that will be used by the media_player .
train
true
42,043
def lookup_table(options, embed_map, worddict, word_idict, f_emb, use_norm=False): wordvecs = get_embeddings(options, word_idict, f_emb) clf = train_regressor(options, embed_map, wordvecs, worddict) table = apply_regressor(clf, embed_map, use_norm=use_norm) for i in range(options['n_words']): w = word_idict[i] table[w] = wordvecs[w] if use_norm: table[w] /= norm(table[w]) return table
[ "def", "lookup_table", "(", "options", ",", "embed_map", ",", "worddict", ",", "word_idict", ",", "f_emb", ",", "use_norm", "=", "False", ")", ":", "wordvecs", "=", "get_embeddings", "(", "options", ",", "word_idict", ",", "f_emb", ")", "clf", "=", "train_regressor", "(", "options", ",", "embed_map", ",", "wordvecs", ",", "worddict", ")", "table", "=", "apply_regressor", "(", "clf", ",", "embed_map", ",", "use_norm", "=", "use_norm", ")", "for", "i", "in", "range", "(", "options", "[", "'n_words'", "]", ")", ":", "w", "=", "word_idict", "[", "i", "]", "table", "[", "w", "]", "=", "wordvecs", "[", "w", "]", "if", "use_norm", ":", "table", "[", "w", "]", "/=", "norm", "(", "table", "[", "w", "]", ")", "return", "table" ]
create a lookup table from linear mapping of word2vec into rnn word space .
train
false
42,044
def test_field_choices_used_to_translated_value(): LANGUAGES = (('en', 'English'), ('ru', 'Russian')) from django.db import models class Article(models.Model, ): name = models.CharField(max_length=200) language = models.CharField(max_length=200, choices=LANGUAGES) class Meta: app_label = 'django_tables2_test' def __unicode__(self): return self.name class ArticleTable(tables.Table, ): class Meta: model = Article table = ArticleTable([Article(name='English article', language='en'), Article(name='Russian article', language='ru')]) assert ('English' == table.rows[0].get_cell('language')) assert ('Russian' == table.rows[1].get_cell('language'))
[ "def", "test_field_choices_used_to_translated_value", "(", ")", ":", "LANGUAGES", "=", "(", "(", "'en'", ",", "'English'", ")", ",", "(", "'ru'", ",", "'Russian'", ")", ")", "from", "django", ".", "db", "import", "models", "class", "Article", "(", "models", ".", "Model", ",", ")", ":", "name", "=", "models", ".", "CharField", "(", "max_length", "=", "200", ")", "language", "=", "models", ".", "CharField", "(", "max_length", "=", "200", ",", "choices", "=", "LANGUAGES", ")", "class", "Meta", ":", "app_label", "=", "'django_tables2_test'", "def", "__unicode__", "(", "self", ")", ":", "return", "self", ".", "name", "class", "ArticleTable", "(", "tables", ".", "Table", ",", ")", ":", "class", "Meta", ":", "model", "=", "Article", "table", "=", "ArticleTable", "(", "[", "Article", "(", "name", "=", "'English article'", ",", "language", "=", "'en'", ")", ",", "Article", "(", "name", "=", "'Russian article'", ",", "language", "=", "'ru'", ")", "]", ")", "assert", "(", "'English'", "==", "table", ".", "rows", "[", "0", "]", ".", "get_cell", "(", "'language'", ")", ")", "assert", "(", "'Russian'", "==", "table", ".", "rows", "[", "1", "]", ".", "get_cell", "(", "'language'", ")", ")" ]
when a model field uses the choices option .
train
false
42,045
def getattr_recursive(item, attr_key, *args): using_default = (len(args) >= 1) default = (args[0] if using_default else None) for attr_key in attr_key.split('.'): try: if isinstance(item, dict): item = item.__getitem__(attr_key) else: item = getattr(item, attr_key) except (KeyError, AttributeError): if using_default: return default raise return item
[ "def", "getattr_recursive", "(", "item", ",", "attr_key", ",", "*", "args", ")", ":", "using_default", "=", "(", "len", "(", "args", ")", ">=", "1", ")", "default", "=", "(", "args", "[", "0", "]", "if", "using_default", "else", "None", ")", "for", "attr_key", "in", "attr_key", ".", "split", "(", "'.'", ")", ":", "try", ":", "if", "isinstance", "(", "item", ",", "dict", ")", ":", "item", "=", "item", ".", "__getitem__", "(", "attr_key", ")", "else", ":", "item", "=", "getattr", "(", "item", ",", "attr_key", ")", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "if", "using_default", ":", "return", "default", "raise", "return", "item" ]
get attributes recursively .
train
false
42,046
def _device_from_servicecall(hass, service): address = service.data.get(ATTR_ADDRESS) proxy = service.data.get(ATTR_PROXY) if proxy: return hass.data[DATA_HOMEMATIC].devices[proxy].get(address) for (_, devices) in hass.data[DATA_HOMEMATIC].devices.items(): if (address in devices): return devices[address]
[ "def", "_device_from_servicecall", "(", "hass", ",", "service", ")", ":", "address", "=", "service", ".", "data", ".", "get", "(", "ATTR_ADDRESS", ")", "proxy", "=", "service", ".", "data", ".", "get", "(", "ATTR_PROXY", ")", "if", "proxy", ":", "return", "hass", ".", "data", "[", "DATA_HOMEMATIC", "]", ".", "devices", "[", "proxy", "]", ".", "get", "(", "address", ")", "for", "(", "_", ",", "devices", ")", "in", "hass", ".", "data", "[", "DATA_HOMEMATIC", "]", ".", "devices", ".", "items", "(", ")", ":", "if", "(", "address", "in", "devices", ")", ":", "return", "devices", "[", "address", "]" ]
extract homematic device from service call .
train
false
42,047
def ensureRoot(): if (os.getuid() != 0): print '*** Mininet must run as root.' exit(1) return
[ "def", "ensureRoot", "(", ")", ":", "if", "(", "os", ".", "getuid", "(", ")", "!=", "0", ")", ":", "print", "'*** Mininet must run as root.'", "exit", "(", "1", ")", "return" ]
ensure that we are running as root .
train
false
42,048
def _not_in(x, y): try: return (~ x.isin(y)) except AttributeError: if is_list_like(x): try: return (~ y.isin(x)) except AttributeError: pass return (x not in y)
[ "def", "_not_in", "(", "x", ",", "y", ")", ":", "try", ":", "return", "(", "~", "x", ".", "isin", "(", "y", ")", ")", "except", "AttributeError", ":", "if", "is_list_like", "(", "x", ")", ":", "try", ":", "return", "(", "~", "y", ".", "isin", "(", "x", ")", ")", "except", "AttributeError", ":", "pass", "return", "(", "x", "not", "in", "y", ")" ]
compute the vectorized membership of x not in y if possible .
train
true
42,049
def assert_conv_shape(shape): out_shape = [] for (i, n) in enumerate(shape): try: const_n = get_scalar_constant_value(n) if (i < 2): if (const_n < 0): raise ValueError(('The convolution would produce an invalid shape (dim[%d]: %d < 0).' % (i, const_n))) elif (const_n <= 0): raise ValueError(('The convolution would produce an invalid shape (dim[%d]: %d <= 0).' % (i, const_n))) out_shape.append(n) except NotScalarConstantError: if (i < 2): assert_shp = Assert(('The convolution would produce an invalid shape (dim[%d] < 0).' % i)) out_shape.append(assert_shp(n, theano.tensor.ge(n, 0))) else: assert_shp = Assert(('The convolution would produce an invalid shape (dim[%d] <= 0).' % i)) out_shape.append(assert_shp(n, theano.tensor.gt(n, 0))) return tuple(out_shape)
[ "def", "assert_conv_shape", "(", "shape", ")", ":", "out_shape", "=", "[", "]", "for", "(", "i", ",", "n", ")", "in", "enumerate", "(", "shape", ")", ":", "try", ":", "const_n", "=", "get_scalar_constant_value", "(", "n", ")", "if", "(", "i", "<", "2", ")", ":", "if", "(", "const_n", "<", "0", ")", ":", "raise", "ValueError", "(", "(", "'The convolution would produce an invalid shape (dim[%d]: %d < 0).'", "%", "(", "i", ",", "const_n", ")", ")", ")", "elif", "(", "const_n", "<=", "0", ")", ":", "raise", "ValueError", "(", "(", "'The convolution would produce an invalid shape (dim[%d]: %d <= 0).'", "%", "(", "i", ",", "const_n", ")", ")", ")", "out_shape", ".", "append", "(", "n", ")", "except", "NotScalarConstantError", ":", "if", "(", "i", "<", "2", ")", ":", "assert_shp", "=", "Assert", "(", "(", "'The convolution would produce an invalid shape (dim[%d] < 0).'", "%", "i", ")", ")", "out_shape", ".", "append", "(", "assert_shp", "(", "n", ",", "theano", ".", "tensor", ".", "ge", "(", "n", ",", "0", ")", ")", ")", "else", ":", "assert_shp", "=", "Assert", "(", "(", "'The convolution would produce an invalid shape (dim[%d] <= 0).'", "%", "i", ")", ")", "out_shape", ".", "append", "(", "assert_shp", "(", "n", ",", "theano", ".", "tensor", ".", "gt", "(", "n", ",", "0", ")", ")", ")", "return", "tuple", "(", "out_shape", ")" ]
this function adds assert nodes that check if shape is a valid convolution shape .
train
false
42,050
@pytest.mark.django_db def test_approve_addons_get_review_type_already_approved(): addon = addon_factory(status=amo.STATUS_PUBLIC) file_ = addon.versions.get().files.get() file_.update(status=amo.STATUS_PUBLIC) assert (approve_addons.get_review_type(file_) is None)
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_approve_addons_get_review_type_already_approved", "(", ")", ":", "addon", "=", "addon_factory", "(", "status", "=", "amo", ".", "STATUS_PUBLIC", ")", "file_", "=", "addon", ".", "versions", ".", "get", "(", ")", ".", "files", ".", "get", "(", ")", "file_", ".", "update", "(", "status", "=", "amo", ".", "STATUS_PUBLIC", ")", "assert", "(", "approve_addons", ".", "get_review_type", "(", "file_", ")", "is", "None", ")" ]
the review type for a file that doesnt need approval is none .
train
false
42,051
def _compute_error(cross_correlation_max, src_amp, target_amp): error = (1.0 - ((cross_correlation_max * cross_correlation_max.conj()) / (src_amp * target_amp))) return np.sqrt(np.abs(error))
[ "def", "_compute_error", "(", "cross_correlation_max", ",", "src_amp", ",", "target_amp", ")", ":", "error", "=", "(", "1.0", "-", "(", "(", "cross_correlation_max", "*", "cross_correlation_max", ".", "conj", "(", ")", ")", "/", "(", "src_amp", "*", "target_amp", ")", ")", ")", "return", "np", ".", "sqrt", "(", "np", ".", "abs", "(", "error", ")", ")" ]
compute rms error metric between src_image and target_image .
train
false
42,052
def LoadPagespeedEntry(pagespeed_entry, open_fn=None): builder = yaml_object.ObjectBuilder(PagespeedEntry) handler = yaml_builder.BuilderHandler(builder) listener = yaml_listener.EventListener(handler) listener.Parse(pagespeed_entry) parsed_yaml = handler.GetResults() if (not parsed_yaml): return PagespeedEntry() if (len(parsed_yaml) > 1): raise MalformedPagespeedConfiguration('Multiple configuration sections in the yaml') return parsed_yaml[0]
[ "def", "LoadPagespeedEntry", "(", "pagespeed_entry", ",", "open_fn", "=", "None", ")", ":", "builder", "=", "yaml_object", ".", "ObjectBuilder", "(", "PagespeedEntry", ")", "handler", "=", "yaml_builder", ".", "BuilderHandler", "(", "builder", ")", "listener", "=", "yaml_listener", ".", "EventListener", "(", "handler", ")", "listener", ".", "Parse", "(", "pagespeed_entry", ")", "parsed_yaml", "=", "handler", ".", "GetResults", "(", ")", "if", "(", "not", "parsed_yaml", ")", ":", "return", "PagespeedEntry", "(", ")", "if", "(", "len", "(", "parsed_yaml", ")", ">", "1", ")", ":", "raise", "MalformedPagespeedConfiguration", "(", "'Multiple configuration sections in the yaml'", ")", "return", "parsed_yaml", "[", "0", "]" ]
load a yaml file or string and return a pagespeedentry .
train
false
42,053
def libvlc_retain(p_instance): f = (_Cfunctions.get('libvlc_retain', None) or _Cfunction('libvlc_retain', ((1,),), None, None, Instance)) return f(p_instance)
[ "def", "libvlc_retain", "(", "p_instance", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_retain'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_retain'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "Instance", ")", ")", "return", "f", "(", "p_instance", ")" ]
increments the reference count of a libvlc instance .
train
false
42,054
def sgd(loss_or_grads, params, learning_rate): grads = get_or_compute_grads(loss_or_grads, params) updates = OrderedDict() for (param, grad) in zip(params, grads): updates[param] = (param - (learning_rate * grad)) return updates
[ "def", "sgd", "(", "loss_or_grads", ",", "params", ",", "learning_rate", ")", ":", "grads", "=", "get_or_compute_grads", "(", "loss_or_grads", ",", "params", ")", "updates", "=", "OrderedDict", "(", ")", "for", "(", "param", ",", "grad", ")", "in", "zip", "(", "params", ",", "grads", ")", ":", "updates", "[", "param", "]", "=", "(", "param", "-", "(", "learning_rate", "*", "grad", ")", ")", "return", "updates" ]
stochastic gradient descent updates generates update expressions of the form: * param := param - learning_rate * gradient parameters loss_or_grads : symbolic expression or list of expressions a scalar loss expression .
train
false
42,057
def test_dict_init(): w = wcs.WCS({}) (xp, yp) = w.wcs_world2pix(41.0, 2.0, 1) assert_array_almost_equal_nulp(xp, 41.0, 10) assert_array_almost_equal_nulp(yp, 2.0, 10) w = wcs.WCS({u'CTYPE1': u'GLON-CAR', u'CTYPE2': u'GLAT-CAR', u'CUNIT1': u'deg', u'CUNIT2': u'deg', u'CRPIX1': 1, u'CRPIX2': 1, u'CRVAL1': 40.0, u'CRVAL2': 0.0, u'CDELT1': (-0.1), u'CDELT2': 0.1}) (xp, yp) = w.wcs_world2pix(41.0, 2.0, 0) assert_array_almost_equal_nulp(xp, (-10.0), 10) assert_array_almost_equal_nulp(yp, 20.0, 10)
[ "def", "test_dict_init", "(", ")", ":", "w", "=", "wcs", ".", "WCS", "(", "{", "}", ")", "(", "xp", ",", "yp", ")", "=", "w", ".", "wcs_world2pix", "(", "41.0", ",", "2.0", ",", "1", ")", "assert_array_almost_equal_nulp", "(", "xp", ",", "41.0", ",", "10", ")", "assert_array_almost_equal_nulp", "(", "yp", ",", "2.0", ",", "10", ")", "w", "=", "wcs", ".", "WCS", "(", "{", "u'CTYPE1'", ":", "u'GLON-CAR'", ",", "u'CTYPE2'", ":", "u'GLAT-CAR'", ",", "u'CUNIT1'", ":", "u'deg'", ",", "u'CUNIT2'", ":", "u'deg'", ",", "u'CRPIX1'", ":", "1", ",", "u'CRPIX2'", ":", "1", ",", "u'CRVAL1'", ":", "40.0", ",", "u'CRVAL2'", ":", "0.0", ",", "u'CDELT1'", ":", "(", "-", "0.1", ")", ",", "u'CDELT2'", ":", "0.1", "}", ")", "(", "xp", ",", "yp", ")", "=", "w", ".", "wcs_world2pix", "(", "41.0", ",", "2.0", ",", "0", ")", "assert_array_almost_equal_nulp", "(", "xp", ",", "(", "-", "10.0", ")", ",", "10", ")", "assert_array_almost_equal_nulp", "(", "yp", ",", "20.0", ",", "10", ")" ]
test that wcs can be initialized with a dict-like object .
train
false
42,058
def test_datasink_localcopy(dummy_input, tmpdir): local_dir = str(tmpdir) container = u'outputs' attr_folder = u'text_file' input_path = dummy_input ds = nio.DataSink() ds.inputs.container = container ds.inputs.local_copy = local_dir setattr(ds.inputs, attr_folder, input_path) local_copy = os.path.join(local_dir, container, attr_folder, os.path.basename(input_path)) ds.run() src_md5 = hashlib.md5(open(input_path, u'rb').read()).hexdigest() dst_md5 = hashlib.md5(open(local_copy, u'rb').read()).hexdigest() assert (src_md5 == dst_md5)
[ "def", "test_datasink_localcopy", "(", "dummy_input", ",", "tmpdir", ")", ":", "local_dir", "=", "str", "(", "tmpdir", ")", "container", "=", "u'outputs'", "attr_folder", "=", "u'text_file'", "input_path", "=", "dummy_input", "ds", "=", "nio", ".", "DataSink", "(", ")", "ds", ".", "inputs", ".", "container", "=", "container", "ds", ".", "inputs", ".", "local_copy", "=", "local_dir", "setattr", "(", "ds", ".", "inputs", ",", "attr_folder", ",", "input_path", ")", "local_copy", "=", "os", ".", "path", ".", "join", "(", "local_dir", ",", "container", ",", "attr_folder", ",", "os", ".", "path", ".", "basename", "(", "input_path", ")", ")", "ds", ".", "run", "(", ")", "src_md5", "=", "hashlib", ".", "md5", "(", "open", "(", "input_path", ",", "u'rb'", ")", ".", "read", "(", ")", ")", ".", "hexdigest", "(", ")", "dst_md5", "=", "hashlib", ".", "md5", "(", "open", "(", "local_copy", ",", "u'rb'", ")", ".", "read", "(", ")", ")", ".", "hexdigest", "(", ")", "assert", "(", "src_md5", "==", "dst_md5", ")" ]
function to validate datasink will make local copy via local_copy attribute .
train
false
42,059
def extract_client_login_token(http_body, scopes): token_string = get_client_login_token(http_body) token = ClientLoginToken(scopes=scopes) token.set_token_string(token_string) return token
[ "def", "extract_client_login_token", "(", "http_body", ",", "scopes", ")", ":", "token_string", "=", "get_client_login_token", "(", "http_body", ")", "token", "=", "ClientLoginToken", "(", "scopes", "=", "scopes", ")", "token", ".", "set_token_string", "(", "token_string", ")", "return", "token" ]
parses the servers response and returns a clientlogintoken .
train
false
42,060
def _makepass(password, hasher='sha256'): if (hasher == 'sha256'): h = hashlib.sha256(password) elif (hasher == 'md5'): h = hashlib.md5(password) else: return NotImplemented c = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!?.,:;/*-+_()' r = {'Method': h.name, 'Salt': ''.join((random.SystemRandom().choice(c) for x in range(20)))} h.update(r['Salt']) r['Hash'] = h.hexdigest() return r
[ "def", "_makepass", "(", "password", ",", "hasher", "=", "'sha256'", ")", ":", "if", "(", "hasher", "==", "'sha256'", ")", ":", "h", "=", "hashlib", ".", "sha256", "(", "password", ")", "elif", "(", "hasher", "==", "'md5'", ")", ":", "h", "=", "hashlib", ".", "md5", "(", "password", ")", "else", ":", "return", "NotImplemented", "c", "=", "'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!?.,:;/*-+_()'", "r", "=", "{", "'Method'", ":", "h", ".", "name", ",", "'Salt'", ":", "''", ".", "join", "(", "(", "random", ".", "SystemRandom", "(", ")", ".", "choice", "(", "c", ")", "for", "x", "in", "range", "(", "20", ")", ")", ")", "}", "h", ".", "update", "(", "r", "[", "'Salt'", "]", ")", "r", "[", "'Hash'", "]", "=", "h", ".", "hexdigest", "(", ")", "return", "r" ]
create a znc compatible hashed password .
train
true
42,061
def coordinate_latlon_bbox(coord, projection): ul = projection.coordinateLocation(coord) ur = projection.coordinateLocation(coord.right()) ll = projection.coordinateLocation(coord.down()) lr = projection.coordinateLocation(coord.down().right()) n = max(ul.lat, ur.lat, ll.lat, lr.lat) s = min(ul.lat, ur.lat, ll.lat, lr.lat) e = max(ul.lon, ur.lon, ll.lon, lr.lon) w = min(ul.lon, ur.lon, ll.lon, lr.lon) return (w, s, e, n)
[ "def", "coordinate_latlon_bbox", "(", "coord", ",", "projection", ")", ":", "ul", "=", "projection", ".", "coordinateLocation", "(", "coord", ")", "ur", "=", "projection", ".", "coordinateLocation", "(", "coord", ".", "right", "(", ")", ")", "ll", "=", "projection", ".", "coordinateLocation", "(", "coord", ".", "down", "(", ")", ")", "lr", "=", "projection", ".", "coordinateLocation", "(", "coord", ".", "down", "(", ")", ".", "right", "(", ")", ")", "n", "=", "max", "(", "ul", ".", "lat", ",", "ur", ".", "lat", ",", "ll", ".", "lat", ",", "lr", ".", "lat", ")", "s", "=", "min", "(", "ul", ".", "lat", ",", "ur", ".", "lat", ",", "ll", ".", "lat", ",", "lr", ".", "lat", ")", "e", "=", "max", "(", "ul", ".", "lon", ",", "ur", ".", "lon", ",", "ll", ".", "lon", ",", "lr", ".", "lon", ")", "w", "=", "min", "(", "ul", ".", "lon", ",", "ur", ".", "lon", ",", "ll", ".", "lon", ",", "lr", ".", "lon", ")", "return", "(", "w", ",", "s", ",", "e", ",", "n", ")" ]
return an bounding box for a projected tile .
train
false
42,062
def test_no_blocklist_update(config_stub, download_stub, data_tmpdir, basedir, tmpdir, win_registry): config_stub.data = {'content': {'host-block-lists': None, 'host-blocking-enabled': True}} host_blocker = adblock.HostBlocker() host_blocker.adblock_update() host_blocker.read_hosts() for str_url in URLS_TO_CHECK: assert (not host_blocker.is_blocked(QUrl(str_url)))
[ "def", "test_no_blocklist_update", "(", "config_stub", ",", "download_stub", ",", "data_tmpdir", ",", "basedir", ",", "tmpdir", ",", "win_registry", ")", ":", "config_stub", ".", "data", "=", "{", "'content'", ":", "{", "'host-block-lists'", ":", "None", ",", "'host-blocking-enabled'", ":", "True", "}", "}", "host_blocker", "=", "adblock", ".", "HostBlocker", "(", ")", "host_blocker", ".", "adblock_update", "(", ")", "host_blocker", ".", "read_hosts", "(", ")", "for", "str_url", "in", "URLS_TO_CHECK", ":", "assert", "(", "not", "host_blocker", ".", "is_blocked", "(", "QUrl", "(", "str_url", ")", ")", ")" ]
ensure no url is blocked when no block list exists .
train
false
42,063
def greek(size): for (factor, suffix) in _abbrevs: if (size > factor): break return (str(int((size / factor))) + suffix)
[ "def", "greek", "(", "size", ")", ":", "for", "(", "factor", ",", "suffix", ")", "in", "_abbrevs", ":", "if", "(", "size", ">", "factor", ")", ":", "break", "return", "(", "str", "(", "int", "(", "(", "size", "/", "factor", ")", ")", ")", "+", "suffix", ")" ]
return a string representing the greek/metric suffix of a size .
train
false
42,064
@contextfunction def sales_lead_list(context, leads, skip_group=False): request = context['request'] response_format = 'html' if ('response_format' in context): response_format = context['response_format'] return Markup(render_to_string('sales/tags/lead_list', {'leads': leads, 'skip_group': skip_group}, context_instance=RequestContext(request), response_format=response_format))
[ "@", "contextfunction", "def", "sales_lead_list", "(", "context", ",", "leads", ",", "skip_group", "=", "False", ")", ":", "request", "=", "context", "[", "'request'", "]", "response_format", "=", "'html'", "if", "(", "'response_format'", "in", "context", ")", ":", "response_format", "=", "context", "[", "'response_format'", "]", "return", "Markup", "(", "render_to_string", "(", "'sales/tags/lead_list'", ",", "{", "'leads'", ":", "leads", ",", "'skip_group'", ":", "skip_group", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")", ")" ]
print a list of leads .
train
false
42,065
def p_command_goto_bad(p): p[0] = 'INVALID LINE NUMBER IN GOTO'
[ "def", "p_command_goto_bad", "(", "p", ")", ":", "p", "[", "0", "]", "=", "'INVALID LINE NUMBER IN GOTO'" ]
command : goto error .
train
false
42,066
def ibin(n, bits=0, str=False): if (not str): try: bits = as_int(bits) return [(1 if (i == '1') else 0) for i in bin(n)[2:].rjust(bits, '0')] except ValueError: return variations(list(range(2)), n, repetition=True) else: try: bits = as_int(bits) return bin(n)[2:].rjust(bits, '0') except ValueError: return (bin(i)[2:].rjust(n, '0') for i in range((2 ** n)))
[ "def", "ibin", "(", "n", ",", "bits", "=", "0", ",", "str", "=", "False", ")", ":", "if", "(", "not", "str", ")", ":", "try", ":", "bits", "=", "as_int", "(", "bits", ")", "return", "[", "(", "1", "if", "(", "i", "==", "'1'", ")", "else", "0", ")", "for", "i", "in", "bin", "(", "n", ")", "[", "2", ":", "]", ".", "rjust", "(", "bits", ",", "'0'", ")", "]", "except", "ValueError", ":", "return", "variations", "(", "list", "(", "range", "(", "2", ")", ")", ",", "n", ",", "repetition", "=", "True", ")", "else", ":", "try", ":", "bits", "=", "as_int", "(", "bits", ")", "return", "bin", "(", "n", ")", "[", "2", ":", "]", ".", "rjust", "(", "bits", ",", "'0'", ")", "except", "ValueError", ":", "return", "(", "bin", "(", "i", ")", "[", "2", ":", "]", ".", "rjust", "(", "n", ",", "'0'", ")", "for", "i", "in", "range", "(", "(", "2", "**", "n", ")", ")", ")" ]
return a list of length bits corresponding to the binary value of n with small bits to the right .
train
false
42,068
def to_hdf5(filename, *args, **kwargs): if ((len(args) == 1) and isinstance(args[0], dict)): data = args[0] elif ((len(args) == 2) and isinstance(args[0], str) and isinstance(args[1], Array)): data = {args[0]: args[1]} else: raise ValueError("Please provide {'/data/path': array} dictionary") chunks = kwargs.pop('chunks', True) import h5py with h5py.File(filename) as f: dsets = [f.require_dataset(dp, shape=x.shape, dtype=x.dtype, chunks=(tuple([c[0] for c in x.chunks]) if (chunks is True) else chunks), **kwargs) for (dp, x) in data.items()] store(list(data.values()), dsets)
[ "def", "to_hdf5", "(", "filename", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "(", "len", "(", "args", ")", "==", "1", ")", "and", "isinstance", "(", "args", "[", "0", "]", ",", "dict", ")", ")", ":", "data", "=", "args", "[", "0", "]", "elif", "(", "(", "len", "(", "args", ")", "==", "2", ")", "and", "isinstance", "(", "args", "[", "0", "]", ",", "str", ")", "and", "isinstance", "(", "args", "[", "1", "]", ",", "Array", ")", ")", ":", "data", "=", "{", "args", "[", "0", "]", ":", "args", "[", "1", "]", "}", "else", ":", "raise", "ValueError", "(", "\"Please provide {'/data/path': array} dictionary\"", ")", "chunks", "=", "kwargs", ".", "pop", "(", "'chunks'", ",", "True", ")", "import", "h5py", "with", "h5py", ".", "File", "(", "filename", ")", "as", "f", ":", "dsets", "=", "[", "f", ".", "require_dataset", "(", "dp", ",", "shape", "=", "x", ".", "shape", ",", "dtype", "=", "x", ".", "dtype", ",", "chunks", "=", "(", "tuple", "(", "[", "c", "[", "0", "]", "for", "c", "in", "x", ".", "chunks", "]", ")", "if", "(", "chunks", "is", "True", ")", "else", "chunks", ")", ",", "**", "kwargs", ")", "for", "(", "dp", ",", "x", ")", "in", "data", ".", "items", "(", ")", "]", "store", "(", "list", "(", "data", ".", "values", "(", ")", ")", ",", "dsets", ")" ]
store arrays in hdf5 file this saves several dask arrays into several datapaths in an hdf5 file .
train
false
42,069
def get_available_locales(): global available_locales if (not available_locales): available_locales = [] for locale in get_locales(): parsed_locale = Locale.parse(locale) parsed_locale.short_name = locale parsed_locale.identifier = get_identifier_from_locale_class(parsed_locale) available_locales.append(parsed_locale) return available_locales
[ "def", "get_available_locales", "(", ")", ":", "global", "available_locales", "if", "(", "not", "available_locales", ")", ":", "available_locales", "=", "[", "]", "for", "locale", "in", "get_locales", "(", ")", ":", "parsed_locale", "=", "Locale", ".", "parse", "(", "locale", ")", "parsed_locale", ".", "short_name", "=", "locale", "parsed_locale", ".", "identifier", "=", "get_identifier_from_locale_class", "(", "parsed_locale", ")", "available_locales", ".", "append", "(", "parsed_locale", ")", "return", "available_locales" ]
get a list of the available locales e .
train
false
42,070
def token_list_width(tokenlist): ZeroWidthEscape = Token.ZeroWidthEscape return sum((get_cwidth(c) for item in tokenlist for c in item[1] if (item[0] != ZeroWidthEscape)))
[ "def", "token_list_width", "(", "tokenlist", ")", ":", "ZeroWidthEscape", "=", "Token", ".", "ZeroWidthEscape", "return", "sum", "(", "(", "get_cwidth", "(", "c", ")", "for", "item", "in", "tokenlist", "for", "c", "in", "item", "[", "1", "]", "if", "(", "item", "[", "0", "]", "!=", "ZeroWidthEscape", ")", ")", ")" ]
return the character width of this token list .
train
true
42,071
def verifyInstructorStatus(course, instructor): if (type(course) == str): course = db((db.courses.course_name == course)).select(db.courses.id).first() return (db(((db.course_instructor.course == course) & (db.course_instructor.instructor == instructor))).count() > 0)
[ "def", "verifyInstructorStatus", "(", "course", ",", "instructor", ")", ":", "if", "(", "type", "(", "course", ")", "==", "str", ")", ":", "course", "=", "db", "(", "(", "db", ".", "courses", ".", "course_name", "==", "course", ")", ")", ".", "select", "(", "db", ".", "courses", ".", "id", ")", ".", "first", "(", ")", "return", "(", "db", "(", "(", "(", "db", ".", "course_instructor", ".", "course", "==", "course", ")", "&", "(", "db", ".", "course_instructor", ".", "instructor", "==", "instructor", ")", ")", ")", ".", "count", "(", ")", ">", "0", ")" ]
make sure that the instructor specified is actually an instructor for the given course .
train
false
42,072
def _migrate_states_schema(versioned_exploration_states): states_schema_version = versioned_exploration_states['states_schema_version'] if ((states_schema_version is None) or (states_schema_version < 1)): states_schema_version = 0 if (not (0 <= states_schema_version <= feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)): raise Exception(('Sorry, we can only process v1-v%d and unversioned exploration state schemas at present.' % feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)) while (states_schema_version < feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION): exp_domain.Exploration.update_states_from_model(versioned_exploration_states, states_schema_version) states_schema_version += 1
[ "def", "_migrate_states_schema", "(", "versioned_exploration_states", ")", ":", "states_schema_version", "=", "versioned_exploration_states", "[", "'states_schema_version'", "]", "if", "(", "(", "states_schema_version", "is", "None", ")", "or", "(", "states_schema_version", "<", "1", ")", ")", ":", "states_schema_version", "=", "0", "if", "(", "not", "(", "0", "<=", "states_schema_version", "<=", "feconf", ".", "CURRENT_EXPLORATION_STATES_SCHEMA_VERSION", ")", ")", ":", "raise", "Exception", "(", "(", "'Sorry, we can only process v1-v%d and unversioned exploration state schemas at present.'", "%", "feconf", ".", "CURRENT_EXPLORATION_STATES_SCHEMA_VERSION", ")", ")", "while", "(", "states_schema_version", "<", "feconf", ".", "CURRENT_EXPLORATION_STATES_SCHEMA_VERSION", ")", ":", "exp_domain", ".", "Exploration", ".", "update_states_from_model", "(", "versioned_exploration_states", ",", "states_schema_version", ")", "states_schema_version", "+=", "1" ]
holds the responsibility of performing a step-by-step .
train
false
42,074
def _process_times(inst, use_times, n_peaks=None, few=False): if isinstance(use_times, string_types): if (use_times == 'peaks'): if (n_peaks is None): n_peaks = min((3 if few else 7), len(inst.times)) use_times = _find_peaks(inst, n_peaks) elif (use_times == 'auto'): if (n_peaks is None): n_peaks = min((5 if few else 10), len(use_times)) use_times = np.linspace(inst.times[0], inst.times[(-1)], n_peaks) else: raise ValueError("Got an unrecognized method for `times`. Only 'peaks' and 'auto' are supported (or directly passing numbers).") elif np.isscalar(use_times): use_times = [use_times] use_times = np.array(use_times, float) if (use_times.ndim != 1): raise ValueError(('times must be 1D, got %d dimensions' % use_times.ndim)) if (len(use_times) > 20): raise RuntimeError('Too many plots requested. Please pass fewer than 20 time instants.') return use_times
[ "def", "_process_times", "(", "inst", ",", "use_times", ",", "n_peaks", "=", "None", ",", "few", "=", "False", ")", ":", "if", "isinstance", "(", "use_times", ",", "string_types", ")", ":", "if", "(", "use_times", "==", "'peaks'", ")", ":", "if", "(", "n_peaks", "is", "None", ")", ":", "n_peaks", "=", "min", "(", "(", "3", "if", "few", "else", "7", ")", ",", "len", "(", "inst", ".", "times", ")", ")", "use_times", "=", "_find_peaks", "(", "inst", ",", "n_peaks", ")", "elif", "(", "use_times", "==", "'auto'", ")", ":", "if", "(", "n_peaks", "is", "None", ")", ":", "n_peaks", "=", "min", "(", "(", "5", "if", "few", "else", "10", ")", ",", "len", "(", "use_times", ")", ")", "use_times", "=", "np", ".", "linspace", "(", "inst", ".", "times", "[", "0", "]", ",", "inst", ".", "times", "[", "(", "-", "1", ")", "]", ",", "n_peaks", ")", "else", ":", "raise", "ValueError", "(", "\"Got an unrecognized method for `times`. Only 'peaks' and 'auto' are supported (or directly passing numbers).\"", ")", "elif", "np", ".", "isscalar", "(", "use_times", ")", ":", "use_times", "=", "[", "use_times", "]", "use_times", "=", "np", ".", "array", "(", "use_times", ",", "float", ")", "if", "(", "use_times", ".", "ndim", "!=", "1", ")", ":", "raise", "ValueError", "(", "(", "'times must be 1D, got %d dimensions'", "%", "use_times", ".", "ndim", ")", ")", "if", "(", "len", "(", "use_times", ")", ">", "20", ")", ":", "raise", "RuntimeError", "(", "'Too many plots requested. Please pass fewer than 20 time instants.'", ")", "return", "use_times" ]
helper to return a list of times for topomaps .
train
false
42,076
def _distinct_impl(expr, op, **kw): return UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type)
[ "def", "_distinct_impl", "(", "expr", ",", "op", ",", "**", "kw", ")", ":", "return", "UnaryExpression", "(", "expr", ",", "operator", "=", "operators", ".", "distinct_op", ",", "type_", "=", "expr", ".", "type", ")" ]
see :meth: .
train
false
42,077
def _unpack_queue_props(b, length, offset=0): if ((len(b) - offset) < length): raise UnderrunError props = [] end = (length + offset) while (offset < end): (t, l) = struct.unpack_from('!HH', b, offset) if ((len(b) - offset) < l): raise UnderrunError a = _queue_prop_type_to_class.get(t) if (a is None): a = ofp_queue_prop_generic() else: a = a() a.unpack(b[offset:(offset + l)]) assert (len(a) == l) props.append(a) offset += l return (offset, props)
[ "def", "_unpack_queue_props", "(", "b", ",", "length", ",", "offset", "=", "0", ")", ":", "if", "(", "(", "len", "(", "b", ")", "-", "offset", ")", "<", "length", ")", ":", "raise", "UnderrunError", "props", "=", "[", "]", "end", "=", "(", "length", "+", "offset", ")", "while", "(", "offset", "<", "end", ")", ":", "(", "t", ",", "l", ")", "=", "struct", ".", "unpack_from", "(", "'!HH'", ",", "b", ",", "offset", ")", "if", "(", "(", "len", "(", "b", ")", "-", "offset", ")", "<", "l", ")", ":", "raise", "UnderrunError", "a", "=", "_queue_prop_type_to_class", ".", "get", "(", "t", ")", "if", "(", "a", "is", "None", ")", ":", "a", "=", "ofp_queue_prop_generic", "(", ")", "else", ":", "a", "=", "a", "(", ")", "a", ".", "unpack", "(", "b", "[", "offset", ":", "(", "offset", "+", "l", ")", "]", ")", "assert", "(", "len", "(", "a", ")", "==", "l", ")", "props", ".", "append", "(", "a", ")", "offset", "+=", "l", "return", "(", "offset", ",", "props", ")" ]
parses queue props from a buffer b is a buffer offset .
train
false
42,078
def getmodebands(mode): return len(ImageMode.getmode(mode).bands)
[ "def", "getmodebands", "(", "mode", ")", ":", "return", "len", "(", "ImageMode", ".", "getmode", "(", "mode", ")", ".", "bands", ")" ]
gets the number of individual bands for this mode .
train
false
42,079
def get_writer_session(): return context_manager.writer.get_sessionmaker()()
[ "def", "get_writer_session", "(", ")", ":", "return", "context_manager", ".", "writer", ".", "get_sessionmaker", "(", ")", "(", ")" ]
helper to get writer session .
train
false
42,080
def get_minimum_allocatable_size(): return int(environ.get('FLOCKER_FUNCTIONAL_TEST_MINIMUM_ALLOCATABLE_SIZE', GiB(1).to_Byte().value))
[ "def", "get_minimum_allocatable_size", "(", ")", ":", "return", "int", "(", "environ", ".", "get", "(", "'FLOCKER_FUNCTIONAL_TEST_MINIMUM_ALLOCATABLE_SIZE'", ",", "GiB", "(", "1", ")", ".", "to_Byte", "(", ")", ".", "value", ")", ")" ]
return the minimum supported volume size .
train
false
42,082
@not_implemented_for('undirected') def strongly_connected_components_recursive(G): def visit(v, cnt): root[v] = cnt visited[v] = cnt cnt += 1 stack.append(v) for w in G[v]: if (w not in visited): for c in visit(w, cnt): (yield c) if (w not in component): root[v] = min(root[v], root[w]) if (root[v] == visited[v]): component[v] = root[v] tmpc = {v} while (stack[(-1)] != v): w = stack.pop() component[w] = root[v] tmpc.add(w) stack.remove(v) (yield tmpc) visited = {} component = {} root = {} cnt = 0 stack = [] for source in G: if (source not in visited): for c in visit(source, cnt): (yield c)
[ "@", "not_implemented_for", "(", "'undirected'", ")", "def", "strongly_connected_components_recursive", "(", "G", ")", ":", "def", "visit", "(", "v", ",", "cnt", ")", ":", "root", "[", "v", "]", "=", "cnt", "visited", "[", "v", "]", "=", "cnt", "cnt", "+=", "1", "stack", ".", "append", "(", "v", ")", "for", "w", "in", "G", "[", "v", "]", ":", "if", "(", "w", "not", "in", "visited", ")", ":", "for", "c", "in", "visit", "(", "w", ",", "cnt", ")", ":", "(", "yield", "c", ")", "if", "(", "w", "not", "in", "component", ")", ":", "root", "[", "v", "]", "=", "min", "(", "root", "[", "v", "]", ",", "root", "[", "w", "]", ")", "if", "(", "root", "[", "v", "]", "==", "visited", "[", "v", "]", ")", ":", "component", "[", "v", "]", "=", "root", "[", "v", "]", "tmpc", "=", "{", "v", "}", "while", "(", "stack", "[", "(", "-", "1", ")", "]", "!=", "v", ")", ":", "w", "=", "stack", ".", "pop", "(", ")", "component", "[", "w", "]", "=", "root", "[", "v", "]", "tmpc", ".", "add", "(", "w", ")", "stack", ".", "remove", "(", "v", ")", "(", "yield", "tmpc", ")", "visited", "=", "{", "}", "component", "=", "{", "}", "root", "=", "{", "}", "cnt", "=", "0", "stack", "=", "[", "]", "for", "source", "in", "G", ":", "if", "(", "source", "not", "in", "visited", ")", ":", "for", "c", "in", "visit", "(", "source", ",", "cnt", ")", ":", "(", "yield", "c", ")" ]
generate nodes in strongly connected components of graph .
train
false
42,083
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get the repository constructor .
train
false
42,084
def generate_course_key(org, number, run): default_store = os.environ.get('DEFAULT_STORE', 'draft') return CourseLocator(org, number, run, deprecated=(default_store == 'draft'))
[ "def", "generate_course_key", "(", "org", ",", "number", ",", "run", ")", ":", "default_store", "=", "os", ".", "environ", ".", "get", "(", "'DEFAULT_STORE'", ",", "'draft'", ")", "return", "CourseLocator", "(", "org", ",", "number", ",", "run", ",", "deprecated", "=", "(", "default_store", "==", "'draft'", ")", ")" ]
makes a courselocator from org .
train
false
42,085
def is_orthonormal_cont(polys, lower, upper, rtol=0, atol=1e-08): for i in range(len(polys)): for j in range((i + 1)): p1 = polys[i] p2 = polys[j] innerprod = integrate.quad((lambda x: (p1(x) * p2(x))), lower, upper)[0] if (not np.allclose(innerprod, (i == j), rtol=rtol, atol=atol)): return False return True
[ "def", "is_orthonormal_cont", "(", "polys", ",", "lower", ",", "upper", ",", "rtol", "=", "0", ",", "atol", "=", "1e-08", ")", ":", "for", "i", "in", "range", "(", "len", "(", "polys", ")", ")", ":", "for", "j", "in", "range", "(", "(", "i", "+", "1", ")", ")", ":", "p1", "=", "polys", "[", "i", "]", "p2", "=", "polys", "[", "j", "]", "innerprod", "=", "integrate", ".", "quad", "(", "(", "lambda", "x", ":", "(", "p1", "(", "x", ")", "*", "p2", "(", "x", ")", ")", ")", ",", "lower", ",", "upper", ")", "[", "0", "]", "if", "(", "not", "np", ".", "allclose", "(", "innerprod", ",", "(", "i", "==", "j", ")", ",", "rtol", "=", "rtol", ",", "atol", "=", "atol", ")", ")", ":", "return", "False", "return", "True" ]
check whether functions are orthonormal parameters polys : list of polynomials or function returns is_orthonormal : bool is false if the innerproducts are not close to 0 or 1 notes this stops as soon as the first deviation from orthonormality is found .
train
false
42,086
def downloaded(name, artifact, target_dir='/tmp', target_file=None): log.debug(' ======================== STATE: artifactory.downloaded (name: %s) ', name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} try: fetch_result = __fetch_from_artifactory(artifact, target_dir, target_file) log.debug('fetch_result=%s', str(fetch_result)) ret['result'] = fetch_result['status'] ret['comment'] = fetch_result['comment'] ret['changes'] = fetch_result['changes'] log.debug('ret=%s', str(ret)) return ret except Exception as exc: ret['result'] = False ret['comment'] = exc return ret
[ "def", "downloaded", "(", "name", ",", "artifact", ",", "target_dir", "=", "'/tmp'", ",", "target_file", "=", "None", ")", ":", "log", ".", "debug", "(", "' ======================== STATE: artifactory.downloaded (name: %s) '", ",", "name", ")", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "try", ":", "fetch_result", "=", "__fetch_from_artifactory", "(", "artifact", ",", "target_dir", ",", "target_file", ")", "log", ".", "debug", "(", "'fetch_result=%s'", ",", "str", "(", "fetch_result", ")", ")", "ret", "[", "'result'", "]", "=", "fetch_result", "[", "'status'", "]", "ret", "[", "'comment'", "]", "=", "fetch_result", "[", "'comment'", "]", "ret", "[", "'changes'", "]", "=", "fetch_result", "[", "'changes'", "]", "log", ".", "debug", "(", "'ret=%s'", ",", "str", "(", "ret", ")", ")", "return", "ret", "except", "Exception", "as", "exc", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "exc", "return", "ret" ]
ensures that the artifact from artifactory exists at given location .
train
false
42,087
def test_SymmetricalLogLocator_set_params(): sym = mticker.SymmetricalLogLocator(base=10, linthresh=1) sym.set_params(subs=[2.0], numticks=8) assert (sym._subs == [2.0]) assert (sym.numticks == 8)
[ "def", "test_SymmetricalLogLocator_set_params", "(", ")", ":", "sym", "=", "mticker", ".", "SymmetricalLogLocator", "(", "base", "=", "10", ",", "linthresh", "=", "1", ")", "sym", ".", "set_params", "(", "subs", "=", "[", "2.0", "]", ",", "numticks", "=", "8", ")", "assert", "(", "sym", ".", "_subs", "==", "[", "2.0", "]", ")", "assert", "(", "sym", ".", "numticks", "==", "8", ")" ]
create symmetrical log locator with default subs =[1 .
train
false
42,088
def _get_prefix_length(number1, number2, bits): for i in range(bits): if ((number1 >> i) == (number2 >> i)): return (bits - i) return 0
[ "def", "_get_prefix_length", "(", "number1", ",", "number2", ",", "bits", ")", ":", "for", "i", "in", "range", "(", "bits", ")", ":", "if", "(", "(", "number1", ">>", "i", ")", "==", "(", "number2", ">>", "i", ")", ")", ":", "return", "(", "bits", "-", "i", ")", "return", "0" ]
get the number of leading bits that are same for two numbers .
train
true
42,094
def type_to_str(t): if (not hasattr(t, 'broadcastable')): return str(t) s = broadcastable_to_str(t.broadcastable) if (s == ''): s = str(t.dtype) else: s = (dtype_to_char(t.dtype) + s) return s
[ "def", "type_to_str", "(", "t", ")", ":", "if", "(", "not", "hasattr", "(", "t", ",", "'broadcastable'", ")", ")", ":", "return", "str", "(", "t", ")", "s", "=", "broadcastable_to_str", "(", "t", ".", "broadcastable", ")", "if", "(", "s", "==", "''", ")", ":", "s", "=", "str", "(", "t", ".", "dtype", ")", "else", ":", "s", "=", "(", "dtype_to_char", "(", "t", ".", "dtype", ")", "+", "s", ")", "return", "s" ]
return str of variable type .
train
false
42,095
def pick_math_environment(code, numbered=False): chunks = code.split('\\begin{') toplevel_code = ''.join([chunk.split('\\end{')[(-1)] for chunk in chunks]) if (toplevel_code.find('\\\\') >= 0): env = 'align' else: env = 'equation' if (not numbered): env += '*' return env
[ "def", "pick_math_environment", "(", "code", ",", "numbered", "=", "False", ")", ":", "chunks", "=", "code", ".", "split", "(", "'\\\\begin{'", ")", "toplevel_code", "=", "''", ".", "join", "(", "[", "chunk", ".", "split", "(", "'\\\\end{'", ")", "[", "(", "-", "1", ")", "]", "for", "chunk", "in", "chunks", "]", ")", "if", "(", "toplevel_code", ".", "find", "(", "'\\\\\\\\'", ")", ">=", "0", ")", ":", "env", "=", "'align'", "else", ":", "env", "=", "'equation'", "if", "(", "not", "numbered", ")", ":", "env", "+=", "'*'", "return", "env" ]
return the right math environment to display code .
train
false
42,096
def orth(x_in, y_in): x = np.array(x_in)[:, None] y = np.array(y_in)[:, None] y = (y - np.dot(x, np.dot(np.linalg.inv(np.dot(x.T, x)), np.dot(x.T, y)))) if (np.linalg.norm(y, 1) > np.exp((-32))): y = y[:, 0].tolist() else: y = y_in return y
[ "def", "orth", "(", "x_in", ",", "y_in", ")", ":", "x", "=", "np", ".", "array", "(", "x_in", ")", "[", ":", ",", "None", "]", "y", "=", "np", ".", "array", "(", "y_in", ")", "[", ":", ",", "None", "]", "y", "=", "(", "y", "-", "np", ".", "dot", "(", "x", ",", "np", ".", "dot", "(", "np", ".", "linalg", ".", "inv", "(", "np", ".", "dot", "(", "x", ".", "T", ",", "x", ")", ")", ",", "np", ".", "dot", "(", "x", ".", "T", ",", "y", ")", ")", ")", ")", "if", "(", "np", ".", "linalg", ".", "norm", "(", "y", ",", "1", ")", ">", "np", ".", "exp", "(", "(", "-", "32", ")", ")", ")", ":", "y", "=", "y", "[", ":", ",", "0", "]", ".", "tolist", "(", ")", "else", ":", "y", "=", "y_in", "return", "y" ]
orthoganlize y_in with respect to x_in .
train
false
42,097
def _load_tests(name): loader = TestLoader() return loader.loadByName(name, recurse=True)
[ "def", "_load_tests", "(", "name", ")", ":", "loader", "=", "TestLoader", "(", ")", "return", "loader", ".", "loadByName", "(", "name", ",", "recurse", "=", "True", ")" ]
find all the tests under name .
train
false
42,098
def hdmi_boost_custom_default(): ' Yet to be implemented ' return '2'
[ "def", "hdmi_boost_custom_default", "(", ")", ":", "return", "'2'" ]
tests the users system to see which hdmi_boost figure should be used .
train
false
42,100
def get_default_naming_series(doctype): naming_series = (frappe.get_meta(doctype).get_field(u'naming_series').options or u'') if naming_series: naming_series = naming_series.split(u'\n') return (naming_series[0] or naming_series[1]) else: return None
[ "def", "get_default_naming_series", "(", "doctype", ")", ":", "naming_series", "=", "(", "frappe", ".", "get_meta", "(", "doctype", ")", ".", "get_field", "(", "u'naming_series'", ")", ".", "options", "or", "u''", ")", "if", "naming_series", ":", "naming_series", "=", "naming_series", ".", "split", "(", "u'\\n'", ")", "return", "(", "naming_series", "[", "0", "]", "or", "naming_series", "[", "1", "]", ")", "else", ":", "return", "None" ]
get default value for naming_series property .
train
false
42,101
def open_database(database, mode=MODE_AUTO): if (((mode == MODE_AUTO) and maxminddb.extension and hasattr(maxminddb.extension, 'Reader')) or (mode == MODE_MMAP_EXT)): return maxminddb.extension.Reader(database) elif (mode in (MODE_AUTO, MODE_MMAP, MODE_FILE, MODE_MEMORY)): return maxminddb.reader.Reader(database, mode) raise ValueError('Unsupported open mode: {0}'.format(mode))
[ "def", "open_database", "(", "database", ",", "mode", "=", "MODE_AUTO", ")", ":", "if", "(", "(", "(", "mode", "==", "MODE_AUTO", ")", "and", "maxminddb", ".", "extension", "and", "hasattr", "(", "maxminddb", ".", "extension", ",", "'Reader'", ")", ")", "or", "(", "mode", "==", "MODE_MMAP_EXT", ")", ")", ":", "return", "maxminddb", ".", "extension", ".", "Reader", "(", "database", ")", "elif", "(", "mode", "in", "(", "MODE_AUTO", ",", "MODE_MMAP", ",", "MODE_FILE", ",", "MODE_MEMORY", ")", ")", ":", "return", "maxminddb", ".", "reader", ".", "Reader", "(", "database", ",", "mode", ")", "raise", "ValueError", "(", "'Unsupported open mode: {0}'", ".", "format", "(", "mode", ")", ")" ]
open a maxmind db database arguments: database -- a path to a valid maxmind db file such as a geoip2 database file .
train
false
42,103
def getLoopOrEmpty(loopIndex, loopLayers): if ((loopIndex < 0) or (loopIndex >= len(loopLayers))): return [] return loopLayers[loopIndex].loops[0]
[ "def", "getLoopOrEmpty", "(", "loopIndex", ",", "loopLayers", ")", ":", "if", "(", "(", "loopIndex", "<", "0", ")", "or", "(", "loopIndex", ">=", "len", "(", "loopLayers", ")", ")", ")", ":", "return", "[", "]", "return", "loopLayers", "[", "loopIndex", "]", ".", "loops", "[", "0", "]" ]
get the loop .
train
false
42,106
def stop_app(app_name): if (not misc.is_app_name_valid(app_name)): logging.error(('Unable to kill app process %s on because of invalid name for application' % app_name)) return False logging.info(('Stopping application %s' % app_name)) watch = ('app___' + app_name) monit_result = monit_interface.stop(watch) if (not monit_result): logging.error(('Unable to shut down monit interface for watch %s' % watch)) return False config_files = glob.glob('{}/appscale-{}-*.cfg'.format(MONIT_CONFIG_DIR, watch)) for config_file in config_files: try: os.remove(config_file) except OSError: logging.exception('Error removing {}'.format(config_file)) if (not remove_logrotate(app_name)): logging.error('Error while setting up log rotation for application: {}'.format(app_name)) return True
[ "def", "stop_app", "(", "app_name", ")", ":", "if", "(", "not", "misc", ".", "is_app_name_valid", "(", "app_name", ")", ")", ":", "logging", ".", "error", "(", "(", "'Unable to kill app process %s on because of invalid name for application'", "%", "app_name", ")", ")", "return", "False", "logging", ".", "info", "(", "(", "'Stopping application %s'", "%", "app_name", ")", ")", "watch", "=", "(", "'app___'", "+", "app_name", ")", "monit_result", "=", "monit_interface", ".", "stop", "(", "watch", ")", "if", "(", "not", "monit_result", ")", ":", "logging", ".", "error", "(", "(", "'Unable to shut down monit interface for watch %s'", "%", "watch", ")", ")", "return", "False", "config_files", "=", "glob", ".", "glob", "(", "'{}/appscale-{}-*.cfg'", ".", "format", "(", "MONIT_CONFIG_DIR", ",", "watch", ")", ")", "for", "config_file", "in", "config_files", ":", "try", ":", "os", ".", "remove", "(", "config_file", ")", "except", "OSError", ":", "logging", ".", "exception", "(", "'Error removing {}'", ".", "format", "(", "config_file", ")", ")", "if", "(", "not", "remove_logrotate", "(", "app_name", ")", ")", ":", "logging", ".", "error", "(", "'Error while setting up log rotation for application: {}'", ".", "format", "(", "app_name", ")", ")", "return", "True" ]
stops all process instances of a google app engine application on this machine .
train
false
42,108
def convert_ipv4_to_ipv6(ipv4): converted_ip = '::ffff:' split_ipaddress = ipv4.split('.') try: socket.inet_aton(ipv4) except socket.error: raise ValueError('ipv4 to be converted is invalid') if (len(split_ipaddress) != 4): raise ValueError('ipv4 address is not in dotted quad format') for (index, string) in enumerate(split_ipaddress): if (index != 1): test = str(hex(int(string)).split('x')[1]) if (len(test) == 1): final = '0' final += test test = final else: test = str(hex(int(string)).split('x')[1]) if (len(test) == 1): final = '0' final += (test + ':') test = final else: test += ':' converted_ip += test return converted_ip
[ "def", "convert_ipv4_to_ipv6", "(", "ipv4", ")", ":", "converted_ip", "=", "'::ffff:'", "split_ipaddress", "=", "ipv4", ".", "split", "(", "'.'", ")", "try", ":", "socket", ".", "inet_aton", "(", "ipv4", ")", "except", "socket", ".", "error", ":", "raise", "ValueError", "(", "'ipv4 to be converted is invalid'", ")", "if", "(", "len", "(", "split_ipaddress", ")", "!=", "4", ")", ":", "raise", "ValueError", "(", "'ipv4 address is not in dotted quad format'", ")", "for", "(", "index", ",", "string", ")", "in", "enumerate", "(", "split_ipaddress", ")", ":", "if", "(", "index", "!=", "1", ")", ":", "test", "=", "str", "(", "hex", "(", "int", "(", "string", ")", ")", ".", "split", "(", "'x'", ")", "[", "1", "]", ")", "if", "(", "len", "(", "test", ")", "==", "1", ")", ":", "final", "=", "'0'", "final", "+=", "test", "test", "=", "final", "else", ":", "test", "=", "str", "(", "hex", "(", "int", "(", "string", ")", ")", ".", "split", "(", "'x'", ")", "[", "1", "]", ")", "if", "(", "len", "(", "test", ")", "==", "1", ")", ":", "final", "=", "'0'", "final", "+=", "(", "test", "+", "':'", ")", "test", "=", "final", "else", ":", "test", "+=", "':'", "converted_ip", "+=", "test", "return", "converted_ip" ]
translates a passed in string of an ipv4 address to an ipv6 address .
train
false
42,110
def maybe_multipart_byteranges_to_document_iters(app_iter, content_type): (content_type, params_list) = parse_content_type(content_type) if (content_type != 'multipart/byteranges'): (yield app_iter) return body_file = FileLikeIter(app_iter) boundary = dict(params_list)['boundary'] for (_headers, body) in mime_to_document_iters(body_file, boundary): (yield (chunk for chunk in iter((lambda : body.read(65536)), '')))
[ "def", "maybe_multipart_byteranges_to_document_iters", "(", "app_iter", ",", "content_type", ")", ":", "(", "content_type", ",", "params_list", ")", "=", "parse_content_type", "(", "content_type", ")", "if", "(", "content_type", "!=", "'multipart/byteranges'", ")", ":", "(", "yield", "app_iter", ")", "return", "body_file", "=", "FileLikeIter", "(", "app_iter", ")", "boundary", "=", "dict", "(", "params_list", ")", "[", "'boundary'", "]", "for", "(", "_headers", ",", "body", ")", "in", "mime_to_document_iters", "(", "body_file", ",", "boundary", ")", ":", "(", "yield", "(", "chunk", "for", "chunk", "in", "iter", "(", "(", "lambda", ":", "body", ".", "read", "(", "65536", ")", ")", ",", "''", ")", ")", ")" ]
takes an iterator that may or may not contain a multipart mime document as well as content type and returns an iterator of body iterators .
train
false
42,112
@blocking_call_on_reactor_thread
def can_edit_channel(channel_id, channel_vote):
    """Return True when the given channel may be edited.

    Editing is permitted for moderators of the channel's community, or
    when casting a subscribe vote on an open channel. Returns False if
    dispersy is unavailable or the community cannot be found.
    """
    # Imported locally to avoid a circular import at module load time.
    from Tribler.Core.Session import Session

    tribler_session = Session.get_instance()
    if not tribler_session.get_dispersy():
        return False
    dispersy = tribler_session.get_dispersy_instance()
    try:
        community = dispersy.get_community(channel_id)
        channel_type, is_mod = community.get_channel_mode()
    except CommunityNotFoundException:
        return False
    if is_mod:
        return True
    return (channel_vote == VOTE_SUBSCRIBE) and (channel_type == ChannelCommunity.CHANNEL_OPEN)
[ "@", "blocking_call_on_reactor_thread", "def", "can_edit_channel", "(", "channel_id", ",", "channel_vote", ")", ":", "from", "Tribler", ".", "Core", ".", "Session", "import", "Session", "if", "Session", ".", "get_instance", "(", ")", ".", "get_dispersy", "(", ")", ":", "dispersy", "=", "Session", ".", "get_instance", "(", ")", ".", "get_dispersy_instance", "(", ")", "try", ":", "cmty", "=", "dispersy", ".", "get_community", "(", "channel_id", ")", "(", "channel_type", ",", "is_mod", ")", "=", "cmty", ".", "get_channel_mode", "(", ")", "if", "(", "is_mod", "or", "(", "(", "channel_vote", "==", "VOTE_SUBSCRIBE", ")", "and", "(", "channel_type", "==", "ChannelCommunity", ".", "CHANNEL_OPEN", ")", ")", ")", ":", "return", "True", "except", "CommunityNotFoundException", ":", "return", "False", "return", "False" ]
this method returns whether the channel can be edited or not .
train
false
42,113
def start_scheduler():
    """Run enqueue_events_for_all_sites on a fixed interval, forever.

    The interval comes from the site config's ``scheduler_interval`` and
    defaults to 240 seconds when unset.
    """
    interval_seconds = frappe.get_conf().scheduler_interval or 240
    schedule.every(interval_seconds).seconds.do(enqueue_events_for_all_sites)
    # Poll the scheduler once per second; this call never returns.
    while True:
        schedule.run_pending()
        time.sleep(1)
[ "def", "start_scheduler", "(", ")", ":", "interval", "=", "(", "frappe", ".", "get_conf", "(", ")", ".", "scheduler_interval", "or", "240", ")", "schedule", ".", "every", "(", "interval", ")", ".", "seconds", ".", "do", "(", "enqueue_events_for_all_sites", ")", "while", "True", ":", "schedule", ".", "run_pending", "(", ")", "time", ".", "sleep", "(", "1", ")" ]
run enqueue_events_for_all_sites periodically (default: every 240 seconds, configurable via scheduler_interval) .
train
false
42,114
def simulate_user_session(session, user, previous_user_session=None):
    """Create and persist one simulated UserSession for *user*.

    The login time is the user's join date for the first session;
    otherwise it falls a random amount of time (one day plus up to
    SECONDS_IN_DAY seconds) after the previous session's logout. The
    session lasts between 1 and 59 seconds.

    :param session: DB session (SQLAlchemy-style) used for persistence.
    :param user: user record providing ``id`` and ``date_joined``.
    :param previous_user_session: the previously simulated session, or None.
    :returns: the newly created UserSession.
    """
    login_time = user.date_joined
    if previous_user_session is not None:
        login_time = previous_user_session.logout_time + datetime.timedelta(
            days=1, seconds=random.randrange(SECONDS_IN_DAY))
    user_session = UserSession(
        id=str(uuid.uuid4()),
        user_id=user.id,
        login_time=login_time,
        ip_address=random_ip(),
    )
    user_session.logout_time = login_time + datetime.timedelta(
        seconds=1 + random.randrange(59))
    # BUG FIX: the original called session.commit() BEFORE session.add(),
    # leaving the new row pending until some unrelated later commit.
    # Add first, then commit, so this function actually persists its work.
    session.add(user_session)
    session.commit()
    return user_session
[ "def", "simulate_user_session", "(", "session", ",", "user", ",", "previous_user_session", "=", "None", ")", ":", "login_time", "=", "user", ".", "date_joined", "if", "(", "previous_user_session", "is", "not", "None", ")", ":", "login_time", "=", "(", "previous_user_session", ".", "logout_time", "+", "datetime", ".", "timedelta", "(", "days", "=", "1", ",", "seconds", "=", "random", ".", "randrange", "(", "SECONDS_IN_DAY", ")", ")", ")", "session_id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "user_session", "=", "UserSession", "(", "id", "=", "session_id", ",", "user_id", "=", "user", ".", "id", ",", "login_time", "=", "login_time", ",", "ip_address", "=", "random_ip", "(", ")", ")", "user_session", ".", "logout_time", "=", "(", "login_time", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "(", "1", "+", "random", ".", "randrange", "(", "59", ")", ")", ")", ")", "session", ".", "commit", "(", ")", "session", ".", "add", "(", "user_session", ")", "return", "user_session" ]
simulates a single session of a user's history .
train
false
42,115
@task
def install_crontab():
    """Install the node-type specific crontab on the target host.

    Installs ``~/viewfinder/scripts/crontab.<nodetype>`` if it exists on
    the remote host; otherwise removes the host's current crontab.
    """
    assert env.nodetype, 'no nodetype specified'
    assert env.host_string, 'no hosts specified'
    cron_file = '~/viewfinder/scripts/crontab.%s' % env.nodetype.lower()
    remote_cmd = 'if [ -e %s ]; then crontab %s; else crontab -r; fi' % (cron_file, cron_file)
    # warn_only: a missing crontab (crontab -r on an empty table) must not abort the task.
    with settings(warn_only=True):
        run(remote_cmd)
[ "@", "task", "def", "install_crontab", "(", ")", ":", "assert", "env", ".", "nodetype", ",", "'no nodetype specified'", "assert", "env", ".", "host_string", ",", "'no hosts specified'", "cron_file", "=", "(", "'~/viewfinder/scripts/crontab.%s'", "%", "env", ".", "nodetype", ".", "lower", "(", ")", ")", "with", "settings", "(", "warn_only", "=", "True", ")", ":", "run", "(", "(", "'if [ -e %s ]; then crontab %s; else crontab -r; fi'", "%", "(", "cron_file", ",", "cron_file", ")", ")", ")" ]
install or remove crontab for given node type .
train
false