id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
17,939
def getIsMinimumSides(loops, sides=3):
    """Determine if every loop has at least the given number of sides."""
    return all(len(polygon) >= sides for polygon in loops)
[ "def", "getIsMinimumSides", "(", "loops", ",", "sides", "=", "3", ")", ":", "for", "loop", "in", "loops", ":", "if", "(", "len", "(", "loop", ")", "<", "sides", ")", ":", "return", "False", "return", "True" ]
determine if all the loops have at least the given number of sides .
train
false
17,940
def inter_community_edges(G, partition):
    """Return the number of inter-community edges for `partition` of G's nodes."""
    # Collapsing each community to one node turns every inter-community
    # edge into an edge of the (multi)quotient graph, so its size is the count.
    quotient = nx.quotient_graph(G, partition, create_using=nx.MultiGraph())
    return quotient.size()
[ "def", "inter_community_edges", "(", "G", ",", "partition", ")", ":", "return", "nx", ".", "quotient_graph", "(", "G", ",", "partition", ",", "create_using", "=", "nx", ".", "MultiGraph", "(", ")", ")", ".", "size", "(", ")" ]
returns the number of inter-community edges according to the given partition of the nodes of g .
train
false
17,941
def GramSchmidt(vlist, orthonormal=False):
    """Apply the Gram-Schmidt process to a set of vectors.

    Raises ValueError when the vectors are not linearly independent.
    With orthonormal=True the resulting basis is also normalized.
    """
    basis = []
    for idx in range(len(vlist)):
        candidate = vlist[idx]
        # Classical Gram-Schmidt: subtract projections onto earlier basis vectors.
        for prev in range(idx):
            candidate -= vlist[idx].project(basis[prev])
        if not candidate.values():
            raise ValueError('GramSchmidt: vector set not linearly independent')
        basis.append(candidate)
    if orthonormal:
        for idx in range(len(basis)):
            basis[idx] = basis[idx].normalized()
    return basis
[ "def", "GramSchmidt", "(", "vlist", ",", "orthonormal", "=", "False", ")", ":", "out", "=", "[", "]", "m", "=", "len", "(", "vlist", ")", "for", "i", "in", "range", "(", "m", ")", ":", "tmp", "=", "vlist", "[", "i", "]", "for", "j", "in", "range", "(", "i", ")", ":", "tmp", "-=", "vlist", "[", "i", "]", ".", "project", "(", "out", "[", "j", "]", ")", "if", "(", "not", "tmp", ".", "values", "(", ")", ")", ":", "raise", "ValueError", "(", "'GramSchmidt: vector set not linearly independent'", ")", "out", ".", "append", "(", "tmp", ")", "if", "orthonormal", ":", "for", "i", "in", "range", "(", "len", "(", "out", ")", ")", ":", "out", "[", "i", "]", "=", "out", "[", "i", "]", ".", "normalized", "(", ")", "return", "out" ]
apply the gram-schmidt process to a set of vectors .
train
false
17,942
def remove_sessions_for_user(user):
    """Permanently remove all stored sessions for `user` from the DB."""
    query = Q('data.auth_user_id', 'eq', user._id)
    Session.remove(query)
[ "def", "remove_sessions_for_user", "(", "user", ")", ":", "Session", ".", "remove", "(", "Q", "(", "'data.auth_user_id'", ",", "'eq'", ",", "user", ".", "_id", ")", ")" ]
permanently remove all stored sessions for the user from the db .
train
false
17,944
def groupmore(func=None, *its):
    """Sort then group several iterables in lockstep by `func` of the first one.

    Returns a generator of (key, grouped-columns) pairs.
    """
    if not func:
        func = lambda item: item
    # groupby needs its input sorted by the same key.
    its = sortmore(key=func, *its)
    rowkey = lambda row: func(row[0])
    grouped = itertools.groupby(zip(*its), rowkey)
    return ((key, zip(*rows)) for (key, rows) in grouped)
[ "def", "groupmore", "(", "func", "=", "None", ",", "*", "its", ")", ":", "if", "(", "not", "func", ")", ":", "func", "=", "(", "lambda", "x", ":", "x", ")", "its", "=", "sortmore", "(", "key", "=", "func", ",", "*", "its", ")", "nfunc", "=", "(", "lambda", "x", ":", "func", "(", "x", "[", "0", "]", ")", ")", "zipper", "=", "itertools", ".", "groupby", "(", "zip", "(", "*", "its", ")", ",", "nfunc", ")", "unzipper", "=", "(", "(", "key", ",", "zip", "(", "*", "groups", ")", ")", "for", "(", "key", ",", "groups", ")", "in", "zipper", ")", "return", "unzipper" ]
extends the itertools .
train
false
17,945
def initHooks(global_exceptions=True, thread_exceptions=True, pass_original=True):
    """Install exception-catching hooks that log uncaught exceptions.

    global_exceptions -- replace sys.excepthook with the logging hook.
    thread_exceptions -- monkey-patch threading.Thread.__init__ so each
        thread's run() is wrapped and its uncaught exceptions are logged.
    pass_original -- also forward to the original sys.__excepthook__.
    """
    def excepthook(*exception_info):
        # The hook itself must never raise, so swallow logging failures.
        try:
            message = ''.join(traceback.format_exception(*exception_info))
            logger.error('Uncaught exception: %s', message)
        except:
            pass
        if pass_original:
            sys.__excepthook__(*exception_info)
    if global_exceptions:
        sys.excepthook = excepthook
    if thread_exceptions:
        old_init = threading.Thread.__init__
        def new_init(self, *args, **kwargs):
            old_init(self, *args, **kwargs)
            # Wrap run() per-instance so exceptions escape to our hook,
            # while KeyboardInterrupt/SystemExit still propagate normally.
            old_run = self.run
            def new_run(*args, **kwargs):
                try:
                    old_run(*args, **kwargs)
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    excepthook(*sys.exc_info())
            self.run = new_run
        threading.Thread.__init__ = new_init
[ "def", "initHooks", "(", "global_exceptions", "=", "True", ",", "thread_exceptions", "=", "True", ",", "pass_original", "=", "True", ")", ":", "def", "excepthook", "(", "*", "exception_info", ")", ":", "try", ":", "message", "=", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "*", "exception_info", ")", ")", "logger", ".", "error", "(", "'Uncaught exception: %s'", ",", "message", ")", "except", ":", "pass", "if", "pass_original", ":", "sys", ".", "__excepthook__", "(", "*", "exception_info", ")", "if", "global_exceptions", ":", "sys", ".", "excepthook", "=", "excepthook", "if", "thread_exceptions", ":", "old_init", "=", "threading", ".", "Thread", ".", "__init__", "def", "new_init", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "old_init", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", "old_run", "=", "self", ".", "run", "def", "new_run", "(", "*", "args", ",", "**", "kwargs", ")", ":", "try", ":", "old_run", "(", "*", "args", ",", "**", "kwargs", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "raise", "except", ":", "excepthook", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "self", ".", "run", "=", "new_run", "threading", ".", "Thread", ".", "__init__", "=", "new_init" ]
this method installs exception catching mechanisms .
train
false
17,947
def binary_hinge_loss(predictions, targets, delta=1, log_odds=None, binary=True):
    """Computes the binary hinge loss between predictions and targets.

    predictions -- scores in log-odds space (or probabilities when
        log_odds=False, which are converted via the logit).
    targets -- 0/1 labels when binary=True; otherwise assumed in {-1, 1}.
    delta -- the hinge margin.
    """
    if (log_odds is None):
        # NOTE(review): this *raises* the FutureWarning rather than issuing
        # it, so the assignment below is unreachable — confirm intent.
        raise FutureWarning('The `log_odds` argument to `binary_hinge_loss` will change its default to `False` in a future version. Explicitly give `log_odds=True` to retain current behavior in your code, but also check the documentation if this is what you want.')
        log_odds = True
    if (not log_odds):
        # Convert probabilities to log-odds (logit transform).
        predictions = theano.tensor.log((predictions / (1 - predictions)))
    if binary:
        # Map {0, 1} targets onto {-1, 1}.
        targets = ((2 * targets) - 1)
    (predictions, targets) = align_targets(predictions, targets)
    return theano.tensor.nnet.relu((delta - (predictions * targets)))
[ "def", "binary_hinge_loss", "(", "predictions", ",", "targets", ",", "delta", "=", "1", ",", "log_odds", "=", "None", ",", "binary", "=", "True", ")", ":", "if", "(", "log_odds", "is", "None", ")", ":", "raise", "FutureWarning", "(", "'The `log_odds` argument to `binary_hinge_loss` will change its default to `False` in a future version. Explicitly give `log_odds=True` to retain current behavior in your code, but also check the documentation if this is what you want.'", ")", "log_odds", "=", "True", "if", "(", "not", "log_odds", ")", ":", "predictions", "=", "theano", ".", "tensor", ".", "log", "(", "(", "predictions", "/", "(", "1", "-", "predictions", ")", ")", ")", "if", "binary", ":", "targets", "=", "(", "(", "2", "*", "targets", ")", "-", "1", ")", "(", "predictions", ",", "targets", ")", "=", "align_targets", "(", "predictions", ",", "targets", ")", "return", "theano", ".", "tensor", ".", "nnet", ".", "relu", "(", "(", "delta", "-", "(", "predictions", "*", "targets", ")", ")", ")" ]
computes the binary hinge loss between predictions and targets .
train
false
17,948
@paver.easy.task
def upload_project_web():
    """Generate the project web page at SourceForge from the reST README."""
    import docutils.core
    docutils.core.publish_file(source_path='README', destination_path='readme.html', writer_name='html')
    # Windows ships pscp (PuTTY) rather than OpenSSH's scp.
    copier = 'pscp' if platform.system() == 'Windows' else 'scp'
    paver.easy.sh('{cmd} readme.html web.sourceforge.net:/home/project-web/python-irclib/htdocs/index.html'.format(cmd=copier))
    os.remove('readme.html')
[ "@", "paver", ".", "easy", ".", "task", "def", "upload_project_web", "(", ")", ":", "import", "docutils", ".", "core", "docutils", ".", "core", ".", "publish_file", "(", "source_path", "=", "'README'", ",", "destination_path", "=", "'readme.html'", ",", "writer_name", "=", "'html'", ")", "cmd", "=", "(", "'pscp'", "if", "(", "platform", ".", "system", "(", ")", "==", "'Windows'", ")", "else", "'scp'", ")", "paver", ".", "easy", ".", "sh", "(", "'{cmd} readme.html web.sourceforge.net:/home/project-web/python-irclib/htdocs/index.html'", ".", "format", "(", "cmd", "=", "cmd", ")", ")", "os", ".", "remove", "(", "'readme.html'", ")" ]
generate the project web page at sourceforge using the restructuredtext readme .
train
false
17,949
def check_for_language(lang_code):
    """Check whether a global Django language file exists for `lang_code`."""
    locale = to_locale(lang_code)
    return any(
        gettext_module.find('django', locale_path, [locale]) is not None
        for locale_path in all_locale_paths()
    )
[ "def", "check_for_language", "(", "lang_code", ")", ":", "for", "path", "in", "all_locale_paths", "(", ")", ":", "if", "(", "gettext_module", ".", "find", "(", "'django'", ",", "path", ",", "[", "to_locale", "(", "lang_code", ")", "]", ")", "is", "not", "None", ")", ":", "return", "True", "return", "False" ]
checks whether there is a global language file for the given language code .
train
false
17,950
def payload():
    """Decode the current request body according to its Content-Type.

    Supports application/json, application/x-www-form-urlencoded and
    multipart/form-data; aborts with 400 when the body is empty or the
    Content-Type is unknown/missing.
    """
    # Strip any parameters (e.g. "; charset=utf-8") from the header.
    content_type = request.headers.get('Content-Type', '').split(';')[0]
    if (content_type == 'application/json'):
        return request.get_json()
    elif (content_type == 'application/x-www-form-urlencoded'):
        return (multidict_to_dict(request.form) if len(request.form) else abort(400, description='No form-urlencoded data supplied'))
    elif (content_type == 'multipart/form-data'):
        if (len(request.form) or len(request.files)):
            formItems = MultiDict(request.form)
            if config.MULTIPART_FORM_FIELDS_AS_JSON:
                # Interpret each form value as JSON; values that do not
                # parse are re-quoted so they decode as JSON strings.
                for (key, lst) in formItems.lists():
                    new_lst = []
                    for value in lst:
                        try:
                            new_lst.append(json.loads(value))
                        except ValueError:
                            new_lst.append(json.loads('"{0}"'.format(value)))
                    formItems.setlist(key, new_lst)
            payload = CombinedMultiDict([formItems, request.files])
            return multidict_to_dict(payload)
        else:
            abort(400, description='No multipart/form-data supplied')
    else:
        abort(400, description='Unknown or no Content-Type header supplied')
[ "def", "payload", "(", ")", ":", "content_type", "=", "request", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", ".", "split", "(", "';'", ")", "[", "0", "]", "if", "(", "content_type", "==", "'application/json'", ")", ":", "return", "request", ".", "get_json", "(", ")", "elif", "(", "content_type", "==", "'application/x-www-form-urlencoded'", ")", ":", "return", "(", "multidict_to_dict", "(", "request", ".", "form", ")", "if", "len", "(", "request", ".", "form", ")", "else", "abort", "(", "400", ",", "description", "=", "'No form-urlencoded data supplied'", ")", ")", "elif", "(", "content_type", "==", "'multipart/form-data'", ")", ":", "if", "(", "len", "(", "request", ".", "form", ")", "or", "len", "(", "request", ".", "files", ")", ")", ":", "formItems", "=", "MultiDict", "(", "request", ".", "form", ")", "if", "config", ".", "MULTIPART_FORM_FIELDS_AS_JSON", ":", "for", "(", "key", ",", "lst", ")", "in", "formItems", ".", "lists", "(", ")", ":", "new_lst", "=", "[", "]", "for", "value", "in", "lst", ":", "try", ":", "new_lst", ".", "append", "(", "json", ".", "loads", "(", "value", ")", ")", "except", "ValueError", ":", "new_lst", ".", "append", "(", "json", ".", "loads", "(", "'\"{0}\"'", ".", "format", "(", "value", ")", ")", ")", "formItems", ".", "setlist", "(", "key", ",", "new_lst", ")", "payload", "=", "CombinedMultiDict", "(", "[", "formItems", ",", "request", ".", "files", "]", ")", "return", "multidict_to_dict", "(", "payload", ")", "else", ":", "abort", "(", "400", ",", "description", "=", "'No multipart/form-data supplied'", ")", "else", ":", "abort", "(", "400", ",", "description", "=", "'Unknown or no Content-Type header supplied'", ")" ]
performs sanity checks or decoding depending on the content-type .
train
false
17,952
def return_json(filepath):
    """Read the file at `filepath` and return its parsed JSON content."""
    return json.loads(get_file_contents(filepath))
[ "def", "return_json", "(", "filepath", ")", ":", "response", "=", "get_file_contents", "(", "filepath", ")", "return", "json", ".", "loads", "(", "response", ")" ]
return json object when provided url args: filepath: the path to the json file .
train
false
17,953
@after.each_scenario
def capture_console_log(scenario):
    """Save the browser console log of a failed scenario to help debugging."""
    if scenario.failed:
        # NOTE(review): get_log can raise WebDriverException but sits
        # outside the try below — confirm this is intended.
        log = world.browser.driver.get_log('browser')
        try:
            output_dir = '{}/log'.format(settings.TEST_ROOT)
            # Scenario name becomes the file name, with spaces replaced.
            file_name = '{}/{}.log'.format(output_dir, scenario.name.replace(' ', '_'))
            with open(file_name, 'w') as output_file:
                for line in log:
                    output_file.write('{}{}'.format(dumps(line), '\n'))
        except WebDriverException:
            LOGGER.error('Could not capture the console log')
[ "@", "after", ".", "each_scenario", "def", "capture_console_log", "(", "scenario", ")", ":", "if", "scenario", ".", "failed", ":", "log", "=", "world", ".", "browser", ".", "driver", ".", "get_log", "(", "'browser'", ")", "try", ":", "output_dir", "=", "'{}/log'", ".", "format", "(", "settings", ".", "TEST_ROOT", ")", "file_name", "=", "'{}/{}.log'", ".", "format", "(", "output_dir", ",", "scenario", ".", "name", ".", "replace", "(", "' '", ",", "'_'", ")", ")", "with", "open", "(", "file_name", ",", "'w'", ")", "as", "output_file", ":", "for", "line", "in", "log", ":", "output_file", ".", "write", "(", "'{}{}'", ".", "format", "(", "dumps", "(", "line", ")", ",", "'\\n'", ")", ")", "except", "WebDriverException", ":", "LOGGER", ".", "error", "(", "'Could not capture the console log'", ")" ]
save the console log to help with debugging .
train
false
17,954
@pytest.mark.skipif((not SOLIDITY_AVAILABLE), reason='solc compiler not available')
def test_abicontract_interface():
    """Regression test for issue #370: ABIContract accepts ABI as JSON bytes."""
    tester_state = state()
    contract_path = path.join(CONTRACTS_DIR, 'simple_contract.sol')
    simple_compiled = compile_file(contract_path)
    simple_address = tester_state.evm(simple_compiled['Simple']['bin'])
    # The ABI is passed as UTF-8 encoded JSON rather than a python structure.
    abi_json = json.dumps(simple_compiled['Simple']['abi']).encode('utf-8')
    abi = ABIContract(_state=tester_state, _abi=abi_json, address=simple_address, listen=False, log_listener=None, default_key=None)
    assert (abi.test() == 1)
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "(", "not", "SOLIDITY_AVAILABLE", ")", ",", "reason", "=", "'solc compiler not available'", ")", "def", "test_abicontract_interface", "(", ")", ":", "tester_state", "=", "state", "(", ")", "contract_path", "=", "path", ".", "join", "(", "CONTRACTS_DIR", ",", "'simple_contract.sol'", ")", "simple_compiled", "=", "compile_file", "(", "contract_path", ")", "simple_address", "=", "tester_state", ".", "evm", "(", "simple_compiled", "[", "'Simple'", "]", "[", "'bin'", "]", ")", "abi_json", "=", "json", ".", "dumps", "(", "simple_compiled", "[", "'Simple'", "]", "[", "'abi'", "]", ")", ".", "encode", "(", "'utf-8'", ")", "abi", "=", "ABIContract", "(", "_state", "=", "tester_state", ",", "_abi", "=", "abi_json", ",", "address", "=", "simple_address", ",", "listen", "=", "False", ",", "log_listener", "=", "None", ",", "default_key", "=", "None", ")", "assert", "(", "abi", ".", "test", "(", ")", "==", "1", ")" ]
test for issue #370 .
train
false
17,955
def get_command_regexp(prefix, command):
    """Return a compiled regexp object that implements the command.

    The compiled pattern matches `prefix` + `command`, capturing the
    command name as group 1, the rest of the line as group 2 and up to
    four whitespace-separated parameters as groups 3-6.
    """
    # Escape literal whitespace inside the prefix so it matches verbatim
    # even under re.VERBOSE.
    escaped_prefix = re.sub(u'(\\s)', u'\\\\\\1', prefix)
    pattern = u'\n    (?:{prefix})({command}) # Command as group 1.\n    (?:\\s+              # Whitespace to end command.\n    (                    # Rest of the line as group 2.\n    (?:(\\S+))?          # Parameters 1-4 as groups 3-6.\n    (?:\\s+(\\S+))?\n    (?:\\s+(\\S+))?\n    (?:\\s+(\\S+))?\n    .*                   # Accept anything after the parameters.\n                         # Leave it up to the module to parse\n                         # the line.\n    ))?                  # Group 2 must be None, if there are no\n                         # parameters.\n    $                    # EoL, so there are no partial matches.\n    '.format(prefix=escaped_prefix, command=command)
    return re.compile(pattern, (re.IGNORECASE | re.VERBOSE))
[ "def", "get_command_regexp", "(", "prefix", ",", "command", ")", ":", "prefix", "=", "re", ".", "sub", "(", "u'(\\\\s)'", ",", "u'\\\\\\\\\\\\1'", ",", "prefix", ")", "pattern", "=", "u'\\n (?:{prefix})({command}) # Command as group 1.\\n (?:\\\\s+ # Whitespace to end command.\\n ( # Rest of the line as group 2.\\n (?:(\\\\S+))? # Parameters 1-4 as groups 3-6.\\n (?:\\\\s+(\\\\S+))?\\n (?:\\\\s+(\\\\S+))?\\n (?:\\\\s+(\\\\S+))?\\n .* # Accept anything after the parameters.\\n # Leave it up to the module to parse\\n # the line.\\n ))? # Group 2 must be None, if there are no\\n # parameters.\\n $ # EoL, so there are no partial matches.\\n '", ".", "format", "(", "prefix", "=", "prefix", ",", "command", "=", "command", ")", "return", "re", ".", "compile", "(", "pattern", ",", "(", "re", ".", "IGNORECASE", "|", "re", ".", "VERBOSE", ")", ")" ]
return a compiled regexp object that implements the command .
train
false
17,956
@webob.dec.wsgify
@microversion.version_handler(1.2)
@util.require_content('application/json')
def update_resource_class(req):
    """PUT to update (rename) a single resource class.

    Replies 409 Conflict when the new name already exists and
    400 Bad Request when the class is a standard one that cannot be
    updated; otherwise returns the serialized class with status 200.
    """
    name = util.wsgi_path_item(req.environ, 'name')
    context = req.environ['placement.context']
    # Validate the body against the v1.2 PUT schema before touching the DB.
    data = util.extract_json(req.body, PUT_RC_SCHEMA_V1_2)
    rc = objects.ResourceClass.get_by_name(context, name)
    rc.name = data['name']
    try:
        rc.save()
    except exception.ResourceClassExists:
        raise webob.exc.HTTPConflict((_('Resource class already exists: %(name)s') % {'name': name}), json_formatter=util.json_error_formatter)
    except exception.ResourceClassCannotUpdateStandard:
        raise webob.exc.HTTPBadRequest((_('Cannot update standard resource class %(rp_name)s') % {'rp_name': name}), json_formatter=util.json_error_formatter)
    req.response.body = encodeutils.to_utf8(jsonutils.dumps(_serialize_resource_class(req.environ, rc)))
    req.response.status = 200
    req.response.content_type = 'application/json'
    return req.response
[ "@", "webob", ".", "dec", ".", "wsgify", "@", "microversion", ".", "version_handler", "(", "1.2", ")", "@", "util", ".", "require_content", "(", "'application/json'", ")", "def", "update_resource_class", "(", "req", ")", ":", "name", "=", "util", ".", "wsgi_path_item", "(", "req", ".", "environ", ",", "'name'", ")", "context", "=", "req", ".", "environ", "[", "'placement.context'", "]", "data", "=", "util", ".", "extract_json", "(", "req", ".", "body", ",", "PUT_RC_SCHEMA_V1_2", ")", "rc", "=", "objects", ".", "ResourceClass", ".", "get_by_name", "(", "context", ",", "name", ")", "rc", ".", "name", "=", "data", "[", "'name'", "]", "try", ":", "rc", ".", "save", "(", ")", "except", "exception", ".", "ResourceClassExists", ":", "raise", "webob", ".", "exc", ".", "HTTPConflict", "(", "(", "_", "(", "'Resource class already exists: %(name)s'", ")", "%", "{", "'name'", ":", "name", "}", ")", ",", "json_formatter", "=", "util", ".", "json_error_formatter", ")", "except", "exception", ".", "ResourceClassCannotUpdateStandard", ":", "raise", "webob", ".", "exc", ".", "HTTPBadRequest", "(", "(", "_", "(", "'Cannot update standard resource class %(rp_name)s'", ")", "%", "{", "'rp_name'", ":", "name", "}", ")", ",", "json_formatter", "=", "util", ".", "json_error_formatter", ")", "req", ".", "response", ".", "body", "=", "encodeutils", ".", "to_utf8", "(", "jsonutils", ".", "dumps", "(", "_serialize_resource_class", "(", "req", ".", "environ", ",", "rc", ")", ")", ")", "req", ".", "response", ".", "status", "=", "200", "req", ".", "response", ".", "content_type", "=", "'application/json'", "return", "req", ".", "response" ]
put to update a single resource class .
train
false
17,957
def parse_host(entity, default_port=DEFAULT_PORT):
    """Parse a canonical host:port string into a (host, port) tuple.

    Handles bracketed IPv6 literals, unix domain socket paths (returned
    verbatim with the default port) and plain host[:port] strings; the
    hostname is lowercased. Raises ValueError when more than one ':' is
    present or the port is not an integer in (0, 65535].
    """
    host = entity
    port = default_port
    if (entity[0] == '['):
        # Bracketed IPv6 literal, possibly with a trailing :port.
        (host, port) = parse_ipv6_literal_host(entity, default_port)
    elif entity.endswith('.sock'):
        # Unix domain socket path: no host/port split, keep case as-is.
        return (entity, default_port)
    elif (entity.find(':') != (-1)):
        if (entity.count(':') > 1):
            raise ValueError("Reserved characters such as ':' must be escaped according RFC 2396. An IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732.")
        (host, port) = host.split(':', 1)
    # A port parsed out of the string still needs validation + conversion.
    if isinstance(port, string_type):
        if ((not port.isdigit()) or (int(port) > 65535) or (int(port) <= 0)):
            raise ValueError(('Port must be an integer between 0 and 65535: %s' % (port,)))
        port = int(port)
    return (host.lower(), port)
[ "def", "parse_host", "(", "entity", ",", "default_port", "=", "DEFAULT_PORT", ")", ":", "host", "=", "entity", "port", "=", "default_port", "if", "(", "entity", "[", "0", "]", "==", "'['", ")", ":", "(", "host", ",", "port", ")", "=", "parse_ipv6_literal_host", "(", "entity", ",", "default_port", ")", "elif", "entity", ".", "endswith", "(", "'.sock'", ")", ":", "return", "(", "entity", ",", "default_port", ")", "elif", "(", "entity", ".", "find", "(", "':'", ")", "!=", "(", "-", "1", ")", ")", ":", "if", "(", "entity", ".", "count", "(", "':'", ")", ">", "1", ")", ":", "raise", "ValueError", "(", "\"Reserved characters such as ':' must be escaped according RFC 2396. An IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732.\"", ")", "(", "host", ",", "port", ")", "=", "host", ".", "split", "(", "':'", ",", "1", ")", "if", "isinstance", "(", "port", ",", "string_type", ")", ":", "if", "(", "(", "not", "port", ".", "isdigit", "(", ")", ")", "or", "(", "int", "(", "port", ")", ">", "65535", ")", "or", "(", "int", "(", "port", ")", "<=", "0", ")", ")", ":", "raise", "ValueError", "(", "(", "'Port must be an integer between 0 and 65535: %s'", "%", "(", "port", ",", ")", ")", ")", "port", "=", "int", "(", "port", ")", "return", "(", "host", ".", "lower", "(", ")", ",", "port", ")" ]
parse a canonical host:port string into parts .
train
true
17,960
def get_end_time():
    """Return the RFC 3339 end time: one hour before the current UTC time."""
    one_hour = datetime.timedelta(hours=1)
    return format_rfc3339(datetime.datetime.utcnow() - one_hour)
[ "def", "get_end_time", "(", ")", ":", "end_time", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "-", "datetime", ".", "timedelta", "(", "hours", "=", "1", ")", ")", "return", "format_rfc3339", "(", "end_time", ")" ]
returns the end time for the 5-minute window to read the custom metric from within .
train
false
17,961
def wavplay(filename):
    """Play a wav audio file using a platform-appropriate system call.

    filename: path of the wav file to play. Prints a message and returns
    without playing when the file is missing, winsound is unavailable on
    Windows, or the platform is not recognized.
    """
    # Fix: the original used Python-2-only `print` statements (a
    # SyntaxError on Python 3); the single-argument print(...) form below
    # behaves identically on both Python 2 and 3.
    if not os.path.isfile(filename):
        print('Input file does not exist. Make sure you computed the analysis/synthesis')
    elif sys.platform in ('linux', 'linux2'):
        # aplay ships with ALSA on most Linux distributions.
        subprocess.call(['aplay', filename])
    elif sys.platform == 'darwin':
        subprocess.call(['afplay', filename])
    elif sys.platform == 'win32':
        if winsound_imported:
            winsound.PlaySound(filename, winsound.SND_FILENAME)
        else:
            print('Cannot play sound, winsound could not be imported')
    else:
        print('Platform not recognized')
[ "def", "wavplay", "(", "filename", ")", ":", "if", "(", "os", ".", "path", ".", "isfile", "(", "filename", ")", "==", "False", ")", ":", "print", "'Input file does not exist. Make sure you computed the analysis/synthesis'", "elif", "(", "(", "sys", ".", "platform", "==", "'linux'", ")", "or", "(", "sys", ".", "platform", "==", "'linux2'", ")", ")", ":", "subprocess", ".", "call", "(", "[", "'aplay'", ",", "filename", "]", ")", "elif", "(", "sys", ".", "platform", "==", "'darwin'", ")", ":", "subprocess", ".", "call", "(", "[", "'afplay'", ",", "filename", "]", ")", "elif", "(", "sys", ".", "platform", "==", "'win32'", ")", ":", "if", "winsound_imported", ":", "winsound", ".", "PlaySound", "(", "filename", ",", "winsound", ".", "SND_FILENAME", ")", "else", ":", "print", "'Cannot play sound, winsound could not be imported'", "else", ":", "print", "'Platform not recognized'" ]
play a wav audio file from system using os calls filename: name of file to read .
train
false
17,963
def exception_generator():
    """Raise a fake exception, log and submit it, then re-raise it."""
    try:
        raise Exception('FAKE EXCEPTION')
    except Exception as error:
        logger.log(u'FAKE ERROR: ' + ex(error), logger.ERROR)
        logger.submit_errors()
        raise
[ "def", "exception_generator", "(", ")", ":", "try", ":", "raise", "Exception", "(", "'FAKE EXCEPTION'", ")", "except", "Exception", "as", "error", ":", "logger", ".", "log", "(", "(", "u'FAKE ERROR: '", "+", "ex", "(", "error", ")", ")", ",", "logger", ".", "ERROR", ")", "logger", ".", "submit_errors", "(", ")", "raise" ]
dummy function to raise a fake exception and log it .
train
false
17,966
def get_project_translation(request, project=None, subproject=None, lang=None):
    """Resolve (project, subproject, translation) from optional identifiers.

    The most specific identifier provided wins and its parents are
    derived from the resolved object; missing levels stay None.
    """
    translation = None
    if lang is not None and subproject is not None:
        # Full path given: resolve the translation and derive its parents.
        translation = get_translation(request, project, subproject, lang)
        subproject = translation.subproject
        project = subproject.project
    elif subproject is not None:
        subproject = get_subproject(request, project, subproject)
        project = subproject.project
    elif project is not None:
        project = get_project(request, project)
    return (project, subproject, translation)
[ "def", "get_project_translation", "(", "request", ",", "project", "=", "None", ",", "subproject", "=", "None", ",", "lang", "=", "None", ")", ":", "if", "(", "(", "lang", "is", "not", "None", ")", "and", "(", "subproject", "is", "not", "None", ")", ")", ":", "translation", "=", "get_translation", "(", "request", ",", "project", ",", "subproject", ",", "lang", ")", "subproject", "=", "translation", ".", "subproject", "project", "=", "subproject", ".", "project", "else", ":", "translation", "=", "None", "if", "(", "subproject", "is", "not", "None", ")", ":", "subproject", "=", "get_subproject", "(", "request", ",", "project", ",", "subproject", ")", "project", "=", "subproject", ".", "project", "elif", "(", "project", "is", "not", "None", ")", ":", "project", "=", "get_project", "(", "request", ",", "project", ")", "return", "(", "project", ",", "subproject", ",", "translation", ")" ]
returns project .
train
false
17,967
@task(name=u'openedx.core.djangoapps.content.course_structures.tasks.update_course_structure')
def update_course_structure(course_key):
    """Regenerates and updates the stored course structure for `course_key`.

    course_key must be a string (it is parsed into a CourseKey here).
    Raises ValueError for non-string input and re-raises any error from
    structure generation after logging it.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .models import CourseStructure
    if (not isinstance(course_key, basestring)):
        raise ValueError('course_key must be a string. {} is not acceptable.'.format(type(course_key)))
    course_key = CourseKey.from_string(course_key)
    try:
        structure = _generate_course_structure(course_key)
    except Exception as ex:
        log.exception('An error occurred while generating course structure: %s', ex.message)
        raise
    structure_json = json.dumps(structure['structure'])
    discussion_id_map_json = json.dumps(structure['discussion_id_map'])
    (structure_model, created) = CourseStructure.objects.get_or_create(course_id=course_key, defaults={'structure_json': structure_json, 'discussion_id_map_json': discussion_id_map_json})
    if (not created):
        # get_or_create only applies defaults on creation, so an existing
        # row must be updated explicitly.
        structure_model.structure_json = structure_json
        structure_model.discussion_id_map_json = discussion_id_map_json
        structure_model.save()
[ "@", "task", "(", "name", "=", "u'openedx.core.djangoapps.content.course_structures.tasks.update_course_structure'", ")", "def", "update_course_structure", "(", "course_key", ")", ":", "from", ".", "models", "import", "CourseStructure", "if", "(", "not", "isinstance", "(", "course_key", ",", "basestring", ")", ")", ":", "raise", "ValueError", "(", "'course_key must be a string. {} is not acceptable.'", ".", "format", "(", "type", "(", "course_key", ")", ")", ")", "course_key", "=", "CourseKey", ".", "from_string", "(", "course_key", ")", "try", ":", "structure", "=", "_generate_course_structure", "(", "course_key", ")", "except", "Exception", "as", "ex", ":", "log", ".", "exception", "(", "'An error occurred while generating course structure: %s'", ",", "ex", ".", "message", ")", "raise", "structure_json", "=", "json", ".", "dumps", "(", "structure", "[", "'structure'", "]", ")", "discussion_id_map_json", "=", "json", ".", "dumps", "(", "structure", "[", "'discussion_id_map'", "]", ")", "(", "structure_model", ",", "created", ")", "=", "CourseStructure", ".", "objects", ".", "get_or_create", "(", "course_id", "=", "course_key", ",", "defaults", "=", "{", "'structure_json'", ":", "structure_json", ",", "'discussion_id_map_json'", ":", "discussion_id_map_json", "}", ")", "if", "(", "not", "created", ")", ":", "structure_model", ".", "structure_json", "=", "structure_json", "structure_model", ".", "discussion_id_map_json", "=", "discussion_id_map_json", "structure_model", ".", "save", "(", ")" ]
regenerates and updates the course structure for the specified course .
train
false
17,968
def load_plugin_helpers():
    """Rebuild the template-helper registry from builtins and plugins."""
    global helper_functions
    helper_functions.clear()
    helper_functions.update(_builtin_functions)
    # Iterate in reverse so earlier-registered plugins win on name clashes.
    plugins = list(p.PluginImplementations(p.ITemplateHelpers))
    for plugin in reversed(plugins):
        helper_functions.update(plugin.get_helpers())
[ "def", "load_plugin_helpers", "(", ")", ":", "global", "helper_functions", "helper_functions", ".", "clear", "(", ")", "helper_functions", ".", "update", "(", "_builtin_functions", ")", "for", "plugin", "in", "reversed", "(", "list", "(", "p", ".", "PluginImplementations", "(", "p", ".", "ITemplateHelpers", ")", ")", ")", ":", "helper_functions", ".", "update", "(", "plugin", ".", "get_helpers", "(", ")", ")" ]
loads the list of helpers provided by plugins .
train
false
17,970
def getPluralString(number, suffix):
    """Get the plural string: `suffix` with a trailing 's' unless number is 1."""
    if number == 1:
        return '1 %s' % suffix
    return '%s %ss' % (number, suffix)
[ "def", "getPluralString", "(", "number", ",", "suffix", ")", ":", "if", "(", "number", "==", "1", ")", ":", "return", "(", "'1 %s'", "%", "suffix", ")", "return", "(", "'%s %ss'", "%", "(", "number", ",", "suffix", ")", ")" ]
get the plural string .
train
false
17,972
def findall(pattern, string, flags=0, pos=None, endpos=None, overlapped=False, concurrent=None, **kwargs):
    """Return a list of all matches of `pattern` in `string`.

    With overlapped=True, overlapping matches are included as well.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.findall(string, pos, endpos, overlapped, concurrent)
[ "def", "findall", "(", "pattern", ",", "string", ",", "flags", "=", "0", ",", "pos", "=", "None", ",", "endpos", "=", "None", ",", "overlapped", "=", "False", ",", "concurrent", "=", "None", ",", "**", "kwargs", ")", ":", "return", "_compile", "(", "pattern", ",", "flags", ",", "kwargs", ")", ".", "findall", "(", "string", ",", "pos", ",", "endpos", ",", "overlapped", ",", "concurrent", ")" ]
find all non-overlapping matches of the pattern in the string and return them as a list .
train
false
17,973
def register_open(id, factory, accept=None):
    """Register an image file plugin opener under the uppercased `id`."""
    # Format ids are stored case-insensitively, normalized to upper case.
    id = id.upper()
    ID.append(id)
    OPEN[id] = (factory, accept)
[ "def", "register_open", "(", "id", ",", "factory", ",", "accept", "=", "None", ")", ":", "id", "=", "id", ".", "upper", "(", ")", "ID", ".", "append", "(", "id", ")", "OPEN", "[", "id", "]", "=", "(", "factory", ",", "accept", ")" ]
register an image file plugin .
train
false
17,974
@contextmanager
def make_uploaded_file(content_type, *a, **kw):
    """Wrap the result of make_image_file in a Django UploadedFile.

    Extra positional/keyword arguments are forwarded to make_image_file;
    the file's on-disk size is reported as the UploadedFile size.
    """
    with make_image_file(*a, **kw) as image_file:
        (yield UploadedFile(image_file, content_type=content_type, size=os.path.getsize(image_file.name)))
[ "@", "contextmanager", "def", "make_uploaded_file", "(", "content_type", ",", "*", "a", ",", "**", "kw", ")", ":", "with", "make_image_file", "(", "*", "a", ",", "**", "kw", ")", "as", "image_file", ":", "(", "yield", "UploadedFile", "(", "image_file", ",", "content_type", "=", "content_type", ",", "size", "=", "os", ".", "path", ".", "getsize", "(", "image_file", ".", "name", ")", ")", ")" ]
wrap the result of make_image_file in a django uploadedfile .
train
false
17,975
def json_default(obj):
    """JSON serializer for objects not serializable by the default encoder.

    Datetimes are normalized to UTC (when aware) and emitted as
    {'$dt': iso-string}; anything else raises TypeError.
    """
    if not isinstance(obj, datetime):
        raise TypeError('Type not serializable')
    offset = obj.utcoffset()
    if offset is not None:
        obj = obj - offset
    return {'$dt': obj.isoformat()}
[ "def", "json_default", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "datetime", ")", ":", "if", "(", "obj", ".", "utcoffset", "(", ")", "is", "not", "None", ")", ":", "obj", "=", "(", "obj", "-", "obj", ".", "utcoffset", "(", ")", ")", "return", "{", "'$dt'", ":", "obj", ".", "isoformat", "(", ")", "}", "raise", "TypeError", "(", "'Type not serializable'", ")" ]
json serializer for objects not serializable by default json code .
train
false
17,976
@register.assignment_tag(takes_context=True) def filer_has_permission(context, item, action): permission_method_name = u'has_{action}_permission'.format(action=action) permission_method = getattr(item, permission_method_name, None) request = context.get(u'request') if ((not permission_method) or (not request)): return False return permission_method(request)
[ "@", "register", ".", "assignment_tag", "(", "takes_context", "=", "True", ")", "def", "filer_has_permission", "(", "context", ",", "item", ",", "action", ")", ":", "permission_method_name", "=", "u'has_{action}_permission'", ".", "format", "(", "action", "=", "action", ")", "permission_method", "=", "getattr", "(", "item", ",", "permission_method_name", ",", "None", ")", "request", "=", "context", ".", "get", "(", "u'request'", ")", "if", "(", "(", "not", "permission_method", ")", "or", "(", "not", "request", ")", ")", ":", "return", "False", "return", "permission_method", "(", "request", ")" ]
does the current user have permission to do the given action on the given item .
train
false
17,978
def _path_for_test(test): return FilePath(_path_for_test_id(test.id()))
[ "def", "_path_for_test", "(", "test", ")", ":", "return", "FilePath", "(", "_path_for_test_id", "(", "test", ".", "id", "(", ")", ")", ")" ]
get the temporary directory path for a test .
train
false
17,979
@pytest.fixture def fake_web_tab(stubs, tab_registry, mode_manager, qapp, fake_args): if (PYQT_VERSION < 329216): pytest.skip('Causes segfaults, see #1638') fake_args.backend = 'webengine' return stubs.FakeWebTab
[ "@", "pytest", ".", "fixture", "def", "fake_web_tab", "(", "stubs", ",", "tab_registry", ",", "mode_manager", ",", "qapp", ",", "fake_args", ")", ":", "if", "(", "PYQT_VERSION", "<", "329216", ")", ":", "pytest", ".", "skip", "(", "'Causes segfaults, see #1638'", ")", "fake_args", ".", "backend", "=", "'webengine'", "return", "stubs", ".", "FakeWebTab" ]
fixture providing the fakewebtab *class* .
train
false
17,980
def test_multiclass_fit_sample(): y = Y.copy() y[5] = 2 y[6] = 2 cc = ClusterCentroids(random_state=RND_SEED) (X_resampled, y_resampled) = cc.fit_sample(X, y) count_y_res = Counter(y_resampled) assert_equal(count_y_res[0], 2) assert_equal(count_y_res[1], 2) assert_equal(count_y_res[2], 2)
[ "def", "test_multiclass_fit_sample", "(", ")", ":", "y", "=", "Y", ".", "copy", "(", ")", "y", "[", "5", "]", "=", "2", "y", "[", "6", "]", "=", "2", "cc", "=", "ClusterCentroids", "(", "random_state", "=", "RND_SEED", ")", "(", "X_resampled", ",", "y_resampled", ")", "=", "cc", ".", "fit_sample", "(", "X", ",", "y", ")", "count_y_res", "=", "Counter", "(", "y_resampled", ")", "assert_equal", "(", "count_y_res", "[", "0", "]", ",", "2", ")", "assert_equal", "(", "count_y_res", "[", "1", "]", ",", "2", ")", "assert_equal", "(", "count_y_res", "[", "2", "]", ",", "2", ")" ]
test fit sample method with multiclass target .
train
false
17,981
def check_result_ignore(result, ignore_ranges): for (bears, range) in ignore_ranges: orig = result.origin.lower().split(' ')[0] if (result.overlaps(range) and ((len(bears) == 0) or (orig in bears) or fnmatch(orig, bears))): return True return False
[ "def", "check_result_ignore", "(", "result", ",", "ignore_ranges", ")", ":", "for", "(", "bears", ",", "range", ")", "in", "ignore_ranges", ":", "orig", "=", "result", ".", "origin", ".", "lower", "(", ")", ".", "split", "(", "' '", ")", "[", "0", "]", "if", "(", "result", ".", "overlaps", "(", "range", ")", "and", "(", "(", "len", "(", "bears", ")", "==", "0", ")", "or", "(", "orig", "in", "bears", ")", "or", "fnmatch", "(", "orig", ",", "bears", ")", ")", ")", ":", "return", "True", "return", "False" ]
determines if the result has to be ignored .
train
false
17,982
def cost_tour(graph, tour): steps = zip(tour[0:(-1)], tour[1:]) cost = sum([graph[(step_from, step_to)] for (step_from, step_to) in steps]) return cost
[ "def", "cost_tour", "(", "graph", ",", "tour", ")", ":", "steps", "=", "zip", "(", "tour", "[", "0", ":", "(", "-", "1", ")", "]", ",", "tour", "[", "1", ":", "]", ")", "cost", "=", "sum", "(", "[", "graph", "[", "(", "step_from", ",", "step_to", ")", "]", "for", "(", "step_from", ",", "step_to", ")", "in", "steps", "]", ")", "return", "cost" ]
calculates the travel cost of given tour through a given graph .
train
false
17,983
def _num_plugins_cached(): return len(plugin.PLUGIN_CACHE.keys())
[ "def", "_num_plugins_cached", "(", ")", ":", "return", "len", "(", "plugin", ".", "PLUGIN_CACHE", ".", "keys", "(", ")", ")" ]
returns the number of plugins that have been cached .
train
false
17,984
def config_test(syslog_ng_sbin_dir=None, cfgfile=None): params = ['--syntax-only'] if cfgfile: params.append('--cfgfile={0}'.format(cfgfile)) try: ret = _run_command_in_extended_path(syslog_ng_sbin_dir, 'syslog-ng', params) except CommandExecutionError as err: return _format_return_data(retcode=(-1), stderr=str(err)) retcode = ret.get('retcode', (-1)) stderr = ret.get('stderr', None) stdout = ret.get('stdout', None) return _format_return_data(retcode, stdout, stderr)
[ "def", "config_test", "(", "syslog_ng_sbin_dir", "=", "None", ",", "cfgfile", "=", "None", ")", ":", "params", "=", "[", "'--syntax-only'", "]", "if", "cfgfile", ":", "params", ".", "append", "(", "'--cfgfile={0}'", ".", "format", "(", "cfgfile", ")", ")", "try", ":", "ret", "=", "_run_command_in_extended_path", "(", "syslog_ng_sbin_dir", ",", "'syslog-ng'", ",", "params", ")", "except", "CommandExecutionError", "as", "err", ":", "return", "_format_return_data", "(", "retcode", "=", "(", "-", "1", ")", ",", "stderr", "=", "str", "(", "err", ")", ")", "retcode", "=", "ret", ".", "get", "(", "'retcode'", ",", "(", "-", "1", ")", ")", "stderr", "=", "ret", ".", "get", "(", "'stderr'", ",", "None", ")", "stdout", "=", "ret", ".", "get", "(", "'stdout'", ",", "None", ")", "return", "_format_return_data", "(", "retcode", ",", "stdout", ",", "stderr", ")" ]
runs syntax check against cfgfile .
train
true
17,985
def test_NullLocator_set_params(): loc = mticker.NullLocator() with warnings.catch_warnings(record=True) as w: warnings.simplefilter(u'always') loc.set_params() assert (len(w) == 1)
[ "def", "test_NullLocator_set_params", "(", ")", ":", "loc", "=", "mticker", ".", "NullLocator", "(", ")", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", "as", "w", ":", "warnings", ".", "simplefilter", "(", "u'always'", ")", "loc", ".", "set_params", "(", ")", "assert", "(", "len", "(", "w", ")", "==", "1", ")" ]
create null locator .
train
false
17,986
def get_indent(line): return re.match('^([ \\t]*)', line).group()
[ "def", "get_indent", "(", "line", ")", ":", "return", "re", ".", "match", "(", "'^([ \\\\t]*)'", ",", "line", ")", ".", "group", "(", ")" ]
return the initial space or tab indent of line .
train
false
17,988
def handle_cputime_metric(common_props, sample): cputime_samples = [] wallclock_key = u'-- WALL --' for data in sample['value'].itervalues(): wall_time = data[wallclock_key] for (process, value) in data.iteritems(): if (process != wallclock_key): cputime_sample = dict(common_props) cputime_sample['process'] = process cputime_sample['value'] = value cputime_sample['wallclock'] = wall_time cputime_samples.append(cputime_sample) return cputime_samples
[ "def", "handle_cputime_metric", "(", "common_props", ",", "sample", ")", ":", "cputime_samples", "=", "[", "]", "wallclock_key", "=", "u'-- WALL --'", "for", "data", "in", "sample", "[", "'value'", "]", ".", "itervalues", "(", ")", ":", "wall_time", "=", "data", "[", "wallclock_key", "]", "for", "(", "process", ",", "value", ")", "in", "data", ".", "iteritems", "(", ")", ":", "if", "(", "process", "!=", "wallclock_key", ")", ":", "cputime_sample", "=", "dict", "(", "common_props", ")", "cputime_sample", "[", "'process'", "]", "=", "process", "cputime_sample", "[", "'value'", "]", "=", "value", "cputime_sample", "[", "'wallclock'", "]", "=", "wall_time", "cputime_samples", ".", "append", "(", "cputime_sample", ")", "return", "cputime_samples" ]
create a sample object for a cputime metric sample .
train
false
17,989
def _layerIncludedInState(layer, state): layer_included = (len(layer.include) == 0) for exclude_rule in layer.exclude: if _stateMeetsRule(state, exclude_rule): layer_included = False break for include_rule in layer.include: if _stateMeetsRule(state, include_rule): layer_included = True break return layer_included
[ "def", "_layerIncludedInState", "(", "layer", ",", "state", ")", ":", "layer_included", "=", "(", "len", "(", "layer", ".", "include", ")", "==", "0", ")", "for", "exclude_rule", "in", "layer", ".", "exclude", ":", "if", "_stateMeetsRule", "(", "state", ",", "exclude_rule", ")", ":", "layer_included", "=", "False", "break", "for", "include_rule", "in", "layer", ".", "include", ":", "if", "_stateMeetsRule", "(", "state", ",", "include_rule", ")", ":", "layer_included", "=", "True", "break", "return", "layer_included" ]
returns true if this layer will be included in the given state logic copied from caffes net::filternet() .
train
false
17,992
def getRemainingEdgeTable(edges, vertexes, z): remainingEdgeTable = {} if (len(edges) > 0): if (edges[0].zMinimum == None): for edge in edges: setEdgeMaximumMinimum(edge, vertexes) for edgeIndex in xrange(len(edges)): edge = edges[edgeIndex] if ((edge.zMinimum < z) and (edge.zMaximum > z)): remainingEdgeTable[edgeIndex] = edge return remainingEdgeTable
[ "def", "getRemainingEdgeTable", "(", "edges", ",", "vertexes", ",", "z", ")", ":", "remainingEdgeTable", "=", "{", "}", "if", "(", "len", "(", "edges", ")", ">", "0", ")", ":", "if", "(", "edges", "[", "0", "]", ".", "zMinimum", "==", "None", ")", ":", "for", "edge", "in", "edges", ":", "setEdgeMaximumMinimum", "(", "edge", ",", "vertexes", ")", "for", "edgeIndex", "in", "xrange", "(", "len", "(", "edges", ")", ")", ":", "edge", "=", "edges", "[", "edgeIndex", "]", "if", "(", "(", "edge", ".", "zMinimum", "<", "z", ")", "and", "(", "edge", ".", "zMaximum", ">", "z", ")", ")", ":", "remainingEdgeTable", "[", "edgeIndex", "]", "=", "edge", "return", "remainingEdgeTable" ]
get the remaining edge hashtable .
train
false
17,993
def gf_crt2(U, M, p, E, S, K): v = K.zero for (u, m, e, s) in zip(U, M, E, S): v += (e * ((u * s) % m)) return (v % p)
[ "def", "gf_crt2", "(", "U", ",", "M", ",", "p", ",", "E", ",", "S", ",", "K", ")", ":", "v", "=", "K", ".", "zero", "for", "(", "u", ",", "m", ",", "e", ",", "s", ")", "in", "zip", "(", "U", ",", "M", ",", "E", ",", "S", ")", ":", "v", "+=", "(", "e", "*", "(", "(", "u", "*", "s", ")", "%", "m", ")", ")", "return", "(", "v", "%", "p", ")" ]
second part of the chinese remainder theorem .
train
false
17,995
def chess_pgn_graph(pgn_file='chess_masters_WCC.pgn.bz2'): import bz2 G = nx.MultiDiGraph() game = {} datafile = bz2.BZ2File(pgn_file) lines = (line.decode().rstrip('\r\n') for line in datafile) for line in lines: if line.startswith('['): (tag, value) = line[1:(-1)].split(' ', 1) game[str(tag)] = value.strip('"') elif game: white = game.pop('White') black = game.pop('Black') G.add_edge(white, black, **game) game = {} return G
[ "def", "chess_pgn_graph", "(", "pgn_file", "=", "'chess_masters_WCC.pgn.bz2'", ")", ":", "import", "bz2", "G", "=", "nx", ".", "MultiDiGraph", "(", ")", "game", "=", "{", "}", "datafile", "=", "bz2", ".", "BZ2File", "(", "pgn_file", ")", "lines", "=", "(", "line", ".", "decode", "(", ")", ".", "rstrip", "(", "'\\r\\n'", ")", "for", "line", "in", "datafile", ")", "for", "line", "in", "lines", ":", "if", "line", ".", "startswith", "(", "'['", ")", ":", "(", "tag", ",", "value", ")", "=", "line", "[", "1", ":", "(", "-", "1", ")", "]", ".", "split", "(", "' '", ",", "1", ")", "game", "[", "str", "(", "tag", ")", "]", "=", "value", ".", "strip", "(", "'\"'", ")", "elif", "game", ":", "white", "=", "game", ".", "pop", "(", "'White'", ")", "black", "=", "game", ".", "pop", "(", "'Black'", ")", "G", ".", "add_edge", "(", "white", ",", "black", ",", "**", "game", ")", "game", "=", "{", "}", "return", "G" ]
read chess games in pgn format in pgn_file .
train
false
17,996
def add_lock(packages, **kwargs): locks = list_locks() added = [] try: packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys()) except MinionError as exc: raise CommandExecutionError(exc) for pkg in packages: if (not locks.get(pkg)): added.append(pkg) if added: __zypper__.call('al', *added) return {'added': len(added), 'packages': added}
[ "def", "add_lock", "(", "packages", ",", "**", "kwargs", ")", ":", "locks", "=", "list_locks", "(", ")", "added", "=", "[", "]", "try", ":", "packages", "=", "list", "(", "__salt__", "[", "'pkg_resource.parse_targets'", "]", "(", "packages", ")", "[", "0", "]", ".", "keys", "(", ")", ")", "except", "MinionError", "as", "exc", ":", "raise", "CommandExecutionError", "(", "exc", ")", "for", "pkg", "in", "packages", ":", "if", "(", "not", "locks", ".", "get", "(", "pkg", ")", ")", ":", "added", ".", "append", "(", "pkg", ")", "if", "added", ":", "__zypper__", ".", "call", "(", "'al'", ",", "*", "added", ")", "return", "{", "'added'", ":", "len", "(", "added", ")", ",", "'packages'", ":", "added", "}" ]
add a package lock .
train
false
17,999
def Trace(func): def Decorate(self, *args, **kwargs): args_to_show = [] if (args is not None): args_to_show.extend((str(argument) for argument in args)) if (kwargs is not None): args_to_show.extend((('%s=%s' % (key, value)) for (key, value) in kwargs.iteritems())) args_string = ', '.join(args_to_show) self.log('Entering %s(%s)', func.func_name, args_string) self._indent_level += 1 try: return func(self, *args, **kwargs) finally: self._indent_level -= 1 self.log('Exiting %s(%s)', func.func_name, args_string) return Decorate
[ "def", "Trace", "(", "func", ")", ":", "def", "Decorate", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "args_to_show", "=", "[", "]", "if", "(", "args", "is", "not", "None", ")", ":", "args_to_show", ".", "extend", "(", "(", "str", "(", "argument", ")", "for", "argument", "in", "args", ")", ")", "if", "(", "kwargs", "is", "not", "None", ")", ":", "args_to_show", ".", "extend", "(", "(", "(", "'%s=%s'", "%", "(", "key", ",", "value", ")", ")", "for", "(", "key", ",", "value", ")", "in", "kwargs", ".", "iteritems", "(", ")", ")", ")", "args_string", "=", "', '", ".", "join", "(", "args_to_show", ")", "self", ".", "log", "(", "'Entering %s(%s)'", ",", "func", ".", "func_name", ",", "args_string", ")", "self", ".", "_indent_level", "+=", "1", "try", ":", "return", "func", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", "finally", ":", "self", ".", "_indent_level", "-=", "1", "self", ".", "log", "(", "'Exiting %s(%s)'", ",", "func", ".", "func_name", ",", "args_string", ")", "return", "Decorate" ]
call stack logging decorator for hardenedmoduleshook class .
train
false
18,001
def spatial_3d_padding(x, padding=(1, 1, 1), dim_ordering='default'): if (dim_ordering == 'default'): dim_ordering = image_dim_ordering() if (dim_ordering not in {'th', 'tf'}): raise ValueError(('Unknown dim_ordering ' + str(dim_ordering))) input_shape = x.shape if (dim_ordering == 'th'): output_shape = (input_shape[0], input_shape[1], (input_shape[2] + (2 * padding[0])), (input_shape[3] + (2 * padding[1])), (input_shape[4] + (2 * padding[2]))) output = T.zeros(output_shape) indices = (slice(None), slice(None), slice(padding[0], (input_shape[2] + padding[0])), slice(padding[1], (input_shape[3] + padding[1])), slice(padding[2], (input_shape[4] + padding[2]))) elif (dim_ordering == 'tf'): output_shape = (input_shape[0], (input_shape[1] + (2 * padding[0])), (input_shape[2] + (2 * padding[1])), (input_shape[3] + (2 * padding[2])), input_shape[4]) output = T.zeros(output_shape) indices = (slice(None), slice(padding[0], (input_shape[1] + padding[0])), slice(padding[1], (input_shape[2] + padding[1])), slice(padding[2], (input_shape[3] + padding[2])), slice(None)) else: raise ValueError('Invalid dim_ordering:', dim_ordering) return T.set_subtensor(output[indices], x)
[ "def", "spatial_3d_padding", "(", "x", ",", "padding", "=", "(", "1", ",", "1", ",", "1", ")", ",", "dim_ordering", "=", "'default'", ")", ":", "if", "(", "dim_ordering", "==", "'default'", ")", ":", "dim_ordering", "=", "image_dim_ordering", "(", ")", "if", "(", "dim_ordering", "not", "in", "{", "'th'", ",", "'tf'", "}", ")", ":", "raise", "ValueError", "(", "(", "'Unknown dim_ordering '", "+", "str", "(", "dim_ordering", ")", ")", ")", "input_shape", "=", "x", ".", "shape", "if", "(", "dim_ordering", "==", "'th'", ")", ":", "output_shape", "=", "(", "input_shape", "[", "0", "]", ",", "input_shape", "[", "1", "]", ",", "(", "input_shape", "[", "2", "]", "+", "(", "2", "*", "padding", "[", "0", "]", ")", ")", ",", "(", "input_shape", "[", "3", "]", "+", "(", "2", "*", "padding", "[", "1", "]", ")", ")", ",", "(", "input_shape", "[", "4", "]", "+", "(", "2", "*", "padding", "[", "2", "]", ")", ")", ")", "output", "=", "T", ".", "zeros", "(", "output_shape", ")", "indices", "=", "(", "slice", "(", "None", ")", ",", "slice", "(", "None", ")", ",", "slice", "(", "padding", "[", "0", "]", ",", "(", "input_shape", "[", "2", "]", "+", "padding", "[", "0", "]", ")", ")", ",", "slice", "(", "padding", "[", "1", "]", ",", "(", "input_shape", "[", "3", "]", "+", "padding", "[", "1", "]", ")", ")", ",", "slice", "(", "padding", "[", "2", "]", ",", "(", "input_shape", "[", "4", "]", "+", "padding", "[", "2", "]", ")", ")", ")", "elif", "(", "dim_ordering", "==", "'tf'", ")", ":", "output_shape", "=", "(", "input_shape", "[", "0", "]", ",", "(", "input_shape", "[", "1", "]", "+", "(", "2", "*", "padding", "[", "0", "]", ")", ")", ",", "(", "input_shape", "[", "2", "]", "+", "(", "2", "*", "padding", "[", "1", "]", ")", ")", ",", "(", "input_shape", "[", "3", "]", "+", "(", "2", "*", "padding", "[", "2", "]", ")", ")", ",", "input_shape", "[", "4", "]", ")", "output", "=", "T", ".", "zeros", "(", "output_shape", ")", "indices", "=", "(", "slice", "(", "None", ")", ",", "slice", 
"(", "padding", "[", "0", "]", ",", "(", "input_shape", "[", "1", "]", "+", "padding", "[", "0", "]", ")", ")", ",", "slice", "(", "padding", "[", "1", "]", ",", "(", "input_shape", "[", "2", "]", "+", "padding", "[", "1", "]", ")", ")", ",", "slice", "(", "padding", "[", "2", "]", ",", "(", "input_shape", "[", "3", "]", "+", "padding", "[", "2", "]", ")", ")", ",", "slice", "(", "None", ")", ")", "else", ":", "raise", "ValueError", "(", "'Invalid dim_ordering:'", ",", "dim_ordering", ")", "return", "T", ".", "set_subtensor", "(", "output", "[", "indices", "]", ",", "x", ")" ]
pads 5d tensor with zeros for the depth .
train
false
18,002
def ExportStateMessage(state): return {STATE_READ: 'Batch read from file.', STATE_GETTING: 'Fetching batch from server', STATE_GOT: 'Batch successfully fetched.', STATE_ERROR: 'Error while fetching batch'}[state]
[ "def", "ExportStateMessage", "(", "state", ")", ":", "return", "{", "STATE_READ", ":", "'Batch read from file.'", ",", "STATE_GETTING", ":", "'Fetching batch from server'", ",", "STATE_GOT", ":", "'Batch successfully fetched.'", ",", "STATE_ERROR", ":", "'Error while fetching batch'", "}", "[", "state", "]" ]
converts a numeric state identifier to a status message .
train
false
18,004
@deprecated_renamed_argument('clobber', 'overwrite', '1.3') def writeto(filename, data, header=None, output_verify='exception', overwrite=False, checksum=False): hdu = _makehdu(data, header) if (hdu.is_image and (not isinstance(hdu, PrimaryHDU))): hdu = PrimaryHDU(data, header=header) hdu.writeto(filename, overwrite=overwrite, output_verify=output_verify, checksum=checksum)
[ "@", "deprecated_renamed_argument", "(", "'clobber'", ",", "'overwrite'", ",", "'1.3'", ")", "def", "writeto", "(", "filename", ",", "data", ",", "header", "=", "None", ",", "output_verify", "=", "'exception'", ",", "overwrite", "=", "False", ",", "checksum", "=", "False", ")", ":", "hdu", "=", "_makehdu", "(", "data", ",", "header", ")", "if", "(", "hdu", ".", "is_image", "and", "(", "not", "isinstance", "(", "hdu", ",", "PrimaryHDU", ")", ")", ")", ":", "hdu", "=", "PrimaryHDU", "(", "data", ",", "header", "=", "header", ")", "hdu", ".", "writeto", "(", "filename", ",", "overwrite", "=", "overwrite", ",", "output_verify", "=", "output_verify", ",", "checksum", "=", "checksum", ")" ]
writes a ~astropy .
train
false
18,005
def store(sources, targets, lock=True, regions=None, compute=True, **kwargs): if isinstance(sources, Array): sources = [sources] targets = [targets] if any(((not isinstance(s, Array)) for s in sources)): raise ValueError('All sources must be dask array objects') if (len(sources) != len(targets)): raise ValueError(('Different number of sources [%d] and targets [%d]' % (len(sources), len(targets)))) if (isinstance(regions, tuple) or (regions is None)): regions = [regions] if ((len(sources) > 1) and (len(regions) == 1)): regions *= len(sources) if (len(sources) != len(regions)): raise ValueError(('Different number of sources [%d] and targets [%d] than regions [%d]' % (len(sources), len(targets), len(regions)))) updates = [insert_to_ooc(tgt, src, lock=lock, region=reg) for (tgt, src, reg) in zip(targets, sources, regions)] dsk = merge(([src.dask for src in sources] + updates)) keys = [key for u in updates for key in u] if compute: Array._get(dsk, keys, **kwargs) else: from ..delayed import Delayed name = ('store-' + tokenize(*keys)) dsk[name] = keys return Delayed(name, [dsk])
[ "def", "store", "(", "sources", ",", "targets", ",", "lock", "=", "True", ",", "regions", "=", "None", ",", "compute", "=", "True", ",", "**", "kwargs", ")", ":", "if", "isinstance", "(", "sources", ",", "Array", ")", ":", "sources", "=", "[", "sources", "]", "targets", "=", "[", "targets", "]", "if", "any", "(", "(", "(", "not", "isinstance", "(", "s", ",", "Array", ")", ")", "for", "s", "in", "sources", ")", ")", ":", "raise", "ValueError", "(", "'All sources must be dask array objects'", ")", "if", "(", "len", "(", "sources", ")", "!=", "len", "(", "targets", ")", ")", ":", "raise", "ValueError", "(", "(", "'Different number of sources [%d] and targets [%d]'", "%", "(", "len", "(", "sources", ")", ",", "len", "(", "targets", ")", ")", ")", ")", "if", "(", "isinstance", "(", "regions", ",", "tuple", ")", "or", "(", "regions", "is", "None", ")", ")", ":", "regions", "=", "[", "regions", "]", "if", "(", "(", "len", "(", "sources", ")", ">", "1", ")", "and", "(", "len", "(", "regions", ")", "==", "1", ")", ")", ":", "regions", "*=", "len", "(", "sources", ")", "if", "(", "len", "(", "sources", ")", "!=", "len", "(", "regions", ")", ")", ":", "raise", "ValueError", "(", "(", "'Different number of sources [%d] and targets [%d] than regions [%d]'", "%", "(", "len", "(", "sources", ")", ",", "len", "(", "targets", ")", ",", "len", "(", "regions", ")", ")", ")", ")", "updates", "=", "[", "insert_to_ooc", "(", "tgt", ",", "src", ",", "lock", "=", "lock", ",", "region", "=", "reg", ")", "for", "(", "tgt", ",", "src", ",", "reg", ")", "in", "zip", "(", "targets", ",", "sources", ",", "regions", ")", "]", "dsk", "=", "merge", "(", "(", "[", "src", ".", "dask", "for", "src", "in", "sources", "]", "+", "updates", ")", ")", "keys", "=", "[", "key", "for", "u", "in", "updates", "for", "key", "in", "u", "]", "if", "compute", ":", "Array", ".", "_get", "(", "dsk", ",", "keys", ",", "**", "kwargs", ")", "else", ":", "from", ".", ".", "delayed", "import", "Delayed", "name", "=", 
"(", "'store-'", "+", "tokenize", "(", "*", "keys", ")", ")", "dsk", "[", "name", "]", "=", "keys", "return", "Delayed", "(", "name", ",", "[", "dsk", "]", ")" ]
store the object x and returns a new object descriptor for it .
train
false
18,006
def scroll_one_line_up(event): w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name) b = event.cli.current_buffer if w: if w.render_info: info = w.render_info if (w.vertical_scroll > 0): first_line_height = info.get_height_for_line(info.first_visible_line()) cursor_up = (info.cursor_position.y - (((info.window_height - 1) - first_line_height) - info.configured_scroll_offsets.bottom)) for _ in range(max(0, cursor_up)): b.cursor_position += b.document.get_cursor_up_position() w.vertical_scroll -= 1
[ "def", "scroll_one_line_up", "(", "event", ")", ":", "w", "=", "find_window_for_buffer_name", "(", "event", ".", "cli", ",", "event", ".", "cli", ".", "current_buffer_name", ")", "b", "=", "event", ".", "cli", ".", "current_buffer", "if", "w", ":", "if", "w", ".", "render_info", ":", "info", "=", "w", ".", "render_info", "if", "(", "w", ".", "vertical_scroll", ">", "0", ")", ":", "first_line_height", "=", "info", ".", "get_height_for_line", "(", "info", ".", "first_visible_line", "(", ")", ")", "cursor_up", "=", "(", "info", ".", "cursor_position", ".", "y", "-", "(", "(", "(", "info", ".", "window_height", "-", "1", ")", "-", "first_line_height", ")", "-", "info", ".", "configured_scroll_offsets", ".", "bottom", ")", ")", "for", "_", "in", "range", "(", "max", "(", "0", ",", "cursor_up", ")", ")", ":", "b", ".", "cursor_position", "+=", "b", ".", "document", ".", "get_cursor_up_position", "(", ")", "w", ".", "vertical_scroll", "-=", "1" ]
scroll_offset -= 1 .
train
true
18,007
def validate_bucket_path(path): _validate_path(path) if (not _GCS_BUCKET_PATH_REGEX.match(path)): raise ValueError(('Bucket should have format /bucket but got %s' % path))
[ "def", "validate_bucket_path", "(", "path", ")", ":", "_validate_path", "(", "path", ")", "if", "(", "not", "_GCS_BUCKET_PATH_REGEX", ".", "match", "(", "path", ")", ")", ":", "raise", "ValueError", "(", "(", "'Bucket should have format /bucket but got %s'", "%", "path", ")", ")" ]
validate a google cloud storage bucket path .
train
false
18,008
@pytest.mark.parametrize('type,extra_inputs', [('shuup.custompaymentprocessor', ['rounding_quantize']), ('shuup_testing.pseudopaymentprocessor', ['bg_color', 'fg_color'])]) def test_new_service_provider_form_fields(rf, admin_user, type, extra_inputs): with override_settings(LANGUAGES=[('en', 'en')]): base_inputs = ['csrfmiddlewaretoken', 'name__en', 'enabled', 'logo'] get_default_shop() view = ServiceProviderEditView.as_view() soup = get_bs_object_for_view(rf.get(('?type=%s' % type)), view, admin_user) provider_form = soup.find('form', attrs={'id': 'service_provider_form'}) rendered_fields = [] for input_field in provider_form.findAll('input'): rendered_fields.append(input_field['name']) assert (rendered_fields == (base_inputs + extra_inputs))
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'type,extra_inputs'", ",", "[", "(", "'shuup.custompaymentprocessor'", ",", "[", "'rounding_quantize'", "]", ")", ",", "(", "'shuup_testing.pseudopaymentprocessor'", ",", "[", "'bg_color'", ",", "'fg_color'", "]", ")", "]", ")", "def", "test_new_service_provider_form_fields", "(", "rf", ",", "admin_user", ",", "type", ",", "extra_inputs", ")", ":", "with", "override_settings", "(", "LANGUAGES", "=", "[", "(", "'en'", ",", "'en'", ")", "]", ")", ":", "base_inputs", "=", "[", "'csrfmiddlewaretoken'", ",", "'name__en'", ",", "'enabled'", ",", "'logo'", "]", "get_default_shop", "(", ")", "view", "=", "ServiceProviderEditView", ".", "as_view", "(", ")", "soup", "=", "get_bs_object_for_view", "(", "rf", ".", "get", "(", "(", "'?type=%s'", "%", "type", ")", ")", ",", "view", ",", "admin_user", ")", "provider_form", "=", "soup", ".", "find", "(", "'form'", ",", "attrs", "=", "{", "'id'", ":", "'service_provider_form'", "}", ")", "rendered_fields", "=", "[", "]", "for", "input_field", "in", "provider_form", ".", "findAll", "(", "'input'", ")", ":", "rendered_fields", ".", "append", "(", "input_field", "[", "'name'", "]", ")", "assert", "(", "rendered_fields", "==", "(", "base_inputs", "+", "extra_inputs", ")", ")" ]
test serviceprovideeditview fields in new mode .
train
false
18,009
def __parse_drac(output): drac = {} section = '' for i in output.splitlines(): if (i.strip().endswith(':') and ('=' not in i)): section = i[0:(-1)] drac[section] = {} if ((len(i.rstrip()) > 0) and ('=' in i)): if (section in drac): drac[section].update(dict([[prop.strip() for prop in i.split('=')]])) else: section = i.strip() if ((section not in drac) and section): drac[section] = {} return drac
[ "def", "__parse_drac", "(", "output", ")", ":", "drac", "=", "{", "}", "section", "=", "''", "for", "i", "in", "output", ".", "splitlines", "(", ")", ":", "if", "(", "i", ".", "strip", "(", ")", ".", "endswith", "(", "':'", ")", "and", "(", "'='", "not", "in", "i", ")", ")", ":", "section", "=", "i", "[", "0", ":", "(", "-", "1", ")", "]", "drac", "[", "section", "]", "=", "{", "}", "if", "(", "(", "len", "(", "i", ".", "rstrip", "(", ")", ")", ">", "0", ")", "and", "(", "'='", "in", "i", ")", ")", ":", "if", "(", "section", "in", "drac", ")", ":", "drac", "[", "section", "]", ".", "update", "(", "dict", "(", "[", "[", "prop", ".", "strip", "(", ")", "for", "prop", "in", "i", ".", "split", "(", "'='", ")", "]", "]", ")", ")", "else", ":", "section", "=", "i", ".", "strip", "(", ")", "if", "(", "(", "section", "not", "in", "drac", ")", "and", "section", ")", ":", "drac", "[", "section", "]", "=", "{", "}", "return", "drac" ]
parse dell drac output .
train
true
18,010
def _update_dataset_maximum_size(deployment, dataset_id, maximum_size): (_, node) = _find_manifestation_and_node(deployment, dataset_id) deployment = deployment.set(nodes=deployment.nodes.discard(node)) node = node.transform(['manifestations', dataset_id, 'dataset', 'maximum_size'], maximum_size) return deployment.set(nodes=deployment.nodes.add(node))
[ "def", "_update_dataset_maximum_size", "(", "deployment", ",", "dataset_id", ",", "maximum_size", ")", ":", "(", "_", ",", "node", ")", "=", "_find_manifestation_and_node", "(", "deployment", ",", "dataset_id", ")", "deployment", "=", "deployment", ".", "set", "(", "nodes", "=", "deployment", ".", "nodes", ".", "discard", "(", "node", ")", ")", "node", "=", "node", ".", "transform", "(", "[", "'manifestations'", ",", "dataset_id", ",", "'dataset'", ",", "'maximum_size'", "]", ",", "maximum_size", ")", "return", "deployment", ".", "set", "(", "nodes", "=", "deployment", ".", "nodes", ".", "add", "(", "node", ")", ")" ]
update the deployment so that the dataset with the supplied dataset_id has the supplied maximum_size .
train
false
18,012
def TurnIntIntoStrInList(the_list): for index in xrange(0, len(the_list)): item = the_list[index] if (type(item) is int): the_list[index] = str(item) elif (type(item) is dict): TurnIntIntoStrInDict(item) elif (type(item) is list): TurnIntIntoStrInList(item)
[ "def", "TurnIntIntoStrInList", "(", "the_list", ")", ":", "for", "index", "in", "xrange", "(", "0", ",", "len", "(", "the_list", ")", ")", ":", "item", "=", "the_list", "[", "index", "]", "if", "(", "type", "(", "item", ")", "is", "int", ")", ":", "the_list", "[", "index", "]", "=", "str", "(", "item", ")", "elif", "(", "type", "(", "item", ")", "is", "dict", ")", ":", "TurnIntIntoStrInDict", "(", "item", ")", "elif", "(", "type", "(", "item", ")", "is", "list", ")", ":", "TurnIntIntoStrInList", "(", "item", ")" ]
given list the_list .
train
false
18,014
def are_datetimes_close(later_datetime, earlier_datetime): difference_in_secs = (later_datetime - earlier_datetime).total_seconds() return (difference_in_secs < feconf.PROXIMAL_TIMEDELTA_SECS)
[ "def", "are_datetimes_close", "(", "later_datetime", ",", "earlier_datetime", ")", ":", "difference_in_secs", "=", "(", "later_datetime", "-", "earlier_datetime", ")", ".", "total_seconds", "(", ")", "return", "(", "difference_in_secs", "<", "feconf", ".", "PROXIMAL_TIMEDELTA_SECS", ")" ]
given two datetimes .
train
false
18,015
@pytest.fixture def prefix_loader(filesystem_loader, dict_loader): return loaders.PrefixLoader({'a': filesystem_loader, 'b': dict_loader})
[ "@", "pytest", ".", "fixture", "def", "prefix_loader", "(", "filesystem_loader", ",", "dict_loader", ")", ":", "return", "loaders", ".", "PrefixLoader", "(", "{", "'a'", ":", "filesystem_loader", ",", "'b'", ":", "dict_loader", "}", ")" ]
returns a prefixloader .
train
false
18,016
def get_scsi_adapter_type(hardware_devices): if (hardware_devices.__class__.__name__ == 'ArrayOfVirtualDevice'): hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if (device.__class__.__name__ in scsi_controller_classes): if (len(device.device) < constants.SCSI_MAX_CONNECT_NUMBER): return scsi_controller_classes[device.__class__.__name__] raise exception.StorageError(reason=_('Unable to find iSCSI Target'))
[ "def", "get_scsi_adapter_type", "(", "hardware_devices", ")", ":", "if", "(", "hardware_devices", ".", "__class__", ".", "__name__", "==", "'ArrayOfVirtualDevice'", ")", ":", "hardware_devices", "=", "hardware_devices", ".", "VirtualDevice", "for", "device", "in", "hardware_devices", ":", "if", "(", "device", ".", "__class__", ".", "__name__", "in", "scsi_controller_classes", ")", ":", "if", "(", "len", "(", "device", ".", "device", ")", "<", "constants", ".", "SCSI_MAX_CONNECT_NUMBER", ")", ":", "return", "scsi_controller_classes", "[", "device", ".", "__class__", ".", "__name__", "]", "raise", "exception", ".", "StorageError", "(", "reason", "=", "_", "(", "'Unable to find iSCSI Target'", ")", ")" ]
selects a proper iscsi adapter type from the existing hardware devices .
train
false
18,018
def do_select(): PCap.use_select = True
[ "def", "do_select", "(", ")", ":", "PCap", ".", "use_select", "=", "True" ]
sets default pcap behavior to try to use select() .
train
false
18,019
def readLong(data): try: big = struct.unpack('>q', data[0:8])[0] rest = data[8:] return (big, rest) except struct.error: print ('Error: too few bytes for long', data, len(data)) return (0, data)
[ "def", "readLong", "(", "data", ")", ":", "try", ":", "big", "=", "struct", ".", "unpack", "(", "'>q'", ",", "data", "[", "0", ":", "8", "]", ")", "[", "0", "]", "rest", "=", "data", "[", "8", ":", "]", "return", "(", "big", ",", "rest", ")", "except", "struct", ".", "error", ":", "print", "(", "'Error: too few bytes for long'", ",", "data", ",", "len", "(", "data", ")", ")", "return", "(", "0", ",", "data", ")" ]
tries to interpret the next 8 bytes of the data as a 64-bit signed integer .
train
false
18,020
def tz_from_string(_option, _opt_str, value, parser): if (value is not None): if (value[0] in ['+', '-']): valarray = [value[i:(i + 2)] for i in range(1, len(value), 2)] multipliers = [3600, 60] offset = 0 for i in range(min(len(valarray), len(multipliers))): offset += (int(valarray[i]) * multipliers[i]) if (value[0] == '-'): offset = (- offset) timezone = OffsetTzInfo(offset=offset) elif tz_pytz: try: timezone = pytz.timezone(value) except pytz.UnknownTimeZoneError: debug.error('Unknown display timezone specified') else: if (not hasattr(time, 'tzset')): debug.error("This operating system doesn't support tzset, please either specify an offset (eg. +1000) or install pytz") timezone = value parser.values.tz = timezone
[ "def", "tz_from_string", "(", "_option", ",", "_opt_str", ",", "value", ",", "parser", ")", ":", "if", "(", "value", "is", "not", "None", ")", ":", "if", "(", "value", "[", "0", "]", "in", "[", "'+'", ",", "'-'", "]", ")", ":", "valarray", "=", "[", "value", "[", "i", ":", "(", "i", "+", "2", ")", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "value", ")", ",", "2", ")", "]", "multipliers", "=", "[", "3600", ",", "60", "]", "offset", "=", "0", "for", "i", "in", "range", "(", "min", "(", "len", "(", "valarray", ")", ",", "len", "(", "multipliers", ")", ")", ")", ":", "offset", "+=", "(", "int", "(", "valarray", "[", "i", "]", ")", "*", "multipliers", "[", "i", "]", ")", "if", "(", "value", "[", "0", "]", "==", "'-'", ")", ":", "offset", "=", "(", "-", "offset", ")", "timezone", "=", "OffsetTzInfo", "(", "offset", "=", "offset", ")", "elif", "tz_pytz", ":", "try", ":", "timezone", "=", "pytz", ".", "timezone", "(", "value", ")", "except", "pytz", ".", "UnknownTimeZoneError", ":", "debug", ".", "error", "(", "'Unknown display timezone specified'", ")", "else", ":", "if", "(", "not", "hasattr", "(", "time", ",", "'tzset'", ")", ")", ":", "debug", ".", "error", "(", "\"This operating system doesn't support tzset, please either specify an offset (eg. +1000) or install pytz\"", ")", "timezone", "=", "value", "parser", ".", "values", ".", "tz", "=", "timezone" ]
stores a tzinfo object from a string .
train
false
18,021
def _build_credentials_tuple(mech, source, user, passwd, extra): user = (_unicode(user) if (user is not None) else None) password = (passwd if (passwd is None) else _unicode(passwd)) if (mech == 'GSSAPI'): properties = extra.get('authmechanismproperties', {}) service_name = properties.get('SERVICE_NAME', 'mongodb') canonicalize = properties.get('CANONICALIZE_HOST_NAME', False) service_realm = properties.get('SERVICE_REALM') props = GSSAPIProperties(service_name=service_name, canonicalize_host_name=canonicalize, service_realm=service_realm) return MongoCredential(mech, '$external', user, password, props) elif (mech == 'MONGODB-X509'): return MongoCredential(mech, '$external', user, None, None) else: if (passwd is None): raise ConfigurationError('A password is required.') return MongoCredential(mech, source, user, password, None)
[ "def", "_build_credentials_tuple", "(", "mech", ",", "source", ",", "user", ",", "passwd", ",", "extra", ")", ":", "user", "=", "(", "_unicode", "(", "user", ")", "if", "(", "user", "is", "not", "None", ")", "else", "None", ")", "password", "=", "(", "passwd", "if", "(", "passwd", "is", "None", ")", "else", "_unicode", "(", "passwd", ")", ")", "if", "(", "mech", "==", "'GSSAPI'", ")", ":", "properties", "=", "extra", ".", "get", "(", "'authmechanismproperties'", ",", "{", "}", ")", "service_name", "=", "properties", ".", "get", "(", "'SERVICE_NAME'", ",", "'mongodb'", ")", "canonicalize", "=", "properties", ".", "get", "(", "'CANONICALIZE_HOST_NAME'", ",", "False", ")", "service_realm", "=", "properties", ".", "get", "(", "'SERVICE_REALM'", ")", "props", "=", "GSSAPIProperties", "(", "service_name", "=", "service_name", ",", "canonicalize_host_name", "=", "canonicalize", ",", "service_realm", "=", "service_realm", ")", "return", "MongoCredential", "(", "mech", ",", "'$external'", ",", "user", ",", "password", ",", "props", ")", "elif", "(", "mech", "==", "'MONGODB-X509'", ")", ":", "return", "MongoCredential", "(", "mech", ",", "'$external'", ",", "user", ",", "None", ",", "None", ")", "else", ":", "if", "(", "passwd", "is", "None", ")", ":", "raise", "ConfigurationError", "(", "'A password is required.'", ")", "return", "MongoCredential", "(", "mech", ",", "source", ",", "user", ",", "password", ",", "None", ")" ]
build and return a mechanism specific credentials tuple .
train
true
18,022
def color_y_axis(ax, color): for t in ax.get_yticklabels(): t.set_color(color) return None
[ "def", "color_y_axis", "(", "ax", ",", "color", ")", ":", "for", "t", "in", "ax", ".", "get_yticklabels", "(", ")", ":", "t", ".", "set_color", "(", "color", ")", "return", "None" ]
color your axes .
train
false
18,023
def migrate_set_tags_and_taxes_updatable(cr, registry, module): env = api.Environment(cr, SUPERUSER_ID, {}) xml_record_ids = env['ir.model.data'].search([('model', 'in', ['account.tax.template', 'account.account.tag']), ('module', 'like', module)]).ids if xml_record_ids: cr.execute("update ir_model_data set noupdate = 'f' where id in %s", (tuple(xml_record_ids),))
[ "def", "migrate_set_tags_and_taxes_updatable", "(", "cr", ",", "registry", ",", "module", ")", ":", "env", "=", "api", ".", "Environment", "(", "cr", ",", "SUPERUSER_ID", ",", "{", "}", ")", "xml_record_ids", "=", "env", "[", "'ir.model.data'", "]", ".", "search", "(", "[", "(", "'model'", ",", "'in'", ",", "[", "'account.tax.template'", ",", "'account.account.tag'", "]", ")", ",", "(", "'module'", ",", "'like'", ",", "module", ")", "]", ")", ".", "ids", "if", "xml_record_ids", ":", "cr", ".", "execute", "(", "\"update ir_model_data set noupdate = 'f' where id in %s\"", ",", "(", "tuple", "(", "xml_record_ids", ")", ",", ")", ")" ]
this is a utility function used to manually set the flag noupdate to false on tags and account tax templates on localization modules that need migration .
train
false
18,025
def pad_parameter_sender(global_control, pad_control): def do_send(parameters, pad=None): if (pad != None): pad_control.send_value(((pad,) + parameters.sysex_bytes)) else: global_control.send_value(parameters.sysex_bytes) return do_send
[ "def", "pad_parameter_sender", "(", "global_control", ",", "pad_control", ")", ":", "def", "do_send", "(", "parameters", ",", "pad", "=", "None", ")", ":", "if", "(", "pad", "!=", "None", ")", ":", "pad_control", ".", "send_value", "(", "(", "(", "pad", ",", ")", "+", "parameters", ".", "sysex_bytes", ")", ")", "else", ":", "global_control", ".", "send_value", "(", "parameters", ".", "sysex_bytes", ")", "return", "do_send" ]
sends the sensitivity parameters for a given pad .
train
false
18,026
def test_json_underscore(): test_data = BytesIO('{"CamelCase": {"becauseWeCan": "ValueExempt"}}') assert (hug.input_format.json_underscore(test_data) == {'camel_case': {'because_we_can': 'ValueExempt'}})
[ "def", "test_json_underscore", "(", ")", ":", "test_data", "=", "BytesIO", "(", "'{\"CamelCase\": {\"becauseWeCan\": \"ValueExempt\"}}'", ")", "assert", "(", "hug", ".", "input_format", ".", "json_underscore", "(", "test_data", ")", "==", "{", "'camel_case'", ":", "{", "'because_we_can'", ":", "'ValueExempt'", "}", "}", ")" ]
ensure that camelcase keys can be converted into under_score for easier use within python .
train
false
18,029
def prefixedMethods(obj, prefix=''): dct = {} accumulateMethods(obj, dct, prefix) return dct.values()
[ "def", "prefixedMethods", "(", "obj", ",", "prefix", "=", "''", ")", ":", "dct", "=", "{", "}", "accumulateMethods", "(", "obj", ",", "dct", ",", "prefix", ")", "return", "dct", ".", "values", "(", ")" ]
given an object c{obj} .
train
false
18,030
def add_metaclass(metaclass): def wrapper(cls): orig_vars = cls.__dict__.copy() orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) for slots_var in orig_vars.get('__slots__', ()): orig_vars.pop(slots_var) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper
[ "def", "add_metaclass", "(", "metaclass", ")", ":", "def", "wrapper", "(", "cls", ")", ":", "orig_vars", "=", "cls", ".", "__dict__", ".", "copy", "(", ")", "orig_vars", ".", "pop", "(", "'__dict__'", ",", "None", ")", "orig_vars", ".", "pop", "(", "'__weakref__'", ",", "None", ")", "for", "slots_var", "in", "orig_vars", ".", "get", "(", "'__slots__'", ",", "(", ")", ")", ":", "orig_vars", ".", "pop", "(", "slots_var", ")", "return", "metaclass", "(", "cls", ".", "__name__", ",", "cls", ".", "__bases__", ",", "orig_vars", ")", "return", "wrapper" ]
class decorator for creating a class with a metaclass .
train
true
18,032
def compute_rigid_transform(refpoints, points): A = array([[points[0], (- points[1]), 1, 0], [points[1], points[0], 0, 1], [points[2], (- points[3]), 1, 0], [points[3], points[2], 0, 1], [points[4], (- points[5]), 1, 0], [points[5], points[4], 0, 1]]) y = array([refpoints[0], refpoints[1], refpoints[2], refpoints[3], refpoints[4], refpoints[5]]) (a, b, tx, ty) = linalg.lstsq(A, y)[0] R = array([[a, (- b)], [b, a]]) return (R, tx, ty)
[ "def", "compute_rigid_transform", "(", "refpoints", ",", "points", ")", ":", "A", "=", "array", "(", "[", "[", "points", "[", "0", "]", ",", "(", "-", "points", "[", "1", "]", ")", ",", "1", ",", "0", "]", ",", "[", "points", "[", "1", "]", ",", "points", "[", "0", "]", ",", "0", ",", "1", "]", ",", "[", "points", "[", "2", "]", ",", "(", "-", "points", "[", "3", "]", ")", ",", "1", ",", "0", "]", ",", "[", "points", "[", "3", "]", ",", "points", "[", "2", "]", ",", "0", ",", "1", "]", ",", "[", "points", "[", "4", "]", ",", "(", "-", "points", "[", "5", "]", ")", ",", "1", ",", "0", "]", ",", "[", "points", "[", "5", "]", ",", "points", "[", "4", "]", ",", "0", ",", "1", "]", "]", ")", "y", "=", "array", "(", "[", "refpoints", "[", "0", "]", ",", "refpoints", "[", "1", "]", ",", "refpoints", "[", "2", "]", ",", "refpoints", "[", "3", "]", ",", "refpoints", "[", "4", "]", ",", "refpoints", "[", "5", "]", "]", ")", "(", "a", ",", "b", ",", "tx", ",", "ty", ")", "=", "linalg", ".", "lstsq", "(", "A", ",", "y", ")", "[", "0", "]", "R", "=", "array", "(", "[", "[", "a", ",", "(", "-", "b", ")", "]", ",", "[", "b", ",", "a", "]", "]", ")", "return", "(", "R", ",", "tx", ",", "ty", ")" ]
computes rotation .
train
false
18,033
def float_uint16(inarray): i16max = ((2 ** 16) - 1) retVal = numpy.around(((i16max * (1.0 + numpy.asarray(inarray))) / 2.0)) return retVal.astype(numpy.uint16)
[ "def", "float_uint16", "(", "inarray", ")", ":", "i16max", "=", "(", "(", "2", "**", "16", ")", "-", "1", ")", "retVal", "=", "numpy", ".", "around", "(", "(", "(", "i16max", "*", "(", "1.0", "+", "numpy", ".", "asarray", "(", "inarray", ")", ")", ")", "/", "2.0", ")", ")", "return", "retVal", ".", "astype", "(", "numpy", ".", "uint16", ")" ]
converts arrays .
train
false
18,034
def dn_startswith(descendant_dn, dn): if (not isinstance(descendant_dn, list)): descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn)) if (not isinstance(dn, list)): dn = ldap.dn.str2dn(utf8_encode(dn)) if (len(descendant_dn) <= len(dn)): return False return is_dn_equal(descendant_dn[(- len(dn)):], dn)
[ "def", "dn_startswith", "(", "descendant_dn", ",", "dn", ")", ":", "if", "(", "not", "isinstance", "(", "descendant_dn", ",", "list", ")", ")", ":", "descendant_dn", "=", "ldap", ".", "dn", ".", "str2dn", "(", "utf8_encode", "(", "descendant_dn", ")", ")", "if", "(", "not", "isinstance", "(", "dn", ",", "list", ")", ")", ":", "dn", "=", "ldap", ".", "dn", ".", "str2dn", "(", "utf8_encode", "(", "dn", ")", ")", "if", "(", "len", "(", "descendant_dn", ")", "<=", "len", "(", "dn", ")", ")", ":", "return", "False", "return", "is_dn_equal", "(", "descendant_dn", "[", "(", "-", "len", "(", "dn", ")", ")", ":", "]", ",", "dn", ")" ]
return true if and only if the descendant_dn is under the dn .
train
false
18,035
def check_sizes(size, width, height): if (not size): return (width, height) if (len(size) != 2): raise ValueError('size argument should be a pair (width, height)') if ((width is not None) and (width != size[0])): raise ValueError(('size[0] (%r) and width (%r) should match when both are used.' % (size[0], width))) if ((height is not None) and (height != size[1])): raise ValueError(('size[1] (%r) and height (%r) should match when both are used.' % (size[1], height))) return size
[ "def", "check_sizes", "(", "size", ",", "width", ",", "height", ")", ":", "if", "(", "not", "size", ")", ":", "return", "(", "width", ",", "height", ")", "if", "(", "len", "(", "size", ")", "!=", "2", ")", ":", "raise", "ValueError", "(", "'size argument should be a pair (width, height)'", ")", "if", "(", "(", "width", "is", "not", "None", ")", "and", "(", "width", "!=", "size", "[", "0", "]", ")", ")", ":", "raise", "ValueError", "(", "(", "'size[0] (%r) and width (%r) should match when both are used.'", "%", "(", "size", "[", "0", "]", ",", "width", ")", ")", ")", "if", "(", "(", "height", "is", "not", "None", ")", "and", "(", "height", "!=", "size", "[", "1", "]", ")", ")", ":", "raise", "ValueError", "(", "(", "'size[1] (%r) and height (%r) should match when both are used.'", "%", "(", "size", "[", "1", "]", ",", "height", ")", ")", ")", "return", "size" ]
check that these arguments .
train
true
18,036
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get new repository .
train
false
18,037
def read_body(environ): length = environ.get('CONTENT_LENGTH', '0') length = (0 if (length == '') else int(length)) return environ['wsgi.input'].read(length)
[ "def", "read_body", "(", "environ", ")", ":", "length", "=", "environ", ".", "get", "(", "'CONTENT_LENGTH'", ",", "'0'", ")", "length", "=", "(", "0", "if", "(", "length", "==", "''", ")", "else", "int", "(", "length", ")", ")", "return", "environ", "[", "'wsgi.input'", "]", ".", "read", "(", "length", ")" ]
pull the body from the request and return it .
train
true
18,038
def verify_mock_calls(mocked_call, expected_calls_and_values, any_order=False): expected_calls = [call[0] for call in expected_calls_and_values] mocked_call.assert_has_calls(expected_calls, any_order=any_order)
[ "def", "verify_mock_calls", "(", "mocked_call", ",", "expected_calls_and_values", ",", "any_order", "=", "False", ")", ":", "expected_calls", "=", "[", "call", "[", "0", "]", "for", "call", "in", "expected_calls_and_values", "]", "mocked_call", ".", "assert_has_calls", "(", "expected_calls", ",", "any_order", "=", "any_order", ")" ]
a convenient method to setup a sequence of mock calls .
train
false
18,039
def reinit_crypto(): if HAS_CRYPTO: Crypto.Random.atfork()
[ "def", "reinit_crypto", "(", ")", ":", "if", "HAS_CRYPTO", ":", "Crypto", ".", "Random", ".", "atfork", "(", ")" ]
when a fork arrises .
train
false
18,040
def set_datastore_value_for_config_key(pack_name, key_name, value, secret=False, user=None): if user: scope = FULL_USER_SCOPE else: scope = FULL_SYSTEM_SCOPE name = get_key_reference(scope=scope, name=key_name, user=user) kvp_api = KeyValuePairAPI(name=name, value=value, scope=scope, secret=secret) kvp_db = KeyValuePairAPI.to_model(kvp_api) existing_kvp_db = KeyValuePair.get_by_scope_and_name(scope=scope, name=name) if existing_kvp_db: kvp_db.id = existing_kvp_db.id kvp_db = KeyValuePair.add_or_update(kvp_db) return kvp_db
[ "def", "set_datastore_value_for_config_key", "(", "pack_name", ",", "key_name", ",", "value", ",", "secret", "=", "False", ",", "user", "=", "None", ")", ":", "if", "user", ":", "scope", "=", "FULL_USER_SCOPE", "else", ":", "scope", "=", "FULL_SYSTEM_SCOPE", "name", "=", "get_key_reference", "(", "scope", "=", "scope", ",", "name", "=", "key_name", ",", "user", "=", "user", ")", "kvp_api", "=", "KeyValuePairAPI", "(", "name", "=", "name", ",", "value", "=", "value", ",", "scope", "=", "scope", ",", "secret", "=", "secret", ")", "kvp_db", "=", "KeyValuePairAPI", ".", "to_model", "(", "kvp_api", ")", "existing_kvp_db", "=", "KeyValuePair", ".", "get_by_scope_and_name", "(", "scope", "=", "scope", ",", "name", "=", "name", ")", "if", "existing_kvp_db", ":", "kvp_db", ".", "id", "=", "existing_kvp_db", ".", "id", "kvp_db", "=", "KeyValuePair", ".", "add_or_update", "(", "kvp_db", ")", "return", "kvp_db" ]
set config value in the datastore .
train
false
18,042
def get_func_name(func): if PY2: return func.func_name else: return func.__name__
[ "def", "get_func_name", "(", "func", ")", ":", "if", "PY2", ":", "return", "func", ".", "func_name", "else", ":", "return", "func", ".", "__name__" ]
return function name .
train
false
18,043
def _gen_ssl_lab_urls(domains): return [('https://www.ssllabs.com/ssltest/analyze.html?d=%s' % dom) for dom in domains]
[ "def", "_gen_ssl_lab_urls", "(", "domains", ")", ":", "return", "[", "(", "'https://www.ssllabs.com/ssltest/analyze.html?d=%s'", "%", "dom", ")", "for", "dom", "in", "domains", "]" ]
returns a list of urls .
train
false
18,044
def bad_view(request): return HttpResponseNotFound('Not found!. This page contains some MAGIC content')
[ "def", "bad_view", "(", "request", ")", ":", "return", "HttpResponseNotFound", "(", "'Not found!. This page contains some MAGIC content'", ")" ]
a view that returns a 404 with some error content .
train
false
18,045
def reservation_rollback(context, reservations, project_id=None): return IMPL.reservation_rollback(context, reservations, project_id=project_id)
[ "def", "reservation_rollback", "(", "context", ",", "reservations", ",", "project_id", "=", "None", ")", ":", "return", "IMPL", ".", "reservation_rollback", "(", "context", ",", "reservations", ",", "project_id", "=", "project_id", ")" ]
roll back quota reservations .
train
false
18,046
def getIntegerFlagFromCharacterSplitLine(character, splitLine): lineFromCharacter = gcodec.getStringFromCharacterSplitLine(character, splitLine) if (lineFromCharacter == None): return 0 return 1
[ "def", "getIntegerFlagFromCharacterSplitLine", "(", "character", ",", "splitLine", ")", ":", "lineFromCharacter", "=", "gcodec", ".", "getStringFromCharacterSplitLine", "(", "character", ",", "splitLine", ")", "if", "(", "lineFromCharacter", "==", "None", ")", ":", "return", "0", "return", "1" ]
get the integer flag after the first occurence of the character in the split line .
train
false
18,047
def modelcontext(model): if (model is None): return Model.get_context() return model
[ "def", "modelcontext", "(", "model", ")", ":", "if", "(", "model", "is", "None", ")", ":", "return", "Model", ".", "get_context", "(", ")", "return", "model" ]
return the given model or try to find it in the context if there was none supplied .
train
false
18,048
def getDft(data, sampleRate=None, wantPhase=False): samples = (2 ** int(np.log2(len(data)))) samplesHalf = (samples // 2) dataSlice = data[:samples] dft = np.fft.fft(dataSlice) dftHalf = (dft[:samplesHalf] / samples) magn = (abs(dftHalf) * 2) magn[0] /= 2.0 if wantPhase: phase = np.arctan2(dftHalf.real, dftHalf.imag) if sampleRate: deltaf = ((sampleRate / samplesHalf) / 2.0) freq = np.linspace(0, (samplesHalf * deltaf), samplesHalf, endpoint=False) if wantPhase: return (magn, freq, phase) return (magn, freq) else: if wantPhase: return (magn, phase) return magn
[ "def", "getDft", "(", "data", ",", "sampleRate", "=", "None", ",", "wantPhase", "=", "False", ")", ":", "samples", "=", "(", "2", "**", "int", "(", "np", ".", "log2", "(", "len", "(", "data", ")", ")", ")", ")", "samplesHalf", "=", "(", "samples", "//", "2", ")", "dataSlice", "=", "data", "[", ":", "samples", "]", "dft", "=", "np", ".", "fft", ".", "fft", "(", "dataSlice", ")", "dftHalf", "=", "(", "dft", "[", ":", "samplesHalf", "]", "/", "samples", ")", "magn", "=", "(", "abs", "(", "dftHalf", ")", "*", "2", ")", "magn", "[", "0", "]", "/=", "2.0", "if", "wantPhase", ":", "phase", "=", "np", ".", "arctan2", "(", "dftHalf", ".", "real", ",", "dftHalf", ".", "imag", ")", "if", "sampleRate", ":", "deltaf", "=", "(", "(", "sampleRate", "/", "samplesHalf", ")", "/", "2.0", ")", "freq", "=", "np", ".", "linspace", "(", "0", ",", "(", "samplesHalf", "*", "deltaf", ")", ",", "samplesHalf", ",", "endpoint", "=", "False", ")", "if", "wantPhase", ":", "return", "(", "magn", ",", "freq", ",", "phase", ")", "return", "(", "magn", ",", "freq", ")", "else", ":", "if", "wantPhase", ":", "return", "(", "magn", ",", "phase", ")", "return", "magn" ]
compute and return magnitudes of numpy .
train
false
18,050
def testable_memoized_property(func=None, key_factory=per_instance, **kwargs): getter = memoized_method(func=func, key_factory=key_factory, **kwargs) def setter(self, val): with getter.put(self) as putter: putter(val) return property(fget=getter, fset=setter, fdel=(lambda self: getter.forget(self)))
[ "def", "testable_memoized_property", "(", "func", "=", "None", ",", "key_factory", "=", "per_instance", ",", "**", "kwargs", ")", ":", "getter", "=", "memoized_method", "(", "func", "=", "func", ",", "key_factory", "=", "key_factory", ",", "**", "kwargs", ")", "def", "setter", "(", "self", ",", "val", ")", ":", "with", "getter", ".", "put", "(", "self", ")", "as", "putter", ":", "putter", "(", "val", ")", "return", "property", "(", "fget", "=", "getter", ",", "fset", "=", "setter", ",", "fdel", "=", "(", "lambda", "self", ":", "getter", ".", "forget", "(", "self", ")", ")", ")" ]
a variant of memoized_property that allows for setting of properties .
train
true
18,052
def delete_atomic_group(id): models.AtomicGroup.smart_get(id).delete()
[ "def", "delete_atomic_group", "(", "id", ")", ":", "models", ".", "AtomicGroup", ".", "smart_get", "(", "id", ")", ".", "delete", "(", ")" ]
delete atomic group .
train
false
18,053
def _maybe_append_formatted_extension(numobj, metadata, num_format, number): if numobj.extension: if (num_format == PhoneNumberFormat.RFC3966): return ((number + _RFC3966_EXTN_PREFIX) + numobj.extension) elif (metadata.preferred_extn_prefix is not None): return ((number + metadata.preferred_extn_prefix) + numobj.extension) else: return ((number + _DEFAULT_EXTN_PREFIX) + numobj.extension) return number
[ "def", "_maybe_append_formatted_extension", "(", "numobj", ",", "metadata", ",", "num_format", ",", "number", ")", ":", "if", "numobj", ".", "extension", ":", "if", "(", "num_format", "==", "PhoneNumberFormat", ".", "RFC3966", ")", ":", "return", "(", "(", "number", "+", "_RFC3966_EXTN_PREFIX", ")", "+", "numobj", ".", "extension", ")", "elif", "(", "metadata", ".", "preferred_extn_prefix", "is", "not", "None", ")", ":", "return", "(", "(", "number", "+", "metadata", ".", "preferred_extn_prefix", ")", "+", "numobj", ".", "extension", ")", "else", ":", "return", "(", "(", "number", "+", "_DEFAULT_EXTN_PREFIX", ")", "+", "numobj", ".", "extension", ")", "return", "number" ]
appends the formatted extension of a phone number to formatted number .
train
true
18,054
def mean_squared_log_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average'): (y_type, y_true, y_pred, multioutput) = _check_reg_targets(y_true, y_pred, multioutput) if ((not (y_true >= 0).all()) and (not (y_pred >= 0).all())): raise ValueError('Mean Squared Logarithmic Error cannot be used when targets contain negative values.') return mean_squared_error(np.log((y_true + 1)), np.log((y_pred + 1)), sample_weight, multioutput)
[ "def", "mean_squared_log_error", "(", "y_true", ",", "y_pred", ",", "sample_weight", "=", "None", ",", "multioutput", "=", "'uniform_average'", ")", ":", "(", "y_type", ",", "y_true", ",", "y_pred", ",", "multioutput", ")", "=", "_check_reg_targets", "(", "y_true", ",", "y_pred", ",", "multioutput", ")", "if", "(", "(", "not", "(", "y_true", ">=", "0", ")", ".", "all", "(", ")", ")", "and", "(", "not", "(", "y_pred", ">=", "0", ")", ".", "all", "(", ")", ")", ")", ":", "raise", "ValueError", "(", "'Mean Squared Logarithmic Error cannot be used when targets contain negative values.'", ")", "return", "mean_squared_error", "(", "np", ".", "log", "(", "(", "y_true", "+", "1", ")", ")", ",", "np", ".", "log", "(", "(", "y_pred", "+", "1", ")", ")", ",", "sample_weight", ",", "multioutput", ")" ]
mean squared logarithmic error regression loss read more in the :ref:user guide <mean_squared_log_error> .
train
false
18,055
def lldp(interface=''): proxy_output = __proxy__['napalm.call']('get_lldp_neighbors_detail', **{}) if (not proxy_output.get('result')): return proxy_output lldp_neighbors = proxy_output.get('out') if interface: lldp_neighbors = {interface: lldp_neighbors.get(interface)} proxy_output.update({'out': lldp_neighbors}) return proxy_output
[ "def", "lldp", "(", "interface", "=", "''", ")", ":", "proxy_output", "=", "__proxy__", "[", "'napalm.call'", "]", "(", "'get_lldp_neighbors_detail'", ",", "**", "{", "}", ")", "if", "(", "not", "proxy_output", ".", "get", "(", "'result'", ")", ")", ":", "return", "proxy_output", "lldp_neighbors", "=", "proxy_output", ".", "get", "(", "'out'", ")", "if", "interface", ":", "lldp_neighbors", "=", "{", "interface", ":", "lldp_neighbors", ".", "get", "(", "interface", ")", "}", "proxy_output", ".", "update", "(", "{", "'out'", ":", "lldp_neighbors", "}", ")", "return", "proxy_output" ]
returns a detailed view of the lldp neighbors .
train
false
18,056
def _api_queue_sort(output, value, kwargs): sort = kwargs.get('sort') direction = kwargs.get('dir', '') if sort: sort_queue(sort, direction) return report(output) else: return report(output, _MSG_NO_VALUE2)
[ "def", "_api_queue_sort", "(", "output", ",", "value", ",", "kwargs", ")", ":", "sort", "=", "kwargs", ".", "get", "(", "'sort'", ")", "direction", "=", "kwargs", ".", "get", "(", "'dir'", ",", "''", ")", "if", "sort", ":", "sort_queue", "(", "sort", ",", "direction", ")", "return", "report", "(", "output", ")", "else", ":", "return", "report", "(", "output", ",", "_MSG_NO_VALUE2", ")" ]
api: accepts output .
train
false
18,057
def _exitOnSignal(sigName, message): import signal try: sigNumber = getattr(signal, sigName) except AttributeError: return def handler(sig, f): sys.exit(message) try: signal.signal(sigNumber, handler) except ValueError: pass
[ "def", "_exitOnSignal", "(", "sigName", ",", "message", ")", ":", "import", "signal", "try", ":", "sigNumber", "=", "getattr", "(", "signal", ",", "sigName", ")", "except", "AttributeError", ":", "return", "def", "handler", "(", "sig", ",", "f", ")", ":", "sys", ".", "exit", "(", "message", ")", "try", ":", "signal", ".", "signal", "(", "sigNumber", ",", "handler", ")", "except", "ValueError", ":", "pass" ]
handles a signal with sys .
train
true
18,058
def printSummary(results): statuses = zip(*results)[0] sys.stdout.write(('{} responses to {} queries'.format(statuses.count(True), len(statuses)) + '\n'))
[ "def", "printSummary", "(", "results", ")", ":", "statuses", "=", "zip", "(", "*", "results", ")", "[", "0", "]", "sys", ".", "stdout", ".", "write", "(", "(", "'{} responses to {} queries'", ".", "format", "(", "statuses", ".", "count", "(", "True", ")", ",", "len", "(", "statuses", ")", ")", "+", "'\\n'", ")", ")" ]
print a summary showing the total number of responses and queries .
train
false
18,060
def init_inventory(bot): global _inventory _inventory = Inventory(bot)
[ "def", "init_inventory", "(", "bot", ")", ":", "global", "_inventory", "_inventory", "=", "Inventory", "(", "bot", ")" ]
initialises the cached inventory .
train
false
18,061
def print_bench_version(ctx, param, value): if ((not value) or ctx.resilient_parsing): return import bench click.echo(bench.__version__) ctx.exit()
[ "def", "print_bench_version", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "(", "(", "not", "value", ")", "or", "ctx", ".", "resilient_parsing", ")", ":", "return", "import", "bench", "click", ".", "echo", "(", "bench", ".", "__version__", ")", "ctx", ".", "exit", "(", ")" ]
prints current bench version .
train
false
18,062
def getCommentElement(elementNode): for childNode in elementNode.childNodes: if (childNode.getNodeName() == '#comment'): if childNode.getTextContent().startswith(globalOriginalTextString): return childNode return None
[ "def", "getCommentElement", "(", "elementNode", ")", ":", "for", "childNode", "in", "elementNode", ".", "childNodes", ":", "if", "(", "childNode", ".", "getNodeName", "(", ")", "==", "'#comment'", ")", ":", "if", "childNode", ".", "getTextContent", "(", ")", ".", "startswith", "(", "globalOriginalTextString", ")", ":", "return", "childNode", "return", "None" ]
get a carving for the file using an import plugin .
train
false
18,063
def isnested(A, B, namespace=None): if (namespace is not None): A = copy.copy(A) A.namespace = namespace B = copy.copy(B) B.namespace = namespace a = A(values=True)[0] b = B(values=True)[0] if (len(a) != len(b)): raise ValueError('A() and B() should be sequences of the same length') nA = len(set(a)) nB = len(set(b)) n = max(nA, nB) AB = [(a[i], b[i]) for i in range(len(a))] nAB = len(set(AB)) if (nAB == n): if (nA > nB): F = A else: F = B return (True, F) else: return (False, None)
[ "def", "isnested", "(", "A", ",", "B", ",", "namespace", "=", "None", ")", ":", "if", "(", "namespace", "is", "not", "None", ")", ":", "A", "=", "copy", ".", "copy", "(", "A", ")", "A", ".", "namespace", "=", "namespace", "B", "=", "copy", ".", "copy", "(", "B", ")", "B", ".", "namespace", "=", "namespace", "a", "=", "A", "(", "values", "=", "True", ")", "[", "0", "]", "b", "=", "B", "(", "values", "=", "True", ")", "[", "0", "]", "if", "(", "len", "(", "a", ")", "!=", "len", "(", "b", ")", ")", ":", "raise", "ValueError", "(", "'A() and B() should be sequences of the same length'", ")", "nA", "=", "len", "(", "set", "(", "a", ")", ")", "nB", "=", "len", "(", "set", "(", "b", ")", ")", "n", "=", "max", "(", "nA", ",", "nB", ")", "AB", "=", "[", "(", "a", "[", "i", "]", ",", "b", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "a", ")", ")", "]", "nAB", "=", "len", "(", "set", "(", "AB", ")", ")", "if", "(", "nAB", "==", "n", ")", ":", "if", "(", "nA", ">", "nB", ")", ":", "F", "=", "A", "else", ":", "F", "=", "B", "return", "(", "True", ",", "F", ")", "else", ":", "return", "(", "False", ",", "None", ")" ]
is factor b nested within factor a or vice versa: a very crude test which depends on the namespace .
train
false
18,064
def _decompose_liveaction(liveaction_db): decomposed = {'liveaction': {}} liveaction_api = vars(LiveActionAPI.from_model(liveaction_db)) for k in liveaction_api.keys(): if (k in LIVEACTION_ATTRIBUTES): decomposed['liveaction'][k] = liveaction_api[k] else: decomposed[k] = getattr(liveaction_db, k) return decomposed
[ "def", "_decompose_liveaction", "(", "liveaction_db", ")", ":", "decomposed", "=", "{", "'liveaction'", ":", "{", "}", "}", "liveaction_api", "=", "vars", "(", "LiveActionAPI", ".", "from_model", "(", "liveaction_db", ")", ")", "for", "k", "in", "liveaction_api", ".", "keys", "(", ")", ":", "if", "(", "k", "in", "LIVEACTION_ATTRIBUTES", ")", ":", "decomposed", "[", "'liveaction'", "]", "[", "k", "]", "=", "liveaction_api", "[", "k", "]", "else", ":", "decomposed", "[", "k", "]", "=", "getattr", "(", "liveaction_db", ",", "k", ")", "return", "decomposed" ]
splits the liveaction into an actionexecution compatible dict .
train
false
18,067
def test_feature_max_length_on_feature_description(): feature = Feature.from_string(FEATURE2) assert_equals(feature.max_length, 47)
[ "def", "test_feature_max_length_on_feature_description", "(", ")", ":", "feature", "=", "Feature", ".", "from_string", "(", "FEATURE2", ")", "assert_equals", "(", "feature", ".", "max_length", ",", "47", ")" ]
the max length of a feature considering when one of the description lines of the feature is longer than the remaining things .
train
false
18,068
def utcnow(with_timezone=False): if utcnow.override_time: try: return utcnow.override_time.pop(0) except AttributeError: return utcnow.override_time if with_timezone: return datetime.datetime.now(tz=iso8601.iso8601.UTC) return datetime.datetime.utcnow()
[ "def", "utcnow", "(", "with_timezone", "=", "False", ")", ":", "if", "utcnow", ".", "override_time", ":", "try", ":", "return", "utcnow", ".", "override_time", ".", "pop", "(", "0", ")", "except", "AttributeError", ":", "return", "utcnow", ".", "override_time", "if", "with_timezone", ":", "return", "datetime", ".", "datetime", ".", "now", "(", "tz", "=", "iso8601", ".", "iso8601", ".", "UTC", ")", "return", "datetime", ".", "datetime", ".", "utcnow", "(", ")" ]
overridable version of utils .
train
false
18,072
def write_packed_refs(f, packed_refs, peeled_refs=None): if (peeled_refs is None): peeled_refs = {} else: f.write('# pack-refs with: peeled\n') for refname in sorted(packed_refs.keys()): f.write(git_line(packed_refs[refname], refname)) if (refname in peeled_refs): f.write((('^' + peeled_refs[refname]) + '\n'))
[ "def", "write_packed_refs", "(", "f", ",", "packed_refs", ",", "peeled_refs", "=", "None", ")", ":", "if", "(", "peeled_refs", "is", "None", ")", ":", "peeled_refs", "=", "{", "}", "else", ":", "f", ".", "write", "(", "'# pack-refs with: peeled\\n'", ")", "for", "refname", "in", "sorted", "(", "packed_refs", ".", "keys", "(", ")", ")", ":", "f", ".", "write", "(", "git_line", "(", "packed_refs", "[", "refname", "]", ",", "refname", ")", ")", "if", "(", "refname", "in", "peeled_refs", ")", ":", "f", ".", "write", "(", "(", "(", "'^'", "+", "peeled_refs", "[", "refname", "]", ")", "+", "'\\n'", ")", ")" ]
write a packed refs file .
train
false
18,073
def need_name(dictionary, raise_error=True): return key_checker(['name'])(dictionary, raise_error)
[ "def", "need_name", "(", "dictionary", ",", "raise_error", "=", "True", ")", ":", "return", "key_checker", "(", "[", "'name'", "]", ")", "(", "dictionary", ",", "raise_error", ")" ]
returns whether the name key exists in the given dictionary .
train
false