id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
20,226
@world.absorb
def capture_screenshot(image_name):
    """Save a browser screenshot named *image_name* under TEST_ROOT/log/auto_screenshots."""
    target_dir = '{}/log/auto_screenshots'.format(settings.TEST_ROOT)
    # Spaces in the caller-supplied name would make awkward file names.
    path = '{}/{}.png'.format(target_dir, image_name.replace(' ', '_'))
    try:
        world.browser.driver.save_screenshot(path)
    except WebDriverException:
        LOGGER.error("Could not capture a screenshot '{}'".format(path))
[ "@", "world", ".", "absorb", "def", "capture_screenshot", "(", "image_name", ")", ":", "output_dir", "=", "'{}/log/auto_screenshots'", ".", "format", "(", "settings", ".", "TEST_ROOT", ")", "image_name", "=", "'{}/{}.png'", ".", "format", "(", "output_dir", ",", "image_name", ".", "replace", "(", "' '", ",", "'_'", ")", ")", "try", ":", "world", ".", "browser", ".", "driver", ".", "save_screenshot", "(", "image_name", ")", "except", "WebDriverException", ":", "LOGGER", ".", "error", "(", "\"Could not capture a screenshot '{}'\"", ".", "format", "(", "image_name", ")", ")" ]
capture a screenshot outputting it to a defined directory .
train
false
20,228
@with_setup(step_runner_environ)
def test_step_definitions_takes_the_step_object_as_first_argument():
    """Running a feature must hand the Step object to its definition as the first argument."""
    FEATURE = '\n Feature: Steps as args\n Scenario: Steps as args\n When I define this one\n '

    @step('When I define this one')
    def when_i_define_this_one(step):
        assert_equals(step.sentence, 'When I define this one')

    feature_result = Feature.from_string(FEATURE).run()
    scenario = feature_result.scenario_results[0]
    assert_equals(len(scenario.steps_passed), 1)
    assert_equals(scenario.total_steps, 1)
[ "@", "with_setup", "(", "step_runner_environ", ")", "def", "test_step_definitions_takes_the_step_object_as_first_argument", "(", ")", ":", "FEATURE", "=", "'\\n Feature: Steps as args\\n Scenario: Steps as args\\n When I define this one\\n '", "@", "step", "(", "'When I define this one'", ")", "def", "when_i_define_this_one", "(", "step", ")", ":", "assert_equals", "(", "step", ".", "sentence", ",", "'When I define this one'", ")", "f", "=", "Feature", ".", "from_string", "(", "FEATURE", ")", "feature_result", "=", "f", ".", "run", "(", ")", "scenario_result", "=", "feature_result", ".", "scenario_results", "[", "0", "]", "assert_equals", "(", "len", "(", "scenario_result", ".", "steps_passed", ")", ",", "1", ")", "assert_equals", "(", "scenario_result", ".", "total_steps", ",", "1", ")" ]
step definitions takes step object as first argument .
train
false
20,230
@public
def str_timedelta(s):
    """Format *s* seconds as H:MM:SS, dropping any fractional part."""
    formatted = str(timedelta(0, s))
    return formatted.split('.')[0]
[ "@", "public", "def", "str_timedelta", "(", "s", ")", ":", "return", "str", "(", "timedelta", "(", "0", ",", "s", ")", ")", ".", "split", "(", "'.'", ")", "[", "0", "]" ]
formats a time delta .
train
false
20,231
def constrainedAES(s):
    """Encrypt *s* with a key whose unknown suffix is small enough to brute-force later."""
    small_key = helpers.randomKey(26)
    # Only the numeric suffix is secret; the 26-char prefix is handed back to the caller.
    real_key = small_key + str(helpers.randomNumbers())
    encrypted = EncodeAES(AES.new(real_key), s)
    return (encrypted, small_key, real_key)
[ "def", "constrainedAES", "(", "s", ")", ":", "small_key", "=", "helpers", ".", "randomKey", "(", "26", ")", "real_key", "=", "(", "small_key", "+", "str", "(", "helpers", ".", "randomNumbers", "(", ")", ")", ")", "cipher", "=", "AES", ".", "new", "(", "real_key", ")", "encrypted", "=", "EncodeAES", "(", "cipher", ",", "s", ")", "return", "(", "encrypted", ",", "small_key", ",", "real_key", ")" ]
generates a constrained aes key which is later brute forced in a loop .
train
false
20,232
def OpenFileForRead(path, logtext):
    """Open *path* for text reading, transparently decompressing ``.gz`` files.

    Returns a ``(frame, file)`` pair: *frame* is the underlying binary handle
    for gzip input (``None`` otherwise) and *file* is the readable object.
    Both stay ``None`` when *path* is empty or cannot be opened.
    """
    frame = None
    handle = None
    if not path:
        return (frame, handle)
    try:
        if path.endswith('.gz'):
            frame = open(path, 'rb')
            handle = gzip.GzipFile(fileobj=frame, mode='rt')
        else:
            handle = open(path, 'rt')
        if logtext:
            output.Log('Opened %s file: %s' % (logtext, path), 1)
        else:
            output.Log('Opened file: %s' % path, 1)
    except IOError:
        output.Error('Can not open file: %s' % path)
    return (frame, handle)
[ "def", "OpenFileForRead", "(", "path", ",", "logtext", ")", ":", "frame", "=", "None", "file", "=", "None", "if", "(", "not", "path", ")", ":", "return", "(", "frame", ",", "file", ")", "try", ":", "if", "path", ".", "endswith", "(", "'.gz'", ")", ":", "frame", "=", "open", "(", "path", ",", "'rb'", ")", "file", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "frame", ",", "mode", "=", "'rt'", ")", "else", ":", "file", "=", "open", "(", "path", ",", "'rt'", ")", "if", "logtext", ":", "output", ".", "Log", "(", "(", "'Opened %s file: %s'", "%", "(", "logtext", ",", "path", ")", ")", ",", "1", ")", "else", ":", "output", ".", "Log", "(", "(", "'Opened file: %s'", "%", "path", ")", ",", "1", ")", "except", "IOError", ":", "output", ".", "Error", "(", "(", "'Can not open file: %s'", "%", "path", ")", ")", "return", "(", "frame", ",", "file", ")" ]
opens a text file .
train
false
20,233
def should_be_embargoed(embargo):
    """True once *embargo* has been pending for at least EMBARGO_PENDING_TIME."""
    elapsed = timezone.now() - embargo.initiation_date
    return elapsed >= settings.EMBARGO_PENDING_TIME
[ "def", "should_be_embargoed", "(", "embargo", ")", ":", "return", "(", "(", "timezone", ".", "now", "(", ")", "-", "embargo", ".", "initiation_date", ")", ">=", "settings", ".", "EMBARGO_PENDING_TIME", ")" ]
returns true if embargo was initiated more than 48 hours prior .
train
false
20,234
def _update_query_state(query_history):
    """Refresh ``last_state`` on *query_history* from the backing query server.

    Returns False when the server reports no state for the handle, True
    otherwise.  A lookup failure is logged and recorded as ``failed`` rather
    than propagated.  Queries already past the running state are untouched.
    """
    if query_history.last_state > models.QueryHistory.STATE.running.index:
        return True
    try:
        server = dbms.get(query_history.owner,
                          query_history.get_query_server_config())
        state_enum = server.get_state(query_history.get_handle())
        if state_enum is None:
            return False
    except Exception as e:
        LOG.error(e)
        state_enum = models.QueryHistory.STATE.failed
    query_history.save_state(state_enum)
    return True
[ "def", "_update_query_state", "(", "query_history", ")", ":", "if", "(", "query_history", ".", "last_state", "<=", "models", ".", "QueryHistory", ".", "STATE", ".", "running", ".", "index", ")", ":", "try", ":", "state_enum", "=", "dbms", ".", "get", "(", "query_history", ".", "owner", ",", "query_history", ".", "get_query_server_config", "(", ")", ")", ".", "get_state", "(", "query_history", ".", "get_handle", "(", ")", ")", "if", "(", "state_enum", "is", "None", ")", ":", "return", "False", "except", "Exception", "as", "e", ":", "LOG", ".", "error", "(", "e", ")", "state_enum", "=", "models", ".", "QueryHistory", ".", "STATE", ".", "failed", "query_history", ".", "save_state", "(", "state_enum", ")", "return", "True" ]
update the last_state for a queryhistory object .
train
false
20,235
def FromPropertyTypeName(type_name):
    """Map a property type name onto its Python type via the module lookup table."""
    lookup = _PROPERTY_TYPE_STRINGS
    return lookup[type_name]
[ "def", "FromPropertyTypeName", "(", "type_name", ")", ":", "return", "_PROPERTY_TYPE_STRINGS", "[", "type_name", "]" ]
returns the python type given a type name .
train
false
20,236
@pytest.mark.skipif('no_real_s3_credentials()')
def test_empty_latest_listing():
    """backup-list LATEST over a freshly created, empty prefix must yield nothing."""
    bucket = bucket_name_mangle('wal-e-test-empty-listing')
    layout = storage.StorageLayout('s3://{0}/test-prefix'.format(bucket))
    with FreshBucket(bucket, host='s3.amazonaws.com',
                     calling_format=OrdinaryCallingFormat()) as fb:
        fb.create()
        listing = BackupList(fb.conn, layout, False)
        found = list(listing.find_all('LATEST'))
        assert len(found) == 0
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "'no_real_s3_credentials()'", ")", "def", "test_empty_latest_listing", "(", ")", ":", "bucket_name", "=", "bucket_name_mangle", "(", "'wal-e-test-empty-listing'", ")", "layout", "=", "storage", ".", "StorageLayout", "(", "'s3://{0}/test-prefix'", ".", "format", "(", "bucket_name", ")", ")", "with", "FreshBucket", "(", "bucket_name", ",", "host", "=", "'s3.amazonaws.com'", ",", "calling_format", "=", "OrdinaryCallingFormat", "(", ")", ")", "as", "fb", ":", "fb", ".", "create", "(", ")", "bl", "=", "BackupList", "(", "fb", ".", "conn", ",", "layout", ",", "False", ")", "found", "=", "list", "(", "bl", ".", "find_all", "(", "'LATEST'", ")", ")", "assert", "(", "len", "(", "found", ")", "==", "0", ")" ]
test listing a backup-list latest on an empty prefix .
train
false
20,237
def _timedelta_to_duration_pb(timedelta_val):
    """Convert a ``datetime.timedelta`` into a ``duration_pb2.Duration``."""
    total = timedelta_val.total_seconds()
    whole_seconds = int(total)
    # timedelta normalizes so .microseconds is always non-negative; restore
    # the sign for negative durations before scaling up to nanoseconds.
    micros = timedelta_val.microseconds
    if total < 0:
        micros -= 10 ** 6
    return duration_pb2.Duration(seconds=whole_seconds, nanos=1000 * micros)
[ "def", "_timedelta_to_duration_pb", "(", "timedelta_val", ")", ":", "seconds_decimal", "=", "timedelta_val", ".", "total_seconds", "(", ")", "seconds", "=", "int", "(", "seconds_decimal", ")", "if", "(", "seconds_decimal", "<", "0", ")", ":", "signed_micros", "=", "(", "timedelta_val", ".", "microseconds", "-", "(", "10", "**", "6", ")", ")", "else", ":", "signed_micros", "=", "timedelta_val", ".", "microseconds", "nanos", "=", "(", "1000", "*", "signed_micros", ")", "return", "duration_pb2", ".", "Duration", "(", "seconds", "=", "seconds", ",", "nanos", "=", "nanos", ")" ]
convert a python timedelta object to a duration protobuf .
train
false
20,239
def check_enableusersite():
    """Decide whether the per-user site-packages directory is safe to enable.

    Returns False when disabled via ``sys.flags.no_user_site``, None when the
    process runs with mismatched real/effective uid or gid (setuid/setgid
    programs must not trust user-writable paths), and True otherwise.
    """
    flags = getattr(sys, 'flags', None)
    if flags is not None and getattr(flags, 'no_user_site', False):
        return False
    if hasattr(os, 'getuid') and hasattr(os, 'geteuid'):
        if os.geteuid() != os.getuid():
            return None
    if hasattr(os, 'getgid') and hasattr(os, 'getegid'):
        if os.getegid() != os.getgid():
            return None
    return True
[ "def", "check_enableusersite", "(", ")", ":", "if", "(", "hasattr", "(", "sys", ",", "'flags'", ")", "and", "getattr", "(", "sys", ".", "flags", ",", "'no_user_site'", ",", "False", ")", ")", ":", "return", "False", "if", "(", "hasattr", "(", "os", ",", "'getuid'", ")", "and", "hasattr", "(", "os", ",", "'geteuid'", ")", ")", ":", "if", "(", "os", ".", "geteuid", "(", ")", "!=", "os", ".", "getuid", "(", ")", ")", ":", "return", "None", "if", "(", "hasattr", "(", "os", ",", "'getgid'", ")", "and", "hasattr", "(", "os", ",", "'getegid'", ")", ")", ":", "if", "(", "os", ".", "getegid", "(", ")", "!=", "os", ".", "getgid", "(", ")", ")", ":", "return", "None", "return", "True" ]
check if user site directory is safe for inclusion the function tests for the command line flag .
train
true
20,240
def transaction_after_request(response, base_status_code_error=500):
    """Close the per-request transaction, rolling back on error responses."""
    if view_has_annotation(NO_AUTO_TRANSACTION_ATTR):
        return response
    if response.status_code < base_status_code_error:
        current_atomic.__exit__(None, None, None)
    else:
        # Feeding an HTTPError into __exit__ makes the atomic block roll back.
        error = HTTPError(response.status_code)
        current_atomic.__exit__(HTTPError, error, None)
    return response
[ "def", "transaction_after_request", "(", "response", ",", "base_status_code_error", "=", "500", ")", ":", "if", "view_has_annotation", "(", "NO_AUTO_TRANSACTION_ATTR", ")", ":", "return", "response", "if", "(", "response", ".", "status_code", ">=", "base_status_code_error", ")", ":", "exc_type", "=", "HTTPError", "exc_value", "=", "HTTPError", "(", "response", ".", "status_code", ")", "current_atomic", ".", "__exit__", "(", "exc_type", ",", "exc_value", ",", "None", ")", "else", ":", "current_atomic", ".", "__exit__", "(", "None", ",", "None", ",", "None", ")", "return", "response" ]
teardown transaction after handling the request .
train
false
20,241
def commit_api(api):
    """Commit to one Qt binding by forbidding imports of all the others.

    Bug fix: the PySide2 branch used to be a standalone ``if`` followed by a
    second ``if/elif/else`` chain, so for ``QT_API_PYSIDE2`` the final
    ``else`` also ran and forbade PySide2 itself.  The chain is now a single
    if/elif so exactly one branch executes per call.
    """
    if api == QT_API_PYSIDE2:
        ID.forbid('PySide')
        ID.forbid('PyQt4')
        ID.forbid('PyQt5')
    elif api == QT_API_PYSIDE:
        ID.forbid('PySide2')
        ID.forbid('PyQt4')
        ID.forbid('PyQt5')
    elif api == QT_API_PYQT5:
        ID.forbid('PySide2')
        ID.forbid('PySide')
        ID.forbid('PyQt4')
    else:
        ID.forbid('PyQt5')
        ID.forbid('PySide2')
        ID.forbid('PySide')
[ "def", "commit_api", "(", "api", ")", ":", "if", "(", "api", "==", "QT_API_PYSIDE2", ")", ":", "ID", ".", "forbid", "(", "'PySide'", ")", "ID", ".", "forbid", "(", "'PyQt4'", ")", "ID", ".", "forbid", "(", "'PyQt5'", ")", "if", "(", "api", "==", "QT_API_PYSIDE", ")", ":", "ID", ".", "forbid", "(", "'PySide2'", ")", "ID", ".", "forbid", "(", "'PyQt4'", ")", "ID", ".", "forbid", "(", "'PyQt5'", ")", "elif", "(", "api", "==", "QT_API_PYQT5", ")", ":", "ID", ".", "forbid", "(", "'PySide2'", ")", "ID", ".", "forbid", "(", "'PySide'", ")", "ID", ".", "forbid", "(", "'PyQt4'", ")", "else", ":", "ID", ".", "forbid", "(", "'PyQt5'", ")", "ID", ".", "forbid", "(", "'PySide2'", ")", "ID", ".", "forbid", "(", "'PySide'", ")" ]
commit to a particular api .
train
false
20,242
@cache_permission
def can_delete_comment(user, project):
    """Whether *user* may delete comments on *project*."""
    permission = 'trans.delete_comment'
    return check_permission(user, project, permission)
[ "@", "cache_permission", "def", "can_delete_comment", "(", "user", ",", "project", ")", ":", "return", "check_permission", "(", "user", ",", "project", ",", "'trans.delete_comment'", ")" ]
checks whether user can delete comment on given project .
train
false
20,244
def get_related_model_from_attribute(attribute):
    """Return the model class reached through a relationship *attribute*."""
    if isinstance(attribute, AssociationProxy):
        # Association proxies hop over an intermediate relationship; follow
        # the remote side to the final mapped class.
        mapper = attribute.remote_attr.mapper
    else:
        mapper = attribute.property.mapper
    return mapper.class_
[ "def", "get_related_model_from_attribute", "(", "attribute", ")", ":", "if", "isinstance", "(", "attribute", ",", "AssociationProxy", ")", ":", "return", "attribute", ".", "remote_attr", ".", "mapper", ".", "class_", "return", "attribute", ".", "property", ".", "mapper", ".", "class_" ]
gets the class of the model related to the given attribute via the given name .
train
false
20,245
def cron_method(handler):
    """Decorator restricting a handler to App Engine cron (or admin users).

    In production, requests lacking the ``X-AppEngine-Cron`` header from a
    non-admin are rejected with HTTP 403; everywhere else (and for admins)
    the wrapped handler runs normally, so cron URLs stay callable from
    localhost during development.

    Improvement: ``functools.wraps`` now preserves the wrapped handler's
    name/docstring, which the old version clobbered with ``check_if_cron``.
    """
    import functools

    @functools.wraps(handler)
    def check_if_cron(self, *args, **kwargs):
        # App Engine sets this header only for genuine cron dispatches.
        from_cron = self.request.headers.get('X-AppEngine-Cron') is not None
        if (not from_cron and config.get('environment') == 'production'
                and not users.is_current_user_admin()):
            return self.error(403)
        return handler(self, *args, **kwargs)

    return check_if_cron
[ "def", "cron_method", "(", "handler", ")", ":", "def", "check_if_cron", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "(", "self", ".", "request", ".", "headers", ".", "get", "(", "'X-AppEngine-Cron'", ")", "is", "None", ")", "and", "(", "config", ".", "get", "(", "'environment'", ")", "==", "'production'", ")", "and", "(", "not", "users", ".", "is_current_user_admin", "(", ")", ")", ")", ":", "return", "self", ".", "error", "(", "403", ")", "else", ":", "return", "handler", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", "return", "check_if_cron" ]
decorator to indicate that this is a cron method and applies request .
train
false
20,248
def memcache_get_response(response):
    """Pretty-format a memcache get RPC response; returns (text, error_flag)."""
    if 'item' not in response or not response['item']:
        return (None, 1)
    items = response['item']
    for idx, entry in enumerate(items):
        if type(entry) == dict:
            # Two wire formats exist for the item wrapper; unwrap either.
            if 'MemcacheGetResponse_Item' in entry:
                value = entry['MemcacheGetResponse_Item']['value']
            else:
                value = entry['Item']['value']
            items[idx] = truncate(repr(value))
    return ('\n'.join(items), 0)
[ "def", "memcache_get_response", "(", "response", ")", ":", "if", "(", "(", "'item'", "not", "in", "response", ")", "or", "(", "not", "response", "[", "'item'", "]", ")", ")", ":", "return", "(", "None", ",", "1", ")", "items", "=", "response", "[", "'item'", "]", "for", "(", "i", ",", "item", ")", "in", "enumerate", "(", "items", ")", ":", "if", "(", "type", "(", "item", ")", "==", "dict", ")", ":", "if", "(", "'MemcacheGetResponse_Item'", "in", "item", ")", ":", "item", "=", "item", "[", "'MemcacheGetResponse_Item'", "]", "[", "'value'", "]", "else", ":", "item", "=", "item", "[", "'Item'", "]", "[", "'value'", "]", "item", "=", "truncate", "(", "repr", "(", "item", ")", ")", "items", "[", "i", "]", "=", "item", "response_short", "=", "'\\n'", ".", "join", "(", "items", ")", "return", "(", "response_short", ",", "0", ")" ]
pretty-format a memcache .
train
false
20,251
def _col_info(result, info_dict=None): if (info_dict is None): info_dict = {} out = [] index = [] for i in info_dict: if isinstance(info_dict[i], dict): continue try: out.append(info_dict[i](result)) except: out.append('') index.append(i) out = pd.DataFrame({str(result.model.endog_names): out}, index=index) return out
[ "def", "_col_info", "(", "result", ",", "info_dict", "=", "None", ")", ":", "if", "(", "info_dict", "is", "None", ")", ":", "info_dict", "=", "{", "}", "out", "=", "[", "]", "index", "=", "[", "]", "for", "i", "in", "info_dict", ":", "if", "isinstance", "(", "info_dict", "[", "i", "]", ",", "dict", ")", ":", "continue", "try", ":", "out", ".", "append", "(", "info_dict", "[", "i", "]", "(", "result", ")", ")", "except", ":", "out", ".", "append", "(", "''", ")", "index", ".", "append", "(", "i", ")", "out", "=", "pd", ".", "DataFrame", "(", "{", "str", "(", "result", ".", "model", ".", "endog_names", ")", ":", "out", "}", ",", "index", "=", "index", ")", "return", "out" ]
stack model info in a column .
train
false
20,252
def read_text_file(filename):
    """Return the contents of *filename* (decoded as UTF-8 on Python 3).

    Fix: the file handle is now closed deterministically via ``with``
    instead of being leaked until garbage collection.
    """
    if PYTHON3:
        with open(filename, 'r', encoding='utf-8') as fh:
            return fh.read()
    with open(filename, 'r') as fh:
        return fh.read()
[ "def", "read_text_file", "(", "filename", ")", ":", "if", "PYTHON3", ":", "return", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", "else", ":", "return", "open", "(", "filename", ",", "'r'", ")", ".", "read", "(", ")" ]
return the contents of *filename* .
train
false
20,253
def test_prewitt_horizontal():
    """Prewitt response on a horizontal step edge is a line of ones, zero elsewhere."""
    i, j = np.mgrid[-5:6, -5:6]
    edge_image = (i >= 0).astype(float)
    response = filters.prewitt(edge_image) * np.sqrt(2)
    # Exclude the border columns, where the filter support is truncated.
    i[np.abs(j) == 5] = 10000
    assert np.all(response[i == 0] == 1)
    assert_allclose(response[np.abs(i) > 1], 0, atol=1e-10)
[ "def", "test_prewitt_horizontal", "(", ")", ":", "(", "i", ",", "j", ")", "=", "np", ".", "mgrid", "[", "(", "-", "5", ")", ":", "6", ",", "(", "-", "5", ")", ":", "6", "]", "image", "=", "(", "i", ">=", "0", ")", ".", "astype", "(", "float", ")", "result", "=", "(", "filters", ".", "prewitt", "(", "image", ")", "*", "np", ".", "sqrt", "(", "2", ")", ")", "i", "[", "(", "np", ".", "abs", "(", "j", ")", "==", "5", ")", "]", "=", "10000", "assert", "np", ".", "all", "(", "(", "result", "[", "(", "i", "==", "0", ")", "]", "==", "1", ")", ")", "assert_allclose", "(", "result", "[", "(", "np", ".", "abs", "(", "i", ")", ">", "1", ")", "]", ",", "0", ",", "atol", "=", "1e-10", ")" ]
prewitt on an edge should be a horizontal line .
train
false
20,255
def bool_param(registry, xml_parent, data):
    """yaml: bool — register a boolean build parameter."""
    # Jenkins expects the literal strings 'true'/'false', not Python bools.
    default = data.get('default', False)
    data['default'] = str(default).lower()
    base_param(registry, xml_parent, data, True,
               'hudson.model.BooleanParameterDefinition')
[ "def", "bool_param", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "data", "[", "'default'", "]", "=", "str", "(", "data", ".", "get", "(", "'default'", ",", "False", ")", ")", ".", "lower", "(", ")", "base_param", "(", "registry", ",", "xml_parent", ",", "data", ",", "True", ",", "'hudson.model.BooleanParameterDefinition'", ")" ]
yaml: bool a boolean parameter .
train
false
20,256
@login_required
def login_protected_redirect_view(request):
    """Redirect every authenticated request to the get view."""
    target = '/get_view/'
    return HttpResponseRedirect(target)
[ "@", "login_required", "def", "login_protected_redirect_view", "(", "request", ")", ":", "return", "HttpResponseRedirect", "(", "'/get_view/'", ")" ]
a view that redirects all requests to the get view .
train
false
20,257
def get_attachments(xml):
    """Map parent post names to the attachment URLs hanging off them.

    Walks the exported items once, recording ``post_id -> filename`` for
    ordinary posts and collecting ``(parent_id, url)`` pairs for
    attachments, then groups the URLs by resolved parent name.
    Attachments whose parent id was never seen are grouped under ``None``.

    Improvement: the KeyError-driven grouping is replaced with
    ``dict.get``/``dict.setdefault``, removing the duplicated append logic.
    """
    items = get_items(xml)
    names = {}
    attachments = []
    for item in items:
        kind = item.find(u'post_type').string
        filename = item.find(u'post_name').string
        post_id = item.find(u'post_id').string
        if kind == u'attachment':
            attachments.append((item.find(u'post_parent').string,
                                item.find(u'attachment_url').string))
        else:
            names[post_id] = get_filename(filename, post_id)
    attachedposts = {}
    for parent, url in attachments:
        # Unknown parents resolve to None and are filed together.
        parent_name = names.get(parent)
        attachedposts.setdefault(parent_name, []).append(url)
    return attachedposts
[ "def", "get_attachments", "(", "xml", ")", ":", "items", "=", "get_items", "(", "xml", ")", "names", "=", "{", "}", "attachments", "=", "[", "]", "for", "item", "in", "items", ":", "kind", "=", "item", ".", "find", "(", "u'post_type'", ")", ".", "string", "filename", "=", "item", ".", "find", "(", "u'post_name'", ")", ".", "string", "post_id", "=", "item", ".", "find", "(", "u'post_id'", ")", ".", "string", "if", "(", "kind", "==", "u'attachment'", ")", ":", "attachments", ".", "append", "(", "(", "item", ".", "find", "(", "u'post_parent'", ")", ".", "string", ",", "item", ".", "find", "(", "u'attachment_url'", ")", ".", "string", ")", ")", "else", ":", "filename", "=", "get_filename", "(", "filename", ",", "post_id", ")", "names", "[", "post_id", "]", "=", "filename", "attachedposts", "=", "{", "}", "for", "(", "parent", ",", "url", ")", "in", "attachments", ":", "try", ":", "parent_name", "=", "names", "[", "parent", "]", "except", "KeyError", ":", "parent_name", "=", "None", "try", ":", "attachedposts", "[", "parent_name", "]", ".", "append", "(", "url", ")", "except", "KeyError", ":", "attachedposts", "[", "parent_name", "]", "=", "[", "]", "attachedposts", "[", "parent_name", "]", ".", "append", "(", "url", ")", "return", "attachedposts" ]
returns a dictionary of posts that have attachments with a list of the attachment_urls .
train
false
20,258
def get_tex_path_variable_miktex(variable, env=None):
    """Resolve a TeX path *variable* via MiKTeX's findtexmf; None on any failure."""
    print('Reading path for {0}...'.format(variable))
    if env is None:
        env = os.environ
    try:
        # The {VARIABLE} placeholder is substituted by .format below, so e.g.
        # TEXINPUTS becomes '-show-path=tex'.
        command = ['findtexmf', '-alias=latex',
                   ('-show-path={' + variable + '}').format(
                       TEXINPUTS='tex', BIBINPUTS='bib', BSTINPUTS='bst')]
        worker = SubprocessTimeoutThread(30, command, stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT,
                                         shell=False, env=env)
        worker.start()
        raw = worker.stdout
        if raw is None:
            return None
        text = u'\n'.join(re.split('\\r?\\n', raw.decode('utf-8').strip()))
        normalized = [os.path.normpath(p) for p in text.split(os.pathsep)]
        return os.pathsep.join(normalized)
    except:
        # Deliberately broad: any failure simply means "path unknown".
        return None
[ "def", "get_tex_path_variable_miktex", "(", "variable", ",", "env", "=", "None", ")", ":", "print", "(", "'Reading path for {0}...'", ".", "format", "(", "variable", ")", ")", "if", "(", "env", "is", "None", ")", ":", "env", "=", "os", ".", "environ", "try", ":", "command", "=", "[", "'findtexmf'", ",", "'-alias=latex'", "]", "command", ".", "append", "(", "(", "(", "'-show-path={'", "+", "variable", ")", "+", "'}'", ")", ".", "format", "(", "TEXINPUTS", "=", "'tex'", ",", "BIBINPUTS", "=", "'bib'", ",", "BSTINPUTS", "=", "'bst'", ")", ")", "t", "=", "SubprocessTimeoutThread", "(", "30", ",", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "shell", "=", "False", ",", "env", "=", "env", ")", "t", ".", "start", "(", ")", "stdout", "=", "t", ".", "stdout", "if", "(", "stdout", "is", "None", ")", ":", "return", "None", "output", "=", "u'\\n'", ".", "join", "(", "re", ".", "split", "(", "'\\\\r?\\\\n'", ",", "stdout", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", ")", ")", "return", "os", ".", "pathsep", ".", "join", "(", "[", "os", ".", "path", ".", "normpath", "(", "p", ")", "for", "p", "in", "output", ".", "split", "(", "os", ".", "pathsep", ")", "]", ")", "except", ":", "return", "None" ]
uses findtexmf to find the values of a given tex path variable .
train
false
20,260
def _hash_token(application, token): if isinstance(token, dict): hashed_token = tuple(sorted(token.items())) elif isinstance(token, tuple): hashed_token = token else: raise TypeError(('%r is unknown type of token' % token)) return (application.__class__.__name__, application.name, hashed_token)
[ "def", "_hash_token", "(", "application", ",", "token", ")", ":", "if", "isinstance", "(", "token", ",", "dict", ")", ":", "hashed_token", "=", "tuple", "(", "sorted", "(", "token", ".", "items", "(", ")", ")", ")", "elif", "isinstance", "(", "token", ",", "tuple", ")", ":", "hashed_token", "=", "token", "else", ":", "raise", "TypeError", "(", "(", "'%r is unknown type of token'", "%", "token", ")", ")", "return", "(", "application", ".", "__class__", ".", "__name__", ",", "application", ".", "name", ",", "hashed_token", ")" ]
creates a hashable object for given token then we could use it as a dictionary key .
train
true
20,261
def get_studio_url(course, page):
    """Return the Studio URL for *page* of *course*, or None for non-Studio courses."""
    if course.course_edit_method != 'Studio':
        return None
    return get_cms_course_link(course, page)
[ "def", "get_studio_url", "(", "course", ",", "page", ")", ":", "studio_link", "=", "None", "if", "(", "course", ".", "course_edit_method", "==", "'Studio'", ")", ":", "studio_link", "=", "get_cms_course_link", "(", "course", ",", "page", ")", "return", "studio_link" ]
get the studio url of the page that is passed in .
train
false
20,262
def gf_diff(f, p, K):
    """Differentiate a polynomial over GF(p)[x] given dense coefficients *f*."""
    degree = gf_degree(f)
    deriv = [K.zero] * degree
    # Coefficient f[i] has exponent degree - i; the constant term vanishes.
    for i, c in enumerate(f[:-1]):
        exponent = degree - i
        c = (c * K(exponent)) % p
        if c:
            deriv[i] = c
    return gf_strip(deriv)
[ "def", "gf_diff", "(", "f", ",", "p", ",", "K", ")", ":", "df", "=", "gf_degree", "(", "f", ")", "(", "h", ",", "n", ")", "=", "(", "(", "[", "K", ".", "zero", "]", "*", "df", ")", ",", "df", ")", "for", "coeff", "in", "f", "[", ":", "(", "-", "1", ")", "]", ":", "coeff", "*=", "K", "(", "n", ")", "coeff", "%=", "p", "if", "coeff", ":", "h", "[", "(", "df", "-", "n", ")", "]", "=", "coeff", "n", "-=", "1", "return", "gf_strip", "(", "h", ")" ]
differentiate polynomial in gf(p)[x] .
train
false
20,263
def error_rate(predictions, labels):
    """Percentage of rows whose argmax prediction differs from its sparse label."""
    correct = np.sum(np.argmax(predictions, 1) == labels)
    return 100.0 - (100.0 * correct) / predictions.shape[0]
[ "def", "error_rate", "(", "predictions", ",", "labels", ")", ":", "return", "(", "100.0", "-", "(", "(", "100.0", "*", "np", ".", "sum", "(", "(", "np", ".", "argmax", "(", "predictions", ",", "1", ")", "==", "labels", ")", ")", ")", "/", "predictions", ".", "shape", "[", "0", "]", ")", ")" ]
return the error rate based on dense predictions and sparse labels .
train
true
20,264
def assertIn(first, second, msg=''):
    """Assert that *first* is a member of *second*, with an optional message prefix."""
    a, b = first, second
    prefix = msg.format(a, b)
    assert a in b, '%s: %r is not in %r' % (prefix, a, b)
[ "def", "assertIn", "(", "first", ",", "second", ",", "msg", "=", "''", ")", ":", "(", "a", ",", "b", ")", "=", "(", "first", ",", "second", ")", "assert", "(", "a", "in", "b", ")", ",", "(", "'%s: %r is not in %r'", "%", "(", "msg", ".", "format", "(", "a", ",", "b", ")", ",", "a", ",", "b", ")", ")" ]
checks that first is in second .
train
false
20,267
def randomKey(b=32):
    """Return a random string of *b* characters drawn from letters, digits and punctuation."""
    alphabet = string.ascii_letters + string.digits + '{}!@#$^&()*&[]|,./?'
    return ''.join(random.choice(alphabet) for _ in range(b))
[ "def", "randomKey", "(", "b", "=", "32", ")", ":", "return", "''", ".", "join", "(", "(", "random", ".", "choice", "(", "(", "(", "string", ".", "ascii_letters", "+", "string", ".", "digits", ")", "+", "'{}!@#$^&()*&[]|,./?'", ")", ")", "for", "x", "in", "range", "(", "b", ")", ")", ")" ]
returns a random string/key of "b" characters in length .
train
false
20,268
def det_quick(M, method=None):
    """Determinant of M, picking a strategy suited to symbolic entries.

    Fully symbolic small matrices go through det_perm, partially symbolic
    ones through det_minor, and purely numeric ones through M.det().
    """
    if not any(entry.has(Symbol) for entry in M):
        return M.det(method=method) if method else M.det()
    if M.rows < 8 and all(entry.has(Symbol) for entry in M):
        return det_perm(M)
    return det_minor(M)
[ "def", "det_quick", "(", "M", ",", "method", "=", "None", ")", ":", "if", "any", "(", "(", "i", ".", "has", "(", "Symbol", ")", "for", "i", "in", "M", ")", ")", ":", "if", "(", "(", "M", ".", "rows", "<", "8", ")", "and", "all", "(", "(", "i", ".", "has", "(", "Symbol", ")", "for", "i", "in", "M", ")", ")", ")", ":", "return", "det_perm", "(", "M", ")", "return", "det_minor", "(", "M", ")", "else", ":", "return", "(", "M", ".", "det", "(", "method", "=", "method", ")", "if", "method", "else", "M", ".", "det", "(", ")", ")" ]
return det(m) assuming that either there are lots of zeros or the size of the matrix is small .
train
false
20,269
def image_property_create(context, values, session=None):
    """Create an ImageProperty record from *values* and return it as a dict."""
    prop_ref = models.ImageProperty()
    created = _image_property_update(context, prop_ref, values, session=session)
    return created.to_dict()
[ "def", "image_property_create", "(", "context", ",", "values", ",", "session", "=", "None", ")", ":", "prop_ref", "=", "models", ".", "ImageProperty", "(", ")", "prop", "=", "_image_property_update", "(", "context", ",", "prop_ref", ",", "values", ",", "session", "=", "session", ")", "return", "prop", ".", "to_dict", "(", ")" ]
create an imageproperty object .
train
false
20,270
def _item_to_metric(iterator, resource):
    """Convert a metric API resource into a Metric bound to the iterator's client."""
    client = iterator.client
    return Metric.from_api_repr(resource, client)
[ "def", "_item_to_metric", "(", "iterator", ",", "resource", ")", ":", "return", "Metric", ".", "from_api_repr", "(", "resource", ",", "iterator", ".", "client", ")" ]
convert a metric protobuf to the native object .
train
false
20,271
def platformTrust():
    """Obtain trust roots from the platform-managed OpenSSL default paths."""
    trust_roots = OpenSSLDefaultPaths()
    return trust_roots
[ "def", "platformTrust", "(", ")", ":", "return", "OpenSSLDefaultPaths", "(", ")" ]
attempt to discover a set of trusted certificate authority certificates whose trust is managed and updated by tools outside of twisted .
train
false
20,272
def generate_oriented_forest(n):
    """Yield all oriented forests on *n* nodes as parent-pointer lists.

    Each yielded list of length n holds, at position i, the index of node
    i+1's parent (0 meaning "root").  Successor forests are produced by
    mutating P in place, so callers must copy a yielded list to keep it.
    """
    # P[0] is a -1 sentinel; P[1..n] starts as the path forest 0, 1, ..., n-1.
    P = list(range((-1), n))
    while True:
        (yield P[1:])
        if (P[n] > 0):
            # Cheap successor: re-attach the last node to its parent's parent.
            P[n] = P[P[n]]
        else:
            # Find the rightmost node p that is not a root, locate the latest
            # earlier node q whose parent value is P[p]-1, then replicate the
            # segment pattern from q forward over positions p..n.
            for p in range((n - 1), 0, (-1)):
                if (P[p] != 0):
                    target = (P[p] - 1)
                    for q in range((p - 1), 0, (-1)):
                        if (P[q] == target):
                            break
                    offset = (p - q)
                    for i in range(p, (n + 1)):
                        P[i] = P[(i - offset)]
                    break
            else:
                # No non-root node remains: every forest has been generated.
                break
[ "def", "generate_oriented_forest", "(", "n", ")", ":", "P", "=", "list", "(", "range", "(", "(", "-", "1", ")", ",", "n", ")", ")", "while", "True", ":", "(", "yield", "P", "[", "1", ":", "]", ")", "if", "(", "P", "[", "n", "]", ">", "0", ")", ":", "P", "[", "n", "]", "=", "P", "[", "P", "[", "n", "]", "]", "else", ":", "for", "p", "in", "range", "(", "(", "n", "-", "1", ")", ",", "0", ",", "(", "-", "1", ")", ")", ":", "if", "(", "P", "[", "p", "]", "!=", "0", ")", ":", "target", "=", "(", "P", "[", "p", "]", "-", "1", ")", "for", "q", "in", "range", "(", "(", "p", "-", "1", ")", ",", "0", ",", "(", "-", "1", ")", ")", ":", "if", "(", "P", "[", "q", "]", "==", "target", ")", ":", "break", "offset", "=", "(", "p", "-", "q", ")", "for", "i", "in", "range", "(", "p", ",", "(", "n", "+", "1", ")", ")", ":", "P", "[", "i", "]", "=", "P", "[", "(", "i", "-", "offset", ")", "]", "break", "else", ":", "break" ]
this algorithm generates oriented forests .
train
false
20,274
def test_builtin_completion():
    """Tab completion in the console must complete builtin names (mi<TAB> -> min)."""
    superConsole.SendKeys('outputRedirectStart{(}{)}{ENTER}')
    expected = ''
    superConsole.SendKeys('print mi{TAB}{(}1,2,3{)}{ENTER}')
    expected += '1'
    superConsole.SendKeys('outputRedirectStop{(}{)}{ENTER}')
    verifyResults(getTestOutput()[0], expected)
[ "def", "test_builtin_completion", "(", ")", ":", "superConsole", ".", "SendKeys", "(", "'outputRedirectStart{(}{)}{ENTER}'", ")", "testRegex", "=", "''", "superConsole", ".", "SendKeys", "(", "'print mi{TAB}{(}1,2,3{)}{ENTER}'", ")", "testRegex", "+=", "'1'", "superConsole", ".", "SendKeys", "(", "'outputRedirectStop{(}{)}{ENTER}'", ")", "verifyResults", "(", "getTestOutput", "(", ")", "[", "0", "]", ",", "testRegex", ")" ]
verifies we can complete to builtins .
train
false
20,275
def fragment_sequences(sequence, qualities, splitchar): if (len(sequence) != len(qualities)): print(sequence, qualities) raise RuntimeError("Internal error: length of sequence and qualities don't match???") retlist = [] if (len(sequence) == 0): return retlist actseq = [] actqual = [] if (sequence[0] != splitchar): inseq = True else: inseq = False for (char, qual) in zip(sequence, qualities): if inseq: if (char != splitchar): actseq.append(char) actqual.append(qual) else: retlist.append((''.join(actseq), actqual)) actseq = [] actqual = [] inseq = False elif (char != splitchar): inseq = True actseq.append(char) actqual.append(qual) if (inseq and len(actseq)): retlist.append((''.join(actseq), actqual)) return retlist
[ "def", "fragment_sequences", "(", "sequence", ",", "qualities", ",", "splitchar", ")", ":", "if", "(", "len", "(", "sequence", ")", "!=", "len", "(", "qualities", ")", ")", ":", "print", "(", "sequence", ",", "qualities", ")", "raise", "RuntimeError", "(", "\"Internal error: length of sequence and qualities don't match???\"", ")", "retlist", "=", "[", "]", "if", "(", "len", "(", "sequence", ")", "==", "0", ")", ":", "return", "retlist", "actseq", "=", "[", "]", "actqual", "=", "[", "]", "if", "(", "sequence", "[", "0", "]", "!=", "splitchar", ")", ":", "inseq", "=", "True", "else", ":", "inseq", "=", "False", "for", "(", "char", ",", "qual", ")", "in", "zip", "(", "sequence", ",", "qualities", ")", ":", "if", "inseq", ":", "if", "(", "char", "!=", "splitchar", ")", ":", "actseq", ".", "append", "(", "char", ")", "actqual", ".", "append", "(", "qual", ")", "else", ":", "retlist", ".", "append", "(", "(", "''", ".", "join", "(", "actseq", ")", ",", "actqual", ")", ")", "actseq", "=", "[", "]", "actqual", "=", "[", "]", "inseq", "=", "False", "elif", "(", "char", "!=", "splitchar", ")", ":", "inseq", "=", "True", "actseq", ".", "append", "(", "char", ")", "actqual", ".", "append", "(", "qual", ")", "if", "(", "inseq", "and", "len", "(", "actseq", ")", ")", ":", "retlist", ".", "append", "(", "(", "''", ".", "join", "(", "actseq", ")", ",", "actqual", ")", ")", "return", "retlist" ]
works like split() on strings .
train
false
20,276
def fragment_fromstring(html, create_parent=False, base_url=None, parser=None, **kw): if (parser is None): parser = html_parser accept_leading_text = bool(create_parent) elements = fragments_fromstring(html, parser=parser, no_leading_text=(not accept_leading_text), base_url=base_url, **kw) if create_parent: if (not isinstance(create_parent, basestring)): create_parent = 'div' new_root = Element(create_parent) if elements: if isinstance(elements[0], basestring): new_root.text = elements[0] del elements[0] new_root.extend(elements) return new_root if (not elements): raise etree.ParserError('No elements found') if (len(elements) > 1): raise etree.ParserError(('Multiple elements found (%s)' % ', '.join([_element_name(e) for e in elements]))) el = elements[0] if (el.tail and el.tail.strip()): raise etree.ParserError(('Element followed by text: %r' % el.tail)) el.tail = None return el
[ "def", "fragment_fromstring", "(", "html", ",", "create_parent", "=", "False", ",", "base_url", "=", "None", ",", "parser", "=", "None", ",", "**", "kw", ")", ":", "if", "(", "parser", "is", "None", ")", ":", "parser", "=", "html_parser", "accept_leading_text", "=", "bool", "(", "create_parent", ")", "elements", "=", "fragments_fromstring", "(", "html", ",", "parser", "=", "parser", ",", "no_leading_text", "=", "(", "not", "accept_leading_text", ")", ",", "base_url", "=", "base_url", ",", "**", "kw", ")", "if", "create_parent", ":", "if", "(", "not", "isinstance", "(", "create_parent", ",", "basestring", ")", ")", ":", "create_parent", "=", "'div'", "new_root", "=", "Element", "(", "create_parent", ")", "if", "elements", ":", "if", "isinstance", "(", "elements", "[", "0", "]", ",", "basestring", ")", ":", "new_root", ".", "text", "=", "elements", "[", "0", "]", "del", "elements", "[", "0", "]", "new_root", ".", "extend", "(", "elements", ")", "return", "new_root", "if", "(", "not", "elements", ")", ":", "raise", "etree", ".", "ParserError", "(", "'No elements found'", ")", "if", "(", "len", "(", "elements", ")", ">", "1", ")", ":", "raise", "etree", ".", "ParserError", "(", "(", "'Multiple elements found (%s)'", "%", "', '", ".", "join", "(", "[", "_element_name", "(", "e", ")", "for", "e", "in", "elements", "]", ")", ")", ")", "el", "=", "elements", "[", "0", "]", "if", "(", "el", ".", "tail", "and", "el", ".", "tail", ".", "strip", "(", ")", ")", ":", "raise", "etree", ".", "ParserError", "(", "(", "'Element followed by text: %r'", "%", "el", ".", "tail", ")", ")", "el", ".", "tail", "=", "None", "return", "el" ]
parses a single html element; it is an error if there is more than one element .
train
true
20,277
def MeanPool2d(net, filter_size=(2, 2), strides=None, padding='SAME', name='meanpool'): if (strides is None): strides = filter_size net = PoolLayer(net, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.avg_pool, name=name) return net
[ "def", "MeanPool2d", "(", "net", ",", "filter_size", "=", "(", "2", ",", "2", ")", ",", "strides", "=", "None", ",", "padding", "=", "'SAME'", ",", "name", "=", "'meanpool'", ")", ":", "if", "(", "strides", "is", "None", ")", ":", "strides", "=", "filter_size", "net", "=", "PoolLayer", "(", "net", ",", "ksize", "=", "[", "1", ",", "filter_size", "[", "0", "]", ",", "filter_size", "[", "1", "]", ",", "1", "]", ",", "strides", "=", "[", "1", ",", "strides", "[", "0", "]", ",", "strides", "[", "1", "]", ",", "1", "]", ",", "padding", "=", "padding", ",", "pool", "=", "tf", ".", "nn", ".", "avg_pool", ",", "name", "=", "name", ")", "return", "net" ]
wrapper for :class:poollayer .
train
false
20,278
def make_compact(creation_sequence): first = creation_sequence[0] if isinstance(first, str): cs = creation_sequence[:] elif isinstance(first, tuple): cs = [s[1] for s in creation_sequence] elif isinstance(first, int): return creation_sequence else: raise TypeError('Not a valid creation sequence type') ccs = [] count = 1 for i in range(1, len(cs)): if (cs[i] == cs[(i - 1)]): count += 1 else: ccs.append(count) count = 1 ccs.append(count) return ccs
[ "def", "make_compact", "(", "creation_sequence", ")", ":", "first", "=", "creation_sequence", "[", "0", "]", "if", "isinstance", "(", "first", ",", "str", ")", ":", "cs", "=", "creation_sequence", "[", ":", "]", "elif", "isinstance", "(", "first", ",", "tuple", ")", ":", "cs", "=", "[", "s", "[", "1", "]", "for", "s", "in", "creation_sequence", "]", "elif", "isinstance", "(", "first", ",", "int", ")", ":", "return", "creation_sequence", "else", ":", "raise", "TypeError", "(", "'Not a valid creation sequence type'", ")", "ccs", "=", "[", "]", "count", "=", "1", "for", "i", "in", "range", "(", "1", ",", "len", "(", "cs", ")", ")", ":", "if", "(", "cs", "[", "i", "]", "==", "cs", "[", "(", "i", "-", "1", ")", "]", ")", ":", "count", "+=", "1", "else", ":", "ccs", ".", "append", "(", "count", ")", "count", "=", "1", "ccs", ".", "append", "(", "count", ")", "return", "ccs" ]
returns the creation sequence in a compact form that is the number of is and ds alternating .
train
false
20,279
def check_arrays(x, y, err_msg='', verbose=True, check_dtypes=True): assert (type(x) == type(y)), '{x} != {y}'.format(x=type(x), y=type(y)) assert (x.dtype == y.dtype), '{x.dtype} != {y.dtype}'.format(x=x, y=y) if isinstance(x, LabelArray): assert_array_equal(x.is_missing(), y.is_missing(), err_msg=err_msg, verbose=verbose) x = x.as_string_array() y = y.as_string_array() elif (x.dtype.kind in 'mM'): x_isnat = isnat(x) y_isnat = isnat(y) assert_array_equal(x_isnat, y_isnat, err_msg='NaTs not equal', verbose=verbose) x = np.where(x_isnat, np.zeros_like(x), x) y = np.where(x_isnat, np.zeros_like(x), x) return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
[ "def", "check_arrays", "(", "x", ",", "y", ",", "err_msg", "=", "''", ",", "verbose", "=", "True", ",", "check_dtypes", "=", "True", ")", ":", "assert", "(", "type", "(", "x", ")", "==", "type", "(", "y", ")", ")", ",", "'{x} != {y}'", ".", "format", "(", "x", "=", "type", "(", "x", ")", ",", "y", "=", "type", "(", "y", ")", ")", "assert", "(", "x", ".", "dtype", "==", "y", ".", "dtype", ")", ",", "'{x.dtype} != {y.dtype}'", ".", "format", "(", "x", "=", "x", ",", "y", "=", "y", ")", "if", "isinstance", "(", "x", ",", "LabelArray", ")", ":", "assert_array_equal", "(", "x", ".", "is_missing", "(", ")", ",", "y", ".", "is_missing", "(", ")", ",", "err_msg", "=", "err_msg", ",", "verbose", "=", "verbose", ")", "x", "=", "x", ".", "as_string_array", "(", ")", "y", "=", "y", ".", "as_string_array", "(", ")", "elif", "(", "x", ".", "dtype", ".", "kind", "in", "'mM'", ")", ":", "x_isnat", "=", "isnat", "(", "x", ")", "y_isnat", "=", "isnat", "(", "y", ")", "assert_array_equal", "(", "x_isnat", ",", "y_isnat", ",", "err_msg", "=", "'NaTs not equal'", ",", "verbose", "=", "verbose", ")", "x", "=", "np", ".", "where", "(", "x_isnat", ",", "np", ".", "zeros_like", "(", "x", ")", ",", "x", ")", "y", "=", "np", ".", "where", "(", "x_isnat", ",", "np", ".", "zeros_like", "(", "x", ")", ",", "x", ")", "return", "assert_array_equal", "(", "x", ",", "y", ",", "err_msg", "=", "err_msg", ",", "verbose", "=", "verbose", ")" ]
wrapper around np .
train
false
20,280
def init_logger(): logger = logging.getLogger('south') logger.addHandler(NullHandler()) return logger
[ "def", "init_logger", "(", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "'south'", ")", "logger", ".", "addHandler", "(", "NullHandler", "(", ")", ")", "return", "logger" ]
initialize the south logger .
train
false
20,281
def rel_posix_to_abs_local(host, path, environ=None): if (environ is None): environ = os.environ if path.startswith('/'): path = path[1:] root = path_for_host(host, environ) return os.path.join(root, *path.split('/'))
[ "def", "rel_posix_to_abs_local", "(", "host", ",", "path", ",", "environ", "=", "None", ")", ":", "if", "(", "environ", "is", "None", ")", ":", "environ", "=", "os", ".", "environ", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "path", "=", "path", "[", "1", ":", "]", "root", "=", "path_for_host", "(", "host", ",", "environ", ")", "return", "os", ".", "path", ".", "join", "(", "root", ",", "*", "path", ".", "split", "(", "'/'", ")", ")" ]
convert a posix path to the current systems format and prepend the tmp directory the hosts files are in .
train
false
20,282
def addFacesByGrid(faces, grid): cellTopLoops = getIndexedCellLoopsFromIndexedGrid(grid) for cellTopLoop in cellTopLoops: addFacesByConvex(faces, cellTopLoop)
[ "def", "addFacesByGrid", "(", "faces", ",", "grid", ")", ":", "cellTopLoops", "=", "getIndexedCellLoopsFromIndexedGrid", "(", "grid", ")", "for", "cellTopLoop", "in", "cellTopLoops", ":", "addFacesByConvex", "(", "faces", ",", "cellTopLoop", ")" ]
add faces from grid .
train
false
20,284
def submit_and_wait_for_completion(unit_test, connection, start, end, increment, precision, split_range=False): pending_callbacks = [] completed_callbacks = [] for gross_time in range(start, end, increment): timeout = get_timeout(gross_time, start, end, precision, split_range) callback = TimerCallback(timeout) connection.create_timer(timeout, callback.invoke) pending_callbacks.append(callback) while (len(pending_callbacks) is not 0): for callback in pending_callbacks: if callback.was_invoked(): pending_callbacks.remove(callback) completed_callbacks.append(callback) time.sleep(0.1) for callback in completed_callbacks: unit_test.assertAlmostEqual(callback.expected_wait, callback.get_wait_time(), delta=0.15)
[ "def", "submit_and_wait_for_completion", "(", "unit_test", ",", "connection", ",", "start", ",", "end", ",", "increment", ",", "precision", ",", "split_range", "=", "False", ")", ":", "pending_callbacks", "=", "[", "]", "completed_callbacks", "=", "[", "]", "for", "gross_time", "in", "range", "(", "start", ",", "end", ",", "increment", ")", ":", "timeout", "=", "get_timeout", "(", "gross_time", ",", "start", ",", "end", ",", "precision", ",", "split_range", ")", "callback", "=", "TimerCallback", "(", "timeout", ")", "connection", ".", "create_timer", "(", "timeout", ",", "callback", ".", "invoke", ")", "pending_callbacks", ".", "append", "(", "callback", ")", "while", "(", "len", "(", "pending_callbacks", ")", "is", "not", "0", ")", ":", "for", "callback", "in", "pending_callbacks", ":", "if", "callback", ".", "was_invoked", "(", ")", ":", "pending_callbacks", ".", "remove", "(", "callback", ")", "completed_callbacks", ".", "append", "(", "callback", ")", "time", ".", "sleep", "(", "0.1", ")", "for", "callback", "in", "completed_callbacks", ":", "unit_test", ".", "assertAlmostEqual", "(", "callback", ".", "expected_wait", ",", "callback", ".", "get_wait_time", "(", ")", ",", "delta", "=", "0.15", ")" ]
this will submit a number of timers to the provided connection .
train
false
20,286
def PIL_to_npimage(im): return np.array(im)
[ "def", "PIL_to_npimage", "(", "im", ")", ":", "return", "np", ".", "array", "(", "im", ")" ]
transforms a pil/pillow image into a numpy rgb(a) image .
train
false
20,292
@memoize def set_scputimes_ntuple(procfs_path): global scputimes with open_binary(('%s/stat' % procfs_path)) as f: values = f.readline().split()[1:] fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'] vlen = len(values) if (vlen >= 8): fields.append('steal') if (vlen >= 9): fields.append('guest') if (vlen >= 10): fields.append('guest_nice') scputimes = namedtuple('scputimes', fields) return scputimes
[ "@", "memoize", "def", "set_scputimes_ntuple", "(", "procfs_path", ")", ":", "global", "scputimes", "with", "open_binary", "(", "(", "'%s/stat'", "%", "procfs_path", ")", ")", "as", "f", ":", "values", "=", "f", ".", "readline", "(", ")", ".", "split", "(", ")", "[", "1", ":", "]", "fields", "=", "[", "'user'", ",", "'nice'", ",", "'system'", ",", "'idle'", ",", "'iowait'", ",", "'irq'", ",", "'softirq'", "]", "vlen", "=", "len", "(", "values", ")", "if", "(", "vlen", ">=", "8", ")", ":", "fields", ".", "append", "(", "'steal'", ")", "if", "(", "vlen", ">=", "9", ")", ":", "fields", ".", "append", "(", "'guest'", ")", "if", "(", "vlen", ">=", "10", ")", ":", "fields", ".", "append", "(", "'guest_nice'", ")", "scputimes", "=", "namedtuple", "(", "'scputimes'", ",", "fields", ")", "return", "scputimes" ]
return a namedtuple of variable fields depending on the cpu times available on this linux kernel version which may be: .
train
false
20,293
def IsPrimitiveType(obj): return (isinstance(obj, types.bool) or isinstance(obj, types.byte) or isinstance(obj, types.short) or isinstance(obj, six.integer_types) or isinstance(obj, types.double) or isinstance(obj, types.float) or isinstance(obj, six.string_types) or isinstance(obj, types.PropertyPath) or isinstance(obj, types.ManagedMethod) or isinstance(obj, types.datetime) or isinstance(obj, types.URI) or isinstance(obj, type))
[ "def", "IsPrimitiveType", "(", "obj", ")", ":", "return", "(", "isinstance", "(", "obj", ",", "types", ".", "bool", ")", "or", "isinstance", "(", "obj", ",", "types", ".", "byte", ")", "or", "isinstance", "(", "obj", ",", "types", ".", "short", ")", "or", "isinstance", "(", "obj", ",", "six", ".", "integer_types", ")", "or", "isinstance", "(", "obj", ",", "types", ".", "double", ")", "or", "isinstance", "(", "obj", ",", "types", ".", "float", ")", "or", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", "or", "isinstance", "(", "obj", ",", "types", ".", "PropertyPath", ")", "or", "isinstance", "(", "obj", ",", "types", ".", "ManagedMethod", ")", "or", "isinstance", "(", "obj", ",", "types", ".", "datetime", ")", "or", "isinstance", "(", "obj", ",", "types", ".", "URI", ")", "or", "isinstance", "(", "obj", ",", "type", ")", ")" ]
see if the passed in type is a primitive type .
train
true
20,294
def CreateFolder(path, timestamp, token=None): with test_lib.FakeTime(timestamp): with aff4.FACTORY.Create(path, aff4_type=aff4_standard.VFSDirectory, mode='w', token=token) as _: pass
[ "def", "CreateFolder", "(", "path", ",", "timestamp", ",", "token", "=", "None", ")", ":", "with", "test_lib", ".", "FakeTime", "(", "timestamp", ")", ":", "with", "aff4", ".", "FACTORY", ".", "Create", "(", "path", ",", "aff4_type", "=", "aff4_standard", ".", "VFSDirectory", ",", "mode", "=", "'w'", ",", "token", "=", "token", ")", "as", "_", ":", "pass" ]
creates a vfs folder .
train
false
20,295
def simulate_put(app, path, **kwargs): return simulate_request(app, 'PUT', path, **kwargs)
[ "def", "simulate_put", "(", "app", ",", "path", ",", "**", "kwargs", ")", ":", "return", "simulate_request", "(", "app", ",", "'PUT'", ",", "path", ",", "**", "kwargs", ")" ]
simulates a put request to a wsgi application .
train
false
20,301
def test_unicode_labels_decode(Chart): chart = Chart() chart.add(u('S\xc3\xa9rie1'), [{'value': 1, 'xlink': 'http://1/', 'label': u('{\\}\xc3\x82\xc2\xb0\xc4\xb3\xc3\xa6\xc3\xb0\xc2\xa9&\xc3\x97&<\xe2\x80\x94\xc3\x97\xe2\x82\xac\xc2\xbf_\xe2\x80\xa6\\{_\xe2\x80\xa6')}, {'value': 2, 'xlink': {'href': 'http://6.example.com/'}, 'label': u('\xc3\xa6\xc3\x82\xc2\xb0\xe2\x82\xac\xe2\x89\xa0|\xe2\x82\xac\xc3\xa6\xc3\x82\xc2\xb0\xe2\x82\xac\xc9\x99\xc3\xa6')}, {'value': 3, 'label': 'unicode <3'}]) if (not chart._dual): chart.x_labels = [u('&\xc5\x93'), u('\xc2\xbf?'), u('\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0\xe2\x80\xa0'), 'unicode <3'] chart.render_pyquery()
[ "def", "test_unicode_labels_decode", "(", "Chart", ")", ":", "chart", "=", "Chart", "(", ")", "chart", ".", "add", "(", "u", "(", "'S\\xc3\\xa9rie1'", ")", ",", "[", "{", "'value'", ":", "1", ",", "'xlink'", ":", "'http://1/'", ",", "'label'", ":", "u", "(", "'{\\\\}\\xc3\\x82\\xc2\\xb0\\xc4\\xb3\\xc3\\xa6\\xc3\\xb0\\xc2\\xa9&\\xc3\\x97&<\\xe2\\x80\\x94\\xc3\\x97\\xe2\\x82\\xac\\xc2\\xbf_\\xe2\\x80\\xa6\\\\{_\\xe2\\x80\\xa6'", ")", "}", ",", "{", "'value'", ":", "2", ",", "'xlink'", ":", "{", "'href'", ":", "'http://6.example.com/'", "}", ",", "'label'", ":", "u", "(", "'\\xc3\\xa6\\xc3\\x82\\xc2\\xb0\\xe2\\x82\\xac\\xe2\\x89\\xa0|\\xe2\\x82\\xac\\xc3\\xa6\\xc3\\x82\\xc2\\xb0\\xe2\\x82\\xac\\xc9\\x99\\xc3\\xa6'", ")", "}", ",", "{", "'value'", ":", "3", ",", "'label'", ":", "'unicode <3'", "}", "]", ")", "if", "(", "not", "chart", ".", "_dual", ")", ":", "chart", ".", "x_labels", "=", "[", "u", "(", "'&\\xc5\\x93'", ")", ",", "u", "(", "'\\xc2\\xbf?'", ")", ",", "u", "(", "'\\xe2\\x80\\xa0\\xe2\\x80\\xa0\\xe2\\x80\\xa0\\xe2\\x80\\xa0\\xe2\\x80\\xa0\\xe2\\x80\\xa0\\xe2\\x80\\xa0\\xe2\\x80\\xa0'", ")", ",", "'unicode <3'", "]", "chart", ".", "render_pyquery", "(", ")" ]
test unicode labels .
train
false
20,302
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): A2kN = 0.0 lj = (Z.searchsorted(Zstar[:(-1)], 'right') - Z.searchsorted(Zstar[:(-1)], 'left')) Bj = lj.cumsum() for i in arange(0, k): s = np.sort(samples[i]) Mij = s.searchsorted(Zstar[:(-1)], side='right') inner = (((lj / float(N)) * (((N * Mij) - (Bj * n[i])) ** 2)) / (Bj * (N - Bj))) A2kN += (inner.sum() / n[i]) return A2kN
[ "def", "_anderson_ksamp_right", "(", "samples", ",", "Z", ",", "Zstar", ",", "k", ",", "n", ",", "N", ")", ":", "A2kN", "=", "0.0", "lj", "=", "(", "Z", ".", "searchsorted", "(", "Zstar", "[", ":", "(", "-", "1", ")", "]", ",", "'right'", ")", "-", "Z", ".", "searchsorted", "(", "Zstar", "[", ":", "(", "-", "1", ")", "]", ",", "'left'", ")", ")", "Bj", "=", "lj", ".", "cumsum", "(", ")", "for", "i", "in", "arange", "(", "0", ",", "k", ")", ":", "s", "=", "np", ".", "sort", "(", "samples", "[", "i", "]", ")", "Mij", "=", "s", ".", "searchsorted", "(", "Zstar", "[", ":", "(", "-", "1", ")", "]", ",", "side", "=", "'right'", ")", "inner", "=", "(", "(", "(", "lj", "/", "float", "(", "N", ")", ")", "*", "(", "(", "(", "N", "*", "Mij", ")", "-", "(", "Bj", "*", "n", "[", "i", "]", ")", ")", "**", "2", ")", ")", "/", "(", "Bj", "*", "(", "N", "-", "Bj", ")", ")", ")", "A2kN", "+=", "(", "inner", ".", "sum", "(", ")", "/", "n", "[", "i", "]", ")", "return", "A2kN" ]
compute a2akn equation 6 of scholz & stephens .
train
false
20,303
def _monkeypatch_console(): try: import termios import sys import pyrepl.unix_console uc = pyrepl.unix_console.UnixConsole old = uc.prepare def prep(self): old(self) f = sys.stdin.fileno() a = termios.tcgetattr(f) a[1] |= 1 termios.tcsetattr(f, termios.TCSANOW, a) uc.prepare = prep except: pass
[ "def", "_monkeypatch_console", "(", ")", ":", "try", ":", "import", "termios", "import", "sys", "import", "pyrepl", ".", "unix_console", "uc", "=", "pyrepl", ".", "unix_console", ".", "UnixConsole", "old", "=", "uc", ".", "prepare", "def", "prep", "(", "self", ")", ":", "old", "(", "self", ")", "f", "=", "sys", ".", "stdin", ".", "fileno", "(", ")", "a", "=", "termios", ".", "tcgetattr", "(", "f", ")", "a", "[", "1", "]", "|=", "1", "termios", ".", "tcsetattr", "(", "f", ",", "termios", ".", "TCSANOW", ",", "a", ")", "uc", ".", "prepare", "=", "prep", "except", ":", "pass" ]
the readline in pypy turns off output postprocessing .
train
false
20,304
def cache_timer_decorator(fn_name): def wrap(fn): def timed_fn(self, *a, **kw): use_timer = kw.pop('use_timer', True) try: getattr(g, 'log') except TypeError: return fn(self, *a, **kw) if (use_timer and self.stats): publish = (random.random() < g.stats.CACHE_SAMPLE_RATE) cache_name = self.stats.cache_name timer_name = ('cache.%s.%s' % (cache_name, fn_name)) timer = g.stats.get_timer(timer_name, publish) timer.start() else: timer = None result = fn(self, *a, **kw) if timer: timer.stop() return result return timed_fn return wrap
[ "def", "cache_timer_decorator", "(", "fn_name", ")", ":", "def", "wrap", "(", "fn", ")", ":", "def", "timed_fn", "(", "self", ",", "*", "a", ",", "**", "kw", ")", ":", "use_timer", "=", "kw", ".", "pop", "(", "'use_timer'", ",", "True", ")", "try", ":", "getattr", "(", "g", ",", "'log'", ")", "except", "TypeError", ":", "return", "fn", "(", "self", ",", "*", "a", ",", "**", "kw", ")", "if", "(", "use_timer", "and", "self", ".", "stats", ")", ":", "publish", "=", "(", "random", ".", "random", "(", ")", "<", "g", ".", "stats", ".", "CACHE_SAMPLE_RATE", ")", "cache_name", "=", "self", ".", "stats", ".", "cache_name", "timer_name", "=", "(", "'cache.%s.%s'", "%", "(", "cache_name", ",", "fn_name", ")", ")", "timer", "=", "g", ".", "stats", ".", "get_timer", "(", "timer_name", ",", "publish", ")", "timer", ".", "start", "(", ")", "else", ":", "timer", "=", "None", "result", "=", "fn", "(", "self", ",", "*", "a", ",", "**", "kw", ")", "if", "timer", ":", "timer", ".", "stop", "(", ")", "return", "result", "return", "timed_fn", "return", "wrap" ]
use to decorate cachechain operations so timings will be recorded .
train
false
20,305
def printResults(results, domainname): sys.stdout.write(('# Domain Summary for %r\n' % (domainname,))) sys.stdout.write(('\n\n'.join(results) + '\n'))
[ "def", "printResults", "(", "results", ",", "domainname", ")", ":", "sys", ".", "stdout", ".", "write", "(", "(", "'# Domain Summary for %r\\n'", "%", "(", "domainname", ",", ")", ")", ")", "sys", ".", "stdout", ".", "write", "(", "(", "'\\n\\n'", ".", "join", "(", "results", ")", "+", "'\\n'", ")", ")" ]
print the formatted results for each dns record type .
train
false
20,306
@require_authorized_access_to_student_data @render_to('coachreports/student_view.html') def student_view(request): return student_view_context(request=request)
[ "@", "require_authorized_access_to_student_data", "@", "render_to", "(", "'coachreports/student_view.html'", ")", "def", "student_view", "(", "request", ")", ":", "return", "student_view_context", "(", "request", "=", "request", ")" ]
student view: data generated on the back-end .
train
false
20,308
def create_spline(y, yp, x, h): from scipy.interpolate import PPoly (n, m) = y.shape c = np.empty((4, n, (m - 1)), dtype=y.dtype) slope = ((y[:, 1:] - y[:, :(-1)]) / h) t = (((yp[:, :(-1)] + yp[:, 1:]) - (2 * slope)) / h) c[0] = (t / h) c[1] = (((slope - yp[:, :(-1)]) / h) - t) c[2] = yp[:, :(-1)] c[3] = y[:, :(-1)] c = np.rollaxis(c, 1) return PPoly(c, x, extrapolate=True, axis=1)
[ "def", "create_spline", "(", "y", ",", "yp", ",", "x", ",", "h", ")", ":", "from", "scipy", ".", "interpolate", "import", "PPoly", "(", "n", ",", "m", ")", "=", "y", ".", "shape", "c", "=", "np", ".", "empty", "(", "(", "4", ",", "n", ",", "(", "m", "-", "1", ")", ")", ",", "dtype", "=", "y", ".", "dtype", ")", "slope", "=", "(", "(", "y", "[", ":", ",", "1", ":", "]", "-", "y", "[", ":", ",", ":", "(", "-", "1", ")", "]", ")", "/", "h", ")", "t", "=", "(", "(", "(", "yp", "[", ":", ",", ":", "(", "-", "1", ")", "]", "+", "yp", "[", ":", ",", "1", ":", "]", ")", "-", "(", "2", "*", "slope", ")", ")", "/", "h", ")", "c", "[", "0", "]", "=", "(", "t", "/", "h", ")", "c", "[", "1", "]", "=", "(", "(", "(", "slope", "-", "yp", "[", ":", ",", ":", "(", "-", "1", ")", "]", ")", "/", "h", ")", "-", "t", ")", "c", "[", "2", "]", "=", "yp", "[", ":", ",", ":", "(", "-", "1", ")", "]", "c", "[", "3", "]", "=", "y", "[", ":", ",", ":", "(", "-", "1", ")", "]", "c", "=", "np", ".", "rollaxis", "(", "c", ",", "1", ")", "return", "PPoly", "(", "c", ",", "x", ",", "extrapolate", "=", "True", ",", "axis", "=", "1", ")" ]
create a cubic spline given values and derivatives .
train
false
20,309
def remove_floating_forward(floating_ip, fixed_ip, device, network): for (chain, rule) in floating_forward_rules(floating_ip, fixed_ip, device): iptables_manager.ipv4['nat'].remove_rule(chain, rule) iptables_manager.apply() if (device != network['bridge']): remove_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
[ "def", "remove_floating_forward", "(", "floating_ip", ",", "fixed_ip", ",", "device", ",", "network", ")", ":", "for", "(", "chain", ",", "rule", ")", "in", "floating_forward_rules", "(", "floating_ip", ",", "fixed_ip", ",", "device", ")", ":", "iptables_manager", ".", "ipv4", "[", "'nat'", "]", ".", "remove_rule", "(", "chain", ",", "rule", ")", "iptables_manager", ".", "apply", "(", ")", "if", "(", "device", "!=", "network", "[", "'bridge'", "]", ")", ":", "remove_ebtables_rules", "(", "*", "floating_ebtables_rules", "(", "fixed_ip", ",", "network", ")", ")" ]
remove forwarding for floating ip .
train
false
20,310
def _decorate_with_warning(func, wtype, message, docstring_header=None): message = _sanitize_restructured_text(message) @decorator def warned(fn, *args, **kwargs): warnings.warn(wtype(message), stacklevel=3) return fn(*args, **kwargs) doc = (((func.__doc__ is not None) and func.__doc__) or '') if (docstring_header is not None): docstring_header %= dict(func=func.__name__) doc = inject_docstring_text(doc, docstring_header, 1) decorated = warned(func) decorated.__doc__ = doc return decorated
[ "def", "_decorate_with_warning", "(", "func", ",", "wtype", ",", "message", ",", "docstring_header", "=", "None", ")", ":", "message", "=", "_sanitize_restructured_text", "(", "message", ")", "@", "decorator", "def", "warned", "(", "fn", ",", "*", "args", ",", "**", "kwargs", ")", ":", "warnings", ".", "warn", "(", "wtype", "(", "message", ")", ",", "stacklevel", "=", "3", ")", "return", "fn", "(", "*", "args", ",", "**", "kwargs", ")", "doc", "=", "(", "(", "(", "func", ".", "__doc__", "is", "not", "None", ")", "and", "func", ".", "__doc__", ")", "or", "''", ")", "if", "(", "docstring_header", "is", "not", "None", ")", ":", "docstring_header", "%=", "dict", "(", "func", "=", "func", ".", "__name__", ")", "doc", "=", "inject_docstring_text", "(", "doc", ",", "docstring_header", ",", "1", ")", "decorated", "=", "warned", "(", "func", ")", "decorated", ".", "__doc__", "=", "doc", "return", "decorated" ]
wrap a function with a warnings .
train
false
20,312
def getEndpointsFromSegmentTable(segmentTable): endpoints = [] segmentTableKeys = segmentTable.keys() segmentTableKeys.sort() for segmentTableKey in segmentTableKeys: for segment in segmentTable[segmentTableKey]: for endpoint in segment: endpoints.append(endpoint) return endpoints
[ "def", "getEndpointsFromSegmentTable", "(", "segmentTable", ")", ":", "endpoints", "=", "[", "]", "segmentTableKeys", "=", "segmentTable", ".", "keys", "(", ")", "segmentTableKeys", ".", "sort", "(", ")", "for", "segmentTableKey", "in", "segmentTableKeys", ":", "for", "segment", "in", "segmentTable", "[", "segmentTableKey", "]", ":", "for", "endpoint", "in", "segment", ":", "endpoints", ".", "append", "(", "endpoint", ")", "return", "endpoints" ]
get the endpoints from the segment table .
train
false
20,313
def _parse_subject(subject): ret = {} nids = [] for (nid_name, nid_num) in six.iteritems(subject.nid): if (nid_num in nids): continue val = getattr(subject, nid_name) if val: ret[nid_name] = val nids.append(nid_num) return ret
[ "def", "_parse_subject", "(", "subject", ")", ":", "ret", "=", "{", "}", "nids", "=", "[", "]", "for", "(", "nid_name", ",", "nid_num", ")", "in", "six", ".", "iteritems", "(", "subject", ".", "nid", ")", ":", "if", "(", "nid_num", "in", "nids", ")", ":", "continue", "val", "=", "getattr", "(", "subject", ",", "nid_name", ")", "if", "val", ":", "ret", "[", "nid_name", "]", "=", "val", "nids", ".", "append", "(", "nid_num", ")", "return", "ret" ]
returns a dict containing all values in an x509 subject .
train
false
20,314
def get_trace_component_for_trigger_instance(trigger_instance_db): trace_component = {} trace_component = {'id': str(trigger_instance_db.id), 'ref': trigger_instance_db.trigger} caused_by = {} if ((trigger_instance_db.trigger == ACTION_SENSOR_TRIGGER_REF) or (trigger_instance_db.trigger == NOTIFY_TRIGGER_REF)): caused_by['type'] = 'action_execution' caused_by['id'] = trigger_instance_db.payload['execution_id'] trace_component['caused_by'] = caused_by return trace_component
[ "def", "get_trace_component_for_trigger_instance", "(", "trigger_instance_db", ")", ":", "trace_component", "=", "{", "}", "trace_component", "=", "{", "'id'", ":", "str", "(", "trigger_instance_db", ".", "id", ")", ",", "'ref'", ":", "trigger_instance_db", ".", "trigger", "}", "caused_by", "=", "{", "}", "if", "(", "(", "trigger_instance_db", ".", "trigger", "==", "ACTION_SENSOR_TRIGGER_REF", ")", "or", "(", "trigger_instance_db", ".", "trigger", "==", "NOTIFY_TRIGGER_REF", ")", ")", ":", "caused_by", "[", "'type'", "]", "=", "'action_execution'", "caused_by", "[", "'id'", "]", "=", "trigger_instance_db", ".", "payload", "[", "'execution_id'", "]", "trace_component", "[", "'caused_by'", "]", "=", "caused_by", "return", "trace_component" ]
returns the trace_component compatible dict representation of a triggerinstance .
train
false
20,315
def test_exit_code(): exitcode_py = 'exitcode.py' f = open(exitcode_py, 'w') f.writelines(['import sys\n', 'sys.exit(99)\n']) f.close() process = System.Diagnostics.Process() process.StartInfo.FileName = executable process.StartInfo.Arguments = exitcode_py process.StartInfo.CreateNoWindow = True process.StartInfo.UseShellExecute = False process.Start() process.WaitForExit() if (process.ExitCode != 99): print 'SEVERE FAILURE: sys.exit test failed, cannot run tests!' System.Environment.Exit(1)
[ "def", "test_exit_code", "(", ")", ":", "exitcode_py", "=", "'exitcode.py'", "f", "=", "open", "(", "exitcode_py", ",", "'w'", ")", "f", ".", "writelines", "(", "[", "'import sys\\n'", ",", "'sys.exit(99)\\n'", "]", ")", "f", ".", "close", "(", ")", "process", "=", "System", ".", "Diagnostics", ".", "Process", "(", ")", "process", ".", "StartInfo", ".", "FileName", "=", "executable", "process", ".", "StartInfo", ".", "Arguments", "=", "exitcode_py", "process", ".", "StartInfo", ".", "CreateNoWindow", "=", "True", "process", ".", "StartInfo", ".", "UseShellExecute", "=", "False", "process", ".", "Start", "(", ")", "process", ".", "WaitForExit", "(", ")", "if", "(", "process", ".", "ExitCode", "!=", "99", ")", ":", "print", "'SEVERE FAILURE: sys.exit test failed, cannot run tests!'", "System", ".", "Environment", ".", "Exit", "(", "1", ")" ]
verify if we were to fail we would get a good exit code back .
train
false
20,317
def _convert_all_package_confs_to_dir(): for conf_file in SUPPORTED_CONFS: _package_conf_file_to_dir(conf_file)
[ "def", "_convert_all_package_confs_to_dir", "(", ")", ":", "for", "conf_file", "in", "SUPPORTED_CONFS", ":", "_package_conf_file_to_dir", "(", "conf_file", ")" ]
convert all /etc/portage/package .
train
false
20,318
def to_python(rule): namespace = {'IN': in_range_list, 'WITHIN': within_range_list, 'MOD': cldr_modulo, 'extract_operands': extract_operands} to_python_func = _PythonCompiler().compile result = ['def evaluate(n):', ' n, i, v, w, f, t = extract_operands(n)'] for (tag, ast) in PluralRule.parse(rule).abstract: result.append((' if (%s): return %r' % (to_python_func(ast), str(tag)))) result.append((' return %r' % _fallback_tag)) code = compile('\n'.join(result), '<rule>', 'exec') eval(code, namespace) return namespace['evaluate']
[ "def", "to_python", "(", "rule", ")", ":", "namespace", "=", "{", "'IN'", ":", "in_range_list", ",", "'WITHIN'", ":", "within_range_list", ",", "'MOD'", ":", "cldr_modulo", ",", "'extract_operands'", ":", "extract_operands", "}", "to_python_func", "=", "_PythonCompiler", "(", ")", ".", "compile", "result", "=", "[", "'def evaluate(n):'", ",", "' n, i, v, w, f, t = extract_operands(n)'", "]", "for", "(", "tag", ",", "ast", ")", "in", "PluralRule", ".", "parse", "(", "rule", ")", ".", "abstract", ":", "result", ".", "append", "(", "(", "' if (%s): return %r'", "%", "(", "to_python_func", "(", "ast", ")", ",", "str", "(", "tag", ")", ")", ")", ")", "result", ".", "append", "(", "(", "' return %r'", "%", "_fallback_tag", ")", ")", "code", "=", "compile", "(", "'\\n'", ".", "join", "(", "result", ")", ",", "'<rule>'", ",", "'exec'", ")", "eval", "(", "code", ",", "namespace", ")", "return", "namespace", "[", "'evaluate'", "]" ]
convert a list/dict of rules or a pluralrule object into a regular python function .
train
false
20,320
def check_for_invalid_qos_spec_combination(info, volume_type): if (info['legacy'] and info['spec']): msg = (_('Conflicting QoS specifications in volume type %s: when QoS spec is associated to volume type, legacy "netapp:qos_policy_group" is not allowed in the volume type extra specs.') % volume_type['id']) raise exception.Invalid(msg)
[ "def", "check_for_invalid_qos_spec_combination", "(", "info", ",", "volume_type", ")", ":", "if", "(", "info", "[", "'legacy'", "]", "and", "info", "[", "'spec'", "]", ")", ":", "msg", "=", "(", "_", "(", "'Conflicting QoS specifications in volume type %s: when QoS spec is associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed in the volume type extra specs.'", ")", "%", "volume_type", "[", "'id'", "]", ")", "raise", "exception", ".", "Invalid", "(", "msg", ")" ]
invalidate qos spec if both legacy and non-legacy info is present .
train
false
20,321
def fu(rv, measure=(lambda x: (L(x), x.count_ops()))): fRL1 = greedy(RL1, measure) fRL2 = greedy(RL2, measure) was = rv rv = sympify(rv) if (not isinstance(rv, Expr)): return rv.func(*[fu(a, measure=measure) for a in rv.args]) rv = TR1(rv) if rv.has(tan, cot): rv1 = fRL1(rv) if (measure(rv1) < measure(rv)): rv = rv1 if rv.has(tan, cot): rv = TR2(rv) if rv.has(sin, cos): rv1 = fRL2(rv) rv2 = TR8(TRmorrie(rv1)) rv = min([was, rv, rv1, rv2], key=measure) return min(TR2i(rv), rv, key=measure)
[ "def", "fu", "(", "rv", ",", "measure", "=", "(", "lambda", "x", ":", "(", "L", "(", "x", ")", ",", "x", ".", "count_ops", "(", ")", ")", ")", ")", ":", "fRL1", "=", "greedy", "(", "RL1", ",", "measure", ")", "fRL2", "=", "greedy", "(", "RL2", ",", "measure", ")", "was", "=", "rv", "rv", "=", "sympify", "(", "rv", ")", "if", "(", "not", "isinstance", "(", "rv", ",", "Expr", ")", ")", ":", "return", "rv", ".", "func", "(", "*", "[", "fu", "(", "a", ",", "measure", "=", "measure", ")", "for", "a", "in", "rv", ".", "args", "]", ")", "rv", "=", "TR1", "(", "rv", ")", "if", "rv", ".", "has", "(", "tan", ",", "cot", ")", ":", "rv1", "=", "fRL1", "(", "rv", ")", "if", "(", "measure", "(", "rv1", ")", "<", "measure", "(", "rv", ")", ")", ":", "rv", "=", "rv1", "if", "rv", ".", "has", "(", "tan", ",", "cot", ")", ":", "rv", "=", "TR2", "(", "rv", ")", "if", "rv", ".", "has", "(", "sin", ",", "cos", ")", ":", "rv1", "=", "fRL2", "(", "rv", ")", "rv2", "=", "TR8", "(", "TRmorrie", "(", "rv1", ")", ")", "rv", "=", "min", "(", "[", "was", ",", "rv", ",", "rv1", ",", "rv2", "]", ",", "key", "=", "measure", ")", "return", "min", "(", "TR2i", "(", "rv", ")", ",", "rv", ",", "key", "=", "measure", ")" ]
attempt to simplify expression by using transformation rules given in the algorithm by fu et al .
train
false
20,322
def isprint(c): return (c in (((string.ascii_letters + string.digits) + string.punctuation) + ' '))
[ "def", "isprint", "(", "c", ")", ":", "return", "(", "c", "in", "(", "(", "(", "string", ".", "ascii_letters", "+", "string", ".", "digits", ")", "+", "string", ".", "punctuation", ")", "+", "' '", ")", ")" ]
isprint(c) -> bool return true if a character is printable .
train
false
20,324
def getDefaultIntent(profile): try: if (not isinstance(profile, ImageCmsProfile)): profile = ImageCmsProfile(profile) return profile.profile.rendering_intent except (AttributeError, IOError, TypeError, ValueError) as v: raise PyCMSError(v)
[ "def", "getDefaultIntent", "(", "profile", ")", ":", "try", ":", "if", "(", "not", "isinstance", "(", "profile", ",", "ImageCmsProfile", ")", ")", ":", "profile", "=", "ImageCmsProfile", "(", "profile", ")", "return", "profile", ".", "profile", ".", "rendering_intent", "except", "(", "AttributeError", ",", "IOError", ",", "TypeError", ",", "ValueError", ")", "as", "v", ":", "raise", "PyCMSError", "(", "v", ")" ]
gets the default intent name for the given profile .
train
false
20,325
def gridsearch(Classifier, documents=[], folds=10, **kwargs): def product(*args): p = [[]] for iterable in args: p = [(x + [y]) for x in p for y in iterable] for p in p: (yield tuple(p)) s = [] p = [] for (k, v) in kwargs.items(): p.append([(k, v) for v in v]) for p in product(*p): p = dict(p) s.append((K_fold_cross_validation(Classifier, documents, folds, **p), p)) return sorted(s, reverse=True)
[ "def", "gridsearch", "(", "Classifier", ",", "documents", "=", "[", "]", ",", "folds", "=", "10", ",", "**", "kwargs", ")", ":", "def", "product", "(", "*", "args", ")", ":", "p", "=", "[", "[", "]", "]", "for", "iterable", "in", "args", ":", "p", "=", "[", "(", "x", "+", "[", "y", "]", ")", "for", "x", "in", "p", "for", "y", "in", "iterable", "]", "for", "p", "in", "p", ":", "(", "yield", "tuple", "(", "p", ")", ")", "s", "=", "[", "]", "p", "=", "[", "]", "for", "(", "k", ",", "v", ")", "in", "kwargs", ".", "items", "(", ")", ":", "p", ".", "append", "(", "[", "(", "k", ",", "v", ")", "for", "v", "in", "v", "]", ")", "for", "p", "in", "product", "(", "*", "p", ")", ":", "p", "=", "dict", "(", "p", ")", "s", ".", "append", "(", "(", "K_fold_cross_validation", "(", "Classifier", ",", "documents", ",", "folds", ",", "**", "p", ")", ",", "p", ")", ")", "return", "sorted", "(", "s", ",", "reverse", "=", "True", ")" ]
returns the test results for every combination of optional parameters .
train
false
20,326
def autocommit(using=None): warnings.warn('autocommit is deprecated in favor of set_autocommit.', PendingDeprecationWarning, stacklevel=2) def entering(using): enter_transaction_management(managed=False, using=using) def exiting(exc_type, using): leave_transaction_management(using=using) return _transaction_func(entering, exiting, using)
[ "def", "autocommit", "(", "using", "=", "None", ")", ":", "warnings", ".", "warn", "(", "'autocommit is deprecated in favor of set_autocommit.'", ",", "PendingDeprecationWarning", ",", "stacklevel", "=", "2", ")", "def", "entering", "(", "using", ")", ":", "enter_transaction_management", "(", "managed", "=", "False", ",", "using", "=", "using", ")", "def", "exiting", "(", "exc_type", ",", "using", ")", ":", "leave_transaction_management", "(", "using", "=", "using", ")", "return", "_transaction_func", "(", "entering", ",", "exiting", ",", "using", ")" ]
decorator that activates commit on save .
train
false
20,328
def _get_cookie_settings(request): if request.session.get_expire_at_browser_close(): max_age = None expires = None else: max_age = request.session.get_expiry_age() expires_time = (time.time() + max_age) expires = cookie_date(expires_time) cookie_settings = {u'max_age': max_age, u'expires': expires, u'domain': settings.SESSION_COOKIE_DOMAIN, u'path': u'/', u'httponly': None} return cookie_settings
[ "def", "_get_cookie_settings", "(", "request", ")", ":", "if", "request", ".", "session", ".", "get_expire_at_browser_close", "(", ")", ":", "max_age", "=", "None", "expires", "=", "None", "else", ":", "max_age", "=", "request", ".", "session", ".", "get_expiry_age", "(", ")", "expires_time", "=", "(", "time", ".", "time", "(", ")", "+", "max_age", ")", "expires", "=", "cookie_date", "(", "expires_time", ")", "cookie_settings", "=", "{", "u'max_age'", ":", "max_age", ",", "u'expires'", ":", "expires", ",", "u'domain'", ":", "settings", ".", "SESSION_COOKIE_DOMAIN", ",", "u'path'", ":", "u'/'", ",", "u'httponly'", ":", "None", "}", "return", "cookie_settings" ]
returns the common cookie settings .
train
false
20,329
@webauth.SecurityCheck def RenderHelp(request, path, document_root=None, content_type=None): _ = document_root _ = content_type request.REQ = request.REQUEST help_path = request.path.split('/', 2)[(-1)] if (not help_path): return AccessDenied('Error: Invalid help path.') try: user_record = aff4.FACTORY.Open(aff4.ROOT_URN.Add('users').Add(request.user), aff4_users.GRRUser, token=BuildToken(request, 60)) settings = user_record.Get(user_record.Schema.GUI_SETTINGS) except IOError: settings = aff4_users.GRRUser.SchemaCls.GUI_SETTINGS() if (settings.docs_location == settings.DocsLocation.REMOTE): return RedirectToRemoteHelp(help_path) else: static_handler_components = urls.static_handler.split('.') static_handler_module = importlib.import_module('.'.join(static_handler_components[0:(-1)])) static_handler = getattr(static_handler_module, static_handler_components[(-1)]) return static_handler(request, path, document_root=config_lib.CONFIG['AdminUI.help_root'])
[ "@", "webauth", ".", "SecurityCheck", "def", "RenderHelp", "(", "request", ",", "path", ",", "document_root", "=", "None", ",", "content_type", "=", "None", ")", ":", "_", "=", "document_root", "_", "=", "content_type", "request", ".", "REQ", "=", "request", ".", "REQUEST", "help_path", "=", "request", ".", "path", ".", "split", "(", "'/'", ",", "2", ")", "[", "(", "-", "1", ")", "]", "if", "(", "not", "help_path", ")", ":", "return", "AccessDenied", "(", "'Error: Invalid help path.'", ")", "try", ":", "user_record", "=", "aff4", ".", "FACTORY", ".", "Open", "(", "aff4", ".", "ROOT_URN", ".", "Add", "(", "'users'", ")", ".", "Add", "(", "request", ".", "user", ")", ",", "aff4_users", ".", "GRRUser", ",", "token", "=", "BuildToken", "(", "request", ",", "60", ")", ")", "settings", "=", "user_record", ".", "Get", "(", "user_record", ".", "Schema", ".", "GUI_SETTINGS", ")", "except", "IOError", ":", "settings", "=", "aff4_users", ".", "GRRUser", ".", "SchemaCls", ".", "GUI_SETTINGS", "(", ")", "if", "(", "settings", ".", "docs_location", "==", "settings", ".", "DocsLocation", ".", "REMOTE", ")", ":", "return", "RedirectToRemoteHelp", "(", "help_path", ")", "else", ":", "static_handler_components", "=", "urls", ".", "static_handler", ".", "split", "(", "'.'", ")", "static_handler_module", "=", "importlib", ".", "import_module", "(", "'.'", ".", "join", "(", "static_handler_components", "[", "0", ":", "(", "-", "1", ")", "]", ")", ")", "static_handler", "=", "getattr", "(", "static_handler_module", ",", "static_handler_components", "[", "(", "-", "1", ")", "]", ")", "return", "static_handler", "(", "request", ",", "path", ",", "document_root", "=", "config_lib", ".", "CONFIG", "[", "'AdminUI.help_root'", "]", ")" ]
either serves local help files or redirects to the remote ones .
train
false
20,331
def _get_closest_match(s, keys): threshold = 3 minmatch = None mindist = (threshold + 1) for key in keys: d = _levenshtein(s, key) if (d < mindist): minmatch = key mindist = d if (mindist <= threshold): return minmatch return None
[ "def", "_get_closest_match", "(", "s", ",", "keys", ")", ":", "threshold", "=", "3", "minmatch", "=", "None", "mindist", "=", "(", "threshold", "+", "1", ")", "for", "key", "in", "keys", ":", "d", "=", "_levenshtein", "(", "s", ",", "key", ")", "if", "(", "d", "<", "mindist", ")", ":", "minmatch", "=", "key", "mindist", "=", "d", "if", "(", "mindist", "<=", "threshold", ")", ":", "return", "minmatch", "return", "None" ]
returns a probable match for the given key s out of the possible keys in keys .
train
false
20,332
def decorator_factory(decfac): return partial((lambda df, param: decorator(partial(df, param))), decfac)
[ "def", "decorator_factory", "(", "decfac", ")", ":", "return", "partial", "(", "(", "lambda", "df", ",", "param", ":", "decorator", "(", "partial", "(", "df", ",", "param", ")", ")", ")", ",", "decfac", ")" ]
decorator_factory returns a one-parameter family of decorators .
train
false
20,333
def load_npz(filename, obj): with numpy.load(filename) as f: d = NpzDeserializer(f) d.load(obj)
[ "def", "load_npz", "(", "filename", ",", "obj", ")", ":", "with", "numpy", ".", "load", "(", "filename", ")", "as", "f", ":", "d", "=", "NpzDeserializer", "(", "f", ")", "d", ".", "load", "(", "obj", ")" ]
load a sparse matrix from a file using .
train
false
20,335
def hashable_index(index): if (type(index) is tuple): return tuple(map(hashable_index, index)) elif isinstance(index, list): return hashable_list(index) elif isinstance(index, slice): return _slice(index.start, index.stop, index.step) return index
[ "def", "hashable_index", "(", "index", ")", ":", "if", "(", "type", "(", "index", ")", "is", "tuple", ")", ":", "return", "tuple", "(", "map", "(", "hashable_index", ",", "index", ")", ")", "elif", "isinstance", "(", "index", ",", "list", ")", ":", "return", "hashable_list", "(", "index", ")", "elif", "isinstance", "(", "index", ",", "slice", ")", ":", "return", "_slice", "(", "index", ".", "start", ",", "index", ".", "stop", ",", "index", ".", "step", ")", "return", "index" ]
convert slice-thing into something hashable .
train
false
20,337
def group_snapshot_get_all_by_project(context, project_id, filters=None): return IMPL.group_snapshot_get_all_by_project(context, project_id, filters)
[ "def", "group_snapshot_get_all_by_project", "(", "context", ",", "project_id", ",", "filters", "=", "None", ")", ":", "return", "IMPL", ".", "group_snapshot_get_all_by_project", "(", "context", ",", "project_id", ",", "filters", ")" ]
get all group snapshots belonging to a project .
train
false
20,340
def is_process_running(node, name): command = ['pidof', '-x', name] d = node.run_as_root(command) def not_existing(failure): failure.trap(ProcessTerminated) return False d.addCallbacks((lambda result: True), not_existing) return d
[ "def", "is_process_running", "(", "node", ",", "name", ")", ":", "command", "=", "[", "'pidof'", ",", "'-x'", ",", "name", "]", "d", "=", "node", ".", "run_as_root", "(", "command", ")", "def", "not_existing", "(", "failure", ")", ":", "failure", ".", "trap", "(", "ProcessTerminated", ")", "return", "False", "d", ".", "addCallbacks", "(", "(", "lambda", "result", ":", "True", ")", ",", "not_existing", ")", "return", "d" ]
returns true if a process with pid is running .
train
false
20,341
def get_jinja2(factory=Jinja2, key=_registry_key, app=None): app = (app or webapp2.get_app()) jinja2 = app.registry.get(key) if (not jinja2): jinja2 = app.registry[key] = factory(app) return jinja2
[ "def", "get_jinja2", "(", "factory", "=", "Jinja2", ",", "key", "=", "_registry_key", ",", "app", "=", "None", ")", ":", "app", "=", "(", "app", "or", "webapp2", ".", "get_app", "(", ")", ")", "jinja2", "=", "app", ".", "registry", ".", "get", "(", "key", ")", "if", "(", "not", "jinja2", ")", ":", "jinja2", "=", "app", ".", "registry", "[", "key", "]", "=", "factory", "(", "app", ")", "return", "jinja2" ]
returns an instance of :class:jinja2 from the app registry .
train
false
20,342
@utils.decorator def transactional(func, args, kwds, **options): return transactional_async.wrapped_decorator(func, args, kwds, **options).get_result()
[ "@", "utils", ".", "decorator", "def", "transactional", "(", "func", ",", "args", ",", "kwds", ",", "**", "options", ")", ":", "return", "transactional_async", ".", "wrapped_decorator", "(", "func", ",", "args", ",", "kwds", ",", "**", "options", ")", ".", "get_result", "(", ")" ]
decorator to make a function automatically run in a transaction .
train
true
20,343
def simple_key_binder(mod, keynames=None): def func(dgroup): for key in dgroup.keys[:]: dgroup.qtile.unmapKey(key) dgroup.keys.remove(key) if keynames: keys = keynames else: keys = list(map(str, (list(range(1, 10)) + [0]))) for (keyname, group) in zip(keys, dgroup.qtile.groups): name = group.name key = Key([mod], keyname, lazy.group[name].toscreen()) key_s = Key([mod, 'shift'], keyname, lazy.window.togroup(name)) key_c = Key([mod, 'control'], keyname, lazy.group.switch_groups(name)) dgroup.keys.append(key) dgroup.keys.append(key_s) dgroup.keys.append(key_c) dgroup.qtile.mapKey(key) dgroup.qtile.mapKey(key_s) dgroup.qtile.mapKey(key_c) return func
[ "def", "simple_key_binder", "(", "mod", ",", "keynames", "=", "None", ")", ":", "def", "func", "(", "dgroup", ")", ":", "for", "key", "in", "dgroup", ".", "keys", "[", ":", "]", ":", "dgroup", ".", "qtile", ".", "unmapKey", "(", "key", ")", "dgroup", ".", "keys", ".", "remove", "(", "key", ")", "if", "keynames", ":", "keys", "=", "keynames", "else", ":", "keys", "=", "list", "(", "map", "(", "str", ",", "(", "list", "(", "range", "(", "1", ",", "10", ")", ")", "+", "[", "0", "]", ")", ")", ")", "for", "(", "keyname", ",", "group", ")", "in", "zip", "(", "keys", ",", "dgroup", ".", "qtile", ".", "groups", ")", ":", "name", "=", "group", ".", "name", "key", "=", "Key", "(", "[", "mod", "]", ",", "keyname", ",", "lazy", ".", "group", "[", "name", "]", ".", "toscreen", "(", ")", ")", "key_s", "=", "Key", "(", "[", "mod", ",", "'shift'", "]", ",", "keyname", ",", "lazy", ".", "window", ".", "togroup", "(", "name", ")", ")", "key_c", "=", "Key", "(", "[", "mod", ",", "'control'", "]", ",", "keyname", ",", "lazy", ".", "group", ".", "switch_groups", "(", "name", ")", ")", "dgroup", ".", "keys", ".", "append", "(", "key", ")", "dgroup", ".", "keys", ".", "append", "(", "key_s", ")", "dgroup", ".", "keys", ".", "append", "(", "key_c", ")", "dgroup", ".", "qtile", ".", "mapKey", "(", "key", ")", "dgroup", ".", "qtile", ".", "mapKey", "(", "key_s", ")", "dgroup", ".", "qtile", ".", "mapKey", "(", "key_c", ")", "return", "func" ]
bind keys to mod+group position or to the keys specified as second argument .
train
false
20,344
def test_bad_algo_option(script, tmpdir): result = script.pip('hash', '-a', 'poppycock', _hello_file(tmpdir), expect_error=True) assert ("invalid choice: 'poppycock'" in str(result))
[ "def", "test_bad_algo_option", "(", "script", ",", "tmpdir", ")", ":", "result", "=", "script", ".", "pip", "(", "'hash'", ",", "'-a'", ",", "'poppycock'", ",", "_hello_file", "(", "tmpdir", ")", ",", "expect_error", "=", "True", ")", "assert", "(", "\"invalid choice: 'poppycock'\"", "in", "str", "(", "result", ")", ")" ]
make sure the -a option raises an error when given a bad operand .
train
false
20,345
def partname_to_device(part): return os.path.join(os.sep, 'dev', part)
[ "def", "partname_to_device", "(", "part", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "sep", ",", "'dev'", ",", "part", ")" ]
converts a partition name to its associated device .
train
false
20,346
def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs, coil_type): (parallel, p_fun, _) = parallel_func(_do_inf_pots, n_jobs) nas = np.array_split B = np.sum(parallel((p_fun(mri_rr, sr.copy(), mri_Q, sol.copy()) for (sr, sol) in zip(nas(bem_rr, n_jobs), nas(solution.T, n_jobs)))), axis=0) if (coil_type == 'meg'): (parallel, p_fun, _) = parallel_func(_do_prim_curr, n_jobs) pcc = np.concatenate(parallel((p_fun(rr, c) for c in nas(coils, n_jobs))), axis=1) B += pcc B *= _MAG_FACTOR return B
[ "def", "_bem_pot_or_field", "(", "rr", ",", "mri_rr", ",", "mri_Q", ",", "coils", ",", "solution", ",", "bem_rr", ",", "n_jobs", ",", "coil_type", ")", ":", "(", "parallel", ",", "p_fun", ",", "_", ")", "=", "parallel_func", "(", "_do_inf_pots", ",", "n_jobs", ")", "nas", "=", "np", ".", "array_split", "B", "=", "np", ".", "sum", "(", "parallel", "(", "(", "p_fun", "(", "mri_rr", ",", "sr", ".", "copy", "(", ")", ",", "mri_Q", ",", "sol", ".", "copy", "(", ")", ")", "for", "(", "sr", ",", "sol", ")", "in", "zip", "(", "nas", "(", "bem_rr", ",", "n_jobs", ")", ",", "nas", "(", "solution", ".", "T", ",", "n_jobs", ")", ")", ")", ")", ",", "axis", "=", "0", ")", "if", "(", "coil_type", "==", "'meg'", ")", ":", "(", "parallel", ",", "p_fun", ",", "_", ")", "=", "parallel_func", "(", "_do_prim_curr", ",", "n_jobs", ")", "pcc", "=", "np", ".", "concatenate", "(", "parallel", "(", "(", "p_fun", "(", "rr", ",", "c", ")", "for", "c", "in", "nas", "(", "coils", ",", "n_jobs", ")", ")", ")", ",", "axis", "=", "1", ")", "B", "+=", "pcc", "B", "*=", "_MAG_FACTOR", "return", "B" ]
calculate the magnetic field or electric potential forward solution .
train
false
20,348
def run_setup(setup_script, args): old_dir = os.getcwd() save_argv = sys.argv[:] save_path = sys.path[:] setup_dir = os.path.abspath(os.path.dirname(setup_script)) temp_dir = os.path.join(setup_dir, 'temp') if (not os.path.isdir(temp_dir)): os.makedirs(temp_dir) save_tmp = tempfile.tempdir save_modules = sys.modules.copy() pr_state = pkg_resources.__getstate__() try: tempfile.tempdir = temp_dir os.chdir(setup_dir) try: sys.argv[:] = ([setup_script] + list(args)) sys.path.insert(0, setup_dir) working_set.__init__() working_set.callbacks.append((lambda dist: dist.activate())) DirectorySandbox(setup_dir).run((lambda : execfile('setup.py', {'__file__': setup_script, '__name__': '__main__'}))) except SystemExit: v = sys.exc_info()[1] if (v.args and v.args[0]): raise finally: pkg_resources.__setstate__(pr_state) sys.modules.update(save_modules) del_modules = [mod_name for mod_name in sys.modules if ((mod_name not in save_modules) and (not mod_name.startswith('encodings.')))] list(map(sys.modules.__delitem__, del_modules)) os.chdir(old_dir) sys.path[:] = save_path sys.argv[:] = save_argv tempfile.tempdir = save_tmp
[ "def", "run_setup", "(", "setup_script", ",", "args", ")", ":", "old_dir", "=", "os", ".", "getcwd", "(", ")", "save_argv", "=", "sys", ".", "argv", "[", ":", "]", "save_path", "=", "sys", ".", "path", "[", ":", "]", "setup_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "setup_script", ")", ")", "temp_dir", "=", "os", ".", "path", ".", "join", "(", "setup_dir", ",", "'temp'", ")", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "temp_dir", ")", ")", ":", "os", ".", "makedirs", "(", "temp_dir", ")", "save_tmp", "=", "tempfile", ".", "tempdir", "save_modules", "=", "sys", ".", "modules", ".", "copy", "(", ")", "pr_state", "=", "pkg_resources", ".", "__getstate__", "(", ")", "try", ":", "tempfile", ".", "tempdir", "=", "temp_dir", "os", ".", "chdir", "(", "setup_dir", ")", "try", ":", "sys", ".", "argv", "[", ":", "]", "=", "(", "[", "setup_script", "]", "+", "list", "(", "args", ")", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "setup_dir", ")", "working_set", ".", "__init__", "(", ")", "working_set", ".", "callbacks", ".", "append", "(", "(", "lambda", "dist", ":", "dist", ".", "activate", "(", ")", ")", ")", "DirectorySandbox", "(", "setup_dir", ")", ".", "run", "(", "(", "lambda", ":", "execfile", "(", "'setup.py'", ",", "{", "'__file__'", ":", "setup_script", ",", "'__name__'", ":", "'__main__'", "}", ")", ")", ")", "except", "SystemExit", ":", "v", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "(", "v", ".", "args", "and", "v", ".", "args", "[", "0", "]", ")", ":", "raise", "finally", ":", "pkg_resources", ".", "__setstate__", "(", "pr_state", ")", "sys", ".", "modules", ".", "update", "(", "save_modules", ")", "del_modules", "=", "[", "mod_name", "for", "mod_name", "in", "sys", ".", "modules", "if", "(", "(", "mod_name", "not", "in", "save_modules", ")", "and", "(", "not", "mod_name", ".", "startswith", "(", "'encodings.'", ")", ")", ")", "]", "list", "(", "map", "(", "sys", ".", 
"modules", ".", "__delitem__", ",", "del_modules", ")", ")", "os", ".", "chdir", "(", "old_dir", ")", "sys", ".", "path", "[", ":", "]", "=", "save_path", "sys", ".", "argv", "[", ":", "]", "=", "save_argv", "tempfile", ".", "tempdir", "=", "save_tmp" ]
run a setup script in a somewhat controlled environment .
train
true
20,349
def property_from_list(index): @empty_if_none def property_from_list_lambda(values): if (len(values) > index): return values[index] return '' return property_from_list_lambda
[ "def", "property_from_list", "(", "index", ")", ":", "@", "empty_if_none", "def", "property_from_list_lambda", "(", "values", ")", ":", "if", "(", "len", "(", "values", ")", ">", "index", ")", ":", "return", "values", "[", "index", "]", "return", "''", "return", "property_from_list_lambda" ]
return the nth item from a list .
train
false
20,350
def refresh_action(parent): return qtutils.add_action(parent, cmds.Refresh.name(), cmds.run(cmds.Refresh), hotkeys.REFRESH)
[ "def", "refresh_action", "(", "parent", ")", ":", "return", "qtutils", ".", "add_action", "(", "parent", ",", "cmds", ".", "Refresh", ".", "name", "(", ")", ",", "cmds", ".", "run", "(", "cmds", ".", "Refresh", ")", ",", "hotkeys", ".", "REFRESH", ")" ]
refresh the repository state -> qaction .
train
false
20,351
def digest(un, pw, nonce=None, uri=None, method=u'GET', nc=1, qop=u'auth', realm=REALM, cnonce=None, algorithm=u'MD5', body='', modify=(lambda x: None)): from calibre.srv.auth import DigestAuth templ = u'username="{un}", realm="{realm}", qop={qop}, method="{method}", nonce="{nonce}", uri="{uri}", nc={nc}, algorithm="{algorithm}", cnonce="{cnonce}", response="{response}"' h = templ.format(un=un, realm=realm, qop=qop, uri=uri, method=method, nonce=nonce, nc=nc, cnonce=cnonce, algorithm=algorithm, response=None) da = DigestAuth(h) modify(da) pw = getattr(da, u'pw', pw) class Data(object, ): def __init__(self): self.method = method def peek(): return body response = da.request_digest(pw, Data()) return (u'Digest ' + templ.format(un=un, realm=realm, qop=qop, uri=uri, method=method, nonce=nonce, nc=nc, cnonce=cnonce, algorithm=algorithm, response=response)).encode(u'ascii')
[ "def", "digest", "(", "un", ",", "pw", ",", "nonce", "=", "None", ",", "uri", "=", "None", ",", "method", "=", "u'GET'", ",", "nc", "=", "1", ",", "qop", "=", "u'auth'", ",", "realm", "=", "REALM", ",", "cnonce", "=", "None", ",", "algorithm", "=", "u'MD5'", ",", "body", "=", "''", ",", "modify", "=", "(", "lambda", "x", ":", "None", ")", ")", ":", "from", "calibre", ".", "srv", ".", "auth", "import", "DigestAuth", "templ", "=", "u'username=\"{un}\", realm=\"{realm}\", qop={qop}, method=\"{method}\", nonce=\"{nonce}\", uri=\"{uri}\", nc={nc}, algorithm=\"{algorithm}\", cnonce=\"{cnonce}\", response=\"{response}\"'", "h", "=", "templ", ".", "format", "(", "un", "=", "un", ",", "realm", "=", "realm", ",", "qop", "=", "qop", ",", "uri", "=", "uri", ",", "method", "=", "method", ",", "nonce", "=", "nonce", ",", "nc", "=", "nc", ",", "cnonce", "=", "cnonce", ",", "algorithm", "=", "algorithm", ",", "response", "=", "None", ")", "da", "=", "DigestAuth", "(", "h", ")", "modify", "(", "da", ")", "pw", "=", "getattr", "(", "da", ",", "u'pw'", ",", "pw", ")", "class", "Data", "(", "object", ",", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "method", "=", "method", "def", "peek", "(", ")", ":", "return", "body", "response", "=", "da", ".", "request_digest", "(", "pw", ",", "Data", "(", ")", ")", "return", "(", "u'Digest '", "+", "templ", ".", "format", "(", "un", "=", "un", ",", "realm", "=", "realm", ",", "qop", "=", "qop", ",", "uri", "=", "uri", ",", "method", "=", "method", ",", "nonce", "=", "nonce", ",", "nc", "=", "nc", ",", "cnonce", "=", "cnonce", ",", "algorithm", "=", "algorithm", ",", "response", "=", "response", ")", ")", ".", "encode", "(", "u'ascii'", ")" ]
return a checksum digest for a string instr a string checksum : md5 the hashing algorithm to use to generate checksums .
train
false
20,352
def __getattr__(self, name): if (name == '__del__'): raise AttributeError('Without this, Python segfaults.') updateInstance(self) log.msg(('(rebuilding stale %s instance (%s))' % (reflect.qual(self.__class__), name))) result = getattr(self, name) return result
[ "def", "__getattr__", "(", "self", ",", "name", ")", ":", "if", "(", "name", "==", "'__del__'", ")", ":", "raise", "AttributeError", "(", "'Without this, Python segfaults.'", ")", "updateInstance", "(", "self", ")", "log", ".", "msg", "(", "(", "'(rebuilding stale %s instance (%s))'", "%", "(", "reflect", ".", "qual", "(", "self", ".", "__class__", ")", ",", "name", ")", ")", ")", "result", "=", "getattr", "(", "self", ",", "name", ")", "return", "result" ]
a getattr method to cause a class to be refreshed .
train
false
20,355
def sdm_from_dict(d, O): return sdm_strip(sdm_sort(list(d.items()), O))
[ "def", "sdm_from_dict", "(", "d", ",", "O", ")", ":", "return", "sdm_strip", "(", "sdm_sort", "(", "list", "(", "d", ".", "items", "(", ")", ")", ",", "O", ")", ")" ]
create an sdm from a dictionary .
train
false
20,356
def make_changed_file(path, env): def remove_file(path): try: os.remove(path) except OSError: pass latest = get_installable_version(version) new_path = remove_extension(path) with open(path, 'r') as templated_file: with open(new_path, 'w') as new_file: new_file.write(templated_file.read().replace(PLACEHOLDER, latest)) env.app.connect('build-finished', (lambda self, *args: remove_file(new_path)))
[ "def", "make_changed_file", "(", "path", ",", "env", ")", ":", "def", "remove_file", "(", "path", ")", ":", "try", ":", "os", ".", "remove", "(", "path", ")", "except", "OSError", ":", "pass", "latest", "=", "get_installable_version", "(", "version", ")", "new_path", "=", "remove_extension", "(", "path", ")", "with", "open", "(", "path", ",", "'r'", ")", "as", "templated_file", ":", "with", "open", "(", "new_path", ",", "'w'", ")", "as", "new_file", ":", "new_file", ".", "write", "(", "templated_file", ".", "read", "(", ")", ".", "replace", "(", "PLACEHOLDER", ",", "latest", ")", ")", "env", ".", "app", ".", "connect", "(", "'build-finished'", ",", "(", "lambda", "self", ",", "*", "args", ":", "remove_file", "(", "new_path", ")", ")", ")" ]
given the path to a template file .
train
false
20,357
def get_date_formats(): warnings.warn("'django.utils.translation.get_date_formats' is deprecated. Please update your code to use the new i18n aware formatting.", PendingDeprecationWarning) from google.appengine._internal.django.conf import settings date_format = ugettext('DATE_FORMAT') datetime_format = ugettext('DATETIME_FORMAT') time_format = ugettext('TIME_FORMAT') if (date_format == 'DATE_FORMAT'): date_format = settings.DATE_FORMAT if (datetime_format == 'DATETIME_FORMAT'): datetime_format = settings.DATETIME_FORMAT if (time_format == 'TIME_FORMAT'): time_format = settings.TIME_FORMAT return (date_format, datetime_format, time_format)
[ "def", "get_date_formats", "(", ")", ":", "warnings", ".", "warn", "(", "\"'django.utils.translation.get_date_formats' is deprecated. Please update your code to use the new i18n aware formatting.\"", ",", "PendingDeprecationWarning", ")", "from", "google", ".", "appengine", ".", "_internal", ".", "django", ".", "conf", "import", "settings", "date_format", "=", "ugettext", "(", "'DATE_FORMAT'", ")", "datetime_format", "=", "ugettext", "(", "'DATETIME_FORMAT'", ")", "time_format", "=", "ugettext", "(", "'TIME_FORMAT'", ")", "if", "(", "date_format", "==", "'DATE_FORMAT'", ")", ":", "date_format", "=", "settings", ".", "DATE_FORMAT", "if", "(", "datetime_format", "==", "'DATETIME_FORMAT'", ")", ":", "datetime_format", "=", "settings", ".", "DATETIME_FORMAT", "if", "(", "time_format", "==", "'TIME_FORMAT'", ")", ":", "time_format", "=", "settings", ".", "TIME_FORMAT", "return", "(", "date_format", ",", "datetime_format", ",", "time_format", ")" ]
checks whether translation files provide a translation for some technical message id to store date and time formats .
train
false
20,359
def configure_formats(options, config, log, formats=None): if (formats is None): formats = default_formats() config.HTMLExporter.template_file = 'basic' config.SlidesExporter.template_file = 'slides_reveal' config.TemplateExporter.template_path = [os.path.join(os.path.dirname(__file__), 'templates', 'nbconvert')] for (key, format) in formats.items(): exporter_cls = format.get('exporter', exporter_map[key]) if options.processes: formats[key]['exporter'] = exporter_cls else: formats[key]['exporter'] = exporter_cls(config=config, log=log) return formats
[ "def", "configure_formats", "(", "options", ",", "config", ",", "log", ",", "formats", "=", "None", ")", ":", "if", "(", "formats", "is", "None", ")", ":", "formats", "=", "default_formats", "(", ")", "config", ".", "HTMLExporter", ".", "template_file", "=", "'basic'", "config", ".", "SlidesExporter", ".", "template_file", "=", "'slides_reveal'", "config", ".", "TemplateExporter", ".", "template_path", "=", "[", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'templates'", ",", "'nbconvert'", ")", "]", "for", "(", "key", ",", "format", ")", "in", "formats", ".", "items", "(", ")", ":", "exporter_cls", "=", "format", ".", "get", "(", "'exporter'", ",", "exporter_map", "[", "key", "]", ")", "if", "options", ".", "processes", ":", "formats", "[", "key", "]", "[", "'exporter'", "]", "=", "exporter_cls", "else", ":", "formats", "[", "key", "]", "[", "'exporter'", "]", "=", "exporter_cls", "(", "config", "=", "config", ",", "log", "=", "log", ")", "return", "formats" ]
format-specific configuration .
train
false
20,360
def session_for_reply_channel(reply_channel): reply_name = reply_channel hashed = hashlib.md5(reply_name.encode('utf8')).hexdigest() session_key = ('chn' + hashed[:29]) session_engine = import_module(getattr(settings, 'CHANNEL_SESSION_ENGINE', settings.SESSION_ENGINE)) if (session_engine is signed_cookies): raise ValueError('You cannot use channels session functionality with signed cookie sessions!') instance = session_engine.SessionStore(session_key=session_key) instance._session.keys() instance._session_key = session_key return instance
[ "def", "session_for_reply_channel", "(", "reply_channel", ")", ":", "reply_name", "=", "reply_channel", "hashed", "=", "hashlib", ".", "md5", "(", "reply_name", ".", "encode", "(", "'utf8'", ")", ")", ".", "hexdigest", "(", ")", "session_key", "=", "(", "'chn'", "+", "hashed", "[", ":", "29", "]", ")", "session_engine", "=", "import_module", "(", "getattr", "(", "settings", ",", "'CHANNEL_SESSION_ENGINE'", ",", "settings", ".", "SESSION_ENGINE", ")", ")", "if", "(", "session_engine", "is", "signed_cookies", ")", ":", "raise", "ValueError", "(", "'You cannot use channels session functionality with signed cookie sessions!'", ")", "instance", "=", "session_engine", ".", "SessionStore", "(", "session_key", "=", "session_key", ")", "instance", ".", "_session", ".", "keys", "(", ")", "instance", ".", "_session_key", "=", "session_key", "return", "instance" ]
returns a session object tied to the reply_channel unicode string passed in as an argument .
train
false
20,361
@lower('print_item', types.Any) def print_item_impl(context, builder, sig, args): (ty,) = sig.args (val,) = args pyapi = context.get_python_api(builder) if context.enable_nrt: context.nrt.incref(builder, ty, val) obj = pyapi.from_native_value(ty, val) with builder.if_else(cgutils.is_not_null(builder, obj), likely=True) as (if_ok, if_error): with if_ok: pyapi.print_object(obj) pyapi.decref(obj) with if_error: cstr = context.insert_const_string(builder.module, 'the print() function') strobj = pyapi.string_from_string(cstr) pyapi.err_write_unraisable(strobj) pyapi.decref(strobj) res = context.get_dummy_value() return impl_ret_untracked(context, builder, sig.return_type, res)
[ "@", "lower", "(", "'print_item'", ",", "types", ".", "Any", ")", "def", "print_item_impl", "(", "context", ",", "builder", ",", "sig", ",", "args", ")", ":", "(", "ty", ",", ")", "=", "sig", ".", "args", "(", "val", ",", ")", "=", "args", "pyapi", "=", "context", ".", "get_python_api", "(", "builder", ")", "if", "context", ".", "enable_nrt", ":", "context", ".", "nrt", ".", "incref", "(", "builder", ",", "ty", ",", "val", ")", "obj", "=", "pyapi", ".", "from_native_value", "(", "ty", ",", "val", ")", "with", "builder", ".", "if_else", "(", "cgutils", ".", "is_not_null", "(", "builder", ",", "obj", ")", ",", "likely", "=", "True", ")", "as", "(", "if_ok", ",", "if_error", ")", ":", "with", "if_ok", ":", "pyapi", ".", "print_object", "(", "obj", ")", "pyapi", ".", "decref", "(", "obj", ")", "with", "if_error", ":", "cstr", "=", "context", ".", "insert_const_string", "(", "builder", ".", "module", ",", "'the print() function'", ")", "strobj", "=", "pyapi", ".", "string_from_string", "(", "cstr", ")", "pyapi", ".", "err_write_unraisable", "(", "strobj", ")", "pyapi", ".", "decref", "(", "strobj", ")", "res", "=", "context", ".", "get_dummy_value", "(", ")", "return", "impl_ret_untracked", "(", "context", ",", "builder", ",", "sig", ".", "return_type", ",", "res", ")" ]
print a single constant value .
train
false
20,362
def get_all_project_types(): global PROJECT_TYPES return list(PROJECT_TYPES.keys())
[ "def", "get_all_project_types", "(", ")", ":", "global", "PROJECT_TYPES", "return", "list", "(", "PROJECT_TYPES", ".", "keys", "(", ")", ")" ]
returns the availables project types .
train
false
20,363
def format_system_message(errno): ALLOCATE_BUFFER = 256 FROM_SYSTEM = 4096 flags = (ALLOCATE_BUFFER | FROM_SYSTEM) source = None message_id = errno language_id = 0 result_buffer = ctypes.wintypes.LPWSTR() buffer_size = 0 arguments = None bytes = ctypes.windll.kernel32.FormatMessageW(flags, source, message_id, language_id, ctypes.byref(result_buffer), buffer_size, arguments) handle_nonzero_success(bytes) message = result_buffer.value ctypes.windll.kernel32.LocalFree(result_buffer) return message
[ "def", "format_system_message", "(", "errno", ")", ":", "ALLOCATE_BUFFER", "=", "256", "FROM_SYSTEM", "=", "4096", "flags", "=", "(", "ALLOCATE_BUFFER", "|", "FROM_SYSTEM", ")", "source", "=", "None", "message_id", "=", "errno", "language_id", "=", "0", "result_buffer", "=", "ctypes", ".", "wintypes", ".", "LPWSTR", "(", ")", "buffer_size", "=", "0", "arguments", "=", "None", "bytes", "=", "ctypes", ".", "windll", ".", "kernel32", ".", "FormatMessageW", "(", "flags", ",", "source", ",", "message_id", ",", "language_id", ",", "ctypes", ".", "byref", "(", "result_buffer", ")", ",", "buffer_size", ",", "arguments", ")", "handle_nonzero_success", "(", "bytes", ")", "message", "=", "result_buffer", ".", "value", "ctypes", ".", "windll", ".", "kernel32", ".", "LocalFree", "(", "result_buffer", ")", "return", "message" ]
call formatmessage with a system error number to retrieve the descriptive error message .
train
true
20,365
def check_csv(option, opt, value): if isinstance(value, (list, tuple)): return value try: return splitstrip(value) except ValueError: raise OptionValueError(('option %s: invalid csv value: %r' % (opt, value)))
[ "def", "check_csv", "(", "option", ",", "opt", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "value", "try", ":", "return", "splitstrip", "(", "value", ")", "except", "ValueError", ":", "raise", "OptionValueError", "(", "(", "'option %s: invalid csv value: %r'", "%", "(", "opt", ",", "value", ")", ")", ")" ]
some of the curator methods should not operate against multiple indices at once .
train
false
20,366
def get_image_data_iter(uri): if uri.startswith('file://'): uri = uri.split('file://')[(-1)] return open(uri, 'r') return urllib.request.urlopen(uri)
[ "def", "get_image_data_iter", "(", "uri", ")", ":", "if", "uri", ".", "startswith", "(", "'file://'", ")", ":", "uri", "=", "uri", ".", "split", "(", "'file://'", ")", "[", "(", "-", "1", ")", "]", "return", "open", "(", "uri", ",", "'r'", ")", "return", "urllib", ".", "request", ".", "urlopen", "(", "uri", ")" ]
returns iterable object either for local file or uri .
train
false