Columns:
  id_within_dataset     int64    values 1 to 55.5k
  snippet               string   lengths 19 to 14.2k
  tokens                list     lengths 6 to 1.63k
  nl                    string   lengths 6 to 352
  split_within_dataset  string   1 value
  is_duplicated         bool     2 classes
3,362
def _duration_pb_to_timedelta(duration_pb): return datetime.timedelta(seconds=duration_pb.seconds, microseconds=(duration_pb.nanos / 1000.0))
[ "def", "_duration_pb_to_timedelta", "(", "duration_pb", ")", ":", "return", "datetime", ".", "timedelta", "(", "seconds", "=", "duration_pb", ".", "seconds", ",", "microseconds", "=", "(", "duration_pb", ".", "nanos", "/", "1000.0", ")", ")" ]
convert a duration protobuf to a python timedelta object .
train
true
3,363
def thread_first(val, *forms):
    def evalform_front(val, form):
        if callable(form):
            return form(val)
        if isinstance(form, tuple):
            (func, args) = (form[0], form[1:])
            args = ((val,) + args)
            return func(*args)
    return reduce(evalform_front, forms, val)
[ "def", "thread_first", "(", "val", ",", "*", "forms", ")", ":", "def", "evalform_front", "(", "val", ",", "form", ")", ":", "if", "callable", "(", "form", ")", ":", "return", "form", "(", "val", ")", "if", "isinstance", "(", "form", ",", "tuple", ")", ":", "(", "func", ",", "args", ")", "=", "(", "form", "[", "0", "]", ",", "form", "[", "1", ":", "]", ")", "args", "=", "(", "(", "val", ",", ")", "+", "args", ")", "return", "func", "(", "*", "args", ")", "return", "reduce", "(", "evalform_front", ",", "forms", ",", "val", ")" ]
thread value through a sequence of functions/forms .
train
false
3,364
def import_env_vars(environ, target):
    weblate_var = re.compile('^WEBLATE_[A-Za-z0-9_]+$')
    for (name, value) in environ.items():
        if weblate_var.match(name):
            try:
                setattr(target, name[8:], ast.literal_eval(Template(value).substitute(environ)))
            except ValueError as err:
                if (not err.args):
                    err.args = (("Error parsing %s = '%s': %s" % (name, value, err)),)
                raise
[ "def", "import_env_vars", "(", "environ", ",", "target", ")", ":", "weblate_var", "=", "re", ".", "compile", "(", "'^WEBLATE_[A-Za-z0-9_]+$'", ")", "for", "(", "name", ",", "value", ")", "in", "environ", ".", "items", "(", ")", ":", "if", "weblate_var", ".", "match", "(", "name", ")", ":", "try", ":", "setattr", "(", "target", ",", "name", "[", "8", ":", "]", ",", "ast", ".", "literal_eval", "(", "Template", "(", "value", ")", ".", "substitute", "(", "environ", ")", ")", ")", "except", "ValueError", "as", "err", ":", "if", "(", "not", "err", ".", "args", ")", ":", "err", ".", "args", "=", "(", "(", "\"Error parsing %s = '%s': %s\"", "%", "(", "name", ",", "value", ",", "err", ")", ")", ",", ")", "raise" ]
imports weblate_* variables into given object .
train
false
3,365
def CheckAppId(request_trusted, request_app_id, app_id):
    assert app_id
    Check((request_trusted or (app_id == request_app_id)), ('app "%s" cannot access app "%s"\'s data' % (request_app_id, app_id)))
[ "def", "CheckAppId", "(", "request_trusted", ",", "request_app_id", ",", "app_id", ")", ":", "assert", "app_id", "Check", "(", "(", "request_trusted", "or", "(", "app_id", "==", "request_app_id", ")", ")", ",", "(", "'app \"%s\" cannot access app \"%s\"\\'s data'", "%", "(", "request_app_id", ",", "app_id", ")", ")", ")" ]
check that this is the stub for app_id .
train
false
3,367
@contextlib.contextmanager
def save_lookups():
    namespace_dirs = {namespace: list(look.directories) for (namespace, look) in LOOKUP.items()}
    try:
        (yield)
    finally:
        LOOKUP.clear()
        for (namespace, directories) in namespace_dirs.items():
            for directory in directories:
                add_lookup(namespace, directory)
[ "@", "contextlib", ".", "contextmanager", "def", "save_lookups", "(", ")", ":", "namespace_dirs", "=", "{", "namespace", ":", "list", "(", "look", ".", "directories", ")", "for", "(", "namespace", ",", "look", ")", "in", "LOOKUP", ".", "items", "(", ")", "}", "try", ":", "(", "yield", ")", "finally", ":", "LOOKUP", ".", "clear", "(", ")", "for", "(", "namespace", ",", "directories", ")", "in", "namespace_dirs", ".", "items", "(", ")", ":", "for", "directory", "in", "directories", ":", "add_lookup", "(", "namespace", ",", "directory", ")" ]
a context manager to save and restore the mako template lookup path .
train
false
3,369
@pytest.fixture
def temporary_topic():
    pubsub_client = pubsub.Client()
    topic = pubsub_client.topic(TOPIC_NAME)
    if topic.exists():
        topic.delete()
    (yield)
    if topic.exists():
        topic.delete()
[ "@", "pytest", ".", "fixture", "def", "temporary_topic", "(", ")", ":", "pubsub_client", "=", "pubsub", ".", "Client", "(", ")", "topic", "=", "pubsub_client", ".", "topic", "(", "TOPIC_NAME", ")", "if", "topic", ".", "exists", "(", ")", ":", "topic", ".", "delete", "(", ")", "(", "yield", ")", "if", "topic", ".", "exists", "(", ")", ":", "topic", ".", "delete", "(", ")" ]
fixture that ensures the test topic does not exist before or after a test .
train
false
3,370
def _chain_procs(procs_args, **kwargs):
    last_stdout = None
    procs = []
    for (i, args) in enumerate(procs_args):
        proc_kwargs = kwargs.copy()
        if (i > 0):
            proc_kwargs['stdin'] = last_stdout
        if (i < (len(procs_args) - 1)):
            proc_kwargs['stdout'] = PIPE
        proc = Popen(args, **proc_kwargs)
        last_stdout = proc.stdout
        procs.append(proc)
    return procs
[ "def", "_chain_procs", "(", "procs_args", ",", "**", "kwargs", ")", ":", "last_stdout", "=", "None", "procs", "=", "[", "]", "for", "(", "i", ",", "args", ")", "in", "enumerate", "(", "procs_args", ")", ":", "proc_kwargs", "=", "kwargs", ".", "copy", "(", ")", "if", "(", "i", ">", "0", ")", ":", "proc_kwargs", "[", "'stdin'", "]", "=", "last_stdout", "if", "(", "i", "<", "(", "len", "(", "procs_args", ")", "-", "1", ")", ")", ":", "proc_kwargs", "[", "'stdout'", "]", "=", "PIPE", "proc", "=", "Popen", "(", "args", ",", "**", "proc_kwargs", ")", "last_stdout", "=", "proc", ".", "stdout", "procs", ".", "append", "(", "proc", ")", "return", "procs" ]
chains processes together via pipes . input: list of lists of command line arguments .
train
false
3,371
def arbitrary_address(family):
    if (family == 'AF_INET'):
        return ('localhost', 0)
    elif (family == 'AF_UNIX'):
        return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
    elif (family == 'AF_PIPE'):
        return tempfile.mktemp(prefix=('\\\\.\\pipe\\pyc-%d-%d-' % (os.getpid(), _mmap_counter.next())), dir='')
    else:
        raise ValueError('unrecognized family')
[ "def", "arbitrary_address", "(", "family", ")", ":", "if", "(", "family", "==", "'AF_INET'", ")", ":", "return", "(", "'localhost'", ",", "0", ")", "elif", "(", "family", "==", "'AF_UNIX'", ")", ":", "return", "tempfile", ".", "mktemp", "(", "prefix", "=", "'listener-'", ",", "dir", "=", "get_temp_dir", "(", ")", ")", "elif", "(", "family", "==", "'AF_PIPE'", ")", ":", "return", "tempfile", ".", "mktemp", "(", "prefix", "=", "(", "'\\\\\\\\.\\\\pipe\\\\pyc-%d-%d-'", "%", "(", "os", ".", "getpid", "(", ")", ",", "_mmap_counter", ".", "next", "(", ")", ")", ")", ",", "dir", "=", "''", ")", "else", ":", "raise", "ValueError", "(", "'unrecognized family'", ")" ]
return an arbitrary free address for the given family .
train
false
3,372
def init_django():
    global django, management, create_test_db, destroy_test_db
    global setup_test_environment, teardown_test_environment
    if (not django):
        return
    from django.core import management
    project_dir = management.setup_environ(settings)
    sys.path.insert(0, project_dir)
    try:
        from django.test.utils import create_test_db, destroy_test_db
    except ImportError:
        from django.db import connection
        create_test_db = connection.creation.create_test_db
        destroy_test_db = connection.creation.destroy_test_db
    from django.test.utils import setup_test_environment, teardown_test_environment
    return True
[ "def", "init_django", "(", ")", ":", "global", "django", ",", "management", ",", "create_test_db", ",", "destroy_test_db", "global", "setup_test_environment", ",", "teardown_test_environment", "if", "(", "not", "django", ")", ":", "return", "from", "django", ".", "core", "import", "management", "project_dir", "=", "management", ".", "setup_environ", "(", "settings", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "project_dir", ")", "try", ":", "from", "django", ".", "test", ".", "utils", "import", "create_test_db", ",", "destroy_test_db", "except", "ImportError", ":", "from", "django", ".", "db", "import", "connection", "create_test_db", "=", "connection", ".", "creation", ".", "create_test_db", "destroy_test_db", "=", "connection", ".", "creation", ".", "destroy_test_db", "from", "django", ".", "test", ".", "utils", "import", "setup_test_environment", ",", "teardown_test_environment", "return", "True" ]
bootstrap django and initialise this module .
train
false
3,374
def dct(x, type=2, n=None, axis=(-1), norm=None, overwrite_x=False):
    if ((type == 1) and (norm is not None)):
        raise NotImplementedError('Orthonormalization not yet supported for DCT-I')
    return _dct(x, type, n, axis, normalize=norm, overwrite_x=overwrite_x)
[ "def", "dct", "(", "x", ",", "type", "=", "2", ",", "n", "=", "None", ",", "axis", "=", "(", "-", "1", ")", ",", "norm", "=", "None", ",", "overwrite_x", "=", "False", ")", ":", "if", "(", "(", "type", "==", "1", ")", "and", "(", "norm", "is", "not", "None", ")", ")", ":", "raise", "NotImplementedError", "(", "'Orthonormalization not yet supported for DCT-I'", ")", "return", "_dct", "(", "x", ",", "type", ",", "n", ",", "axis", ",", "normalize", "=", "norm", ",", "overwrite_x", "=", "overwrite_x", ")" ]
return the discrete cosine transform of arbitrary type sequence x .
train
false
3,375
def app_pack(app, request, raise_ex=False, filenames=None):
    try:
        if (filenames is None):
            app_cleanup(app, request)
        filename = apath(('../deposit/web2py.app.%s.w2p' % app), request)
        w2p_pack(filename, apath(app, request), filenames=filenames)
        return filename
    except Exception as e:
        if raise_ex:
            raise
        return False
[ "def", "app_pack", "(", "app", ",", "request", ",", "raise_ex", "=", "False", ",", "filenames", "=", "None", ")", ":", "try", ":", "if", "(", "filenames", "is", "None", ")", ":", "app_cleanup", "(", "app", ",", "request", ")", "filename", "=", "apath", "(", "(", "'../deposit/web2py.app.%s.w2p'", "%", "app", ")", ",", "request", ")", "w2p_pack", "(", "filename", ",", "apath", "(", "app", ",", "request", ")", ",", "filenames", "=", "filenames", ")", "return", "filename", "except", "Exception", "as", "e", ":", "if", "raise_ex", ":", "raise", "return", "False" ]
builds a w2p package for the application . args: app: application name , request: the global request object . returns: filename of the w2p file or none on error .
train
false
3,376
def MsgUser(msg):
    msg_tested_versions = ['xp', 'vista', '2008', '2003']
    msg_args = ['/c', '%SystemRoot%\\System32\\msg.exe', '*', '/TIME:0']
    host_version = platform.platform().lower()
    if (not msg):
        return ('Command not ran.', 'Empty message.', (-1))
    else:
        msg_args.extend([msg])
    for version in msg_tested_versions:
        if (host_version.find(version) != (-1)):
            res = client_utils_common.Execute('cmd', msg_args, time_limit=(-1), bypass_whitelist=True)
            return res
    return ('', 'Command not available for this version.', (-1))
[ "def", "MsgUser", "(", "msg", ")", ":", "msg_tested_versions", "=", "[", "'xp'", ",", "'vista'", ",", "'2008'", ",", "'2003'", "]", "msg_args", "=", "[", "'/c'", ",", "'%SystemRoot%\\\\System32\\\\msg.exe'", ",", "'*'", ",", "'/TIME:0'", "]", "host_version", "=", "platform", ".", "platform", "(", ")", ".", "lower", "(", ")", "if", "(", "not", "msg", ")", ":", "return", "(", "'Command not ran.'", ",", "'Empty message.'", ",", "(", "-", "1", ")", ")", "else", ":", "msg_args", ".", "extend", "(", "[", "msg", "]", ")", "for", "version", "in", "msg_tested_versions", ":", "if", "(", "host_version", ".", "find", "(", "version", ")", "!=", "(", "-", "1", ")", ")", ":", "res", "=", "client_utils_common", ".", "Execute", "(", "'cmd'", ",", "msg_args", ",", "time_limit", "=", "(", "-", "1", ")", ",", "bypass_whitelist", "=", "True", ")", "return", "res", "return", "(", "''", ",", "'Command not available for this version.'", ",", "(", "-", "1", ")", ")" ]
sends a message to a user .
train
true
3,377
def get_bucket_location_or_error(access_key, secret_key, bucket_name):
    try:
        connection = connect_s3(access_key, secret_key)
    except:
        raise InvalidAuthError()
    if ((bucket_name != bucket_name.lower()) or ('.' in bucket_name)):
        connection.calling_format = OrdinaryCallingFormat()
    try:
        return connect_s3(access_key, secret_key).get_bucket(bucket_name, validate=False).get_location()
    except exception.S3ResponseError:
        raise InvalidFolderError()
[ "def", "get_bucket_location_or_error", "(", "access_key", ",", "secret_key", ",", "bucket_name", ")", ":", "try", ":", "connection", "=", "connect_s3", "(", "access_key", ",", "secret_key", ")", "except", ":", "raise", "InvalidAuthError", "(", ")", "if", "(", "(", "bucket_name", "!=", "bucket_name", ".", "lower", "(", ")", ")", "or", "(", "'.'", "in", "bucket_name", ")", ")", ":", "connection", ".", "calling_format", "=", "OrdinaryCallingFormat", "(", ")", "try", ":", "return", "connect_s3", "(", "access_key", ",", "secret_key", ")", ".", "get_bucket", "(", "bucket_name", ",", "validate", "=", "False", ")", ".", "get_location", "(", ")", "except", "exception", ".", "S3ResponseError", ":", "raise", "InvalidFolderError", "(", ")" ]
returns the location of a bucket or raises addonerror .
train
false
3,378
def delete_invite(request, invite_pk):
    invite = get_object_or_404(Invite, pk=invite_pk)
    group = invite.group
    if (group.curators.filter(id=request.user.userprofile.id).exists() or request.user.userprofile.is_manager):
        redeemer = invite.redeemer
        invite.delete()
        notify_redeemer_invitation_invalid.delay(redeemer.pk, group.pk)
        msg = _(u'The invitation to {0} has been successfully revoked.').format(redeemer)
        messages.success(request, msg)
        next_section = request.GET.get('next')
        next_url = urlparams(reverse('groups:group_edit', args=[group.url]), next_section)
        return HttpResponseRedirect(next_url)
    raise Http404()
[ "def", "delete_invite", "(", "request", ",", "invite_pk", ")", ":", "invite", "=", "get_object_or_404", "(", "Invite", ",", "pk", "=", "invite_pk", ")", "group", "=", "invite", ".", "group", "if", "(", "group", ".", "curators", ".", "filter", "(", "id", "=", "request", ".", "user", ".", "userprofile", ".", "id", ")", ".", "exists", "(", ")", "or", "request", ".", "user", ".", "userprofile", ".", "is_manager", ")", ":", "redeemer", "=", "invite", ".", "redeemer", "invite", ".", "delete", "(", ")", "notify_redeemer_invitation_invalid", ".", "delay", "(", "redeemer", ".", "pk", ",", "group", ".", "pk", ")", "msg", "=", "_", "(", "u'The invitation to {0} has been successfully revoked.'", ")", ".", "format", "(", "redeemer", ")", "messages", ".", "success", "(", "request", ",", "msg", ")", "next_section", "=", "request", ".", "GET", ".", "get", "(", "'next'", ")", "next_url", "=", "urlparams", "(", "reverse", "(", "'groups:group_edit'", ",", "args", "=", "[", "group", ".", "url", "]", ")", ",", "next_section", ")", "return", "HttpResponseRedirect", "(", "next_url", ")", "raise", "Http404", "(", ")" ]
delete an invite to join a group .
train
false
3,379
def test_string_literals_are_prefixed():
    errors = []
    for (abs_path, rel_path) in walk_python_files():
        if (rel_path in _STRING_LITERALS_WHITELIST):
            continue
        problems = find_unprefixed_string_literals(abs_path)
        if problems:
            errors.append((rel_path, problems))
    if errors:
        lines = [u'Unprefixed string literals:']
        for (filename, problems) in errors:
            lines.append((u' ' + filename))
            for (line_no, col_no) in problems:
                lines.append(u' line {}, column {}'.format(line_no, col_no))
        raise AssertionError(u'\n'.join(lines))
[ "def", "test_string_literals_are_prefixed", "(", ")", ":", "errors", "=", "[", "]", "for", "(", "abs_path", ",", "rel_path", ")", "in", "walk_python_files", "(", ")", ":", "if", "(", "rel_path", "in", "_STRING_LITERALS_WHITELIST", ")", ":", "continue", "problems", "=", "find_unprefixed_string_literals", "(", "abs_path", ")", "if", "problems", ":", "errors", ".", "append", "(", "(", "rel_path", ",", "problems", ")", ")", "if", "errors", ":", "lines", "=", "[", "u'Unprefixed string literals:'", "]", "for", "(", "filename", ",", "problems", ")", "in", "errors", ":", "lines", ".", "append", "(", "(", "u' '", "+", "filename", ")", ")", "for", "(", "line_no", ",", "col_no", ")", "in", "problems", ":", "lines", ".", "append", "(", "u' line {}, column {}'", ".", "format", "(", "line_no", ",", "col_no", ")", ")", "raise", "AssertionError", "(", "u'\\n'", ".", "join", "(", "lines", ")", ")" ]
test that string literals are prefixed by u .
train
false
3,380
def image_quad_norm(inarray):
    if (inarray.shape[(-1)] != inarray.shape[(-2)]):
        return ((2 * np.sum(np.sum((np.abs(inarray) ** 2), axis=(-1)), axis=(-1))) - np.sum((np.abs(inarray[..., 0]) ** 2), axis=(-1)))
    else:
        return np.sum(np.sum((np.abs(inarray) ** 2), axis=(-1)), axis=(-1))
[ "def", "image_quad_norm", "(", "inarray", ")", ":", "if", "(", "inarray", ".", "shape", "[", "(", "-", "1", ")", "]", "!=", "inarray", ".", "shape", "[", "(", "-", "2", ")", "]", ")", ":", "return", "(", "(", "2", "*", "np", ".", "sum", "(", "np", ".", "sum", "(", "(", "np", ".", "abs", "(", "inarray", ")", "**", "2", ")", ",", "axis", "=", "(", "-", "1", ")", ")", ",", "axis", "=", "(", "-", "1", ")", ")", ")", "-", "np", ".", "sum", "(", "(", "np", ".", "abs", "(", "inarray", "[", "...", ",", "0", "]", ")", "**", "2", ")", ",", "axis", "=", "(", "-", "1", ")", ")", ")", "else", ":", "return", "np", ".", "sum", "(", "np", ".", "sum", "(", "(", "np", ".", "abs", "(", "inarray", ")", "**", "2", ")", ",", "axis", "=", "(", "-", "1", ")", ")", ",", "axis", "=", "(", "-", "1", ")", ")" ]
return the quadratic norm of images in fourier space .
train
false
3,381
def delete_api_model(restApiId, modelName, region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_model(restApiId=restApiId, modelName=modelName)
        return {'deleted': True}
    except ClientError as e:
        return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
[ "def", "delete_api_model", "(", "restApiId", ",", "modelName", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "conn", ".", "delete_model", "(", "restApiId", "=", "restApiId", ",", "modelName", "=", "modelName", ")", "return", "{", "'deleted'", ":", "True", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'deleted'", ":", "False", ",", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
delete a model identified by name in a given api .
train
false
3,382
def _prepare_data(data):
    if isinstance(data, dict):
        new_data = {}
        for (key, value) in data.iteritems():
            new_data[key] = _prepare_data(value)
        return new_data
    elif (isinstance(data, list) or isinstance(data, tuple) or isinstance(data, set)):
        return [_prepare_data(item) for item in data]
    elif isinstance(data, datetime.date):
        if ((data is NULL_DATETIME) or (data is NULL_DATE)):
            return None
        return str(data)
    else:
        return data
[ "def", "_prepare_data", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "new_data", "=", "{", "}", "for", "(", "key", ",", "value", ")", "in", "data", ".", "iteritems", "(", ")", ":", "new_data", "[", "key", "]", "=", "_prepare_data", "(", "value", ")", "return", "new_data", "elif", "(", "isinstance", "(", "data", ",", "list", ")", "or", "isinstance", "(", "data", ",", "tuple", ")", "or", "isinstance", "(", "data", ",", "set", ")", ")", ":", "return", "[", "_prepare_data", "(", "item", ")", "for", "item", "in", "data", "]", "elif", "isinstance", "(", "data", ",", "datetime", ".", "date", ")", ":", "if", "(", "(", "data", "is", "NULL_DATETIME", ")", "or", "(", "data", "is", "NULL_DATE", ")", ")", ":", "return", "None", "return", "str", "(", "data", ")", "else", ":", "return", "data" ]
recursively process data structures .
train
false
3,383
def get_email_from_user_id(user_id):
    user_settings = get_user_settings(user_id)
    return user_settings.email
[ "def", "get_email_from_user_id", "(", "user_id", ")", ":", "user_settings", "=", "get_user_settings", "(", "user_id", ")", "return", "user_settings", ".", "email" ]
gets the email from a given user_id .
train
false
3,385
def format_wf_instances(instances):
    has_wf = False
    for instance in instances:
        if (not getattr(instance, 'children', None)):
            continue
        else:
            has_wf = True
            break
    if (not has_wf):
        return instances
    for instance in instances:
        if getattr(instance, 'children', None):
            instance.id = (WF_PREFIX + instance.id)
        else:
            instance.id = (NON_WF_PREFIX + instance.id)
    return instances
[ "def", "format_wf_instances", "(", "instances", ")", ":", "has_wf", "=", "False", "for", "instance", "in", "instances", ":", "if", "(", "not", "getattr", "(", "instance", ",", "'children'", ",", "None", ")", ")", ":", "continue", "else", ":", "has_wf", "=", "True", "break", "if", "(", "not", "has_wf", ")", ":", "return", "instances", "for", "instance", "in", "instances", ":", "if", "getattr", "(", "instance", ",", "'children'", ",", "None", ")", ":", "instance", ".", "id", "=", "(", "WF_PREFIX", "+", "instance", ".", "id", ")", "else", ":", "instance", ".", "id", "=", "(", "NON_WF_PREFIX", "+", "instance", ".", "id", ")", "return", "instances" ]
adds identification characters to a workflow and appropriately shifts the non-workflow instances .
train
false
3,387
def test_ast_valid_while(): can_compile(u'(while foo bar)')
[ "def", "test_ast_valid_while", "(", ")", ":", "can_compile", "(", "u'(while foo bar)'", ")" ]
make sure ast can compile valid while .
train
false
3,388
def solvify(f, symbol, domain):
    solution_set = solveset(f, symbol, domain)
    result = None
    if (solution_set is S.EmptySet):
        result = []
    elif isinstance(solution_set, ConditionSet):
        raise NotImplementedError('solveset is unable to solve this equation.')
    elif isinstance(solution_set, FiniteSet):
        result = list(solution_set)
    else:
        period = periodicity(f, symbol)
        if (period is not None):
            solutions = S.EmptySet
            if isinstance(solution_set, ImageSet):
                iter_solutions = (solution_set,)
            elif isinstance(solution_set, Union):
                if all((isinstance(i, ImageSet) for i in solution_set.args)):
                    iter_solutions = solution_set.args
            for solution in iter_solutions:
                solutions += solution.intersect(Interval(0, period, False, True))
            if isinstance(solutions, FiniteSet):
                result = list(solutions)
        else:
            solution = solution_set.intersect(domain)
            if isinstance(solution, FiniteSet):
                result += solution
    return result
[ "def", "solvify", "(", "f", ",", "symbol", ",", "domain", ")", ":", "solution_set", "=", "solveset", "(", "f", ",", "symbol", ",", "domain", ")", "result", "=", "None", "if", "(", "solution_set", "is", "S", ".", "EmptySet", ")", ":", "result", "=", "[", "]", "elif", "isinstance", "(", "solution_set", ",", "ConditionSet", ")", ":", "raise", "NotImplementedError", "(", "'solveset is unable to solve this equation.'", ")", "elif", "isinstance", "(", "solution_set", ",", "FiniteSet", ")", ":", "result", "=", "list", "(", "solution_set", ")", "else", ":", "period", "=", "periodicity", "(", "f", ",", "symbol", ")", "if", "(", "period", "is", "not", "None", ")", ":", "solutions", "=", "S", ".", "EmptySet", "if", "isinstance", "(", "solution_set", ",", "ImageSet", ")", ":", "iter_solutions", "=", "(", "solution_set", ",", ")", "elif", "isinstance", "(", "solution_set", ",", "Union", ")", ":", "if", "all", "(", "(", "isinstance", "(", "i", ",", "ImageSet", ")", "for", "i", "in", "solution_set", ".", "args", ")", ")", ":", "iter_solutions", "=", "solution_set", ".", "args", "for", "solution", "in", "iter_solutions", ":", "solutions", "+=", "solution", ".", "intersect", "(", "Interval", "(", "0", ",", "period", ",", "False", ",", "True", ")", ")", "if", "isinstance", "(", "solutions", ",", "FiniteSet", ")", ":", "result", "=", "list", "(", "solutions", ")", "else", ":", "solution", "=", "solution_set", ".", "intersect", "(", "domain", ")", "if", "isinstance", "(", "solution", ",", "FiniteSet", ")", ":", "result", "+=", "solution", "return", "result" ]
solves an equation using solveset and returns the solution in accordance with the solve output api .
train
false
3,389
def _slotnames(cls):
    names = cls.__dict__.get('__slotnames__')
    if (names is not None):
        return names
    names = []
    if (not hasattr(cls, '__slots__')):
        pass
    else:
        for c in cls.__mro__:
            if ('__slots__' in c.__dict__):
                slots = c.__dict__['__slots__']
                if isinstance(slots, str):
                    slots = (slots,)
                for name in slots:
                    if (name in ('__dict__', '__weakref__')):
                        continue
                    elif (name.startswith('__') and (not name.endswith('__'))):
                        names.append(('_%s%s' % (c.__name__, name)))
                    else:
                        names.append(name)
    try:
        cls.__slotnames__ = names
    except:
        pass
    return names
[ "def", "_slotnames", "(", "cls", ")", ":", "names", "=", "cls", ".", "__dict__", ".", "get", "(", "'__slotnames__'", ")", "if", "(", "names", "is", "not", "None", ")", ":", "return", "names", "names", "=", "[", "]", "if", "(", "not", "hasattr", "(", "cls", ",", "'__slots__'", ")", ")", ":", "pass", "else", ":", "for", "c", "in", "cls", ".", "__mro__", ":", "if", "(", "'__slots__'", "in", "c", ".", "__dict__", ")", ":", "slots", "=", "c", ".", "__dict__", "[", "'__slots__'", "]", "if", "isinstance", "(", "slots", ",", "str", ")", ":", "slots", "=", "(", "slots", ",", ")", "for", "name", "in", "slots", ":", "if", "(", "name", "in", "(", "'__dict__'", ",", "'__weakref__'", ")", ")", ":", "continue", "elif", "(", "name", ".", "startswith", "(", "'__'", ")", "and", "(", "not", "name", ".", "endswith", "(", "'__'", ")", ")", ")", ":", "names", ".", "append", "(", "(", "'_%s%s'", "%", "(", "c", ".", "__name__", ",", "name", ")", ")", ")", "else", ":", "names", ".", "append", "(", "name", ")", "try", ":", "cls", ".", "__slotnames__", "=", "names", "except", ":", "pass", "return", "names" ]
return a list of slot names for a given class .
train
true
3,392
def rescale_value(value):
    s = (1 if (value >= 50) else (-1))
    c = (value if (value < 50) else (value - 50))
    return (s * (c * 64))
[ "def", "rescale_value", "(", "value", ")", ":", "s", "=", "(", "1", "if", "(", "value", ">=", "50", ")", "else", "(", "-", "1", ")", ")", "c", "=", "(", "value", "if", "(", "value", "<", "50", ")", "else", "(", "value", "-", "50", ")", ")", "return", "(", "s", "*", "(", "c", "*", "64", ")", ")" ]
rescale the input value from the range 0 to 100 to the range -3200 to 3200 .
train
false
3,393
def load_fixtures(fixtures_dict=None):
    if (fixtures_dict is None):
        fixtures_dict = {}
    all_fixtures = {}
    fixtures_base_path = get_fixtures_base_path()
    for (fixture_type, fixtures) in six.iteritems(fixtures_dict):
        loaded_fixtures = {}
        for fixture in fixtures:
            fixture_path = ((fixtures_base_path + '/') + fixture)
            fixture_dict = load_content(fixture_path)
            loaded_fixtures[fixture] = fixture_dict
        all_fixtures[fixture_type] = loaded_fixtures
    return all_fixtures
[ "def", "load_fixtures", "(", "fixtures_dict", "=", "None", ")", ":", "if", "(", "fixtures_dict", "is", "None", ")", ":", "fixtures_dict", "=", "{", "}", "all_fixtures", "=", "{", "}", "fixtures_base_path", "=", "get_fixtures_base_path", "(", ")", "for", "(", "fixture_type", ",", "fixtures", ")", "in", "six", ".", "iteritems", "(", "fixtures_dict", ")", ":", "loaded_fixtures", "=", "{", "}", "for", "fixture", "in", "fixtures", ":", "fixture_path", "=", "(", "(", "fixtures_base_path", "+", "'/'", ")", "+", "fixture", ")", "fixture_dict", "=", "load_content", "(", "fixture_path", ")", "loaded_fixtures", "[", "fixture", "]", "=", "fixture_dict", "all_fixtures", "[", "fixture_type", "]", "=", "loaded_fixtures", "return", "all_fixtures" ]
loads fixtures specified in fixtures_dict .
train
false
3,395
def thumb_scale_size(orig_width, orig_height, width, height):
    if (width is None):
        width = scale_aspect(orig_width, orig_height, height)
    elif (height is None):
        height = scale_aspect(orig_height, orig_width, width)
    elif ((orig_width * height) >= (orig_height * width)):
        width = scale_aspect(orig_width, orig_height, height)
    else:
        height = scale_aspect(orig_height, orig_width, width)
    return (width, height)
[ "def", "thumb_scale_size", "(", "orig_width", ",", "orig_height", ",", "width", ",", "height", ")", ":", "if", "(", "width", "is", "None", ")", ":", "width", "=", "scale_aspect", "(", "orig_width", ",", "orig_height", ",", "height", ")", "elif", "(", "height", "is", "None", ")", ":", "height", "=", "scale_aspect", "(", "orig_height", ",", "orig_width", ",", "width", ")", "elif", "(", "(", "orig_width", "*", "height", ")", ">=", "(", "orig_height", "*", "width", ")", ")", ":", "width", "=", "scale_aspect", "(", "orig_width", ",", "orig_height", ",", "height", ")", "else", ":", "height", "=", "scale_aspect", "(", "orig_height", ",", "orig_width", ",", "width", ")", "return", "(", "width", ",", "height", ")" ]
determine size to scale to for thumbnails . params: orig_width .
train
false
3,396
def dashboard_activity_list(user_id, limit, offset):
    q = _dashboard_activity_query(user_id, (limit + offset))
    return _activities_at_offset(q, limit, offset)
[ "def", "dashboard_activity_list", "(", "user_id", ",", "limit", ",", "offset", ")", ":", "q", "=", "_dashboard_activity_query", "(", "user_id", ",", "(", "limit", "+", "offset", ")", ")", "return", "_activities_at_offset", "(", "q", ",", "limit", ",", "offset", ")" ]
return the authorized user's dashboard activity stream .
train
false
3,397
def make_list(value): return list(value)
[ "def", "make_list", "(", "value", ")", ":", "return", "list", "(", "value", ")" ]
returns the value turned into a list .
train
false
3,398
@require_GET
def more_tweets(request):
    max_id = request.GET.get('max_id')
    raw_filter = request.GET.get('filter')
    filter = (raw_filter if (raw_filter in FILTERS) else 'recent')
    return render(request, 'customercare/tweets.html', {'tweets': _get_tweets(locale=request.LANGUAGE_CODE, max_id=max_id, filter=filter, https=request.is_secure())})
[ "@", "require_GET", "def", "more_tweets", "(", "request", ")", ":", "max_id", "=", "request", ".", "GET", ".", "get", "(", "'max_id'", ")", "raw_filter", "=", "request", ".", "GET", ".", "get", "(", "'filter'", ")", "filter", "=", "(", "raw_filter", "if", "(", "raw_filter", "in", "FILTERS", ")", "else", "'recent'", ")", "return", "render", "(", "request", ",", "'customercare/tweets.html'", ",", "{", "'tweets'", ":", "_get_tweets", "(", "locale", "=", "request", ".", "LANGUAGE_CODE", ",", "max_id", "=", "max_id", ",", "filter", "=", "filter", ",", "https", "=", "request", ".", "is_secure", "(", ")", ")", "}", ")" ]
ajax view returning a list of tweets .
train
false
3,399
def states(opts, functions, utils, serializers, whitelist=None):
    ret = LazyLoader(_module_dirs(opts, 'states'), opts, tag='states', pack={'__salt__': functions}, whitelist=whitelist)
    ret.pack['__states__'] = ret
    ret.pack['__utils__'] = utils
    ret.pack['__serializers__'] = serializers
    return ret
[ "def", "states", "(", "opts", ",", "functions", ",", "utils", ",", "serializers", ",", "whitelist", "=", "None", ")", ":", "ret", "=", "LazyLoader", "(", "_module_dirs", "(", "opts", ",", "'states'", ")", ",", "opts", ",", "tag", "=", "'states'", ",", "pack", "=", "{", "'__salt__'", ":", "functions", "}", ",", "whitelist", "=", "whitelist", ")", "ret", ".", "pack", "[", "'__states__'", "]", "=", "ret", "ret", ".", "pack", "[", "'__utils__'", "]", "=", "utils", "ret", ".", "pack", "[", "'__serializers__'", "]", "=", "serializers", "return", "ret" ]
returns the state modules .
train
true
3,403
def parse_user_define(text):
    text = text.strip()
    if ('=' in text):
        text = unqote(text)
        (name, value) = text.split('=', 1)
        name = name.strip()
        value = unqote(value.strip())
    else:
        name = text
        value = 'true'
    return (name, value)
[ "def", "parse_user_define", "(", "text", ")", ":", "text", "=", "text", ".", "strip", "(", ")", "if", "(", "'='", "in", "text", ")", ":", "text", "=", "unqote", "(", "text", ")", "(", "name", ",", "value", ")", "=", "text", ".", "split", "(", "'='", ",", "1", ")", "name", "=", "name", ".", "strip", "(", ")", "value", "=", "unqote", "(", "value", ".", "strip", "(", ")", ")", "else", ":", "name", "=", "text", "value", "=", "'true'", "return", "(", "name", ",", "value", ")" ]
parse "{name}={value}" text and return parts as tuple .
train
false
3,404
def getSortedInjectionTests():
    retVal = copy.deepcopy(conf.tests)

    def priorityFunction(test):
        retVal = SORT_ORDER.FIRST
        if (test.stype == PAYLOAD.TECHNIQUE.UNION):
            retVal = SORT_ORDER.LAST
        elif (('details' in test) and ('dbms' in test.details)):
            if intersect(test.details.dbms, Backend.getIdentifiedDbms()):
                retVal = SORT_ORDER.SECOND
            else:
                retVal = SORT_ORDER.THIRD
        return retVal

    if Backend.getIdentifiedDbms():
        retVal = sorted(retVal, key=priorityFunction)
    return retVal
[ "def", "getSortedInjectionTests", "(", ")", ":", "retVal", "=", "copy", ".", "deepcopy", "(", "conf", ".", "tests", ")", "def", "priorityFunction", "(", "test", ")", ":", "retVal", "=", "SORT_ORDER", ".", "FIRST", "if", "(", "test", ".", "stype", "==", "PAYLOAD", ".", "TECHNIQUE", ".", "UNION", ")", ":", "retVal", "=", "SORT_ORDER", ".", "LAST", "elif", "(", "(", "'details'", "in", "test", ")", "and", "(", "'dbms'", "in", "test", ".", "details", ")", ")", ":", "if", "intersect", "(", "test", ".", "details", ".", "dbms", ",", "Backend", ".", "getIdentifiedDbms", "(", ")", ")", ":", "retVal", "=", "SORT_ORDER", ".", "SECOND", "else", ":", "retVal", "=", "SORT_ORDER", ".", "THIRD", "return", "retVal", "if", "Backend", ".", "getIdentifiedDbms", "(", ")", ":", "retVal", "=", "sorted", "(", "retVal", ",", "key", "=", "priorityFunction", ")", "return", "retVal" ]
returns prioritized test list by eventually detected dbms from error messages .
train
false
3,405
def get_temperature_from_pressure(): return _sensehat.get_temperature_from_pressure()
[ "def", "get_temperature_from_pressure", "(", ")", ":", "return", "_sensehat", ".", "get_temperature_from_pressure", "(", ")" ]
gets the temperature in degrees celsius from the pressure sensor .
train
false
3,406
@register_opt()
@local_optimizer([tensor.Rebroadcast])
def local_gpu_rebroadcast(node):
    if isinstance(node.op, tensor.Rebroadcast):
        (x,) = node.inputs
        if (x.owner and isinstance(x.owner.op, HostFromGpu)):
            gpu_x = x.owner.inputs[0]
            return [host_from_gpu(node.op(gpu_x))]
[ "@", "register_opt", "(", ")", "@", "local_optimizer", "(", "[", "tensor", ".", "Rebroadcast", "]", ")", "def", "local_gpu_rebroadcast", "(", "node", ")", ":", "if", "isinstance", "(", "node", ".", "op", ",", "tensor", ".", "Rebroadcast", ")", ":", "(", "x", ",", ")", "=", "node", ".", "inputs", "if", "(", "x", ".", "owner", "and", "isinstance", "(", "x", ".", "owner", ".", "op", ",", "HostFromGpu", ")", ")", ":", "gpu_x", "=", "x", ".", "owner", ".", "inputs", "[", "0", "]", "return", "[", "host_from_gpu", "(", "node", ".", "op", "(", "gpu_x", ")", ")", "]" ]
rebroadcast(host_from_gpu(x)) -> host_from_gpu(rebroadcast(x)) .
train
false
3,407
def attributive(adjective): return adjective
[ "def", "attributive", "(", "adjective", ")", ":", "return", "adjective" ]
for a predicative adjective , returns the attributive form .
train
false
3,411
def list_bundled_profiles():
    path = os.path.join(get_ipython_package_dir(), u'core', u'profile')
    files = os.listdir(path)
    profiles = []
    for profile in files:
        full_path = os.path.join(path, profile)
        if (os.path.isdir(full_path) and (profile != '__pycache__')):
            profiles.append(profile)
    return profiles
[ "def", "list_bundled_profiles", "(", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "get_ipython_package_dir", "(", ")", ",", "u'core'", ",", "u'profile'", ")", "files", "=", "os", ".", "listdir", "(", "path", ")", "profiles", "=", "[", "]", "for", "profile", "in", "files", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "profile", ")", "if", "(", "os", ".", "path", ".", "isdir", "(", "full_path", ")", "and", "(", "profile", "!=", "'__pycache__'", ")", ")", ":", "profiles", ".", "append", "(", "profile", ")", "return", "profiles" ]
list profiles that are bundled with ipython .
train
true
3,412
def arguments(function, extra_arguments=0):
    if (not hasattr(function, '__code__')):
        return ()
    return function.__code__.co_varnames[:(function.__code__.co_argcount + extra_arguments)]
[ "def", "arguments", "(", "function", ",", "extra_arguments", "=", "0", ")", ":", "if", "(", "not", "hasattr", "(", "function", ",", "'__code__'", ")", ")", ":", "return", "(", ")", "return", "function", ".", "__code__", ".", "co_varnames", "[", ":", "(", "function", ".", "__code__", ".", "co_argcount", "+", "extra_arguments", ")", "]" ]
returns the name of all arguments a function takes .
train
true
3,413
def test_only_major_dots_count():
    line = Line(show_only_major_dots=True)
    line.add('test', range(12))
    line.x_labels = map(str, range(12))
    line.x_labels_major_count = 2
    q = line.render_pyquery()
    assert (len(q('.dots')) == 2)
[ "def", "test_only_major_dots_count", "(", ")", ":", "line", "=", "Line", "(", "show_only_major_dots", "=", "True", ")", "line", ".", "add", "(", "'test'", ",", "range", "(", "12", ")", ")", "line", ".", "x_labels", "=", "map", "(", "str", ",", "range", "(", "12", ")", ")", "line", ".", "x_labels_major_count", "=", "2", "q", "=", "line", ".", "render_pyquery", "(", ")", "assert", "(", "len", "(", "q", "(", "'.dots'", ")", ")", "==", "2", ")" ]
test major dots with a major label count .
train
false
3,415
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    (X, y, groups) = indexable(X, y, groups)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
    scores = parallel((delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params) for (train, test) in cv.split(X, y, groups)))
    return np.array(scores)[:, 0]
[ "def", "cross_val_score", "(", "estimator", ",", "X", ",", "y", "=", "None", ",", "groups", "=", "None", ",", "scoring", "=", "None", ",", "cv", "=", "None", ",", "n_jobs", "=", "1", ",", "verbose", "=", "0", ",", "fit_params", "=", "None", ",", "pre_dispatch", "=", "'2*n_jobs'", ")", ":", "(", "X", ",", "y", ",", "groups", ")", "=", "indexable", "(", "X", ",", "y", ",", "groups", ")", "cv", "=", "check_cv", "(", "cv", ",", "y", ",", "classifier", "=", "is_classifier", "(", "estimator", ")", ")", "scorer", "=", "check_scoring", "(", "estimator", ",", "scoring", "=", "scoring", ")", "parallel", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "verbose", ",", "pre_dispatch", "=", "pre_dispatch", ")", "scores", "=", "parallel", "(", "(", "delayed", "(", "_fit_and_score", ")", "(", "clone", "(", "estimator", ")", ",", "X", ",", "y", ",", "scorer", ",", "train", ",", "test", ",", "verbose", ",", "None", ",", "fit_params", ")", "for", "(", "train", ",", "test", ")", "in", "cv", ".", "split", "(", "X", ",", "y", ",", "groups", ")", ")", ")", "return", "np", ".", "array", "(", "scores", ")", "[", ":", ",", "0", "]" ]
evaluate a score by cross-validation .
train
true
3,416
def lecun_uniform(shape, name=None, dim_ordering='th'):
    (fan_in, fan_out) = get_fans(shape, dim_ordering=dim_ordering)
    scale = np.sqrt((3.0 / fan_in))
    return uniform(shape, scale, name=name)
[ "def", "lecun_uniform", "(", "shape", ",", "name", "=", "None", ",", "dim_ordering", "=", "'th'", ")", ":", "(", "fan_in", ",", "fan_out", ")", "=", "get_fans", "(", "shape", ",", "dim_ordering", "=", "dim_ordering", ")", "scale", "=", "np", ".", "sqrt", "(", "(", "3.0", "/", "fan_in", ")", ")", "return", "uniform", "(", "shape", ",", "scale", ",", "name", "=", "name", ")" ]
lecun uniform variance scaling initializer .
train
false
3,417
def _text_to_vim(start, end, text):
    lines = text.split('\n')
    new_end = _calc_end(lines, start)
    before = _vim.buf[start.line][:start.col]
    after = _vim.buf[end.line][end.col:]
    new_lines = []
    if len(lines):
        new_lines.append((before + lines[0]))
        new_lines.extend(lines[1:])
        new_lines[(-1)] += after
    _vim.buf[start.line:(end.line + 1)] = new_lines
    _vim.buf.cursor = start
    _vim.command('normal! zv')
    return new_end
[ "def", "_text_to_vim", "(", "start", ",", "end", ",", "text", ")", ":", "lines", "=", "text", ".", "split", "(", "'\\n'", ")", "new_end", "=", "_calc_end", "(", "lines", ",", "start", ")", "before", "=", "_vim", ".", "buf", "[", "start", ".", "line", "]", "[", ":", "start", ".", "col", "]", "after", "=", "_vim", ".", "buf", "[", "end", ".", "line", "]", "[", "end", ".", "col", ":", "]", "new_lines", "=", "[", "]", "if", "len", "(", "lines", ")", ":", "new_lines", ".", "append", "(", "(", "before", "+", "lines", "[", "0", "]", ")", ")", "new_lines", ".", "extend", "(", "lines", "[", "1", ":", "]", ")", "new_lines", "[", "(", "-", "1", ")", "]", "+=", "after", "_vim", ".", "buf", "[", "start", ".", "line", ":", "(", "end", ".", "line", "+", "1", ")", "]", "=", "new_lines", "_vim", ".", "buf", ".", "cursor", "=", "start", "_vim", ".", "command", "(", "'normal! zv'", ")", "return", "new_end" ]
copy the given text to the current buffer .
train
false
3,418
def scan_languages():
    csvpath = odoo.modules.module.get_resource_path('base', 'res', 'res.lang.csv')
    try:
        result = []
        with open(csvpath) as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            fields = reader.next()
            code_index = fields.index('code')
            name_index = fields.index('name')
            for row in reader:
                result.append((ustr(row[code_index]), ustr(row[name_index])))
    except Exception:
        _logger.error('Could not read %s', csvpath)
        result = []
    return sorted((result or [('en_US', u'English')]), key=itemgetter(1))
[ "def", "scan_languages", "(", ")", ":", "csvpath", "=", "odoo", ".", "modules", ".", "module", ".", "get_resource_path", "(", "'base'", ",", "'res'", ",", "'res.lang.csv'", ")", "try", ":", "result", "=", "[", "]", "with", "open", "(", "csvpath", ")", "as", "csvfile", ":", "reader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "','", ",", "quotechar", "=", "'\"'", ")", "fields", "=", "reader", ".", "next", "(", ")", "code_index", "=", "fields", ".", "index", "(", "'code'", ")", "name_index", "=", "fields", ".", "index", "(", "'name'", ")", "for", "row", "in", "reader", ":", "result", ".", "append", "(", "(", "ustr", "(", "row", "[", "code_index", "]", ")", ",", "ustr", "(", "row", "[", "name_index", "]", ")", ")", ")", "except", "Exception", ":", "_logger", ".", "error", "(", "'Could not read %s'", ",", "csvpath", ")", "result", "=", "[", "]", "return", "sorted", "(", "(", "result", "or", "[", "(", "'en_US'", ",", "u'English'", ")", "]", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ")" ]
returns all languages supported by openerp for translation . returns: a list of (code, name) pairs .
train
false
3,419
def filesizeHandler(field): return displayHandler(field, humanFilesize)
[ "def", "filesizeHandler", "(", "field", ")", ":", "return", "displayHandler", "(", "field", ",", "humanFilesize", ")" ]
format field value using humanfilesize() .
train
false
3,420
def remove_version_from_guid(guid):
    if ('/repos/' not in guid):
        return None
    last_slash = guid.rfind('/')
    return guid[:last_slash]
[ "def", "remove_version_from_guid", "(", "guid", ")", ":", "if", "(", "'/repos/'", "not", "in", "guid", ")", ":", "return", "None", "last_slash", "=", "guid", ".", "rfind", "(", "'/'", ")", "return", "guid", "[", ":", "last_slash", "]" ]
removes version from toolshed-derived tool_id .
train
false
3,422
def beneficiary_type(): return s3_rest_controller()
[ "def", "beneficiary_type", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
beneficiary types: restful crud controller .
train
false
3,425
def _peeloff_pi(arg):
    for a in Add.make_args(arg):
        if (a is S.Pi):
            K = S.One
            break
        elif a.is_Mul:
            (K, p) = a.as_two_terms()
            if ((p is S.Pi) and K.is_Rational):
                break
    else:
        return (arg, S.Zero)
    m1 = ((K % S.Half) * S.Pi)
    m2 = ((K * S.Pi) - m1)
    return ((arg - m2), m2)
[ "def", "_peeloff_pi", "(", "arg", ")", ":", "for", "a", "in", "Add", ".", "make_args", "(", "arg", ")", ":", "if", "(", "a", "is", "S", ".", "Pi", ")", ":", "K", "=", "S", ".", "One", "break", "elif", "a", ".", "is_Mul", ":", "(", "K", ",", "p", ")", "=", "a", ".", "as_two_terms", "(", ")", "if", "(", "(", "p", "is", "S", ".", "Pi", ")", "and", "K", ".", "is_Rational", ")", ":", "break", "else", ":", "return", "(", "arg", ",", "S", ".", "Zero", ")", "m1", "=", "(", "(", "K", "%", "S", ".", "Half", ")", "*", "S", ".", "Pi", ")", "m2", "=", "(", "(", "K", "*", "S", ".", "Pi", ")", "-", "m1", ")", "return", "(", "(", "arg", "-", "m2", ")", ",", "m2", ")" ]
split arg into two parts , a "rest" and a multiple of pi/2 .
train
false
3,426
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, volume_id): return IMPL.block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, volume_id)
[ "def", "block_device_mapping_destroy_by_instance_and_volume", "(", "context", ",", "instance_uuid", ",", "volume_id", ")", ":", "return", "IMPL", ".", "block_device_mapping_destroy_by_instance_and_volume", "(", "context", ",", "instance_uuid", ",", "volume_id", ")" ]
destroy the block device mapping .
train
false
3,427
def get_resource_id(resource, name=None, resource_id=None, region=None, key=None, keyid=None, profile=None):
    try:
        return {'id': _get_resource_id(resource, name, region=region, key=key, keyid=keyid, profile=profile)}
    except BotoServerError as e:
        return {'error': salt.utils.boto.get_error(e)}
[ "def", "get_resource_id", "(", "resource", ",", "name", "=", "None", ",", "resource_id", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "return", "{", "'id'", ":", "_get_resource_id", "(", "resource", ",", "name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "}", "except", "BotoServerError", "as", "e", ":", "return", "{", "'error'", ":", "salt", ".", "utils", ".", "boto", ".", "get_error", "(", "e", ")", "}" ]
get an aws id for a vpc resource by type and name .
train
false
3,428
def getsemod(module): return list_semod().get(module, {})
[ "def", "getsemod", "(", "module", ")", ":", "return", "list_semod", "(", ")", ".", "get", "(", "module", ",", "{", "}", ")" ]
return the information on a specific selinux module .
train
false
3,429
def reformat_dict_keys(keymap=None, inputdict=None):
    keymap = (keymap or {})
    inputdict = (inputdict or {})
    return dict([(outk, inputdict[ink]) for (ink, outk) in keymap.items() if (ink in inputdict)])
[ "def", "reformat_dict_keys", "(", "keymap", "=", "None", ",", "inputdict", "=", "None", ")", ":", "keymap", "=", "(", "keymap", "or", "{", "}", ")", "inputdict", "=", "(", "inputdict", "or", "{", "}", ")", "return", "dict", "(", "[", "(", "outk", ",", "inputdict", "[", "ink", "]", ")", "for", "(", "ink", ",", "outk", ")", "in", "keymap", ".", "items", "(", ")", "if", "(", "ink", "in", "inputdict", ")", "]", ")" ]
utility function for mapping one dict format to another .
train
false
3,430
def validate_boxes(boxes, width=0, height=0):
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    assert (x1 >= 0).all()
    assert (y1 >= 0).all()
    assert (x2 >= x1).all()
    assert (y2 >= y1).all()
    assert (x2 < width).all()
    assert (y2 < height).all()
[ "def", "validate_boxes", "(", "boxes", ",", "width", "=", "0", ",", "height", "=", "0", ")", ":", "x1", "=", "boxes", "[", ":", ",", "0", "]", "y1", "=", "boxes", "[", ":", ",", "1", "]", "x2", "=", "boxes", "[", ":", ",", "2", "]", "y2", "=", "boxes", "[", ":", ",", "3", "]", "assert", "(", "x1", ">=", "0", ")", ".", "all", "(", ")", "assert", "(", "y1", ">=", "0", ")", ".", "all", "(", ")", "assert", "(", "x2", ">=", "x1", ")", ".", "all", "(", ")", "assert", "(", "y2", ">=", "y1", ")", ".", "all", "(", ")", "assert", "(", "x2", "<", "width", ")", ".", "all", "(", ")", "assert", "(", "y2", "<", "height", ")", ".", "all", "(", ")" ]
check that a set of boxes are valid .
train
false
3,431
def _is_recarray(data): return isinstance(data, np.core.recarray)
[ "def", "_is_recarray", "(", "data", ")", ":", "return", "isinstance", "(", "data", ",", "np", ".", "core", ".", "recarray", ")" ]
returns true if data is a recarray .
train
false
3,432
def rename_blob(bucket_name, blob_name, new_name):
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.blob(blob_name)
    new_blob = bucket.rename_blob(blob, new_name)
    print 'Blob {} has been renamed to {}'.format(blob.name, new_blob.name)
[ "def", "rename_blob", "(", "bucket_name", ",", "blob_name", ",", "new_name", ")", ":", "storage_client", "=", "storage", ".", "Client", "(", ")", "bucket", "=", "storage_client", ".", "get_bucket", "(", "bucket_name", ")", "blob", "=", "bucket", ".", "blob", "(", "blob_name", ")", "new_blob", "=", "bucket", ".", "rename_blob", "(", "blob", ",", "new_name", ")", "print", "'Blob {} has been renamed to {}'", ".", "format", "(", "blob", ".", "name", ",", "new_blob", ".", "name", ")" ]
renames a blob .
train
false
3,433
def clear_node(name):
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if __opts__['test']:
        ret['comment'] = 'Clearing local node statistics'
        return ret
    __salt__['trafficserver.clear_node']()
    ret['result'] = True
    ret['comment'] = 'Cleared local node statistics'
    return ret
[ "def", "clear_node", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Clearing local node statistics'", "return", "ret", "__salt__", "[", "'trafficserver.clear_node'", "]", "(", ")", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Cleared local node statistics'", "return", "ret" ]
clears accumulated statistics on the local node .
train
true
3,434
def str2stdout(sourcestring, colors=None, title='', markup='html', header=None, footer=None, linenumbers=0, form=None): Parser(sourcestring, colors=colors, title=title, markup=markup, header=header, footer=footer, linenumbers=linenumbers).format(form)
[ "def", "str2stdout", "(", "sourcestring", ",", "colors", "=", "None", ",", "title", "=", "''", ",", "markup", "=", "'html'", ",", "header", "=", "None", ",", "footer", "=", "None", ",", "linenumbers", "=", "0", ",", "form", "=", "None", ")", ":", "Parser", "(", "sourcestring", ",", "colors", "=", "colors", ",", "title", "=", "title", ",", "markup", "=", "markup", ",", "header", "=", "header", ",", "footer", "=", "footer", ",", "linenumbers", "=", "linenumbers", ")", ".", "format", "(", "form", ")" ]
converts a code string to colorized html and writes it to stdout .
train
false
3,435
def getTransferClosestSurroundingLoop(oldOrderedLocation, remainingSurroundingLoops, skein):
    if (len(remainingSurroundingLoops) > 0):
        oldOrderedLocation.z = remainingSurroundingLoops[0].z
    closestDistance = 1e+18
    closestSurroundingLoop = None
    for remainingSurroundingLoop in remainingSurroundingLoops:
        distance = getNearestDistanceIndex(oldOrderedLocation.dropAxis(2), remainingSurroundingLoop.boundary).distance
        if (distance < closestDistance):
            closestDistance = distance
            closestSurroundingLoop = remainingSurroundingLoop
    remainingSurroundingLoops.remove(closestSurroundingLoop)
    closestSurroundingLoop.addToThreads(oldOrderedLocation, skein)
    return closestSurroundingLoop
[ "def", "getTransferClosestSurroundingLoop", "(", "oldOrderedLocation", ",", "remainingSurroundingLoops", ",", "skein", ")", ":", "if", "(", "len", "(", "remainingSurroundingLoops", ")", ">", "0", ")", ":", "oldOrderedLocation", ".", "z", "=", "remainingSurroundingLoops", "[", "0", "]", ".", "z", "closestDistance", "=", "1e+18", "closestSurroundingLoop", "=", "None", "for", "remainingSurroundingLoop", "in", "remainingSurroundingLoops", ":", "distance", "=", "getNearestDistanceIndex", "(", "oldOrderedLocation", ".", "dropAxis", "(", "2", ")", ",", "remainingSurroundingLoop", ".", "boundary", ")", ".", "distance", "if", "(", "distance", "<", "closestDistance", ")", ":", "closestDistance", "=", "distance", "closestSurroundingLoop", "=", "remainingSurroundingLoop", "remainingSurroundingLoops", ".", "remove", "(", "closestSurroundingLoop", ")", "closestSurroundingLoop", ".", "addToThreads", "(", "oldOrderedLocation", ",", "skein", ")", "return", "closestSurroundingLoop" ]
get and transfer the closest remaining surrounding loop .
train
false
3,436
def cleanup_traceback(tb, exclude):
    orig_tb = tb[:]
    while tb:
        for rpcfile in exclude:
            if tb[0][0].count(rpcfile):
                break
        else:
            break
        del tb[0]
    while tb:
        for rpcfile in exclude:
            if tb[(-1)][0].count(rpcfile):
                break
        else:
            break
        del tb[(-1)]
    if (len(tb) == 0):
        tb[:] = orig_tb[:]
        print >>sys.stderr, '** IDLE Internal Exception: '
    rpchandler = rpc.objecttable['exec'].rpchandler
    for i in range(len(tb)):
        (fn, ln, nm, line) = tb[i]
        if (nm == '?'):
            nm = '-toplevel-'
        if ((not line) and fn.startswith('<pyshell#')):
            line = rpchandler.remotecall('linecache', 'getline', (fn, ln), {})
        tb[i] = (fn, ln, nm, line)
[ "def", "cleanup_traceback", "(", "tb", ",", "exclude", ")", ":", "orig_tb", "=", "tb", "[", ":", "]", "while", "tb", ":", "for", "rpcfile", "in", "exclude", ":", "if", "tb", "[", "0", "]", "[", "0", "]", ".", "count", "(", "rpcfile", ")", ":", "break", "else", ":", "break", "del", "tb", "[", "0", "]", "while", "tb", ":", "for", "rpcfile", "in", "exclude", ":", "if", "tb", "[", "(", "-", "1", ")", "]", "[", "0", "]", ".", "count", "(", "rpcfile", ")", ":", "break", "else", ":", "break", "del", "tb", "[", "(", "-", "1", ")", "]", "if", "(", "len", "(", "tb", ")", "==", "0", ")", ":", "tb", "[", ":", "]", "=", "orig_tb", "[", ":", "]", "print", ">>", "sys", ".", "stderr", ",", "'** IDLE Internal Exception: '", "rpchandler", "=", "rpc", ".", "objecttable", "[", "'exec'", "]", ".", "rpchandler", "for", "i", "in", "range", "(", "len", "(", "tb", ")", ")", ":", "(", "fn", ",", "ln", ",", "nm", ",", "line", ")", "=", "tb", "[", "i", "]", "if", "(", "nm", "==", "'?'", ")", ":", "nm", "=", "'-toplevel-'", "if", "(", "(", "not", "line", ")", "and", "fn", ".", "startswith", "(", "'<pyshell#'", ")", ")", ":", "line", "=", "rpchandler", ".", "remotecall", "(", "'linecache'", ",", "'getline'", ",", "(", "fn", ",", "ln", ")", ",", "{", "}", ")", "tb", "[", "i", "]", "=", "(", "fn", ",", "ln", ",", "nm", ",", "line", ")" ]
remove excluded traces from beginning/end of tb; get cached lines .
train
false
3,437
def restoreDatabase(version):
    logger.log(u'Restoring database before trying upgrade again')
    if (not sickbeard.helpers.restoreVersionedFile(dbFilename(suffix=('v' + str(version))), version)):
        logger.log_error_and_exit(u'Database restore failed, abort upgrading database')
        return False
    else:
        return True
[ "def", "restoreDatabase", "(", "version", ")", ":", "logger", ".", "log", "(", "u'Restoring database before trying upgrade again'", ")", "if", "(", "not", "sickbeard", ".", "helpers", ".", "restoreVersionedFile", "(", "dbFilename", "(", "suffix", "=", "(", "'v'", "+", "str", "(", "version", ")", ")", ")", ",", "version", ")", ")", ":", "logger", ".", "log_error_and_exit", "(", "u'Database restore failed, abort upgrading database'", ")", "return", "False", "else", ":", "return", "True" ]
restores a database to a previous version .
train
false
3,438
@open_tab
def load_page(data, socket):
    if ('url' not in data):
        return {'error': 4001, 'message': 'Required parameter url'}
    socket.tab.loaded = False
    meta = data.get('_meta', {})

    def on_complete(is_error, error_info=None):
        extra_meta = {'id': meta.get('id')}
        if is_error:
            msg = ('Unknown error' if (error_info is None) else error_info.text)
            extra_meta.update(error=4500, reason=msg)
        else:
            socket.tab.loaded = True
        socket.sendMessage(metadata(socket, extra_meta))
        cookies(socket)

    headers = {}
    if ('user_agent' in meta):
        headers['User-Agent'] = meta['user_agent']
    socket.tab.go(data['url'], (lambda : on_complete(False)), (lambda err=None: on_complete(True, err)), baseurl=data.get('baseurl'), headers=headers)
[ "@", "open_tab", "def", "load_page", "(", "data", ",", "socket", ")", ":", "if", "(", "'url'", "not", "in", "data", ")", ":", "return", "{", "'error'", ":", "4001", ",", "'message'", ":", "'Required parameter url'", "}", "socket", ".", "tab", ".", "loaded", "=", "False", "meta", "=", "data", ".", "get", "(", "'_meta'", ",", "{", "}", ")", "def", "on_complete", "(", "is_error", ",", "error_info", "=", "None", ")", ":", "extra_meta", "=", "{", "'id'", ":", "meta", ".", "get", "(", "'id'", ")", "}", "if", "is_error", ":", "msg", "=", "(", "'Unknown error'", "if", "(", "error_info", "is", "None", ")", "else", "error_info", ".", "text", ")", "extra_meta", ".", "update", "(", "error", "=", "4500", ",", "reason", "=", "msg", ")", "else", ":", "socket", ".", "tab", ".", "loaded", "=", "True", "socket", ".", "sendMessage", "(", "metadata", "(", "socket", ",", "extra_meta", ")", ")", "cookies", "(", "socket", ")", "headers", "=", "{", "}", "if", "(", "'user_agent'", "in", "meta", ")", ":", "headers", "[", "'User-Agent'", "]", "=", "meta", "[", "'user_agent'", "]", "socket", ".", "tab", ".", "go", "(", "data", "[", "'url'", "]", ",", "(", "lambda", ":", "on_complete", "(", "False", ")", ")", ",", "(", "lambda", "err", "=", "None", ":", "on_complete", "(", "True", ",", "err", ")", ")", ",", "baseurl", "=", "data", ".", "get", "(", "'baseurl'", ")", ",", "headers", "=", "headers", ")" ]
load page in virtual url from provided url .
train
false
3,440
def upload_stable_pdf(user='pandas'): if os.system('cd build/latex; scp pandas.pdf {0}@pandas.pydata.org:/usr/share/nginx/pandas/pandas-docs/stable/'.format(user)): raise SystemExit('PDF upload to stable failed')
[ "def", "upload_stable_pdf", "(", "user", "=", "'pandas'", ")", ":", "if", "os", ".", "system", "(", "'cd build/latex; scp pandas.pdf {0}@pandas.pydata.org:/usr/share/nginx/pandas/pandas-docs/stable/'", ".", "format", "(", "user", ")", ")", ":", "raise", "SystemExit", "(", "'PDF upload to stable failed'", ")" ]
push a copy to the pydata dev directory .
train
false
3,441
def reverse_opt_map(opt_map): revdict = {} for (key, value) in list(opt_map.items()): if is_container(value): value = value[0] if ((key != u'flags') and (value is not None)): revdict[value.split()[0]] = key return revdict
[ "def", "reverse_opt_map", "(", "opt_map", ")", ":", "revdict", "=", "{", "}", "for", "(", "key", ",", "value", ")", "in", "list", "(", "opt_map", ".", "items", "(", ")", ")", ":", "if", "is_container", "(", "value", ")", ":", "value", "=", "value", "[", "0", "]", "if", "(", "(", "key", "!=", "u'flags'", ")", "and", "(", "value", "is", "not", "None", ")", ")", ":", "revdict", "[", "value", ".", "split", "(", ")", "[", "0", "]", "]", "=", "key", "return", "revdict" ]
reverse the key/value pairs of the option map in the interface classes .
train
false
3,442
def _get_excludes_option(**kwargs): disable_excludes = kwargs.get('disableexcludes', '') ret = [] if disable_excludes: log.info("Disabling excludes for '%s'", disable_excludes) ret.append('--disableexcludes={0}'.format(disable_excludes)) return ret
[ "def", "_get_excludes_option", "(", "**", "kwargs", ")", ":", "disable_excludes", "=", "kwargs", ".", "get", "(", "'disableexcludes'", ",", "''", ")", "ret", "=", "[", "]", "if", "disable_excludes", ":", "log", ".", "info", "(", "\"Disabling excludes for '%s'\"", ",", "disable_excludes", ")", "ret", ".", "append", "(", "'--disableexcludes={0}'", ".", "format", "(", "disable_excludes", ")", ")", "return", "ret" ]
returns a list of --disableexcludes option to be used in the yum command .
train
false
3,443
def _parse_local_version(local): if (local is not None): return tuple(((part.lower() if (not part.isdigit()) else int(part)) for part in _local_version_seperators.split(local)))
[ "def", "_parse_local_version", "(", "local", ")", ":", "if", "(", "local", "is", "not", "None", ")", ":", "return", "tuple", "(", "(", "(", "part", ".", "lower", "(", ")", "if", "(", "not", "part", ".", "isdigit", "(", ")", ")", "else", "int", "(", "part", ")", ")", "for", "part", "in", "_local_version_seperators", ".", "split", "(", "local", ")", ")", ")" ]
takes a string like abc .
train
true
3,444
def p_defsection(p): p.lexer.lastsection = 1 print 'tokens = ', repr(tokenlist) print print 'precedence = ', repr(preclist) print print '# -------------- RULES ----------------' print
[ "def", "p_defsection", "(", "p", ")", ":", "p", ".", "lexer", ".", "lastsection", "=", "1", "print", "'tokens = '", ",", "repr", "(", "tokenlist", ")", "print", "print", "'precedence = '", ",", "repr", "(", "preclist", ")", "print", "print", "'# -------------- RULES ----------------'", "print" ]
defsection : definitions section | section .
train
false
3,446
def decrypt(stream, parameters): encodedStream = '' if ((parameters == None) or (parameters == {})): return (0, stream) elif ((not parameters.has_key('/Name')) or (parameters['/Name'] == None)): return (0, stream) else: cryptFilterName = parameters['/Name'].getValue() if (cryptFilterName == 'Identity'): return (0, stream) else: return ((-1), 'Decrypt not supported yet')
[ "def", "decrypt", "(", "stream", ",", "parameters", ")", ":", "encodedStream", "=", "''", "if", "(", "(", "parameters", "==", "None", ")", "or", "(", "parameters", "==", "{", "}", ")", ")", ":", "return", "(", "0", ",", "stream", ")", "elif", "(", "(", "not", "parameters", ".", "has_key", "(", "'/Name'", ")", ")", "or", "(", "parameters", "[", "'/Name'", "]", "==", "None", ")", ")", ":", "return", "(", "0", ",", "stream", ")", "else", ":", "cryptFilterName", "=", "parameters", "[", "'/Name'", "]", ".", "getValue", "(", ")", "if", "(", "cryptFilterName", "==", "'Identity'", ")", ":", "return", "(", "0", ",", "stream", ")", "else", ":", "return", "(", "(", "-", "1", ")", ",", "'Decrypt not supported yet'", ")" ]
decrypt ciphertext .
train
false
3,447
def _check_stc(stc, evoked, idx, ratio=50.0): assert_array_almost_equal(stc.times, evoked.times, 5) amps = np.sum((stc.data ** 2), axis=1) order = np.argsort(amps)[::(-1)] amps = amps[order] verts = np.concatenate(stc.vertices)[order] assert_equal(idx, verts[0], err_msg=str(list(verts))) assert_true((amps[0] > (ratio * amps[1])), msg=str((amps[0] / amps[1])))
[ "def", "_check_stc", "(", "stc", ",", "evoked", ",", "idx", ",", "ratio", "=", "50.0", ")", ":", "assert_array_almost_equal", "(", "stc", ".", "times", ",", "evoked", ".", "times", ",", "5", ")", "amps", "=", "np", ".", "sum", "(", "(", "stc", ".", "data", "**", "2", ")", ",", "axis", "=", "1", ")", "order", "=", "np", ".", "argsort", "(", "amps", ")", "[", ":", ":", "(", "-", "1", ")", "]", "amps", "=", "amps", "[", "order", "]", "verts", "=", "np", ".", "concatenate", "(", "stc", ".", "vertices", ")", "[", "order", "]", "assert_equal", "(", "idx", ",", "verts", "[", "0", "]", ",", "err_msg", "=", "str", "(", "list", "(", "verts", ")", ")", ")", "assert_true", "(", "(", "amps", "[", "0", "]", ">", "(", "ratio", "*", "amps", "[", "1", "]", ")", ")", ",", "msg", "=", "str", "(", "(", "amps", "[", "0", "]", "/", "amps", "[", "1", "]", ")", ")", ")" ]
check that stcs are compatible .
train
false
3,448
def affiliation(): return s3_rest_controller()
[ "def", "affiliation", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
restful crud controller .
train
false
3,451
def track_from_filename(filename, filetype=None, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False): filetype = (filetype or filename.split('.')[(-1)]) file_object = open(filename, 'rb') result = track_from_file(file_object, filetype, timeout, force_upload) file_object.close() return result
[ "def", "track_from_filename", "(", "filename", ",", "filetype", "=", "None", ",", "timeout", "=", "DEFAULT_ASYNC_TIMEOUT", ",", "force_upload", "=", "False", ")", ":", "filetype", "=", "(", "filetype", "or", "filename", ".", "split", "(", "'.'", ")", "[", "(", "-", "1", ")", "]", ")", "file_object", "=", "open", "(", "filename", ",", "'rb'", ")", "result", "=", "track_from_file", "(", "file_object", ",", "filetype", ",", "timeout", ",", "force_upload", ")", "file_object", ".", "close", "(", ")", "return", "result" ]
create a track object from a filename .
train
true
3,452
def get_repeat_action_user_count(db=db, timedelta=timedelta(days=30)): start_date = (timezone.now() - timedelta) pipeline = [{'$match': {'date': {'$gt': start_date}}}, {'$group': {'_id': '$user', 'nodelog_id': {'$addToSet': '$_id'}}}] user_nodelog = db.nodelog.aggregate(pipeline)['result'] repeat_action_count = 0 repeat_action_user_age = [] for i in user_nodelog: if i['_id']: user_id = i['_id'] nodelog_id = i['nodelog_id'] nodelogs = db.nodelog.find({'_id': {'$in': nodelog_id}}).sort([('date', 1)]) repeat_action_date = {} for nodelog in nodelogs: action = nodelog['action'] date = nodelog['date'] if (action not in repeat_action_date): repeat_action_date[action] = date elif (abs((date - repeat_action_date[action]).total_seconds()) < 3): repeat_action_date[action] = date else: repeat_action_count += 1 date_registered = db.user.find({'_id': user_id}).next()['date_registered'] age = (date - date_registered).days repeat_action_user_age.append(age) break return {'repeat_action_count': repeat_action_count, 'repeat_action_age': repeat_action_user_age}
[ "def", "get_repeat_action_user_count", "(", "db", "=", "db", ",", "timedelta", "=", "timedelta", "(", "days", "=", "30", ")", ")", ":", "start_date", "=", "(", "timezone", ".", "now", "(", ")", "-", "timedelta", ")", "pipeline", "=", "[", "{", "'$match'", ":", "{", "'date'", ":", "{", "'$gt'", ":", "start_date", "}", "}", "}", ",", "{", "'$group'", ":", "{", "'_id'", ":", "'$user'", ",", "'nodelog_id'", ":", "{", "'$addToSet'", ":", "'$_id'", "}", "}", "}", "]", "user_nodelog", "=", "db", ".", "nodelog", ".", "aggregate", "(", "pipeline", ")", "[", "'result'", "]", "repeat_action_count", "=", "0", "repeat_action_user_age", "=", "[", "]", "for", "i", "in", "user_nodelog", ":", "if", "i", "[", "'_id'", "]", ":", "user_id", "=", "i", "[", "'_id'", "]", "nodelog_id", "=", "i", "[", "'nodelog_id'", "]", "nodelogs", "=", "db", ".", "nodelog", ".", "find", "(", "{", "'_id'", ":", "{", "'$in'", ":", "nodelog_id", "}", "}", ")", ".", "sort", "(", "[", "(", "'date'", ",", "1", ")", "]", ")", "repeat_action_date", "=", "{", "}", "for", "nodelog", "in", "nodelogs", ":", "action", "=", "nodelog", "[", "'action'", "]", "date", "=", "nodelog", "[", "'date'", "]", "if", "(", "action", "not", "in", "repeat_action_date", ")", ":", "repeat_action_date", "[", "action", "]", "=", "date", "elif", "(", "abs", "(", "(", "date", "-", "repeat_action_date", "[", "action", "]", ")", ".", "total_seconds", "(", ")", ")", "<", "3", ")", ":", "repeat_action_date", "[", "action", "]", "=", "date", "else", ":", "repeat_action_count", "+=", "1", "date_registered", "=", "db", ".", "user", ".", "find", "(", "{", "'_id'", ":", "user_id", "}", ")", ".", "next", "(", ")", "[", "'date_registered'", "]", "age", "=", "(", "date", "-", "date_registered", ")", ".", "days", "repeat_action_user_age", ".", "append", "(", "age", ")", "break", "return", "{", "'repeat_action_count'", ":", "repeat_action_count", ",", "'repeat_action_age'", ":", "repeat_action_user_age", "}" ]
get the number of users that have repetitive actions during the last month .
train
false
3,453
def processSVGElementpolyline(svgReader, xmlElement): if ('points' not in xmlElement.attributeDictionary): print 'Warning, in processSVGElementpolyline in svgReader can not get a value for d in:' print xmlElement.attributeDictionary return rotatedLoopLayer = svgReader.getRotatedLoopLayer() words = getRightStripMinusSplit(xmlElement.attributeDictionary['points'].replace(',', ' ')) path = [] for wordIndex in xrange(0, len(words), 2): path.append(euclidean.getComplexByWords(words[wordIndex:])) rotatedLoopLayer.loops += getTransformedOutlineByPath(path, xmlElement, svgReader.yAxisPointingUpward)
[ "def", "processSVGElementpolyline", "(", "svgReader", ",", "xmlElement", ")", ":", "if", "(", "'points'", "not", "in", "xmlElement", ".", "attributeDictionary", ")", ":", "print", "'Warning, in processSVGElementpolyline in svgReader can not get a value for d in:'", "print", "xmlElement", ".", "attributeDictionary", "return", "rotatedLoopLayer", "=", "svgReader", ".", "getRotatedLoopLayer", "(", ")", "words", "=", "getRightStripMinusSplit", "(", "xmlElement", ".", "attributeDictionary", "[", "'points'", "]", ".", "replace", "(", "','", ",", "' '", ")", ")", "path", "=", "[", "]", "for", "wordIndex", "in", "xrange", "(", "0", ",", "len", "(", "words", ")", ",", "2", ")", ":", "path", ".", "append", "(", "euclidean", ".", "getComplexByWords", "(", "words", "[", "wordIndex", ":", "]", ")", ")", "rotatedLoopLayer", ".", "loops", "+=", "getTransformedOutlineByPath", "(", "path", ",", "xmlElement", ",", "svgReader", ".", "yAxisPointingUpward", ")" ]
process elementnode by svgreader .
train
false
3,454
def encode_entity_table_key(key): if (not isinstance(key, entity_pb.Reference)): key = entity_pb.Reference(key) prefix = dbconstants.KEY_DELIMITER.join([key.app(), key.name_space()]) return get_entity_key(prefix, key.path())
[ "def", "encode_entity_table_key", "(", "key", ")", ":", "if", "(", "not", "isinstance", "(", "key", ",", "entity_pb", ".", "Reference", ")", ")", ":", "key", "=", "entity_pb", ".", "Reference", "(", "key", ")", "prefix", "=", "dbconstants", ".", "KEY_DELIMITER", ".", "join", "(", "[", "key", ".", "app", "(", ")", ",", "key", ".", "name_space", "(", ")", "]", ")", "return", "get_entity_key", "(", "prefix", ",", "key", ".", "path", "(", ")", ")" ]
create a key that can be used for the entities table .
train
false
3,455
def make_gh_link_node(app, rawtext, role, kind, api_type, id, options=None): url = ('%s/%s/%s' % (BOKEH_GH, api_type, id)) options = (options or {}) _try_url(app, url, role) set_classes(options) node = nodes.reference(rawtext, (kind + utils.unescape(id)), refuri=url, **options) return node
[ "def", "make_gh_link_node", "(", "app", ",", "rawtext", ",", "role", ",", "kind", ",", "api_type", ",", "id", ",", "options", "=", "None", ")", ":", "url", "=", "(", "'%s/%s/%s'", "%", "(", "BOKEH_GH", ",", "api_type", ",", "id", ")", ")", "options", "=", "(", "options", "or", "{", "}", ")", "_try_url", "(", "app", ",", "url", ",", "role", ")", "set_classes", "(", "options", ")", "node", "=", "nodes", ".", "reference", "(", "rawtext", ",", "(", "kind", "+", "utils", ".", "unescape", "(", "id", ")", ")", ",", "refuri", "=", "url", ",", "**", "options", ")", "return", "node" ]
return a link to a bokeh github resource .
train
false
3,456
def denoise_tv_bregman(image, weight, max_iter=100, eps=0.001, isotropic=True): return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic)
[ "def", "denoise_tv_bregman", "(", "image", ",", "weight", ",", "max_iter", "=", "100", ",", "eps", "=", "0.001", ",", "isotropic", "=", "True", ")", ":", "return", "_denoise_tv_bregman", "(", "image", ",", "weight", ",", "max_iter", ",", "eps", ",", "isotropic", ")" ]
perform total-variation denoising using split-bregman optimization .
train
false
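As a rough usage sketch for the record above (assuming scikit-image and numpy are installed; scikit-image's public denoise_tv_bregman has the same signature as this thin wrapper):

    # Hypothetical usage: denoise a synthetic noisy image with
    # scikit-image's public denoise_tv_bregman.
    import numpy as np
    from skimage.restoration import denoise_tv_bregman

    rng = np.random.default_rng(0)
    clean = np.zeros((64, 64))
    clean[16:48, 16:48] = 1.0                      # a bright square
    noisy = clean + 0.2 * rng.standard_normal(clean.shape)

    # Smaller weight -> stronger denoising, less similarity to the input.
    denoised = denoise_tv_bregman(noisy, weight=5.0)
    print(denoised.shape)  # (64, 64)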
3,457
def getEvaluatedDictionary(evaluationKeys, xmlElement): evaluatedDictionary = {} zeroLength = (len(evaluationKeys) == 0) for key in xmlElement.attributeDictionary.keys(): if ((key in evaluationKeys) or zeroLength): value = getEvaluatedValueObliviously(key, xmlElement) if (value == None): valueString = str(xmlElement.attributeDictionary[key]) print 'Warning, getEvaluatedDictionary in evaluate can not get a value for:' print valueString evaluatedDictionary[(key + '__Warning__')] = ('Can not evaluate: ' + valueString.replace('"', ' ').replace("'", ' ')) else: evaluatedDictionary[key] = value return evaluatedDictionary
[ "def", "getEvaluatedDictionary", "(", "evaluationKeys", ",", "xmlElement", ")", ":", "evaluatedDictionary", "=", "{", "}", "zeroLength", "=", "(", "len", "(", "evaluationKeys", ")", "==", "0", ")", "for", "key", "in", "xmlElement", ".", "attributeDictionary", ".", "keys", "(", ")", ":", "if", "(", "(", "key", "in", "evaluationKeys", ")", "or", "zeroLength", ")", ":", "value", "=", "getEvaluatedValueObliviously", "(", "key", ",", "xmlElement", ")", "if", "(", "value", "==", "None", ")", ":", "valueString", "=", "str", "(", "xmlElement", ".", "attributeDictionary", "[", "key", "]", ")", "print", "'Warning, getEvaluatedDictionary in evaluate can not get a value for:'", "print", "valueString", "evaluatedDictionary", "[", "(", "key", "+", "'__Warning__'", ")", "]", "=", "(", "'Can not evaluate: '", "+", "valueString", ".", "replace", "(", "'\"'", ",", "' '", ")", ".", "replace", "(", "\"'\"", ",", "' '", ")", ")", "else", ":", "evaluatedDictionary", "[", "key", "]", "=", "value", "return", "evaluatedDictionary" ]
get the evaluated dictionary .
train
false
3,458
def get_next_url(request): if ('next' in request.POST): url = request.POST.get('next') elif ('next' in request.GET): url = request.GET.get('next') else: url = request.META.get('HTTP_REFERER') if ((not settings.DEBUG) and (not is_safe_url(url, Site.objects.get_current().domain))): return None return url
[ "def", "get_next_url", "(", "request", ")", ":", "if", "(", "'next'", "in", "request", ".", "POST", ")", ":", "url", "=", "request", ".", "POST", ".", "get", "(", "'next'", ")", "elif", "(", "'next'", "in", "request", ".", "GET", ")", ":", "url", "=", "request", ".", "GET", ".", "get", "(", "'next'", ")", "else", ":", "url", "=", "request", ".", "META", ".", "get", "(", "'HTTP_REFERER'", ")", "if", "(", "(", "not", "settings", ".", "DEBUG", ")", "and", "(", "not", "is_safe_url", "(", "url", ",", "Site", ".", "objects", ".", "get_current", "(", ")", ".", "domain", ")", ")", ")", ":", "return", "None", "return", "url" ]
given a request object .
train
false
3,459
def cleanup_html(html): match = _body_re.search(html) if match: html = html[match.end():] match = _end_body_re.search(html) if match: html = html[:match.start()] html = _ins_del_re.sub('', html) return html
[ "def", "cleanup_html", "(", "html", ")", ":", "match", "=", "_body_re", ".", "search", "(", "html", ")", "if", "match", ":", "html", "=", "html", "[", "match", ".", "end", "(", ")", ":", "]", "match", "=", "_end_body_re", ".", "search", "(", "html", ")", "if", "match", ":", "html", "=", "html", "[", ":", "match", ".", "start", "(", ")", "]", "html", "=", "_ins_del_re", ".", "sub", "(", "''", ",", "html", ")", "return", "html" ]
this cleans the html .
train
true
3,460
def splitquery(url): global _queryprog if (_queryprog is None): import re _queryprog = re.compile('^(.*)\\?([^?]*)$') match = _queryprog.match(url) if match: return match.group(1, 2) return (url, None)
[ "def", "splitquery", "(", "url", ")", ":", "global", "_queryprog", "if", "(", "_queryprog", "is", "None", ")", ":", "import", "re", "_queryprog", "=", "re", ".", "compile", "(", "'^(.*)\\\\?([^?]*)$'", ")", "match", "=", "_queryprog", ".", "match", "(", "url", ")", "if", "match", ":", "return", "match", ".", "group", "(", "1", ",", "2", ")", "return", "(", "url", ",", "None", ")" ]
splitquery('/path?query') --> '/path', 'query' .
train
true
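A self-contained doctest-style check of the behavior above; the helper is inlined here so the sketch runs on its own (the original lives in Python 2's urllib, with the regex compiled lazily into a module global):

    # Standalone sketch of splitquery: split a URL at the first '?'.
    import re

    _queryprog = re.compile(r'^(.*)\?([^?]*)$')

    def splitquery(url):
        match = _queryprog.match(url)
        if match:
            return match.group(1, 2)
        return url, None

    assert splitquery('/path?query=1') == ('/path', 'query=1')
    assert splitquery('/path') == ('/path', None)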
3,461
def compare_domains(urls): first_domain = None for url in urls: try: if ((not urlparse.urlparse(url).scheme) and (not url.startswith('/'))): url = ('//' + url) parsed = urlparse.urlparse(url.lower(), 'http') domain = (parsed.scheme, parsed.hostname, parsed.port) except ValueError: return False if (not first_domain): first_domain = domain continue if (first_domain != domain): return False return True
[ "def", "compare_domains", "(", "urls", ")", ":", "first_domain", "=", "None", "for", "url", "in", "urls", ":", "try", ":", "if", "(", "(", "not", "urlparse", ".", "urlparse", "(", "url", ")", ".", "scheme", ")", "and", "(", "not", "url", ".", "startswith", "(", "'/'", ")", ")", ")", ":", "url", "=", "(", "'//'", "+", "url", ")", "parsed", "=", "urlparse", ".", "urlparse", "(", "url", ".", "lower", "(", ")", ",", "'http'", ")", "domain", "=", "(", "parsed", ".", "scheme", ",", "parsed", ".", "hostname", ",", "parsed", ".", "port", ")", "except", "ValueError", ":", "return", "False", "if", "(", "not", "first_domain", ")", ":", "first_domain", "=", "domain", "continue", "if", "(", "first_domain", "!=", "domain", ")", ":", "return", "False", "return", "True" ]
return true if the domains of the provided urls are the same .
train
false
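A Python 3 sketch of the same logic (the snippet above targets Python 2's urlparse module; urllib.parse is the Python 3 equivalent, and the behavior shown here is otherwise unchanged):

    # compare_domains treats scheme, hostname, and port as the "domain";
    # a bare host like 'example.com/b' is promoted to '//example.com/b'
    # so urlparse sees it as a netloc rather than a path.
    from urllib.parse import urlparse

    def compare_domains(urls):
        first_domain = None
        for url in urls:
            try:
                if not urlparse(url).scheme and not url.startswith('/'):
                    url = '//' + url
                parsed = urlparse(url.lower(), 'http')
                domain = (parsed.scheme, parsed.hostname, parsed.port)
            except ValueError:
                return False
            if not first_domain:
                first_domain = domain
                continue
            if first_domain != domain:
                return False
        return True

    print(compare_domains(['http://Example.com/a', 'example.com/b']))      # True
    print(compare_domains(['http://example.com', 'https://example.com']))  # False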
3,462
def compare_alignments(old_list, new_list): if (len(old_list) != len(new_list)): raise ValueError(('%i vs %i alignments' % (len(old_list), len(new_list)))) for (old, new) in zip(old_list, new_list): if (len(old) != len(new)): raise ValueError(('Alignment with %i vs %i records' % (len(old), len(new)))) compare_records(old, new) return True
[ "def", "compare_alignments", "(", "old_list", ",", "new_list", ")", ":", "if", "(", "len", "(", "old_list", ")", "!=", "len", "(", "new_list", ")", ")", ":", "raise", "ValueError", "(", "(", "'%i vs %i alignments'", "%", "(", "len", "(", "old_list", ")", ",", "len", "(", "new_list", ")", ")", ")", ")", "for", "(", "old", ",", "new", ")", "in", "zip", "(", "old_list", ",", "new_list", ")", ":", "if", "(", "len", "(", "old", ")", "!=", "len", "(", "new", ")", ")", ":", "raise", "ValueError", "(", "(", "'Alignment with %i vs %i records'", "%", "(", "len", "(", "old", ")", ",", "len", "(", "new", ")", ")", ")", ")", "compare_records", "(", "old", ",", "new", ")", "return", "True" ]
check two lists of alignments agree .
train
false
3,463
def user_can_skip_entrance_exam(user, course): if (not course_has_entrance_exam(course)): return True if (not user.is_authenticated()): return False if has_access(user, 'staff', course): return True if EntranceExamConfiguration.user_can_skip_entrance_exam(user, course.id): return True if (not get_entrance_exam_content(user, course)): return True return False
[ "def", "user_can_skip_entrance_exam", "(", "user", ",", "course", ")", ":", "if", "(", "not", "course_has_entrance_exam", "(", "course", ")", ")", ":", "return", "True", "if", "(", "not", "user", ".", "is_authenticated", "(", ")", ")", ":", "return", "False", "if", "has_access", "(", "user", ",", "'staff'", ",", "course", ")", ":", "return", "True", "if", "EntranceExamConfiguration", ".", "user_can_skip_entrance_exam", "(", "user", ",", "course", ".", "id", ")", ":", "return", "True", "if", "(", "not", "get_entrance_exam_content", "(", "user", ",", "course", ")", ")", ":", "return", "True", "return", "False" ]
checks all of the various override conditions for a user to skip an entrance exam; begins by short-circuiting if the course does not have an entrance exam .
train
false
3,464
def indices(shape): iterables = [range(v) for v in shape] return product(*iterables)
[ "def", "indices", "(", "shape", ")", ":", "iterables", "=", "[", "range", "(", "v", ")", "for", "v", "in", "shape", "]", "return", "product", "(", "*", "iterables", ")" ]
show all indices in the database cli example: .
train
false
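The attached description looks like it was paired with a different function's docstring; the snippet itself just enumerates every coordinate of a shape. A minimal self-contained sketch:

    # indices((2, 3)) walks every coordinate of a 2x3 grid in
    # row-major order, lazily, via the Cartesian product of ranges.
    from itertools import product

    def indices(shape):
        iterables = [range(v) for v in shape]
        return product(*iterables)

    print(list(indices((2, 3))))
    # [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]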
3,465
def unichr_safe(index): try: return unichr(index) except ValueError: return unichr(65533)
[ "def", "unichr_safe", "(", "index", ")", ":", "try", ":", "return", "unichr", "(", "index", ")", "except", "ValueError", ":", "return", "unichr", "(", "65533", ")" ]
return the unicode character corresponding to the index .
train
false
3,467
def get_time_units(time_amount): time_unit = 's' if (time_amount > 60): time_amount /= 60 time_unit = 'm' if (time_amount > 60): time_amount /= 60 time_unit = 'h' return (time_amount, time_unit)
[ "def", "get_time_units", "(", "time_amount", ")", ":", "time_unit", "=", "'s'", "if", "(", "time_amount", ">", "60", ")", ":", "time_amount", "/=", "60", "time_unit", "=", "'m'", "if", "(", "time_amount", ">", "60", ")", ":", "time_amount", "/=", "60", "time_unit", "=", "'h'", "return", "(", "time_amount", ",", "time_unit", ")" ]
get a normalized length of time in the largest unit of time .
train
false
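A quick check of the record above, using float inputs to sidestep the integer-division difference between Python 2 (where this snippet likely originated) and Python 3:

    # Sketch of get_time_units: repeatedly divide by 60 while the
    # amount exceeds a minute/hour boundary, tracking the unit.
    def get_time_units(time_amount):
        time_unit = 's'
        if time_amount > 60:
            time_amount /= 60
            time_unit = 'm'
        if time_amount > 60:
            time_amount /= 60
            time_unit = 'h'
        return time_amount, time_unit

    print(get_time_units(45.0))    # (45.0, 's')
    print(get_time_units(90.0))    # (1.5, 'm')
    print(get_time_units(7200.0))  # (2.0, 'h')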
3,468
def test_history_import_relpath_in_metadata(): with HistoryArchive() as history_archive: history_archive.write_metafiles(dataset_file_name='../outside.txt') history_archive.write_file('datasets/Pasted_Entry_1.txt', 'foo') history_archive.write_outside() _run_jihaw_cleanup(history_archive, 'Relative parent path in datasets_attrs.txt allowed')
[ "def", "test_history_import_relpath_in_metadata", "(", ")", ":", "with", "HistoryArchive", "(", ")", "as", "history_archive", ":", "history_archive", ".", "write_metafiles", "(", "dataset_file_name", "=", "'../outside.txt'", ")", "history_archive", ".", "write_file", "(", "'datasets/Pasted_Entry_1.txt'", ",", "'foo'", ")", "history_archive", ".", "write_outside", "(", ")", "_run_jihaw_cleanup", "(", "history_archive", ",", "'Relative parent path in datasets_attrs.txt allowed'", ")" ]
ensure that dataset_attrs .
train
false
3,469
def ensure_valid(obj): if (not obj.isValid()): raise QtValueError(obj)
[ "def", "ensure_valid", "(", "obj", ")", ":", "if", "(", "not", "obj", ".", "isValid", "(", ")", ")", ":", "raise", "QtValueError", "(", "obj", ")" ]
ensure a qt object with an .isValid() method is valid .
train
false
3,470
def _invalid(m, comment=INVALID_RESPONSE, out=None): return _set_status(m, status=False, comment=comment, out=out)
[ "def", "_invalid", "(", "m", ",", "comment", "=", "INVALID_RESPONSE", ",", "out", "=", "None", ")", ":", "return", "_set_status", "(", "m", ",", "status", "=", "False", ",", "comment", "=", "comment", ",", "out", "=", "out", ")" ]
return invalid status .
train
true
3,471
def has_children_visible_to_specific_content_groups(xblock): if (not xblock.has_children): return False for child in xblock.get_children(): if is_visible_to_specific_content_groups(child): return True return False
[ "def", "has_children_visible_to_specific_content_groups", "(", "xblock", ")", ":", "if", "(", "not", "xblock", ".", "has_children", ")", ":", "return", "False", "for", "child", "in", "xblock", ".", "get_children", "(", ")", ":", "if", "is_visible_to_specific_content_groups", "(", "child", ")", ":", "return", "True", "return", "False" ]
returns true if this xblock has children that are limited to specific content groups .
train
false
3,473
def marketing_link(name): link_map = settings.MKTG_URL_LINK_MAP enable_mktg_site = configuration_helpers.get_value('ENABLE_MKTG_SITE', settings.FEATURES.get('ENABLE_MKTG_SITE', False)) marketing_urls = configuration_helpers.get_value('MKTG_URLS', settings.MKTG_URLS) if (enable_mktg_site and (name in marketing_urls)): if (name == 'ROOT'): return marketing_urls.get('ROOT') return urljoin(marketing_urls.get('ROOT'), marketing_urls.get(name)) elif ((not enable_mktg_site) and (name in link_map)): if (link_map[name] is not None): return reverse(link_map[name]) else: log.debug('Cannot find corresponding link for name: %s', name) return '#'
[ "def", "marketing_link", "(", "name", ")", ":", "link_map", "=", "settings", ".", "MKTG_URL_LINK_MAP", "enable_mktg_site", "=", "configuration_helpers", ".", "get_value", "(", "'ENABLE_MKTG_SITE'", ",", "settings", ".", "FEATURES", ".", "get", "(", "'ENABLE_MKTG_SITE'", ",", "False", ")", ")", "marketing_urls", "=", "configuration_helpers", ".", "get_value", "(", "'MKTG_URLS'", ",", "settings", ".", "MKTG_URLS", ")", "if", "(", "enable_mktg_site", "and", "(", "name", "in", "marketing_urls", ")", ")", ":", "if", "(", "name", "==", "'ROOT'", ")", ":", "return", "marketing_urls", ".", "get", "(", "'ROOT'", ")", "return", "urljoin", "(", "marketing_urls", ".", "get", "(", "'ROOT'", ")", ",", "marketing_urls", ".", "get", "(", "name", ")", ")", "elif", "(", "(", "not", "enable_mktg_site", ")", "and", "(", "name", "in", "link_map", ")", ")", ":", "if", "(", "link_map", "[", "name", "]", "is", "not", "None", ")", ":", "return", "reverse", "(", "link_map", "[", "name", "]", ")", "else", ":", "log", ".", "debug", "(", "'Cannot find corresponding link for name: %s'", ",", "name", ")", "return", "'#'" ]
returns the correct url for a link to the marketing site depending on whether the marketing site is enabled; since the marketing site is enabled by a setting .
train
false
3,474
def jobcheck(**kwargs): if (not kwargs): return {'error': 'You have given a condition'} return _atq(**kwargs)
[ "def", "jobcheck", "(", "**", "kwargs", ")", ":", "if", "(", "not", "kwargs", ")", ":", "return", "{", "'error'", ":", "'You have given a condition'", "}", "return", "_atq", "(", "**", "kwargs", ")" ]
check the job from queue .
train
false
3,475
def fetch_streams(plugin): return plugin.get_streams(stream_types=args.stream_types, sorting_excludes=args.stream_sorting_excludes)
[ "def", "fetch_streams", "(", "plugin", ")", ":", "return", "plugin", ".", "get_streams", "(", "stream_types", "=", "args", ".", "stream_types", ",", "sorting_excludes", "=", "args", ".", "stream_sorting_excludes", ")" ]
fetches streams using correct parameters .
train
false
3,476
def reverse_ip(ip_addr): return ipaddress.ip_address(ip_addr).reverse_pointer
[ "def", "reverse_ip", "(", "ip_addr", ")", ":", "return", "ipaddress", ".", "ip_address", "(", "ip_addr", ")", ".", "reverse_pointer" ]
returns the reversed ip address .
train
false
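The record above is a one-liner over the stdlib ipaddress module, whose reverse_pointer property builds the name used for reverse-DNS (PTR) lookups; a quick standalone check:

    # reverse_pointer works for both IPv4 and IPv6 addresses.
    import ipaddress

    def reverse_ip(ip_addr):
        return ipaddress.ip_address(ip_addr).reverse_pointer

    print(reverse_ip('192.0.2.1'))
    # 1.2.0.192.in-addr.arpa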
3,477
def _get_lines(file_path, line_prefixes, parameter): try: remaining_prefixes = list(line_prefixes) (proc_file, results) = (open(file_path), {}) for line in proc_file: if (not remaining_prefixes): break for prefix in remaining_prefixes: if line.startswith(prefix): results[prefix] = line remaining_prefixes.remove(prefix) break proc_file.close() if remaining_prefixes: if (len(remaining_prefixes) == 1): msg = ('%s did not contain a %s entry' % (file_path, remaining_prefixes[0])) else: msg = ('%s did not contain %s entries' % (file_path, ', '.join(remaining_prefixes))) raise IOError(msg) else: return results except IOError as exc: _log_failure(parameter, exc) raise exc
[ "def", "_get_lines", "(", "file_path", ",", "line_prefixes", ",", "parameter", ")", ":", "try", ":", "remaining_prefixes", "=", "list", "(", "line_prefixes", ")", "(", "proc_file", ",", "results", ")", "=", "(", "open", "(", "file_path", ")", ",", "{", "}", ")", "for", "line", "in", "proc_file", ":", "if", "(", "not", "remaining_prefixes", ")", ":", "break", "for", "prefix", "in", "remaining_prefixes", ":", "if", "line", ".", "startswith", "(", "prefix", ")", ":", "results", "[", "prefix", "]", "=", "line", "remaining_prefixes", ".", "remove", "(", "prefix", ")", "break", "proc_file", ".", "close", "(", ")", "if", "remaining_prefixes", ":", "if", "(", "len", "(", "remaining_prefixes", ")", "==", "1", ")", ":", "msg", "=", "(", "'%s did not contain a %s entry'", "%", "(", "file_path", ",", "remaining_prefixes", "[", "0", "]", ")", ")", "else", ":", "msg", "=", "(", "'%s did not contain %s entries'", "%", "(", "file_path", ",", "', '", ".", "join", "(", "remaining_prefixes", ")", ")", ")", "raise", "IOError", "(", "msg", ")", "else", ":", "return", "results", "except", "IOError", "as", "exc", ":", "_log_failure", "(", "parameter", ",", "exc", ")", "raise", "exc" ]
fetches lines with the given prefixes from a file .
train
false
3,479
def _check_precisions_full(precisions, covariance_type): for (k, prec) in enumerate(precisions): prec = _check_precision_matrix(prec, covariance_type)
[ "def", "_check_precisions_full", "(", "precisions", ",", "covariance_type", ")", ":", "for", "(", "k", ",", "prec", ")", "in", "enumerate", "(", "precisions", ")", ":", "prec", "=", "_check_precision_matrix", "(", "prec", ",", "covariance_type", ")" ]
check the precision matrices are symmetric and positive-definite .
train
false
3,480
def get_monitors(account_name, monitor_names, debug=False): requested_mons = [] account = get_account_by_name(account_name) account_manager = account_registry.get(account.account_type.name)() for monitor_name in monitor_names: watcher_class = watcher_registry[monitor_name] if account_manager.is_compatible_with_account_type(watcher_class.account_type): monitor = Monitor(watcher_class, account, debug) requested_mons.append(monitor) return requested_mons
[ "def", "get_monitors", "(", "account_name", ",", "monitor_names", ",", "debug", "=", "False", ")", ":", "requested_mons", "=", "[", "]", "account", "=", "get_account_by_name", "(", "account_name", ")", "account_manager", "=", "account_registry", ".", "get", "(", "account", ".", "account_type", ".", "name", ")", "(", ")", "for", "monitor_name", "in", "monitor_names", ":", "watcher_class", "=", "watcher_registry", "[", "monitor_name", "]", "if", "account_manager", ".", "is_compatible_with_account_type", "(", "watcher_class", ".", "account_type", ")", ":", "monitor", "=", "Monitor", "(", "watcher_class", ",", "account", ",", "debug", ")", "requested_mons", ".", "append", "(", "monitor", ")", "return", "requested_mons" ]
returns a list of monitors in the correct audit order which apply to one or more of the accounts .
train
false
3,481
def getIntegerString(number): return str(int(number))
[ "def", "getIntegerString", "(", "number", ")", ":", "return", "str", "(", "int", "(", "number", ")", ")" ]
get integer as string .
train
false
3,482
def check_path(options, rootdir=None, candidates=None, code=None): if (not candidates): candidates = [] for path_ in options.paths: path = op.abspath(path_) if op.isdir(path): for (root, _, files) in walk(path): candidates += [op.relpath(op.join(root, f), CURDIR) for f in files] else: candidates.append(path) if (rootdir is None): rootdir = (path if op.isdir(path) else op.dirname(path)) paths = [] for path in candidates: if ((not options.force) and (not any((l.allow(path) for (_, l) in options.linters)))): continue if (not op.exists(path)): continue paths.append(path) if options.async: return check_async(paths, options, rootdir) errors = [] for path in paths: errors += run(path=path, code=code, rootdir=rootdir, options=options) return errors
[ "def", "check_path", "(", "options", ",", "rootdir", "=", "None", ",", "candidates", "=", "None", ",", "code", "=", "None", ")", ":", "if", "(", "not", "candidates", ")", ":", "candidates", "=", "[", "]", "for", "path_", "in", "options", ".", "paths", ":", "path", "=", "op", ".", "abspath", "(", "path_", ")", "if", "op", ".", "isdir", "(", "path", ")", ":", "for", "(", "root", ",", "_", ",", "files", ")", "in", "walk", "(", "path", ")", ":", "candidates", "+=", "[", "op", ".", "relpath", "(", "op", ".", "join", "(", "root", ",", "f", ")", ",", "CURDIR", ")", "for", "f", "in", "files", "]", "else", ":", "candidates", ".", "append", "(", "path", ")", "if", "(", "rootdir", "is", "None", ")", ":", "rootdir", "=", "(", "path", "if", "op", ".", "isdir", "(", "path", ")", "else", "op", ".", "dirname", "(", "path", ")", ")", "paths", "=", "[", "]", "for", "path", "in", "candidates", ":", "if", "(", "(", "not", "options", ".", "force", ")", "and", "(", "not", "any", "(", "(", "l", ".", "allow", "(", "path", ")", "for", "(", "_", ",", "l", ")", "in", "options", ".", "linters", ")", ")", ")", ")", ":", "continue", "if", "(", "not", "op", ".", "exists", "(", "path", ")", ")", ":", "continue", "paths", ".", "append", "(", "path", ")", "if", "options", ".", "async", ":", "return", "check_async", "(", "paths", ",", "options", ",", "rootdir", ")", "errors", "=", "[", "]", "for", "path", "in", "paths", ":", "errors", "+=", "run", "(", "path", "=", "path", ",", "code", "=", "code", ",", "rootdir", "=", "rootdir", ",", "options", "=", "options", ")", "return", "errors" ]
check sys .
train
true
3,483
def pathContainingDumpOf(testCase, *dumpables): fname = testCase.mktemp() with open(fname, 'wb') as f: for dumpable in dumpables: f.write(dumpable.dump(FILETYPE_PEM)) return fname
[ "def", "pathContainingDumpOf", "(", "testCase", ",", "*", "dumpables", ")", ":", "fname", "=", "testCase", ".", "mktemp", "(", ")", "with", "open", "(", "fname", ",", "'wb'", ")", "as", "f", ":", "for", "dumpable", "in", "dumpables", ":", "f", ".", "write", "(", "dumpable", ".", "dump", "(", "FILETYPE_PEM", ")", ")", "return", "fname" ]
create a temporary file to store some serializable-as-pem objects in .
train
false
3,484
def test_bootstrap_axis(): x = rs.randn(10, 20) n_boot = 100 out_default = algo.bootstrap(x, n_boot=n_boot) assert_equal(out_default.shape, (n_boot,)) out_axis = algo.bootstrap(x, n_boot=n_boot, axis=0) assert_equal(out_axis.shape, (n_boot, 20))
[ "def", "test_bootstrap_axis", "(", ")", ":", "x", "=", "rs", ".", "randn", "(", "10", ",", "20", ")", "n_boot", "=", "100", "out_default", "=", "algo", ".", "bootstrap", "(", "x", ",", "n_boot", "=", "n_boot", ")", "assert_equal", "(", "out_default", ".", "shape", ",", "(", "n_boot", ",", ")", ")", "out_axis", "=", "algo", ".", "bootstrap", "(", "x", ",", "n_boot", "=", "n_boot", ",", "axis", "=", "0", ")", "assert_equal", "(", "out_axis", ".", "shape", ",", "(", "n_boot", ",", "20", ")", ")" ]
test axis kwarg to bootstrap function .
train
false
3,485
def _retry_over_time(fun, catch, args=[], kwargs={}, errback=None, max_retries=None, interval_start=2, interval_step=2, interval_max=30): retries = 0 interval_range = __fxrange(interval_start, (interval_max + interval_start), interval_step, repeatlast=True) for retries in count(): try: return fun(*args, **kwargs) except catch as exc: if (max_retries and (retries >= max_retries)): raise tts = float((errback(exc, interval_range, retries) if errback else next(interval_range))) if tts: sleep(tts)
[ "def", "_retry_over_time", "(", "fun", ",", "catch", ",", "args", "=", "[", "]", ",", "kwargs", "=", "{", "}", ",", "errback", "=", "None", ",", "max_retries", "=", "None", ",", "interval_start", "=", "2", ",", "interval_step", "=", "2", ",", "interval_max", "=", "30", ")", ":", "retries", "=", "0", "interval_range", "=", "__fxrange", "(", "interval_start", ",", "(", "interval_max", "+", "interval_start", ")", ",", "interval_step", ",", "repeatlast", "=", "True", ")", "for", "retries", "in", "count", "(", ")", ":", "try", ":", "return", "fun", "(", "*", "args", ",", "**", "kwargs", ")", "except", "catch", "as", "exc", ":", "if", "(", "max_retries", "and", "(", "retries", ">=", "max_retries", ")", ")", ":", "raise", "tts", "=", "float", "(", "(", "errback", "(", "exc", ",", "interval_range", ",", "retries", ")", "if", "errback", "else", "next", "(", "interval_range", ")", ")", ")", "if", "tts", ":", "sleep", "(", "tts", ")" ]
retry the function over and over until max retries is exceeded .
train
true
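The snippet above matches the shape of kombu's retry_over_time helper (it leans on a private __fxrange generator for the backoff schedule). A simplified standalone sketch of the same pattern, not the library's exact helper:

    # Retry-over-time pattern: call fun until it succeeds, sleeping
    # along a capped linear backoff schedule, and re-raise the caught
    # exception once max_retries is exceeded.
    import time
    from itertools import count

    def retry_over_time(fun, catch, max_retries=None,
                        interval_start=2, interval_step=2, interval_max=30):
        for retries in count():
            try:
                return fun()
            except catch:
                if max_retries is not None and retries >= max_retries:
                    raise
                tts = min(interval_start + retries * interval_step, interval_max)
                time.sleep(tts)

    attempts = {'n': 0}

    def flaky():
        attempts['n'] += 1
        if attempts['n'] < 3:
            raise ConnectionError('not yet')
        return 'ok'

    print(retry_over_time(flaky, ConnectionError, max_retries=5))  # 'ok'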
3,486
def test_coord_init_array(): for a in ([u'1 2', u'3 4'], [[u'1', u'2'], [u'3', u'4']], [[1, 2], [3, 4]]): sc = SkyCoord(a, unit=u'deg') assert allclose((sc.ra - ([1, 3] * u.deg)), (0 * u.deg)) assert allclose((sc.dec - ([2, 4] * u.deg)), (0 * u.deg)) sc = SkyCoord(np.array(a), unit=u'deg') assert allclose((sc.ra - ([1, 3] * u.deg)), (0 * u.deg)) assert allclose((sc.dec - ([2, 4] * u.deg)), (0 * u.deg))
[ "def", "test_coord_init_array", "(", ")", ":", "for", "a", "in", "(", "[", "u'1 2'", ",", "u'3 4'", "]", ",", "[", "[", "u'1'", ",", "u'2'", "]", ",", "[", "u'3'", ",", "u'4'", "]", "]", ",", "[", "[", "1", ",", "2", "]", ",", "[", "3", ",", "4", "]", "]", ")", ":", "sc", "=", "SkyCoord", "(", "a", ",", "unit", "=", "u'deg'", ")", "assert", "allclose", "(", "(", "sc", ".", "ra", "-", "(", "[", "1", ",", "3", "]", "*", "u", ".", "deg", ")", ")", ",", "(", "0", "*", "u", ".", "deg", ")", ")", "assert", "allclose", "(", "(", "sc", ".", "dec", "-", "(", "[", "2", ",", "4", "]", "*", "u", ".", "deg", ")", ")", ",", "(", "0", "*", "u", ".", "deg", ")", ")", "sc", "=", "SkyCoord", "(", "np", ".", "array", "(", "a", ")", ",", "unit", "=", "u'deg'", ")", "assert", "allclose", "(", "(", "sc", ".", "ra", "-", "(", "[", "1", ",", "3", "]", "*", "u", ".", "deg", ")", ")", ",", "(", "0", "*", "u", ".", "deg", ")", ")", "assert", "allclose", "(", "(", "sc", ".", "dec", "-", "(", "[", "2", ",", "4", "]", "*", "u", ".", "deg", ")", ")", ",", "(", "0", "*", "u", ".", "deg", ")", ")" ]
input in the form of a list, array, or numpy array .
train
false