id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
28,232
def _uninstall_flocker_centos7():
    """Return an effect that uninstalls the flocker packages on a
    CentOS 7 / RHEL 7 node: stop the systemd units, then erase the
    packages and wipe the clusterhq yum caches."""
    cleanup = [
        run_from_args(['yum', 'erase', '-y', 'clusterhq-python-flocker']),
        wipe_yum_cache(repository='clusterhq'),
        wipe_yum_cache(repository='clusterhq-testing'),
        run_from_args(['yum', 'erase', '-y', 'clusterhq-release']),
    ]
    return sequence(_disable_flocker_systemd() + cleanup)
[ "def", "_uninstall_flocker_centos7", "(", ")", ":", "return", "sequence", "(", "(", "_disable_flocker_systemd", "(", ")", "+", "[", "run_from_args", "(", "[", "'yum'", ",", "'erase'", ",", "'-y'", ",", "'clusterhq-python-flocker'", "]", ")", ",", "wipe_yum_cache...
return an effect for uninstalling the flocker package from a centos 7 or rhel 7 .
train
false
28,233
def _generate_bq_schema(df, default_type='STRING'): type_mapping = {'i': 'INTEGER', 'b': 'BOOLEAN', 'f': 'FLOAT', 'O': 'STRING', 'S': 'STRING', 'U': 'STRING', 'M': 'TIMESTAMP'} fields = [] for (column_name, dtype) in df.dtypes.iteritems(): fields.append({'name': column_name, 'type': type_mapping.get(dtype.kind, default_type)}) return {'fields': fields}
[ "def", "_generate_bq_schema", "(", "df", ",", "default_type", "=", "'STRING'", ")", ":", "type_mapping", "=", "{", "'i'", ":", "'INTEGER'", ",", "'b'", ":", "'BOOLEAN'", ",", "'f'", ":", "'FLOAT'", ",", "'O'", ":", "'STRING'", ",", "'S'", ":", "'STRING'"...
given a passed df .
train
false
28,234
def emit_message(message, stream=None):
    """Write *message* plus a newline to *stream* and flush.

    Defaults to ``sys.stderr`` when *stream* is None.  The original
    interpolated via ``'%(message)s\\n' % vars()``, an obscure way of
    formatting a single local; an explicit ``%s`` is equivalent
    (same str() conversion) and clearer.
    """
    if stream is None:
        stream = sys.stderr
    stream.write('%s\n' % (message,))
    stream.flush()
[ "def", "emit_message", "(", "message", ",", "stream", "=", "None", ")", ":", "if", "(", "stream", "is", "None", ")", ":", "stream", "=", "sys", ".", "stderr", "stream", ".", "write", "(", "(", "'%(message)s\\n'", "%", "vars", "(", ")", ")", ")", "s...
emit a message to the specified stream .
train
false
28,236
@pytest.mark.django_db
def test_directory_create_name_with_slashes_or_backslashes(root):
    """Directory names containing '/' or '\\' must fail validation."""
    for bad_name in ('slashed/name', 'backslashed\\name'):
        with pytest.raises(ValidationError):
            Directory.objects.create(name=bad_name, parent=root)
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_directory_create_name_with_slashes_or_backslashes", "(", "root", ")", ":", "with", "pytest", ".", "raises", "(", "ValidationError", ")", ":", "Directory", ".", "objects", ".", "create", "(", "name", "=",...
test directories are not created with slashes on their name .
train
false
28,237
def ensure_user_role_exists(keystone, user_name, tenant_name, role_name, check_mode):
    """Ensure *role_name* is assigned to *user_name* for *tenant_name*.

    Returns a ``(changed, role_id)`` tuple.  In check mode nothing is
    mutated and ``(True, None)`` is returned when a change would occur.
    Raises ValueError if the user already has multiple roles of that name.
    """
    user = get_user(keystone, user_name)
    tenant = get_tenant(keystone, tenant_name)
    matching = [r for r in keystone.roles.roles_for_user(user, tenant)
                if r.name == role_name]
    count = len(matching)
    if count == 1:
        return (False, matching[0].id)
    if count > 1:
        raise ValueError('%d roles with name %s' % (count, role_name))
    if check_mode:
        return (True, None)
    try:
        role = get_role(keystone, role_name)
    except KeyError:
        # Role does not exist globally yet; create it before assignment.
        role = keystone.roles.create(role_name)
    keystone.roles.add_user_role(user, role, tenant)
    return (True, role.id)
[ "def", "ensure_user_role_exists", "(", "keystone", ",", "user_name", ",", "tenant_name", ",", "role_name", ",", "check_mode", ")", ":", "user", "=", "get_user", "(", "keystone", ",", "user_name", ")", "tenant", "=", "get_tenant", "(", "keystone", ",", "tenant_...
check if role exists return if a new role was created or if the role was newly assigned to the user for the tenant .
train
false
28,238
def finder(paths=None):
    """Create, show and raise a git-grep finder widget; return it."""
    parent = qtutils.active_window()
    widget = new_finder(paths=paths, parent=parent)
    widget.show()
    widget.raise_()
    return widget
[ "def", "finder", "(", "paths", "=", "None", ")", ":", "widget", "=", "new_finder", "(", "paths", "=", "paths", ",", "parent", "=", "qtutils", ".", "active_window", "(", ")", ")", "widget", ".", "show", "(", ")", "widget", ".", "raise_", "(", ")", "...
prompt and use git grep to find the content .
train
false
28,239
def get_summary(commit):
    """Return the commit's first message line with spaces replaced by
    dashes, suitable for use in a filename."""
    first_line = commit.message.splitlines()[0]
    return first_line.replace(' ', '-')
[ "def", "get_summary", "(", "commit", ")", ":", "return", "commit", ".", "message", ".", "splitlines", "(", ")", "[", "0", "]", ".", "replace", "(", "' '", ",", "'-'", ")" ]
determine the summary line for use in a filename .
train
false
28,240
def unicodeToFloat(val):
    """Convert a string from a wx dialog to a float via the current locale.

    The literal string 'None' maps to None; unparsable input also
    returns None.
    """
    if val == 'None':
        return None
    try:
        return locale.atof(val)
    except ValueError:
        return None
[ "def", "unicodeToFloat", "(", "val", ")", ":", "if", "(", "val", "==", "'None'", ")", ":", "val", "=", "None", "else", ":", "try", ":", "val", "=", "locale", ".", "atof", "(", "val", ")", "except", "ValueError", ":", "return", "None", "return", "va...
convert a unicode object from wx dialogs into a float .
train
false
28,241
def detach_required():
    """Return True unless the parent process is inetd or init, in which
    case daemonizing (detaching) is unnecessary."""
    return not (parent_is_inet() or parent_is_init())
[ "def", "detach_required", "(", ")", ":", "if", "(", "parent_is_inet", "(", ")", "or", "parent_is_init", "(", ")", ")", ":", "return", "False", "return", "True" ]
check if detaching is required this is done by collecting the results of parent_is_inet and parent_is_init .
train
false
28,243
def print_ospf_out(a_dict):
    """Print one OSPF interface section to stdout.

    Emits a leading blank line, then one right-aligned ``field: value``
    row for each known field present in *a_dict*.  Converted from
    Python 2 ``print`` statements (a syntax error under Python 3) to the
    print() function; output is unchanged.
    """
    field_order = ('Int', 'IP', 'Area', 'Type', 'Cost', 'Hello', 'Dead')
    print()
    for a_field in field_order:
        value = a_dict.get(a_field)
        if value is not None:
            print('%15s: %-20s' % (a_field, value))
[ "def", "print_ospf_out", "(", "a_dict", ")", ":", "field_order", "=", "(", "'Int'", ",", "'IP'", ",", "'Area'", ",", "'Type'", ",", "'Cost'", ",", "'Hello'", ",", "'Dead'", ")", "print", "for", "a_field", "in", "field_order", ":", "if", "(", "a_dict", ...
prints a given ospf interface section to stdout .
train
false
28,244
def start_next_music():
    """Start playing the next item from the current playlist immediately.

    Python 3 port: the ``print`` statement becomes print(), and the
    Python 2 ``playlist.next()`` call becomes the next() builtin
    (assumes current_playlist implements the py3 iterator protocol —
    TODO confirm).  Reads/updates module globals music_enabled,
    current_playlist, current_music, next_change_delay, change_delay.
    """
    global current_music, next_change_delay
    if music_enabled and current_playlist:
        next_music = next(current_playlist)
        if next_music:
            print('albow.music: loading', repr(next_music))
            music.load(next_music)
            music.play()
            next_change_delay = change_delay
            current_music = next_music
[ "def", "start_next_music", "(", ")", ":", "global", "current_music", ",", "next_change_delay", "if", "(", "music_enabled", "and", "current_playlist", ")", ":", "next_music", "=", "current_playlist", ".", "next", "(", ")", "if", "next_music", ":", "print", "'albo...
start playing the next item from the current playlist immediately .
train
false
28,245
def combine_dicts(*dicts):
    """Merge *dicts* left to right into a new dict.

    A value wrapped as ``ClearedValue(None)`` deletes the key from the
    result; every other value is unwrapped via _strip_clear_tag.  Falsy
    arguments (None, {}) are skipped.
    """
    merged = {}
    for mapping in dicts:
        if not mapping:
            continue
        for key, value in mapping.items():
            if isinstance(value, ClearedValue) and value.value is None:
                merged.pop(key, None)
            else:
                merged[key] = _strip_clear_tag(value)
    return merged
[ "def", "combine_dicts", "(", "*", "dicts", ")", ":", "result", "=", "{", "}", "for", "d", "in", "dicts", ":", "if", "d", ":", "for", "(", "k", ",", "v", ")", "in", "d", ".", "items", "(", ")", ":", "if", "(", "isinstance", "(", "v", ",", "C...
combines multiple dictionaries into one , applying cleared-value entries to delete keys and unwrapping tagged values .
train
false
28,247
def get_encoder(encoding, *args, **kwargs):
    """Return an AMF Encoder instance for *encoding* (AMF0 or AMF3).

    Prefers the C implementation (cpyamf) and falls back to the pure
    Python one (pyamf).  Raises ValueError for unknown encodings.
    """
    if encoding == AMF0:
        try:
            from cpyamf import amf0 as codec
        except ImportError:
            from pyamf import amf0 as codec
    elif encoding == AMF3:
        try:
            from cpyamf import amf3 as codec
        except ImportError:
            from pyamf import amf3 as codec
    else:
        raise ValueError('Unknown encoding %r' % (encoding,))
    return codec.Encoder(*args, **kwargs)
[ "def", "get_encoder", "(", "encoding", ",", "*", "args", ",", "**", "kwargs", ")", ":", "def", "_get_encoder_class", "(", ")", ":", "if", "(", "encoding", "==", "AMF0", ")", ":", "try", ":", "from", "cpyamf", "import", "amf0", "except", "ImportError", ...
returns a l{codec .
train
true
28,248
def get_initializable_comment_fields(context):
    """Return the set of fields the requester may set when creating a comment.

    This is the union of the fields editable by the requesting author and
    the fields that can only be set at creation time.
    """
    comment = Comment(user_id=context['cc_requester']['id'], type='comment')
    fields = get_editable_fields(comment, context)
    fields |= NON_UPDATABLE_COMMENT_FIELDS
    return fields
[ "def", "get_initializable_comment_fields", "(", "context", ")", ":", "ret", "=", "get_editable_fields", "(", "Comment", "(", "user_id", "=", "context", "[", "'cc_requester'", "]", "[", "'id'", "]", ",", "type", "=", "'comment'", ")", ",", "context", ")", "re...
return the set of fields that the requester can initialize for a comment any field that is editable by the author should also be initializable .
train
false
28,249
def init_env(project='default', set_syspath=True):
    """Initialise the environment so the command-line tool works inside a
    project directory: export SCRAPY_SETTINGS_MODULE from the config and
    optionally add the project directory to sys.path."""
    config = get_config()
    if config.has_option('settings', project):
        os.environ['SCRAPY_SETTINGS_MODULE'] = config.get('settings', project)
    closest_cfg = closest_scrapy_cfg()
    if not closest_cfg:
        return
    project_dir = os.path.dirname(closest_cfg)
    if set_syspath and project_dir not in sys.path:
        sys.path.append(project_dir)
[ "def", "init_env", "(", "project", "=", "'default'", ",", "set_syspath", "=", "True", ")", ":", "cfg", "=", "get_config", "(", ")", "if", "cfg", ".", "has_option", "(", "'settings'", ",", "project", ")", ":", "os", ".", "environ", "[", "'SCRAPY_SETTINGS_...
initialize environment to use command-line tool from inside a project dir .
train
false
28,252
def find_goroutine(goid):
    """Locate the goroutine with id *goid* via gdb.

    Walks the runtime.allg linked list; for a live matching goroutine
    returns a generator of its saved (pc, sp) cast to void*, otherwise
    (None, None).  Status 6 is skipped (presumably Gdead in the Go
    runtime — confirm against the runtime sources).
    """
    void_ptr = gdb.lookup_type('void').pointer()
    for g in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
        if g['status'] == 6:
            continue
        if g['goid'] == goid:
            return (g['sched'][reg].cast(void_ptr) for reg in ('pc', 'sp'))
    return (None, None)
[ "def", "find_goroutine", "(", "goid", ")", ":", "vp", "=", "gdb", ".", "lookup_type", "(", "'void'", ")", ".", "pointer", "(", ")", "for", "ptr", "in", "linked_list", "(", "gdb", ".", "parse_and_eval", "(", "\"'runtime.allg'\"", ")", ",", "'alllink'", ")...
find_goroutine attempts to find the goroutine identified by goid .
train
false
28,253
@csrf_protect
@permission_required('comments.can_moderate')
def approve(request, comment_id, next=None):
    """Approve a comment: POST performs the approval and redirects, any
    other method renders a confirmation page."""
    comment = get_object_or_404(comments.get_model(), pk=comment_id,
                                site__pk=settings.SITE_ID)
    if request.method != 'POST':
        context = template.RequestContext(request)
        return render_to_response('comments/approve.html',
                                  {'comment': comment, 'next': next},
                                  context)
    perform_approve(request, comment)
    return next_redirect(request, next, approve_done, c=comment.pk)
[ "@", "csrf_protect", "@", "permission_required", "(", "'comments.can_moderate'", ")", "def", "approve", "(", "request", ",", "comment_id", ",", "next", "=", "None", ")", ":", "comment", "=", "get_object_or_404", "(", "comments", ".", "get_model", "(", ")", ","...
approve a comment .
train
false
28,254
def _exponents(expr, x): def _exponents_(expr, x, res): if (expr == x): res.update([1]) return if (expr.is_Pow and (expr.base == x)): res.update([expr.exp]) return for arg in expr.args: _exponents_(arg, x, res) res = set() _exponents_(expr, x, res) return res
[ "def", "_exponents", "(", "expr", ",", "x", ")", ":", "def", "_exponents_", "(", "expr", ",", "x", ",", "res", ")", ":", "if", "(", "expr", "==", "x", ")", ":", "res", ".", "update", "(", "[", "1", "]", ")", "return", "if", "(", "expr", ".", ...
find the exponents of x in expr .
train
false
28,255
def test_assignment_and_copy():
    """Slice-assignment into a copied table must not leak back into the
    original, for both 'quantity' and 'arraywrap' mixin columns."""
    for col_name in ('quantity', 'arraywrap'):
        mixin = MIXIN_COLS[col_name]
        original = QTable([mixin], names=['m'])
        index_pairs = ((1, 2),
                       (slice(0, 2), slice(1, 3)),
                       (np.array([1, 2]), np.array([2, 3])))
        for dst, src in index_pairs:
            table = original.copy()
            table['m'][dst] = mixin[src]
            if col_name == 'arraywrap':
                # arraywrap columns are compared through their .data.
                assert np.all(table['m'].data[dst] == mixin.data[src])
                assert np.all(original['m'].data[dst] == mixin.data[dst])
                assert np.all(original['m'].data[dst] != table['m'].data[dst])
            else:
                assert np.all(table['m'][dst] == mixin[src])
                assert np.all(original['m'][dst] == mixin[dst])
                assert np.all(original['m'][dst] != table['m'][dst])
[ "def", "test_assignment_and_copy", "(", ")", ":", "for", "name", "in", "(", "'quantity'", ",", "'arraywrap'", ")", ":", "m", "=", "MIXIN_COLS", "[", "name", "]", "t0", "=", "QTable", "(", "[", "m", "]", ",", "names", "=", "[", "'m'", "]", ")", "for...
test that assignment of an int .
train
false
28,257
def selLexicase(individuals, k):
    """Lexicase selection: pick *k* individuals, each chosen by filtering
    the candidate pool on the fitness cases one at a time in random order.

    A positive weight means the case is maximised, otherwise minimised;
    any tie remaining after all cases is broken uniformly at random.
    """
    chosen = []
    for _ in range(k):
        weights = individuals[0].fitness.weights
        candidates = individuals
        case_order = list(range(len(individuals[0].fitness.values)))
        random.shuffle(case_order)
        while case_order and len(candidates) > 1:
            case = case_order.pop(0)
            pick_best = max if weights[case] > 0 else min
            best = pick_best(ind.fitness.values[case] for ind in candidates)
            candidates = [ind for ind in candidates
                          if ind.fitness.values[case] == best]
        chosen.append(random.choice(candidates))
    return chosen
[ "def", "selLexicase", "(", "individuals", ",", "k", ")", ":", "selected_individuals", "=", "[", "]", "for", "i", "in", "range", "(", "k", ")", ":", "fit_weights", "=", "individuals", "[", "0", "]", ".", "fitness", ".", "weights", "candidates", "=", "in...
returns an individual that does the best on the fitness cases when considered one at a time in random order .
train
false
28,258
def deactivatePdpContextAccept():
    """Build a DEACTIVATE PDP CONTEXT ACCEPT message (protocol header
    layered over the message-type byte)."""
    header = TpPd(pd=8)
    body = MessageType(mesType=71)
    return header / body
[ "def", "deactivatePdpContextAccept", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "8", ")", "b", "=", "MessageType", "(", "mesType", "=", "71", ")", "packet", "=", "(", "a", "/", "b", ")", "return", "packet" ]
deactivate pdp context accept section 9 .
train
true
28,260
def check_install_build_global(options, check_options=None):
    """Disable wheel usage when per-setup.py options are in effect.

    If any of build_options / global_options / install_options is set on
    *check_options* (defaulting to *options*), force no-binary for all
    packages and emit a warning.
    """
    if check_options is None:
        check_options = options
    names = ['build_options', 'global_options', 'install_options']
    if any(getattr(check_options, name, None) for name in names):
        fmt_ctl_no_binary(options.format_control)
        warnings.warn('Disabling all use of wheels due to the use of --build-options / --global-options / --install-options.', stacklevel=2)
[ "def", "check_install_build_global", "(", "options", ",", "check_options", "=", "None", ")", ":", "if", "(", "check_options", "is", "None", ")", ":", "check_options", "=", "options", "def", "getname", "(", "n", ")", ":", "return", "getattr", "(", "check_opti...
disable wheels if per-setup .
train
true
28,261
def get_instance_path(instance, forceold=False):
    """Determine the storage path for *instance*.

    Pre-Grizzly layouts keyed the directory by instance name; prefer
    that path when it already exists (or when *forceold* is set),
    otherwise use the uuid-based layout.
    """
    legacy = os.path.join(CONF.instances_path, instance['name'])
    if forceold or os.path.exists(legacy):
        return legacy
    return os.path.join(CONF.instances_path, instance['uuid'])
[ "def", "get_instance_path", "(", "instance", ",", "forceold", "=", "False", ")", ":", "pre_grizzly_name", "=", "os", ".", "path", ".", "join", "(", "CONF", ".", "instances_path", ",", "instance", "[", "'name'", "]", ")", "if", "(", "forceold", "or", "os"...
determine the correct path for instance storage .
train
false
28,263
@authenticated_rest_api_view(is_webhook=True)
@has_request_variables
def api_zendesk_webhook(request, user_profile, ticket_title=REQ(), ticket_id=REQ(), message=REQ(), stream=REQ(default='zendesk')):
    """Handle a Zendesk trigger webhook: post the templated message to a
    stream, with the subject built from the ticket id and title."""
    subject = truncate('#%s: %s' % (ticket_id, ticket_title), 60)
    client = get_client('ZulipZenDeskWebhook')
    check_send_message(user_profile, client, 'stream', [stream], subject,
                       message)
    return json_success()
[ "@", "authenticated_rest_api_view", "(", "is_webhook", "=", "True", ")", "@", "has_request_variables", "def", "api_zendesk_webhook", "(", "request", ",", "user_profile", ",", "ticket_title", "=", "REQ", "(", ")", ",", "ticket_id", "=", "REQ", "(", ")", ",", "m...
zendesk uses trigers with message templates .
train
false
28,264
def tags_action(client, stream_name, tags, action='create', check_mode=False):
    """Add or remove tags on a Kinesis stream.

    Returns ``(success, err_msg)``.  In check mode nothing is sent to
    AWS and any valid action simply reports success.
    """
    success = False
    err_msg = ''
    params = {'StreamName': stream_name}
    try:
        if check_mode:
            if action in ('create', 'delete'):
                success = True
            else:
                err_msg = 'Invalid action {0}'.format(action)
        elif action == 'create':
            params['Tags'] = tags
            client.add_tags_to_stream(**params)
            success = True
        elif action == 'delete':
            params['TagKeys'] = tags.keys()
            client.remove_tags_from_stream(**params)
            success = True
        else:
            err_msg = 'Invalid action {0}'.format(action)
    except botocore.exceptions.ClientError as e:
        err_msg = str(e)
    return (success, err_msg)
[ "def", "tags_action", "(", "client", ",", "stream_name", ",", "tags", ",", "action", "=", "'create'", ",", "check_mode", "=", "False", ")", ":", "success", "=", "False", "err_msg", "=", "''", "params", "=", "{", "'StreamName'", ":", "stream_name", "}", "...
create or delete multiple tags from a kinesis stream .
train
false
28,265
def __get_version(saltstack_version):
    """Return the Salt version recorded at installation time, falling
    back to discovering it (e.g. from git) when salt._version is absent."""
    try:
        from salt._version import __saltstack_version__
    except ImportError:
        return __discover_version(saltstack_version)
    return __saltstack_version__
[ "def", "__get_version", "(", "saltstack_version", ")", ":", "try", ":", "from", "salt", ".", "_version", "import", "__saltstack_version__", "return", "__saltstack_version__", "except", "ImportError", ":", "return", "__discover_version", "(", "saltstack_version", ")" ]
if we can get a version provided at installation time or from git .
train
false
28,268
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
    """Run the decimal test suite (plus doctests when running everything).

    *arith* forces/disables the arithmetic tests, *todo_tests* restricts
    which .decTest data files are exercised, and *debug* is stored in
    the module-global DEBUG for the test classes.
    """
    init()
    global TEST_ALL, DEBUG
    TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
    DEBUG = debug
    if todo_tests is None:
        test_classes = [DecimalExplicitConstructionTest,
                        DecimalImplicitConstructionTest,
                        DecimalArithmeticOperatorsTest, DecimalFormatTest,
                        DecimalUseOfContextTest, DecimalUsabilityTest,
                        DecimalPythonAPItests, ContextAPItests, DecimalTest,
                        WithStatementTest, ContextFlags]
    else:
        test_classes = [DecimalTest]
    # Attach one dynamically generated test method per .decTest data file.
    for fname in os.listdir(directory):
        if '.decTest' not in fname or fname.startswith('.'):
            continue
        head, tail = fname.split('.')
        if todo_tests is not None and head not in todo_tests:
            continue
        runner = lambda self, f=fname: self.eval_file(directory + f)
        setattr(DecimalTest, 'test_' + head, runner)
        del fname, head, tail, runner
    try:
        run_unittest(*test_classes)
        if todo_tests is None:
            import decimal as DecimalModule
            run_doctest(DecimalModule, verbose)
    finally:
        # Always restore the decimal context the suite started with.
        setcontext(ORIGINAL_CONTEXT)
[ "def", "test_main", "(", "arith", "=", "None", ",", "verbose", "=", "None", ",", "todo_tests", "=", "None", ",", "debug", "=", "None", ")", ":", "init", "(", ")", "global", "TEST_ALL", ",", "DEBUG", "TEST_ALL", "=", "(", "arith", "if", "(", "arith", ...
run tests when run as main .
train
false
28,269
@with_setup(prepare_stdout)
def test_output_level_2_success():
    """At verbosity 2 the runner prints only scenario names plus totals."""
    feature_dir = join(abspath(dirname(__file__)), 'output_features',
                       'many_successful_scenarios')
    Runner(feature_dir, verbosity=2).run()
    assert_stdout_lines('Do nothing ... OK\nDo nothing (again) ... OK\n\n1 feature (1 passed)\n2 scenarios (2 passed)\n2 steps (2 passed)\n')
[ "@", "with_setup", "(", "prepare_stdout", ")", "def", "test_output_level_2_success", "(", ")", ":", "runner", "=", "Runner", "(", "join", "(", "abspath", "(", "dirname", "(", "__file__", ")", ")", ",", "'output_features'", ",", "'many_successful_scenarios'", ")"...
output with verbosity 2 must show only the scenario names .
train
false
28,270
def sstrrepr(expr, **settings):
    """Return *expr* rendered in mixed str/repr form."""
    printer = StrReprPrinter(settings)
    return printer.doprint(expr)
[ "def", "sstrrepr", "(", "expr", ",", "**", "settings", ")", ":", "p", "=", "StrReprPrinter", "(", "settings", ")", "s", "=", "p", ".", "doprint", "(", "expr", ")", "return", "s" ]
return expr in mixed str/repr form i .
train
false
28,271
def map_services(environment):
    """Map each known service to the URL to use in *environment*
    ('production' uses deployed URLs, 'development' local ports)."""
    url_map = {}
    for service, local_port in SERVICES.items():
        if environment == 'production':
            url_map[service] = production_url(service)
        elif environment == 'development':
            url_map[service] = local_url(local_port)
    return url_map
[ "def", "map_services", "(", "environment", ")", ":", "url_map", "=", "{", "}", "for", "(", "service", ",", "local_port", ")", "in", "SERVICES", ".", "items", "(", ")", ":", "if", "(", "environment", "==", "'production'", ")", ":", "url_map", "[", "serv...
generates a map of services to correct urls for running locally or when deployed .
train
false
28,274
def add_setup_section(config, app, module, label, icon):
    """Append *app*'s common setup section to *config*; a missing module
    (ImportError) is silently ignored."""
    try:
        section = get_setup_section(app, module, label, icon)
        if section:
            config.append(section)
    except ImportError:
        pass
[ "def", "add_setup_section", "(", "config", ",", "app", ",", "module", ",", "label", ",", "icon", ")", ":", "try", ":", "setup_section", "=", "get_setup_section", "(", "app", ",", "module", ",", "label", ",", "icon", ")", "if", "setup_section", ":", "conf...
add common sections to /desk#module/setup .
train
false
28,276
def createFile(finalSize=2000000000):
    """Create a large test.hdf5 file for testing.

    Appends random float32 chunks until roughly *finalSize* bytes,
    showing a cancellable progress dialog; cancelling removes the file
    and exits the process.
    """
    chunk = np.random.normal(size=1000000).astype(np.float32)
    f = h5py.File('test.hdf5', 'w')
    f.create_dataset('data', data=chunk, chunks=True, maxshape=(None,))
    data = f['data']
    nChunks = finalSize // (chunk.size * chunk.itemsize)
    with pg.ProgressDialog('Generating test.hdf5...', 0, nChunks) as dlg:
        for _ in range(nChunks):
            data.resize([data.shape[0] + chunk.shape[0]])
            data[-chunk.shape[0]:] = chunk
            dlg += 1
            if dlg.wasCanceled():
                f.close()
                os.remove('test.hdf5')
                sys.exit()
        # NOTE(review): the original bumps the dialog once more after the
        # loop as well; preserved as-is.
        dlg += 1
    f.close()
[ "def", "createFile", "(", "finalSize", "=", "2000000000", ")", ":", "chunk", "=", "np", ".", "random", ".", "normal", "(", "size", "=", "1000000", ")", ".", "astype", "(", "np", ".", "float32", ")", "f", "=", "h5py", ".", "File", "(", "'test.hdf5'", ...
create a large hdf5 data file for testing .
train
false
28,277
def get_unused_port():
    """Find a semi-random port that is free for both TCP and UDP binding."""
    def bind_probe(port, sock_type, sock_proto):
        # Returns the bound port number, or None when binding failed.
        sock = socket.socket(socket.AF_INET, sock_type, sock_proto)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', port))
            return sock.getsockname()[1]
        except socket.error:
            return None
        finally:
            sock.close()

    while True:
        # Ask the kernel for a free TCP port, then confirm UDP is free too.
        candidate = bind_probe(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        if candidate and bind_probe(candidate, socket.SOCK_DGRAM,
                                    socket.IPPROTO_UDP):
            return candidate
[ "def", "get_unused_port", "(", ")", ":", "def", "try_bind", "(", "port", ",", "socket_type", ",", "socket_proto", ")", ":", "s", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket_type", ",", "socket_proto", ")", "try", ":", "s", ...
finds a semi-random available port .
train
false
28,278
def write_dig(fname, pts, coord_frame=None):
    """Write digitization points to a FIF file.

    When *coord_frame* is given, every point must either carry that same
    frame or none at all; mixed frames raise ValueError.
    """
    if coord_frame is not None:
        coord_frame = _to_const(coord_frame)
        frames_present = {pt.get('coord_frame', coord_frame) for pt in pts}
        bad_frames = frames_present - {coord_frame}
        if len(bad_frames) > 0:
            raise ValueError('Points have coord_frame entries that are incompatible with coord_frame=%i: %s.' % (coord_frame, str(tuple(bad_frames))))
    with start_file(fname) as fid:
        write_dig_points(fid, pts, block=True, coord_frame=coord_frame)
        end_file(fid)
[ "def", "write_dig", "(", "fname", ",", "pts", ",", "coord_frame", "=", "None", ")", ":", "if", "(", "coord_frame", "is", "not", "None", ")", ":", "coord_frame", "=", "_to_const", "(", "coord_frame", ")", "pts_frames", "=", "set", "(", "(", "pt", ".", ...
write digitization data to a fif file .
train
false
28,279
def register_collector(collector):
    """Register *collector* in the COLLECTORS global.

    If a previously registered collector of the same name still has a
    live process, log the situation and shut it down first.
    """
    assert isinstance(collector, Collector), 'collector=%r' % (collector,)
    existing = COLLECTORS.get(collector.name)
    if existing is not None and existing.proc is not None:
        LOG.error('%s still has a process (pid=%d) and is being reset, terminating', existing.name, existing.proc.pid)
        existing.shutdown()
    COLLECTORS[collector.name] = collector
[ "def", "register_collector", "(", "collector", ")", ":", "assert", "isinstance", "(", "collector", ",", "Collector", ")", ",", "(", "'collector=%r'", "%", "(", "collector", ",", ")", ")", "if", "(", "collector", ".", "name", "in", "COLLECTORS", ")", ":", ...
register a collector with the collectors global .
train
false
28,280
def load_commitmsg():
    """Prompt for a file and load its contents as the commit message."""
    cwd = main.model().getcwd()
    filename = qtutils.open_file(N_(u'Load Commit Message'), directory=cwd)
    if filename:
        cmds.do(cmds.LoadCommitMessageFromFile, filename)
[ "def", "load_commitmsg", "(", ")", ":", "filename", "=", "qtutils", ".", "open_file", "(", "N_", "(", "u'Load Commit Message'", ")", ",", "directory", "=", "main", ".", "model", "(", ")", ".", "getcwd", "(", ")", ")", "if", "filename", ":", "cmds", "."...
load a commit message from a file .
train
false
28,281
def monkey_patch():
    """Globally replace Context.Context.exec_command with my_exec_command
    so every context uses the custom command executor."""
    Context.Context.exec_command = my_exec_command
[ "def", "monkey_patch", "(", ")", ":", "Context", ".", "Context", ".", "exec_command", "=", "my_exec_command" ]
patches decorators for all functions in a specified module .
train
false
28,284
def requires_img_lib():
    """Return a skipif decorator for tests needing an image library
    (imageio or PIL); always skips on Windows."""
    from ..io import _check_img_lib
    if sys.platform.startswith('win'):
        has_img_lib = False
    else:
        checks = _check_img_lib()
        has_img_lib = not all(c is None for c in checks)
    return np.testing.dec.skipif(not has_img_lib, 'imageio or PIL required')
[ "def", "requires_img_lib", "(", ")", ":", "from", ".", ".", "io", "import", "_check_img_lib", "if", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "has_img_lib", "=", "False", "else", ":", "has_img_lib", "=", "(", "not", "all", "(", ...
decorator for tests that require an image library .
train
false
28,285
def genSpecSines(ipfreq, ipmag, ipphase, N, fs):
    """Generate a size-*N* spectrum from sinusoid peak values.

    Peak frequencies in Hz are converted to fractional bin locations
    before delegating to the C implementation.
    """
    bin_locations = (N * ipfreq) / float(fs)
    return UF_C.genSpecSines(bin_locations, ipmag, ipphase, N)
[ "def", "genSpecSines", "(", "ipfreq", ",", "ipmag", ",", "ipphase", ",", "N", ",", "fs", ")", ":", "Y", "=", "UF_C", ".", "genSpecSines", "(", "(", "(", "N", "*", "ipfreq", ")", "/", "float", "(", "fs", ")", ")", ",", "ipmag", ",", "ipphase", "...
generate a spectrum from a series of sine values .
train
false
28,286
def vtcheck(md5, vtKey):
    """Query VirusTotal for the report of hash *md5* using API key *vtKey*.

    Returns ``(0, report_dict)`` on success or ``(-1, error_message)``.
    The original used bare ``except:`` clauses, which also swallow
    KeyboardInterrupt/SystemExit; they are narrowed here.
    """
    vtUrl = 'https://www.virustotal.com/vtapi/v2/file/report'
    parameters = {'resource': md5, 'apikey': vtKey}
    try:
        data = urllib.urlencode(parameters)
        req = urllib2.Request(vtUrl, data)
        response = urllib2.urlopen(req)
        jsonResponse = response.read()
    except Exception:
        # Network/HTTP failures of any kind map to the same caller-facing error.
        return (-1, 'The request to VirusTotal has not been successful')
    try:
        jsonDict = json.loads(jsonResponse)
    except ValueError:
        # json.loads raises ValueError (JSONDecodeError) on malformed data.
        return (-1, 'An error has occurred while parsing the JSON response from VirusTotal')
    return (0, jsonDict)
[ "def", "vtcheck", "(", "md5", ",", "vtKey", ")", ":", "vtUrl", "=", "'https://www.virustotal.com/vtapi/v2/file/report'", "parameters", "=", "{", "'resource'", ":", "md5", ",", "'apikey'", ":", "vtKey", "}", "try", ":", "data", "=", "urllib", ".", "urlencode", ...
function to check a hash on virustotal and get the report summary .
train
false
28,287
def get_related_model(model, relationname):
    """Return the model class related to *model* via *relationname*."""
    mapper = sqlalchemy_inspect(model)
    descriptor = mapper.all_orm_descriptors[relationname]
    # NOTE(review): this hasattr() looks like dead code, but it may be
    # forcing lazy attribute configuration on the class — kept as-is.
    hasattr(model, relationname)
    return get_related_model_from_attribute(descriptor)
[ "def", "get_related_model", "(", "model", ",", "relationname", ")", ":", "mapper", "=", "sqlalchemy_inspect", "(", "model", ")", "attribute", "=", "mapper", ".", "all_orm_descriptors", "[", "relationname", "]", "hasattr", "(", "model", ",", "relationname", ")", ...
gets the class of the model to which model is related by the attribute whose name is relationname .
train
false
28,289
def SetPassword(user):
    """Prompt twice for a password on stdin and store its salted hash in
    the secrets database under *user*'s password name.

    Python 3 port: the ``print`` statement (a syntax error under
    Python 3) becomes print(); behaviour is otherwise unchanged.
    """
    print('Please enter your password twice to reset:')
    pwd = getpass.getpass()
    pwd2 = getpass.getpass()
    # Kept from the original; note asserts disappear under ``python -O``.
    assert pwd == pwd2, "passwords don't match"
    version = _CURRENT_PASSWORD_VERSION
    salt = _GenerateSalt(version)
    hashed = _HashPassword(pwd, version, salt)
    data = dict(salt=salt, hashed=hashed, version=version)
    secrets.PutSecret(_PasswordName(user), json.dumps(data))
[ "def", "SetPassword", "(", "user", ")", ":", "print", "'Please enter your password twice to reset:'", "pwd", "=", "getpass", ".", "getpass", "(", ")", "pwd2", "=", "getpass", ".", "getpass", "(", ")", "assert", "(", "pwd", "==", "pwd2", ")", ",", "\"password...
accepts a user password as input from stdin and stores it to the secrets database .
train
false
28,292
def text_length(tree):
    """Return the total number of characters of stripped text content in
    *tree*, counting both ``.text`` and ``.tail`` of every element.

    Uses ``iter()``; the original called ``getiterator()``, which was
    deprecated and then removed from xml.etree.ElementTree in
    Python 3.9 (lxml keeps both, so this also works there).
    """
    total = 0
    for node in tree.iter():
        if node.text:
            total += len(node.text.strip())
        if node.tail:
            total += len(node.tail.strip())
    return total
[ "def", "text_length", "(", "tree", ")", ":", "total", "=", "0", "for", "node", "in", "tree", ".", "getiterator", "(", ")", ":", "if", "node", ".", "text", ":", "total", "+=", "len", "(", "node", ".", "text", ".", "strip", "(", ")", ")", "if", "...
find the length of the text content .
train
false
28,293
def extraneous_whitespace(logical_line):
    """Report extraneous whitespace (pep8 checks E201/E202/E203).

    Returns ``(offset, message)`` for the first violation found, else
    None: whitespace immediately after an opening bracket, before a
    closing bracket (unless preceded by a comma), or before
    ``,``/``;``/``:``.
    """
    line = logical_line
    for opener in '([{':
        idx = line.find(opener + ' ')
        if idx > -1:
            return (idx + 1, "E201 whitespace after '%s'" % opener)
    for closer in '}])':
        idx = line.find(' ' + closer)
        if idx > -1 and line[idx - 1] != ',':
            return (idx, "E202 whitespace before '%s'" % closer)
    for punct in ',;:':
        idx = line.find(' ' + punct)
        if idx > -1:
            return (idx, "E203 whitespace before '%s'" % punct)
[ "def", "extraneous_whitespace", "(", "logical_line", ")", ":", "line", "=", "logical_line", "for", "char", "in", "'([{'", ":", "found", "=", "line", ".", "find", "(", "(", "char", "+", "' '", ")", ")", "if", "(", "found", ">", "(", "-", "1", ")", "...
avoid extraneous whitespace in the following situations: - immediately inside parentheses .
train
true
28,294
def deconvolve(signal, divisor):
    """Deconvolve *divisor* out of *signal*.

    Returns ``(quotient, remainder)`` such that
    ``signal = convolve(divisor, quotient) + remainder``.
    """
    num = atleast_1d(signal)
    den = atleast_1d(divisor)
    n_num = len(num)
    n_den = len(den)
    if n_den > n_num:
        # Divisor longer than signal: quotient is empty, remainder is
        # the whole signal.
        return [], num
    # Filtering a unit impulse by num/den performs polynomial division.
    impulse = ones(n_num - n_den + 1, float)
    impulse[1:] = 0
    quot = lfilter(num, den, impulse)
    rem = num - convolve(den, quot, mode='full')
    return quot, rem
[ "def", "deconvolve", "(", "signal", ",", "divisor", ")", ":", "num", "=", "atleast_1d", "(", "signal", ")", "den", "=", "atleast_1d", "(", "divisor", ")", "N", "=", "len", "(", "num", ")", "D", "=", "len", "(", "den", ")", "if", "(", "D", ">", ...
deconvolves divisor out of signal .
train
false
28,295
def select_n_series(series, n, keep, method):
    """Implement n-largest/n-smallest for a pandas Series.

    *method* picks the direction via the _select_methods table; *keep*
    chooses which duplicates survive ('first' or 'last').  Only integer,
    float, datetime64 and timedelta64 dtypes are supported.
    """
    dtype = series.dtype
    supported = (np.integer, np.floating, np.datetime64, np.timedelta64)
    if not issubclass(dtype.type, supported):
        raise TypeError('Cannot use method %r with dtype %s' % (method, dtype))
    if keep not in ('first', 'last'):
        raise ValueError('keep must be either "first", "last"')
    if n <= 0:
        return series[[]]
    dropped = series.dropna()
    if n >= len(series):
        # Asking for at least the whole series: fall back to a full sort.
        return select_n_slow(dropped, n, keep, method)
    inds = _select_methods[method](dropped.values, n, keep)
    return dropped.iloc[inds]
[ "def", "select_n_series", "(", "series", ",", "n", ",", "keep", ",", "method", ")", ":", "dtype", "=", "series", ".", "dtype", "if", "(", "not", "issubclass", "(", "dtype", ".", "type", ",", "(", "np", ".", "integer", ",", "np", ".", "floating", ",...
implement n largest/smallest for pandas series parameters series : pandas .
train
false
28,297
def _get_cibfile(cibname):
    """Return the full path of the cached CIB file named *cibname*."""
    path = os.path.join(_get_cibpath(), '{0}.{1}'.format(cibname, 'cib'))
    log.trace('cibfile: {0}'.format(path))
    return path
[ "def", "_get_cibfile", "(", "cibname", ")", ":", "cibfile", "=", "os", ".", "path", ".", "join", "(", "_get_cibpath", "(", ")", ",", "'{0}.{1}'", ".", "format", "(", "cibname", ",", "'cib'", ")", ")", "log", ".", "trace", "(", "'cibfile: {0}'", ".", ...
get the full path of a cached cib-file with the name of the cib .
train
true
28,298
def apply_change_ubuntu(targetState, name):
    """Create or remove the locale *name* on Ubuntu.

    'present' runs locale-gen for the locale; any other state removes
    the entry from /var/lib/locales/supported.d/local and regenerates
    with --purge.  Raises EnvironmentError if locale-gen exits non-zero.
    """
    if targetState == 'present':
        localeGenExitValue = call(['locale-gen', name])
    else:
        # ``with`` replaces the original try/finally pairs and also fixes
        # a latent NameError: the old code referenced ``f`` in ``finally``
        # even when open() itself had failed.
        supported = '/var/lib/locales/supported.d/local'
        with open(supported, 'r') as f:
            content = f.readlines()
        with open(supported, 'w') as f:
            for line in content:
                # Each line is "<locale> <charset>"; drop the one being removed.
                locale, charset = line.split(' ')
                if locale != name:
                    f.write(line)
        localeGenExitValue = call(['locale-gen', '--purge'])
    if localeGenExitValue != 0:
        raise EnvironmentError(localeGenExitValue, 'locale.gen failed to execute, it returned ' + str(localeGenExitValue))
[ "def", "apply_change_ubuntu", "(", "targetState", ",", "name", ")", ":", "if", "(", "targetState", "==", "'present'", ")", ":", "localeGenExitValue", "=", "call", "(", "[", "'locale-gen'", ",", "name", "]", ")", "else", ":", "try", ":", "f", "=", "open",...
create or remove locale .
train
false
28,299
def deprecated_module(removal_version, hint_message=None):
    """Mark the calling module as deprecated, warning (or erroring) with
    the given removal version and optional hint."""
    warn_or_error(removal_version, u'module', hint_message)
[ "def", "deprecated_module", "(", "removal_version", ",", "hint_message", "=", "None", ")", ":", "warn_or_error", "(", "removal_version", ",", "u'module'", ",", "hint_message", ")" ]
marks an entire module as deprecated .
train
false
28,300
def user_addmedia(userids, active, mediatypeid, period, sendto, severity, **connection_args): conn_args = _login(**connection_args) try: if conn_args: method = 'user.addmedia' params = {'users': []} if (not isinstance(userids, list)): userids = [userids] for user in userids: params['users'].append({'userid': user}) params['medias'] = [{'active': active, 'mediatypeid': mediatypeid, 'period': period, 'sendto': sendto, 'severity': severity}] ret = _query(method, params, conn_args['url'], conn_args['auth']) return ret['result']['mediaids'] else: raise KeyError except KeyError: return ret
[ "def", "user_addmedia", "(", "userids", ",", "active", ",", "mediatypeid", ",", "period", ",", "sendto", ",", "severity", ",", "**", "connection_args", ")", ":", "conn_args", "=", "_login", "(", "**", "connection_args", ")", "try", ":", "if", "conn_args", ...
add new media to multiple users .
train
true
28,302
def reblock_2x2(B): if ((not isinstance(B, BlockMatrix)) or (not all(((d > 2) for d in B.blocks.shape)))): return B BM = BlockMatrix return BM([[B.blocks[(0, 0)], BM(B.blocks[0, 1:])], [BM(B.blocks[1:, 0]), BM(B.blocks[1:, 1:])]])
[ "def", "reblock_2x2", "(", "B", ")", ":", "if", "(", "(", "not", "isinstance", "(", "B", ",", "BlockMatrix", ")", ")", "or", "(", "not", "all", "(", "(", "(", "d", ">", "2", ")", "for", "d", "in", "B", ".", "blocks", ".", "shape", ")", ")", ...
reblock a blockmatrix so that it has 2x2 blocks of block matrices .
train
false
28,303
@core_helper def get_pkg_dict_extra(pkg_dict, key, default=None): extras = (pkg_dict['extras'] if ('extras' in pkg_dict) else []) for extra in extras: if (extra['key'] == key): return extra['value'] return default
[ "@", "core_helper", "def", "get_pkg_dict_extra", "(", "pkg_dict", ",", "key", ",", "default", "=", "None", ")", ":", "extras", "=", "(", "pkg_dict", "[", "'extras'", "]", "if", "(", "'extras'", "in", "pkg_dict", ")", "else", "[", "]", ")", "for", "extr...
returns the value for the dataset extra with the provided key .
train
false
28,304
def _have_socket_rds(): try: s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) except (AttributeError, OSError): return False else: s.close() return True
[ "def", "_have_socket_rds", "(", ")", ":", "try", ":", "s", "=", "socket", ".", "socket", "(", "socket", ".", "PF_RDS", ",", "socket", ".", "SOCK_SEQPACKET", ",", "0", ")", "except", "(", "AttributeError", ",", "OSError", ")", ":", "return", "False", "e...
check whether rds sockets are supported on this host .
train
false
28,306
@register.simple_tag(takes_context=True) def navigation_bar_hooks(context): s = u'' for hook in NavigationBarHook.hooks: try: for nav_info in hook.get_entries(context): if nav_info: url_name = nav_info.get(u'url_name', None) if url_name: nav_info[u'url'] = local_site_reverse(url_name, request=context.get(u'request')) context.push() context[u'entry'] = nav_info s += render_to_string(u'extensions/navbar_entry.html', context) context.pop() except Exception as e: extension = hook.extension logging.error(u'Error when running NavigationBarHook.get_entries function in extension: "%s": %s', extension.id, e, exc_info=1) return s
[ "@", "register", ".", "simple_tag", "(", "takes_context", "=", "True", ")", "def", "navigation_bar_hooks", "(", "context", ")", ":", "s", "=", "u''", "for", "hook", "in", "NavigationBarHook", ".", "hooks", ":", "try", ":", "for", "nav_info", "in", "hook", ...
displays all registered navigation bar entries .
train
false
28,307
def _parse_repo_file(filename): repos = {} header = '' repo = '' with salt.utils.fopen(filename, 'r') as rfile: for line in rfile: if line.startswith('['): repo = line.strip().replace('[', '').replace(']', '') repos[repo] = {} if (not line): if (not repo): header += line if line.startswith('#'): if (not repo): header += line else: if ('comments' not in repos[repo]): repos[repo]['comments'] = [] repos[repo]['comments'].append(line.strip()) continue if ('=' in line): try: comps = line.strip().split('=') repos[repo][comps[0].strip()] = '='.join(comps[1:]) except KeyError: log.error("Failed to parse line in %s, offending line was '%s'", filename, line.rstrip()) if (comps[0].strip() == 'enabled'): repos[repo]['disabled'] = (comps[1] != '1') return (header, repos)
[ "def", "_parse_repo_file", "(", "filename", ")", ":", "repos", "=", "{", "}", "header", "=", "''", "repo", "=", "''", "with", "salt", ".", "utils", ".", "fopen", "(", "filename", ",", "'r'", ")", "as", "rfile", ":", "for", "line", "in", "rfile", ":...
turn a single repo file into a dict .
train
false
28,308
def _get_endpoint_region(endpoint): return (endpoint.get('region_id') or endpoint.get('region'))
[ "def", "_get_endpoint_region", "(", "endpoint", ")", ":", "return", "(", "endpoint", ".", "get", "(", "'region_id'", ")", "or", "endpoint", ".", "get", "(", "'region'", ")", ")" ]
common function for getting the region from endpoint .
train
false
28,309
def db_remove(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): query = 'DROP DATABASE "{0}"'.format(name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, runas=runas, maintenance_db=maintenance_db, password=password) return (ret['retcode'] == 0)
[ "def", "db_remove", "(", "name", ",", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "maintenance_db", "=", "None", ",", "password", "=", "None", ",", "runas", "=", "None", ")", ":", "query", "=", "'DROP DATABASE \"{0}\"'...
removes a databases from the mysql server .
train
true
28,310
def richards(x, A, u, d, v, y0): y = ((A * pow((1 + (v + (np.exp((1 + v)) * np.exp(((((u / A) * (1 + v)) * (1 + (1 / v))) * (d - x)))))), (- (1 / v)))) + y0) return y
[ "def", "richards", "(", "x", ",", "A", ",", "u", ",", "d", ",", "v", ",", "y0", ")", ":", "y", "=", "(", "(", "A", "*", "pow", "(", "(", "1", "+", "(", "v", "+", "(", "np", ".", "exp", "(", "(", "1", "+", "v", ")", ")", "*", "np", ...
gompertz growth model proposed in zwietering et al .
train
false
28,312
def kill(coro): return KillEvent(coro)
[ "def", "kill", "(", "coro", ")", ":", "return", "KillEvent", "(", "coro", ")" ]
kill greenlet asynchronously .
train
false
28,314
@magic_arguments() @argument('-f', '--foo', help='an argument') @argument_group('Group') @argument('-b', '--bar', help='a grouped argument') @argument_group('Second Group') @argument('-z', '--baz', help='another grouped argument') def magic_foo3(self, args): return parse_argstring(magic_foo3, args)
[ "@", "magic_arguments", "(", ")", "@", "argument", "(", "'-f'", ",", "'--foo'", ",", "help", "=", "'an argument'", ")", "@", "argument_group", "(", "'Group'", ")", "@", "argument", "(", "'-b'", ",", "'--bar'", ",", "help", "=", "'a grouped argument'", ")",...
a docstring .
train
false
28,315
def assert_valid_codeobj(allowed_codes, code_obj, expr): assert_no_dunder_name(code_obj, expr) if (set(_get_opcodes(code_obj)) - allowed_codes): raise ValueError(('forbidden opcode(s) in %r' % expr)) for const in code_obj.co_consts: if isinstance(const, CodeType): assert_valid_codeobj(allowed_codes, const, 'lambda')
[ "def", "assert_valid_codeobj", "(", "allowed_codes", ",", "code_obj", ",", "expr", ")", ":", "assert_no_dunder_name", "(", "code_obj", ",", "expr", ")", "if", "(", "set", "(", "_get_opcodes", "(", "code_obj", ")", ")", "-", "allowed_codes", ")", ":", "raise"...
asserts that the provided code object validates against the bytecode and name constraints .
train
false
28,317
@register.simple_tag def replace_page_param(query, page_number, page_key=u'p'): return conditional_escape(replace_page_in_query(query, page_number, page_key))
[ "@", "register", ".", "simple_tag", "def", "replace_page_param", "(", "query", ",", "page_number", ",", "page_key", "=", "u'p'", ")", ":", "return", "conditional_escape", "(", "replace_page_in_query", "(", "query", ",", "page_number", ",", "page_key", ")", ")" ]
replaces page_key from query string with page_number .
train
false
28,318
def assert_np_arrays_not_equal(expected, got): nans = [] equals = [] for (i, value) in enumerate(expected): if np.isnan(value): assert_true(np.isnan(got[i])) nans.append(value) else: try: assert_not_equal(value, got[i]) except AssertionError: equals.append(got[i]) if (len(equals) == len(expected[len(nans):])): raise AssertionError('Arrays were equal.') elif equals: print(('Arrays had %i/%i equivalent values.' % (len(equals), len(expected[len(nans):]))))
[ "def", "assert_np_arrays_not_equal", "(", "expected", ",", "got", ")", ":", "nans", "=", "[", "]", "equals", "=", "[", "]", "for", "(", "i", ",", "value", ")", "in", "enumerate", "(", "expected", ")", ":", "if", "np", ".", "isnan", "(", "value", ")...
verifies expected and got have the same number of leading nan fields .
train
false
28,319
def make_commodity_future_info(first_sid, root_symbols, years, month_codes=None): nineteen_days = pd.Timedelta(days=19) one_year = pd.Timedelta(days=365) return make_future_info(first_sid=first_sid, root_symbols=root_symbols, years=years, notice_date_func=(lambda dt: ((dt - MonthBegin(2)) + nineteen_days)), expiration_date_func=(lambda dt: ((dt - MonthBegin(1)) + nineteen_days)), start_date_func=(lambda dt: (dt - one_year)), month_codes=month_codes)
[ "def", "make_commodity_future_info", "(", "first_sid", ",", "root_symbols", ",", "years", ",", "month_codes", "=", "None", ")", ":", "nineteen_days", "=", "pd", ".", "Timedelta", "(", "days", "=", "19", ")", "one_year", "=", "pd", ".", "Timedelta", "(", "d...
make futures testing data that simulates the notice/expiration date behavior of physical commodities like oil .
train
true
28,323
def delete_search_units(source_units, languages): index = get_source_index() writer = index.writer() try: for pk in source_units: writer.delete_by_term('pk', pk) finally: writer.commit() for (lang, units) in languages.items(): index = get_target_index(lang) writer = index.writer() try: for pk in units: writer.delete_by_term('pk', pk) finally: writer.commit()
[ "def", "delete_search_units", "(", "source_units", ",", "languages", ")", ":", "index", "=", "get_source_index", "(", ")", "writer", "=", "index", ".", "writer", "(", ")", "try", ":", "for", "pk", "in", "source_units", ":", "writer", ".", "delete_by_term", ...
delete fulltext index for given set of units .
train
false
28,324
def remove_markup(html): return HTML(bleach.clean(html, tags=[], strip=True))
[ "def", "remove_markup", "(", "html", ")", ":", "return", "HTML", "(", "bleach", ".", "clean", "(", "html", ",", "tags", "=", "[", "]", ",", "strip", "=", "True", ")", ")" ]
return html with markup stripped and text html-escaped .
train
false
28,325
@raise_event def send_response(resource, response): if isinstance(response, Response): return response else: return _prepare_response(resource, *(response if response else [None]))
[ "@", "raise_event", "def", "send_response", "(", "resource", ",", "response", ")", ":", "if", "isinstance", "(", "response", ",", "Response", ")", ":", "return", "response", "else", ":", "return", "_prepare_response", "(", "resource", ",", "*", "(", "respons...
prepares the response for the client .
train
false
28,326
def _extract_action_num_and_node_id(m): return dict(action_num=(int(m.group('action_num')) - 1), node_id=m.group('node_id'))
[ "def", "_extract_action_num_and_node_id", "(", "m", ")", ":", "return", "dict", "(", "action_num", "=", "(", "int", "(", "m", ".", "group", "(", "'action_num'", ")", ")", "-", "1", ")", ",", "node_id", "=", "m", ".", "group", "(", "'node_id'", ")", "...
helper method: extract *action_num* and *node_id* from the given regex match .
train
false
28,328
def NamedTupleRow(cursor): from collections import namedtuple attr_names = [x[0] for x in cursor._ColBufferList] class Row(namedtuple('Row', attr_names, rename=True), ): cursor_description = cursor.description def __new__(cls, iterable): return super(Row, cls).__new__(cls, *iterable) return Row
[ "def", "NamedTupleRow", "(", "cursor", ")", ":", "from", "collections", "import", "namedtuple", "attr_names", "=", "[", "x", "[", "0", "]", "for", "x", "in", "cursor", ".", "_ColBufferList", "]", "class", "Row", "(", "namedtuple", "(", "'Row'", ",", "att...
named tuple to allow attribute lookup by name .
train
false
28,329
def slaac(value, query=''): try: vtype = ipaddr(value, 'type') if (vtype == 'address'): v = ipaddr(value, 'cidr') elif (vtype == 'network'): v = ipaddr(value, 'subnet') if (ipaddr(value, 'version') != 6): return False value = netaddr.IPNetwork(v) except: return False if (not query): return False try: mac = hwaddr(query, alias='slaac') eui = netaddr.EUI(mac) except: return False return eui.ipv6(value.network)
[ "def", "slaac", "(", "value", ",", "query", "=", "''", ")", ":", "try", ":", "vtype", "=", "ipaddr", "(", "value", ",", "'type'", ")", "if", "(", "vtype", "==", "'address'", ")", ":", "v", "=", "ipaddr", "(", "value", ",", "'cidr'", ")", "elif", ...
get the slaac address within given network .
train
false
28,330
def _netstat_route_freebsd(): ret = [] cmd = 'netstat -f inet -rn | tail -n+5' out = __salt__['cmd.run'](cmd, python_shell=True) for line in out.splitlines(): comps = line.split() if ((__grains__['os'] == 'FreeBSD') and (int(__grains__.get('osmajorrelease', 0)) < 10)): ret.append({'addr_family': 'inet', 'destination': comps[0], 'gateway': comps[1], 'netmask': comps[2], 'flags': comps[3], 'interface': comps[5]}) else: ret.append({'addr_family': 'inet', 'destination': comps[0], 'gateway': comps[1], 'netmask': '', 'flags': comps[2], 'interface': comps[3]}) cmd = 'netstat -f inet6 -rn | tail -n+5' out = __salt__['cmd.run'](cmd, python_shell=True) for line in out.splitlines(): comps = line.split() ret.append({'addr_family': 'inet6', 'destination': comps[0], 'gateway': comps[1], 'netmask': '', 'flags': comps[2], 'interface': comps[3]}) return ret
[ "def", "_netstat_route_freebsd", "(", ")", ":", "ret", "=", "[", "]", "cmd", "=", "'netstat -f inet -rn | tail -n+5'", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "True", ")", "for", "line", "in", "out", ".", "split...
return netstat routing information for freebsd and macos .
train
true
28,332
@auth.route('/reset-password', methods=['GET', 'POST']) def forgot_password(): if (not current_user.is_anonymous): return redirect(url_for('forum.index')) form = ForgotPasswordForm() if form.validate_on_submit(): user = User.query.filter_by(email=form.email.data).first() if user: send_reset_token.delay(user) flash(_('Email sent! Please check your inbox.'), 'info') return redirect(url_for('auth.forgot_password')) else: flash(_('You have entered an username or email address that is not linked with your account.'), 'danger') return render_template('auth/forgot_password.html', form=form)
[ "@", "auth", ".", "route", "(", "'/reset-password'", ",", "methods", "=", "[", "'GET'", ",", "'POST'", "]", ")", "def", "forgot_password", "(", ")", ":", "if", "(", "not", "current_user", ".", "is_anonymous", ")", ":", "return", "redirect", "(", "url_for...
view function that handles a forgotten password request .
train
false
28,333
def check_tree_exact_match(fasta_labels, tree_fp): raw_fasta_labels = set([label.split('_')[0] for label in fasta_labels]) tree_f = open(tree_fp, 'U') tree = DndParser(tree_f) tree_tips = set(tree.getTipNames()) labels_not_in_tips = [] for curr_label in raw_fasta_labels: if (curr_label not in tree_tips): labels_not_in_tips.append(curr_label) if (len(labels_not_in_tips) == 0): labels_not_in_tips = True tips_not_in_labels = [] for curr_tip in tree_tips: if (curr_tip not in raw_fasta_labels): tips_not_in_labels.append(curr_tip) if (len(tips_not_in_labels) == 0): tips_not_in_labels = True return [labels_not_in_tips, tips_not_in_labels]
[ "def", "check_tree_exact_match", "(", "fasta_labels", ",", "tree_fp", ")", ":", "raw_fasta_labels", "=", "set", "(", "[", "label", ".", "split", "(", "'_'", ")", "[", "0", "]", "for", "label", "in", "fasta_labels", "]", ")", "tree_f", "=", "open", "(", ...
checks fasta labels to exact match to tree tips returns a list of two lists .
train
false
28,334
def irc_logins(full_load, pkt): user_search = re.match(irc_user_re, full_load) pass_search = re.match(irc_pw_re, full_load) pass_search2 = re.search(irc_pw_re2, full_load.lower()) if user_search: msg = ('IRC nick: %s' % user_search.group(1)) return msg if pass_search: msg = ('IRC pass: %s' % pass_search.group(1)) return msg if pass_search2: msg = ('IRC pass: %s' % pass_search2.group(1)) return msg
[ "def", "irc_logins", "(", "full_load", ",", "pkt", ")", ":", "user_search", "=", "re", ".", "match", "(", "irc_user_re", ",", "full_load", ")", "pass_search", "=", "re", ".", "match", "(", "irc_pw_re", ",", "full_load", ")", "pass_search2", "=", "re", "....
find irc logins .
train
false
28,335
def execlpe(file, *args): env = args[(-1)] execvpe(file, args[:(-1)], env)
[ "def", "execlpe", "(", "file", ",", "*", "args", ")", ":", "env", "=", "args", "[", "(", "-", "1", ")", "]", "execvpe", "(", "file", ",", "args", "[", ":", "(", "-", "1", ")", "]", ",", "env", ")" ]
execlpe execute the executable file with argument list args and environment env .
train
false
28,336
def get_task_runner(local_task_job): if (_TASK_RUNNER == 'BashTaskRunner'): return BashTaskRunner(local_task_job) elif (_TASK_RUNNER == 'CgroupTaskRunner'): from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner return CgroupTaskRunner(local_task_job) else: raise AirflowException('Unknown task runner type {}'.format(_TASK_RUNNER))
[ "def", "get_task_runner", "(", "local_task_job", ")", ":", "if", "(", "_TASK_RUNNER", "==", "'BashTaskRunner'", ")", ":", "return", "BashTaskRunner", "(", "local_task_job", ")", "elif", "(", "_TASK_RUNNER", "==", "'CgroupTaskRunner'", ")", ":", "from", "airflow", ...
get the task runner that can be used to run the given job .
train
true
28,337
def lsb_release_attr(attribute): return _distro.lsb_release_attr(attribute)
[ "def", "lsb_release_attr", "(", "attribute", ")", ":", "return", "_distro", ".", "lsb_release_attr", "(", "attribute", ")" ]
return a single named information item from the lsb_release command output data source of the current linux distribution .
train
false
28,338
@pytest.mark.django_db def test_data_tp_bad(): with pytest.raises(IntegrityError): TPData.objects.create()
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_data_tp_bad", "(", ")", ":", "with", "pytest", ".", "raises", "(", "IntegrityError", ")", ":", "TPData", ".", "objects", ".", "create", "(", ")" ]
test that you cant add a duplicate file extension .
train
false
28,339
@conf.commands.register def srloop(pkts, *args, **kargs): return __sr_loop(sr, pkts, *args, **kargs)
[ "@", "conf", ".", "commands", ".", "register", "def", "srloop", "(", "pkts", ",", "*", "args", ",", "**", "kargs", ")", ":", "return", "__sr_loop", "(", "sr", ",", "pkts", ",", "*", "args", ",", "**", "kargs", ")" ]
send a packet at layer 3 in loop and print the answer each time srloop --> none .
train
false
28,340
def __get_root(*append): if (append is None): append = [] path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..', *append) return os.path.abspath(path)
[ "def", "__get_root", "(", "*", "append", ")", ":", "if", "(", "append", "is", "None", ")", ":", "append", "=", "[", "]", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'..'", ","...
returns the root directory of the project .
train
false
28,341
@then(u'pgcli exits') def step_wait_exit(context): _expect_exact(context, pexpect.EOF, timeout=5)
[ "@", "then", "(", "u'pgcli exits'", ")", "def", "step_wait_exit", "(", "context", ")", ":", "_expect_exact", "(", "context", ",", "pexpect", ".", "EOF", ",", "timeout", "=", "5", ")" ]
make sure the cli exits .
train
false
28,342
def medianbias(x1, x2, axis=0): x1 = np.asanyarray(x1) x2 = np.asanyarray(x2) return np.median((x1 - x2), axis=axis)
[ "def", "medianbias", "(", "x1", ",", "x2", ",", "axis", "=", "0", ")", ":", "x1", "=", "np", ".", "asanyarray", "(", "x1", ")", "x2", "=", "np", ".", "asanyarray", "(", "x2", ")", "return", "np", ".", "median", "(", "(", "x1", "-", "x2", ")",...
median bias .
train
false
28,343
def get_representatives(mapping, seqs): for (label, seq) in seqs: if (label in mapping): seq = BiologicalSequence(seq, id=('%s: %d' % (label, (len(mapping[label]) + 1)))) (yield seq.upper())
[ "def", "get_representatives", "(", "mapping", ",", "seqs", ")", ":", "for", "(", "label", ",", "seq", ")", "in", "seqs", ":", "if", "(", "label", "in", "mapping", ")", ":", "seq", "=", "BiologicalSequence", "(", "seq", ",", "id", "=", "(", "'%s: %d'"...
returns representative seqs .
train
false
28,344
def generate_strings(total_strings, string_length=20): statements = [] for _ in range(0, total_strings): text = ''.join((random.choice(((string.ascii_letters + string.digits) + ' ')) for _ in range(string_length))) statements.append(text) return statements
[ "def", "generate_strings", "(", "total_strings", ",", "string_length", "=", "20", ")", ":", "statements", "=", "[", "]", "for", "_", "in", "range", "(", "0", ",", "total_strings", ")", ":", "text", "=", "''", ".", "join", "(", "(", "random", ".", "ch...
print out automatically generated strings for translation .
train
false
28,345
def test_identity_input(): ident1 = Identity(1) shift = Shift(1) rotation = Rotation2D(angle=90) model = ((ident1 & shift) | rotation) assert_allclose(model(1, 2), [(-3.0), 1.0]) TestModel = ((ident1 & Shift) | Rotation2D) model = TestModel(offset_1=1, angle_2=90) assert_allclose(model(1, 2), [(-3.0), 1.0])
[ "def", "test_identity_input", "(", ")", ":", "ident1", "=", "Identity", "(", "1", ")", "shift", "=", "Shift", "(", "1", ")", "rotation", "=", "Rotation2D", "(", "angle", "=", "90", ")", "model", "=", "(", "(", "ident1", "&", "shift", ")", "|", "rot...
test a case where an identity model is the first in a chain of composite models and thus is responsible for handling input broadcasting properly .
train
false
28,346
def test_IndexLocator_set_params(): index = mticker.IndexLocator(base=3, offset=4) index.set_params(base=7, offset=7) assert (index._base == 7) assert (index.offset == 7)
[ "def", "test_IndexLocator_set_params", "(", ")", ":", "index", "=", "mticker", ".", "IndexLocator", "(", "base", "=", "3", ",", "offset", "=", "4", ")", "index", ".", "set_params", "(", "base", "=", "7", ",", "offset", "=", "7", ")", "assert", "(", "...
create index locator with 3 base .
train
false
28,347
def basename(p): return split(p)[1]
[ "def", "basename", "(", "p", ")", ":", "return", "split", "(", "p", ")", "[", "1", "]" ]
returns the final component of a pathname .
train
false
28,348
def setBlocking(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) flags = (flags & (~ os.O_NONBLOCK)) fcntl.fcntl(fd, fcntl.F_SETFL, flags)
[ "def", "setBlocking", "(", "fd", ")", ":", "flags", "=", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_GETFL", ")", "flags", "=", "(", "flags", "&", "(", "~", "os", ".", "O_NONBLOCK", ")", ")", "fcntl", ".", "fcntl", "(", "fd", ",", "...
make a file descriptor blocking .
train
false
28,349
def splitBorder(parts): width = style = color = None if (len(parts) > 3): log.warn('To many elements for border style %r', parts) for part in parts: if isSize(part): width = part elif (hasattr(part, 'lower') and (part.lower() in _borderStyleTable)): style = part else: color = part return (width, style, color)
[ "def", "splitBorder", "(", "parts", ")", ":", "width", "=", "style", "=", "color", "=", "None", "if", "(", "len", "(", "parts", ")", ">", "3", ")", ":", "log", ".", "warn", "(", "'To many elements for border style %r'", ",", "parts", ")", "for", "part"...
the order of the elements seems to be of no importance: URL#border-shorthand-properties .
train
true
28,350
def get_theme_path_real(theme, themes_dirs): for themes_dir in themes_dirs: dir_name = os.path.join(themes_dir, theme) if os.path.isdir(dir_name): return dir_name dir_name = resource_filename(u'nikola', os.path.join(u'data', u'themes', theme)) if os.path.isdir(dir_name): return dir_name raise Exception(u"Can't find theme '{0}'".format(theme))
[ "def", "get_theme_path_real", "(", "theme", ",", "themes_dirs", ")", ":", "for", "themes_dir", "in", "themes_dirs", ":", "dir_name", "=", "os", ".", "path", ".", "join", "(", "themes_dir", ",", "theme", ")", "if", "os", ".", "path", ".", "isdir", "(", ...
return the path where the given themes files are located .
train
false
28,351
def document_link(resource, document_id, version=None): version_part = (('?version=%s' % version) if version else '') return {'title': ('%s' % config.DOMAIN[resource]['item_title']), 'href': ('%s/%s%s' % (resource_link(), document_id, version_part))}
[ "def", "document_link", "(", "resource", ",", "document_id", ",", "version", "=", "None", ")", ":", "version_part", "=", "(", "(", "'?version=%s'", "%", "version", ")", "if", "version", "else", "''", ")", "return", "{", "'title'", ":", "(", "'%s'", "%", ...
public link to the document .
train
false
28,352
@app.template_filter('localize_dt') def localize_dt(dt, tzname): localized_dt = timezone(tzname).localize(dt) return localized_dt.isoformat()
[ "@", "app", ".", "template_filter", "(", "'localize_dt'", ")", "def", "localize_dt", "(", "dt", ",", "tzname", ")", ":", "localized_dt", "=", "timezone", "(", "tzname", ")", ".", "localize", "(", "dt", ")", "return", "localized_dt", ".", "isoformat", "(", ...
accepts a datetime object and a timezone name .
train
false
28,353
def create_streams_if_needed(realm, stream_dicts): added_streams = [] existing_streams = [] for stream_dict in stream_dicts: (stream, created) = create_stream_if_needed(realm, stream_dict['name'], invite_only=stream_dict.get('invite_only', False), stream_description=stream_dict.get('description', '')) if created: added_streams.append(stream) else: existing_streams.append(stream) return (added_streams, existing_streams)
[ "def", "create_streams_if_needed", "(", "realm", ",", "stream_dicts", ")", ":", "added_streams", "=", "[", "]", "existing_streams", "=", "[", "]", "for", "stream_dict", "in", "stream_dicts", ":", "(", "stream", ",", "created", ")", "=", "create_stream_if_needed"...
note that stream_dict["name"] is assumed to already be stripped of whitespace .
train
false
28,354
def test_enn_not_good_object(): nn = 'rnd' enn = EditedNearestNeighbours(n_neighbors=nn, random_state=RND_SEED, kind_sel='mode') assert_raises(ValueError, enn.fit_sample, X, Y)
[ "def", "test_enn_not_good_object", "(", ")", ":", "nn", "=", "'rnd'", "enn", "=", "EditedNearestNeighbours", "(", "n_neighbors", "=", "nn", ",", "random_state", "=", "RND_SEED", ",", "kind_sel", "=", "'mode'", ")", "assert_raises", "(", "ValueError", ",", "enn...
test either if an error is raised while a wrong type of nn is given .
train
false
28,355
def datetime_format(dtobj): (year, month, day) = (dtobj.year, dtobj.month, dtobj.day) (hour, minute, second) = (dtobj.hour, dtobj.minute, dtobj.second) now = timezone.now() if (year < now.year): timestring = str(dtobj.date()) elif (dtobj.date() < now.date()): timestring = ('%02i-%02i' % (day, month)) elif (hour < (now.hour - 1)): timestring = ('%02i:%02i' % (hour, minute)) else: timestring = ('%02i:%02i:%02i' % (hour, minute, second)) return timestring
[ "def", "datetime_format", "(", "dtobj", ")", ":", "(", "year", ",", "month", ",", "day", ")", "=", "(", "dtobj", ".", "year", ",", "dtobj", ".", "month", ",", "dtobj", ".", "day", ")", "(", "hour", ",", "minute", ",", "second", ")", "=", "(", "...
pretty-prints the time since a given time .
train
false
28,356
def cmd_graph(args): usage = 'usage: graph <FIELD...>' if (len(args) < 1): print usage return if (args[0][0] == ':'): i = int(args[0][1:]) g = mestate.graphs[i] expression = g.expression args = expression.split() mestate.console.write(('Added graph: %s\n' % g.name)) if g.description: mestate.console.write(('%s\n' % g.description), fg='blue') mestate.rl.add_history(('graph %s' % ' '.join(expression.split()))) mestate.last_graph = g else: expression = ' '.join(args) mestate.last_graph = GraphDefinition('Untitled', expression, '', [expression], None) display_graph(mestate.last_graph)
[ "def", "cmd_graph", "(", "args", ")", ":", "usage", "=", "'usage: graph <FIELD...>'", "if", "(", "len", "(", "args", ")", "<", "1", ")", ":", "print", "usage", "return", "if", "(", "args", "[", "0", "]", "[", "0", "]", "==", "':'", ")", ":", "i",...
graph command .
train
true
28,357
def new(rsa_key): return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
create a new chacha20 cipher :keywords: key : byte string the secret key to use in the symmetric cipher .
train
false
28,358
def dependency_ordered(test_databases, dependencies): ordered_test_databases = [] resolved_databases = set() dependencies_map = {} for (sig, (_, aliases)) in test_databases: all_deps = set() for alias in aliases: all_deps.update(dependencies.get(alias, [])) if (not all_deps.isdisjoint(aliases)): raise ImproperlyConfigured(('Circular dependency: databases %r depend on each other, but are aliases.' % aliases)) dependencies_map[sig] = all_deps while test_databases: changed = False deferred = [] for (signature, (db_name, aliases)) in test_databases: if dependencies_map[signature].issubset(resolved_databases): resolved_databases.update(aliases) ordered_test_databases.append((signature, (db_name, aliases))) changed = True else: deferred.append((signature, (db_name, aliases))) if (not changed): raise ImproperlyConfigured('Circular dependency in TEST_DEPENDENCIES') test_databases = deferred return ordered_test_databases
[ "def", "dependency_ordered", "(", "test_databases", ",", "dependencies", ")", ":", "ordered_test_databases", "=", "[", "]", "resolved_databases", "=", "set", "(", ")", "dependencies_map", "=", "{", "}", "for", "(", "sig", ",", "(", "_", ",", "aliases", ")", ...
reorder test_databases into an order that honors the dependencies described in test_dependencies .
train
false
28,359
def cleanup_delete(chunks): while 1: try: (pre_delete, delete, post_delete) = split_delete(chunks) except NoDeletes: break (unbalanced_start, balanced, unbalanced_end) = split_unbalanced(delete) locate_unbalanced_start(unbalanced_start, pre_delete, post_delete) locate_unbalanced_end(unbalanced_end, pre_delete, post_delete) doc = pre_delete if (doc and (not doc[(-1)].endswith(' '))): doc[(-1)] += ' ' doc.append('<del>') if (balanced and balanced[(-1)].endswith(' ')): balanced[(-1)] = balanced[(-1)][:(-1)] doc.extend(balanced) doc.append('</del> ') doc.extend(post_delete) chunks = doc return chunks
[ "def", "cleanup_delete", "(", "chunks", ")", ":", "while", "1", ":", "try", ":", "(", "pre_delete", ",", "delete", ",", "post_delete", ")", "=", "split_delete", "(", "chunks", ")", "except", "NoDeletes", ":", "break", "(", "unbalanced_start", ",", "balance...
cleans up any del_start/del_end markers in the document .
train
true
28,361
def load_opus(name): global _lib _lib = libopus_loader(name)
[ "def", "load_opus", "(", "name", ")", ":", "global", "_lib", "_lib", "=", "libopus_loader", "(", "name", ")" ]
loads the libopus shared library for use with voice .
train
false