id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
34,039 | def get_from_url(url):
import urllib2
try:
return urllib2.urlopen(url).read()
except:
return None
| [
"def",
"get_from_url",
"(",
"url",
")",
":",
"import",
"urllib2",
"try",
":",
"return",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
".",
"read",
"(",
")",
"except",
":",
"return",
"None"
] | retrieve url and return content timeout sets non-standard timeout . | train | false |
34,041 | def copy_local(data, dest, use_put=None):
dpath = os.path.dirname(dest)
if (not os.path.isdir(dpath)):
os.makedirs(dpath)
if use_put:
open(dest, 'ab').write(data)
else:
open(dest, 'a').write(('%s %s\n' % (time.time(), data)))
| [
"def",
"copy_local",
"(",
"data",
",",
"dest",
",",
"use_put",
"=",
"None",
")",
":",
"dpath",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"dest",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dpath",
")",
")",
":",
"os",
".",
... | copy data locally to a file to aid in debugging . | train | false |
34,043 | def _LLSGuessPayloadClass(p, **kargs):
cls = conf.raw_layer
if (len(p) >= 4):
typ = struct.unpack('!H', p[0:2])[0]
clsname = _OSPF_LLSclasses.get(typ, 'LLS_Generic_TLV')
cls = globals()[clsname]
return cls(p, **kargs)
| [
"def",
"_LLSGuessPayloadClass",
"(",
"p",
",",
"**",
"kargs",
")",
":",
"cls",
"=",
"conf",
".",
"raw_layer",
"if",
"(",
"len",
"(",
"p",
")",
">=",
"4",
")",
":",
"typ",
"=",
"struct",
".",
"unpack",
"(",
"'!H'",
",",
"p",
"[",
"0",
":",
"2",
... | guess the correct lls class for a given payload . | train | true |
34,045 | @must_be_logged_in
def personal_access_token_detail(auth, **kwargs):
_id = kwargs.get('_id')
try:
record = ApiOAuth2PersonalToken.find_one(Q('_id', 'eq', _id))
except NoResultsFound:
raise HTTPError(http.NOT_FOUND)
if (record.owner != auth.user):
raise HTTPError(http.FORBIDDEN)
if (record.is_active is False):
raise HTTPError(http.GONE)
token_detail_url = api_v2_url('tokens/{}/'.format(_id))
return {'token_list_url': '', 'token_detail_url': token_detail_url, 'scope_options': get_available_scopes()}
| [
"@",
"must_be_logged_in",
"def",
"personal_access_token_detail",
"(",
"auth",
",",
"**",
"kwargs",
")",
":",
"_id",
"=",
"kwargs",
".",
"get",
"(",
"'_id'",
")",
"try",
":",
"record",
"=",
"ApiOAuth2PersonalToken",
".",
"find_one",
"(",
"Q",
"(",
"'_id'",
... | show detail for a single personal access token . | train | false |
34,046 | def is_int_like(val):
try:
return (str(int(val)) == str(val))
except Exception:
return False
| [
"def",
"is_int_like",
"(",
"val",
")",
":",
"try",
":",
"return",
"(",
"str",
"(",
"int",
"(",
"val",
")",
")",
"==",
"str",
"(",
"val",
")",
")",
"except",
"Exception",
":",
"return",
"False"
] | check if a value looks like an int . | train | false |
34,049 | def set_seed(x):
node_names = list(six.iterkeys(tf.get_default_graph()._nodes_by_name))
if ((len(node_names) > 0) and (node_names != ['keras_learning_phase'])):
raise RuntimeError('Seeding is not supported after initializing part of the graph. Please move set_seed to the beginning of your code.')
np.random.seed(x)
tf.set_random_seed(x)
| [
"def",
"set_seed",
"(",
"x",
")",
":",
"node_names",
"=",
"list",
"(",
"six",
".",
"iterkeys",
"(",
"tf",
".",
"get_default_graph",
"(",
")",
".",
"_nodes_by_name",
")",
")",
"if",
"(",
"(",
"len",
"(",
"node_names",
")",
">",
"0",
")",
"and",
"(",... | set seed for both numpy and tensorflow . | train | false |
34,050 | def _community_email(body, kind):
Email.handler.add_to_queue(c.user, g.community_email, g.domain, g.community_email, kind, body=body)
| [
"def",
"_community_email",
"(",
"body",
",",
"kind",
")",
":",
"Email",
".",
"handler",
".",
"add_to_queue",
"(",
"c",
".",
"user",
",",
"g",
".",
"community_email",
",",
"g",
".",
"domain",
",",
"g",
".",
"community_email",
",",
"kind",
",",
"body",
... | for sending email to the community mailbox . | train | false |
34,051 | def _writeDocument(newFilename, clonedNode):
makeSureDirectoryExists(newFilename)
f = open(newFilename, 'w')
f.write(clonedNode.toxml('utf-8'))
f.close()
| [
"def",
"_writeDocument",
"(",
"newFilename",
",",
"clonedNode",
")",
":",
"makeSureDirectoryExists",
"(",
"newFilename",
")",
"f",
"=",
"open",
"(",
"newFilename",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"clonedNode",
".",
"toxml",
"(",
"'utf-8'",
")",
")... | serialize the given node to xml into the named file . | train | false |
34,052 | def set_remote_login(enable):
state = salt.utils.mac_utils.validate_enabled(enable)
cmd = 'systemsetup -f -setremotelogin {0}'.format(state)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(state, get_remote_login, normalize_ret=True)
| [
"def",
"set_remote_login",
"(",
"enable",
")",
":",
"state",
"=",
"salt",
".",
"utils",
".",
"mac_utils",
".",
"validate_enabled",
"(",
"enable",
")",
"cmd",
"=",
"'systemsetup -f -setremotelogin {0}'",
".",
"format",
"(",
"state",
")",
"salt",
".",
"utils",
... | set the remote login to either on or off . | train | false |
34,053 | def aic_sigma(sigma2, nobs, df_modelwc, islog=False):
if (not islog):
sigma2 = np.log(sigma2)
return (sigma2 + (aic(0, nobs, df_modelwc) / nobs))
| [
"def",
"aic_sigma",
"(",
"sigma2",
",",
"nobs",
",",
"df_modelwc",
",",
"islog",
"=",
"False",
")",
":",
"if",
"(",
"not",
"islog",
")",
":",
"sigma2",
"=",
"np",
".",
"log",
"(",
"sigma2",
")",
"return",
"(",
"sigma2",
"+",
"(",
"aic",
"(",
"0",... | akaike information criterion parameters sigma2 : float estimate of the residual variance or determinant of sigma_hat in the multivariate case . | train | false |
34,055 | def canonical_representation(a, d, DE):
l = Poly((1 / d.LC()), DE.t)
(a, d) = (a.mul(l), d.mul(l))
(q, r) = a.div(d)
(dn, ds) = splitfactor(d, DE)
(b, c) = gcdex_diophantine(dn.as_poly(DE.t), ds.as_poly(DE.t), r.as_poly(DE.t))
(b, c) = (b.as_poly(DE.t), c.as_poly(DE.t))
return (q, (b, ds), (c, dn))
| [
"def",
"canonical_representation",
"(",
"a",
",",
"d",
",",
"DE",
")",
":",
"l",
"=",
"Poly",
"(",
"(",
"1",
"/",
"d",
".",
"LC",
"(",
")",
")",
",",
"DE",
".",
"t",
")",
"(",
"a",
",",
"d",
")",
"=",
"(",
"a",
".",
"mul",
"(",
"l",
")"... | canonical representation . | train | false |
34,056 | def test_tab_insertion():
superConsole.SendKeys('outputRedirectStart{(}True{)}{ENTER}')
testRegex = ''
superConsole.SendKeys('print "j{TAB}{TAB}y"{ENTER}')
testRegex += 'j y'
superConsole.SendKeys('outputRedirectStop{(}{)}{ENTER}')
verifyResults(getTestOutput()[0], testRegex)
| [
"def",
"test_tab_insertion",
"(",
")",
":",
"superConsole",
".",
"SendKeys",
"(",
"'outputRedirectStart{(}True{)}{ENTER}'",
")",
"testRegex",
"=",
"''",
"superConsole",
".",
"SendKeys",
"(",
"'print \"j{TAB}{TAB}y\"{ENTER}'",
")",
"testRegex",
"+=",
"'j y'",
"superCo... | tab insertion . | train | false |
34,057 | def test_should_support_both_meta_sequence_and_constructor_exclude():
class SequencedTable(tables.Table, ):
a = tables.Column()
b = tables.Column()
c = tables.Column()
class Meta:
sequence = (u'a', u'...')
table = SequencedTable([], exclude=(u'c',))
table.as_html(request)
| [
"def",
"test_should_support_both_meta_sequence_and_constructor_exclude",
"(",
")",
":",
"class",
"SequencedTable",
"(",
"tables",
".",
"Table",
",",
")",
":",
"a",
"=",
"tables",
".",
"Column",
"(",
")",
"b",
"=",
"tables",
".",
"Column",
"(",
")",
"c",
"=",... | issue #32 describes a problem when both meta . | train | false |
34,058 | def get_texcommand():
texsystem_options = [u'xelatex', u'lualatex', u'pdflatex']
texsystem = rcParams.get(u'pgf.texsystem', u'xelatex')
return (texsystem if (texsystem in texsystem_options) else u'xelatex')
| [
"def",
"get_texcommand",
"(",
")",
":",
"texsystem_options",
"=",
"[",
"u'xelatex'",
",",
"u'lualatex'",
",",
"u'pdflatex'",
"]",
"texsystem",
"=",
"rcParams",
".",
"get",
"(",
"u'pgf.texsystem'",
",",
"u'xelatex'",
")",
"return",
"(",
"texsystem",
"if",
"(",
... | get chosen tex system from rc . | train | false |
34,059 | def _multinomial_loss(w, X, Y, alpha, sample_weight):
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == (n_classes * (n_features + 1)))
w = w.reshape(n_classes, (-1))
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, (-1)]
w = w[:, :(-1)]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = (- ((sample_weight * Y) * p).sum())
loss += ((0.5 * alpha) * squared_norm(w))
p = np.exp(p, p)
return (loss, p, w)
| [
"def",
"_multinomial_loss",
"(",
"w",
",",
"X",
",",
"Y",
",",
"alpha",
",",
"sample_weight",
")",
":",
"n_classes",
"=",
"Y",
".",
"shape",
"[",
"1",
"]",
"n_features",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"fit_intercept",
"=",
"(",
"w",
".",
... | computes multinomial loss and class probabilities . | train | false |
34,060 | def _match_query(query, attrs, attrs_checked):
inner = query[1:(-1)]
if inner.startswith(('&', '|')):
if (inner[0] == '&'):
matchfn = all
else:
matchfn = any
groups = _paren_groups(inner[1:])
return matchfn((_match_query(group, attrs, attrs_checked) for group in groups))
if inner.startswith('!'):
return (not _match_query(query[2:(-1)], attrs, attrs_checked))
(k, _sep, v) = inner.partition('=')
attrs_checked.add(k.lower())
return _match(k, v, attrs)
| [
"def",
"_match_query",
"(",
"query",
",",
"attrs",
",",
"attrs_checked",
")",
":",
"inner",
"=",
"query",
"[",
"1",
":",
"(",
"-",
"1",
")",
"]",
"if",
"inner",
".",
"startswith",
"(",
"(",
"'&'",
",",
"'|'",
")",
")",
":",
"if",
"(",
"inner",
... | match an ldap query to an attribute dictionary . | train | false |
34,061 | @real_memoize
def is_windows():
return sys.platform.startswith('win')
| [
"@",
"real_memoize",
"def",
"is_windows",
"(",
")",
":",
"return",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")"
] | simple function to return if a host is windows or not . | train | false |
34,064 | def get_ptr_type(structure, member):
if (len(member) > 1):
(_, tp) = get_obj_offset(types, [structure, member[0]])
if (tp == 'array'):
return types[structure][1][member[0]][1][2][1]
else:
return get_ptr_type(tp, member[1:])
else:
return types[structure][1][member[0]][1][1]
| [
"def",
"get_ptr_type",
"(",
"structure",
",",
"member",
")",
":",
"if",
"(",
"len",
"(",
"member",
")",
">",
"1",
")",
":",
"(",
"_",
",",
"tp",
")",
"=",
"get_obj_offset",
"(",
"types",
",",
"[",
"structure",
",",
"member",
"[",
"0",
"]",
"]",
... | return the type a pointer points to . | train | false |
34,065 | @require_chanmsg
@require_privilege(OP, u'You are not a channel operator.')
@commands(u'kick')
@priority(u'high')
def kick(bot, trigger):
if (bot.privileges[trigger.sender][bot.nick] < HALFOP):
return bot.reply(u"I'm not a channel operator!")
text = trigger.group().split()
argc = len(text)
if (argc < 2):
return
opt = Identifier(text[1])
nick = opt
channel = trigger.sender
reasonidx = 2
if (not opt.is_nick()):
if (argc < 3):
return
nick = text[2]
channel = opt
reasonidx = 3
reason = u' '.join(text[reasonidx:])
if (nick != bot.config.core.nick):
bot.write([u'KICK', channel, nick], reason)
| [
"@",
"require_chanmsg",
"@",
"require_privilege",
"(",
"OP",
",",
"u'You are not a channel operator.'",
")",
"@",
"commands",
"(",
"u'kick'",
")",
"@",
"priority",
"(",
"u'high'",
")",
"def",
"kick",
"(",
"bot",
",",
"trigger",
")",
":",
"if",
"(",
"bot",
... | kick a user from the channel . | train | false |
34,066 | def dzip_exact(*dicts):
if (not same(*map(viewkeys, dicts))):
raise ValueError(('dict keys not all equal:\n\n%s' % _format_unequal_keys(dicts)))
return {k: tuple((d[k] for d in dicts)) for k in dicts[0]}
| [
"def",
"dzip_exact",
"(",
"*",
"dicts",
")",
":",
"if",
"(",
"not",
"same",
"(",
"*",
"map",
"(",
"viewkeys",
",",
"dicts",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'dict keys not all equal:\\n\\n%s'",
"%",
"_format_unequal_keys",
"(",
"dicts"... | parameters *dicts : iterable[dict] a sequence of dicts all sharing the same keys . | train | true |
34,067 | @pytest.mark.network
def test_run_method_should_return_no_matches_found_when_does_not_find_pkgs():
command = SearchCommand()
cmdline = '--index=https://pypi.python.org/pypi nonexistentpackage'
(options, args) = command.parse_args(cmdline.split())
status = command.run(options, args)
assert (status == NO_MATCHES_FOUND)
| [
"@",
"pytest",
".",
"mark",
".",
"network",
"def",
"test_run_method_should_return_no_matches_found_when_does_not_find_pkgs",
"(",
")",
":",
"command",
"=",
"SearchCommand",
"(",
")",
"cmdline",
"=",
"'--index=https://pypi.python.org/pypi nonexistentpackage'",
"(",
"options",
... | test searchcommand . | train | false |
34,068 | def write_backreferences(seen_backrefs, gallery_conf, target_dir, fname, snippet):
example_file = os.path.join(target_dir, fname)
build_target = os.path.relpath(target_dir, gallery_conf['src_dir'])
backrefs = scan_used_functions(example_file, gallery_conf)
for backref in backrefs:
include_path = os.path.join(gallery_conf['src_dir'], gallery_conf['mod_example_dir'], ('%s.examples' % backref))
seen = (backref in seen_backrefs)
with open(include_path, ('a' if seen else 'w')) as ex_file:
if (not seen):
heading = ('\n\nExamples using ``%s``' % backref)
ex_file.write((heading + '\n'))
ex_file.write((('^' * len(heading)) + '\n'))
ex_file.write(_thumbnail_div(build_target, fname, snippet, is_backref=True))
seen_backrefs.add(backref)
| [
"def",
"write_backreferences",
"(",
"seen_backrefs",
",",
"gallery_conf",
",",
"target_dir",
",",
"fname",
",",
"snippet",
")",
":",
"example_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_dir",
",",
"fname",
")",
"build_target",
"=",
"os",
".",
... | writes down back reference files . | train | true |
34,069 | def submit_form(form, extra_values=None, open_http=None):
values = form.form_values()
if extra_values:
if hasattr(extra_values, 'items'):
extra_values = extra_values.items()
values.extend(extra_values)
if (open_http is None):
open_http = open_http_urllib
if form.action:
url = form.action
else:
url = form.base_url
return open_http(form.method, url, values)
| [
"def",
"submit_form",
"(",
"form",
",",
"extra_values",
"=",
"None",
",",
"open_http",
"=",
"None",
")",
":",
"values",
"=",
"form",
".",
"form_values",
"(",
")",
"if",
"extra_values",
":",
"if",
"hasattr",
"(",
"extra_values",
",",
"'items'",
")",
":",
... | helper function to submit a form . | train | true |
34,070 | def test_simple_class_based_view():
@hug.object.urls('/endpoint', requires=())
class MyClass(object, ):
@hug.object.get()
def my_method(self):
return 'hi there!'
@hug.object.post()
def my_method_two(self):
return 'bye'
assert (hug.test.get(api, 'endpoint').data == 'hi there!')
assert (hug.test.post(api, 'endpoint').data == 'bye')
| [
"def",
"test_simple_class_based_view",
"(",
")",
":",
"@",
"hug",
".",
"object",
".",
"urls",
"(",
"'/endpoint'",
",",
"requires",
"=",
"(",
")",
")",
"class",
"MyClass",
"(",
"object",
",",
")",
":",
"@",
"hug",
".",
"object",
".",
"get",
"(",
")",
... | test creating class based routers . | train | false |
34,071 | def disable_event_thread(self):
import nova.virt.libvirt.host
def evloop(*args, **kwargs):
pass
self.useFixture(fixtures.MonkeyPatch('nova.virt.libvirt.host.Host._init_events', evloop))
| [
"def",
"disable_event_thread",
"(",
"self",
")",
":",
"import",
"nova",
".",
"virt",
".",
"libvirt",
".",
"host",
"def",
"evloop",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"pass",
"self",
".",
"useFixture",
"(",
"fixtures",
".",
"MonkeyPatch",
... | disable nova libvirt driver event thread . | train | false |
34,073 | def random_objs(objs, album, number=1, time=None, equal_chance=False):
if equal_chance:
perm = _equal_chance_permutation(objs)
else:
perm = objs
random.shuffle(perm)
if time:
return _take_time(perm, (time * 60), album)
else:
return _take(perm, number)
| [
"def",
"random_objs",
"(",
"objs",
",",
"album",
",",
"number",
"=",
"1",
",",
"time",
"=",
"None",
",",
"equal_chance",
"=",
"False",
")",
":",
"if",
"equal_chance",
":",
"perm",
"=",
"_equal_chance_permutation",
"(",
"objs",
")",
"else",
":",
"perm",
... | get a random subset of the provided objs . | train | false |
34,074 | def _to_full_path(item, path_prefix):
if (not item):
return item
return (item[0], item[1], (path_prefix + item[2]))
| [
"def",
"_to_full_path",
"(",
"item",
",",
"path_prefix",
")",
":",
"if",
"(",
"not",
"item",
")",
":",
"return",
"item",
"return",
"(",
"item",
"[",
"0",
"]",
",",
"item",
"[",
"1",
"]",
",",
"(",
"path_prefix",
"+",
"item",
"[",
"2",
"]",
")",
... | rebuild entry with given path prefix . | train | false |
34,075 | def nics_skipped(name, nics, ipv6=False):
ret = {'name': ','.join(nics), 'changes': {}, 'result': True, 'comment': 'NICs skipped.'}
current_skipped_nics = __salt__['csf.get_skipped_nics'](ipv6=ipv6)
if (nics == current_skipped_nics):
return ret
result = __salt__['csf.skip_nics'](nics, ipv6=ipv6)
ret['changes']['Skipped NICs'] = 'Changed'
return ret
| [
"def",
"nics_skipped",
"(",
"name",
",",
"nics",
",",
"ipv6",
"=",
"False",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"','",
".",
"join",
"(",
"nics",
")",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"'NI... | name meaningless arg . | train | true |
34,076 | def module_file(module):
name = module.__file__
return (name[:(-1)] if name.endswith(u'.pyc') else name)
| [
"def",
"module_file",
"(",
"module",
")",
":",
"name",
"=",
"module",
".",
"__file__",
"return",
"(",
"name",
"[",
":",
"(",
"-",
"1",
")",
"]",
"if",
"name",
".",
"endswith",
"(",
"u'.pyc'",
")",
"else",
"name",
")"
] | return the correct original file name of a module . | train | false |
34,077 | def cmd_script(args):
if (len(args) < 1):
print 'usage: script <filename>'
return
run_script(args[0])
| [
"def",
"cmd_script",
"(",
"args",
")",
":",
"if",
"(",
"len",
"(",
"args",
")",
"<",
"1",
")",
":",
"print",
"'usage: script <filename>'",
"return",
"run_script",
"(",
"args",
"[",
"0",
"]",
")"
] | run a script . | train | false |
34,078 | def get_current_context(silent=False):
try:
return getattr(_local, 'stack')[(-1)]
except (AttributeError, IndexError):
if (not silent):
raise RuntimeError('There is no active click context.')
| [
"def",
"get_current_context",
"(",
"silent",
"=",
"False",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"_local",
",",
"'stack'",
")",
"[",
"(",
"-",
"1",
")",
"]",
"except",
"(",
"AttributeError",
",",
"IndexError",
")",
":",
"if",
"(",
"not",
"s... | returns the current click context . | train | false |
34,080 | def get_im_version():
try:
out = util.command_output(['convert', '--version'])
if ('imagemagick' in out.lower()):
pattern = '.+ (\\d+)\\.(\\d+)\\.(\\d+).*'
match = re.search(pattern, out)
if match:
return (int(match.group(1)), int(match.group(2)), int(match.group(3)))
return (0,)
except (subprocess.CalledProcessError, OSError) as exc:
log.debug(u'ImageMagick check `convert --version` failed: {}', exc)
return None
| [
"def",
"get_im_version",
"(",
")",
":",
"try",
":",
"out",
"=",
"util",
".",
"command_output",
"(",
"[",
"'convert'",
",",
"'--version'",
"]",
")",
"if",
"(",
"'imagemagick'",
"in",
"out",
".",
"lower",
"(",
")",
")",
":",
"pattern",
"=",
"'.+ (\\\\d+)... | return image magick version or none if it is unavailable try invoking imagemagicks "convert" . | train | false |
34,081 | def kivy_usage():
print (kivy_usage.__doc__ % basename(sys.argv[0]))
| [
"def",
"kivy_usage",
"(",
")",
":",
"print",
"(",
"kivy_usage",
".",
"__doc__",
"%",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")"
] | kivy usage: %s [option . | train | false |
34,083 | def _wrapper(orig, pre='', post='', err_=None, run_args=None, **kwargs):
ret = ''
cmd = _cmd(**kwargs)
cmd_str = ' '.join([x for x in (pre, cmd, post, orig)])
if (run_args and isinstance(run_args, dict)):
res = __salt__['cmd.run_all'](cmd_str, **run_args)
else:
res = __salt__['cmd.run_all'](cmd_str)
if isinstance(err_, dict):
for (k, v) in six.itermitems(res):
err_[k] = v
if (('retcode' in res) and (res['retcode'] != 0)):
msg = ' '.join([x for x in (res['stdout'], res['stderr']) if x])
ret = 'Unable to run "{0}" with run_args="{1}". Error: {2}'.format(cmd_str, run_args, msg)
log.error(ret)
else:
try:
ret = res['stdout']
except KeyError:
log.error("cmd.run_all did not return a dictionary with a key named 'stdout'")
return ret
| [
"def",
"_wrapper",
"(",
"orig",
",",
"pre",
"=",
"''",
",",
"post",
"=",
"''",
",",
"err_",
"=",
"None",
",",
"run_args",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"ret",
"=",
"''",
"cmd",
"=",
"_cmd",
"(",
"**",
"kwargs",
")",
"cmd_str",
"="... | helper function that wraps the execution of freebsd-update command . | train | true |
34,084 | def test_SymLogNorm():
norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2)
vals = np.array([(-30), (-1), 2, 6], dtype=float)
normed_vals = norm(vals)
expected = [0.0, 0.53980074, 0.826991, 1.02758204]
assert_array_almost_equal(normed_vals, expected)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
norm = mcolors.SymLogNorm(3, vmin=(-30), vmax=5, linscale=1.2)
normed_vals = norm(vals)
assert_array_almost_equal(normed_vals, expected)
| [
"def",
"test_SymLogNorm",
"(",
")",
":",
"norm",
"=",
"mcolors",
".",
"SymLogNorm",
"(",
"3",
",",
"vmax",
"=",
"5",
",",
"linscale",
"=",
"1.2",
")",
"vals",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"-",
"30",
")",
",",
"(",
"-",
"1",
")",
",... | test symlognorm behavior . | train | false |
34,087 | def dump_module_extensions(course, unit):
data = []
header = [_('Username'), _('Full Name'), _('Extended Due Date')]
query = StudentFieldOverride.objects.filter(course_id=course.id, location=unit.location, field='due')
for override in query:
due = DATE_FIELD.from_json(json.loads(override.value))
due = due.strftime('%Y-%m-%d %H:%M')
fullname = override.student.profile.name
data.append(dict(zip(header, (override.student.username, fullname, due))))
data.sort(key=(lambda x: x[header[0]]))
return {'header': header, 'title': _('Users with due date extensions for {0}').format(title_or_url(unit)), 'data': data}
| [
"def",
"dump_module_extensions",
"(",
"course",
",",
"unit",
")",
":",
"data",
"=",
"[",
"]",
"header",
"=",
"[",
"_",
"(",
"'Username'",
")",
",",
"_",
"(",
"'Full Name'",
")",
",",
"_",
"(",
"'Extended Due Date'",
")",
"]",
"query",
"=",
"StudentFiel... | dumps data about students with due date extensions for a particular module . | train | false |
34,088 | def decode_path(path):
if isinstance(path, unicode):
return path
try:
path = path.decode(sys.getfilesystemencoding(), 'strict')
except AttributeError:
return nodes.reprunicode(path)
except UnicodeDecodeError:
try:
path = path.decode('utf-8', 'strict')
except UnicodeDecodeError:
path = path.decode('ascii', 'replace')
return nodes.reprunicode(path)
| [
"def",
"decode_path",
"(",
"path",
")",
":",
"if",
"isinstance",
"(",
"path",
",",
"unicode",
")",
":",
"return",
"path",
"try",
":",
"path",
"=",
"path",
".",
"decode",
"(",
"sys",
".",
"getfilesystemencoding",
"(",
")",
",",
"'strict'",
")",
"except"... | ensure path is unicode . | train | false |
34,092 | def get_domain_object(model, domain_object_ref):
if (domain_object_ref in ('system', 'System')):
return model.System
pkg = model.Package.get(domain_object_ref)
if pkg:
return pkg
group = model.Group.get(domain_object_ref)
if group:
return group
user = model.User.get(domain_object_ref)
if user:
return user
raise NotFound(('Domain object %r not found' % domain_object_ref))
| [
"def",
"get_domain_object",
"(",
"model",
",",
"domain_object_ref",
")",
":",
"if",
"(",
"domain_object_ref",
"in",
"(",
"'system'",
",",
"'System'",
")",
")",
":",
"return",
"model",
".",
"System",
"pkg",
"=",
"model",
".",
"Package",
".",
"get",
"(",
"... | for an id or name . | train | false |
34,093 | def _makeLineNumbers(howMany):
width = len(str(howMany))
labels = [('%*d' % (width, i)) for i in range(1, (howMany + 1))]
p = dom.Element('p')
p.setAttribute('class', 'py-linenumber')
t = dom.Text()
t.data = ('\n'.join(labels) + '\n')
p.appendChild(t)
return p
| [
"def",
"_makeLineNumbers",
"(",
"howMany",
")",
":",
"width",
"=",
"len",
"(",
"str",
"(",
"howMany",
")",
")",
"labels",
"=",
"[",
"(",
"'%*d'",
"%",
"(",
"width",
",",
"i",
")",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"(",
"howMany",
"... | return an element which will render line numbers for a source listing . | train | false |
34,094 | def _keypair_get_count_by_user(*args, **kwargs):
return objects.KeyPairList.get_count_by_user(*args, **kwargs)
| [
"def",
"_keypair_get_count_by_user",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"objects",
".",
"KeyPairList",
".",
"get_count_by_user",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] | helper method to avoid referencing objects . | train | false |
34,095 | def notificationNch():
a = L2PseudoLength(l2pLength=1)
b = TpPd(pd=6)
c = MessageType(mesType=32)
d = NtNRestOctets()
packet = (((a / b) / c) / d)
return packet
| [
"def",
"notificationNch",
"(",
")",
":",
"a",
"=",
"L2PseudoLength",
"(",
"l2pLength",
"=",
"1",
")",
"b",
"=",
"TpPd",
"(",
"pd",
"=",
"6",
")",
"c",
"=",
"MessageType",
"(",
"mesType",
"=",
"32",
")",
"d",
"=",
"NtNRestOctets",
"(",
")",
"packet"... | notification/nch section 9 . | train | true |
34,097 | def get_rev_label_from_changeset_revision(repo, changeset_revision, include_date=True, include_hash=True):
ctx = get_changectx_for_changeset(repo, changeset_revision)
if ctx:
rev = ('%04d' % ctx.rev())
label = get_revision_label_from_ctx(ctx, include_date=include_date)
else:
rev = '-1'
label = ('-1:%s' % changeset_revision)
return (rev, label)
| [
"def",
"get_rev_label_from_changeset_revision",
"(",
"repo",
",",
"changeset_revision",
",",
"include_date",
"=",
"True",
",",
"include_hash",
"=",
"True",
")",
":",
"ctx",
"=",
"get_changectx_for_changeset",
"(",
"repo",
",",
"changeset_revision",
")",
"if",
"ctx",... | given a changeset revision hash . | train | false |
34,098 | def eval_atom(parse_result):
result = next((k for k in parse_result if isinstance(k, numbers.Number)))
return result
| [
"def",
"eval_atom",
"(",
"parse_result",
")",
":",
"result",
"=",
"next",
"(",
"(",
"k",
"for",
"k",
"in",
"parse_result",
"if",
"isinstance",
"(",
"k",
",",
"numbers",
".",
"Number",
")",
")",
")",
"return",
"result"
] | return the value wrapped by the atom . | train | false |
34,099 | def cumprod(x, axis=None):
return CumOp(axis=axis, mode='mul')(x)
| [
"def",
"cumprod",
"(",
"x",
",",
"axis",
"=",
"None",
")",
":",
"return",
"CumOp",
"(",
"axis",
"=",
"axis",
",",
"mode",
"=",
"'mul'",
")",
"(",
"x",
")"
] | return the cumulative product of the elements along a given axis . | train | false |
34,100 | def getAwayPoints(points, radius):
awayPoints = []
oneOverOverlapDistance = (1000.0 / radius)
pixelDictionary = {}
for point in points:
x = int((point.real * oneOverOverlapDistance))
y = int((point.imag * oneOverOverlapDistance))
if (not getSquareIsOccupied(pixelDictionary, x, y)):
awayPoints.append(point)
pixelDictionary[(x, y)] = None
return awayPoints
| [
"def",
"getAwayPoints",
"(",
"points",
",",
"radius",
")",
":",
"awayPoints",
"=",
"[",
"]",
"oneOverOverlapDistance",
"=",
"(",
"1000.0",
"/",
"radius",
")",
"pixelDictionary",
"=",
"{",
"}",
"for",
"point",
"in",
"points",
":",
"x",
"=",
"int",
"(",
... | get a path with only the points that are far enough away from each other . | train | false |
34,101 | def filter_formstyle_summary(form, fields, *args, **kwargs):
def render_row(row_id, label, widget, comment, hidden=False):
controls = FIELDSET(LEGEND(label), widget)
return DIV(controls, _id=row_id)
if args:
row_id = form
label = fields
(widget, comment) = args
hidden = kwargs.get('hidden', False)
return render_row(row_id, label, widget, comment, hidden)
else:
parent = TAG['']()
for (row_id, label, widget, comment) in fields:
parent.append(render_row(row_id, label, widget, comment))
return parent
| [
"def",
"filter_formstyle_summary",
"(",
"form",
",",
"fields",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"def",
"render_row",
"(",
"row_id",
",",
"label",
",",
"widget",
",",
"comment",
",",
"hidden",
"=",
"False",
")",
":",
"controls",
"=",
"FI... | custom formstyle for filters on the incident summary page . | train | false |
34,102 | def FindPehash(results):
for r in results:
if (r['name'] == 'pecoff'):
res = r
if ('SignedData' not in res):
print 'PE File, but no signature data present.'
return
for hashes in ('md5', 'sha1', 'sha256', 'sha512'):
if (res['SignedData'][0][2].find(res[hashes]) != (-1)):
print ('Found matching %s hash in SignedData.' % hashes)
return
print 'PE File with signature data, NO hash matches.'
| [
"def",
"FindPehash",
"(",
"results",
")",
":",
"for",
"r",
"in",
"results",
":",
"if",
"(",
"r",
"[",
"'name'",
"]",
"==",
"'pecoff'",
")",
":",
"res",
"=",
"r",
"if",
"(",
"'SignedData'",
"not",
"in",
"res",
")",
":",
"print",
"'PE File, but no sign... | if file is pe/coff . | train | false |
34,103 | def _branch(results):
results = list(results)
ret = Result()
for result in results[:(-1)]:
ret += result
ret += result.expr_as_stmt()
for result in results[(-1):]:
ret += result
return ret
| [
"def",
"_branch",
"(",
"results",
")",
":",
"results",
"=",
"list",
"(",
"results",
")",
"ret",
"=",
"Result",
"(",
")",
"for",
"result",
"in",
"results",
"[",
":",
"(",
"-",
"1",
")",
"]",
":",
"ret",
"+=",
"result",
"ret",
"+=",
"result",
".",
... | make a branch out of a list of result objects this generates a result from the given sequence of results . | train | false |
34,104 | def pathname2url(pathname):
return urllib.quote(('///' + pathname.translate(__slash_dot)), '/$:')
| [
"def",
"pathname2url",
"(",
"pathname",
")",
":",
"return",
"urllib",
".",
"quote",
"(",
"(",
"'///'",
"+",
"pathname",
".",
"translate",
"(",
"__slash_dot",
")",
")",
",",
"'/$:'",
")"
] | os-specific conversion from a file system path to a relative url of the file scheme; not recommended for general use . | train | false |
34,106 | def writeDjangoObject(obj, encoder=None):
s = obj.pk
if (s is None):
encoder.writeObject(obj)
return
django_objects = getDjangoObjects(encoder.context)
kls = obj.__class__
try:
referenced_object = django_objects.getClassKey(kls, s)
except KeyError:
referenced_object = obj
django_objects.addClassKey(kls, s, obj)
encoder.writeObject(referenced_object)
| [
"def",
"writeDjangoObject",
"(",
"obj",
",",
"encoder",
"=",
"None",
")",
":",
"s",
"=",
"obj",
".",
"pk",
"if",
"(",
"s",
"is",
"None",
")",
":",
"encoder",
".",
"writeObject",
"(",
"obj",
")",
"return",
"django_objects",
"=",
"getDjangoObjects",
"(",... | the django orm creates new instances of objects for each db request . | train | true |
34,107 | def get_wyr(headers):
r = requests.get(url=API_URL, headers=headers)
data = r.json()
data['title'] = data['title'].strip().capitalize().rstrip('.?,:')
data['choicea'] = data['choicea'].strip().lower().rstrip('.?,!').lstrip('.')
data['choiceb'] = data['choiceb'].strip().lower().rstrip('.?,!').lstrip('.')
if data['tags']:
data['tags'] = data['tags'].lower().split(',')
else:
data['tags'] = []
if data['nsfw']:
data['tags'].append('nsfw')
return data
| [
"def",
"get_wyr",
"(",
"headers",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
"=",
"API_URL",
",",
"headers",
"=",
"headers",
")",
"data",
"=",
"r",
".",
"json",
"(",
")",
"data",
"[",
"'title'",
"]",
"=",
"data",
"[",
"'title'",
"]",... | gets a entry from the rrrather api and cleans up the data . | train | false |
34,108 | def time_label(seconds, decimal=0, is_long=False):
return _get_label(TIME_UNITS, seconds, decimal, is_long)
| [
"def",
"time_label",
"(",
"seconds",
",",
"decimal",
"=",
"0",
",",
"is_long",
"=",
"False",
")",
":",
"return",
"_get_label",
"(",
"TIME_UNITS",
",",
"seconds",
",",
"decimal",
",",
"is_long",
")"
] | converts seconds into a time label truncated to its most significant units . | train | false |
34,109 | def measure(n):
m1 = np.random.normal(size=n)
m2 = np.random.normal(scale=0.5, size=n)
return ((m1 + m2), (m1 - m2))
| [
"def",
"measure",
"(",
"n",
")",
":",
"m1",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"n",
")",
"m2",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"scale",
"=",
"0.5",
",",
"size",
"=",
"n",
")",
"return",
"(",
"(",
"m1",
... | measurement model . | train | false |
34,112 | def eval_product(parse_result):
prod = 1.0
current_op = operator.mul
for token in parse_result:
if (token == '*'):
current_op = operator.mul
elif (token == '/'):
current_op = operator.truediv
else:
prod = current_op(prod, token)
return prod
| [
"def",
"eval_product",
"(",
"parse_result",
")",
":",
"prod",
"=",
"1.0",
"current_op",
"=",
"operator",
".",
"mul",
"for",
"token",
"in",
"parse_result",
":",
"if",
"(",
"token",
"==",
"'*'",
")",
":",
"current_op",
"=",
"operator",
".",
"mul",
"elif",
... | multiply the inputs . | train | false |
34,113 | def detect_format(stream):
for fmt in formats.available:
try:
if fmt.detect(stream):
return fmt.title
except AttributeError:
pass
| [
"def",
"detect_format",
"(",
"stream",
")",
":",
"for",
"fmt",
"in",
"formats",
".",
"available",
":",
"try",
":",
"if",
"fmt",
".",
"detect",
"(",
"stream",
")",
":",
"return",
"fmt",
".",
"title",
"except",
"AttributeError",
":",
"pass"
] | return format name of given stream . | train | false |
34,115 | def volume_type_get(context, id, inactive=False, expected_fields=None):
return IMPL.volume_type_get(context, id, inactive, expected_fields)
| [
"def",
"volume_type_get",
"(",
"context",
",",
"id",
",",
"inactive",
"=",
"False",
",",
"expected_fields",
"=",
"None",
")",
":",
"return",
"IMPL",
".",
"volume_type_get",
"(",
"context",
",",
"id",
",",
"inactive",
",",
"expected_fields",
")"
] | get volume type by id . | train | false |
34,116 | def parse_scoped_name(scoped_name):
scoped_name = scoped_name.strip()
if ('::' in scoped_name):
scoped_name = scoped_name.replace('::', ':')
(module_name, object_name) = scoped_name.rsplit(':', 1)
return (module_name, object_name)
| [
"def",
"parse_scoped_name",
"(",
"scoped_name",
")",
":",
"scoped_name",
"=",
"scoped_name",
".",
"strip",
"(",
")",
"if",
"(",
"'::'",
"in",
"scoped_name",
")",
":",
"scoped_name",
"=",
"scoped_name",
".",
"replace",
"(",
"'::'",
",",
"':'",
")",
"(",
"... | schema: my . | train | false |
34,117 | def clust_qual_ratio(dists, map_data, category):
(map_dict, comments) = map_data
(dist_headers, dmtx) = dists
if (len(set(dist_headers)) != len(dist_headers)):
raise RuntimeError('Error: distance matrix headers are non unique')
cats = sorted([map_dict[sam][category] for sam in map_dict])
diff_dists = []
same_dists = []
for i in range(len(dist_headers)):
for j in range(i):
if (map_dict[dist_headers[i]][category] == map_dict[dist_headers[j]][category]):
same_dists.append(dmtx[(i, j)])
else:
diff_dists.append(dmtx[(i, j)])
return (diff_dists, same_dists)
| [
"def",
"clust_qual_ratio",
"(",
"dists",
",",
"map_data",
",",
"category",
")",
":",
"(",
"map_dict",
",",
"comments",
")",
"=",
"map_data",
"(",
"dist_headers",
",",
"dmtx",
")",
"=",
"dists",
"if",
"(",
"len",
"(",
"set",
"(",
"dist_headers",
")",
")... | measures within category and between category dissimilarities input: distance matrix [header . | train | false |
34,118 | def _calc_num_themes_checkout(locks):
current_num = locks.count()
if (current_num < rvw.THEME_INITIAL_LOCKS):
return ((rvw.THEME_INITIAL_LOCKS - current_num), [])
else:
locks.update(expiry=get_updated_expiry())
return (0, [lock.theme for lock in locks])
| [
"def",
"_calc_num_themes_checkout",
"(",
"locks",
")",
":",
"current_num",
"=",
"locks",
".",
"count",
"(",
")",
"if",
"(",
"current_num",
"<",
"rvw",
".",
"THEME_INITIAL_LOCKS",
")",
":",
"return",
"(",
"(",
"rvw",
".",
"THEME_INITIAL_LOCKS",
"-",
"current_... | calculate number of themes to check out based on how many themes user currently has checked out . | train | false |
34,119 | def remove_event_handler(name, func):
for e in list(_events.get(name, [])):
if (e.func is func):
_events[name].remove(e)
| [
"def",
"remove_event_handler",
"(",
"name",
",",
"func",
")",
":",
"for",
"e",
"in",
"list",
"(",
"_events",
".",
"get",
"(",
"name",
",",
"[",
"]",
")",
")",
":",
"if",
"(",
"e",
".",
"func",
"is",
"func",
")",
":",
"_events",
"[",
"name",
"]"... | remove func from the handlers for event name . | train | false |
34,120 | def _is_changelist_popup(request):
if (IS_POPUP_VAR in request.GET):
return True
IS_LEGACY_POPUP_VAR = 'pop'
if (IS_LEGACY_POPUP_VAR in request.GET):
warnings.warn(('The `%s` GET parameter has been renamed to `%s`.' % (IS_LEGACY_POPUP_VAR, IS_POPUP_VAR)), PendingDeprecationWarning, 2)
return True
return False
| [
"def",
"_is_changelist_popup",
"(",
"request",
")",
":",
"if",
"(",
"IS_POPUP_VAR",
"in",
"request",
".",
"GET",
")",
":",
"return",
"True",
"IS_LEGACY_POPUP_VAR",
"=",
"'pop'",
"if",
"(",
"IS_LEGACY_POPUP_VAR",
"in",
"request",
".",
"GET",
")",
":",
"warnin... | returns true if the popup get parameter is set . | train | false |
34,121 | def filter_access_token(interaction, current_cassette):
request_uri = interaction.data['request']['uri']
response = interaction.data['response']
if (('api/v1/access_token' not in request_uri) or (response['status']['code'] != 200)):
return
body = response['body']['string']
try:
token = json.loads(body)['access_token']
except (KeyError, TypeError, ValueError):
return
current_cassette.placeholders.append(betamax.cassette.cassette.Placeholder(placeholder='<ACCESS_TOKEN>', replace=token))
| [
"def",
"filter_access_token",
"(",
"interaction",
",",
"current_cassette",
")",
":",
"request_uri",
"=",
"interaction",
".",
"data",
"[",
"'request'",
"]",
"[",
"'uri'",
"]",
"response",
"=",
"interaction",
".",
"data",
"[",
"'response'",
"]",
"if",
"(",
"("... | add betamax placeholder to filter access token . | train | false |
34,122 | @verbose
def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256, n_overlap=0, picks=None, proj=False, n_jobs=1, verbose=None):
(data, sfreq) = _check_psd_data(inst, tmin, tmax, picks, proj)
return _psd_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft, n_overlap=n_overlap, n_jobs=n_jobs)
| [
"@",
"verbose",
"def",
"psd_welch",
"(",
"inst",
",",
"fmin",
"=",
"0",
",",
"fmax",
"=",
"np",
".",
"inf",
",",
"tmin",
"=",
"None",
",",
"tmax",
"=",
"None",
",",
"n_fft",
"=",
"256",
",",
"n_overlap",
"=",
"0",
",",
"picks",
"=",
"None",
","... | compute the power spectral density using welchs method . | train | false |
34,123 | def buildRequestFrames(headers, data, frameFactory=None, streamID=1):
if (frameFactory is None):
frameFactory = FrameFactory()
frames = []
frames.append(frameFactory.buildHeadersFrame(headers=headers, streamID=streamID))
frames.extend((frameFactory.buildDataFrame(chunk, streamID=streamID) for chunk in data))
frames[(-1)].flags.add('END_STREAM')
return frames
| [
"def",
"buildRequestFrames",
"(",
"headers",
",",
"data",
",",
"frameFactory",
"=",
"None",
",",
"streamID",
"=",
"1",
")",
":",
"if",
"(",
"frameFactory",
"is",
"None",
")",
":",
"frameFactory",
"=",
"FrameFactory",
"(",
")",
"frames",
"=",
"[",
"]",
... | provides a sequence of http/2 frames that encode a single http request . | train | false |
34,124 | def db_update_group(**kwargs):
group_id = kwargs.pop('id')
asset_id_list = kwargs.pop('asset_select')
group = get_object(AssetGroup, id=group_id)
for asset_id in asset_id_list:
group_add_asset(group, asset_id)
AssetGroup.objects.filter(id=group_id).update(**kwargs)
| [
"def",
"db_update_group",
"(",
"**",
"kwargs",
")",
":",
"group_id",
"=",
"kwargs",
".",
"pop",
"(",
"'id'",
")",
"asset_id_list",
"=",
"kwargs",
".",
"pop",
"(",
"'asset_select'",
")",
"group",
"=",
"get_object",
"(",
"AssetGroup",
",",
"id",
"=",
"grou... | add a asset group in database . | train | false |
34,125 | def test_suggested_column_names_with_qualified_alias(completer, complete_event):
text = u'SELECT p. from custom.products p'
position = len(u'SELECT p.')
result = set(completer.get_completions(Document(text=text, cursor_position=position), complete_event))
assert (set(result) == set(testdata.columns(u'products', u'custom')))
| [
"def",
"test_suggested_column_names_with_qualified_alias",
"(",
"completer",
",",
"complete_event",
")",
":",
"text",
"=",
"u'SELECT p. from custom.products p'",
"position",
"=",
"len",
"(",
"u'SELECT p.'",
")",
"result",
"=",
"set",
"(",
"completer",
".",
"get_completi... | suggest column names on table alias and dot . | train | false |
34,126 | def OAuthGetRequestTokenCGI(outfile):
outfile.write('Status: 200\r\n')
outfile.write('Content-Type: text/plain\r\n')
outfile.write('\r\n')
outfile.write('oauth_token=REQUEST_TOKEN')
outfile.write('&')
outfile.write('oauth_token_secret=REQUEST_TOKEN_SECRET')
| [
"def",
"OAuthGetRequestTokenCGI",
"(",
"outfile",
")",
":",
"outfile",
".",
"write",
"(",
"'Status: 200\\r\\n'",
")",
"outfile",
".",
"write",
"(",
"'Content-Type: text/plain\\r\\n'",
")",
"outfile",
".",
"write",
"(",
"'\\r\\n'",
")",
"outfile",
".",
"write",
"... | runs the oauthgetrequesttoken cgi . | train | false |
34,127 | def testContinuousInterface(algo):
if (not issubclass(algo, bbo.ContinuousOptimizer)):
return True
x = algo(sf, xlist2)
assert isinstance(x.bestEvaluable, ndarray), 'not converted to array'
try:
algo(ife1, xa2)
return 'Failed to throw dimension mismatch error'
except ValueError:
pass
return True
| [
"def",
"testContinuousInterface",
"(",
"algo",
")",
":",
"if",
"(",
"not",
"issubclass",
"(",
"algo",
",",
"bbo",
".",
"ContinuousOptimizer",
")",
")",
":",
"return",
"True",
"x",
"=",
"algo",
"(",
"sf",
",",
"xlist2",
")",
"assert",
"isinstance",
"(",
... | test the specifics for the interface for continuousoptimizers . | train | false |
34,128 | def has_safe_repr(value):
if ((value is None) or (value is NotImplemented) or (value is Ellipsis)):
return True
if isinstance(value, ((bool, int, float, complex, range_type, Markup) + string_types)):
return True
if isinstance(value, (tuple, list, set, frozenset)):
for item in value:
if (not has_safe_repr(item)):
return False
return True
elif isinstance(value, dict):
for (key, value) in iteritems(value):
if (not has_safe_repr(key)):
return False
if (not has_safe_repr(value)):
return False
return True
return False
| [
"def",
"has_safe_repr",
"(",
"value",
")",
":",
"if",
"(",
"(",
"value",
"is",
"None",
")",
"or",
"(",
"value",
"is",
"NotImplemented",
")",
"or",
"(",
"value",
"is",
"Ellipsis",
")",
")",
":",
"return",
"True",
"if",
"isinstance",
"(",
"value",
",",... | does the node have a safe representation? . | train | true |
34,129 | def collect_roots(base_paths, file_sig='*.plug'):
result = set()
for path_or_list in base_paths:
if isinstance(path_or_list, (list, tuple)):
result |= collect_roots(base_paths=path_or_list, file_sig=file_sig)
elif (path_or_list is not None):
result |= find_roots(path_or_list, file_sig)
return result
| [
"def",
"collect_roots",
"(",
"base_paths",
",",
"file_sig",
"=",
"'*.plug'",
")",
":",
"result",
"=",
"set",
"(",
")",
"for",
"path_or_list",
"in",
"base_paths",
":",
"if",
"isinstance",
"(",
"path_or_list",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",... | collects all the paths from base_paths recursively that contains files of type file_sig . | train | false |
34,131 | def boto_volume_for_test(test, cluster_id):
region_name = u'some-test-region-1'
s = Boto3Session(botocore_session=botocore_get_session(), region_name=region_name)
ec2 = s.resource('ec2', region_name=region_name)
stubber = Stubber(ec2.meta.client)
stubber.activate()
volume_id = u'vol-{}'.format(random_name(test))
v = ec2.Volume(id=volume_id)
tags = []
if (cluster_id is not None):
tags.append(dict(Key=CLUSTER_ID_LABEL, Value=cluster_id))
v.meta.data = dict(Tags=tags)
return v
| [
"def",
"boto_volume_for_test",
"(",
"test",
",",
"cluster_id",
")",
":",
"region_name",
"=",
"u'some-test-region-1'",
"s",
"=",
"Boto3Session",
"(",
"botocore_session",
"=",
"botocore_get_session",
"(",
")",
",",
"region_name",
"=",
"region_name",
")",
"ec2",
"=",... | create an in-memory boto3 volume . | train | false |
34,133 | def logpow(x, m):
return tt.switch(tt.eq(x, 0), (- np.inf), (m * tt.log(x)))
| [
"def",
"logpow",
"(",
"x",
",",
"m",
")",
":",
"return",
"tt",
".",
"switch",
"(",
"tt",
".",
"eq",
"(",
"x",
",",
"0",
")",
",",
"(",
"-",
"np",
".",
"inf",
")",
",",
"(",
"m",
"*",
"tt",
".",
"log",
"(",
"x",
")",
")",
")"
] | calculates log since m*log(x) will fail when m . | train | false |
34,134 | def list_users_info():
_xml = '<RIBCL VERSION="2.0">\n <LOGIN USER_LOGIN="adminname" PASSWORD="password">\n <USER_INFO MODE="read">\n <GET_ALL_USER_INFO />\n </USER_INFO>\n </LOGIN>\n </RIBCL>'
return __execute_cmd('All_users_info', _xml)
| [
"def",
"list_users_info",
"(",
")",
":",
"_xml",
"=",
"'<RIBCL VERSION=\"2.0\">\\n <LOGIN USER_LOGIN=\"adminname\" PASSWORD=\"password\">\\n <USER_INFO MODE=\"read\">\\n <GET_ALL_USER_INFO />\\n </USER_INFO>\\n </LOGIN>... | list all users in detail cli example: . | train | false |
34,136 | def _tf():
if six.PY3:
return tempfile.TemporaryFile(mode='w', encoding='UTF-8')
return tempfile.TemporaryFile(mode='w')
| [
"def",
"_tf",
"(",
")",
":",
"if",
"six",
".",
"PY3",
":",
"return",
"tempfile",
".",
"TemporaryFile",
"(",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"'UTF-8'",
")",
"return",
"tempfile",
".",
"TemporaryFile",
"(",
"mode",
"=",
"'w'",
")"
] | open a temp file that looks a bunch like stdout . | train | false |
34,137 | def verify_valid_fasta_format(input_fasta_fp):
fasta_f = open(input_fasta_fp, 'U')
try:
for (label, seq) in parse_fasta(fasta_f):
continue
except RecordError:
raise RecordError(('Input fasta file not valid fasta format. Error ' + ('found at %s label and %s sequence ' % (label, seq))))
fasta_f.close()
| [
"def",
"verify_valid_fasta_format",
"(",
"input_fasta_fp",
")",
":",
"fasta_f",
"=",
"open",
"(",
"input_fasta_fp",
",",
"'U'",
")",
"try",
":",
"for",
"(",
"label",
",",
"seq",
")",
"in",
"parse_fasta",
"(",
"fasta_f",
")",
":",
"continue",
"except",
"Rec... | tests fasta filepath to determine if valid format input_fasta_fp: fasta filepath . | train | false |
34,138 | def Show(**options):
clf = options.pop('clf', True)
Config(**options)
pyplot.show()
if clf:
Clf()
| [
"def",
"Show",
"(",
"**",
"options",
")",
":",
"clf",
"=",
"options",
".",
"pop",
"(",
"'clf'",
",",
"True",
")",
"Config",
"(",
"**",
"options",
")",
"pyplot",
".",
"show",
"(",
")",
"if",
"clf",
":",
"Clf",
"(",
")"
] | shows the plot . | train | false |
34,139 | def _add_feedback_message_reference(user_id, reference):
model = feedback_models.UnsentFeedbackEmailModel.get(user_id, strict=False)
if (model is not None):
model.feedback_message_references.append(reference.to_dict())
model.put()
else:
model = feedback_models.UnsentFeedbackEmailModel(id=user_id, feedback_message_references=[reference.to_dict()])
model.put()
enqueue_feedback_message_batch_email_task(user_id)
| [
"def",
"_add_feedback_message_reference",
"(",
"user_id",
",",
"reference",
")",
":",
"model",
"=",
"feedback_models",
".",
"UnsentFeedbackEmailModel",
".",
"get",
"(",
"user_id",
",",
"strict",
"=",
"False",
")",
"if",
"(",
"model",
"is",
"not",
"None",
")",
... | adds a new message to the feedback message buffer that is used to generate the next notification email to the given user . | train | false |
34,140 | def fingerprint_callback(fingerprint):
config_print = fingerprint.replace(':', '_')
if config.has_option('trusted_servers', config_print):
return True
response = raw_input(('Connect to %s y/N? ' % fingerprint))
if response:
config.set('trusted_servers', config_print, 'trusted')
return (response == 'y')
| [
"def",
"fingerprint_callback",
"(",
"fingerprint",
")",
":",
"config_print",
"=",
"fingerprint",
".",
"replace",
"(",
"':'",
",",
"'_'",
")",
"if",
"config",
".",
"has_option",
"(",
"'trusted_servers'",
",",
"config_print",
")",
":",
"return",
"True",
"respons... | called during an ssl handshake with the fingerprint of the key of the remote host . | train | false |
34,141 | def lch2lab(lch):
lch = _prepare_lab_array(lch)
(c, h) = (lch[..., 1], lch[..., 2])
(lch[..., 1], lch[..., 2]) = ((c * np.cos(h)), (c * np.sin(h)))
return lch
| [
"def",
"lch2lab",
"(",
"lch",
")",
":",
"lch",
"=",
"_prepare_lab_array",
"(",
"lch",
")",
"(",
"c",
",",
"h",
")",
"=",
"(",
"lch",
"[",
"...",
",",
"1",
"]",
",",
"lch",
"[",
"...",
",",
"2",
"]",
")",
"(",
"lch",
"[",
"...",
",",
"1",
... | cie-lch to cie-lab color space conversion . | train | false |
34,142 | def _XXX(k, n, s):
x = (s % (string.replace(k.__module__, '.', '_'), k.__name__, n))
return x
| [
"def",
"_XXX",
"(",
"k",
",",
"n",
",",
"s",
")",
":",
"x",
"=",
"(",
"s",
"%",
"(",
"string",
".",
"replace",
"(",
"k",
".",
"__module__",
",",
"'.'",
",",
"'_'",
")",
",",
"k",
".",
"__name__",
",",
"n",
")",
")",
"return",
"x"
] | string manipulation garbage . | train | false |
34,143 | @require_role('admin')
def idc_del(request):
idc_ids = request.GET.get('id', '')
idc_id_list = idc_ids.split(',')
for idc_id in idc_id_list:
IDC.objects.filter(id=idc_id).delete()
return HttpResponseRedirect(reverse('idc_list'))
| [
"@",
"require_role",
"(",
"'admin'",
")",
"def",
"idc_del",
"(",
"request",
")",
":",
"idc_ids",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'id'",
",",
"''",
")",
"idc_id_list",
"=",
"idc_ids",
".",
"split",
"(",
"','",
")",
"for",
"idc_id",
"in",... | idc delete view . | train | false |
34,144 | def seek_delimiter(file, delimiter, blocksize):
if (file.tell() == 0):
return
last = ''
while True:
current = file.read(blocksize)
if (not current):
return
full = (last + current)
try:
i = full.index(delimiter)
file.seek(((file.tell() - (len(full) - i)) + len(delimiter)))
return
except ValueError:
pass
last = full[(- len(delimiter)):]
| [
"def",
"seek_delimiter",
"(",
"file",
",",
"delimiter",
",",
"blocksize",
")",
":",
"if",
"(",
"file",
".",
"tell",
"(",
")",
"==",
"0",
")",
":",
"return",
"last",
"=",
"''",
"while",
"True",
":",
"current",
"=",
"file",
".",
"read",
"(",
"blocksi... | seek current file to next byte after a delimiter bytestring this seeks the file to the next byte following the delimiter . | train | false |
34,146 | @task
@cmdopts([BOKCHOY_IMPORTS_DIR, BOKCHOY_IMPORTS_DIR_DEPR, BOKCHOY_DEFAULT_STORE, BOKCHOY_DEFAULT_STORE_DEPR])
@timed
def load_courses(options):
if ('imports_dir' in options):
msg = colorize('green', 'Importing courses from {}...'.format(options.imports_dir))
print msg
sh('DEFAULT_STORE={default_store} ./manage.py cms --settings=bok_choy import {import_dir}'.format(default_store=options.default_store, import_dir=options.imports_dir))
else:
print colorize('blue', '--imports-dir not set, skipping import')
| [
"@",
"task",
"@",
"cmdopts",
"(",
"[",
"BOKCHOY_IMPORTS_DIR",
",",
"BOKCHOY_IMPORTS_DIR_DEPR",
",",
"BOKCHOY_DEFAULT_STORE",
",",
"BOKCHOY_DEFAULT_STORE_DEPR",
"]",
")",
"@",
"timed",
"def",
"load_courses",
"(",
"options",
")",
":",
"if",
"(",
"'imports_dir'",
"in... | loads courses from options . | train | false |
34,147 | def _command(name, description=None):
def decorator(f):
commands[name] = f
if description:
descriptions[name] = description
return f
return decorator
| [
"def",
"_command",
"(",
"name",
",",
"description",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"commands",
"[",
"name",
"]",
"=",
"f",
"if",
"description",
":",
"descriptions",
"[",
"name",
"]",
"=",
"description",
"return",
"f",
... | decorate a function used to call a command . | train | false |
34,149 | def raw_mod(opts, name, functions, mod='modules'):
loader = LazyLoader(_module_dirs(opts, mod, 'rawmodule'), opts, tag='rawmodule', virtual_enable=False, pack={'__salt__': functions})
if (name not in loader.file_mapping):
return {}
loader._load_module(name)
return dict(loader._dict)
| [
"def",
"raw_mod",
"(",
"opts",
",",
"name",
",",
"functions",
",",
"mod",
"=",
"'modules'",
")",
":",
"loader",
"=",
"LazyLoader",
"(",
"_module_dirs",
"(",
"opts",
",",
"mod",
",",
"'rawmodule'",
")",
",",
"opts",
",",
"tag",
"=",
"'rawmodule'",
",",
... | returns a single module loaded raw and bypassing the __virtual__ function . | train | true |
34,151 | def notify_new_language(subproject, language, user):
mails = []
subscriptions = Profile.objects.subscribed_new_language(subproject.project, user)
users = set()
for subscription in subscriptions:
mails.append(subscription.notify_new_language(subproject, language, user))
users.add(subscription.user_id)
for owner in subproject.project.owners.all():
mails.append(owner.profile.notify_new_language(subproject, language, user))
mails.append(get_notification_email(u'en', u'ADMINS', u'new_language', subproject, {u'language': language, u'user': user}, user=user))
send_mails(mails)
| [
"def",
"notify_new_language",
"(",
"subproject",
",",
"language",
",",
"user",
")",
":",
"mails",
"=",
"[",
"]",
"subscriptions",
"=",
"Profile",
".",
"objects",
".",
"subscribed_new_language",
"(",
"subproject",
".",
"project",
",",
"user",
")",
"users",
"=... | notify subscribed users about new language requests . | train | false |
34,152 | def partition_by(inputs, attribute):
return group_by(inputs, (lambda x: x[0][attribute]))
| [
"def",
"partition_by",
"(",
"inputs",
",",
"attribute",
")",
":",
"return",
"group_by",
"(",
"inputs",
",",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
"[",
"attribute",
"]",
")",
")"
] | returns a dict of inputs partitioned by the attribute each input is a pair . | train | false |
34,153 | def make_model(dense_layer_sizes, nb_filters, nb_conv, nb_pool):
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
for layer_size in dense_layer_sizes:
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
return model
| [
"def",
"make_model",
"(",
"dense_layer_sizes",
",",
"nb_filters",
",",
"nb_conv",
",",
"nb_pool",
")",
":",
"model",
"=",
"Sequential",
"(",
")",
"model",
".",
"add",
"(",
"Convolution2D",
"(",
"nb_filters",
",",
"nb_conv",
",",
"nb_conv",
",",
"border_mode"... | creates model comprised of 2 convolutional layers followed by dense layers dense_layer_sizes: list of layer sizes . | train | false |
34,155 | def stop_replay():
_unpatch_httplib()
_unpatch_requests()
_unpatch_urllib3()
| [
"def",
"stop_replay",
"(",
")",
":",
"_unpatch_httplib",
"(",
")",
"_unpatch_requests",
"(",
")",
"_unpatch_urllib3",
"(",
")"
] | remove all patches installed by the httreplay library and end replay . | train | false |
34,156 | def _NSApp():
return msg(C('NSApplication'), n('sharedApplication'))
| [
"def",
"_NSApp",
"(",
")",
":",
"return",
"msg",
"(",
"C",
"(",
"'NSApplication'",
")",
",",
"n",
"(",
"'sharedApplication'",
")",
")"
] | return the global nsapplication instance . | train | false |
34,157 | @pytest.mark.cmd
@pytest.mark.django_db
def test_sync_stores_noargs(capfd, project0, project1):
project0.delete()
project1.delete()
capfd.readouterr()
call_command('sync_stores')
(out, err) = capfd.readouterr()
assert (out == '')
assert (err == '')
| [
"@",
"pytest",
".",
"mark",
".",
"cmd",
"@",
"pytest",
".",
"mark",
".",
"django_db",
"def",
"test_sync_stores_noargs",
"(",
"capfd",
",",
"project0",
",",
"project1",
")",
":",
"project0",
".",
"delete",
"(",
")",
"project1",
".",
"delete",
"(",
")",
... | site wide sync_stores . | train | false |
34,158 | def show_devices(verbose=False, **kwargs):
kwargs['find_all'] = True
devices = find(**kwargs)
strings = ''
for device in devices:
if (not verbose):
strings += ('%s, %s\n' % (device._str(), _try_lookup(_lu.device_classes, device.bDeviceClass)))
else:
strings += ('%s\n\n' % str(device))
return _DescriptorInfo(strings)
| [
"def",
"show_devices",
"(",
"verbose",
"=",
"False",
",",
"**",
"kwargs",
")",
":",
"kwargs",
"[",
"'find_all'",
"]",
"=",
"True",
"devices",
"=",
"find",
"(",
"**",
"kwargs",
")",
"strings",
"=",
"''",
"for",
"device",
"in",
"devices",
":",
"if",
"(... | show information about connected devices . | train | true |
34,159 | def num2epoch(d):
return ((np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY)
| [
"def",
"num2epoch",
"(",
"d",
")",
":",
"return",
"(",
"(",
"np",
".",
"asarray",
"(",
"d",
")",
"-",
"EPOCH_OFFSET",
")",
"*",
"SEC_PER_DAY",
")"
] | convert days since 0001 to epoch . | train | false |
34,163 | def MakePregMap(df):
d = defaultdict(list)
for (index, caseid) in df.caseid.iteritems():
d[caseid].append(index)
return d
| [
"def",
"MakePregMap",
"(",
"df",
")",
":",
"d",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"(",
"index",
",",
"caseid",
")",
"in",
"df",
".",
"caseid",
".",
"iteritems",
"(",
")",
":",
"d",
"[",
"caseid",
"]",
".",
"append",
"(",
"index",
")",
... | make a map from caseid to list of preg indices . | train | false |
@should_dump_thread_stack
def stop_thread_stack_dump():
    """Cancel the thread referenced by SAVE_THREAD_PTR, then dump thread stacks.

    Gated by the `should_dump_thread_stack` decorator, which presumably makes
    this a no-op when stack dumping is disabled -- TODO confirm the
    decorator's semantics.
    """
    # Cancel first so no further saving happens while the dump is produced.
    cancel_thread(SAVE_THREAD_PTR)
    dump_thread_stack()
| [
"@",
"should_dump_thread_stack",
"def",
"stop_thread_stack_dump",
"(",
")",
":",
"cancel_thread",
"(",
"SAVE_THREAD_PTR",
")",
"dump_thread_stack",
"(",
")"
] | save profiling information . | train | false |
def get_test_results_dir(env=None):
    """Return the directory used for writing test result files.

    env -- optional environment mapping forwarded to get_ros_home().
    """
    ros_home = get_ros_home(env)
    return os.path.join(ros_home, 'test_results')
| [
"def",
"get_test_results_dir",
"(",
"env",
"=",
"None",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"get_ros_home",
"(",
"env",
")",
",",
"'test_results'",
")"
] | get directory to use for writing test result files . | train | false |
def ksubsets(superset, k):
    """Return an iterator over the size-k subsets of `superset`.

    Subsets are produced in lexicographic order of the input positions
    (the order `itertools.combinations` yields them in).
    """
    return combinations(superset, k)
| [
"def",
"ksubsets",
"(",
"superset",
",",
"k",
")",
":",
"return",
"combinations",
"(",
"superset",
",",
"k",
")"
] | finds the subsets of size k in lexicographic order . | train | false |
def coalesce_option_value(value, value_type, label=None):
    """Convert `value` into the Python type named by `value_type`.

    Supported type names (case-insensitive): 'string'/'str', 'list',
    'float', 'integer'/'int', 'bool'/'boolean'.

    label -- optional option name used to make conversion errors clearer.

    Raises ArgumentError for an unknown type name or an unconvertible value.
    """
    value_type = value_type.lower()
    try:
        if value_type in ('string', 'str'):
            return_value = str(value)
        elif value_type == 'list':
            # A comma-separated string is split; any other value is listified.
            if isinstance(value, compat.string_type):
                return_value = value.split(',')
            else:
                return_value = list(value)
        elif value_type == 'float':
            return_value = float(value)
        elif value_type in ('integer', 'int'):
            return_value = int(value)
        elif value_type in ('bool', 'boolean'):
            if not value:
                return_value = False
            elif isinstance(value, compat.string_type):
                return_value = value.lower() in ['1', 'true', 'yes', 'on']
            else:
                return_value = bool(value)
        else:
            raise ArgumentError('Unknown option value type %s' % value_type)
    except ValueError:
        if label:
            label = 'parameter %s ' % label
        else:
            label = ''
        # BUG FIX: the original referenced an undefined name `astring` here,
        # so every failed conversion raised NameError instead of ArgumentError.
        raise ArgumentError("Unable to convert %svalue '%s' into type %s"
                            % (label, value, value_type))
    return return_value
| [
"def",
"coalesce_option_value",
"(",
"value",
",",
"value_type",
",",
"label",
"=",
"None",
")",
":",
"value_type",
"=",
"value_type",
".",
"lower",
"(",
")",
"try",
":",
"if",
"(",
"value_type",
"in",
"(",
"'string'",
",",
"'str'",
")",
")",
":",
"ret... | convert string into an object value of value_type . | train | false |
def suffix_replace(original, old, new):
    """Return `original` with its suffix `old` replaced by `new`.

    `original` is assumed to actually end with `old`; this is not verified.
    """
    # BUG FIX: the original sliced with original[:-len(old)], which for an
    # empty `old` becomes original[:0] and silently discards the whole string.
    if not old:
        return original + new
    return original[:-len(old)] + new
| [
"def",
"suffix_replace",
"(",
"original",
",",
"old",
",",
"new",
")",
":",
"return",
"(",
"original",
"[",
":",
"(",
"-",
"len",
"(",
"old",
")",
")",
"]",
"+",
"new",
")"
] | replaces the old suffix of the original string by a new suffix . | train | false |
def libvlc_media_list_player_set_media_player(p_mlp, p_mi):
    """Replace the media player used by a media list player instance.

    p_mlp -- MediaListPlayer instance.
    p_mi  -- MediaPlayer instance to install.
    """
    name = 'libvlc_media_list_player_set_media_player'
    # Reuse the cached ctypes binding when present; otherwise build it.
    cached = _Cfunctions.get(name, None)
    f = cached or _Cfunction(name, ((1,), (1,)), None,
                             None, MediaListPlayer, MediaPlayer)
    return f(p_mlp, p_mi)
| [
"def",
"libvlc_media_list_player_set_media_player",
"(",
"p_mlp",
",",
"p_mi",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_media_list_player_set_media_player'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_media_list_player_set_media_player'",... | replace media player in media_list_player with this instance . | train | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.