id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
21,039 | @pytest.mark.parametrize('parallel', [True, False])
def test_default_data_start(parallel, read_basic):
text = 'ignore this line\na b c\n1 2 3\n4 5 6'
table = read_basic(text, header_start=1, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
| [
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"'parallel'",
",",
"[",
"True",
",",
"False",
"]",
")",
"def",
"test_default_data_start",
"(",
"parallel",
",",
"read_basic",
")",
":",
"text",
"=",
"'ignore this line\\na b c\\n1 2 3\\n4 5 6'",
"table",
"=",
"read_basic",
"(",
"text",
",",
"header_start",
"=",
"1",
",",
"parallel",
"=",
"parallel",
")",
"expected",
"=",
"Table",
"(",
"[",
"[",
"1",
",",
"4",
"]",
",",
"[",
"2",
",",
"5",
"]",
",",
"[",
"3",
",",
"6",
"]",
"]",
",",
"names",
"=",
"(",
"'a'",
",",
"'b'",
",",
"'c'",
")",
")",
"assert_table_equal",
"(",
"table",
",",
"expected",
")"
] | if data_start is not explicitly passed to read() . | train | false |
21,040 | def notify_users(users, msg, **kwargs):
for user in users:
notify_user(user, msg, **kwargs)
| [
"def",
"notify_users",
"(",
"users",
",",
"msg",
",",
"**",
"kwargs",
")",
":",
"for",
"user",
"in",
"users",
":",
"notify_user",
"(",
"user",
",",
"msg",
",",
"**",
"kwargs",
")"
] | send a simple notification to an iterable of users . | train | false |
21,042 | def exception_when_false_wrapper(func, exception_class, value_error_message_template):
def g(target, *args, **kwargs):
val = func(target, *args, **kwargs)
if val:
return val
else:
raise exception_class((value_error_message_template % target))
g.__name__ = func.__name__
g.__doc__ = func.__doc__
return g
| [
"def",
"exception_when_false_wrapper",
"(",
"func",
",",
"exception_class",
",",
"value_error_message_template",
")",
":",
"def",
"g",
"(",
"target",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"val",
"=",
"func",
"(",
"target",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"if",
"val",
":",
"return",
"val",
"else",
":",
"raise",
"exception_class",
"(",
"(",
"value_error_message_template",
"%",
"target",
")",
")",
"g",
".",
"__name__",
"=",
"func",
".",
"__name__",
"g",
".",
"__doc__",
"=",
"func",
".",
"__doc__",
"return",
"g"
] | wrap a function to raise an exception when the return value is not true . | train | false |
21,043 | def _fancy_rename(oldname, newname):
if (not os.path.exists(newname)):
try:
os.rename(oldname, newname)
except OSError:
raise
return
try:
(fd, tmpfile) = tempfile.mkstemp('.tmp', prefix=(oldname + '.'), dir='.')
os.close(fd)
os.remove(tmpfile)
except OSError:
raise
try:
os.rename(newname, tmpfile)
except OSError:
raise
try:
os.rename(oldname, newname)
except OSError:
os.rename(tmpfile, newname)
raise
os.remove(tmpfile)
| [
"def",
"_fancy_rename",
"(",
"oldname",
",",
"newname",
")",
":",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"newname",
")",
")",
":",
"try",
":",
"os",
".",
"rename",
"(",
"oldname",
",",
"newname",
")",
"except",
"OSError",
":",
"raise",
"return",
"try",
":",
"(",
"fd",
",",
"tmpfile",
")",
"=",
"tempfile",
".",
"mkstemp",
"(",
"'.tmp'",
",",
"prefix",
"=",
"(",
"oldname",
"+",
"'.'",
")",
",",
"dir",
"=",
"'.'",
")",
"os",
".",
"close",
"(",
"fd",
")",
"os",
".",
"remove",
"(",
"tmpfile",
")",
"except",
"OSError",
":",
"raise",
"try",
":",
"os",
".",
"rename",
"(",
"newname",
",",
"tmpfile",
")",
"except",
"OSError",
":",
"raise",
"try",
":",
"os",
".",
"rename",
"(",
"oldname",
",",
"newname",
")",
"except",
"OSError",
":",
"os",
".",
"rename",
"(",
"tmpfile",
",",
"newname",
")",
"raise",
"os",
".",
"remove",
"(",
"tmpfile",
")"
] | rename file with temporary backup file to rollback if rename fails . | train | false |
21,044 | def iterable_middleware(app):
def inner(environ, start_response):
rv = app(environ, start_response)
class Iterable(object, ):
def __iter__(self):
return iter(rv)
if hasattr(rv, 'close'):
def close(self):
rv.close()
return Iterable()
return inner
| [
"def",
"iterable_middleware",
"(",
"app",
")",
":",
"def",
"inner",
"(",
"environ",
",",
"start_response",
")",
":",
"rv",
"=",
"app",
"(",
"environ",
",",
"start_response",
")",
"class",
"Iterable",
"(",
"object",
",",
")",
":",
"def",
"__iter__",
"(",
"self",
")",
":",
"return",
"iter",
"(",
"rv",
")",
"if",
"hasattr",
"(",
"rv",
",",
"'close'",
")",
":",
"def",
"close",
"(",
"self",
")",
":",
"rv",
".",
"close",
"(",
")",
"return",
"Iterable",
"(",
")",
"return",
"inner"
] | guarantee that the app returns an iterable . | train | false |
21,045 | def print_statistics(prefix=''):
for line in get_statistics(prefix):
print line
| [
"def",
"print_statistics",
"(",
"prefix",
"=",
"''",
")",
":",
"for",
"line",
"in",
"get_statistics",
"(",
"prefix",
")",
":",
"print",
"line"
] | print overall statistics . | train | false |
21,046 | def _createPrivateKey(key):
if (not isinstance(key, RSAKey)):
raise AssertionError()
if (not key.hasPrivateKey()):
raise AssertionError()
return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP, key.dQ, key.qInv)
| [
"def",
"_createPrivateKey",
"(",
"key",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"key",
",",
"RSAKey",
")",
")",
":",
"raise",
"AssertionError",
"(",
")",
"if",
"(",
"not",
"key",
".",
"hasPrivateKey",
"(",
")",
")",
":",
"raise",
"AssertionError",
"(",
")",
"return",
"_createPrivateRSAKey",
"(",
"key",
".",
"n",
",",
"key",
".",
"e",
",",
"key",
".",
"d",
",",
"key",
".",
"p",
",",
"key",
".",
"q",
",",
"key",
".",
"dP",
",",
"key",
".",
"dQ",
",",
"key",
".",
"qInv",
")"
] | create a new private key . | train | false |
21,047 | def matches_filter(finding, request):
name = request.args.get('name', None)
url = request.args.get('url', None)
if ((name is not None) and (url is not None)):
return ((name.lower() in finding.get_name().lower()) and (finding.get_url() is not None) and finding.get_url().url_string.startswith(url))
elif (name is not None):
return (name.lower() in finding.get_name().lower())
elif (url is not None):
return ((finding.get_url() is not None) and finding.get_url().url_string.startswith(url))
return True
| [
"def",
"matches_filter",
"(",
"finding",
",",
"request",
")",
":",
"name",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'name'",
",",
"None",
")",
"url",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'url'",
",",
"None",
")",
"if",
"(",
"(",
"name",
"is",
"not",
"None",
")",
"and",
"(",
"url",
"is",
"not",
"None",
")",
")",
":",
"return",
"(",
"(",
"name",
".",
"lower",
"(",
")",
"in",
"finding",
".",
"get_name",
"(",
")",
".",
"lower",
"(",
")",
")",
"and",
"(",
"finding",
".",
"get_url",
"(",
")",
"is",
"not",
"None",
")",
"and",
"finding",
".",
"get_url",
"(",
")",
".",
"url_string",
".",
"startswith",
"(",
"url",
")",
")",
"elif",
"(",
"name",
"is",
"not",
"None",
")",
":",
"return",
"(",
"name",
".",
"lower",
"(",
")",
"in",
"finding",
".",
"get_name",
"(",
")",
".",
"lower",
"(",
")",
")",
"elif",
"(",
"url",
"is",
"not",
"None",
")",
":",
"return",
"(",
"(",
"finding",
".",
"get_url",
"(",
")",
"is",
"not",
"None",
")",
"and",
"finding",
".",
"get_url",
"(",
")",
".",
"url_string",
".",
"startswith",
"(",
"url",
")",
")",
"return",
"True"
] | filters: * /scans/0/kb/?name= returns only vulnerabilities which contain the specified string in the vulnerability name . | train | false |
21,048 | def uninstall_twisted_reactor():
import twisted
if (not hasattr(twisted, '_kivy_twisted_reactor_installed')):
return
from kivy.base import EventLoop
global _twisted_reactor_stopper
_twisted_reactor_stopper()
EventLoop.unbind(on_stop=_twisted_reactor_stopper)
del twisted._kivy_twisted_reactor_installed
| [
"def",
"uninstall_twisted_reactor",
"(",
")",
":",
"import",
"twisted",
"if",
"(",
"not",
"hasattr",
"(",
"twisted",
",",
"'_kivy_twisted_reactor_installed'",
")",
")",
":",
"return",
"from",
"kivy",
".",
"base",
"import",
"EventLoop",
"global",
"_twisted_reactor_stopper",
"_twisted_reactor_stopper",
"(",
")",
"EventLoop",
".",
"unbind",
"(",
"on_stop",
"=",
"_twisted_reactor_stopper",
")",
"del",
"twisted",
".",
"_kivy_twisted_reactor_installed"
] | uninstalls the kivys threaded twisted reactor . | train | false |
21,049 | def task_log_start(task_id):
_tasks[thread.get_ident()] = task_id
| [
"def",
"task_log_start",
"(",
"task_id",
")",
":",
"_tasks",
"[",
"thread",
".",
"get_ident",
"(",
")",
"]",
"=",
"task_id"
] | associate a thread with a task . | train | false |
21,050 | def getBridgeLoops(layerThickness, loop):
halfWidth = (1.5 * layerThickness)
slightlyGreaterThanHalfWidth = (1.1 * halfWidth)
extrudateLoops = []
centers = intercircle.getCentersFromLoop(loop, slightlyGreaterThanHalfWidth)
for center in centers:
extrudateLoop = intercircle.getSimplifiedInsetFromClockwiseLoop(center, halfWidth)
if intercircle.isLargeSameDirection(extrudateLoop, center, halfWidth):
if (euclidean.isPathInsideLoop(loop, extrudateLoop) == euclidean.isWiddershins(loop)):
extrudateLoop.reverse()
extrudateLoops.append(extrudateLoop)
return extrudateLoops
| [
"def",
"getBridgeLoops",
"(",
"layerThickness",
",",
"loop",
")",
":",
"halfWidth",
"=",
"(",
"1.5",
"*",
"layerThickness",
")",
"slightlyGreaterThanHalfWidth",
"=",
"(",
"1.1",
"*",
"halfWidth",
")",
"extrudateLoops",
"=",
"[",
"]",
"centers",
"=",
"intercircle",
".",
"getCentersFromLoop",
"(",
"loop",
",",
"slightlyGreaterThanHalfWidth",
")",
"for",
"center",
"in",
"centers",
":",
"extrudateLoop",
"=",
"intercircle",
".",
"getSimplifiedInsetFromClockwiseLoop",
"(",
"center",
",",
"halfWidth",
")",
"if",
"intercircle",
".",
"isLargeSameDirection",
"(",
"extrudateLoop",
",",
"center",
",",
"halfWidth",
")",
":",
"if",
"(",
"euclidean",
".",
"isPathInsideLoop",
"(",
"loop",
",",
"extrudateLoop",
")",
"==",
"euclidean",
".",
"isWiddershins",
"(",
"loop",
")",
")",
":",
"extrudateLoop",
".",
"reverse",
"(",
")",
"extrudateLoops",
".",
"append",
"(",
"extrudateLoop",
")",
"return",
"extrudateLoops"
] | get the inset bridge loops from the loop . | train | false |
21,051 | @home_routes.route('/password/reset', methods=('GET', 'POST'))
def password_reset_view():
if (request.method == 'GET'):
return render_template('gentelella/admin/login/password_reminder.html')
if (request.method == 'POST'):
email = request.form['email']
user = DataGetter.get_user_by_email(email)
if user:
link = (request.host + url_for('.change_password_view', hash=user.reset_password))
send_email_with_reset_password_hash(email, link)
flash('Please go to the link sent to your email to reset your password')
return redirect(url_for('.login_view'))
| [
"@",
"home_routes",
".",
"route",
"(",
"'/password/reset'",
",",
"methods",
"=",
"(",
"'GET'",
",",
"'POST'",
")",
")",
"def",
"password_reset_view",
"(",
")",
":",
"if",
"(",
"request",
".",
"method",
"==",
"'GET'",
")",
":",
"return",
"render_template",
"(",
"'gentelella/admin/login/password_reminder.html'",
")",
"if",
"(",
"request",
".",
"method",
"==",
"'POST'",
")",
":",
"email",
"=",
"request",
".",
"form",
"[",
"'email'",
"]",
"user",
"=",
"DataGetter",
".",
"get_user_by_email",
"(",
"email",
")",
"if",
"user",
":",
"link",
"=",
"(",
"request",
".",
"host",
"+",
"url_for",
"(",
"'.change_password_view'",
",",
"hash",
"=",
"user",
".",
"reset_password",
")",
")",
"send_email_with_reset_password_hash",
"(",
"email",
",",
"link",
")",
"flash",
"(",
"'Please go to the link sent to your email to reset your password'",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'.login_view'",
")",
")"
] | password reset view . | train | false |
21,054 | def get_extended_due(course, unit, user):
try:
override = StudentFieldOverride.objects.get(course_id=course.id, student=user, location=unit.location, field='due')
return DATE_FIELD.from_json(json.loads(override.value))
except StudentFieldOverride.DoesNotExist:
return None
| [
"def",
"get_extended_due",
"(",
"course",
",",
"unit",
",",
"user",
")",
":",
"try",
":",
"override",
"=",
"StudentFieldOverride",
".",
"objects",
".",
"get",
"(",
"course_id",
"=",
"course",
".",
"id",
",",
"student",
"=",
"user",
",",
"location",
"=",
"unit",
".",
"location",
",",
"field",
"=",
"'due'",
")",
"return",
"DATE_FIELD",
".",
"from_json",
"(",
"json",
".",
"loads",
"(",
"override",
".",
"value",
")",
")",
"except",
"StudentFieldOverride",
".",
"DoesNotExist",
":",
"return",
"None"
] | gets the overridden due date for the given user on the given unit . | train | false |
21,055 | def vm_state(vm_=None):
with _get_xapi_session() as xapi:
info = {}
if vm_:
info[vm_] = _get_record_by_label(xapi, 'VM', vm_)['power_state']
return info
for vm_ in list_domains():
info[vm_] = _get_record_by_label(xapi, 'VM', vm_)['power_state']
return info
| [
"def",
"vm_state",
"(",
"vm_",
"=",
"None",
")",
":",
"with",
"_get_xapi_session",
"(",
")",
"as",
"xapi",
":",
"info",
"=",
"{",
"}",
"if",
"vm_",
":",
"info",
"[",
"vm_",
"]",
"=",
"_get_record_by_label",
"(",
"xapi",
",",
"'VM'",
",",
"vm_",
")",
"[",
"'power_state'",
"]",
"return",
"info",
"for",
"vm_",
"in",
"list_domains",
"(",
")",
":",
"info",
"[",
"vm_",
"]",
"=",
"_get_record_by_label",
"(",
"xapi",
",",
"'VM'",
",",
"vm_",
")",
"[",
"'power_state'",
"]",
"return",
"info"
] | return list of all the vms and their state . | train | true |
21,056 | def http_get(url):
return Effect(HTTPGet(url=url))
| [
"def",
"http_get",
"(",
"url",
")",
":",
"return",
"Effect",
"(",
"HTTPGet",
"(",
"url",
"=",
"url",
")",
")"
] | wrapper to create an :class:httpget effect . | train | false |
21,058 | def _penn_to_wordnet(tag):
if (tag in (u'NN', u'NNS', u'NNP', u'NNPS')):
return _wordnet.NOUN
if (tag in (u'JJ', u'JJR', u'JJS')):
return _wordnet.ADJ
if (tag in (u'VB', u'VBD', u'VBG', u'VBN', u'VBP', u'VBZ')):
return _wordnet.VERB
if (tag in (u'RB', u'RBR', u'RBS')):
return _wordnet.ADV
return None
| [
"def",
"_penn_to_wordnet",
"(",
"tag",
")",
":",
"if",
"(",
"tag",
"in",
"(",
"u'NN'",
",",
"u'NNS'",
",",
"u'NNP'",
",",
"u'NNPS'",
")",
")",
":",
"return",
"_wordnet",
".",
"NOUN",
"if",
"(",
"tag",
"in",
"(",
"u'JJ'",
",",
"u'JJR'",
",",
"u'JJS'",
")",
")",
":",
"return",
"_wordnet",
".",
"ADJ",
"if",
"(",
"tag",
"in",
"(",
"u'VB'",
",",
"u'VBD'",
",",
"u'VBG'",
",",
"u'VBN'",
",",
"u'VBP'",
",",
"u'VBZ'",
")",
")",
":",
"return",
"_wordnet",
".",
"VERB",
"if",
"(",
"tag",
"in",
"(",
"u'RB'",
",",
"u'RBR'",
",",
"u'RBS'",
")",
")",
":",
"return",
"_wordnet",
".",
"ADV",
"return",
"None"
] | converts a penn corpus tag into a wordnet tag . | train | false |
21,060 | def _get_comments_for_hard_deleted_versions(addon):
class PseudoVersion(object, ):
def __init__(self):
self.all_activity = []
all_files = ()
approvalnotes = None
compatible_apps_ordered = ()
releasenotes = None
status = 'Deleted'
deleted = True
channel = amo.RELEASE_CHANNEL_LISTED
@property
def created(self):
return self.all_activity[0].created
@property
def version(self):
return self.all_activity[0].activity_log.details.get('version', '[deleted]')
comments = CommentLog.objects.filter(activity_log__action__in=amo.LOG_REVIEW_QUEUE, activity_log__versionlog=None, activity_log__addonlog__addon=addon).order_by('created').select_related('activity_log')
comment_versions = defaultdict(PseudoVersion)
for c in comments:
c.version = c.activity_log.details.get('version', c.created)
comment_versions[c.version].all_activity.append(c)
return comment_versions.values()
| [
"def",
"_get_comments_for_hard_deleted_versions",
"(",
"addon",
")",
":",
"class",
"PseudoVersion",
"(",
"object",
",",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"all_activity",
"=",
"[",
"]",
"all_files",
"=",
"(",
")",
"approvalnotes",
"=",
"None",
"compatible_apps_ordered",
"=",
"(",
")",
"releasenotes",
"=",
"None",
"status",
"=",
"'Deleted'",
"deleted",
"=",
"True",
"channel",
"=",
"amo",
".",
"RELEASE_CHANNEL_LISTED",
"@",
"property",
"def",
"created",
"(",
"self",
")",
":",
"return",
"self",
".",
"all_activity",
"[",
"0",
"]",
".",
"created",
"@",
"property",
"def",
"version",
"(",
"self",
")",
":",
"return",
"self",
".",
"all_activity",
"[",
"0",
"]",
".",
"activity_log",
".",
"details",
".",
"get",
"(",
"'version'",
",",
"'[deleted]'",
")",
"comments",
"=",
"CommentLog",
".",
"objects",
".",
"filter",
"(",
"activity_log__action__in",
"=",
"amo",
".",
"LOG_REVIEW_QUEUE",
",",
"activity_log__versionlog",
"=",
"None",
",",
"activity_log__addonlog__addon",
"=",
"addon",
")",
".",
"order_by",
"(",
"'created'",
")",
".",
"select_related",
"(",
"'activity_log'",
")",
"comment_versions",
"=",
"defaultdict",
"(",
"PseudoVersion",
")",
"for",
"c",
"in",
"comments",
":",
"c",
".",
"version",
"=",
"c",
".",
"activity_log",
".",
"details",
".",
"get",
"(",
"'version'",
",",
"c",
".",
"created",
")",
"comment_versions",
"[",
"c",
".",
"version",
"]",
".",
"all_activity",
".",
"append",
"(",
"c",
")",
"return",
"comment_versions",
".",
"values",
"(",
")"
] | versions are soft-deleted now but we need to grab review history for older deleted versions that were hard-deleted so the only record we have of them is in the review log . | train | false |
21,061 | def open_anything():
sys_name = platform.system()
if (sys_name == 'Darwin'):
base_cmd = 'open'
elif (sys_name == 'Windows'):
base_cmd = 'start'
else:
base_cmd = 'xdg-open'
return base_cmd
| [
"def",
"open_anything",
"(",
")",
":",
"sys_name",
"=",
"platform",
".",
"system",
"(",
")",
"if",
"(",
"sys_name",
"==",
"'Darwin'",
")",
":",
"base_cmd",
"=",
"'open'",
"elif",
"(",
"sys_name",
"==",
"'Windows'",
")",
":",
"base_cmd",
"=",
"'start'",
"else",
":",
"base_cmd",
"=",
"'xdg-open'",
"return",
"base_cmd"
] | return the system command that dispatches execution to the correct program . | train | false |
21,062 | @treeio_login_required
def widget_new_messages(request, response_format='html'):
query = (Q(reply_to__isnull=True) & (~ Q(read_by=request.user.profile)))
messages = Object.filter_by_request(request, Message.objects.filter(query))[:5]
return render_to_response('messaging/widgets/new_messages', {'messages': messages}, context_instance=RequestContext(request), response_format=response_format)
| [
"@",
"treeio_login_required",
"def",
"widget_new_messages",
"(",
"request",
",",
"response_format",
"=",
"'html'",
")",
":",
"query",
"=",
"(",
"Q",
"(",
"reply_to__isnull",
"=",
"True",
")",
"&",
"(",
"~",
"Q",
"(",
"read_by",
"=",
"request",
".",
"user",
".",
"profile",
")",
")",
")",
"messages",
"=",
"Object",
".",
"filter_by_request",
"(",
"request",
",",
"Message",
".",
"objects",
".",
"filter",
"(",
"query",
")",
")",
"[",
":",
"5",
"]",
"return",
"render_to_response",
"(",
"'messaging/widgets/new_messages'",
",",
"{",
"'messages'",
":",
"messages",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
",",
"response_format",
"=",
"response_format",
")"
] | a list of new messages . | train | false |
21,065 | def edit_permission(request, app=None, priv=None):
if (not request.user.is_superuser):
request.audit = {'operation': 'EDIT_PERMISSION', 'operationText': _get_failed_operation_text(request.user.username, 'EDIT_PERMISSION'), 'allowed': False}
raise PopupException(_('You must be a superuser to change permissions.'), error_code=401)
instance = HuePermission.objects.get(app=app, action=priv)
if (request.method == 'POST'):
form = PermissionsEditForm(request.POST, instance=instance)
if form.is_valid():
form.save()
request.info(_('Permission information updated'))
request.audit = {'operation': 'EDIT_PERMISSION', 'operationText': ('Successfully edited permissions: %(app)s/%(priv)s' % {'app': app, 'priv': priv})}
return redirect(reverse(list_permissions))
else:
form = PermissionsEditForm(instance=instance)
return render('edit_permissions.mako', request, dict(form=form, action=request.path, app=app, priv=priv))
| [
"def",
"edit_permission",
"(",
"request",
",",
"app",
"=",
"None",
",",
"priv",
"=",
"None",
")",
":",
"if",
"(",
"not",
"request",
".",
"user",
".",
"is_superuser",
")",
":",
"request",
".",
"audit",
"=",
"{",
"'operation'",
":",
"'EDIT_PERMISSION'",
",",
"'operationText'",
":",
"_get_failed_operation_text",
"(",
"request",
".",
"user",
".",
"username",
",",
"'EDIT_PERMISSION'",
")",
",",
"'allowed'",
":",
"False",
"}",
"raise",
"PopupException",
"(",
"_",
"(",
"'You must be a superuser to change permissions.'",
")",
",",
"error_code",
"=",
"401",
")",
"instance",
"=",
"HuePermission",
".",
"objects",
".",
"get",
"(",
"app",
"=",
"app",
",",
"action",
"=",
"priv",
")",
"if",
"(",
"request",
".",
"method",
"==",
"'POST'",
")",
":",
"form",
"=",
"PermissionsEditForm",
"(",
"request",
".",
"POST",
",",
"instance",
"=",
"instance",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"form",
".",
"save",
"(",
")",
"request",
".",
"info",
"(",
"_",
"(",
"'Permission information updated'",
")",
")",
"request",
".",
"audit",
"=",
"{",
"'operation'",
":",
"'EDIT_PERMISSION'",
",",
"'operationText'",
":",
"(",
"'Successfully edited permissions: %(app)s/%(priv)s'",
"%",
"{",
"'app'",
":",
"app",
",",
"'priv'",
":",
"priv",
"}",
")",
"}",
"return",
"redirect",
"(",
"reverse",
"(",
"list_permissions",
")",
")",
"else",
":",
"form",
"=",
"PermissionsEditForm",
"(",
"instance",
"=",
"instance",
")",
"return",
"render",
"(",
"'edit_permissions.mako'",
",",
"request",
",",
"dict",
"(",
"form",
"=",
"form",
",",
"action",
"=",
"request",
".",
"path",
",",
"app",
"=",
"app",
",",
"priv",
"=",
"priv",
")",
")"
] | edit_permission -> reply @type request: httprequest . | train | false |
21,066 | def AnalyzeDex(filename, decompiler='dad', session=None):
androconf.debug('AnalyzeDex')
if (not session):
session = CONF['SESSION']
with open(filename, 'r') as fd:
data = fd.read()
return session.addDEX(filename, data)
| [
"def",
"AnalyzeDex",
"(",
"filename",
",",
"decompiler",
"=",
"'dad'",
",",
"session",
"=",
"None",
")",
":",
"androconf",
".",
"debug",
"(",
"'AnalyzeDex'",
")",
"if",
"(",
"not",
"session",
")",
":",
"session",
"=",
"CONF",
"[",
"'SESSION'",
"]",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fd",
":",
"data",
"=",
"fd",
".",
"read",
"(",
")",
"return",
"session",
".",
"addDEX",
"(",
"filename",
",",
"data",
")"
] | analyze an android dex file and setup all stuff for a more quickly analysis ! . | train | false |
21,070 | def help_all_output_test(subcommand=''):
cmd = (get_ipython_cmd() + [subcommand, '--help-all'])
(out, err, rc) = get_output_error_code(cmd)
nt.assert_equal(rc, 0, err)
nt.assert_not_in('Traceback', err)
nt.assert_in('Options', out)
nt.assert_in('Class', out)
return (out, err)
| [
"def",
"help_all_output_test",
"(",
"subcommand",
"=",
"''",
")",
":",
"cmd",
"=",
"(",
"get_ipython_cmd",
"(",
")",
"+",
"[",
"subcommand",
",",
"'--help-all'",
"]",
")",
"(",
"out",
",",
"err",
",",
"rc",
")",
"=",
"get_output_error_code",
"(",
"cmd",
")",
"nt",
".",
"assert_equal",
"(",
"rc",
",",
"0",
",",
"err",
")",
"nt",
".",
"assert_not_in",
"(",
"'Traceback'",
",",
"err",
")",
"nt",
".",
"assert_in",
"(",
"'Options'",
",",
"out",
")",
"nt",
".",
"assert_in",
"(",
"'Class'",
",",
"out",
")",
"return",
"(",
"out",
",",
"err",
")"
] | test that ipython [subcommand] --help-all works . | train | false |
21,071 | def int2ip(ipint):
return socket.inet_ntoa(struct.pack('!I', ipint))
| [
"def",
"int2ip",
"(",
"ipint",
")",
":",
"return",
"socket",
".",
"inet_ntoa",
"(",
"struct",
".",
"pack",
"(",
"'!I'",
",",
"ipint",
")",
")"
] | converts the integer representation of an ip address to its classical decimal . | train | false |
21,074 | def getinnerframes(tb, context=1):
framelist = []
while tb:
framelist.append(((tb.tb_frame,) + getframeinfo(tb, context)))
tb = tb.tb_next
return framelist
| [
"def",
"getinnerframes",
"(",
"tb",
",",
"context",
"=",
"1",
")",
":",
"framelist",
"=",
"[",
"]",
"while",
"tb",
":",
"framelist",
".",
"append",
"(",
"(",
"(",
"tb",
".",
"tb_frame",
",",
")",
"+",
"getframeinfo",
"(",
"tb",
",",
"context",
")",
")",
")",
"tb",
"=",
"tb",
".",
"tb_next",
"return",
"framelist"
] | get a list of records for a tracebacks frame and all lower frames . | train | true |
21,075 | def point_to_tuple(point):
return (point.x(), point.y())
| [
"def",
"point_to_tuple",
"(",
"point",
")",
":",
"return",
"(",
"point",
".",
"x",
"(",
")",
",",
"point",
".",
"y",
"(",
")",
")"
] | convert a qpointf into a tuple . | train | false |
21,076 | def human_order_sorted(l):
def atoi(text):
return (int(text) if text.isdigit() else text)
def natural_keys(text):
if isinstance(text, tuple):
text = text[0]
return [atoi(c) for c in re.split(u'(\\d+)', text)]
return sorted(l, key=natural_keys)
| [
"def",
"human_order_sorted",
"(",
"l",
")",
":",
"def",
"atoi",
"(",
"text",
")",
":",
"return",
"(",
"int",
"(",
"text",
")",
"if",
"text",
".",
"isdigit",
"(",
")",
"else",
"text",
")",
"def",
"natural_keys",
"(",
"text",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"tuple",
")",
":",
"text",
"=",
"text",
"[",
"0",
"]",
"return",
"[",
"atoi",
"(",
"c",
")",
"for",
"c",
"in",
"re",
".",
"split",
"(",
"u'(\\\\d+)'",
",",
"text",
")",
"]",
"return",
"sorted",
"(",
"l",
",",
"key",
"=",
"natural_keys",
")"
] | sorts string in human order . | train | false |
21,077 | def get_active_contexts():
try:
return list(_active_contexts.contexts)
except AttributeError:
return []
| [
"def",
"get_active_contexts",
"(",
")",
":",
"try",
":",
"return",
"list",
"(",
"_active_contexts",
".",
"contexts",
")",
"except",
"AttributeError",
":",
"return",
"[",
"]"
] | returns all the active contexts for the current thread . | train | false |
21,078 | def clear_output():
input_text.delete(1.0, tk.END)
output_text.delete(1.0, tk.END)
return
| [
"def",
"clear_output",
"(",
")",
":",
"input_text",
".",
"delete",
"(",
"1.0",
",",
"tk",
".",
"END",
")",
"output_text",
".",
"delete",
"(",
"1.0",
",",
"tk",
".",
"END",
")",
"return"
] | clear the output window . | train | false |
21,079 | def read_system_config(path=SYSTEM_CONFIG_PATH):
result = {}
if os.path.exists(path):
try:
import ConfigParser
except ImportError:
print 'cannot parse electrum.conf. please install ConfigParser'
return
p = ConfigParser.ConfigParser()
try:
p.read(path)
for (k, v) in p.items('client'):
result[k] = v
except (ConfigParser.NoSectionError, ConfigParser.MissingSectionHeaderError):
pass
return result
| [
"def",
"read_system_config",
"(",
"path",
"=",
"SYSTEM_CONFIG_PATH",
")",
":",
"result",
"=",
"{",
"}",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"try",
":",
"import",
"ConfigParser",
"except",
"ImportError",
":",
"print",
"'cannot parse electrum.conf. please install ConfigParser'",
"return",
"p",
"=",
"ConfigParser",
".",
"ConfigParser",
"(",
")",
"try",
":",
"p",
".",
"read",
"(",
"path",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"p",
".",
"items",
"(",
"'client'",
")",
":",
"result",
"[",
"k",
"]",
"=",
"v",
"except",
"(",
"ConfigParser",
".",
"NoSectionError",
",",
"ConfigParser",
".",
"MissingSectionHeaderError",
")",
":",
"pass",
"return",
"result"
] | parse and return the system config settings in /etc/electrum . | train | false |
21,081 | @must_be_logged_in
def personal_access_token_list(auth, **kwargs):
token_list_url = api_v2_url('tokens/')
return {'token_list_url': token_list_url}
| [
"@",
"must_be_logged_in",
"def",
"personal_access_token_list",
"(",
"auth",
",",
"**",
"kwargs",
")",
":",
"token_list_url",
"=",
"api_v2_url",
"(",
"'tokens/'",
")",
"return",
"{",
"'token_list_url'",
":",
"token_list_url",
"}"
] | return token creation page with list of known tokens . | train | false |
21,083 | def _read_int16(f):
return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])
| [
"def",
"_read_int16",
"(",
"f",
")",
":",
"return",
"np",
".",
"int16",
"(",
"struct",
".",
"unpack",
"(",
"'>h'",
",",
"f",
".",
"read",
"(",
"4",
")",
"[",
"2",
":",
"4",
"]",
")",
"[",
"0",
"]",
")"
] | read a signed 16-bit integer . | train | false |
21,084 | def _test_form_maintains_based_on_rev(client, doc, view, post_data, trans_lang=None, locale=None):
if trans_lang:
translate_path = doc.slug
uri = urllib.quote(reverse('wiki.translate', locale=trans_lang, args=[translate_path]))
else:
uri = reverse(view, locale=locale, args=[doc.slug])
response = client.get(uri)
orig_rev = doc.current_revision
eq_(orig_rev.id, int(pq(response.content)('input[name=based_on]').attr('value')))
martha_rev = revision(document=doc)
martha_rev.is_approved = True
martha_rev.save()
post_data_copy = {'based_on': orig_rev.id, 'slug': orig_rev.slug}
post_data_copy.update(post_data)
response = client.post(uri, data=post_data_copy)
ok_((response.status_code in (200, 302)))
fred_rev = Revision.objects.all().order_by('-id')[0]
eq_(orig_rev, fred_rev.based_on)
| [
"def",
"_test_form_maintains_based_on_rev",
"(",
"client",
",",
"doc",
",",
"view",
",",
"post_data",
",",
"trans_lang",
"=",
"None",
",",
"locale",
"=",
"None",
")",
":",
"if",
"trans_lang",
":",
"translate_path",
"=",
"doc",
".",
"slug",
"uri",
"=",
"urllib",
".",
"quote",
"(",
"reverse",
"(",
"'wiki.translate'",
",",
"locale",
"=",
"trans_lang",
",",
"args",
"=",
"[",
"translate_path",
"]",
")",
")",
"else",
":",
"uri",
"=",
"reverse",
"(",
"view",
",",
"locale",
"=",
"locale",
",",
"args",
"=",
"[",
"doc",
".",
"slug",
"]",
")",
"response",
"=",
"client",
".",
"get",
"(",
"uri",
")",
"orig_rev",
"=",
"doc",
".",
"current_revision",
"eq_",
"(",
"orig_rev",
".",
"id",
",",
"int",
"(",
"pq",
"(",
"response",
".",
"content",
")",
"(",
"'input[name=based_on]'",
")",
".",
"attr",
"(",
"'value'",
")",
")",
")",
"martha_rev",
"=",
"revision",
"(",
"document",
"=",
"doc",
")",
"martha_rev",
".",
"is_approved",
"=",
"True",
"martha_rev",
".",
"save",
"(",
")",
"post_data_copy",
"=",
"{",
"'based_on'",
":",
"orig_rev",
".",
"id",
",",
"'slug'",
":",
"orig_rev",
".",
"slug",
"}",
"post_data_copy",
".",
"update",
"(",
"post_data",
")",
"response",
"=",
"client",
".",
"post",
"(",
"uri",
",",
"data",
"=",
"post_data_copy",
")",
"ok_",
"(",
"(",
"response",
".",
"status_code",
"in",
"(",
"200",
",",
"302",
")",
")",
")",
"fred_rev",
"=",
"Revision",
".",
"objects",
".",
"all",
"(",
")",
".",
"order_by",
"(",
"'-id'",
")",
"[",
"0",
"]",
"eq_",
"(",
"orig_rev",
",",
"fred_rev",
".",
"based_on",
")"
] | confirm that the based_on value set in the revision created by an edit or translate form is the current_revision of the document as of when the form was first loaded . | train | false |
21,085 | def collect_indexing_data(entries, text_record_lengths):
data = []
entries = sorted(entries, key=attrgetter(u'start'))
record_start = 0
for rec_length in text_record_lengths:
next_record_start = (record_start + rec_length)
local_entries = []
for entry in entries:
if (entry.start >= next_record_start):
break
if ((entry.start + entry.length) <= record_start):
continue
local_entries.append(fill_entry(entry, (entry.start - record_start), rec_length))
strands = separate_strands(local_entries)
data.append(strands)
record_start += rec_length
return data
| [
"def",
"collect_indexing_data",
"(",
"entries",
",",
"text_record_lengths",
")",
":",
"data",
"=",
"[",
"]",
"entries",
"=",
"sorted",
"(",
"entries",
",",
"key",
"=",
"attrgetter",
"(",
"u'start'",
")",
")",
"record_start",
"=",
"0",
"for",
"rec_length",
"in",
"text_record_lengths",
":",
"next_record_start",
"=",
"(",
"record_start",
"+",
"rec_length",
")",
"local_entries",
"=",
"[",
"]",
"for",
"entry",
"in",
"entries",
":",
"if",
"(",
"entry",
".",
"start",
">=",
"next_record_start",
")",
":",
"break",
"if",
"(",
"(",
"entry",
".",
"start",
"+",
"entry",
".",
"length",
")",
"<=",
"record_start",
")",
":",
"continue",
"local_entries",
".",
"append",
"(",
"fill_entry",
"(",
"entry",
",",
"(",
"entry",
".",
"start",
"-",
"record_start",
")",
",",
"rec_length",
")",
")",
"strands",
"=",
"separate_strands",
"(",
"local_entries",
")",
"data",
".",
"append",
"(",
"strands",
")",
"record_start",
"+=",
"rec_length",
"return",
"data"
] | for every text record calculate which index entries start . | train | false |
21,087 | def json_call(json, args=(), kwargs=None):
if (kwargs is None):
kwargs = {}
if isinstance(json, basestring):
symbol = json_lookup(json)
return symbol(*args, **kwargs)
elif isinstance(json, dict):
raise NotImplementedError('dict calling convention undefined', json)
elif isinstance(json, (tuple, list)):
raise NotImplementedError('seq calling convention undefined', json)
else:
raise TypeError(json)
| [
"def",
"json_call",
"(",
"json",
",",
"args",
"=",
"(",
")",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"(",
"kwargs",
"is",
"None",
")",
":",
"kwargs",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"json",
",",
"basestring",
")",
":",
"symbol",
"=",
"json_lookup",
"(",
"json",
")",
"return",
"symbol",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"elif",
"isinstance",
"(",
"json",
",",
"dict",
")",
":",
"raise",
"NotImplementedError",
"(",
"'dict calling convention undefined'",
",",
"json",
")",
"elif",
"isinstance",
"(",
"json",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"raise",
"NotImplementedError",
"(",
"'seq calling convention undefined'",
",",
"json",
")",
"else",
":",
"raise",
"TypeError",
"(",
"json",
")"
] | return a dataset class instance based on a string . | train | false |
21,088 | def cms_rheader(r, tabs=[]):
if (r.representation != 'html'):
return None
record = r.record
if (record is None):
return None
table = r.table
resourcename = r.name
T = current.T
if (resourcename == 'series'):
tabs = [(T('Basic Details'), None), (T('Posts'), 'post')]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH(('%s: ' % table.name.label)), record.name)), rheader_tabs)
elif (resourcename == 'post'):
tabs = [(T('Basic Details'), None)]
if record.replies:
tabs.append((T('Comments'), 'discuss'))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH(('%s: ' % table.name.label)), record.name)), rheader_tabs)
return rheader
| [
"def",
"cms_rheader",
"(",
"r",
",",
"tabs",
"=",
"[",
"]",
")",
":",
"if",
"(",
"r",
".",
"representation",
"!=",
"'html'",
")",
":",
"return",
"None",
"record",
"=",
"r",
".",
"record",
"if",
"(",
"record",
"is",
"None",
")",
":",
"return",
"None",
"table",
"=",
"r",
".",
"table",
"resourcename",
"=",
"r",
".",
"name",
"T",
"=",
"current",
".",
"T",
"if",
"(",
"resourcename",
"==",
"'series'",
")",
":",
"tabs",
"=",
"[",
"(",
"T",
"(",
"'Basic Details'",
")",
",",
"None",
")",
",",
"(",
"T",
"(",
"'Posts'",
")",
",",
"'post'",
")",
"]",
"rheader_tabs",
"=",
"s3_rheader_tabs",
"(",
"r",
",",
"tabs",
")",
"rheader",
"=",
"DIV",
"(",
"TABLE",
"(",
"TR",
"(",
"TH",
"(",
"(",
"'%s: '",
"%",
"table",
".",
"name",
".",
"label",
")",
")",
",",
"record",
".",
"name",
")",
")",
",",
"rheader_tabs",
")",
"elif",
"(",
"resourcename",
"==",
"'post'",
")",
":",
"tabs",
"=",
"[",
"(",
"T",
"(",
"'Basic Details'",
")",
",",
"None",
")",
"]",
"if",
"record",
".",
"replies",
":",
"tabs",
".",
"append",
"(",
"(",
"T",
"(",
"'Comments'",
")",
",",
"'discuss'",
")",
")",
"rheader_tabs",
"=",
"s3_rheader_tabs",
"(",
"r",
",",
"tabs",
")",
"rheader",
"=",
"DIV",
"(",
"TABLE",
"(",
"TR",
"(",
"TH",
"(",
"(",
"'%s: '",
"%",
"table",
".",
"name",
".",
"label",
")",
")",
",",
"record",
".",
"name",
")",
")",
",",
"rheader_tabs",
")",
"return",
"rheader"
] | cms resource headers . | train | false |
21,089 | def remove_obsolete(jobs, new_jobs):
now = time.time()
limit = (now - 259200)
olds = jobs.keys()
for old in olds:
tm = jobs[old]['time']
if (old not in new_jobs):
if (jobs[old].get('status', ' ')[0] in ('G', 'B')):
jobs[old]['status'] = 'X'
if ((jobs[old]['status'] == 'X') and (tm < limit)):
logging.debug('Purging link %s', old)
del jobs[old]
| [
"def",
"remove_obsolete",
"(",
"jobs",
",",
"new_jobs",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"limit",
"=",
"(",
"now",
"-",
"259200",
")",
"olds",
"=",
"jobs",
".",
"keys",
"(",
")",
"for",
"old",
"in",
"olds",
":",
"tm",
"=",
"jobs",
"[",
"old",
"]",
"[",
"'time'",
"]",
"if",
"(",
"old",
"not",
"in",
"new_jobs",
")",
":",
"if",
"(",
"jobs",
"[",
"old",
"]",
".",
"get",
"(",
"'status'",
",",
"' '",
")",
"[",
"0",
"]",
"in",
"(",
"'G'",
",",
"'B'",
")",
")",
":",
"jobs",
"[",
"old",
"]",
"[",
"'status'",
"]",
"=",
"'X'",
"if",
"(",
"(",
"jobs",
"[",
"old",
"]",
"[",
"'status'",
"]",
"==",
"'X'",
")",
"and",
"(",
"tm",
"<",
"limit",
")",
")",
":",
"logging",
".",
"debug",
"(",
"'Purging link %s'",
",",
"old",
")",
"del",
"jobs",
"[",
"old",
"]"
] | expire g/b links that are not in new_jobs expired links older than 3 days are removed from jobs . | train | false |
21,090 | def get_demultiplex_data(ids_bcs_added_field, fasta_label, fasta_seq, bc_lens, all_bcs, barcode_type='golay_12', max_bc_errors=1.5, disable_bc_correction=False, added_demultiplex_field=None):
for bc_len in bc_lens:
curr_bc = fasta_seq[0:bc_len]
(corrected_bc, num_errors, added_field) = get_curr_bc_added_field(curr_bc, ids_bcs_added_field, fasta_label, all_bcs, barcode_type, disable_bc_correction, added_demultiplex_field)
if added_field:
if ((corrected_bc, added_field) in ids_bcs_added_field.keys()):
break
elif (corrected_bc is not None):
break
return (curr_bc, corrected_bc, num_errors, added_field)
| [
"def",
"get_demultiplex_data",
"(",
"ids_bcs_added_field",
",",
"fasta_label",
",",
"fasta_seq",
",",
"bc_lens",
",",
"all_bcs",
",",
"barcode_type",
"=",
"'golay_12'",
",",
"max_bc_errors",
"=",
"1.5",
",",
"disable_bc_correction",
"=",
"False",
",",
"added_demultiplex_field",
"=",
"None",
")",
":",
"for",
"bc_len",
"in",
"bc_lens",
":",
"curr_bc",
"=",
"fasta_seq",
"[",
"0",
":",
"bc_len",
"]",
"(",
"corrected_bc",
",",
"num_errors",
",",
"added_field",
")",
"=",
"get_curr_bc_added_field",
"(",
"curr_bc",
",",
"ids_bcs_added_field",
",",
"fasta_label",
",",
"all_bcs",
",",
"barcode_type",
",",
"disable_bc_correction",
",",
"added_demultiplex_field",
")",
"if",
"added_field",
":",
"if",
"(",
"(",
"corrected_bc",
",",
"added_field",
")",
"in",
"ids_bcs_added_field",
".",
"keys",
"(",
")",
")",
":",
"break",
"elif",
"(",
"corrected_bc",
"is",
"not",
"None",
")",
":",
"break",
"return",
"(",
"curr_bc",
",",
"corrected_bc",
",",
"num_errors",
",",
"added_field",
")"
] | attempts to find bc in a given sequence and added demultiplex field ids_bcs_added_field: dict of : sampleid fasta_label: full fasta label . | train | false |
21,091 | def survey_getWidgetFromQuestion(question_id):
qtable = current.s3db.survey_question
question = current.db((qtable.id == question_id)).select(qtable.type, limitby=(0, 1)).first()
question_type = question.type
widget_obj = survey_question_type[question_type](question_id)
return widget_obj
| [
"def",
"survey_getWidgetFromQuestion",
"(",
"question_id",
")",
":",
"qtable",
"=",
"current",
".",
"s3db",
".",
"survey_question",
"question",
"=",
"current",
".",
"db",
"(",
"(",
"qtable",
".",
"id",
"==",
"question_id",
")",
")",
".",
"select",
"(",
"qtable",
".",
"type",
",",
"limitby",
"=",
"(",
"0",
",",
"1",
")",
")",
".",
"first",
"(",
")",
"question_type",
"=",
"question",
".",
"type",
"widget_obj",
"=",
"survey_question_type",
"[",
"question_type",
"]",
"(",
"question_id",
")",
"return",
"widget_obj"
] | function that gets the right widget for the question . | train | false |
21,092 | def clear_dataframe_indexer_caches(df):
for attr in _INDEXER_NAMES:
try:
delattr(df, attr)
except AttributeError:
pass
| [
"def",
"clear_dataframe_indexer_caches",
"(",
"df",
")",
":",
"for",
"attr",
"in",
"_INDEXER_NAMES",
":",
"try",
":",
"delattr",
"(",
"df",
",",
"attr",
")",
"except",
"AttributeError",
":",
"pass"
] | clear cached attributes from a pandas dataframe . | train | false |
21,093 | def clean_kwargs(**kwargs):
ret = {}
for (key, val) in six.iteritems(kwargs):
if (not key.startswith('__')):
ret[key] = val
return ret
| [
"def",
"clean_kwargs",
"(",
"**",
"kwargs",
")",
":",
"ret",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"val",
")",
"in",
"six",
".",
"iteritems",
"(",
"kwargs",
")",
":",
"if",
"(",
"not",
"key",
".",
"startswith",
"(",
"'__'",
")",
")",
":",
"ret",
"[",
"key",
"]",
"=",
"val",
"return",
"ret"
] | return a dict without any of the __pub* keys from the kwargs dict passed into the execution module functions . | train | true |
21,094 | def cubical_graph(create_using=None):
description = ['adjacencylist', 'Platonic Cubical Graph', 8, [[2, 4, 5], [1, 3, 8], [2, 4, 7], [1, 3, 6], [1, 6, 8], [4, 5, 7], [3, 6, 8], [2, 5, 7]]]
G = make_small_undirected_graph(description, create_using)
return G
| [
"def",
"cubical_graph",
"(",
"create_using",
"=",
"None",
")",
":",
"description",
"=",
"[",
"'adjacencylist'",
",",
"'Platonic Cubical Graph'",
",",
"8",
",",
"[",
"[",
"2",
",",
"4",
",",
"5",
"]",
",",
"[",
"1",
",",
"3",
",",
"8",
"]",
",",
"[",
"2",
",",
"4",
",",
"7",
"]",
",",
"[",
"1",
",",
"3",
",",
"6",
"]",
",",
"[",
"1",
",",
"6",
",",
"8",
"]",
",",
"[",
"4",
",",
"5",
",",
"7",
"]",
",",
"[",
"3",
",",
"6",
",",
"8",
"]",
",",
"[",
"2",
",",
"5",
",",
"7",
"]",
"]",
"]",
"G",
"=",
"make_small_undirected_graph",
"(",
"description",
",",
"create_using",
")",
"return",
"G"
] | return the 3-regular platonic cubical graph . | train | false |
21,095 | def column_type_str(column):
type_ = column.type
if (type(type_) in (types.Integer, types.SmallInteger)):
return 'int'
if (type(type_) == types.Boolean):
return 'bool'
if (type(type_) == types.Unicode):
return (u'unicode \u2013 %s' % column.info['format'])
if (type(type_) == types.UnicodeText):
return (u'unicode \u2013 %s' % column.info['format'])
if (type(type_) == types.Enum):
return ('enum: [%s]' % ', '.join(type_.enums))
if (type(type_) == markdown.MarkdownColumn):
return 'markdown'
raise ValueError(repr(type_))
| [
"def",
"column_type_str",
"(",
"column",
")",
":",
"type_",
"=",
"column",
".",
"type",
"if",
"(",
"type",
"(",
"type_",
")",
"in",
"(",
"types",
".",
"Integer",
",",
"types",
".",
"SmallInteger",
")",
")",
":",
"return",
"'int'",
"if",
"(",
"type",
"(",
"type_",
")",
"==",
"types",
".",
"Boolean",
")",
":",
"return",
"'bool'",
"if",
"(",
"type",
"(",
"type_",
")",
"==",
"types",
".",
"Unicode",
")",
":",
"return",
"(",
"u'unicode \\u2013 %s'",
"%",
"column",
".",
"info",
"[",
"'format'",
"]",
")",
"if",
"(",
"type",
"(",
"type_",
")",
"==",
"types",
".",
"UnicodeText",
")",
":",
"return",
"(",
"u'unicode \\u2013 %s'",
"%",
"column",
".",
"info",
"[",
"'format'",
"]",
")",
"if",
"(",
"type",
"(",
"type_",
")",
"==",
"types",
".",
"Enum",
")",
":",
"return",
"(",
"'enum: [%s]'",
"%",
"', '",
".",
"join",
"(",
"type_",
".",
"enums",
")",
")",
"if",
"(",
"type",
"(",
"type_",
")",
"==",
"markdown",
".",
"MarkdownColumn",
")",
":",
"return",
"'markdown'",
"raise",
"ValueError",
"(",
"repr",
"(",
"type_",
")",
")"
] | extract the type name from a sqla column . | train | false |
21,096 | def _autoserv_command_line(machines, profiles, extra_args, job=None, queue_entry=None, verbose=True):
autoserv_argv = [_autoserv_path, '-p', '-r', drone_manager.WORKING_DIRECTORY]
if machines:
if profiles:
machines = [('%s#%s' % (m, p)) for (m, p) in zip(machines, profiles)]
autoserv_argv += ['-m', ','.join(machines)]
if (job or queue_entry):
if (not job):
job = queue_entry.job
autoserv_argv += ['-u', job.owner, '-l', job.name]
if verbose:
autoserv_argv.append('--verbose')
return (autoserv_argv + extra_args)
| [
"def",
"_autoserv_command_line",
"(",
"machines",
",",
"profiles",
",",
"extra_args",
",",
"job",
"=",
"None",
",",
"queue_entry",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"autoserv_argv",
"=",
"[",
"_autoserv_path",
",",
"'-p'",
",",
"'-r'",
",",
"drone_manager",
".",
"WORKING_DIRECTORY",
"]",
"if",
"machines",
":",
"if",
"profiles",
":",
"machines",
"=",
"[",
"(",
"'%s#%s'",
"%",
"(",
"m",
",",
"p",
")",
")",
"for",
"(",
"m",
",",
"p",
")",
"in",
"zip",
"(",
"machines",
",",
"profiles",
")",
"]",
"autoserv_argv",
"+=",
"[",
"'-m'",
",",
"','",
".",
"join",
"(",
"machines",
")",
"]",
"if",
"(",
"job",
"or",
"queue_entry",
")",
":",
"if",
"(",
"not",
"job",
")",
":",
"job",
"=",
"queue_entry",
".",
"job",
"autoserv_argv",
"+=",
"[",
"'-u'",
",",
"job",
".",
"owner",
",",
"'-l'",
",",
"job",
".",
"name",
"]",
"if",
"verbose",
":",
"autoserv_argv",
".",
"append",
"(",
"'--verbose'",
")",
"return",
"(",
"autoserv_argv",
"+",
"extra_args",
")"
] | builds an autoserv command line composed of the executable and parameters :type machines: list . | train | false |
21,097 | def service_is_up(service):
last_heartbeat = (service['updated_at'] or service['created_at'])
elapsed = total_seconds((timeutils.utcnow() - last_heartbeat))
return (abs(elapsed) <= FLAGS.service_down_time)
| [
"def",
"service_is_up",
"(",
"service",
")",
":",
"last_heartbeat",
"=",
"(",
"service",
"[",
"'updated_at'",
"]",
"or",
"service",
"[",
"'created_at'",
"]",
")",
"elapsed",
"=",
"total_seconds",
"(",
"(",
"timeutils",
".",
"utcnow",
"(",
")",
"-",
"last_heartbeat",
")",
")",
"return",
"(",
"abs",
"(",
"elapsed",
")",
"<=",
"FLAGS",
".",
"service_down_time",
")"
] | check whether a service is up based on last heartbeat . | train | false |
21,099 | def fmin_pass_expr_memo_ctrl(f):
f.fmin_pass_expr_memo_ctrl = True
return f
| [
"def",
"fmin_pass_expr_memo_ctrl",
"(",
"f",
")",
":",
"f",
".",
"fmin_pass_expr_memo_ctrl",
"=",
"True",
"return",
"f"
] | mark a function as expecting kwargs expr . | train | false |
21,101 | def get_past_timestamp(idx, timestamps):
if (idx == 0):
return get_future_timestamp(idx, timestamps)
if timestamps[idx]:
res = timestamps[idx][(-1)]
return res
else:
return get_past_timestamp((idx - 1), timestamps)
| [
"def",
"get_past_timestamp",
"(",
"idx",
",",
"timestamps",
")",
":",
"if",
"(",
"idx",
"==",
"0",
")",
":",
"return",
"get_future_timestamp",
"(",
"idx",
",",
"timestamps",
")",
"if",
"timestamps",
"[",
"idx",
"]",
":",
"res",
"=",
"timestamps",
"[",
"idx",
"]",
"[",
"(",
"-",
"1",
")",
"]",
"return",
"res",
"else",
":",
"return",
"get_past_timestamp",
"(",
"(",
"idx",
"-",
"1",
")",
",",
"timestamps",
")"
] | recursive function to find the most recent valid timestamp in the past . | train | false |
21,102 | def timeout_for_protocol(reactor, protocol):
return Timeout(reactor, (2 * PING_INTERVAL.seconds), (lambda : protocol.transport.abortConnection()))
| [
"def",
"timeout_for_protocol",
"(",
"reactor",
",",
"protocol",
")",
":",
"return",
"Timeout",
"(",
"reactor",
",",
"(",
"2",
"*",
"PING_INTERVAL",
".",
"seconds",
")",
",",
"(",
"lambda",
":",
"protocol",
".",
"transport",
".",
"abortConnection",
"(",
")",
")",
")"
] | create a timeout for inactive amp connections that will abort the connection when the timeout is reached . | train | false |
21,103 | def _stub_urandom(n):
randomData = [random.randrange(256) for n in xrange(n)]
return ''.join(map(chr, randomData))
| [
"def",
"_stub_urandom",
"(",
"n",
")",
":",
"randomData",
"=",
"[",
"random",
".",
"randrange",
"(",
"256",
")",
"for",
"n",
"in",
"xrange",
"(",
"n",
")",
"]",
"return",
"''",
".",
"join",
"(",
"map",
"(",
"chr",
",",
"randomData",
")",
")"
] | provide random data in versions of python prior to 2 . | train | false |
21,104 | def _burkardt_13_power(n, p):
if ((n != int(n)) or (n < 2)):
raise ValueError('n must be an integer greater than 1')
n = int(n)
if ((p != int(p)) or (p < 0)):
raise ValueError('p must be a non-negative integer')
p = int(p)
(a, b) = divmod(p, n)
large = np.power(10.0, ((- n) * a))
small = (large * np.power(10.0, (- n)))
return (np.diag(([large] * (n - b)), b) + np.diag(([small] * b), (b - n)))
| [
"def",
"_burkardt_13_power",
"(",
"n",
",",
"p",
")",
":",
"if",
"(",
"(",
"n",
"!=",
"int",
"(",
"n",
")",
")",
"or",
"(",
"n",
"<",
"2",
")",
")",
":",
"raise",
"ValueError",
"(",
"'n must be an integer greater than 1'",
")",
"n",
"=",
"int",
"(",
"n",
")",
"if",
"(",
"(",
"p",
"!=",
"int",
"(",
"p",
")",
")",
"or",
"(",
"p",
"<",
"0",
")",
")",
":",
"raise",
"ValueError",
"(",
"'p must be a non-negative integer'",
")",
"p",
"=",
"int",
"(",
"p",
")",
"(",
"a",
",",
"b",
")",
"=",
"divmod",
"(",
"p",
",",
"n",
")",
"large",
"=",
"np",
".",
"power",
"(",
"10.0",
",",
"(",
"(",
"-",
"n",
")",
"*",
"a",
")",
")",
"small",
"=",
"(",
"large",
"*",
"np",
".",
"power",
"(",
"10.0",
",",
"(",
"-",
"n",
")",
")",
")",
"return",
"(",
"np",
".",
"diag",
"(",
"(",
"[",
"large",
"]",
"*",
"(",
"n",
"-",
"b",
")",
")",
",",
"b",
")",
"+",
"np",
".",
"diag",
"(",
"(",
"[",
"small",
"]",
"*",
"b",
")",
",",
"(",
"b",
"-",
"n",
")",
")",
")"
] | a helper function for testing matrix functions . | train | false |
21,105 | @register.tag('get_current_language')
def do_get_current_language(parser, token):
args = token.contents.split()
if ((len(args) != 3) or (args[1] != 'as')):
raise TemplateSyntaxError(("'get_current_language' requires 'as variable' (got %r)" % args))
return GetCurrentLanguageNode(args[2])
| [
"@",
"register",
".",
"tag",
"(",
"'get_current_language'",
")",
"def",
"do_get_current_language",
"(",
"parser",
",",
"token",
")",
":",
"args",
"=",
"token",
".",
"contents",
".",
"split",
"(",
")",
"if",
"(",
"(",
"len",
"(",
"args",
")",
"!=",
"3",
")",
"or",
"(",
"args",
"[",
"1",
"]",
"!=",
"'as'",
")",
")",
":",
"raise",
"TemplateSyntaxError",
"(",
"(",
"\"'get_current_language' requires 'as variable' (got %r)\"",
"%",
"args",
")",
")",
"return",
"GetCurrentLanguageNode",
"(",
"args",
"[",
"2",
"]",
")"
] | this will store the current language in the context . | train | false |
21,106 | def create_release_branch_main(args, base_path, top_level):
options = CreateReleaseBranchOptions()
try:
options.parseOptions(args)
except UsageError as e:
sys.stderr.write(('%s: %s\n' % (base_path.basename(), e)))
raise SystemExit(1)
version = options['flocker-version']
path = FilePath(__file__).path
try:
base_branch = calculate_base_branch(version=version, path=path)
create_release_branch(version=version, base_branch=base_branch)
except NotARelease:
sys.stderr.write(("%s: Can't create a release branch for non-release.\n" % (base_path.basename(),)))
raise SystemExit(1)
except TagExists:
sys.stderr.write(('%s: Tag already exists for this release.\n' % (base_path.basename(),)))
raise SystemExit(1)
except BranchExists:
sys.stderr.write(('%s: The release branch already exists.\n' % (base_path.basename(),)))
raise SystemExit(1)
| [
"def",
"create_release_branch_main",
"(",
"args",
",",
"base_path",
",",
"top_level",
")",
":",
"options",
"=",
"CreateReleaseBranchOptions",
"(",
")",
"try",
":",
"options",
".",
"parseOptions",
"(",
"args",
")",
"except",
"UsageError",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"(",
"'%s: %s\\n'",
"%",
"(",
"base_path",
".",
"basename",
"(",
")",
",",
"e",
")",
")",
")",
"raise",
"SystemExit",
"(",
"1",
")",
"version",
"=",
"options",
"[",
"'flocker-version'",
"]",
"path",
"=",
"FilePath",
"(",
"__file__",
")",
".",
"path",
"try",
":",
"base_branch",
"=",
"calculate_base_branch",
"(",
"version",
"=",
"version",
",",
"path",
"=",
"path",
")",
"create_release_branch",
"(",
"version",
"=",
"version",
",",
"base_branch",
"=",
"base_branch",
")",
"except",
"NotARelease",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"(",
"\"%s: Can't create a release branch for non-release.\\n\"",
"%",
"(",
"base_path",
".",
"basename",
"(",
")",
",",
")",
")",
")",
"raise",
"SystemExit",
"(",
"1",
")",
"except",
"TagExists",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"(",
"'%s: Tag already exists for this release.\\n'",
"%",
"(",
"base_path",
".",
"basename",
"(",
")",
",",
")",
")",
")",
"raise",
"SystemExit",
"(",
"1",
")",
"except",
"BranchExists",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"(",
"'%s: The release branch already exists.\\n'",
"%",
"(",
"base_path",
".",
"basename",
"(",
")",
",",
")",
")",
")",
"raise",
"SystemExit",
"(",
"1",
")"
] | create a release branch . | train | false |
21,109 | def hash_file(filename, size=None, method='md5'):
chunksize = 4096
fsize = os.path.getsize(filename)
if ((not size) or (size > fsize)):
size = fsize
f = open(filename, 'rb')
try:
hash = utils.hash(method)
except ValueError:
logging.error(('Unknown hash type %s, returning None' % method))
while (size > 0):
if (chunksize > size):
chunksize = size
data = f.read(chunksize)
if (len(data) == 0):
logging.debug(('Nothing left to read but size=%d' % size))
break
hash.update(data)
size -= len(data)
f.close()
return hash.hexdigest()
| [
"def",
"hash_file",
"(",
"filename",
",",
"size",
"=",
"None",
",",
"method",
"=",
"'md5'",
")",
":",
"chunksize",
"=",
"4096",
"fsize",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
"if",
"(",
"(",
"not",
"size",
")",
"or",
"(",
"size",
">",
"fsize",
")",
")",
":",
"size",
"=",
"fsize",
"f",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"try",
":",
"hash",
"=",
"utils",
".",
"hash",
"(",
"method",
")",
"except",
"ValueError",
":",
"logging",
".",
"error",
"(",
"(",
"'Unknown hash type %s, returning None'",
"%",
"method",
")",
")",
"while",
"(",
"size",
">",
"0",
")",
":",
"if",
"(",
"chunksize",
">",
"size",
")",
":",
"chunksize",
"=",
"size",
"data",
"=",
"f",
".",
"read",
"(",
"chunksize",
")",
"if",
"(",
"len",
"(",
"data",
")",
"==",
"0",
")",
":",
"logging",
".",
"debug",
"(",
"(",
"'Nothing left to read but size=%d'",
"%",
"size",
")",
")",
"break",
"hash",
".",
"update",
"(",
"data",
")",
"size",
"-=",
"len",
"(",
"data",
")",
"f",
".",
"close",
"(",
")",
"return",
"hash",
".",
"hexdigest",
"(",
")"
] | calculates an hash on a file by path . | train | false |
21,110 | def consts(t):
for elt in t:
r = repr(elt)
if r.startswith('<code object'):
(yield ('<code object %s>' % elt.co_name))
else:
(yield r)
| [
"def",
"consts",
"(",
"t",
")",
":",
"for",
"elt",
"in",
"t",
":",
"r",
"=",
"repr",
"(",
"elt",
")",
"if",
"r",
".",
"startswith",
"(",
"'<code object'",
")",
":",
"(",
"yield",
"(",
"'<code object %s>'",
"%",
"elt",
".",
"co_name",
")",
")",
"else",
":",
"(",
"yield",
"r",
")"
] | yield a doctest-safe sequence of object reprs . | train | false |
21,111 | def reset_shortcuts():
CONF.reset_to_defaults(section='shortcuts')
| [
"def",
"reset_shortcuts",
"(",
")",
":",
"CONF",
".",
"reset_to_defaults",
"(",
"section",
"=",
"'shortcuts'",
")"
] | reset keyboard shortcuts to default values . | train | false |
21,112 | def get_test_modules(module):
modpath = ('odoo.addons.' + module)
try:
mod = importlib.import_module('.tests', modpath)
except Exception as e:
if (str(e) != 'No module named tests'):
_logger.exception('Can not `import %s`.', module)
return []
if (hasattr(mod, 'fast_suite') or hasattr(mod, 'checks')):
_logger.warn('Found deprecated fast_suite or checks attribute in test module %s. These have no effect in or after version 8.0.', mod.__name__)
result = [mod_obj for (name, mod_obj) in inspect.getmembers(mod, inspect.ismodule) if name.startswith('test_')]
return result
| [
"def",
"get_test_modules",
"(",
"module",
")",
":",
"modpath",
"=",
"(",
"'odoo.addons.'",
"+",
"module",
")",
"try",
":",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"'.tests'",
",",
"modpath",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"(",
"str",
"(",
"e",
")",
"!=",
"'No module named tests'",
")",
":",
"_logger",
".",
"exception",
"(",
"'Can not `import %s`.'",
",",
"module",
")",
"return",
"[",
"]",
"if",
"(",
"hasattr",
"(",
"mod",
",",
"'fast_suite'",
")",
"or",
"hasattr",
"(",
"mod",
",",
"'checks'",
")",
")",
":",
"_logger",
".",
"warn",
"(",
"'Found deprecated fast_suite or checks attribute in test module %s. These have no effect in or after version 8.0.'",
",",
"mod",
".",
"__name__",
")",
"result",
"=",
"[",
"mod_obj",
"for",
"(",
"name",
",",
"mod_obj",
")",
"in",
"inspect",
".",
"getmembers",
"(",
"mod",
",",
"inspect",
".",
"ismodule",
")",
"if",
"name",
".",
"startswith",
"(",
"'test_'",
")",
"]",
"return",
"result"
] | return a list of module for the addons potentially containing tests to feed unittest . | train | false |
21,113 | def _mxwarn(msg):
warn(('Possible MaxFilter bug: %s, more info: http://imaging.mrc-cbu.cam.ac.uk/meg/maxbugs' % msg))
| [
"def",
"_mxwarn",
"(",
"msg",
")",
":",
"warn",
"(",
"(",
"'Possible MaxFilter bug: %s, more info: http://imaging.mrc-cbu.cam.ac.uk/meg/maxbugs'",
"%",
"msg",
")",
")"
] | warn about a bug . | train | false |
21,116 | def signalHandler(signum, frame):
raise Timeout('Timeout exceed!')
| [
"def",
"signalHandler",
"(",
"signum",
",",
"frame",
")",
":",
"raise",
"Timeout",
"(",
"'Timeout exceed!'",
")"
] | signal handler to catch timeout signal: raise timeout exception . | train | false |
21,117 | def upper_tri(operator):
entries = (operator.size[0] * operator.size[1])
size = (((entries - operator.size[0]) // 2), 1)
return lo.LinOp(lo.UPPER_TRI, size, [operator], None)
| [
"def",
"upper_tri",
"(",
"operator",
")",
":",
"entries",
"=",
"(",
"operator",
".",
"size",
"[",
"0",
"]",
"*",
"operator",
".",
"size",
"[",
"1",
"]",
")",
"size",
"=",
"(",
"(",
"(",
"entries",
"-",
"operator",
".",
"size",
"[",
"0",
"]",
")",
"//",
"2",
")",
",",
"1",
")",
"return",
"lo",
".",
"LinOp",
"(",
"lo",
".",
"UPPER_TRI",
",",
"size",
",",
"[",
"operator",
"]",
",",
"None",
")"
] | vectorized upper triangular portion of a square matrix . | train | false |
21,119 | def _validate_quota(quota):
if (quota is not None):
if (not isinstance(quota, dict)):
raise TypeError('quota must be a dictionary, provided value: {0}'.format(quota))
periods = ['DAY', 'WEEK', 'MONTH']
if (('period' not in quota) or (quota['period'] not in periods)):
raise ValueError('quota must have a valid period specified, valid values are {0}'.format(','.join(periods)))
if ('limit' not in quota):
raise ValueError('quota limit must have a valid value')
| [
"def",
"_validate_quota",
"(",
"quota",
")",
":",
"if",
"(",
"quota",
"is",
"not",
"None",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"quota",
",",
"dict",
")",
")",
":",
"raise",
"TypeError",
"(",
"'quota must be a dictionary, provided value: {0}'",
".",
"format",
"(",
"quota",
")",
")",
"periods",
"=",
"[",
"'DAY'",
",",
"'WEEK'",
",",
"'MONTH'",
"]",
"if",
"(",
"(",
"'period'",
"not",
"in",
"quota",
")",
"or",
"(",
"quota",
"[",
"'period'",
"]",
"not",
"in",
"periods",
")",
")",
":",
"raise",
"ValueError",
"(",
"'quota must have a valid period specified, valid values are {0}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"periods",
")",
")",
")",
"if",
"(",
"'limit'",
"not",
"in",
"quota",
")",
":",
"raise",
"ValueError",
"(",
"'quota limit must have a valid value'",
")"
] | helper to verify that quota parameters are valid . | train | true |
21,120 | def group_exists(name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.describe_replication_groups(name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
return False
| [
"def",
"group_exists",
"(",
"name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"try",
":",
"conn",
".",
"describe_replication_groups",
"(",
"name",
")",
"return",
"True",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"e",
")",
"return",
"False"
] | check to see if a replication group exists . | train | true |
21,121 | def test_daophot_types():
table = ascii.read('t/daophot2.dat', Reader=ascii.Daophot)
assert (table['LID'].dtype.char in 'fd')
assert (table['MAG'].dtype.char in 'fd')
assert (table['PIER'].dtype.char in 'US')
assert (table['ID'].dtype.char in 'il')
| [
"def",
"test_daophot_types",
"(",
")",
":",
"table",
"=",
"ascii",
".",
"read",
"(",
"'t/daophot2.dat'",
",",
"Reader",
"=",
"ascii",
".",
"Daophot",
")",
"assert",
"(",
"table",
"[",
"'LID'",
"]",
".",
"dtype",
".",
"char",
"in",
"'fd'",
")",
"assert",
"(",
"table",
"[",
"'MAG'",
"]",
".",
"dtype",
".",
"char",
"in",
"'fd'",
")",
"assert",
"(",
"table",
"[",
"'PIER'",
"]",
".",
"dtype",
".",
"char",
"in",
"'US'",
")",
"assert",
"(",
"table",
"[",
"'ID'",
"]",
".",
"dtype",
".",
"char",
"in",
"'il'",
")"
] | test specific data types which are different from what would be inferred automatically based only data values . | train | false |
21,123 | @pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write():
t = Table([['<em>x</em>'], ['<em>y</em>']], names=['a', 'b'])
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'})
expected = ' <tr>\n <td><em>x</em></td>\n <td><em>y</em></td>\n </tr>'
assert (expected in out.getvalue())
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']})
assert (expected in out.getvalue())
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']})
expected = ' <tr>\n <td><em>x</em></td>\n <td><em>y</em></td>\n </tr>'
assert (expected in out.getvalue())
| [
"@",
"pytest",
".",
"mark",
".",
"skipif",
"(",
"'not HAS_BLEACH'",
")",
"def",
"test_raw_html_write",
"(",
")",
":",
"t",
"=",
"Table",
"(",
"[",
"[",
"'<em>x</em>'",
"]",
",",
"[",
"'<em>y</em>'",
"]",
"]",
",",
"names",
"=",
"[",
"'a'",
",",
"'b'",
"]",
")",
"out",
"=",
"StringIO",
"(",
")",
"t",
".",
"write",
"(",
"out",
",",
"format",
"=",
"'ascii.html'",
",",
"htmldict",
"=",
"{",
"'raw_html_cols'",
":",
"'a'",
"}",
")",
"expected",
"=",
"' <tr>\\n <td><em>x</em></td>\\n <td><em>y</em></td>\\n </tr>'",
"assert",
"(",
"expected",
"in",
"out",
".",
"getvalue",
"(",
")",
")",
"out",
"=",
"StringIO",
"(",
")",
"t",
".",
"write",
"(",
"out",
",",
"format",
"=",
"'ascii.html'",
",",
"htmldict",
"=",
"{",
"'raw_html_cols'",
":",
"[",
"'a'",
"]",
"}",
")",
"assert",
"(",
"expected",
"in",
"out",
".",
"getvalue",
"(",
")",
")",
"out",
"=",
"StringIO",
"(",
")",
"t",
".",
"write",
"(",
"out",
",",
"format",
"=",
"'ascii.html'",
",",
"htmldict",
"=",
"{",
"'raw_html_cols'",
":",
"[",
"'a'",
",",
"'b'",
"]",
"}",
")",
"expected",
"=",
"' <tr>\\n <td><em>x</em></td>\\n <td><em>y</em></td>\\n </tr>'",
"assert",
"(",
"expected",
"in",
"out",
".",
"getvalue",
"(",
")",
")"
] | test that columns can contain raw html which is not escaped . | train | false |
21,124 | def correct_preds(mat, p, target=None, cutoff=0.5):
if (not target):
target = mat
if isinstance(p, EigenMatrix):
err_code = _eigenmat.correct_preds(mat.p_mat, p.p_mat, target.p_mat, ct.c_float(cutoff))
else:
raise ValueError, 'Value must be of type EigenMatrix.'
if err_code:
raise generate_exception(err_code)
return target
| [
"def",
"correct_preds",
"(",
"mat",
",",
"p",
",",
"target",
"=",
"None",
",",
"cutoff",
"=",
"0.5",
")",
":",
"if",
"(",
"not",
"target",
")",
":",
"target",
"=",
"mat",
"if",
"isinstance",
"(",
"p",
",",
"EigenMatrix",
")",
":",
"err_code",
"=",
"_eigenmat",
".",
"correct_preds",
"(",
"mat",
".",
"p_mat",
",",
"p",
".",
"p_mat",
",",
"target",
".",
"p_mat",
",",
"ct",
".",
"c_float",
"(",
"cutoff",
")",
")",
"else",
":",
"raise",
"ValueError",
",",
"'Value must be of type EigenMatrix.'",
"if",
"err_code",
":",
"raise",
"generate_exception",
"(",
"err_code",
")",
"return",
"target"
] | compute mat* + . | train | false |
21,125 | def _validate_archive(filename):
is_file_like = hasattr(filename, 'read')
if ((not is_file_like) and os.path.isfile(filename)):
file_format = os.path.splitext(filename)[(-1)]
if (file_format not in SUPPORTED_FORMATS):
if (file_format == '.xls'):
msg = 'openpyxl does not support the old .xls file format, please use xlrd to read this file, or convert it to the more recent .xlsx file format.'
elif (file_format == '.xlsb'):
msg = 'openpyxl does not support binary format .xlsb, please convert this file to .xlsx format if you want to open it with openpyxl'
else:
msg = ('openpyxl does not support %s file format, please check you can open it with Excel first. Supported formats are: %s' % (file_format, ','.join(SUPPORTED_FORMATS)))
raise InvalidFileException(msg)
if is_file_like:
if (getattr(filename, 'encoding', None) is not None):
raise IOError('File-object must be opened in binary mode')
try:
archive = ZipFile(filename, 'r', ZIP_DEFLATED)
except BadZipfile:
f = repair_central_directory(filename, is_file_like)
archive = ZipFile(f, 'r', ZIP_DEFLATED)
return archive
| [
"def",
"_validate_archive",
"(",
"filename",
")",
":",
"is_file_like",
"=",
"hasattr",
"(",
"filename",
",",
"'read'",
")",
"if",
"(",
"(",
"not",
"is_file_like",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
")",
":",
"file_format",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"(",
"-",
"1",
")",
"]",
"if",
"(",
"file_format",
"not",
"in",
"SUPPORTED_FORMATS",
")",
":",
"if",
"(",
"file_format",
"==",
"'.xls'",
")",
":",
"msg",
"=",
"'openpyxl does not support the old .xls file format, please use xlrd to read this file, or convert it to the more recent .xlsx file format.'",
"elif",
"(",
"file_format",
"==",
"'.xlsb'",
")",
":",
"msg",
"=",
"'openpyxl does not support binary format .xlsb, please convert this file to .xlsx format if you want to open it with openpyxl'",
"else",
":",
"msg",
"=",
"(",
"'openpyxl does not support %s file format, please check you can open it with Excel first. Supported formats are: %s'",
"%",
"(",
"file_format",
",",
"','",
".",
"join",
"(",
"SUPPORTED_FORMATS",
")",
")",
")",
"raise",
"InvalidFileException",
"(",
"msg",
")",
"if",
"is_file_like",
":",
"if",
"(",
"getattr",
"(",
"filename",
",",
"'encoding'",
",",
"None",
")",
"is",
"not",
"None",
")",
":",
"raise",
"IOError",
"(",
"'File-object must be opened in binary mode'",
")",
"try",
":",
"archive",
"=",
"ZipFile",
"(",
"filename",
",",
"'r'",
",",
"ZIP_DEFLATED",
")",
"except",
"BadZipfile",
":",
"f",
"=",
"repair_central_directory",
"(",
"filename",
",",
"is_file_like",
")",
"archive",
"=",
"ZipFile",
"(",
"f",
",",
"'r'",
",",
"ZIP_DEFLATED",
")",
"return",
"archive"
] | check the file is a valid zipfile . | train | false |
21,126 | def test_grouped_slicing(T1):
for masked in (False, True):
t1 = Table(T1, masked=masked)
tg = t1.group_by('a')
tg2 = tg[3:5]
assert np.all((tg2.groups.indices == np.array([0, len(tg2)])))
assert (tg2.groups.keys is None)
| [
"def",
"test_grouped_slicing",
"(",
"T1",
")",
":",
"for",
"masked",
"in",
"(",
"False",
",",
"True",
")",
":",
"t1",
"=",
"Table",
"(",
"T1",
",",
"masked",
"=",
"masked",
")",
"tg",
"=",
"t1",
".",
"group_by",
"(",
"'a'",
")",
"tg2",
"=",
"tg",
"[",
"3",
":",
"5",
"]",
"assert",
"np",
".",
"all",
"(",
"(",
"tg2",
".",
"groups",
".",
"indices",
"==",
"np",
".",
"array",
"(",
"[",
"0",
",",
"len",
"(",
"tg2",
")",
"]",
")",
")",
")",
"assert",
"(",
"tg2",
".",
"groups",
".",
"keys",
"is",
"None",
")"
] | test that slicing a table removes previous grouping . | train | false |
21,127 | def detect_images_and_galleries(generators):
for generator in generators:
if isinstance(generator, ArticlesGenerator):
for article in itertools.chain(generator.articles, generator.translations, generator.drafts):
detect_image(generator, article)
detect_gallery(generator, article)
elif isinstance(generator, PagesGenerator):
for page in itertools.chain(generator.pages, generator.translations, generator.hidden_pages):
detect_image(generator, page)
detect_gallery(generator, page)
| [
"def",
"detect_images_and_galleries",
"(",
"generators",
")",
":",
"for",
"generator",
"in",
"generators",
":",
"if",
"isinstance",
"(",
"generator",
",",
"ArticlesGenerator",
")",
":",
"for",
"article",
"in",
"itertools",
".",
"chain",
"(",
"generator",
".",
"articles",
",",
"generator",
".",
"translations",
",",
"generator",
".",
"drafts",
")",
":",
"detect_image",
"(",
"generator",
",",
"article",
")",
"detect_gallery",
"(",
"generator",
",",
"article",
")",
"elif",
"isinstance",
"(",
"generator",
",",
"PagesGenerator",
")",
":",
"for",
"page",
"in",
"itertools",
".",
"chain",
"(",
"generator",
".",
"pages",
",",
"generator",
".",
"translations",
",",
"generator",
".",
"hidden_pages",
")",
":",
"detect_image",
"(",
"generator",
",",
"page",
")",
"detect_gallery",
"(",
"generator",
",",
"page",
")"
] | runs generator on both pages and articles . | train | true |
21,128 | def _add_entities(hass, entity_ids):
attributes = {'test_attr': 5, 'test_attr_10': 'nice'}
for (idx, entity_id) in enumerate(entity_ids):
hass.states.set(entity_id, 'state{}'.format(idx), attributes)
hass.block_till_done()
recorder._INSTANCE.block_till_done()
db_states = recorder.query('States')
states = recorder.execute(db_states)
assert (db_states[0].event_id is not None)
return states
| [
"def",
"_add_entities",
"(",
"hass",
",",
"entity_ids",
")",
":",
"attributes",
"=",
"{",
"'test_attr'",
":",
"5",
",",
"'test_attr_10'",
":",
"'nice'",
"}",
"for",
"(",
"idx",
",",
"entity_id",
")",
"in",
"enumerate",
"(",
"entity_ids",
")",
":",
"hass",
".",
"states",
".",
"set",
"(",
"entity_id",
",",
"'state{}'",
".",
"format",
"(",
"idx",
")",
",",
"attributes",
")",
"hass",
".",
"block_till_done",
"(",
")",
"recorder",
".",
"_INSTANCE",
".",
"block_till_done",
"(",
")",
"db_states",
"=",
"recorder",
".",
"query",
"(",
"'States'",
")",
"states",
"=",
"recorder",
".",
"execute",
"(",
"db_states",
")",
"assert",
"(",
"db_states",
"[",
"0",
"]",
".",
"event_id",
"is",
"not",
"None",
")",
"return",
"states"
] | add entities . | train | false |
21,129 | @LocalContext
def run_assembly_exitcode(assembly):
p = run_assembly(assembly)
p.wait_for_close()
return p.poll()
| [
"@",
"LocalContext",
"def",
"run_assembly_exitcode",
"(",
"assembly",
")",
":",
"p",
"=",
"run_assembly",
"(",
"assembly",
")",
"p",
".",
"wait_for_close",
"(",
")",
"return",
"p",
".",
"poll",
"(",
")"
] | given an assembly listing . | train | false |
21,130 | def is_json_request():
content_type = request.content_type
return (content_type and ('application/json' in content_type))
| [
"def",
"is_json_request",
"(",
")",
":",
"content_type",
"=",
"request",
".",
"content_type",
"return",
"(",
"content_type",
"and",
"(",
"'application/json'",
"in",
"content_type",
")",
")"
] | return true if the current request is a json/ajax request . | train | false |
21,131 | def delete_server_cert(cert_name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.delete_server_cert(cert_name)
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to delete certificate {0}.'
log.error(msg.format(cert_name))
return False
| [
"def",
"delete_server_cert",
"(",
"cert_name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"try",
":",
"return",
"conn",
".",
"delete_server_cert",
"(",
"cert_name",
")",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"e",
")",
"msg",
"=",
"'Failed to delete certificate {0}.'",
"log",
".",
"error",
"(",
"msg",
".",
"format",
"(",
"cert_name",
")",
")",
"return",
"False"
] | deletes a certificate from amazon . | train | true |
21,132 | @util.positional(1)
def import_file_set(file_set, modules=None, _open=open):
if isinstance(file_set, basestring):
encoded_file = _open(file_set, 'rb')
try:
encoded_file_set = encoded_file.read()
finally:
encoded_file.close()
file_set = protobuf.decode_message(descriptor.FileSet, encoded_file_set)
for file_descriptor in file_set.files:
if (not file_descriptor.package.startswith('protorpc.')):
import_file(file_descriptor, modules=modules)
| [
"@",
"util",
".",
"positional",
"(",
"1",
")",
"def",
"import_file_set",
"(",
"file_set",
",",
"modules",
"=",
"None",
",",
"_open",
"=",
"open",
")",
":",
"if",
"isinstance",
"(",
"file_set",
",",
"basestring",
")",
":",
"encoded_file",
"=",
"_open",
"(",
"file_set",
",",
"'rb'",
")",
"try",
":",
"encoded_file_set",
"=",
"encoded_file",
".",
"read",
"(",
")",
"finally",
":",
"encoded_file",
".",
"close",
"(",
")",
"file_set",
"=",
"protobuf",
".",
"decode_message",
"(",
"descriptor",
".",
"FileSet",
",",
"encoded_file_set",
")",
"for",
"file_descriptor",
"in",
"file_set",
".",
"files",
":",
"if",
"(",
"not",
"file_descriptor",
".",
"package",
".",
"startswith",
"(",
"'protorpc.'",
")",
")",
":",
"import_file",
"(",
"file_descriptor",
",",
"modules",
"=",
"modules",
")"
] | import fileset in to module space . | train | false |
21,133 | def _get_col_o2m(cls, fk_col_name, deferrable=None, initially=None, ondelete=None, onupdate=None):
assert (cls.Attributes.table_name is not None), ('%r has no table name.' % cls)
(col_args, col_kwargs) = sanitize_args(cls.Attributes.sqla_column_args)
(pk_column,) = get_pk_columns(cls)
(pk_key, pk_spyne_type) = pk_column
pk_sqla_type = _get_sqlalchemy_type(pk_spyne_type)
if (fk_col_name is None):
fk_col_name = '_'.join([cls.Attributes.table_name, pk_key])
(yield [(fk_col_name, pk_sqla_type)])
fk = ForeignKey(('%s.%s' % (cls.Attributes.table_name, pk_key)), deferrable=deferrable, initially=initially, ondelete=ondelete, onupdate=onupdate)
col = Column(fk_col_name, pk_sqla_type, fk, *col_args, **col_kwargs)
(yield col)
| [
"def",
"_get_col_o2m",
"(",
"cls",
",",
"fk_col_name",
",",
"deferrable",
"=",
"None",
",",
"initially",
"=",
"None",
",",
"ondelete",
"=",
"None",
",",
"onupdate",
"=",
"None",
")",
":",
"assert",
"(",
"cls",
".",
"Attributes",
".",
"table_name",
"is",
"not",
"None",
")",
",",
"(",
"'%r has no table name.'",
"%",
"cls",
")",
"(",
"col_args",
",",
"col_kwargs",
")",
"=",
"sanitize_args",
"(",
"cls",
".",
"Attributes",
".",
"sqla_column_args",
")",
"(",
"pk_column",
",",
")",
"=",
"get_pk_columns",
"(",
"cls",
")",
"(",
"pk_key",
",",
"pk_spyne_type",
")",
"=",
"pk_column",
"pk_sqla_type",
"=",
"_get_sqlalchemy_type",
"(",
"pk_spyne_type",
")",
"if",
"(",
"fk_col_name",
"is",
"None",
")",
":",
"fk_col_name",
"=",
"'_'",
".",
"join",
"(",
"[",
"cls",
".",
"Attributes",
".",
"table_name",
",",
"pk_key",
"]",
")",
"(",
"yield",
"[",
"(",
"fk_col_name",
",",
"pk_sqla_type",
")",
"]",
")",
"fk",
"=",
"ForeignKey",
"(",
"(",
"'%s.%s'",
"%",
"(",
"cls",
".",
"Attributes",
".",
"table_name",
",",
"pk_key",
")",
")",
",",
"deferrable",
"=",
"deferrable",
",",
"initially",
"=",
"initially",
",",
"ondelete",
"=",
"ondelete",
",",
"onupdate",
"=",
"onupdate",
")",
"col",
"=",
"Column",
"(",
"fk_col_name",
",",
"pk_sqla_type",
",",
"fk",
",",
"*",
"col_args",
",",
"**",
"col_kwargs",
")",
"(",
"yield",
"col",
")"
] | gets the parent class and returns a column that points to the primary key of the parent . | train | false |
21,135 | def clone_container(container, dest_dir):
dest_dir = os.path.abspath(os.path.realpath(dest_dir))
clone_data = container.clone_data(dest_dir)
cls = type(container)
if (cls is Container):
return cls(None, None, container.log, clone_data=clone_data)
return cls(None, container.log, clone_data=clone_data)
| [
"def",
"clone_container",
"(",
"container",
",",
"dest_dir",
")",
":",
"dest_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"dest_dir",
")",
")",
"clone_data",
"=",
"container",
".",
"clone_data",
"(",
"dest_dir",
")",
"cls",
"=",
"type",
"(",
"container",
")",
"if",
"(",
"cls",
"is",
"Container",
")",
":",
"return",
"cls",
"(",
"None",
",",
"None",
",",
"container",
".",
"log",
",",
"clone_data",
"=",
"clone_data",
")",
"return",
"cls",
"(",
"None",
",",
"container",
".",
"log",
",",
"clone_data",
"=",
"clone_data",
")"
] | efficiently clone a container using hard links . | train | false |
21,136 | def save_image(image, destination=None, filename=None, **options):
if (destination is None):
destination = BytesIO()
filename = (filename or '')
Image.init()
format = Image.EXTENSION.get(os.path.splitext(filename)[1].lower(), 'JPEG')
if (format in ('JPEG', 'WEBP')):
options.setdefault('quality', 85)
saved = False
if (format == 'JPEG'):
if (settings.THUMBNAIL_PROGRESSIVE and (max(image.size) >= settings.THUMBNAIL_PROGRESSIVE)):
options['progressive'] = True
try:
image.save(destination, format=format, optimize=1, **options)
saved = True
except IOError:
pass
if (not saved):
image.save(destination, format=format, **options)
if hasattr(destination, 'seek'):
destination.seek(0)
return destination
| [
"def",
"save_image",
"(",
"image",
",",
"destination",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"**",
"options",
")",
":",
"if",
"(",
"destination",
"is",
"None",
")",
":",
"destination",
"=",
"BytesIO",
"(",
")",
"filename",
"=",
"(",
"filename",
"or",
"''",
")",
"Image",
".",
"init",
"(",
")",
"format",
"=",
"Image",
".",
"EXTENSION",
".",
"get",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
",",
"'JPEG'",
")",
"if",
"(",
"format",
"in",
"(",
"'JPEG'",
",",
"'WEBP'",
")",
")",
":",
"options",
".",
"setdefault",
"(",
"'quality'",
",",
"85",
")",
"saved",
"=",
"False",
"if",
"(",
"format",
"==",
"'JPEG'",
")",
":",
"if",
"(",
"settings",
".",
"THUMBNAIL_PROGRESSIVE",
"and",
"(",
"max",
"(",
"image",
".",
"size",
")",
">=",
"settings",
".",
"THUMBNAIL_PROGRESSIVE",
")",
")",
":",
"options",
"[",
"'progressive'",
"]",
"=",
"True",
"try",
":",
"image",
".",
"save",
"(",
"destination",
",",
"format",
"=",
"format",
",",
"optimize",
"=",
"1",
",",
"**",
"options",
")",
"saved",
"=",
"True",
"except",
"IOError",
":",
"pass",
"if",
"(",
"not",
"saved",
")",
":",
"image",
".",
"save",
"(",
"destination",
",",
"format",
"=",
"format",
",",
"**",
"options",
")",
"if",
"hasattr",
"(",
"destination",
",",
"'seek'",
")",
":",
"destination",
".",
"seek",
"(",
"0",
")",
"return",
"destination"
] | save image to the specified path . | train | true |
21,138 | def add_ordered_mock_handlers(opener, meth_spec):
handlers = []
count = 0
for meths in meth_spec:
class MockHandlerSubclass(MockHandler, ):
pass
h = MockHandlerSubclass(meths)
h.handler_order += count
h.add_parent(opener)
count = (count + 1)
handlers.append(h)
opener.add_handler(h)
return handlers
| [
"def",
"add_ordered_mock_handlers",
"(",
"opener",
",",
"meth_spec",
")",
":",
"handlers",
"=",
"[",
"]",
"count",
"=",
"0",
"for",
"meths",
"in",
"meth_spec",
":",
"class",
"MockHandlerSubclass",
"(",
"MockHandler",
",",
")",
":",
"pass",
"h",
"=",
"MockHandlerSubclass",
"(",
"meths",
")",
"h",
".",
"handler_order",
"+=",
"count",
"h",
".",
"add_parent",
"(",
"opener",
")",
"count",
"=",
"(",
"count",
"+",
"1",
")",
"handlers",
".",
"append",
"(",
"h",
")",
"opener",
".",
"add_handler",
"(",
"h",
")",
"return",
"handlers"
] | create mockhandlers and add them to an openerdirector . | train | false |
21,140 | def parse_refs(container, refspecs):
if (not isinstance(refspecs, list)):
refspecs = [refspecs]
ret = []
for refspec in refspecs:
ret.append(parse_ref(container, refspec))
return ret
| [
"def",
"parse_refs",
"(",
"container",
",",
"refspecs",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"refspecs",
",",
"list",
")",
")",
":",
"refspecs",
"=",
"[",
"refspecs",
"]",
"ret",
"=",
"[",
"]",
"for",
"refspec",
"in",
"refspecs",
":",
"ret",
".",
"append",
"(",
"parse_ref",
"(",
"container",
",",
"refspec",
")",
")",
"return",
"ret"
] | parse a list of refspecs to a list of refs . | train | false |
21,141 | def reservation_expire(context):
return IMPL.reservation_expire(context)
| [
"def",
"reservation_expire",
"(",
"context",
")",
":",
"return",
"IMPL",
".",
"reservation_expire",
"(",
"context",
")"
] | roll back any expired reservations . | train | false |
21,142 | def contract_creation_exceptions():
return {sa.Table: ['ml2_geneve_allocations', 'ml2_geneve_endpoints'], sa.Index: ['ml2_geneve_allocations']}
| [
"def",
"contract_creation_exceptions",
"(",
")",
":",
"return",
"{",
"sa",
".",
"Table",
":",
"[",
"'ml2_geneve_allocations'",
",",
"'ml2_geneve_endpoints'",
"]",
",",
"sa",
".",
"Index",
":",
"[",
"'ml2_geneve_allocations'",
"]",
"}"
] | return create exceptions . | train | false |
21,143 | def md5(s):
m = hash_util.md5()
m.update(s)
return m.hexdigest()
| [
"def",
"md5",
"(",
"s",
")",
":",
"m",
"=",
"hash_util",
".",
"md5",
"(",
")",
"m",
".",
"update",
"(",
"s",
")",
"return",
"m",
".",
"hexdigest",
"(",
")"
] | print the md5 sums of the release files . | train | false |
21,144 | def office_type():
return s3_rest_controller()
| [
"def",
"office_type",
"(",
")",
":",
"return",
"s3_rest_controller",
"(",
")"
] | restful crud controller . | train | false |
21,145 | @jinja2.contextfunction
@library.global_function
def breadcrumbs(context, items=list(), add_default=True, id=None):
if add_default:
first_crumb = u'Home'
crumbs = [(reverse('home'), _lazy(first_crumb))]
else:
crumbs = []
if items:
try:
crumbs += items
except TypeError:
crumbs.append(items)
c = {'breadcrumbs': crumbs, 'id': id}
return jinja2.Markup(render_to_string('layout/breadcrumbs.html', c))
| [
"@",
"jinja2",
".",
"contextfunction",
"@",
"library",
".",
"global_function",
"def",
"breadcrumbs",
"(",
"context",
",",
"items",
"=",
"list",
"(",
")",
",",
"add_default",
"=",
"True",
",",
"id",
"=",
"None",
")",
":",
"if",
"add_default",
":",
"first_crumb",
"=",
"u'Home'",
"crumbs",
"=",
"[",
"(",
"reverse",
"(",
"'home'",
")",
",",
"_lazy",
"(",
"first_crumb",
")",
")",
"]",
"else",
":",
"crumbs",
"=",
"[",
"]",
"if",
"items",
":",
"try",
":",
"crumbs",
"+=",
"items",
"except",
"TypeError",
":",
"crumbs",
".",
"append",
"(",
"items",
")",
"c",
"=",
"{",
"'breadcrumbs'",
":",
"crumbs",
",",
"'id'",
":",
"id",
"}",
"return",
"jinja2",
".",
"Markup",
"(",
"render_to_string",
"(",
"'layout/breadcrumbs.html'",
",",
"c",
")",
")"
] | create the breadcrumb trail to this page of documentation . | train | false |
21,146 | def get_stuck_jobs(recency_msecs):
threshold_time = (datetime.datetime.utcnow() - datetime.timedelta(0, 0, 0, recency_msecs))
shard_state_model_class = mapreduce_model.ShardState
recent_job_models = shard_state_model_class.all()
stuck_jobs = []
for job_model in recent_job_models:
if ((job_model.update_time > threshold_time) and (job_model.retries > 0)):
stuck_jobs.append(job_model)
return stuck_jobs
| [
"def",
"get_stuck_jobs",
"(",
"recency_msecs",
")",
":",
"threshold_time",
"=",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"0",
",",
"0",
",",
"0",
",",
"recency_msecs",
")",
")",
"shard_state_model_class",
"=",
"mapreduce_model",
".",
"ShardState",
"recent_job_models",
"=",
"shard_state_model_class",
".",
"all",
"(",
")",
"stuck_jobs",
"=",
"[",
"]",
"for",
"job_model",
"in",
"recent_job_models",
":",
"if",
"(",
"(",
"job_model",
".",
"update_time",
">",
"threshold_time",
")",
"and",
"(",
"job_model",
".",
"retries",
">",
"0",
")",
")",
":",
"stuck_jobs",
".",
"append",
"(",
"job_model",
")",
"return",
"stuck_jobs"
] | returns a list of jobs which were last updated at most recency_msecs milliseconds ago and have experienced more than one retry . | train | false |
21,149 | def osx_standby():
try:
subprocess.call(['osascript', '-e', 'tell app "System Events" to sleep'])
time.sleep(10)
except:
logging.error(T('Failed to standby system'))
logging.info('Traceback: ', exc_info=True)
| [
"def",
"osx_standby",
"(",
")",
":",
"try",
":",
"subprocess",
".",
"call",
"(",
"[",
"'osascript'",
",",
"'-e'",
",",
"'tell app \"System Events\" to sleep'",
"]",
")",
"time",
".",
"sleep",
"(",
"10",
")",
"except",
":",
"logging",
".",
"error",
"(",
"T",
"(",
"'Failed to standby system'",
")",
")",
"logging",
".",
"info",
"(",
"'Traceback: '",
",",
"exc_info",
"=",
"True",
")"
] | make osx system sleep . | train | false |
21,150 | def _determinism_source_date_epoch(format, string, keyword='CreationDate'):
buff = check_output([sys.executable, u'-R', u'-c', (u'import matplotlib; matplotlib._called_from_pytest = True; matplotlib.use(%r); from matplotlib.testing.determinism import _determinism_save;_determinism_save(%r,%r)' % (format, u'', format))])
find_keyword = re.compile((('.*' + keyword) + '.*'))
key = find_keyword.search(buff)
if key:
print(key.group())
else:
print((u'Timestamp keyword (%s) not found!' % keyword))
assert (string in buff)
| [
"def",
"_determinism_source_date_epoch",
"(",
"format",
",",
"string",
",",
"keyword",
"=",
"'CreationDate'",
")",
":",
"buff",
"=",
"check_output",
"(",
"[",
"sys",
".",
"executable",
",",
"u'-R'",
",",
"u'-c'",
",",
"(",
"u'import matplotlib; matplotlib._called_from_pytest = True; matplotlib.use(%r); from matplotlib.testing.determinism import _determinism_save;_determinism_save(%r,%r)'",
"%",
"(",
"format",
",",
"u''",
",",
"format",
")",
")",
"]",
")",
"find_keyword",
"=",
"re",
".",
"compile",
"(",
"(",
"(",
"'.*'",
"+",
"keyword",
")",
"+",
"'.*'",
")",
")",
"key",
"=",
"find_keyword",
".",
"search",
"(",
"buff",
")",
"if",
"key",
":",
"print",
"(",
"key",
".",
"group",
"(",
")",
")",
"else",
":",
"print",
"(",
"(",
"u'Timestamp keyword (%s) not found!'",
"%",
"keyword",
")",
")",
"assert",
"(",
"string",
"in",
"buff",
")"
] | test source_date_epoch support . | train | false |
21,151 | def getChainText(fileName, procedure):
text = ''
if (fileName.endswith('.gcode') or fileName.endswith('.svg')):
text = archive.getFileText(fileName)
procedures = getProcedures(procedure, text)
return getChainTextFromProcedures(fileName, procedures, text)
| [
"def",
"getChainText",
"(",
"fileName",
",",
"procedure",
")",
":",
"text",
"=",
"''",
"if",
"(",
"fileName",
".",
"endswith",
"(",
"'.gcode'",
")",
"or",
"fileName",
".",
"endswith",
"(",
"'.svg'",
")",
")",
":",
"text",
"=",
"archive",
".",
"getFileText",
"(",
"fileName",
")",
"procedures",
"=",
"getProcedures",
"(",
"procedure",
",",
"text",
")",
"return",
"getChainTextFromProcedures",
"(",
"fileName",
",",
"procedures",
",",
"text",
")"
] | get a crafted shape file . | train | false |
21,153 | @task
def apt(packages):
return sudo((u'apt-get install -y -q ' + packages))
| [
"@",
"task",
"def",
"apt",
"(",
"packages",
")",
":",
"return",
"sudo",
"(",
"(",
"u'apt-get install -y -q '",
"+",
"packages",
")",
")"
] | installs one or more system packages via apt . | train | false |
21,154 | def get_license_text(license):
if (license in GPL_LICENSES):
(name, version) = GPL_LICENSES[license]
return BASE_GPL.format(name=name, version=version).splitlines()
elif (license == OSI):
return BASE_OSI.splitlines()
else:
return ''
| [
"def",
"get_license_text",
"(",
"license",
")",
":",
"if",
"(",
"license",
"in",
"GPL_LICENSES",
")",
":",
"(",
"name",
",",
"version",
")",
"=",
"GPL_LICENSES",
"[",
"license",
"]",
"return",
"BASE_GPL",
".",
"format",
"(",
"name",
"=",
"name",
",",
"version",
"=",
"version",
")",
".",
"splitlines",
"(",
")",
"elif",
"(",
"license",
"==",
"OSI",
")",
":",
"return",
"BASE_OSI",
".",
"splitlines",
"(",
")",
"else",
":",
"return",
"''"
] | get the python license header for a license . | train | false |
21,155 | def show_current(name):
try:
return _read_link(name)
except OSError:
log.error('alternative: {0} does not exist'.format(name))
return False
| [
"def",
"show_current",
"(",
"name",
")",
":",
"try",
":",
"return",
"_read_link",
"(",
"name",
")",
"except",
"OSError",
":",
"log",
".",
"error",
"(",
"'alternative: {0} does not exist'",
".",
"format",
"(",
"name",
")",
")",
"return",
"False"
] | display the current highest-priority alternative for a given alternatives link cli example: . | train | false |
21,156 | def invertQTransform(tr):
try:
import numpy.linalg
arr = np.array([[tr.m11(), tr.m12(), tr.m13()], [tr.m21(), tr.m22(), tr.m23()], [tr.m31(), tr.m32(), tr.m33()]])
inv = numpy.linalg.inv(arr)
return QtGui.QTransform(inv[(0, 0)], inv[(0, 1)], inv[(0, 2)], inv[(1, 0)], inv[(1, 1)], inv[(1, 2)], inv[(2, 0)], inv[(2, 1)])
except ImportError:
inv = tr.inverted()
if (inv[1] is False):
raise Exception('Transform is not invertible.')
return inv[0]
| [
"def",
"invertQTransform",
"(",
"tr",
")",
":",
"try",
":",
"import",
"numpy",
".",
"linalg",
"arr",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"tr",
".",
"m11",
"(",
")",
",",
"tr",
".",
"m12",
"(",
")",
",",
"tr",
".",
"m13",
"(",
")",
"]",
",",
"[",
"tr",
".",
"m21",
"(",
")",
",",
"tr",
".",
"m22",
"(",
")",
",",
"tr",
".",
"m23",
"(",
")",
"]",
",",
"[",
"tr",
".",
"m31",
"(",
")",
",",
"tr",
".",
"m32",
"(",
")",
",",
"tr",
".",
"m33",
"(",
")",
"]",
"]",
")",
"inv",
"=",
"numpy",
".",
"linalg",
".",
"inv",
"(",
"arr",
")",
"return",
"QtGui",
".",
"QTransform",
"(",
"inv",
"[",
"(",
"0",
",",
"0",
")",
"]",
",",
"inv",
"[",
"(",
"0",
",",
"1",
")",
"]",
",",
"inv",
"[",
"(",
"0",
",",
"2",
")",
"]",
",",
"inv",
"[",
"(",
"1",
",",
"0",
")",
"]",
",",
"inv",
"[",
"(",
"1",
",",
"1",
")",
"]",
",",
"inv",
"[",
"(",
"1",
",",
"2",
")",
"]",
",",
"inv",
"[",
"(",
"2",
",",
"0",
")",
"]",
",",
"inv",
"[",
"(",
"2",
",",
"1",
")",
"]",
")",
"except",
"ImportError",
":",
"inv",
"=",
"tr",
".",
"inverted",
"(",
")",
"if",
"(",
"inv",
"[",
"1",
"]",
"is",
"False",
")",
":",
"raise",
"Exception",
"(",
"'Transform is not invertible.'",
")",
"return",
"inv",
"[",
"0",
"]"
] | return a qtransform that is the inverse of *tr* . | train | false |
21,157 | @utils.arg('ip', metavar='<ip>', help=_('IP address.'))
@utils.arg('name', metavar='<name>', help=_('DNS name.'))
@utils.arg('domain', metavar='<domain>', help=_('DNS domain.'))
@utils.arg('--type', metavar='<type>', help=_('DNS type (e.g. "A")'), default='A')
@deprecated_network
def do_dns_create(cs, args):
    """Create a DNS entry for the given domain, name, and IP.

    CLI handler: forwards the parsed ``args`` (domain, name, ip, and the
    record type, which defaults to ``'A'``) to the DNS-entries manager on
    the client ``cs``.
    """
    cs.dns_entries.create(args.domain, args.name, args.ip, args.type)
| [
"@",
"utils",
".",
"arg",
"(",
"'ip'",
",",
"metavar",
"=",
"'<ip>'",
",",
"help",
"=",
"_",
"(",
"'IP address.'",
")",
")",
"@",
"utils",
".",
"arg",
"(",
"'name'",
",",
"metavar",
"=",
"'<name>'",
",",
"help",
"=",
"_",
"(",
"'DNS name.'",
")",
")",
"@",
"utils",
".",
"arg",
"(",
"'domain'",
",",
"metavar",
"=",
"'<domain>'",
",",
"help",
"=",
"_",
"(",
"'DNS domain.'",
")",
")",
"@",
"utils",
".",
"arg",
"(",
"'--type'",
",",
"metavar",
"=",
"'<type>'",
",",
"help",
"=",
"_",
"(",
"'DNS type (e.g. \"A\")'",
")",
",",
"default",
"=",
"'A'",
")",
"@",
"deprecated_network",
"def",
"do_dns_create",
"(",
"cs",
",",
"args",
")",
":",
"cs",
".",
"dns_entries",
".",
"create",
"(",
"args",
".",
"domain",
",",
"args",
".",
"name",
",",
"args",
".",
"ip",
",",
"args",
".",
"type",
")"
] | create a dns entry for domain . | train | false |
def streams(url, **params):
    """Attempt to find a plugin for *url* and extract its streams.

    Convenience wrapper: builds a throwaway session and delegates to its
    ``streams`` method, forwarding *params* unchanged.
    """
    return Livestreamer().streams(url, **params)
| [
"def",
"streams",
"(",
"url",
",",
"**",
"params",
")",
":",
"session",
"=",
"Livestreamer",
"(",
")",
"return",
"session",
".",
"streams",
"(",
"url",
",",
"**",
"params",
")"
] | attempts to find a plugin and extract streams from the *url* . | train | false |
def input_(prompt=None):
    """Like ``raw_input()``, but encoding-aware.

    A non-empty *prompt* is encoded for the console (with replacement)
    before display, and the reply is decoded from the stdin encoding,
    falling back to UTF-8 and ignoring undecodable bytes.

    Raises:
        UserError: if stdin ends before a line can be read.
    """
    if prompt:
        shown = prompt
        if isinstance(shown, unicode):
            shown = shown.encode(_encoding(), 'replace')
        print(shown, end=' ')
    try:
        reply = raw_input()
    except EOFError:
        raise UserError('stdin stream ended while input required')
    encoding = (sys.stdin.encoding or 'utf8')
    return reply.decode(encoding, 'ignore')
| [
"def",
"input_",
"(",
"prompt",
"=",
"None",
")",
":",
"if",
"prompt",
":",
"if",
"isinstance",
"(",
"prompt",
",",
"unicode",
")",
":",
"prompt",
"=",
"prompt",
".",
"encode",
"(",
"_encoding",
"(",
")",
",",
"'replace'",
")",
"print",
"(",
"prompt",
",",
"end",
"=",
"' '",
")",
"try",
":",
"resp",
"=",
"raw_input",
"(",
")",
"except",
"EOFError",
":",
"raise",
"UserError",
"(",
"'stdin stream ended while input required'",
")",
"return",
"resp",
".",
"decode",
"(",
"(",
"sys",
".",
"stdin",
".",
"encoding",
"or",
"'utf8'",
")",
",",
"'ignore'",
")"
] | like input . | train | false |
def delete_ikepolicy(ikepolicy, profile=None):
    """Delete the specified IKE policy.

    ``profile`` selects which auth profile to connect with; the call is
    delegated to the authenticated connection.
    """
    return _auth(profile).delete_ikepolicy(ikepolicy)
| [
"def",
"delete_ikepolicy",
"(",
"ikepolicy",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_auth",
"(",
"profile",
")",
"return",
"conn",
".",
"delete_ikepolicy",
"(",
"ikepolicy",
")"
] | deletes the specified IKE policy . | train | false
def getMaximumByPathComplex(path):
    """Return a complex whose parts are componentwise maxima over *path*.

    Starts from a large negative sentinel so any real point dominates it;
    an empty *path* yields the sentinel itself.
    """
    best = complex((-999999999.0), (-999999999.0))
    for vertex in path:
        best = getMaximum(best, vertex)
    return best
| [
"def",
"getMaximumByPathComplex",
"(",
"path",
")",
":",
"maximum",
"=",
"complex",
"(",
"(",
"-",
"999999999.0",
")",
",",
"(",
"-",
"999999999.0",
")",
")",
"for",
"point",
"in",
"path",
":",
"maximum",
"=",
"getMaximum",
"(",
"maximum",
",",
"point",
")",
"return",
"maximum"
] | get a complex with each component the maximum of the respective components of a list of complex points . | train | false |
def callback_for(h, fd, flag, *default):
    """Return the callback registered on hub *h* for *fd* under *flag*.

    WRITE lookups are redirected to the hub's consolidate callback when
    *fd* is marked for consolidation.  If no callback is registered, the
    optional *default* is returned; with no default, KeyError propagates.
    """
    try:
        if flag & READ:
            return h.readers[fd]
        if flag & WRITE:
            if fd in h.consolidate:
                return h.consolidate_callback
            return h.writers[fd]
    except KeyError:
        if not default:
            raise
        return default[0]
| [
"def",
"callback_for",
"(",
"h",
",",
"fd",
",",
"flag",
",",
"*",
"default",
")",
":",
"try",
":",
"if",
"(",
"flag",
"&",
"READ",
")",
":",
"return",
"h",
".",
"readers",
"[",
"fd",
"]",
"if",
"(",
"flag",
"&",
"WRITE",
")",
":",
"if",
"(",
"fd",
"in",
"h",
".",
"consolidate",
")",
":",
"return",
"h",
".",
"consolidate_callback",
"return",
"h",
".",
"writers",
"[",
"fd",
"]",
"except",
"KeyError",
":",
"if",
"default",
":",
"return",
"default",
"[",
"0",
"]",
"raise"
] | return the callback used for hub+fd+flag . | train | false |
def declarative_base(bind=None, metadata=None, mapper=None, cls=object, name='Base', constructor=_declarative_constructor, class_registry=None, metaclass=DeclarativeMeta):
    """Construct a base class for declarative class definitions.

    The returned class is produced by *metaclass* from *name*, the base(s)
    given by *cls*, and a namespace carrying the shared class registry and
    MetaData.  ``bind``, ``constructor``, and ``mapper`` customize the
    metadata binding, ``__init__``, and mapper class respectively.
    """
    meta = (metadata or MetaData())
    if bind:
        meta.bind = bind
    if class_registry is None:
        class_registry = weakref.WeakValueDictionary()
    if isinstance(cls, tuple):
        bases = cls
    else:
        bases = (cls,)
    namespace = {'_decl_class_registry': class_registry, 'metadata': meta}
    if isinstance(cls, type):
        namespace['__doc__'] = cls.__doc__
    if constructor:
        namespace['__init__'] = constructor
    if mapper:
        namespace['__mapper_cls__'] = mapper
    return metaclass(name, bases, namespace)
| [
"def",
"declarative_base",
"(",
"bind",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
"mapper",
"=",
"None",
",",
"cls",
"=",
"object",
",",
"name",
"=",
"'Base'",
",",
"constructor",
"=",
"_declarative_constructor",
",",
"class_registry",
"=",
"None",
",",
"metaclass",
"=",
"DeclarativeMeta",
")",
":",
"lcl_metadata",
"=",
"(",
"metadata",
"or",
"MetaData",
"(",
")",
")",
"if",
"bind",
":",
"lcl_metadata",
".",
"bind",
"=",
"bind",
"if",
"(",
"class_registry",
"is",
"None",
")",
":",
"class_registry",
"=",
"weakref",
".",
"WeakValueDictionary",
"(",
")",
"bases",
"=",
"(",
"(",
"(",
"not",
"isinstance",
"(",
"cls",
",",
"tuple",
")",
")",
"and",
"(",
"cls",
",",
")",
")",
"or",
"cls",
")",
"class_dict",
"=",
"dict",
"(",
"_decl_class_registry",
"=",
"class_registry",
",",
"metadata",
"=",
"lcl_metadata",
")",
"if",
"isinstance",
"(",
"cls",
",",
"type",
")",
":",
"class_dict",
"[",
"'__doc__'",
"]",
"=",
"cls",
".",
"__doc__",
"if",
"constructor",
":",
"class_dict",
"[",
"'__init__'",
"]",
"=",
"constructor",
"if",
"mapper",
":",
"class_dict",
"[",
"'__mapper_cls__'",
"]",
"=",
"mapper",
"return",
"metaclass",
"(",
"name",
",",
"bases",
",",
"class_dict",
")"
] | construct a base class for declarative class definitions . | train | false |
def free_lock(id):
    """Attempt to free lock *id*; return True on success, False otherwise.

    The lock file is renamed aside before removal, so a crash between the
    two steps cannot leave a live-looking lock file behind.
    """
    try:
        filename = _lock_file(id)
        os.rename(filename, (filename + '.redundant'))
        os.remove((filename + '.redundant'))
        return True
    except OSError:
        # Narrowed from a bare ``except``: only filesystem failures mean
        # "could not free the lock".  Programming errors (e.g. a bad id)
        # now propagate instead of being silently reported as False, and
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        return False
| [
"def",
"free_lock",
"(",
"id",
")",
":",
"try",
":",
"filename",
"=",
"_lock_file",
"(",
"id",
")",
"os",
".",
"rename",
"(",
"filename",
",",
"(",
"filename",
"+",
"'.redundant'",
")",
")",
"os",
".",
"remove",
"(",
"(",
"filename",
"+",
"'.redundant'",
")",
")",
"return",
"True",
"except",
":",
"return",
"False"
] | attempts to free lock id . | train | false |
def get_folder_and_search_path(path, sep):
    """Break down *path* into its folder prefix and trailing search fragment.

    The split point is just after the last occurrence of *sep*; when *path*
    is exactly *sep*, the folder is *sep* itself.  Returns a
    ``(folder, search_path)`` tuple.
    """
    cut = (path.rfind(sep) + 1)
    if path == sep:
        folder = sep
    else:
        folder = path[:cut]
    return (folder, path[cut:])
| [
"def",
"get_folder_and_search_path",
"(",
"path",
",",
"sep",
")",
":",
"folder",
"=",
"(",
"path",
"[",
":",
"(",
"path",
".",
"rfind",
"(",
"sep",
")",
"+",
"1",
")",
"]",
"if",
"(",
"path",
"!=",
"sep",
")",
"else",
"sep",
")",
"search_path",
"=",
"path",
"[",
"(",
"path",
".",
"rfind",
"(",
"sep",
")",
"+",
"1",
")",
":",
"]",
"return",
"(",
"folder",
",",
"search_path",
")"
] | break down the search path . | train | false
def fail_on_npm_install(arg):
    """Test helper: simulate a failing ``npm install`` subprocess call.

    Raises BuildFailure for any command string containing 'npm install';
    every other command succeeds silently (returns None).
    """
    if 'npm install' not in arg:
        return
    raise BuildFailure('Subprocess return code: 1')
| [
"def",
"fail_on_npm_install",
"(",
"arg",
")",
":",
"if",
"(",
"'npm install'",
"in",
"arg",
")",
":",
"raise",
"BuildFailure",
"(",
"'Subprocess return code: 1'",
")",
"else",
":",
"return"
] | for our tests . | train | false |
def __del(collection, item, _sa_initiator=None):
    """Do the actual delete: fire the remove event for *item*.

    Skipped entirely when ``_sa_initiator`` is the sentinel ``False``
    (event suppressed); otherwise the collection's adapter, when one is
    attached, is notified of the removal.
    """
    if _sa_initiator is False:
        return
    adapter = collection._sa_adapter
    if adapter:
        adapter.fire_remove_event(item, _sa_initiator)
| [
"def",
"__del",
"(",
"collection",
",",
"item",
",",
"_sa_initiator",
"=",
"None",
")",
":",
"if",
"(",
"_sa_initiator",
"is",
"not",
"False",
")",
":",
"executor",
"=",
"collection",
".",
"_sa_adapter",
"if",
"executor",
":",
"executor",
".",
"fire_remove_event",
"(",
"item",
",",
"_sa_initiator",
")"
] | do the actual delete . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.