id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
20,099 | def check_duplicates():
s3.prep = (lambda r: (r.method == 'check_duplicates'))
return s3_rest_controller(module, 'person')
| [
"def",
"check_duplicates",
"(",
")",
":",
"s3",
".",
"prep",
"=",
"(",
"lambda",
"r",
":",
"(",
"r",
".",
"method",
"==",
"'check_duplicates'",
")",
")",
"return",
"s3_rest_controller",
"(",
"module",
",",
"'person'",
")"
] | person rest controller - limited to just check_duplicates for use in s3addpersonwidget2 - allows differential access permissions . | train | false |
20,101 | def _build_emrfs_properties(emrfs_args):
emrfs_properties = OrderedDict()
if _need_to_configure_consistent_view(emrfs_args):
_update_properties_for_consistent_view(emrfs_properties, emrfs_args)
if _need_to_configure_sse(emrfs_args):
_update_properties_for_sse(emrfs_properties, emrfs_args)
if _need_to_configure_cse(emrfs_args, 'KMS'):
_update_properties_for_cse(emrfs_properties, emrfs_args, 'KMS')
if _need_to_configure_cse(emrfs_args, 'CUSTOM'):
_update_properties_for_cse(emrfs_properties, emrfs_args, 'CUSTOM')
if ('Args' in emrfs_args):
for arg_value in emrfs_args.get('Args'):
(key, value) = emrutils.split_to_key_value(arg_value)
emrfs_properties[key] = value
return emrfs_properties
| [
"def",
"_build_emrfs_properties",
"(",
"emrfs_args",
")",
":",
"emrfs_properties",
"=",
"OrderedDict",
"(",
")",
"if",
"_need_to_configure_consistent_view",
"(",
"emrfs_args",
")",
":",
"_update_properties_for_consistent_view",
"(",
"emrfs_properties",
",",
"emrfs_args",
")",
"if",
"_need_to_configure_sse",
"(",
"emrfs_args",
")",
":",
"_update_properties_for_sse",
"(",
"emrfs_properties",
",",
"emrfs_args",
")",
"if",
"_need_to_configure_cse",
"(",
"emrfs_args",
",",
"'KMS'",
")",
":",
"_update_properties_for_cse",
"(",
"emrfs_properties",
",",
"emrfs_args",
",",
"'KMS'",
")",
"if",
"_need_to_configure_cse",
"(",
"emrfs_args",
",",
"'CUSTOM'",
")",
":",
"_update_properties_for_cse",
"(",
"emrfs_properties",
",",
"emrfs_args",
",",
"'CUSTOM'",
")",
"if",
"(",
"'Args'",
"in",
"emrfs_args",
")",
":",
"for",
"arg_value",
"in",
"emrfs_args",
".",
"get",
"(",
"'Args'",
")",
":",
"(",
"key",
",",
"value",
")",
"=",
"emrutils",
".",
"split_to_key_value",
"(",
"arg_value",
")",
"emrfs_properties",
"[",
"key",
"]",
"=",
"value",
"return",
"emrfs_properties"
] | assumption: emrfs_args is valid i . | train | false |
20,102 | def _iter_dir(dir_, saltenv):
ret = []
for fn_ in os.listdir(dir_):
path = os.path.join(dir_, fn_)
if os.path.isdir(path):
(yield _iter_dir(path, saltenv))
elif os.path.isfile(path):
with salt.utils.fopen(path) as fp_:
if salt.utils.istextfile(fp_):
ret.append({'path': six.text_type(path), 'saltenv': six.text_type(saltenv), 'content': six.text_type(fp_.read())})
else:
ret.append({'path': six.text_type(path), 'saltenv': six.text_type(saltenv), 'content': u'bin'})
(yield ret)
| [
"def",
"_iter_dir",
"(",
"dir_",
",",
"saltenv",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"fn_",
"in",
"os",
".",
"listdir",
"(",
"dir_",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_",
",",
"fn_",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"(",
"yield",
"_iter_dir",
"(",
"path",
",",
"saltenv",
")",
")",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"with",
"salt",
".",
"utils",
".",
"fopen",
"(",
"path",
")",
"as",
"fp_",
":",
"if",
"salt",
".",
"utils",
".",
"istextfile",
"(",
"fp_",
")",
":",
"ret",
".",
"append",
"(",
"{",
"'path'",
":",
"six",
".",
"text_type",
"(",
"path",
")",
",",
"'saltenv'",
":",
"six",
".",
"text_type",
"(",
"saltenv",
")",
",",
"'content'",
":",
"six",
".",
"text_type",
"(",
"fp_",
".",
"read",
"(",
")",
")",
"}",
")",
"else",
":",
"ret",
".",
"append",
"(",
"{",
"'path'",
":",
"six",
".",
"text_type",
"(",
"path",
")",
",",
"'saltenv'",
":",
"six",
".",
"text_type",
"(",
"saltenv",
")",
",",
"'content'",
":",
"u'bin'",
"}",
")",
"(",
"yield",
"ret",
")"
] | walk a dir path looking for files and marking their content type . | train | false |
20,103 | def make_non_outlier_interval(d1, d2):
return graph_objs.Scatter(x=[0, 0], y=[d1, d2], name='', mode='lines', line=graph_objs.Line(width=1.5, color='rgb(0,0,0)'))
| [
"def",
"make_non_outlier_interval",
"(",
"d1",
",",
"d2",
")",
":",
"return",
"graph_objs",
".",
"Scatter",
"(",
"x",
"=",
"[",
"0",
",",
"0",
"]",
",",
"y",
"=",
"[",
"d1",
",",
"d2",
"]",
",",
"name",
"=",
"''",
",",
"mode",
"=",
"'lines'",
",",
"line",
"=",
"graph_objs",
".",
"Line",
"(",
"width",
"=",
"1.5",
",",
"color",
"=",
"'rgb(0,0,0)'",
")",
")"
] | returns the scatterplot fig of most of a violin plot . | train | false |
20,104 | def get_max_data_extent(net, layer, rc, is_conv):
if is_conv:
conv_size = net.blobs[layer].data.shape[2:4]
layer_slice_middle = ((conv_size[0] / 2), ((conv_size[0] / 2) + 1), (conv_size[1] / 2), ((conv_size[1] / 2) + 1))
data_slice = rc.convert_region(layer, 'data', layer_slice_middle)
return ((data_slice[1] - data_slice[0]), (data_slice[3] - data_slice[2]))
else:
return net.blobs['data'].data.shape[2:4]
| [
"def",
"get_max_data_extent",
"(",
"net",
",",
"layer",
",",
"rc",
",",
"is_conv",
")",
":",
"if",
"is_conv",
":",
"conv_size",
"=",
"net",
".",
"blobs",
"[",
"layer",
"]",
".",
"data",
".",
"shape",
"[",
"2",
":",
"4",
"]",
"layer_slice_middle",
"=",
"(",
"(",
"conv_size",
"[",
"0",
"]",
"/",
"2",
")",
",",
"(",
"(",
"conv_size",
"[",
"0",
"]",
"/",
"2",
")",
"+",
"1",
")",
",",
"(",
"conv_size",
"[",
"1",
"]",
"/",
"2",
")",
",",
"(",
"(",
"conv_size",
"[",
"1",
"]",
"/",
"2",
")",
"+",
"1",
")",
")",
"data_slice",
"=",
"rc",
".",
"convert_region",
"(",
"layer",
",",
"'data'",
",",
"layer_slice_middle",
")",
"return",
"(",
"(",
"data_slice",
"[",
"1",
"]",
"-",
"data_slice",
"[",
"0",
"]",
")",
",",
"(",
"data_slice",
"[",
"3",
"]",
"-",
"data_slice",
"[",
"2",
"]",
")",
")",
"else",
":",
"return",
"net",
".",
"blobs",
"[",
"'data'",
"]",
".",
"data",
".",
"shape",
"[",
"2",
":",
"4",
"]"
] | gets the maximum size of the data layer that can influence a unit on layer . | train | false |
20,105 | def password_validators_help_texts(password_validators=None):
help_texts = []
if (password_validators is None):
password_validators = get_default_password_validators()
for validator in password_validators:
help_texts.append(validator.get_help_text())
return help_texts
| [
"def",
"password_validators_help_texts",
"(",
"password_validators",
"=",
"None",
")",
":",
"help_texts",
"=",
"[",
"]",
"if",
"(",
"password_validators",
"is",
"None",
")",
":",
"password_validators",
"=",
"get_default_password_validators",
"(",
")",
"for",
"validator",
"in",
"password_validators",
":",
"help_texts",
".",
"append",
"(",
"validator",
".",
"get_help_text",
"(",
")",
")",
"return",
"help_texts"
] | return a list of all help texts of all configured validators . | train | false |
20,106 | def hydrate_target(target_adaptor, hydrated_fields):
kwargs = target_adaptor.kwargs()
for field in hydrated_fields:
kwargs[field.name] = field.value
return HydratedTarget(target_adaptor.address, TargetAdaptor(**kwargs), tuple(target_adaptor.dependencies))
| [
"def",
"hydrate_target",
"(",
"target_adaptor",
",",
"hydrated_fields",
")",
":",
"kwargs",
"=",
"target_adaptor",
".",
"kwargs",
"(",
")",
"for",
"field",
"in",
"hydrated_fields",
":",
"kwargs",
"[",
"field",
".",
"name",
"]",
"=",
"field",
".",
"value",
"return",
"HydratedTarget",
"(",
"target_adaptor",
".",
"address",
",",
"TargetAdaptor",
"(",
"**",
"kwargs",
")",
",",
"tuple",
"(",
"target_adaptor",
".",
"dependencies",
")",
")"
] | construct a hydratedtarget from a targetadaptor and hydrated versions of its adapted fields . | train | false |
20,108 | @register.simple_tag
def admin_static_url():
return (getattr(settings, 'ADMIN_MEDIA_PREFIX', None) or ''.join([settings.STATIC_URL, 'admin/']))
| [
"@",
"register",
".",
"simple_tag",
"def",
"admin_static_url",
"(",
")",
":",
"return",
"(",
"getattr",
"(",
"settings",
",",
"'ADMIN_MEDIA_PREFIX'",
",",
"None",
")",
"or",
"''",
".",
"join",
"(",
"[",
"settings",
".",
"STATIC_URL",
",",
"'admin/'",
"]",
")",
")"
] | if set . | train | false |
20,109 | def clean_data_container(data_container):
result = []
for (key, value, path, setter) in data_container.iter_setters():
if value.isdigit():
_type = 'number'
else:
_type = 'string'
result.append(('%s=%s' % (key, _type)))
return '&'.join(result)
| [
"def",
"clean_data_container",
"(",
"data_container",
")",
":",
"result",
"=",
"[",
"]",
"for",
"(",
"key",
",",
"value",
",",
"path",
",",
"setter",
")",
"in",
"data_container",
".",
"iter_setters",
"(",
")",
":",
"if",
"value",
".",
"isdigit",
"(",
")",
":",
"_type",
"=",
"'number'",
"else",
":",
"_type",
"=",
"'string'",
"result",
".",
"append",
"(",
"(",
"'%s=%s'",
"%",
"(",
"key",
",",
"_type",
")",
")",
")",
"return",
"'&'",
".",
"join",
"(",
"result",
")"
] | a simplified/serialized version of the data container . | train | false |
20,111 | def _send_feedback_thread_status_change_emails(recipient_list, feedback_message_reference, old_status, new_status, exploration_id, has_suggestion):
can_users_receive_email = email_manager.can_users_receive_thread_email(recipient_list, exploration_id, has_suggestion)
for (index, recipient_id) in enumerate(recipient_list):
if can_users_receive_email[index]:
transaction_services.run_in_transaction(_enqueue_feedback_thread_status_change_email_task, recipient_id, feedback_message_reference, old_status, new_status)
| [
"def",
"_send_feedback_thread_status_change_emails",
"(",
"recipient_list",
",",
"feedback_message_reference",
",",
"old_status",
",",
"new_status",
",",
"exploration_id",
",",
"has_suggestion",
")",
":",
"can_users_receive_email",
"=",
"email_manager",
".",
"can_users_receive_thread_email",
"(",
"recipient_list",
",",
"exploration_id",
",",
"has_suggestion",
")",
"for",
"(",
"index",
",",
"recipient_id",
")",
"in",
"enumerate",
"(",
"recipient_list",
")",
":",
"if",
"can_users_receive_email",
"[",
"index",
"]",
":",
"transaction_services",
".",
"run_in_transaction",
"(",
"_enqueue_feedback_thread_status_change_email_task",
",",
"recipient_id",
",",
"feedback_message_reference",
",",
"old_status",
",",
"new_status",
")"
] | notifies the given recipients about the status change . | train | false |
20,112 | @register.inclusion_tag('filebrowser/include/_response.html', takes_context=True)
def query_string(context, add=None, remove=None):
add = string_to_dict(add)
remove = string_to_list(remove)
params = context['query'].copy()
response = get_query_string(params, add, remove)
return {'response': response}
| [
"@",
"register",
".",
"inclusion_tag",
"(",
"'filebrowser/include/_response.html'",
",",
"takes_context",
"=",
"True",
")",
"def",
"query_string",
"(",
"context",
",",
"add",
"=",
"None",
",",
"remove",
"=",
"None",
")",
":",
"add",
"=",
"string_to_dict",
"(",
"add",
")",
"remove",
"=",
"string_to_list",
"(",
"remove",
")",
"params",
"=",
"context",
"[",
"'query'",
"]",
".",
"copy",
"(",
")",
"response",
"=",
"get_query_string",
"(",
"params",
",",
"add",
",",
"remove",
")",
"return",
"{",
"'response'",
":",
"response",
"}"
] | allows the addition and removal of query string parameters . | train | false |
20,113 | def change_UPDATE_FREQUENCY(freq):
sickbeard.UPDATE_FREQUENCY = try_int(freq, sickbeard.DEFAULT_UPDATE_FREQUENCY)
if (sickbeard.UPDATE_FREQUENCY < sickbeard.MIN_UPDATE_FREQUENCY):
sickbeard.UPDATE_FREQUENCY = sickbeard.MIN_UPDATE_FREQUENCY
sickbeard.versionCheckScheduler.cycleTime = datetime.timedelta(hours=sickbeard.UPDATE_FREQUENCY)
| [
"def",
"change_UPDATE_FREQUENCY",
"(",
"freq",
")",
":",
"sickbeard",
".",
"UPDATE_FREQUENCY",
"=",
"try_int",
"(",
"freq",
",",
"sickbeard",
".",
"DEFAULT_UPDATE_FREQUENCY",
")",
"if",
"(",
"sickbeard",
".",
"UPDATE_FREQUENCY",
"<",
"sickbeard",
".",
"MIN_UPDATE_FREQUENCY",
")",
":",
"sickbeard",
".",
"UPDATE_FREQUENCY",
"=",
"sickbeard",
".",
"MIN_UPDATE_FREQUENCY",
"sickbeard",
".",
"versionCheckScheduler",
".",
"cycleTime",
"=",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"sickbeard",
".",
"UPDATE_FREQUENCY",
")"
] | change frequency of daily updater thread . | train | false |
20,114 | def loadProfile(filename, allMachines=False):
global settingsList
profileParser = ConfigParser.ConfigParser()
try:
profileParser.read(filename)
except ConfigParser.ParsingError:
return
if allMachines:
n = 0
while profileParser.has_section(('profile_%d' % n)):
for set in settingsList:
if set.isPreference():
continue
section = ('profile_%d' % n)
if set.isAlteration():
section = ('alterations_%d' % n)
if profileParser.has_option(section, set.getName()):
set.setValue(unicode(profileParser.get(section, set.getName()), 'utf-8', 'replace'), n)
n += 1
else:
for set in settingsList:
if set.isPreference():
continue
section = 'profile'
if set.isAlteration():
section = 'alterations'
if profileParser.has_option(section, set.getName()):
set.setValue(unicode(profileParser.get(section, set.getName()), 'utf-8', 'replace'))
if (getProfileSetting('retraction_combing') == '1'):
putProfileSetting('retraction_combing', 'All')
| [
"def",
"loadProfile",
"(",
"filename",
",",
"allMachines",
"=",
"False",
")",
":",
"global",
"settingsList",
"profileParser",
"=",
"ConfigParser",
".",
"ConfigParser",
"(",
")",
"try",
":",
"profileParser",
".",
"read",
"(",
"filename",
")",
"except",
"ConfigParser",
".",
"ParsingError",
":",
"return",
"if",
"allMachines",
":",
"n",
"=",
"0",
"while",
"profileParser",
".",
"has_section",
"(",
"(",
"'profile_%d'",
"%",
"n",
")",
")",
":",
"for",
"set",
"in",
"settingsList",
":",
"if",
"set",
".",
"isPreference",
"(",
")",
":",
"continue",
"section",
"=",
"(",
"'profile_%d'",
"%",
"n",
")",
"if",
"set",
".",
"isAlteration",
"(",
")",
":",
"section",
"=",
"(",
"'alterations_%d'",
"%",
"n",
")",
"if",
"profileParser",
".",
"has_option",
"(",
"section",
",",
"set",
".",
"getName",
"(",
")",
")",
":",
"set",
".",
"setValue",
"(",
"unicode",
"(",
"profileParser",
".",
"get",
"(",
"section",
",",
"set",
".",
"getName",
"(",
")",
")",
",",
"'utf-8'",
",",
"'replace'",
")",
",",
"n",
")",
"n",
"+=",
"1",
"else",
":",
"for",
"set",
"in",
"settingsList",
":",
"if",
"set",
".",
"isPreference",
"(",
")",
":",
"continue",
"section",
"=",
"'profile'",
"if",
"set",
".",
"isAlteration",
"(",
")",
":",
"section",
"=",
"'alterations'",
"if",
"profileParser",
".",
"has_option",
"(",
"section",
",",
"set",
".",
"getName",
"(",
")",
")",
":",
"set",
".",
"setValue",
"(",
"unicode",
"(",
"profileParser",
".",
"get",
"(",
"section",
",",
"set",
".",
"getName",
"(",
")",
")",
",",
"'utf-8'",
",",
"'replace'",
")",
")",
"if",
"(",
"getProfileSetting",
"(",
"'retraction_combing'",
")",
"==",
"'1'",
")",
":",
"putProfileSetting",
"(",
"'retraction_combing'",
",",
"'All'",
")"
] | read a profile file as active profile settings . | train | false |
20,115 | @cronjobs.register
def update_global_totals(date=None):
raise_if_reindex_in_progress('amo')
if date:
date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
today = (date or (datetime.date.today() - datetime.timedelta(days=1)))
today_jobs = [dict(job=job, date=today) for job in tasks._get_daily_jobs(date)]
max_update = (date or UpdateCount.objects.aggregate(max=Max('date'))['max'])
metrics_jobs = [dict(job=job, date=max_update) for job in tasks._get_metrics_jobs(date)]
ts = [tasks.update_global_totals.subtask(kwargs=kw) for kw in (today_jobs + metrics_jobs)]
TaskSet(ts).apply_async()
| [
"@",
"cronjobs",
".",
"register",
"def",
"update_global_totals",
"(",
"date",
"=",
"None",
")",
":",
"raise_if_reindex_in_progress",
"(",
"'amo'",
")",
"if",
"date",
":",
"date",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"date",
",",
"'%Y-%m-%d'",
")",
".",
"date",
"(",
")",
"today",
"=",
"(",
"date",
"or",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
")",
")",
"today_jobs",
"=",
"[",
"dict",
"(",
"job",
"=",
"job",
",",
"date",
"=",
"today",
")",
"for",
"job",
"in",
"tasks",
".",
"_get_daily_jobs",
"(",
"date",
")",
"]",
"max_update",
"=",
"(",
"date",
"or",
"UpdateCount",
".",
"objects",
".",
"aggregate",
"(",
"max",
"=",
"Max",
"(",
"'date'",
")",
")",
"[",
"'max'",
"]",
")",
"metrics_jobs",
"=",
"[",
"dict",
"(",
"job",
"=",
"job",
",",
"date",
"=",
"max_update",
")",
"for",
"job",
"in",
"tasks",
".",
"_get_metrics_jobs",
"(",
"date",
")",
"]",
"ts",
"=",
"[",
"tasks",
".",
"update_global_totals",
".",
"subtask",
"(",
"kwargs",
"=",
"kw",
")",
"for",
"kw",
"in",
"(",
"today_jobs",
"+",
"metrics_jobs",
")",
"]",
"TaskSet",
"(",
"ts",
")",
".",
"apply_async",
"(",
")"
] | update global statistics totals . | train | false |
20,116 | def _dummy_process(text, *args, **kwargs):
return text
| [
"def",
"_dummy_process",
"(",
"text",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"text"
] | pass-through processor . | train | false |
20,118 | def find_dir(p):
x = 'x'
while (x and (not os.path.exists(p))):
(p, x) = os.path.split(p)
return p
| [
"def",
"find_dir",
"(",
"p",
")",
":",
"x",
"=",
"'x'",
"while",
"(",
"x",
"and",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"p",
")",
")",
")",
":",
"(",
"p",
",",
"x",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"p",
")",
"return",
"p"
] | return first folder level that exists in this path . | train | false |
20,119 | @core_helper
def follow_button(obj_type, obj_id):
obj_type = obj_type.lower()
assert (obj_type in _follow_objects)
if c.user:
context = {'model': model, 'session': model.Session, 'user': c.user}
action = ('am_following_%s' % obj_type)
following = logic.get_action(action)(context, {'id': obj_id})
return snippet('snippets/follow_button.html', following=following, obj_id=obj_id, obj_type=obj_type)
return ''
| [
"@",
"core_helper",
"def",
"follow_button",
"(",
"obj_type",
",",
"obj_id",
")",
":",
"obj_type",
"=",
"obj_type",
".",
"lower",
"(",
")",
"assert",
"(",
"obj_type",
"in",
"_follow_objects",
")",
"if",
"c",
".",
"user",
":",
"context",
"=",
"{",
"'model'",
":",
"model",
",",
"'session'",
":",
"model",
".",
"Session",
",",
"'user'",
":",
"c",
".",
"user",
"}",
"action",
"=",
"(",
"'am_following_%s'",
"%",
"obj_type",
")",
"following",
"=",
"logic",
".",
"get_action",
"(",
"action",
")",
"(",
"context",
",",
"{",
"'id'",
":",
"obj_id",
"}",
")",
"return",
"snippet",
"(",
"'snippets/follow_button.html'",
",",
"following",
"=",
"following",
",",
"obj_id",
"=",
"obj_id",
",",
"obj_type",
"=",
"obj_type",
")",
"return",
"''"
] | return a follow button for the given object type and id . | train | false |
20,120 | def error_grad(expr):
return {var: None for var in expr.variables()}
| [
"def",
"error_grad",
"(",
"expr",
")",
":",
"return",
"{",
"var",
":",
"None",
"for",
"var",
"in",
"expr",
".",
"variables",
"(",
")",
"}"
] | returns a gradient of all none . | train | false |
20,121 | @utils.arg('server', metavar='<server>', help=_('Name or ID of server.'))
def do_suspend(cs, args):
_find_server(cs, args.server).suspend()
| [
"@",
"utils",
".",
"arg",
"(",
"'server'",
",",
"metavar",
"=",
"'<server>'",
",",
"help",
"=",
"_",
"(",
"'Name or ID of server.'",
")",
")",
"def",
"do_suspend",
"(",
"cs",
",",
"args",
")",
":",
"_find_server",
"(",
"cs",
",",
"args",
".",
"server",
")",
".",
"suspend",
"(",
")"
] | suspend a server . | train | false |
20,122 | def _contains_hidden_files(n):
for sub in n:
name = sub.name
if ((len(name) > 1) and name.startswith('.')):
return True
return False
| [
"def",
"_contains_hidden_files",
"(",
"n",
")",
":",
"for",
"sub",
"in",
"n",
":",
"name",
"=",
"sub",
".",
"name",
"if",
"(",
"(",
"len",
"(",
"name",
")",
">",
"1",
")",
"and",
"name",
".",
"startswith",
"(",
"'.'",
")",
")",
":",
"return",
"True",
"return",
"False"
] | return true if n contains files starting with a . | train | false |
20,123 | def filter_by_latest_downloadable_changeset_revision_that_has_missing_tool_test_components(trans, repository):
repository_metadata = get_latest_downloadable_repository_metadata_if_it_includes_tools(trans, repository)
if ((repository_metadata is not None) and repository_metadata.missing_test_components):
return repository_metadata.changeset_revision
return None
| [
"def",
"filter_by_latest_downloadable_changeset_revision_that_has_missing_tool_test_components",
"(",
"trans",
",",
"repository",
")",
":",
"repository_metadata",
"=",
"get_latest_downloadable_repository_metadata_if_it_includes_tools",
"(",
"trans",
",",
"repository",
")",
"if",
"(",
"(",
"repository_metadata",
"is",
"not",
"None",
")",
"and",
"repository_metadata",
".",
"missing_test_components",
")",
":",
"return",
"repository_metadata",
".",
"changeset_revision",
"return",
"None"
] | inspect the latest downloadable changeset revision for the received repository to see if it includes tools that are either missing functional tests or functional test data . | train | false |
20,124 | @login_required
def hosting(request):
if (not appsettings.OFFER_HOSTING):
return redirect(u'home')
if (request.method == u'POST'):
form = HostingForm(request.POST)
if form.is_valid():
context = form.cleaned_data
context[u'username'] = request.user.username
mail_admins_contact(request, u'Hosting request for %(project)s', HOSTING_TEMPLATE, context, form.cleaned_data[u'email'])
return redirect(u'home')
else:
initial = get_initial_contact(request)
form = HostingForm(initial=initial)
return render(request, u'accounts/hosting.html', {u'form': form, u'title': _(u'Hosting')})
| [
"@",
"login_required",
"def",
"hosting",
"(",
"request",
")",
":",
"if",
"(",
"not",
"appsettings",
".",
"OFFER_HOSTING",
")",
":",
"return",
"redirect",
"(",
"u'home'",
")",
"if",
"(",
"request",
".",
"method",
"==",
"u'POST'",
")",
":",
"form",
"=",
"HostingForm",
"(",
"request",
".",
"POST",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"context",
"=",
"form",
".",
"cleaned_data",
"context",
"[",
"u'username'",
"]",
"=",
"request",
".",
"user",
".",
"username",
"mail_admins_contact",
"(",
"request",
",",
"u'Hosting request for %(project)s'",
",",
"HOSTING_TEMPLATE",
",",
"context",
",",
"form",
".",
"cleaned_data",
"[",
"u'email'",
"]",
")",
"return",
"redirect",
"(",
"u'home'",
")",
"else",
":",
"initial",
"=",
"get_initial_contact",
"(",
"request",
")",
"form",
"=",
"HostingForm",
"(",
"initial",
"=",
"initial",
")",
"return",
"render",
"(",
"request",
",",
"u'accounts/hosting.html'",
",",
"{",
"u'form'",
":",
"form",
",",
"u'title'",
":",
"_",
"(",
"u'Hosting'",
")",
"}",
")"
] | form for hosting request . | train | false |
20,125 | def numpy_scalar(data):
if ((data.ndim > 0) and ((len(data.shape) == 0) or (builtins.max(data.shape) == 0))):
assert numpy.all((numpy.array([]) == data))
raise EmptyConstantError()
try:
numpy.complex(data)
return data
except Exception:
raise NotScalarConstantError('v.data is non-numeric, non-scalar, or has more than one unique value', data)
| [
"def",
"numpy_scalar",
"(",
"data",
")",
":",
"if",
"(",
"(",
"data",
".",
"ndim",
">",
"0",
")",
"and",
"(",
"(",
"len",
"(",
"data",
".",
"shape",
")",
"==",
"0",
")",
"or",
"(",
"builtins",
".",
"max",
"(",
"data",
".",
"shape",
")",
"==",
"0",
")",
")",
")",
":",
"assert",
"numpy",
".",
"all",
"(",
"(",
"numpy",
".",
"array",
"(",
"[",
"]",
")",
"==",
"data",
")",
")",
"raise",
"EmptyConstantError",
"(",
")",
"try",
":",
"numpy",
".",
"complex",
"(",
"data",
")",
"return",
"data",
"except",
"Exception",
":",
"raise",
"NotScalarConstantError",
"(",
"'v.data is non-numeric, non-scalar, or has more than one unique value'",
",",
"data",
")"
] | return a scalar stored in a numpy ndarray . | train | false |
20,126 | def get_words_from_dictionary(lemmas):
words = set()
for lemma in lemmas:
words.update(set(lemmas[lemma]))
return words
| [
"def",
"get_words_from_dictionary",
"(",
"lemmas",
")",
":",
"words",
"=",
"set",
"(",
")",
"for",
"lemma",
"in",
"lemmas",
":",
"words",
".",
"update",
"(",
"set",
"(",
"lemmas",
"[",
"lemma",
"]",
")",
")",
"return",
"words"
] | get original set of words used for analysis . | train | false |
20,130 | def desktop_lockdown(name, user=None, disable_application_handlers=None, disable_command_line=None, disable_lock_screen=None, disable_log_out=None, disable_print_setup=None, disable_printing=None, disable_save_to_disk=None, disable_user_switching=None, user_administration_disabled=None, **kwargs):
gnome_kwargs = {'user': user, 'schema': 'org.gnome.desktop.lockdown'}
preferences = ['disable_application_handlers', 'disable_command_line', 'disable_lock_screen', 'disable_log_out', 'disable_print_setup', 'disable_printing', 'disable_save_to_disk', 'disable_user_switching', 'user_administration_disabled']
preferences_hash = {}
for pref in preferences:
if ((pref in locals()) and (locals()[pref] is not None)):
key = re.sub('_', '-', pref)
preferences_hash[key] = locals()[pref]
return _do(name, gnome_kwargs, preferences_hash)
| [
"def",
"desktop_lockdown",
"(",
"name",
",",
"user",
"=",
"None",
",",
"disable_application_handlers",
"=",
"None",
",",
"disable_command_line",
"=",
"None",
",",
"disable_lock_screen",
"=",
"None",
",",
"disable_log_out",
"=",
"None",
",",
"disable_print_setup",
"=",
"None",
",",
"disable_printing",
"=",
"None",
",",
"disable_save_to_disk",
"=",
"None",
",",
"disable_user_switching",
"=",
"None",
",",
"user_administration_disabled",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"gnome_kwargs",
"=",
"{",
"'user'",
":",
"user",
",",
"'schema'",
":",
"'org.gnome.desktop.lockdown'",
"}",
"preferences",
"=",
"[",
"'disable_application_handlers'",
",",
"'disable_command_line'",
",",
"'disable_lock_screen'",
",",
"'disable_log_out'",
",",
"'disable_print_setup'",
",",
"'disable_printing'",
",",
"'disable_save_to_disk'",
",",
"'disable_user_switching'",
",",
"'user_administration_disabled'",
"]",
"preferences_hash",
"=",
"{",
"}",
"for",
"pref",
"in",
"preferences",
":",
"if",
"(",
"(",
"pref",
"in",
"locals",
"(",
")",
")",
"and",
"(",
"locals",
"(",
")",
"[",
"pref",
"]",
"is",
"not",
"None",
")",
")",
":",
"key",
"=",
"re",
".",
"sub",
"(",
"'_'",
",",
"'-'",
",",
"pref",
")",
"preferences_hash",
"[",
"key",
"]",
"=",
"locals",
"(",
")",
"[",
"pref",
"]",
"return",
"_do",
"(",
"name",
",",
"gnome_kwargs",
",",
"preferences_hash",
")"
] | desktop_lockdown: sets values in the org . | train | true |
20,131 | def CAN_ASSIGN_OWNER(article, user):
return _is_staff_for_article(article, user)
| [
"def",
"CAN_ASSIGN_OWNER",
"(",
"article",
",",
"user",
")",
":",
"return",
"_is_staff_for_article",
"(",
"article",
",",
"user",
")"
] | is user allowed to change group of article to one of its own groups? . | train | false |
20,132 | def _get_creator_counts(query, count, page):
creator_counts = query.facet_counts()['creator_id']['terms']
total = len(creator_counts)
creator_counts = creator_counts[((page - 1) * count):(page * count)]
user_ids = [x['term'] for x in creator_counts]
results = UserMappingType.search().filter(id__in=user_ids).values_dict('id', 'username', 'display_name', 'avatar', 'twitter_usernames', 'last_contribution_date')[:count]
results = UserMappingType.reshape(results)
user_lookup = {}
for r in results:
lcd = r.get('last_contribution_date', None)
if lcd:
delta = (datetime.now() - lcd)
r['days_since_last_activity'] = delta.days
else:
r['days_since_last_activity'] = None
user_lookup[r['id']] = r
for item in creator_counts:
item['user'] = user_lookup.get(item['term'], None)
return ([item for item in creator_counts if (item['user'] is not None)], total)
| [
"def",
"_get_creator_counts",
"(",
"query",
",",
"count",
",",
"page",
")",
":",
"creator_counts",
"=",
"query",
".",
"facet_counts",
"(",
")",
"[",
"'creator_id'",
"]",
"[",
"'terms'",
"]",
"total",
"=",
"len",
"(",
"creator_counts",
")",
"creator_counts",
"=",
"creator_counts",
"[",
"(",
"(",
"page",
"-",
"1",
")",
"*",
"count",
")",
":",
"(",
"page",
"*",
"count",
")",
"]",
"user_ids",
"=",
"[",
"x",
"[",
"'term'",
"]",
"for",
"x",
"in",
"creator_counts",
"]",
"results",
"=",
"UserMappingType",
".",
"search",
"(",
")",
".",
"filter",
"(",
"id__in",
"=",
"user_ids",
")",
".",
"values_dict",
"(",
"'id'",
",",
"'username'",
",",
"'display_name'",
",",
"'avatar'",
",",
"'twitter_usernames'",
",",
"'last_contribution_date'",
")",
"[",
":",
"count",
"]",
"results",
"=",
"UserMappingType",
".",
"reshape",
"(",
"results",
")",
"user_lookup",
"=",
"{",
"}",
"for",
"r",
"in",
"results",
":",
"lcd",
"=",
"r",
".",
"get",
"(",
"'last_contribution_date'",
",",
"None",
")",
"if",
"lcd",
":",
"delta",
"=",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"lcd",
")",
"r",
"[",
"'days_since_last_activity'",
"]",
"=",
"delta",
".",
"days",
"else",
":",
"r",
"[",
"'days_since_last_activity'",
"]",
"=",
"None",
"user_lookup",
"[",
"r",
"[",
"'id'",
"]",
"]",
"=",
"r",
"for",
"item",
"in",
"creator_counts",
":",
"item",
"[",
"'user'",
"]",
"=",
"user_lookup",
".",
"get",
"(",
"item",
"[",
"'term'",
"]",
",",
"None",
")",
"return",
"(",
"[",
"item",
"for",
"item",
"in",
"creator_counts",
"if",
"(",
"item",
"[",
"'user'",
"]",
"is",
"not",
"None",
")",
"]",
",",
"total",
")"
] | get the list of top contributors with the contribution count . | train | false |
20,134 | def test_pushdpopd(xonsh_builtins):
xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE)
dirstack.cd([PARENT])
owd = os.getcwd()
assert (owd.casefold() == xonsh_builtins.__xonsh_env__['PWD'].casefold())
dirstack.pushd([HERE])
wd = os.getcwd()
assert (wd.casefold() == HERE.casefold())
dirstack.popd([])
assert (owd.casefold() == os.getcwd().casefold()), 'popd returned cwd to expected dir'
| [
"def",
"test_pushdpopd",
"(",
"xonsh_builtins",
")",
":",
"xonsh_builtins",
".",
"__xonsh_env__",
"=",
"Env",
"(",
"CDPATH",
"=",
"PARENT",
",",
"PWD",
"=",
"HERE",
")",
"dirstack",
".",
"cd",
"(",
"[",
"PARENT",
"]",
")",
"owd",
"=",
"os",
".",
"getcwd",
"(",
")",
"assert",
"(",
"owd",
".",
"casefold",
"(",
")",
"==",
"xonsh_builtins",
".",
"__xonsh_env__",
"[",
"'PWD'",
"]",
".",
"casefold",
"(",
")",
")",
"dirstack",
".",
"pushd",
"(",
"[",
"HERE",
"]",
")",
"wd",
"=",
"os",
".",
"getcwd",
"(",
")",
"assert",
"(",
"wd",
".",
"casefold",
"(",
")",
"==",
"HERE",
".",
"casefold",
"(",
")",
")",
"dirstack",
".",
"popd",
"(",
"[",
"]",
")",
"assert",
"(",
"owd",
".",
"casefold",
"(",
")",
"==",
"os",
".",
"getcwd",
"(",
")",
".",
"casefold",
"(",
")",
")",
",",
"'popd returned cwd to expected dir'"
] | simple non-unc push/pop to verify we didnt break nonunc case . | train | false |
20,135 | def _get_outerhtml(html_node):
html_string = lxml.html.tostring(html_node)
return re.sub('[^>]*$', '', html_string, count=1)
| [
"def",
"_get_outerhtml",
"(",
"html_node",
")",
":",
"html_string",
"=",
"lxml",
".",
"html",
".",
"tostring",
"(",
"html_node",
")",
"return",
"re",
".",
"sub",
"(",
"'[^>]*$'",
",",
"''",
",",
"html_string",
",",
"count",
"=",
"1",
")"
] | get a string representation of an html node . | train | false |
20,138 | def __ipv6(value):
return (salt.utils.validate.net.ipv6_addr(value), value, 'IPv6 address')
| [
"def",
"__ipv6",
"(",
"value",
")",
":",
"return",
"(",
"salt",
".",
"utils",
".",
"validate",
".",
"net",
".",
"ipv6_addr",
"(",
"value",
")",
",",
"value",
",",
"'IPv6 address'",
")"
] | validate an ipv6 address . | train | false |
20,140 | def _check_scale(scale):
if (np.isscalar(scale) and (scale <= 0)):
raise ValueError(('scale must be positive, not %s' % scale))
| [
"def",
"_check_scale",
"(",
"scale",
")",
":",
"if",
"(",
"np",
".",
"isscalar",
"(",
"scale",
")",
"and",
"(",
"scale",
"<=",
"0",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'scale must be positive, not %s'",
"%",
"scale",
")",
")"
] | ensure valid scale value is passed . | train | false |
20,141 | def make_mail(subject, text_template, html_template, context_vars, from_email, to_email, headers=None, **extra_kwargs):
default_headers = {'Reply-To': settings.DEFAULT_REPLY_TO_EMAIL}
if (headers is not None):
default_headers.update(headers)
headers = default_headers
mail = EmailMultiAlternatives(subject, render_email(text_template, context_vars), from_email, [to_email], headers=headers, **extra_kwargs)
if html_template:
html = transform(render_email(html_template, context_vars), ('https://' + Site.objects.get_current().domain))
mail.attach_alternative(html, 'text/html')
return mail
| [
"def",
"make_mail",
"(",
"subject",
",",
"text_template",
",",
"html_template",
",",
"context_vars",
",",
"from_email",
",",
"to_email",
",",
"headers",
"=",
"None",
",",
"**",
"extra_kwargs",
")",
":",
"default_headers",
"=",
"{",
"'Reply-To'",
":",
"settings",
".",
"DEFAULT_REPLY_TO_EMAIL",
"}",
"if",
"(",
"headers",
"is",
"not",
"None",
")",
":",
"default_headers",
".",
"update",
"(",
"headers",
")",
"headers",
"=",
"default_headers",
"mail",
"=",
"EmailMultiAlternatives",
"(",
"subject",
",",
"render_email",
"(",
"text_template",
",",
"context_vars",
")",
",",
"from_email",
",",
"[",
"to_email",
"]",
",",
"headers",
"=",
"headers",
",",
"**",
"extra_kwargs",
")",
"if",
"html_template",
":",
"html",
"=",
"transform",
"(",
"render_email",
"(",
"html_template",
",",
"context_vars",
")",
",",
"(",
"'https://'",
"+",
"Site",
".",
"objects",
".",
"get_current",
"(",
")",
".",
"domain",
")",
")",
"mail",
".",
"attach_alternative",
"(",
"html",
",",
"'text/html'",
")",
"return",
"mail"
] | return an instance of emailmultialternative with both plaintext and html versions . | train | false |
20,143 | def utctotimestamp(dt):
return total_seconds((dt - epoch))
| [
"def",
"utctotimestamp",
"(",
"dt",
")",
":",
"return",
"total_seconds",
"(",
"(",
"dt",
"-",
"epoch",
")",
")"
] | convert a timestamp to seconds . | train | false |
20,144 | def sha512_digest(instr):
if six.PY3:
b = salt.utils.to_bytes(instr)
return hashlib.sha512(b).hexdigest()
return hashlib.sha512(instr).hexdigest()
| [
"def",
"sha512_digest",
"(",
"instr",
")",
":",
"if",
"six",
".",
"PY3",
":",
"b",
"=",
"salt",
".",
"utils",
".",
"to_bytes",
"(",
"instr",
")",
"return",
"hashlib",
".",
"sha512",
"(",
"b",
")",
".",
"hexdigest",
"(",
")",
"return",
"hashlib",
".",
"sha512",
"(",
"instr",
")",
".",
"hexdigest",
"(",
")"
] | generate an sha512 hash of a given string . | train | false |
20,145 | @memoize
def mixin(*args):
if (len(args) == 1):
return args[0]
name = ('Mixin_%s' % '_'.join((cls.__name__ for cls in args)))
return type(name, args, {})
| [
"@",
"memoize",
"def",
"mixin",
"(",
"*",
"args",
")",
":",
"if",
"(",
"len",
"(",
"args",
")",
"==",
"1",
")",
":",
"return",
"args",
"[",
"0",
"]",
"name",
"=",
"(",
"'Mixin_%s'",
"%",
"'_'",
".",
"join",
"(",
"(",
"cls",
".",
"__name__",
"for",
"cls",
"in",
"args",
")",
")",
")",
"return",
"type",
"(",
"name",
",",
"args",
",",
"{",
"}",
")"
] | dynamically creates a class that inherits from all the classes passed as parameters . | train | false |
20,147 | def _function_matcher(matcher_func):
def match(node):
try:
return matcher_func(node)
except (LookupError, AttributeError, ValueError, TypeError):
return False
return match
| [
"def",
"_function_matcher",
"(",
"matcher_func",
")",
":",
"def",
"match",
"(",
"node",
")",
":",
"try",
":",
"return",
"matcher_func",
"(",
"node",
")",
"except",
"(",
"LookupError",
",",
"AttributeError",
",",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"False",
"return",
"match"
] | safer attribute lookup -- returns false instead of raising an error . | train | false |
20,148 | def _check_and_install_ruby(ret, ruby, default=False, user=None):
ret = _check_ruby(ret, ruby, user=user)
if (not ret['result']):
if __salt__['rvm.install_ruby'](ruby, runas=user):
ret['result'] = True
ret['changes'][ruby] = 'Installed'
ret['comment'] = 'Successfully installed ruby.'
ret['default'] = False
else:
ret['result'] = False
ret['comment'] = 'Could not install ruby.'
return ret
if default:
__salt__['rvm.set_default'](ruby, runas=user)
return ret
| [
"def",
"_check_and_install_ruby",
"(",
"ret",
",",
"ruby",
",",
"default",
"=",
"False",
",",
"user",
"=",
"None",
")",
":",
"ret",
"=",
"_check_ruby",
"(",
"ret",
",",
"ruby",
",",
"user",
"=",
"user",
")",
"if",
"(",
"not",
"ret",
"[",
"'result'",
"]",
")",
":",
"if",
"__salt__",
"[",
"'rvm.install_ruby'",
"]",
"(",
"ruby",
",",
"runas",
"=",
"user",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'changes'",
"]",
"[",
"ruby",
"]",
"=",
"'Installed'",
"ret",
"[",
"'comment'",
"]",
"=",
"'Successfully installed ruby.'",
"ret",
"[",
"'default'",
"]",
"=",
"False",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Could not install ruby.'",
"return",
"ret",
"if",
"default",
":",
"__salt__",
"[",
"'rvm.set_default'",
"]",
"(",
"ruby",
",",
"runas",
"=",
"user",
")",
"return",
"ret"
] | verify that ruby is installed . | train | true |
20,149 | def intr(n):
return int(round(n))
| [
"def",
"intr",
"(",
"n",
")",
":",
"return",
"int",
"(",
"round",
"(",
"n",
")",
")"
] | returns a correctly rounded integer . | train | false |
20,150 | def get_dataset_directory(dataset_name, create_directory=True):
path = os.path.join(_dataset_root, dataset_name)
if create_directory:
try:
os.makedirs(path)
except OSError:
pass
return path
| [
"def",
"get_dataset_directory",
"(",
"dataset_name",
",",
"create_directory",
"=",
"True",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_dataset_root",
",",
"dataset_name",
")",
"if",
"create_directory",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"OSError",
":",
"pass",
"return",
"path"
] | gets the path to the directory of given dataset . | train | false |
20,151 | def kaiser_beta(a):
if (a > 50):
beta = (0.1102 * (a - 8.7))
elif (a > 21):
beta = ((0.5842 * ((a - 21) ** 0.4)) + (0.07886 * (a - 21)))
else:
beta = 0.0
return beta
| [
"def",
"kaiser_beta",
"(",
"a",
")",
":",
"if",
"(",
"a",
">",
"50",
")",
":",
"beta",
"=",
"(",
"0.1102",
"*",
"(",
"a",
"-",
"8.7",
")",
")",
"elif",
"(",
"a",
">",
"21",
")",
":",
"beta",
"=",
"(",
"(",
"0.5842",
"*",
"(",
"(",
"a",
"-",
"21",
")",
"**",
"0.4",
")",
")",
"+",
"(",
"0.07886",
"*",
"(",
"a",
"-",
"21",
")",
")",
")",
"else",
":",
"beta",
"=",
"0.0",
"return",
"beta"
] | compute the kaiser parameter beta . | train | false |
20,152 | def idzp_svd(eps, A):
A = np.asfortranarray(A)
(m, n) = A.shape
(k, iU, iV, iS, w, ier) = _id.idzp_svd(eps, A)
if ier:
raise _RETCODE_ERROR
U = w[(iU - 1):((iU + (m * k)) - 1)].reshape((m, k), order='F')
V = w[(iV - 1):((iV + (n * k)) - 1)].reshape((n, k), order='F')
S = w[(iS - 1):((iS + k) - 1)]
return (U, V, S)
| [
"def",
"idzp_svd",
"(",
"eps",
",",
"A",
")",
":",
"A",
"=",
"np",
".",
"asfortranarray",
"(",
"A",
")",
"(",
"m",
",",
"n",
")",
"=",
"A",
".",
"shape",
"(",
"k",
",",
"iU",
",",
"iV",
",",
"iS",
",",
"w",
",",
"ier",
")",
"=",
"_id",
".",
"idzp_svd",
"(",
"eps",
",",
"A",
")",
"if",
"ier",
":",
"raise",
"_RETCODE_ERROR",
"U",
"=",
"w",
"[",
"(",
"iU",
"-",
"1",
")",
":",
"(",
"(",
"iU",
"+",
"(",
"m",
"*",
"k",
")",
")",
"-",
"1",
")",
"]",
".",
"reshape",
"(",
"(",
"m",
",",
"k",
")",
",",
"order",
"=",
"'F'",
")",
"V",
"=",
"w",
"[",
"(",
"iV",
"-",
"1",
")",
":",
"(",
"(",
"iV",
"+",
"(",
"n",
"*",
"k",
")",
")",
"-",
"1",
")",
"]",
".",
"reshape",
"(",
"(",
"n",
",",
"k",
")",
",",
"order",
"=",
"'F'",
")",
"S",
"=",
"w",
"[",
"(",
"iS",
"-",
"1",
")",
":",
"(",
"(",
"iS",
"+",
"k",
")",
"-",
"1",
")",
"]",
"return",
"(",
"U",
",",
"V",
",",
"S",
")"
] | compute svd of a complex matrix to a specified relative precision . | train | false |
20,153 | def adjust_workers(num_flows, num_cpus, worker_sockets, log_fh=None):
qiime_config = load_qiime_config()
min_per_core = int(qiime_config['denoiser_min_per_core'])
if (num_flows < ((num_cpus - 1) * min_per_core)):
if log_fh:
log_fh.write('Adjusting number of workers:\n')
log_fh.write(('flows: %d cpus:%d\n' % (num_flows, num_cpus)))
per_core = max(min_per_core, ((num_flows / num_cpus) + 1))
for i in range(num_cpus):
if ((i * per_core) > num_flows):
worker_sock = worker_sockets.pop()
worker_sock.close()
num_cpus = (num_cpus - 1)
if log_fh:
log_fh.write(('released worker %d\n' % i))
if log_fh:
log_fh.write(('New number of cpus:%d\n' % num_cpus))
if ((num_cpus == 0) or (num_cpus != len(worker_sockets))):
raise ValueError('Adjust_workers screwed up!')
return num_cpus
| [
"def",
"adjust_workers",
"(",
"num_flows",
",",
"num_cpus",
",",
"worker_sockets",
",",
"log_fh",
"=",
"None",
")",
":",
"qiime_config",
"=",
"load_qiime_config",
"(",
")",
"min_per_core",
"=",
"int",
"(",
"qiime_config",
"[",
"'denoiser_min_per_core'",
"]",
")",
"if",
"(",
"num_flows",
"<",
"(",
"(",
"num_cpus",
"-",
"1",
")",
"*",
"min_per_core",
")",
")",
":",
"if",
"log_fh",
":",
"log_fh",
".",
"write",
"(",
"'Adjusting number of workers:\\n'",
")",
"log_fh",
".",
"write",
"(",
"(",
"'flows: %d cpus:%d\\n'",
"%",
"(",
"num_flows",
",",
"num_cpus",
")",
")",
")",
"per_core",
"=",
"max",
"(",
"min_per_core",
",",
"(",
"(",
"num_flows",
"/",
"num_cpus",
")",
"+",
"1",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_cpus",
")",
":",
"if",
"(",
"(",
"i",
"*",
"per_core",
")",
">",
"num_flows",
")",
":",
"worker_sock",
"=",
"worker_sockets",
".",
"pop",
"(",
")",
"worker_sock",
".",
"close",
"(",
")",
"num_cpus",
"=",
"(",
"num_cpus",
"-",
"1",
")",
"if",
"log_fh",
":",
"log_fh",
".",
"write",
"(",
"(",
"'released worker %d\\n'",
"%",
"i",
")",
")",
"if",
"log_fh",
":",
"log_fh",
".",
"write",
"(",
"(",
"'New number of cpus:%d\\n'",
"%",
"num_cpus",
")",
")",
"if",
"(",
"(",
"num_cpus",
"==",
"0",
")",
"or",
"(",
"num_cpus",
"!=",
"len",
"(",
"worker_sockets",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Adjust_workers screwed up!'",
")",
"return",
"num_cpus"
] | stop workers no longer needed . | train | false |
20,154 | def filter_name(query, table, name, language, name_attribute='name'):
if (language is None):
query = query.filter((getattr(table, name_attribute) == name))
else:
names_table = table.names_table
name_column = getattr(names_table, name_attribute)
query = query.join(names_table)
query = query.filter((names_table.foreign_id == table.id))
query = query.filter((names_table.local_language_id == language.id))
if isinstance(name, tuple):
query = query.filter((name_column in name))
else:
query = query.filter((name_column == name))
return query
| [
"def",
"filter_name",
"(",
"query",
",",
"table",
",",
"name",
",",
"language",
",",
"name_attribute",
"=",
"'name'",
")",
":",
"if",
"(",
"language",
"is",
"None",
")",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"(",
"getattr",
"(",
"table",
",",
"name_attribute",
")",
"==",
"name",
")",
")",
"else",
":",
"names_table",
"=",
"table",
".",
"names_table",
"name_column",
"=",
"getattr",
"(",
"names_table",
",",
"name_attribute",
")",
"query",
"=",
"query",
".",
"join",
"(",
"names_table",
")",
"query",
"=",
"query",
".",
"filter",
"(",
"(",
"names_table",
".",
"foreign_id",
"==",
"table",
".",
"id",
")",
")",
"query",
"=",
"query",
".",
"filter",
"(",
"(",
"names_table",
".",
"local_language_id",
"==",
"language",
".",
"id",
")",
")",
"if",
"isinstance",
"(",
"name",
",",
"tuple",
")",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"(",
"name_column",
"in",
"name",
")",
")",
"else",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"(",
"name_column",
"==",
"name",
")",
")",
"return",
"query"
] | filter a query by name . | train | false |
20,156 | def apply_security_groups(name, security_groups, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(security_groups, string_types):
security_groups = json.loads(security_groups)
try:
conn.apply_security_groups_to_lb(name, security_groups)
msg = 'Applied security_groups on ELB {0}'.format(name)
log.info(msg)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to appply security_groups on ELB {0}: {1}'
msg = msg.format(name, e.message)
log.error(msg)
return False
| [
"def",
"apply_security_groups",
"(",
"name",
",",
"security_groups",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"isinstance",
"(",
"security_groups",
",",
"string_types",
")",
":",
"security_groups",
"=",
"json",
".",
"loads",
"(",
"security_groups",
")",
"try",
":",
"conn",
".",
"apply_security_groups_to_lb",
"(",
"name",
",",
"security_groups",
")",
"msg",
"=",
"'Applied security_groups on ELB {0}'",
".",
"format",
"(",
"name",
")",
"log",
".",
"info",
"(",
"msg",
")",
"return",
"True",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"e",
")",
"msg",
"=",
"'Failed to appply security_groups on ELB {0}: {1}'",
"msg",
"=",
"msg",
".",
"format",
"(",
"name",
",",
"e",
".",
"message",
")",
"log",
".",
"error",
"(",
"msg",
")",
"return",
"False"
] | apply security groups to elb . | train | true |
20,157 | def new(rsa_key):
return PKCS115_SigScheme(rsa_key)
| [
"def",
"new",
"(",
"rsa_key",
")",
":",
"return",
"PKCS115_SigScheme",
"(",
"rsa_key",
")"
] | return a signature scheme object pkcs115_sigscheme that can be used to perform pkcs#1 v1 . | train | false |
20,158 | def catch_notimplementederror(f):
def wrapped_func(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except NotImplementedError:
frame = traceback.extract_tb(sys.exc_info()[2])[(-1)]
LOG.error(('%(driver)s does not implement %(method)s' % {'driver': type(self.connection), 'method': frame[2]}))
wrapped_func.__name__ = f.__name__
wrapped_func.__doc__ = f.__doc__
return wrapped_func
| [
"def",
"catch_notimplementederror",
"(",
"f",
")",
":",
"def",
"wrapped_func",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"try",
":",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"except",
"NotImplementedError",
":",
"frame",
"=",
"traceback",
".",
"extract_tb",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"[",
"(",
"-",
"1",
")",
"]",
"LOG",
".",
"error",
"(",
"(",
"'%(driver)s does not implement %(method)s'",
"%",
"{",
"'driver'",
":",
"type",
"(",
"self",
".",
"connection",
")",
",",
"'method'",
":",
"frame",
"[",
"2",
"]",
"}",
")",
")",
"wrapped_func",
".",
"__name__",
"=",
"f",
".",
"__name__",
"wrapped_func",
".",
"__doc__",
"=",
"f",
".",
"__doc__",
"return",
"wrapped_func"
] | decorator to simplify catching drivers raising notimplementederror if a particular call makes a driver raise notimplementederror . | train | false |
20,159 | def hmac_signature(string, shared_secret, challenge_hmac):
if six.PY3:
msg = salt.utils.to_bytes(string)
key = salt.utils.to_bytes(shared_secret)
challenge = salt.utils.to_bytes(challenge_hmac)
else:
msg = string
key = shared_secret
challenge = challenge_hmac
hmac_hash = hmac.new(key, msg, hashlib.sha256)
valid_hmac = base64.b64encode(hmac_hash.digest())
return (valid_hmac == challenge)
| [
"def",
"hmac_signature",
"(",
"string",
",",
"shared_secret",
",",
"challenge_hmac",
")",
":",
"if",
"six",
".",
"PY3",
":",
"msg",
"=",
"salt",
".",
"utils",
".",
"to_bytes",
"(",
"string",
")",
"key",
"=",
"salt",
".",
"utils",
".",
"to_bytes",
"(",
"shared_secret",
")",
"challenge",
"=",
"salt",
".",
"utils",
".",
"to_bytes",
"(",
"challenge_hmac",
")",
"else",
":",
"msg",
"=",
"string",
"key",
"=",
"shared_secret",
"challenge",
"=",
"challenge_hmac",
"hmac_hash",
"=",
"hmac",
".",
"new",
"(",
"key",
",",
"msg",
",",
"hashlib",
".",
"sha256",
")",
"valid_hmac",
"=",
"base64",
".",
"b64encode",
"(",
"hmac_hash",
".",
"digest",
"(",
")",
")",
"return",
"(",
"valid_hmac",
"==",
"challenge",
")"
] | verify a challenging hmac signature against a string / shared-secret . | train | true |
20,160 | def create_python27_start_cmd(app_name, login_ip, port, load_balancer_host, xmpp_ip):
db_location = DATASTORE_PATH
cmd = ['/usr/bin/python2', (constants.APPSCALE_HOME + '/AppServer/dev_appserver.py'), ('--port ' + str(port)), ('--admin_port ' + str((port + 10000))), ('--login_server ' + login_ip), '--skip_sdk_update_check', ('--nginx_host ' + str(load_balancer_host)), '--require_indexes', '--enable_sendmail', ('--xmpp_path ' + xmpp_ip), ('--php_executable_path=' + str(PHP_CGI_LOCATION)), ((('--uaserver_path ' + db_location) + ':') + str(constants.UA_SERVER_PORT)), ((('--datastore_path ' + db_location) + ':') + str(constants.DB_SERVER_PORT)), (('/var/apps/' + app_name) + '/app'), ('--host ' + appscale_info.get_private_ip())]
if (app_name in TRUSTED_APPS):
cmd.extend([TRUSTED_FLAG])
return ' '.join(cmd)
| [
"def",
"create_python27_start_cmd",
"(",
"app_name",
",",
"login_ip",
",",
"port",
",",
"load_balancer_host",
",",
"xmpp_ip",
")",
":",
"db_location",
"=",
"DATASTORE_PATH",
"cmd",
"=",
"[",
"'/usr/bin/python2'",
",",
"(",
"constants",
".",
"APPSCALE_HOME",
"+",
"'/AppServer/dev_appserver.py'",
")",
",",
"(",
"'--port '",
"+",
"str",
"(",
"port",
")",
")",
",",
"(",
"'--admin_port '",
"+",
"str",
"(",
"(",
"port",
"+",
"10000",
")",
")",
")",
",",
"(",
"'--login_server '",
"+",
"login_ip",
")",
",",
"'--skip_sdk_update_check'",
",",
"(",
"'--nginx_host '",
"+",
"str",
"(",
"load_balancer_host",
")",
")",
",",
"'--require_indexes'",
",",
"'--enable_sendmail'",
",",
"(",
"'--xmpp_path '",
"+",
"xmpp_ip",
")",
",",
"(",
"'--php_executable_path='",
"+",
"str",
"(",
"PHP_CGI_LOCATION",
")",
")",
",",
"(",
"(",
"(",
"'--uaserver_path '",
"+",
"db_location",
")",
"+",
"':'",
")",
"+",
"str",
"(",
"constants",
".",
"UA_SERVER_PORT",
")",
")",
",",
"(",
"(",
"(",
"'--datastore_path '",
"+",
"db_location",
")",
"+",
"':'",
")",
"+",
"str",
"(",
"constants",
".",
"DB_SERVER_PORT",
")",
")",
",",
"(",
"(",
"'/var/apps/'",
"+",
"app_name",
")",
"+",
"'/app'",
")",
",",
"(",
"'--host '",
"+",
"appscale_info",
".",
"get_private_ip",
"(",
")",
")",
"]",
"if",
"(",
"app_name",
"in",
"TRUSTED_APPS",
")",
":",
"cmd",
".",
"extend",
"(",
"[",
"TRUSTED_FLAG",
"]",
")",
"return",
"' '",
".",
"join",
"(",
"cmd",
")"
] | creates the start command to run the python application server . | train | false |
20,161 | def explore_account(c):
while True:
print
folderflags = {}
data = c.list_folders()
for (flags, delimiter, name) in data:
folderflags[name] = flags
for name in sorted(folderflags.keys()):
print ('%-30s %s' % (name, ' '.join(folderflags[name])))
print
reply = raw_input('Type a folder name, or "q" to quit: ').strip()
if reply.lower().startswith('q'):
break
if (reply in folderflags):
explore_folder(c, reply)
else:
print 'Error: no folder named', repr(reply)
| [
"def",
"explore_account",
"(",
"c",
")",
":",
"while",
"True",
":",
"print",
"folderflags",
"=",
"{",
"}",
"data",
"=",
"c",
".",
"list_folders",
"(",
")",
"for",
"(",
"flags",
",",
"delimiter",
",",
"name",
")",
"in",
"data",
":",
"folderflags",
"[",
"name",
"]",
"=",
"flags",
"for",
"name",
"in",
"sorted",
"(",
"folderflags",
".",
"keys",
"(",
")",
")",
":",
"print",
"(",
"'%-30s %s'",
"%",
"(",
"name",
",",
"' '",
".",
"join",
"(",
"folderflags",
"[",
"name",
"]",
")",
")",
")",
"print",
"reply",
"=",
"raw_input",
"(",
"'Type a folder name, or \"q\" to quit: '",
")",
".",
"strip",
"(",
")",
"if",
"reply",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'q'",
")",
":",
"break",
"if",
"(",
"reply",
"in",
"folderflags",
")",
":",
"explore_folder",
"(",
"c",
",",
"reply",
")",
"else",
":",
"print",
"'Error: no folder named'",
",",
"repr",
"(",
"reply",
")"
] | display the folders in this imap account and let the user choose one . | train | false |
20,163 | def _get_grain(proxy, name):
grains = _retrieve_grains(proxy)
if (grains.get('result', False) and grains.get('out', {})):
return grains.get('out').get(name)
| [
"def",
"_get_grain",
"(",
"proxy",
",",
"name",
")",
":",
"grains",
"=",
"_retrieve_grains",
"(",
"proxy",
")",
"if",
"(",
"grains",
".",
"get",
"(",
"'result'",
",",
"False",
")",
"and",
"grains",
".",
"get",
"(",
"'out'",
",",
"{",
"}",
")",
")",
":",
"return",
"grains",
".",
"get",
"(",
"'out'",
")",
".",
"get",
"(",
"name",
")"
] | retrieves the grain value from the cached dictionary . | train | false |
20,164 | def is_newer_than(after, seconds):
if isinstance(after, basestring):
after = parse_strtime(after).replace(tzinfo=None)
return ((after - utcnow()) > datetime.timedelta(seconds=seconds))
| [
"def",
"is_newer_than",
"(",
"after",
",",
"seconds",
")",
":",
"if",
"isinstance",
"(",
"after",
",",
"basestring",
")",
":",
"after",
"=",
"parse_strtime",
"(",
"after",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"return",
"(",
"(",
"after",
"-",
"utcnow",
"(",
")",
")",
">",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
")"
] | return true if after is newer than seconds . | train | false |
20,165 | def characters(probabilities):
return [id2bi(c) for c in np.argmax(probabilities, 1)]
| [
"def",
"characters",
"(",
"probabilities",
")",
":",
"return",
"[",
"id2bi",
"(",
"c",
")",
"for",
"c",
"in",
"np",
".",
"argmax",
"(",
"probabilities",
",",
"1",
")",
"]"
] | turn a 1-hot encoding or a probability distribution over the possible characters back into its character representation . | train | false |
20,166 | def test_ast_good_import_from():
can_compile(u'(import [x [y]])')
| [
"def",
"test_ast_good_import_from",
"(",
")",
":",
"can_compile",
"(",
"u'(import [x [y]])'",
")"
] | make sure ast can compile valid selective import . | train | false |
20,167 | def create_permission_grant(role_db, resource_uid, resource_type, permission_types):
permission_grant_db = PermissionGrantDB(resource_uid=resource_uid, resource_type=resource_type, permission_types=permission_types)
permission_grant_db = PermissionGrant.add_or_update(permission_grant_db)
role_db.update(push__permission_grants=str(permission_grant_db.id))
return permission_grant_db
| [
"def",
"create_permission_grant",
"(",
"role_db",
",",
"resource_uid",
",",
"resource_type",
",",
"permission_types",
")",
":",
"permission_grant_db",
"=",
"PermissionGrantDB",
"(",
"resource_uid",
"=",
"resource_uid",
",",
"resource_type",
"=",
"resource_type",
",",
"permission_types",
"=",
"permission_types",
")",
"permission_grant_db",
"=",
"PermissionGrant",
".",
"add_or_update",
"(",
"permission_grant_db",
")",
"role_db",
".",
"update",
"(",
"push__permission_grants",
"=",
"str",
"(",
"permission_grant_db",
".",
"id",
")",
")",
"return",
"permission_grant_db"
] | create a new permission grant and add it to the provided role . | train | false |
20,168 | def get_hasher(algorithm=u'default'):
if hasattr(algorithm, u'algorithm'):
return algorithm
elif (algorithm == u'default'):
if (PREFERRED_HASHER is None):
load_hashers()
return PREFERRED_HASHER
else:
if (HASHERS is None):
load_hashers()
if (algorithm not in HASHERS):
raise ValueError((u"Unknown password hashing algorithm '%s'. Did you specify it in the PASSWORD_HASHERS setting?" % algorithm))
return HASHERS[algorithm]
| [
"def",
"get_hasher",
"(",
"algorithm",
"=",
"u'default'",
")",
":",
"if",
"hasattr",
"(",
"algorithm",
",",
"u'algorithm'",
")",
":",
"return",
"algorithm",
"elif",
"(",
"algorithm",
"==",
"u'default'",
")",
":",
"if",
"(",
"PREFERRED_HASHER",
"is",
"None",
")",
":",
"load_hashers",
"(",
")",
"return",
"PREFERRED_HASHER",
"else",
":",
"if",
"(",
"HASHERS",
"is",
"None",
")",
":",
"load_hashers",
"(",
")",
"if",
"(",
"algorithm",
"not",
"in",
"HASHERS",
")",
":",
"raise",
"ValueError",
"(",
"(",
"u\"Unknown password hashing algorithm '%s'. Did you specify it in the PASSWORD_HASHERS setting?\"",
"%",
"algorithm",
")",
")",
"return",
"HASHERS",
"[",
"algorithm",
"]"
] | returns an instance of a loaded password hasher . | train | false |
20,173 | def set_hsa_kernel(fn):
mod = fn.module
fn.calling_convention = CC_SPIR_KERNEL
ocl_kernels = mod.get_or_insert_named_metadata('opencl.kernels')
ocl_kernels.add(lc.MetaData.get(mod, [fn, gen_arg_addrspace_md(fn), gen_arg_access_qual_md(fn), gen_arg_type(fn), gen_arg_type_qual(fn), gen_arg_base_type(fn)]))
make_constant = (lambda x: lc.Constant.int(lc.Type.int(), x))
spir_version_constant = [make_constant(x) for x in SPIR_VERSION]
spir_version = mod.get_or_insert_named_metadata('opencl.spir.version')
if (not spir_version.operands):
spir_version.add(lc.MetaData.get(mod, spir_version_constant))
ocl_version = mod.get_or_insert_named_metadata('opencl.ocl.version')
if (not ocl_version.operands):
ocl_version.add(lc.MetaData.get(mod, spir_version_constant))
| [
"def",
"set_hsa_kernel",
"(",
"fn",
")",
":",
"mod",
"=",
"fn",
".",
"module",
"fn",
".",
"calling_convention",
"=",
"CC_SPIR_KERNEL",
"ocl_kernels",
"=",
"mod",
".",
"get_or_insert_named_metadata",
"(",
"'opencl.kernels'",
")",
"ocl_kernels",
".",
"add",
"(",
"lc",
".",
"MetaData",
".",
"get",
"(",
"mod",
",",
"[",
"fn",
",",
"gen_arg_addrspace_md",
"(",
"fn",
")",
",",
"gen_arg_access_qual_md",
"(",
"fn",
")",
",",
"gen_arg_type",
"(",
"fn",
")",
",",
"gen_arg_type_qual",
"(",
"fn",
")",
",",
"gen_arg_base_type",
"(",
"fn",
")",
"]",
")",
")",
"make_constant",
"=",
"(",
"lambda",
"x",
":",
"lc",
".",
"Constant",
".",
"int",
"(",
"lc",
".",
"Type",
".",
"int",
"(",
")",
",",
"x",
")",
")",
"spir_version_constant",
"=",
"[",
"make_constant",
"(",
"x",
")",
"for",
"x",
"in",
"SPIR_VERSION",
"]",
"spir_version",
"=",
"mod",
".",
"get_or_insert_named_metadata",
"(",
"'opencl.spir.version'",
")",
"if",
"(",
"not",
"spir_version",
".",
"operands",
")",
":",
"spir_version",
".",
"add",
"(",
"lc",
".",
"MetaData",
".",
"get",
"(",
"mod",
",",
"spir_version_constant",
")",
")",
"ocl_version",
"=",
"mod",
".",
"get_or_insert_named_metadata",
"(",
"'opencl.ocl.version'",
")",
"if",
"(",
"not",
"ocl_version",
".",
"operands",
")",
":",
"ocl_version",
".",
"add",
"(",
"lc",
".",
"MetaData",
".",
"get",
"(",
"mod",
",",
"spir_version_constant",
")",
")"
] | ensure fn is usable as a spir kernel . | train | false |
20,174 | def xblock_has_own_studio_page(xblock, parent_xblock=None):
category = xblock.category
if is_unit(xblock, parent_xblock):
return True
elif (category == 'vertical'):
if (parent_xblock is None):
parent_xblock = get_parent_xblock(xblock)
return (is_unit(parent_xblock) if parent_xblock else False)
return xblock.has_children
| [
"def",
"xblock_has_own_studio_page",
"(",
"xblock",
",",
"parent_xblock",
"=",
"None",
")",
":",
"category",
"=",
"xblock",
".",
"category",
"if",
"is_unit",
"(",
"xblock",
",",
"parent_xblock",
")",
":",
"return",
"True",
"elif",
"(",
"category",
"==",
"'vertical'",
")",
":",
"if",
"(",
"parent_xblock",
"is",
"None",
")",
":",
"parent_xblock",
"=",
"get_parent_xblock",
"(",
"xblock",
")",
"return",
"(",
"is_unit",
"(",
"parent_xblock",
")",
"if",
"parent_xblock",
"else",
"False",
")",
"return",
"xblock",
".",
"has_children"
] | returns true if the specified xblock has an associated studio page . | train | false |
20,175 | def stopping_criteria(num_iterations, validation_error, training_error):
if ((num_iterations % 100) == 0):
if VERBOSE:
print(('error: %s' % validation_error))
if (num_iterations >= 2000):
return True
return False
| [
"def",
"stopping_criteria",
"(",
"num_iterations",
",",
"validation_error",
",",
"training_error",
")",
":",
"if",
"(",
"(",
"num_iterations",
"%",
"100",
")",
"==",
"0",
")",
":",
"if",
"VERBOSE",
":",
"print",
"(",
"(",
"'error: %s'",
"%",
"validation_error",
")",
")",
"if",
"(",
"num_iterations",
">=",
"2000",
")",
":",
"return",
"True",
"return",
"False"
] | define when to stop iterating . | train | false |
20,176 | def _collapse_whitespace(text):
return re.sub('\\s+', ' ', text)
| [
"def",
"_collapse_whitespace",
"(",
"text",
")",
":",
"return",
"re",
".",
"sub",
"(",
"'\\\\s+'",
",",
"' '",
",",
"text",
")"
] | collapses sequences of whitespace characters in text to a single space . | train | false |
20,179 | @cli.command('open')
@click.option('-i', '--image', 'images', type=click.Path(), multiple=True, help='The image file to open.')
@generator
def open_cmd(images):
for image in images:
try:
click.echo(('Opening "%s"' % image))
if (image == '-'):
img = Image.open(click.get_binary_stdin())
img.filename = '-'
else:
img = Image.open(image)
(yield img)
except Exception as e:
click.echo(('Could not open image "%s": %s' % (image, e)), err=True)
| [
"@",
"cli",
".",
"command",
"(",
"'open'",
")",
"@",
"click",
".",
"option",
"(",
"'-i'",
",",
"'--image'",
",",
"'images'",
",",
"type",
"=",
"click",
".",
"Path",
"(",
")",
",",
"multiple",
"=",
"True",
",",
"help",
"=",
"'The image file to open.'",
")",
"@",
"generator",
"def",
"open_cmd",
"(",
"images",
")",
":",
"for",
"image",
"in",
"images",
":",
"try",
":",
"click",
".",
"echo",
"(",
"(",
"'Opening \"%s\"'",
"%",
"image",
")",
")",
"if",
"(",
"image",
"==",
"'-'",
")",
":",
"img",
"=",
"Image",
".",
"open",
"(",
"click",
".",
"get_binary_stdin",
"(",
")",
")",
"img",
".",
"filename",
"=",
"'-'",
"else",
":",
"img",
"=",
"Image",
".",
"open",
"(",
"image",
")",
"(",
"yield",
"img",
")",
"except",
"Exception",
"as",
"e",
":",
"click",
".",
"echo",
"(",
"(",
"'Could not open image \"%s\": %s'",
"%",
"(",
"image",
",",
"e",
")",
")",
",",
"err",
"=",
"True",
")"
] | loads one or multiple images for processing . | train | false |
20,180 | def addQuoteWord(evaluatorWords, word):
if (len(word) < 2):
evaluatorWords.append(word)
return
firstCharacter = word[0]
if (firstCharacter == '$'):
dotIndex = word.find('.', 1)
if (dotIndex > (-1)):
evaluatorWords.append(word[:dotIndex])
evaluatorWords.append(word[dotIndex:])
return
if ((firstCharacter != '"') and (firstCharacter != "'")):
evaluatorWords.append(word)
return
nextQuoteIndex = word.find(firstCharacter, 1)
if ((nextQuoteIndex < 0) or (nextQuoteIndex == (len(word) - 1))):
evaluatorWords.append(word)
return
nextQuoteIndex += 1
evaluatorWords.append(word[:nextQuoteIndex])
evaluatorWords.append(word[nextQuoteIndex:])
| [
"def",
"addQuoteWord",
"(",
"evaluatorWords",
",",
"word",
")",
":",
"if",
"(",
"len",
"(",
"word",
")",
"<",
"2",
")",
":",
"evaluatorWords",
".",
"append",
"(",
"word",
")",
"return",
"firstCharacter",
"=",
"word",
"[",
"0",
"]",
"if",
"(",
"firstCharacter",
"==",
"'$'",
")",
":",
"dotIndex",
"=",
"word",
".",
"find",
"(",
"'.'",
",",
"1",
")",
"if",
"(",
"dotIndex",
">",
"(",
"-",
"1",
")",
")",
":",
"evaluatorWords",
".",
"append",
"(",
"word",
"[",
":",
"dotIndex",
"]",
")",
"evaluatorWords",
".",
"append",
"(",
"word",
"[",
"dotIndex",
":",
"]",
")",
"return",
"if",
"(",
"(",
"firstCharacter",
"!=",
"'\"'",
")",
"and",
"(",
"firstCharacter",
"!=",
"\"'\"",
")",
")",
":",
"evaluatorWords",
".",
"append",
"(",
"word",
")",
"return",
"nextQuoteIndex",
"=",
"word",
".",
"find",
"(",
"firstCharacter",
",",
"1",
")",
"if",
"(",
"(",
"nextQuoteIndex",
"<",
"0",
")",
"or",
"(",
"nextQuoteIndex",
"==",
"(",
"len",
"(",
"word",
")",
"-",
"1",
")",
")",
")",
":",
"evaluatorWords",
".",
"append",
"(",
"word",
")",
"return",
"nextQuoteIndex",
"+=",
"1",
"evaluatorWords",
".",
"append",
"(",
"word",
"[",
":",
"nextQuoteIndex",
"]",
")",
"evaluatorWords",
".",
"append",
"(",
"word",
"[",
"nextQuoteIndex",
":",
"]",
")"
] | add quote word and remainder if the word starts with a quote character or dollar sign . | train | false |
20,181 | def get_discussion_categories_ids(course, user, include_all=False):
accessible_discussion_ids = [xblock.discussion_id for xblock in get_accessible_discussion_xblocks(course, user, include_all=include_all)]
return (course.top_level_discussion_topic_ids + accessible_discussion_ids)
| [
"def",
"get_discussion_categories_ids",
"(",
"course",
",",
"user",
",",
"include_all",
"=",
"False",
")",
":",
"accessible_discussion_ids",
"=",
"[",
"xblock",
".",
"discussion_id",
"for",
"xblock",
"in",
"get_accessible_discussion_xblocks",
"(",
"course",
",",
"user",
",",
"include_all",
"=",
"include_all",
")",
"]",
"return",
"(",
"course",
".",
"top_level_discussion_topic_ids",
"+",
"accessible_discussion_ids",
")"
] | returns a list of available ids of categories for the course that are accessible to the given user . | train | false |
20,182 | def _is_user_profile_visible(self, user=None):
try:
if hasattr(self, u'is_private'):
is_private = self.is_private
else:
is_private = self.get_profile().is_private
return ((user and ((user == self) or user.is_staff)) or (not is_private))
except Profile.DoesNotExist:
return True
| [
"def",
"_is_user_profile_visible",
"(",
"self",
",",
"user",
"=",
"None",
")",
":",
"try",
":",
"if",
"hasattr",
"(",
"self",
",",
"u'is_private'",
")",
":",
"is_private",
"=",
"self",
".",
"is_private",
"else",
":",
"is_private",
"=",
"self",
".",
"get_profile",
"(",
")",
".",
"is_private",
"return",
"(",
"(",
"user",
"and",
"(",
"(",
"user",
"==",
"self",
")",
"or",
"user",
".",
"is_staff",
")",
")",
"or",
"(",
"not",
"is_private",
")",
")",
"except",
"Profile",
".",
"DoesNotExist",
":",
"return",
"True"
] | get whether or not a users profile is viewable by a given user . | train | false |
20,183 | def get_TextField(kwargs):
kwargs['validators'].append(validators.length(max=500))
return f.TextField(**kwargs)
| [
"def",
"get_TextField",
"(",
"kwargs",
")",
":",
"kwargs",
"[",
"'validators'",
"]",
".",
"append",
"(",
"validators",
".",
"length",
"(",
"max",
"=",
"500",
")",
")",
"return",
"f",
".",
"TextField",
"(",
"**",
"kwargs",
")"
] | returns a textfield . | train | false |
20,185 | def _check_pyopengl_3D():
global USE_TEX_3D
USE_TEX_3D = True
try:
import OpenGL.GL as _gl
except ImportError:
raise ImportError('PyOpenGL is required for 3D texture support')
return _gl
| [
"def",
"_check_pyopengl_3D",
"(",
")",
":",
"global",
"USE_TEX_3D",
"USE_TEX_3D",
"=",
"True",
"try",
":",
"import",
"OpenGL",
".",
"GL",
"as",
"_gl",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'PyOpenGL is required for 3D texture support'",
")",
"return",
"_gl"
] | helper to ensure users have opengl for 3d texture support . | train | true |
20,186 | def debug_mode():
if (platform.system() == 'Windows'):
temp_dir = os.environ['Temp'].replace('\\', '/')
else:
temp_dir = '/tmp/'
log_handler = logging.FileHandler('{0:s}/onionshare_server.log'.format(temp_dir))
log_handler.setLevel(logging.WARNING)
app.logger.addHandler(log_handler)
| [
"def",
"debug_mode",
"(",
")",
":",
"if",
"(",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
")",
":",
"temp_dir",
"=",
"os",
".",
"environ",
"[",
"'Temp'",
"]",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"else",
":",
"temp_dir",
"=",
"'/tmp/'",
"log_handler",
"=",
"logging",
".",
"FileHandler",
"(",
"'{0:s}/onionshare_server.log'",
".",
"format",
"(",
"temp_dir",
")",
")",
"log_handler",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")",
"app",
".",
"logger",
".",
"addHandler",
"(",
"log_handler",
")"
] | turn on debugging mode . | train | false |
20,187 | def get_current_request_hostname():
hostname = None
request = get_current_request()
if request:
hostname = request.META.get('HTTP_HOST')
return hostname
| [
"def",
"get_current_request_hostname",
"(",
")",
":",
"hostname",
"=",
"None",
"request",
"=",
"get_current_request",
"(",
")",
"if",
"request",
":",
"hostname",
"=",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_HOST'",
")",
"return",
"hostname"
] | this method will return the hostname that was used in the current django request . | train | false |
20,188 | def add_user_milestone(user, milestone):
if (not settings.FEATURES.get('MILESTONES_APP')):
return None
return milestones_api.add_user_milestone(user, milestone)
| [
"def",
"add_user_milestone",
"(",
"user",
",",
"milestone",
")",
":",
"if",
"(",
"not",
"settings",
".",
"FEATURES",
".",
"get",
"(",
"'MILESTONES_APP'",
")",
")",
":",
"return",
"None",
"return",
"milestones_api",
".",
"add_user_milestone",
"(",
"user",
",",
"milestone",
")"
] | client api operation adapter/wrapper . | train | false |
20,189 | def nickel_round(value, quant=Decimal('0.05'), rounding=ROUND_HALF_UP):
assert isinstance(value, Decimal)
assert isinstance(quant, Decimal)
return ((value / quant).quantize(1, rounding=rounding) * quant)
| [
"def",
"nickel_round",
"(",
"value",
",",
"quant",
"=",
"Decimal",
"(",
"'0.05'",
")",
",",
"rounding",
"=",
"ROUND_HALF_UP",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"Decimal",
")",
"assert",
"isinstance",
"(",
"quant",
",",
"Decimal",
")",
"return",
"(",
"(",
"value",
"/",
"quant",
")",
".",
"quantize",
"(",
"1",
",",
"rounding",
"=",
"rounding",
")",
"*",
"quant",
")"
] | round decimal value to nearest quant . | train | false |
20,190 | def check_for_sabnzbd(url, upload_nzbs, allow_browser=True):
if (allow_browser is None):
allow_browser = True
if is_sabnzbd_running(url):
if upload_nzbs:
from sabnzbd.utils.upload import upload_file
prev = sabnzbd.set_https_verification(0)
for f in upload_nzbs:
upload_file(url, f)
sabnzbd.set_https_verification(prev)
else:
url = url[:(url.rfind('/') + 1)]
launch_a_browser(url, force=allow_browser)
exit_sab(0)
return True
return False
| [
"def",
"check_for_sabnzbd",
"(",
"url",
",",
"upload_nzbs",
",",
"allow_browser",
"=",
"True",
")",
":",
"if",
"(",
"allow_browser",
"is",
"None",
")",
":",
"allow_browser",
"=",
"True",
"if",
"is_sabnzbd_running",
"(",
"url",
")",
":",
"if",
"upload_nzbs",
":",
"from",
"sabnzbd",
".",
"utils",
".",
"upload",
"import",
"upload_file",
"prev",
"=",
"sabnzbd",
".",
"set_https_verification",
"(",
"0",
")",
"for",
"f",
"in",
"upload_nzbs",
":",
"upload_file",
"(",
"url",
",",
"f",
")",
"sabnzbd",
".",
"set_https_verification",
"(",
"prev",
")",
"else",
":",
"url",
"=",
"url",
"[",
":",
"(",
"url",
".",
"rfind",
"(",
"'/'",
")",
"+",
"1",
")",
"]",
"launch_a_browser",
"(",
"url",
",",
"force",
"=",
"allow_browser",
")",
"exit_sab",
"(",
"0",
")",
"return",
"True",
"return",
"False"
] | check for a running instance of sabnzbd on this port allow_browser==true|none will launch the browser . | train | false |
20,191 | def site_directory_contains_stale_files(site_directory):
if os.path.exists(site_directory):
if os.listdir(site_directory):
return True
return False
| [
"def",
"site_directory_contains_stale_files",
"(",
"site_directory",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"site_directory",
")",
":",
"if",
"os",
".",
"listdir",
"(",
"site_directory",
")",
":",
"return",
"True",
"return",
"False"
] | check if the site directory contains stale files from a previous build . | train | false |
20,192 | def route_url(route_name, request, *elements, **kw):
return request.route_url(route_name, *elements, **kw)
| [
"def",
"route_url",
"(",
"route_name",
",",
"request",
",",
"*",
"elements",
",",
"**",
"kw",
")",
":",
"return",
"request",
".",
"route_url",
"(",
"route_name",
",",
"*",
"elements",
",",
"**",
"kw",
")"
] | this is a backwards compatibility function . | train | false |
20,193 | def _get_blank_label(dataset):
category_index = dataset.label_name_to_index['category']
category_to_name = dataset.label_to_value_funcs[category_index]
blank_label = 5
try:
blank_name = category_to_name(blank_label)
except ValueError:
return None
assert (blank_name == 'blank')
blank_rowmask = (dataset.y[:, category_index] == blank_label)
blank_labels = dataset.y[blank_rowmask, :]
if (not blank_rowmask.any()):
return None
if (not numpy.all((blank_labels[0, :] == blank_labels[1:, :]))):
raise ValueError("Expected all labels of category 'blank' to have the same value, but they differed.")
return blank_labels[0, :].copy()
| [
"def",
"_get_blank_label",
"(",
"dataset",
")",
":",
"category_index",
"=",
"dataset",
".",
"label_name_to_index",
"[",
"'category'",
"]",
"category_to_name",
"=",
"dataset",
".",
"label_to_value_funcs",
"[",
"category_index",
"]",
"blank_label",
"=",
"5",
"try",
":",
"blank_name",
"=",
"category_to_name",
"(",
"blank_label",
")",
"except",
"ValueError",
":",
"return",
"None",
"assert",
"(",
"blank_name",
"==",
"'blank'",
")",
"blank_rowmask",
"=",
"(",
"dataset",
".",
"y",
"[",
":",
",",
"category_index",
"]",
"==",
"blank_label",
")",
"blank_labels",
"=",
"dataset",
".",
"y",
"[",
"blank_rowmask",
",",
":",
"]",
"if",
"(",
"not",
"blank_rowmask",
".",
"any",
"(",
")",
")",
":",
"return",
"None",
"if",
"(",
"not",
"numpy",
".",
"all",
"(",
"(",
"blank_labels",
"[",
"0",
",",
":",
"]",
"==",
"blank_labels",
"[",
"1",
":",
",",
":",
"]",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Expected all labels of category 'blank' to have the same value, but they differed.\"",
")",
"return",
"blank_labels",
"[",
"0",
",",
":",
"]",
".",
"copy",
"(",
")"
] | returns the label vector associated with blank images . | train | false |
20,194 | def word_break(el, max_width=40, avoid_elements=_avoid_word_break_elements, avoid_classes=_avoid_word_break_classes, break_character=unichr(8203)):
if (el.tag in _avoid_word_break_elements):
return
class_name = el.get('class')
if class_name:
dont_break = False
class_name = class_name.split()
for avoid in avoid_classes:
if (avoid in class_name):
dont_break = True
break
if dont_break:
return
if el.text:
el.text = _break_text(el.text, max_width, break_character)
for child in el:
word_break(child, max_width=max_width, avoid_elements=avoid_elements, avoid_classes=avoid_classes, break_character=break_character)
if child.tail:
child.tail = _break_text(child.tail, max_width, break_character)
| [
"def",
"word_break",
"(",
"el",
",",
"max_width",
"=",
"40",
",",
"avoid_elements",
"=",
"_avoid_word_break_elements",
",",
"avoid_classes",
"=",
"_avoid_word_break_classes",
",",
"break_character",
"=",
"unichr",
"(",
"8203",
")",
")",
":",
"if",
"(",
"el",
".",
"tag",
"in",
"_avoid_word_break_elements",
")",
":",
"return",
"class_name",
"=",
"el",
".",
"get",
"(",
"'class'",
")",
"if",
"class_name",
":",
"dont_break",
"=",
"False",
"class_name",
"=",
"class_name",
".",
"split",
"(",
")",
"for",
"avoid",
"in",
"avoid_classes",
":",
"if",
"(",
"avoid",
"in",
"class_name",
")",
":",
"dont_break",
"=",
"True",
"break",
"if",
"dont_break",
":",
"return",
"if",
"el",
".",
"text",
":",
"el",
".",
"text",
"=",
"_break_text",
"(",
"el",
".",
"text",
",",
"max_width",
",",
"break_character",
")",
"for",
"child",
"in",
"el",
":",
"word_break",
"(",
"child",
",",
"max_width",
"=",
"max_width",
",",
"avoid_elements",
"=",
"avoid_elements",
",",
"avoid_classes",
"=",
"avoid_classes",
",",
"break_character",
"=",
"break_character",
")",
"if",
"child",
".",
"tail",
":",
"child",
".",
"tail",
"=",
"_break_text",
"(",
"child",
".",
"tail",
",",
"max_width",
",",
"break_character",
")"
] | breaks any long words found in the body of the text . | train | true |
20,195 | def load_model_class(model_path):
dot = model_path.rindex('.')
module_name = model_path[:dot]
class_name = model_path[(dot + 1):]
try:
_class = getattr(import_module(module_name), class_name)
return _class
except (ImportError, AttributeError):
raise ImproperlyConfigured(('%s cannot be imported' % model_path))
| [
"def",
"load_model_class",
"(",
"model_path",
")",
":",
"dot",
"=",
"model_path",
".",
"rindex",
"(",
"'.'",
")",
"module_name",
"=",
"model_path",
"[",
":",
"dot",
"]",
"class_name",
"=",
"model_path",
"[",
"(",
"dot",
"+",
"1",
")",
":",
"]",
"try",
":",
"_class",
"=",
"getattr",
"(",
"import_module",
"(",
"module_name",
")",
",",
"class_name",
")",
"return",
"_class",
"except",
"(",
"ImportError",
",",
"AttributeError",
")",
":",
"raise",
"ImproperlyConfigured",
"(",
"(",
"'%s cannot be imported'",
"%",
"model_path",
")",
")"
] | load by import a class by a string path like: module . | train | true |
20,196 | def describe_api_stages(restApiId, deploymentId, region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stages = conn.get_stages(restApiId=restApiId, deploymentId=deploymentId)
return {'stages': [_convert_datetime_str(stage) for stage in stages['item']]}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
| [
"def",
"describe_api_stages",
"(",
"restApiId",
",",
"deploymentId",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"stages",
"=",
"conn",
".",
"get_stages",
"(",
"restApiId",
"=",
"restApiId",
",",
"deploymentId",
"=",
"deploymentId",
")",
"return",
"{",
"'stages'",
":",
"[",
"_convert_datetime_str",
"(",
"stage",
")",
"for",
"stage",
"in",
"stages",
"[",
"'item'",
"]",
"]",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'error'",
":",
"salt",
".",
"utils",
".",
"boto3",
".",
"get_error",
"(",
"e",
")",
"}"
] | get all api stages for a given apiid and deploymentid cli example: . | train | false |
20,197 | def fmatch_best(needle, haystack, min_ratio=0.6):
try:
return sorted(fmatch_iter(needle, haystack, min_ratio), reverse=True)[0][1]
except IndexError:
pass
| [
"def",
"fmatch_best",
"(",
"needle",
",",
"haystack",
",",
"min_ratio",
"=",
"0.6",
")",
":",
"try",
":",
"return",
"sorted",
"(",
"fmatch_iter",
"(",
"needle",
",",
"haystack",
",",
"min_ratio",
")",
",",
"reverse",
"=",
"True",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"except",
"IndexError",
":",
"pass"
] | fuzzy match - find best match . | train | false |
20,198 | def get_func_args(func):
if inspect.isfunction(func):
(func_args, _, _, _) = inspect.getargspec(func)
elif hasattr(func, '__call__'):
try:
(func_args, _, _, _) = inspect.getargspec(func.__call__)
except Exception:
func_args = []
else:
raise TypeError(('%s is not callable' % type(func)))
return func_args
| [
"def",
"get_func_args",
"(",
"func",
")",
":",
"if",
"inspect",
".",
"isfunction",
"(",
"func",
")",
":",
"(",
"func_args",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"elif",
"hasattr",
"(",
"func",
",",
"'__call__'",
")",
":",
"try",
":",
"(",
"func_args",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
".",
"__call__",
")",
"except",
"Exception",
":",
"func_args",
"=",
"[",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"(",
"'%s is not callable'",
"%",
"type",
"(",
"func",
")",
")",
")",
"return",
"func_args"
] | return the argument name list of a callable . | train | false |
20,199 | def Callable(deprecation=None, removal=None, alternative=None, description=None):
def _inner(fun):
@wraps(fun)
def __inner(*args, **kwargs):
from .imports import qualname
warn(description=(description or qualname(fun)), deprecation=deprecation, removal=removal, alternative=alternative, stacklevel=3)
return fun(*args, **kwargs)
return __inner
return _inner
| [
"def",
"Callable",
"(",
"deprecation",
"=",
"None",
",",
"removal",
"=",
"None",
",",
"alternative",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"def",
"_inner",
"(",
"fun",
")",
":",
"@",
"wraps",
"(",
"fun",
")",
"def",
"__inner",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"from",
".",
"imports",
"import",
"qualname",
"warn",
"(",
"description",
"=",
"(",
"description",
"or",
"qualname",
"(",
"fun",
")",
")",
",",
"deprecation",
"=",
"deprecation",
",",
"removal",
"=",
"removal",
",",
"alternative",
"=",
"alternative",
",",
"stacklevel",
"=",
"3",
")",
"return",
"fun",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"__inner",
"return",
"_inner"
] | decorator for deprecated functions . | train | false |
20,200 | def mean_subtraction_cumulation(timeseries):
series = pandas.Series([(x[1] if x[1] else 0) for x in timeseries])
series = (series - series[0:(len(series) - 1)].mean())
stdDev = series[0:(len(series) - 1)].std()
expAverage = pandas.stats.moments.ewma(series, com=15)
return (abs(series.iget((-1))) > (3 * stdDev))
| [
"def",
"mean_subtraction_cumulation",
"(",
"timeseries",
")",
":",
"series",
"=",
"pandas",
".",
"Series",
"(",
"[",
"(",
"x",
"[",
"1",
"]",
"if",
"x",
"[",
"1",
"]",
"else",
"0",
")",
"for",
"x",
"in",
"timeseries",
"]",
")",
"series",
"=",
"(",
"series",
"-",
"series",
"[",
"0",
":",
"(",
"len",
"(",
"series",
")",
"-",
"1",
")",
"]",
".",
"mean",
"(",
")",
")",
"stdDev",
"=",
"series",
"[",
"0",
":",
"(",
"len",
"(",
"series",
")",
"-",
"1",
")",
"]",
".",
"std",
"(",
")",
"expAverage",
"=",
"pandas",
".",
"stats",
".",
"moments",
".",
"ewma",
"(",
"series",
",",
"com",
"=",
"15",
")",
"return",
"(",
"abs",
"(",
"series",
".",
"iget",
"(",
"(",
"-",
"1",
")",
")",
")",
">",
"(",
"3",
"*",
"stdDev",
")",
")"
] | a timeseries is anomalous if the value of the next datapoint in the series is farther than three standard deviations out in cumulative terms after subtracting the mean from each data point . | train | false |
20,201 | def _service_is_sysv(name):
script = '/etc/init.d/{0}'.format(name)
return ((not _service_is_upstart(name)) and os.access(script, os.X_OK))
| [
"def",
"_service_is_sysv",
"(",
"name",
")",
":",
"script",
"=",
"'/etc/init.d/{0}'",
".",
"format",
"(",
"name",
")",
"return",
"(",
"(",
"not",
"_service_is_upstart",
"(",
"name",
")",
")",
"and",
"os",
".",
"access",
"(",
"script",
",",
"os",
".",
"X_OK",
")",
")"
] | return true if the service is a system v service ; otherwise return false . | train | true |
20,202 | def md5_hash_for_file(fname):
with open(fname, 'rb') as f:
md5sum = md5()
for block in iter((lambda : f.read(MD5_BLOCK_READ_BYTES)), ''):
md5sum.update(block)
return md5sum.hexdigest()
| [
"def",
"md5_hash_for_file",
"(",
"fname",
")",
":",
"with",
"open",
"(",
"fname",
",",
"'rb'",
")",
"as",
"f",
":",
"md5sum",
"=",
"md5",
"(",
")",
"for",
"block",
"in",
"iter",
"(",
"(",
"lambda",
":",
"f",
".",
"read",
"(",
"MD5_BLOCK_READ_BYTES",
")",
")",
",",
"''",
")",
":",
"md5sum",
".",
"update",
"(",
"block",
")",
"return",
"md5sum",
".",
"hexdigest",
"(",
")"
] | get the md5 checksum of a file . | train | false |
20,204 | def learning_phase():
graph = tf.get_default_graph()
if (graph not in _GRAPH_LEARNING_PHASES):
phase = tf.placeholder(dtype='bool', name='keras_learning_phase')
_GRAPH_LEARNING_PHASES[graph] = phase
return _GRAPH_LEARNING_PHASES[graph]
| [
"def",
"learning_phase",
"(",
")",
":",
"graph",
"=",
"tf",
".",
"get_default_graph",
"(",
")",
"if",
"(",
"graph",
"not",
"in",
"_GRAPH_LEARNING_PHASES",
")",
":",
"phase",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"'bool'",
",",
"name",
"=",
"'keras_learning_phase'",
")",
"_GRAPH_LEARNING_PHASES",
"[",
"graph",
"]",
"=",
"phase",
"return",
"_GRAPH_LEARNING_PHASES",
"[",
"graph",
"]"
] | returns the learning phase flag . | train | false |
20,205 | def pbvv_seq(v, x):
if (not (isscalar(v) and isscalar(x))):
raise ValueError('arguments must be scalars.')
n = int(v)
v0 = (v - n)
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = (n1 + v0)
(dv, dp, pdf, pdd) = specfun.pbvv(v1, x)
return (dv[:(n1 + 1)], dp[:(n1 + 1)])
| [
"def",
"pbvv_seq",
"(",
"v",
",",
"x",
")",
":",
"if",
"(",
"not",
"(",
"isscalar",
"(",
"v",
")",
"and",
"isscalar",
"(",
"x",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"'arguments must be scalars.'",
")",
"n",
"=",
"int",
"(",
"v",
")",
"v0",
"=",
"(",
"v",
"-",
"n",
")",
"if",
"(",
"n",
"<=",
"1",
")",
":",
"n1",
"=",
"1",
"else",
":",
"n1",
"=",
"n",
"v1",
"=",
"(",
"n1",
"+",
"v0",
")",
"(",
"dv",
",",
"dp",
",",
"pdf",
",",
"pdd",
")",
"=",
"specfun",
".",
"pbvv",
"(",
"v1",
",",
"x",
")",
"return",
"(",
"dv",
"[",
":",
"(",
"n1",
"+",
"1",
")",
"]",
",",
"dp",
"[",
":",
"(",
"n1",
"+",
"1",
")",
"]",
")"
] | parabolic cylinder functions vv(x) and derivatives . | train | false |
20,206 | def _document_redirect_to_create(document_slug, document_locale, slug_dict):
url = reverse('wiki.create', locale=document_locale)
if (slug_dict['length'] > 1):
parent_doc = get_object_or_404(Document, locale=document_locale, slug=slug_dict['parent'], is_template=0)
url = urlparams(url, parent=parent_doc.id, slug=slug_dict['specific'])
else:
url = urlparams(url, slug=document_slug)
return url
| [
"def",
"_document_redirect_to_create",
"(",
"document_slug",
",",
"document_locale",
",",
"slug_dict",
")",
":",
"url",
"=",
"reverse",
"(",
"'wiki.create'",
",",
"locale",
"=",
"document_locale",
")",
"if",
"(",
"slug_dict",
"[",
"'length'",
"]",
">",
"1",
")",
":",
"parent_doc",
"=",
"get_object_or_404",
"(",
"Document",
",",
"locale",
"=",
"document_locale",
",",
"slug",
"=",
"slug_dict",
"[",
"'parent'",
"]",
",",
"is_template",
"=",
"0",
")",
"url",
"=",
"urlparams",
"(",
"url",
",",
"parent",
"=",
"parent_doc",
".",
"id",
",",
"slug",
"=",
"slug_dict",
"[",
"'specific'",
"]",
")",
"else",
":",
"url",
"=",
"urlparams",
"(",
"url",
",",
"slug",
"=",
"document_slug",
")",
"return",
"url"
] | when a document doesnt exist but the user can create it . | train | false |
20,207 | def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
try:
server = profitbricks.get_server(datacenter_id, server_id)
volume_id = server['properties']['bootVolume']['id']
volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
except Exception:
e = get_exception()
module.fail_json(msg=("failed to remove the server's boot volume: %s" % str(e)))
| [
"def",
"_remove_boot_volume",
"(",
"module",
",",
"profitbricks",
",",
"datacenter_id",
",",
"server_id",
")",
":",
"try",
":",
"server",
"=",
"profitbricks",
".",
"get_server",
"(",
"datacenter_id",
",",
"server_id",
")",
"volume_id",
"=",
"server",
"[",
"'properties'",
"]",
"[",
"'bootVolume'",
"]",
"[",
"'id'",
"]",
"volume_response",
"=",
"profitbricks",
".",
"delete_volume",
"(",
"datacenter_id",
",",
"volume_id",
")",
"except",
"Exception",
":",
"e",
"=",
"get_exception",
"(",
")",
"module",
".",
"fail_json",
"(",
"msg",
"=",
"(",
"\"failed to remove the server's boot volume: %s\"",
"%",
"str",
"(",
"e",
")",
")",
")"
] | remove the boot volume from the server . | train | false |
20,210 | @handle_response_format
@treeio_login_required
def liability_add(request, response_format='html'):
liabilities = Object.filter_by_request(request, Liability.objects, mode='r')
if request.POST:
if ('cancel' not in request.POST):
liability = Liability()
form = LiabilityForm(request.user.profile, request.POST, instance=liability)
if form.is_valid():
liability = form.save(commit=False)
liability.source = liability.account.owner
convert(liability, 'value')
liability.set_user_from_request(request)
return HttpResponseRedirect(reverse('finance_liability_view', args=[liability.id]))
else:
return HttpResponseRedirect(reverse('finance_index_liabilities'))
else:
form = LiabilityForm(request.user.profile)
return render_to_response('finance/liability_add', {'form': form, 'liabilities': liabilities}, context_instance=RequestContext(request), response_format=response_format)
| [
"@",
"handle_response_format",
"@",
"treeio_login_required",
"def",
"liability_add",
"(",
"request",
",",
"response_format",
"=",
"'html'",
")",
":",
"liabilities",
"=",
"Object",
".",
"filter_by_request",
"(",
"request",
",",
"Liability",
".",
"objects",
",",
"mode",
"=",
"'r'",
")",
"if",
"request",
".",
"POST",
":",
"if",
"(",
"'cancel'",
"not",
"in",
"request",
".",
"POST",
")",
":",
"liability",
"=",
"Liability",
"(",
")",
"form",
"=",
"LiabilityForm",
"(",
"request",
".",
"user",
".",
"profile",
",",
"request",
".",
"POST",
",",
"instance",
"=",
"liability",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"liability",
"=",
"form",
".",
"save",
"(",
"commit",
"=",
"False",
")",
"liability",
".",
"source",
"=",
"liability",
".",
"account",
".",
"owner",
"convert",
"(",
"liability",
",",
"'value'",
")",
"liability",
".",
"set_user_from_request",
"(",
"request",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'finance_liability_view'",
",",
"args",
"=",
"[",
"liability",
".",
"id",
"]",
")",
")",
"else",
":",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'finance_index_liabilities'",
")",
")",
"else",
":",
"form",
"=",
"LiabilityForm",
"(",
"request",
".",
"user",
".",
"profile",
")",
"return",
"render_to_response",
"(",
"'finance/liability_add'",
",",
"{",
"'form'",
":",
"form",
",",
"'liabilities'",
":",
"liabilities",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
",",
"response_format",
"=",
"response_format",
")"
] | new liability form . | train | false |
20,211 | def synthesize_versioned_document(document, delta, resource_def):
versioned_doc = {}
id_field = versioned_id_field(resource_def)
if (id_field not in delta):
abort(400, description=debug_error_message(('You must include %s in any projection with a version query.' % id_field)))
delta[resource_def['id_field']] = delta[id_field]
del delta[id_field]
fields = versioned_fields(resource_def)
for field in document:
if (field not in fields):
versioned_doc[field] = document[field]
versioned_doc.update(delta)
return versioned_doc
| [
"def",
"synthesize_versioned_document",
"(",
"document",
",",
"delta",
",",
"resource_def",
")",
":",
"versioned_doc",
"=",
"{",
"}",
"id_field",
"=",
"versioned_id_field",
"(",
"resource_def",
")",
"if",
"(",
"id_field",
"not",
"in",
"delta",
")",
":",
"abort",
"(",
"400",
",",
"description",
"=",
"debug_error_message",
"(",
"(",
"'You must include %s in any projection with a version query.'",
"%",
"id_field",
")",
")",
")",
"delta",
"[",
"resource_def",
"[",
"'id_field'",
"]",
"]",
"=",
"delta",
"[",
"id_field",
"]",
"del",
"delta",
"[",
"id_field",
"]",
"fields",
"=",
"versioned_fields",
"(",
"resource_def",
")",
"for",
"field",
"in",
"document",
":",
"if",
"(",
"field",
"not",
"in",
"fields",
")",
":",
"versioned_doc",
"[",
"field",
"]",
"=",
"document",
"[",
"field",
"]",
"versioned_doc",
".",
"update",
"(",
"delta",
")",
"return",
"versioned_doc"
] | synthesizes a versioned document from the latest document and the values of all versioned fields from the old version . | train | false |
20,212 | def cbMailboxList(result, proto):
result = [e[2] for e in result]
s = '\n'.join([('%d. %s' % ((n + 1), m)) for (n, m) in zip(range(len(result)), result)])
if (not s):
return defer.fail(Exception('No mailboxes exist on server!'))
return proto.prompt((s + '\nWhich mailbox? [1] ')).addCallback(cbPickMailbox, proto, result)
| [
"def",
"cbMailboxList",
"(",
"result",
",",
"proto",
")",
":",
"result",
"=",
"[",
"e",
"[",
"2",
"]",
"for",
"e",
"in",
"result",
"]",
"s",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"(",
"'%d. %s'",
"%",
"(",
"(",
"n",
"+",
"1",
")",
",",
"m",
")",
")",
"for",
"(",
"n",
",",
"m",
")",
"in",
"zip",
"(",
"range",
"(",
"len",
"(",
"result",
")",
")",
",",
"result",
")",
"]",
")",
"if",
"(",
"not",
"s",
")",
":",
"return",
"defer",
".",
"fail",
"(",
"Exception",
"(",
"'No mailboxes exist on server!'",
")",
")",
"return",
"proto",
".",
"prompt",
"(",
"(",
"s",
"+",
"'\\nWhich mailbox? [1] '",
")",
")",
".",
"addCallback",
"(",
"cbPickMailbox",
",",
"proto",
",",
"result",
")"
] | callback invoked when a list of mailboxes has been retrieved . | train | false |
20,214 | def get_resource_title(resource, is_list, append_resource=True):
if hasattr(resource, 'verbose_name'):
normalized_title = resource.verbose_name
else:
class_name = resource.__class__.__name__
class_name = class_name.replace('Resource', '')
normalized_title = title(uncamelcase(class_name, ' '))
if is_list:
s = ('%s List' % normalized_title)
else:
s = normalized_title
if append_resource:
s += ' Resource'
return s
| [
"def",
"get_resource_title",
"(",
"resource",
",",
"is_list",
",",
"append_resource",
"=",
"True",
")",
":",
"if",
"hasattr",
"(",
"resource",
",",
"'verbose_name'",
")",
":",
"normalized_title",
"=",
"resource",
".",
"verbose_name",
"else",
":",
"class_name",
"=",
"resource",
".",
"__class__",
".",
"__name__",
"class_name",
"=",
"class_name",
".",
"replace",
"(",
"'Resource'",
",",
"''",
")",
"normalized_title",
"=",
"title",
"(",
"uncamelcase",
"(",
"class_name",
",",
"' '",
")",
")",
"if",
"is_list",
":",
"s",
"=",
"(",
"'%s List'",
"%",
"normalized_title",
")",
"else",
":",
"s",
"=",
"normalized_title",
"if",
"append_resource",
":",
"s",
"+=",
"' Resource'",
"return",
"s"
] | returns a human-readable name for the resource . | train | false |
20,215 | def p_const(p):
try:
val = _cast(p[2])(p[5])
except AssertionError:
raise ThriftParserError(('Type error for constant %s at line %d' % (p[3], p.lineno(3))))
setattr(thrift_stack[(-1)], p[3], val)
_add_thrift_meta('consts', val)
| [
"def",
"p_const",
"(",
"p",
")",
":",
"try",
":",
"val",
"=",
"_cast",
"(",
"p",
"[",
"2",
"]",
")",
"(",
"p",
"[",
"5",
"]",
")",
"except",
"AssertionError",
":",
"raise",
"ThriftParserError",
"(",
"(",
"'Type error for constant %s at line %d'",
"%",
"(",
"p",
"[",
"3",
"]",
",",
"p",
".",
"lineno",
"(",
"3",
")",
")",
")",
")",
"setattr",
"(",
"thrift_stack",
"[",
"(",
"-",
"1",
")",
"]",
",",
"p",
"[",
"3",
"]",
",",
"val",
")",
"_add_thrift_meta",
"(",
"'consts'",
",",
"val",
")"
] | const : const field_type identifier = const_value | const field_type identifier = const_value sep . | train | false |
20,216 | def test_bool():
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert (bool(t) is True)
assert (bool(t[0]) is True)
assert (bool(t[:0]) is False)
| [
"def",
"test_bool",
"(",
")",
":",
"t",
"=",
"Time",
"(",
"np",
".",
"arange",
"(",
"50000",
",",
"50010",
")",
",",
"format",
"=",
"'mjd'",
",",
"scale",
"=",
"'utc'",
")",
"assert",
"(",
"bool",
"(",
"t",
")",
"is",
"True",
")",
"assert",
"(",
"bool",
"(",
"t",
"[",
"0",
"]",
")",
"is",
"True",
")",
"assert",
"(",
"bool",
"(",
"t",
"[",
":",
"0",
"]",
")",
"is",
"False",
")"
] | any time object should evaluate to true unless it is empty [#3520] . | train | false |
20,217 | def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
f = ConvNd(_single(stride), _single(padding), _single(dilation), False, _single(0), groups)
return (f(input, weight, bias) if (bias is not None) else f(input, weight))
| [
"def",
"conv1d",
"(",
"input",
",",
"weight",
",",
"bias",
"=",
"None",
",",
"stride",
"=",
"1",
",",
"padding",
"=",
"0",
",",
"dilation",
"=",
"1",
",",
"groups",
"=",
"1",
")",
":",
"f",
"=",
"ConvNd",
"(",
"_single",
"(",
"stride",
")",
",",
"_single",
"(",
"padding",
")",
",",
"_single",
"(",
"dilation",
")",
",",
"False",
",",
"_single",
"(",
"0",
")",
",",
"groups",
")",
"return",
"(",
"f",
"(",
"input",
",",
"weight",
",",
"bias",
")",
"if",
"(",
"bias",
"is",
"not",
"None",
")",
"else",
"f",
"(",
"input",
",",
"weight",
")",
")"
] | applies a 1d convolution over an input signal composed of several input planes . | train | false |
20,218 | def submit_cohort_students(request, course_key, file_name):
task_type = 'cohort_students'
task_class = cohort_students
task_input = {'file_name': file_name}
task_key = ''
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
| [
"def",
"submit_cohort_students",
"(",
"request",
",",
"course_key",
",",
"file_name",
")",
":",
"task_type",
"=",
"'cohort_students'",
"task_class",
"=",
"cohort_students",
"task_input",
"=",
"{",
"'file_name'",
":",
"file_name",
"}",
"task_key",
"=",
"''",
"return",
"submit_task",
"(",
"request",
",",
"task_type",
",",
"task_class",
",",
"course_key",
",",
"task_input",
",",
"task_key",
")"
] | request to have students cohorted in bulk . | train | false |
20,219 | def solidity_resolve_address(hex_code, library_symbol, library_address):
if library_address.startswith('0x'):
raise ValueError('Address should not contain the 0x prefix')
try:
decode_hex(library_address)
except TypeError:
raise ValueError('library_address contains invalid characters, it must be hex encoded.')
if ((len(library_symbol) != 40) or (len(library_address) != 40)):
raise ValueError('Address with wrong length')
return hex_code.replace(library_symbol, library_address)
| [
"def",
"solidity_resolve_address",
"(",
"hex_code",
",",
"library_symbol",
",",
"library_address",
")",
":",
"if",
"library_address",
".",
"startswith",
"(",
"'0x'",
")",
":",
"raise",
"ValueError",
"(",
"'Address should not contain the 0x prefix'",
")",
"try",
":",
"decode_hex",
"(",
"library_address",
")",
"except",
"TypeError",
":",
"raise",
"ValueError",
"(",
"'library_address contains invalid characters, it must be hex encoded.'",
")",
"if",
"(",
"(",
"len",
"(",
"library_symbol",
")",
"!=",
"40",
")",
"or",
"(",
"len",
"(",
"library_address",
")",
"!=",
"40",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Address with wrong length'",
")",
"return",
"hex_code",
".",
"replace",
"(",
"library_symbol",
",",
"library_address",
")"
] | change the bytecode to use the given library address . | train | true |
20,220 | def getNewRepository():
return ExportRepository()
| [
"def",
"getNewRepository",
"(",
")",
":",
"return",
"ExportRepository",
"(",
")"
] | get the repository constructor . | train | false |
20,221 | def _get_js_translation_entries(filename):
js_entries = set()
for entry in polib.pofile(filename):
if entry.obsolete:
continue
for occ in entry.occurrences:
if occ[0].endswith(u'.js'):
js_entries.add(entry.msgid)
return js_entries
| [
"def",
"_get_js_translation_entries",
"(",
"filename",
")",
":",
"js_entries",
"=",
"set",
"(",
")",
"for",
"entry",
"in",
"polib",
".",
"pofile",
"(",
"filename",
")",
":",
"if",
"entry",
".",
"obsolete",
":",
"continue",
"for",
"occ",
"in",
"entry",
".",
"occurrences",
":",
"if",
"occ",
"[",
"0",
"]",
".",
"endswith",
"(",
"u'.js'",
")",
":",
"js_entries",
".",
"add",
"(",
"entry",
".",
"msgid",
")",
"return",
"js_entries"
] | extract ids of po entries that are used in javascript files . | train | false |
20,223 | def _ovr_decision_function(predictions, confidences, n_classes):
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range((i + 1), n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[((predictions[:, k] == 0), i)] += 1
votes[((predictions[:, k] == 1), j)] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if (max_confidences == min_confidences):
return votes
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = ((0.5 - eps) / max_abs_confidence)
return (votes + (sum_of_confidences * scale))
| [
"def",
"_ovr_decision_function",
"(",
"predictions",
",",
"confidences",
",",
"n_classes",
")",
":",
"n_samples",
"=",
"predictions",
".",
"shape",
"[",
"0",
"]",
"votes",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_samples",
",",
"n_classes",
")",
")",
"sum_of_confidences",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_samples",
",",
"n_classes",
")",
")",
"k",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"n_classes",
")",
":",
"for",
"j",
"in",
"range",
"(",
"(",
"i",
"+",
"1",
")",
",",
"n_classes",
")",
":",
"sum_of_confidences",
"[",
":",
",",
"i",
"]",
"-=",
"confidences",
"[",
":",
",",
"k",
"]",
"sum_of_confidences",
"[",
":",
",",
"j",
"]",
"+=",
"confidences",
"[",
":",
",",
"k",
"]",
"votes",
"[",
"(",
"(",
"predictions",
"[",
":",
",",
"k",
"]",
"==",
"0",
")",
",",
"i",
")",
"]",
"+=",
"1",
"votes",
"[",
"(",
"(",
"predictions",
"[",
":",
",",
"k",
"]",
"==",
"1",
")",
",",
"j",
")",
"]",
"+=",
"1",
"k",
"+=",
"1",
"max_confidences",
"=",
"sum_of_confidences",
".",
"max",
"(",
")",
"min_confidences",
"=",
"sum_of_confidences",
".",
"min",
"(",
")",
"if",
"(",
"max_confidences",
"==",
"min_confidences",
")",
":",
"return",
"votes",
"eps",
"=",
"np",
".",
"finfo",
"(",
"sum_of_confidences",
".",
"dtype",
")",
".",
"eps",
"max_abs_confidence",
"=",
"max",
"(",
"abs",
"(",
"max_confidences",
")",
",",
"abs",
"(",
"min_confidences",
")",
")",
"scale",
"=",
"(",
"(",
"0.5",
"-",
"eps",
")",
"/",
"max_abs_confidence",
")",
"return",
"(",
"votes",
"+",
"(",
"sum_of_confidences",
"*",
"scale",
")",
")"
] | compute a continuous decision function per class from one-vs-one vote counts and confidences . | train | false |
def invalid_example_number(region_code):
    """Return an example invalid fixed-line number for *region_code*.

    Works by truncating the region's valid fixed-line example number one
    digit at a time (down to the minimum NSN length) until a parsable but
    invalid number is produced.  Returns None when the region code is
    unknown, no example number is available, or no invalid truncation is
    found.
    """
    if not _is_valid_region_code(region_code):
        return None
    metadata = PhoneMetadata.metadata_for_region(region_code.upper())
    desc = _number_desc_for_type(metadata, PhoneNumberType.FIXED_LINE)
    if desc.example_number is None:
        return None
    example = desc.example_number
    # Try progressively shorter prefixes of the valid example number.
    for length in range((len(example) - 1), (_MIN_LENGTH_FOR_NSN - 1), -1):
        candidate = example[:length]
        try:
            candidate_number = parse(candidate, region_code)
            if not is_valid_number(candidate_number):
                return candidate_number
        except NumberParseException:
            # Unparsable truncation -- keep shortening.
            pass
    return None
| [
"def",
"invalid_example_number",
"(",
"region_code",
")",
":",
"if",
"(",
"not",
"_is_valid_region_code",
"(",
"region_code",
")",
")",
":",
"return",
"None",
"metadata",
"=",
"PhoneMetadata",
".",
"metadata_for_region",
"(",
"region_code",
".",
"upper",
"(",
")",
")",
"desc",
"=",
"_number_desc_for_type",
"(",
"metadata",
",",
"PhoneNumberType",
".",
"FIXED_LINE",
")",
"if",
"(",
"desc",
".",
"example_number",
"is",
"None",
")",
":",
"return",
"None",
"example_number",
"=",
"desc",
".",
"example_number",
"phone_number_length",
"=",
"(",
"len",
"(",
"example_number",
")",
"-",
"1",
")",
"while",
"(",
"phone_number_length",
">=",
"_MIN_LENGTH_FOR_NSN",
")",
":",
"number_to_try",
"=",
"example_number",
"[",
":",
"phone_number_length",
"]",
"try",
":",
"possibly_valid_number",
"=",
"parse",
"(",
"number_to_try",
",",
"region_code",
")",
"if",
"(",
"not",
"is_valid_number",
"(",
"possibly_valid_number",
")",
")",
":",
"return",
"possibly_valid_number",
"except",
"NumberParseException",
":",
"pass",
"phone_number_length",
"-=",
"1",
"return",
"None"
] | gets an invalid number for the specified region . | train | true |
def breadth_first_search(node, visit=(lambda node: False), traversable=(lambda node, edge: True)):
    """Breadth-first traversal of the graph reachable from *node*.

    ``visit(node)`` is called once per reachable node, in breadth-first
    order; traversal stops and returns True as soon as it returns a truthy
    value.  An edge from ``node`` to neighbour ``n`` is followed unless
    ``traversable(node, edge)`` returns exactly False.  Returns False when
    the whole reachable component is exhausted without a hit.

    Nodes are expected to expose an ``id`` attribute and a ``links``
    collection with an ``edge(n)`` accessor.
    """
    queue = [node]
    head = 0      # cursor into `queue`: O(1) dequeue instead of O(n) list.pop(0)
    visited = {}  # node.id -> True for nodes already processed
    while head < len(queue):
        current = queue[head]
        head += 1
        if current.id in visited:
            continue
        if visit(current):
            return True
        # Enqueue neighbours reachable over a traversable edge; nodes that
        # were already processed are filtered out when dequeued.
        queue.extend(n for n in current.links
                     if traversable(current, current.links.edge(n)) is not False)
        visited[current.id] = True
    return False
| [
"def",
"breadth_first_search",
"(",
"node",
",",
"visit",
"=",
"(",
"lambda",
"node",
":",
"False",
")",
",",
"traversable",
"=",
"(",
"lambda",
"node",
",",
"edge",
":",
"True",
")",
")",
":",
"q",
"=",
"[",
"node",
"]",
"_visited",
"=",
"{",
"}",
"while",
"q",
":",
"node",
"=",
"q",
".",
"pop",
"(",
"0",
")",
"if",
"(",
"not",
"(",
"node",
".",
"id",
"in",
"_visited",
")",
")",
":",
"if",
"visit",
"(",
"node",
")",
":",
"return",
"True",
"q",
".",
"extend",
"(",
"(",
"n",
"for",
"n",
"in",
"node",
".",
"links",
"if",
"(",
"traversable",
"(",
"node",
",",
"node",
".",
"links",
".",
"edge",
"(",
"n",
")",
")",
"is",
"not",
"False",
")",
")",
")",
"_visited",
"[",
"node",
".",
"id",
"]",
"=",
"True",
"return",
"False"
] | visits all the nodes connected to the given root node . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.