id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
21,458 | def list_orphaned_instances(orphaned_instances):
for (vm_ref, vm_rec, orphaned_instance) in orphaned_instances:
if CONF.verbose:
print ('ORPHANED INSTANCE (%s)' % orphaned_instance.name)
else:
print orphaned_instance.name
| [
"def",
"list_orphaned_instances",
"(",
"orphaned_instances",
")",
":",
"for",
"(",
"vm_ref",
",",
"vm_rec",
",",
"orphaned_instance",
")",
"in",
"orphaned_instances",
":",
"if",
"CONF",
".",
"verbose",
":",
"print",
"(",
"'ORPHANED INSTANCE (%s)'",
"%",
"orphaned_instance",
".",
"name",
")",
"else",
":",
"print",
"orphaned_instance",
".",
"name"
] | list orphaned instances . | train | false |
21,459 | @utils.arg('--host', metavar='<hostname>', default=None, help=_('Name of host.'))
@utils.arg('--binary', metavar='<binary>', default=None, help=_('Service binary.'))
def do_service_list(cs, args):
result = cs.services.list(host=args.host, binary=args.binary)
columns = ['Binary', 'Host', 'Zone', 'Status', 'State', 'Updated_at']
if (result and hasattr(result[0], 'disabled_reason')):
columns.append('Disabled Reason')
if (result and hasattr(result[0], 'id')):
columns.insert(0, 'Id')
utils.print_list(result, columns)
| [
"@",
"utils",
".",
"arg",
"(",
"'--host'",
",",
"metavar",
"=",
"'<hostname>'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"_",
"(",
"'Name of host.'",
")",
")",
"@",
"utils",
".",
"arg",
"(",
"'--binary'",
",",
"metavar",
"=",
"'<binary>'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"_",
"(",
"'Service binary.'",
")",
")",
"def",
"do_service_list",
"(",
"cs",
",",
"args",
")",
":",
"result",
"=",
"cs",
".",
"services",
".",
"list",
"(",
"host",
"=",
"args",
".",
"host",
",",
"binary",
"=",
"args",
".",
"binary",
")",
"columns",
"=",
"[",
"'Binary'",
",",
"'Host'",
",",
"'Zone'",
",",
"'Status'",
",",
"'State'",
",",
"'Updated_at'",
"]",
"if",
"(",
"result",
"and",
"hasattr",
"(",
"result",
"[",
"0",
"]",
",",
"'disabled_reason'",
")",
")",
":",
"columns",
".",
"append",
"(",
"'Disabled Reason'",
")",
"if",
"(",
"result",
"and",
"hasattr",
"(",
"result",
"[",
"0",
"]",
",",
"'id'",
")",
")",
":",
"columns",
".",
"insert",
"(",
"0",
",",
"'Id'",
")",
"utils",
".",
"print_list",
"(",
"result",
",",
"columns",
")"
] | show a list of all running services . | train | false |
21,463 | def _compute_next_job_for_tasks(queue_entries, special_tasks):
next_job_id = None
hqe_index = 0
for task in special_tasks:
if task.queue_entry:
next_job_id = task.queue_entry.job.id
elif (task.time_started is not None):
for queue_entry in queue_entries[hqe_index:]:
if (queue_entry.started_on is None):
continue
if (queue_entry.started_on < task.time_started):
break
next_job_id = queue_entry.job.id
task.next_job_id = next_job_id
if (next_job_id is not None):
for queue_entry in queue_entries[hqe_index:]:
if (queue_entry.job.id < next_job_id):
break
hqe_index += 1
| [
"def",
"_compute_next_job_for_tasks",
"(",
"queue_entries",
",",
"special_tasks",
")",
":",
"next_job_id",
"=",
"None",
"hqe_index",
"=",
"0",
"for",
"task",
"in",
"special_tasks",
":",
"if",
"task",
".",
"queue_entry",
":",
"next_job_id",
"=",
"task",
".",
"queue_entry",
".",
"job",
".",
"id",
"elif",
"(",
"task",
".",
"time_started",
"is",
"not",
"None",
")",
":",
"for",
"queue_entry",
"in",
"queue_entries",
"[",
"hqe_index",
":",
"]",
":",
"if",
"(",
"queue_entry",
".",
"started_on",
"is",
"None",
")",
":",
"continue",
"if",
"(",
"queue_entry",
".",
"started_on",
"<",
"task",
".",
"time_started",
")",
":",
"break",
"next_job_id",
"=",
"queue_entry",
".",
"job",
".",
"id",
"task",
".",
"next_job_id",
"=",
"next_job_id",
"if",
"(",
"next_job_id",
"is",
"not",
"None",
")",
":",
"for",
"queue_entry",
"in",
"queue_entries",
"[",
"hqe_index",
":",
"]",
":",
"if",
"(",
"queue_entry",
".",
"job",
".",
"id",
"<",
"next_job_id",
")",
":",
"break",
"hqe_index",
"+=",
"1"
] | for each task . | train | false |
21,464 | def echo_scsi_command(path, content):
args = ['-a', path]
kwargs = dict(process_input=content, run_as_root=True)
utils.execute('tee', *args, **kwargs)
| [
"def",
"echo_scsi_command",
"(",
"path",
",",
"content",
")",
":",
"args",
"=",
"[",
"'-a'",
",",
"path",
"]",
"kwargs",
"=",
"dict",
"(",
"process_input",
"=",
"content",
",",
"run_as_root",
"=",
"True",
")",
"utils",
".",
"execute",
"(",
"'tee'",
",",
"*",
"args",
",",
"**",
"kwargs",
")"
] | used to echo strings to scsi subsystem . | train | false |
21,465 | def make_path_status(r, t=None):
rpath = ('/recipes/' + r)
tpath = ((t and ('/tasks/' + t)) or '')
return ((rpath + tpath) + '/status')
| [
"def",
"make_path_status",
"(",
"r",
",",
"t",
"=",
"None",
")",
":",
"rpath",
"=",
"(",
"'/recipes/'",
"+",
"r",
")",
"tpath",
"=",
"(",
"(",
"t",
"and",
"(",
"'/tasks/'",
"+",
"t",
")",
")",
"or",
"''",
")",
"return",
"(",
"(",
"rpath",
"+",
"tpath",
")",
"+",
"'/status'",
")"
] | converts id into a beaker path to status file given a recipe id and/or a task id . | train | false |
21,466 | def ComputeMD5Hex(byte_str):
hasher = hashlib.md5()
hasher.update(byte_str)
return hasher.hexdigest()
| [
"def",
"ComputeMD5Hex",
"(",
"byte_str",
")",
":",
"hasher",
"=",
"hashlib",
".",
"md5",
"(",
")",
"hasher",
".",
"update",
"(",
"byte_str",
")",
"return",
"hasher",
".",
"hexdigest",
"(",
")"
] | compute md5 hash of "byte_str" and return it encoded as hex string . | train | false |
21,467 | def unallow(ip):
return _access_rule('unallow', ip)
| [
"def",
"unallow",
"(",
"ip",
")",
":",
"return",
"_access_rule",
"(",
"'unallow'",
",",
"ip",
")"
] | remove a rule from the csf denied hosts see :func:_access_rule . | train | false |
21,468 | def spewer(frame, s, ignored):
from twisted.python import reflect
if frame.f_locals.has_key('self'):
se = frame.f_locals['self']
if hasattr(se, '__class__'):
k = reflect.qual(se.__class__)
else:
k = reflect.qual(type(se))
print ('method %s of %s at %s' % (frame.f_code.co_name, k, id(se)))
else:
print ('function %s in %s, line %s' % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno))
| [
"def",
"spewer",
"(",
"frame",
",",
"s",
",",
"ignored",
")",
":",
"from",
"twisted",
".",
"python",
"import",
"reflect",
"if",
"frame",
".",
"f_locals",
".",
"has_key",
"(",
"'self'",
")",
":",
"se",
"=",
"frame",
".",
"f_locals",
"[",
"'self'",
"]",
"if",
"hasattr",
"(",
"se",
",",
"'__class__'",
")",
":",
"k",
"=",
"reflect",
".",
"qual",
"(",
"se",
".",
"__class__",
")",
"else",
":",
"k",
"=",
"reflect",
".",
"qual",
"(",
"type",
"(",
"se",
")",
")",
"print",
"(",
"'method %s of %s at %s'",
"%",
"(",
"frame",
".",
"f_code",
".",
"co_name",
",",
"k",
",",
"id",
"(",
"se",
")",
")",
")",
"else",
":",
"print",
"(",
"'function %s in %s, line %s'",
"%",
"(",
"frame",
".",
"f_code",
".",
"co_name",
",",
"frame",
".",
"f_code",
".",
"co_filename",
",",
"frame",
".",
"f_lineno",
")",
")"
] | a trace function for sys . | train | false |
21,469 | def query_from_params(params):
try:
query = params.pop('q')
except KeyError:
return []
try:
return [(([neg] + pval.split(':', 1)) if (':' in pval) else [neg, pval, None]) for (neg, pval) in (((True, x[1:]) if (x[:1] in '!-') else (False, x)) for x in shlex.split(query))]
except ValueError as exc:
sys.stdout.write(js_alert('param-parsing', 'warning', "Parameter parsing error. Check the server's logs for more information."))
sys.stderr.write(('IVRE: WARNING: parameter parsing error [%s (%r)]\n' % (exc.message, exc)))
sys.exit(0)
| [
"def",
"query_from_params",
"(",
"params",
")",
":",
"try",
":",
"query",
"=",
"params",
".",
"pop",
"(",
"'q'",
")",
"except",
"KeyError",
":",
"return",
"[",
"]",
"try",
":",
"return",
"[",
"(",
"(",
"[",
"neg",
"]",
"+",
"pval",
".",
"split",
"(",
"':'",
",",
"1",
")",
")",
"if",
"(",
"':'",
"in",
"pval",
")",
"else",
"[",
"neg",
",",
"pval",
",",
"None",
"]",
")",
"for",
"(",
"neg",
",",
"pval",
")",
"in",
"(",
"(",
"(",
"True",
",",
"x",
"[",
"1",
":",
"]",
")",
"if",
"(",
"x",
"[",
":",
"1",
"]",
"in",
"'!-'",
")",
"else",
"(",
"False",
",",
"x",
")",
")",
"for",
"x",
"in",
"shlex",
".",
"split",
"(",
"query",
")",
")",
"]",
"except",
"ValueError",
"as",
"exc",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"js_alert",
"(",
"'param-parsing'",
",",
"'warning'",
",",
"\"Parameter parsing error. Check the server's logs for more information.\"",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"(",
"'IVRE: WARNING: parameter parsing error [%s (%r)]\\n'",
"%",
"(",
"exc",
".",
"message",
",",
"exc",
")",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | this function *consumes* the q parameter and returns the query as a list of three elements list: [boolean neg . | train | false |
21,470 | def _fill_buffer(buff, in_data, frame_count, time_info, status_flags):
buff.put(in_data)
return (None, pyaudio.paContinue)
| [
"def",
"_fill_buffer",
"(",
"buff",
",",
"in_data",
",",
"frame_count",
",",
"time_info",
",",
"status_flags",
")",
":",
"buff",
".",
"put",
"(",
"in_data",
")",
"return",
"(",
"None",
",",
"pyaudio",
".",
"paContinue",
")"
] | continuously collect data from the audio stream . | train | false |
21,471 | def generate_error(request, cls, e, tb, include_traceback=False):
import traceback
if hasattr(cls, '_amf_code'):
code = cls._amf_code
else:
code = cls.__name__
details = None
rootCause = None
if include_traceback:
details = traceback.format_exception(cls, e, tb)
rootCause = e
faultDetail = None
faultString = None
if hasattr(e, 'message'):
faultString = unicode(e.message)
elif (hasattr(e, 'args') and e.args):
if isinstance(e.args[0], pyamf.python.str_types):
faultString = unicode(e.args[0])
if details:
faultDetail = unicode(details)
return messaging.ErrorMessage(messageId=generate_random_id(), clientId=generate_random_id(), timestamp=calendar.timegm(time.gmtime()), correlationId=request.messageId, faultCode=code, faultString=faultString, faultDetail=faultDetail, extendedData=details, rootCause=rootCause)
| [
"def",
"generate_error",
"(",
"request",
",",
"cls",
",",
"e",
",",
"tb",
",",
"include_traceback",
"=",
"False",
")",
":",
"import",
"traceback",
"if",
"hasattr",
"(",
"cls",
",",
"'_amf_code'",
")",
":",
"code",
"=",
"cls",
".",
"_amf_code",
"else",
":",
"code",
"=",
"cls",
".",
"__name__",
"details",
"=",
"None",
"rootCause",
"=",
"None",
"if",
"include_traceback",
":",
"details",
"=",
"traceback",
".",
"format_exception",
"(",
"cls",
",",
"e",
",",
"tb",
")",
"rootCause",
"=",
"e",
"faultDetail",
"=",
"None",
"faultString",
"=",
"None",
"if",
"hasattr",
"(",
"e",
",",
"'message'",
")",
":",
"faultString",
"=",
"unicode",
"(",
"e",
".",
"message",
")",
"elif",
"(",
"hasattr",
"(",
"e",
",",
"'args'",
")",
"and",
"e",
".",
"args",
")",
":",
"if",
"isinstance",
"(",
"e",
".",
"args",
"[",
"0",
"]",
",",
"pyamf",
".",
"python",
".",
"str_types",
")",
":",
"faultString",
"=",
"unicode",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
"if",
"details",
":",
"faultDetail",
"=",
"unicode",
"(",
"details",
")",
"return",
"messaging",
".",
"ErrorMessage",
"(",
"messageId",
"=",
"generate_random_id",
"(",
")",
",",
"clientId",
"=",
"generate_random_id",
"(",
")",
",",
"timestamp",
"=",
"calendar",
".",
"timegm",
"(",
"time",
".",
"gmtime",
"(",
")",
")",
",",
"correlationId",
"=",
"request",
".",
"messageId",
",",
"faultCode",
"=",
"code",
",",
"faultString",
"=",
"faultString",
",",
"faultDetail",
"=",
"faultDetail",
",",
"extendedData",
"=",
"details",
",",
"rootCause",
"=",
"rootCause",
")"
] | builds an l{errormessage<pyamf . | train | true |
21,474 | def recognize_log_derivative(a, d, DE, z=None):
z = (z or Dummy('z'))
(a, d) = a.cancel(d, include=True)
(p, a) = a.div(d)
pz = Poly(z, DE.t)
Dd = derivation(d, DE)
q = (a - (pz * Dd))
(r, R) = d.resultant(q, includePRS=True)
r = Poly(r, z)
(Np, Sp) = splitfactor_sqf(r, DE, coefficientD=True, z=z)
for (s, i) in Sp:
a = real_roots(s.as_poly(z))
if any(((not j.is_Integer) for j in a)):
return False
return True
| [
"def",
"recognize_log_derivative",
"(",
"a",
",",
"d",
",",
"DE",
",",
"z",
"=",
"None",
")",
":",
"z",
"=",
"(",
"z",
"or",
"Dummy",
"(",
"'z'",
")",
")",
"(",
"a",
",",
"d",
")",
"=",
"a",
".",
"cancel",
"(",
"d",
",",
"include",
"=",
"True",
")",
"(",
"p",
",",
"a",
")",
"=",
"a",
".",
"div",
"(",
"d",
")",
"pz",
"=",
"Poly",
"(",
"z",
",",
"DE",
".",
"t",
")",
"Dd",
"=",
"derivation",
"(",
"d",
",",
"DE",
")",
"q",
"=",
"(",
"a",
"-",
"(",
"pz",
"*",
"Dd",
")",
")",
"(",
"r",
",",
"R",
")",
"=",
"d",
".",
"resultant",
"(",
"q",
",",
"includePRS",
"=",
"True",
")",
"r",
"=",
"Poly",
"(",
"r",
",",
"z",
")",
"(",
"Np",
",",
"Sp",
")",
"=",
"splitfactor_sqf",
"(",
"r",
",",
"DE",
",",
"coefficientD",
"=",
"True",
",",
"z",
"=",
"z",
")",
"for",
"(",
"s",
",",
"i",
")",
"in",
"Sp",
":",
"a",
"=",
"real_roots",
"(",
"s",
".",
"as_poly",
"(",
"z",
")",
")",
"if",
"any",
"(",
"(",
"(",
"not",
"j",
".",
"is_Integer",
")",
"for",
"j",
"in",
"a",
")",
")",
":",
"return",
"False",
"return",
"True"
] | there exists a v in k(x)* such that f = dv/v where f a rational function if and only if f can be written as f = a/d where d is squarefree . | train | false |
21,475 | def render_power(children):
if (len(children) == 1):
return children[0]
children_latex = [k.latex for k in children if (k.latex != '^')]
children_latex[(-1)] = children[(-1)].sans_parens
raise_power = (lambda x, y: u'{}^{{{}}}'.format(y, x))
latex = reduce(raise_power, reversed(children_latex))
return LatexRendered(latex, tall=True)
| [
"def",
"render_power",
"(",
"children",
")",
":",
"if",
"(",
"len",
"(",
"children",
")",
"==",
"1",
")",
":",
"return",
"children",
"[",
"0",
"]",
"children_latex",
"=",
"[",
"k",
".",
"latex",
"for",
"k",
"in",
"children",
"if",
"(",
"k",
".",
"latex",
"!=",
"'^'",
")",
"]",
"children_latex",
"[",
"(",
"-",
"1",
")",
"]",
"=",
"children",
"[",
"(",
"-",
"1",
")",
"]",
".",
"sans_parens",
"raise_power",
"=",
"(",
"lambda",
"x",
",",
"y",
":",
"u'{}^{{{}}}'",
".",
"format",
"(",
"y",
",",
"x",
")",
")",
"latex",
"=",
"reduce",
"(",
"raise_power",
",",
"reversed",
"(",
"children_latex",
")",
")",
"return",
"LatexRendered",
"(",
"latex",
",",
"tall",
"=",
"True",
")"
] | combine powers so that the latex is wrapped in curly braces correctly . | train | false |
21,476 | def direct_to_template(request, template, extra_context=None, mimetype=None, **kwargs):
if (extra_context is None):
extra_context = {}
dictionary = {'params': kwargs}
for (key, value) in extra_context.items():
if callable(value):
dictionary[key] = value()
else:
dictionary[key] = value
c = RequestContext(request, dictionary)
t = loader.get_template(template)
return HttpResponse(t.render(c), content_type=mimetype)
| [
"def",
"direct_to_template",
"(",
"request",
",",
"template",
",",
"extra_context",
"=",
"None",
",",
"mimetype",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"extra_context",
"is",
"None",
")",
":",
"extra_context",
"=",
"{",
"}",
"dictionary",
"=",
"{",
"'params'",
":",
"kwargs",
"}",
"for",
"(",
"key",
",",
"value",
")",
"in",
"extra_context",
".",
"items",
"(",
")",
":",
"if",
"callable",
"(",
"value",
")",
":",
"dictionary",
"[",
"key",
"]",
"=",
"value",
"(",
")",
"else",
":",
"dictionary",
"[",
"key",
"]",
"=",
"value",
"c",
"=",
"RequestContext",
"(",
"request",
",",
"dictionary",
")",
"t",
"=",
"loader",
".",
"get_template",
"(",
"template",
")",
"return",
"HttpResponse",
"(",
"t",
".",
"render",
"(",
"c",
")",
",",
"content_type",
"=",
"mimetype",
")"
] | render a given template with any extra url parameters in the context as {{ params }} . | train | true |
21,478 | def test_suggested_column_names_from_schema_qualifed_table(completer, complete_event):
text = u'SELECT from custom.products'
position = len(u'SELECT ')
result = set(completer.get_completions(Document(text=text, cursor_position=position), complete_event))
assert (set(result) == set(((testdata.columns(u'products', u'custom') + testdata.functions()) + list((testdata.builtin_functions() + testdata.keywords())))))
| [
"def",
"test_suggested_column_names_from_schema_qualifed_table",
"(",
"completer",
",",
"complete_event",
")",
":",
"text",
"=",
"u'SELECT from custom.products'",
"position",
"=",
"len",
"(",
"u'SELECT '",
")",
"result",
"=",
"set",
"(",
"completer",
".",
"get_completions",
"(",
"Document",
"(",
"text",
"=",
"text",
",",
"cursor_position",
"=",
"position",
")",
",",
"complete_event",
")",
")",
"assert",
"(",
"set",
"(",
"result",
")",
"==",
"set",
"(",
"(",
"(",
"testdata",
".",
"columns",
"(",
"u'products'",
",",
"u'custom'",
")",
"+",
"testdata",
".",
"functions",
"(",
")",
")",
"+",
"list",
"(",
"(",
"testdata",
".",
"builtin_functions",
"(",
")",
"+",
"testdata",
".",
"keywords",
"(",
")",
")",
")",
")",
")",
")"
] | suggest column and function names when selecting from a qualified-table . | train | false |
21,480 | def default_signal_map():
name_map = {'SIGTSTP': None, 'SIGTTIN': None, 'SIGTTOU': None, 'SIGTERM': 'terminate'}
signal_map = {}
for (name, target) in list(name_map.items()):
if hasattr(signal, name):
signal_map[getattr(signal, name)] = target
return signal_map
| [
"def",
"default_signal_map",
"(",
")",
":",
"name_map",
"=",
"{",
"'SIGTSTP'",
":",
"None",
",",
"'SIGTTIN'",
":",
"None",
",",
"'SIGTTOU'",
":",
"None",
",",
"'SIGTERM'",
":",
"'terminate'",
"}",
"signal_map",
"=",
"{",
"}",
"for",
"(",
"name",
",",
"target",
")",
"in",
"list",
"(",
"name_map",
".",
"items",
"(",
")",
")",
":",
"if",
"hasattr",
"(",
"signal",
",",
"name",
")",
":",
"signal_map",
"[",
"getattr",
"(",
"signal",
",",
"name",
")",
"]",
"=",
"target",
"return",
"signal_map"
] | create the default signal map for this system . | train | true |
21,482 | def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False):
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if ((n_cycles.size != 1) and (n_cycles.size != len(freqs))):
raise ValueError('n_cycles should be fixed or defined for each frequency.')
for (k, f) in enumerate(freqs):
if (len(n_cycles) != 1):
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
if (sigma is None):
sigma_t = (this_n_cycles / ((2.0 * np.pi) * f))
else:
sigma_t = (this_n_cycles / ((2.0 * np.pi) * sigma))
t = np.arange(0.0, (5.0 * sigma_t), (1.0 / sfreq))
t = np.r_[((- t[::(-1)]), t[1:])]
oscillation = np.exp(((((2.0 * 1j) * np.pi) * f) * t))
gaussian_enveloppe = np.exp(((- (t ** 2)) / (2.0 * (sigma_t ** 2))))
if zero_mean:
real_offset = np.exp(((-2) * (((np.pi * f) * sigma_t) ** 2)))
oscillation -= real_offset
W = (oscillation * gaussian_enveloppe)
W /= (sqrt(0.5) * linalg.norm(W.ravel()))
Ws.append(W)
return Ws
| [
"def",
"morlet",
"(",
"sfreq",
",",
"freqs",
",",
"n_cycles",
"=",
"7.0",
",",
"sigma",
"=",
"None",
",",
"zero_mean",
"=",
"False",
")",
":",
"Ws",
"=",
"list",
"(",
")",
"n_cycles",
"=",
"np",
".",
"atleast_1d",
"(",
"n_cycles",
")",
"if",
"(",
"(",
"n_cycles",
".",
"size",
"!=",
"1",
")",
"and",
"(",
"n_cycles",
".",
"size",
"!=",
"len",
"(",
"freqs",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"'n_cycles should be fixed or defined for each frequency.'",
")",
"for",
"(",
"k",
",",
"f",
")",
"in",
"enumerate",
"(",
"freqs",
")",
":",
"if",
"(",
"len",
"(",
"n_cycles",
")",
"!=",
"1",
")",
":",
"this_n_cycles",
"=",
"n_cycles",
"[",
"k",
"]",
"else",
":",
"this_n_cycles",
"=",
"n_cycles",
"[",
"0",
"]",
"if",
"(",
"sigma",
"is",
"None",
")",
":",
"sigma_t",
"=",
"(",
"this_n_cycles",
"/",
"(",
"(",
"2.0",
"*",
"np",
".",
"pi",
")",
"*",
"f",
")",
")",
"else",
":",
"sigma_t",
"=",
"(",
"this_n_cycles",
"/",
"(",
"(",
"2.0",
"*",
"np",
".",
"pi",
")",
"*",
"sigma",
")",
")",
"t",
"=",
"np",
".",
"arange",
"(",
"0.0",
",",
"(",
"5.0",
"*",
"sigma_t",
")",
",",
"(",
"1.0",
"/",
"sfreq",
")",
")",
"t",
"=",
"np",
".",
"r_",
"[",
"(",
"(",
"-",
"t",
"[",
":",
":",
"(",
"-",
"1",
")",
"]",
")",
",",
"t",
"[",
"1",
":",
"]",
")",
"]",
"oscillation",
"=",
"np",
".",
"exp",
"(",
"(",
"(",
"(",
"(",
"2.0",
"*",
"1j",
")",
"*",
"np",
".",
"pi",
")",
"*",
"f",
")",
"*",
"t",
")",
")",
"gaussian_enveloppe",
"=",
"np",
".",
"exp",
"(",
"(",
"(",
"-",
"(",
"t",
"**",
"2",
")",
")",
"/",
"(",
"2.0",
"*",
"(",
"sigma_t",
"**",
"2",
")",
")",
")",
")",
"if",
"zero_mean",
":",
"real_offset",
"=",
"np",
".",
"exp",
"(",
"(",
"(",
"-",
"2",
")",
"*",
"(",
"(",
"(",
"np",
".",
"pi",
"*",
"f",
")",
"*",
"sigma_t",
")",
"**",
"2",
")",
")",
")",
"oscillation",
"-=",
"real_offset",
"W",
"=",
"(",
"oscillation",
"*",
"gaussian_enveloppe",
")",
"W",
"/=",
"(",
"sqrt",
"(",
"0.5",
")",
"*",
"linalg",
".",
"norm",
"(",
"W",
".",
"ravel",
"(",
")",
")",
")",
"Ws",
".",
"append",
"(",
"W",
")",
"return",
"Ws"
] | complex morlet wavelet . | train | false |
21,483 | def UpdateUserCredentials(client_id, client_secret, refresh_token, manager_account_id, developer_token):
app_user = AppUser.query((AppUser.user == users.get_current_user())).fetch()[0]
app_user.client_id = client_id
app_user.client_secret = client_secret
app_user.refresh_token = refresh_token
app_user.manager_account_id = manager_account_id
app_user.developer_token = developer_token
app_user.put()
| [
"def",
"UpdateUserCredentials",
"(",
"client_id",
",",
"client_secret",
",",
"refresh_token",
",",
"manager_account_id",
",",
"developer_token",
")",
":",
"app_user",
"=",
"AppUser",
".",
"query",
"(",
"(",
"AppUser",
".",
"user",
"==",
"users",
".",
"get_current_user",
"(",
")",
")",
")",
".",
"fetch",
"(",
")",
"[",
"0",
"]",
"app_user",
".",
"client_id",
"=",
"client_id",
"app_user",
".",
"client_secret",
"=",
"client_secret",
"app_user",
".",
"refresh_token",
"=",
"refresh_token",
"app_user",
".",
"manager_account_id",
"=",
"manager_account_id",
"app_user",
".",
"developer_token",
"=",
"developer_token",
"app_user",
".",
"put",
"(",
")"
] | update the credentials associated with application user . | train | true |
21,484 | def _DoRemapping(element, map):
if ((map is not None) and (element is not None)):
if (not callable(map)):
map = map.get
if (isinstance(element, list) or isinstance(element, tuple)):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
| [
"def",
"_DoRemapping",
"(",
"element",
",",
"map",
")",
":",
"if",
"(",
"(",
"map",
"is",
"not",
"None",
")",
"and",
"(",
"element",
"is",
"not",
"None",
")",
")",
":",
"if",
"(",
"not",
"callable",
"(",
"map",
")",
")",
":",
"map",
"=",
"map",
".",
"get",
"if",
"(",
"isinstance",
"(",
"element",
",",
"list",
")",
"or",
"isinstance",
"(",
"element",
",",
"tuple",
")",
")",
":",
"element",
"=",
"filter",
"(",
"None",
",",
"[",
"map",
"(",
"elem",
")",
"for",
"elem",
"in",
"element",
"]",
")",
"else",
":",
"element",
"=",
"map",
"(",
"element",
")",
"return",
"element"
] | if |element| then remap it through |map| . | train | false |
21,485 | def _find_topomap_coords(info, picks, layout=None):
if (len(picks) == 0):
raise ValueError('Need more than 0 channels.')
if (layout is not None):
chs = [info['chs'][i] for i in picks]
pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
pos = np.asarray(pos)
else:
pos = _auto_topomap_coords(info, picks)
return pos
| [
"def",
"_find_topomap_coords",
"(",
"info",
",",
"picks",
",",
"layout",
"=",
"None",
")",
":",
"if",
"(",
"len",
"(",
"picks",
")",
"==",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'Need more than 0 channels.'",
")",
"if",
"(",
"layout",
"is",
"not",
"None",
")",
":",
"chs",
"=",
"[",
"info",
"[",
"'chs'",
"]",
"[",
"i",
"]",
"for",
"i",
"in",
"picks",
"]",
"pos",
"=",
"[",
"layout",
".",
"pos",
"[",
"layout",
".",
"names",
".",
"index",
"(",
"ch",
"[",
"'ch_name'",
"]",
")",
"]",
"for",
"ch",
"in",
"chs",
"]",
"pos",
"=",
"np",
".",
"asarray",
"(",
"pos",
")",
"else",
":",
"pos",
"=",
"_auto_topomap_coords",
"(",
"info",
",",
"picks",
")",
"return",
"pos"
] | guess the e/meg layout and return appropriate topomap coordinates . | train | false |
21,486 | def query_for_course(course_key, category=None):
if getattr(course_key, 'deprecated', False):
prefix = '_id'
else:
prefix = 'content_son'
dbkey = SON([('{}.tag'.format(prefix), XASSET_LOCATION_TAG), ('{}.org'.format(prefix), course_key.org), ('{}.course'.format(prefix), course_key.course)])
if category:
dbkey['{}.category'.format(prefix)] = category
if getattr(course_key, 'deprecated', False):
dbkey['{}.run'.format(prefix)] = {'$exists': False}
else:
dbkey['{}.run'.format(prefix)] = course_key.run
return dbkey
| [
"def",
"query_for_course",
"(",
"course_key",
",",
"category",
"=",
"None",
")",
":",
"if",
"getattr",
"(",
"course_key",
",",
"'deprecated'",
",",
"False",
")",
":",
"prefix",
"=",
"'_id'",
"else",
":",
"prefix",
"=",
"'content_son'",
"dbkey",
"=",
"SON",
"(",
"[",
"(",
"'{}.tag'",
".",
"format",
"(",
"prefix",
")",
",",
"XASSET_LOCATION_TAG",
")",
",",
"(",
"'{}.org'",
".",
"format",
"(",
"prefix",
")",
",",
"course_key",
".",
"org",
")",
",",
"(",
"'{}.course'",
".",
"format",
"(",
"prefix",
")",
",",
"course_key",
".",
"course",
")",
"]",
")",
"if",
"category",
":",
"dbkey",
"[",
"'{}.category'",
".",
"format",
"(",
"prefix",
")",
"]",
"=",
"category",
"if",
"getattr",
"(",
"course_key",
",",
"'deprecated'",
",",
"False",
")",
":",
"dbkey",
"[",
"'{}.run'",
".",
"format",
"(",
"prefix",
")",
"]",
"=",
"{",
"'$exists'",
":",
"False",
"}",
"else",
":",
"dbkey",
"[",
"'{}.run'",
".",
"format",
"(",
"prefix",
")",
"]",
"=",
"course_key",
".",
"run",
"return",
"dbkey"
] | construct a son object that will query for all assets possibly limited to the given type in the course using the index in mongo_indexes . | train | false |
21,487 | @web.app.route('/content-length')
def total_content_length():
return ('Total content-length recieved: %i' % stats['content-length'])
| [
"@",
"web",
".",
"app",
".",
"route",
"(",
"'/content-length'",
")",
"def",
"total_content_length",
"(",
")",
":",
"return",
"(",
"'Total content-length recieved: %i'",
"%",
"stats",
"[",
"'content-length'",
"]",
")"
] | add a route to the locust web app . | train | false |
21,488 | def css_url():
return (get_bootstrap_setting(u'css_url') or bootstrap_url(u'css/bootstrap.min.css'))
| [
"def",
"css_url",
"(",
")",
":",
"return",
"(",
"get_bootstrap_setting",
"(",
"u'css_url'",
")",
"or",
"bootstrap_url",
"(",
"u'css/bootstrap.min.css'",
")",
")"
] | return the full url to the bootstrap css file . | train | false |
21,489 | def getRemoteIP():
retVal = None
try:
retVal = socket.gethostbyname(conf.hostname)
except socket.gaierror:
errMsg = 'address resolution problem '
errMsg += ("occurred for hostname '%s'" % conf.hostname)
singleTimeLogMessage(errMsg, logging.ERROR)
return retVal
| [
"def",
"getRemoteIP",
"(",
")",
":",
"retVal",
"=",
"None",
"try",
":",
"retVal",
"=",
"socket",
".",
"gethostbyname",
"(",
"conf",
".",
"hostname",
")",
"except",
"socket",
".",
"gaierror",
":",
"errMsg",
"=",
"'address resolution problem '",
"errMsg",
"+=",
"(",
"\"occurred for hostname '%s'\"",
"%",
"conf",
".",
"hostname",
")",
"singleTimeLogMessage",
"(",
"errMsg",
",",
"logging",
".",
"ERROR",
")",
"return",
"retVal"
] | get remote/target ip address . | train | false |
21,490 | def outputters(opts):
ret = LazyLoader(_module_dirs(opts, 'output', ext_type_dirs='outputter_dirs'), opts, tag='output')
wrapped_ret = FilterDictWrapper(ret, '.output')
ret.pack['__salt__'] = wrapped_ret
return wrapped_ret
| [
"def",
"outputters",
"(",
"opts",
")",
":",
"ret",
"=",
"LazyLoader",
"(",
"_module_dirs",
"(",
"opts",
",",
"'output'",
",",
"ext_type_dirs",
"=",
"'outputter_dirs'",
")",
",",
"opts",
",",
"tag",
"=",
"'output'",
")",
"wrapped_ret",
"=",
"FilterDictWrapper",
"(",
"ret",
",",
"'.output'",
")",
"ret",
".",
"pack",
"[",
"'__salt__'",
"]",
"=",
"wrapped_ret",
"return",
"wrapped_ret"
] | returns the outputters modules . | train | true |
21,491 | def get_strict_version(normalized):
return distutils.version.StrictVersion(normalized.replace('.dev', 'a'))
| [
"def",
"get_strict_version",
"(",
"normalized",
")",
":",
"return",
"distutils",
".",
"version",
".",
"StrictVersion",
"(",
"normalized",
".",
"replace",
"(",
"'.dev'",
",",
"'a'",
")",
")"
] | converts a normalized version to a strict version . | train | false |
21,492 | def _initial_nodes(n):
fit = ((0.49082003 * n) - 4.37859653)
turnover = around(fit).astype(int)
ia = arange(1, int((floor((n * 0.5)) + 1)))
ib = ia[::(-1)]
xasq = _initial_nodes_a(n, ia[:(turnover + 1)])
xbsq = _initial_nodes_b(n, ib[(turnover + 1):])
iv = sqrt(hstack([xasq, xbsq]))
if ((n % 2) == 1):
iv = hstack([0.0, iv])
return iv
| [
"def",
"_initial_nodes",
"(",
"n",
")",
":",
"fit",
"=",
"(",
"(",
"0.49082003",
"*",
"n",
")",
"-",
"4.37859653",
")",
"turnover",
"=",
"around",
"(",
"fit",
")",
".",
"astype",
"(",
"int",
")",
"ia",
"=",
"arange",
"(",
"1",
",",
"int",
"(",
"(",
"floor",
"(",
"(",
"n",
"*",
"0.5",
")",
")",
"+",
"1",
")",
")",
")",
"ib",
"=",
"ia",
"[",
":",
":",
"(",
"-",
"1",
")",
"]",
"xasq",
"=",
"_initial_nodes_a",
"(",
"n",
",",
"ia",
"[",
":",
"(",
"turnover",
"+",
"1",
")",
"]",
")",
"xbsq",
"=",
"_initial_nodes_b",
"(",
"n",
",",
"ib",
"[",
"(",
"turnover",
"+",
"1",
")",
":",
"]",
")",
"iv",
"=",
"sqrt",
"(",
"hstack",
"(",
"[",
"xasq",
",",
"xbsq",
"]",
")",
")",
"if",
"(",
"(",
"n",
"%",
"2",
")",
"==",
"1",
")",
":",
"iv",
"=",
"hstack",
"(",
"[",
"0.0",
",",
"iv",
"]",
")",
"return",
"iv"
] | initial guesses for the hermite roots computes an initial approximation to the non-negative roots :math:x_k of the hermite polynomial :math:h_n of order :math:n . | train | false |
21,493 | def get_action_class_instance(action_cls, config=None, action_service=None):
kwargs = {}
kwargs['config'] = config
kwargs['action_service'] = action_service
try:
action_instance = action_cls(**kwargs)
except TypeError as e:
if ("unexpected keyword argument 'action_service'" not in str(e)):
raise e
LOG.debug(('Action class (%s) constructor doesn\'t take "action_service" argument, falling back to late assignment...' % action_cls.__class__.__name__))
action_service = kwargs.pop('action_service', None)
action_instance = action_cls(**kwargs)
action_instance.action_service = action_service
return action_instance
| [
"def",
"get_action_class_instance",
"(",
"action_cls",
",",
"config",
"=",
"None",
",",
"action_service",
"=",
"None",
")",
":",
"kwargs",
"=",
"{",
"}",
"kwargs",
"[",
"'config'",
"]",
"=",
"config",
"kwargs",
"[",
"'action_service'",
"]",
"=",
"action_service",
"try",
":",
"action_instance",
"=",
"action_cls",
"(",
"**",
"kwargs",
")",
"except",
"TypeError",
"as",
"e",
":",
"if",
"(",
"\"unexpected keyword argument 'action_service'\"",
"not",
"in",
"str",
"(",
"e",
")",
")",
":",
"raise",
"e",
"LOG",
".",
"debug",
"(",
"(",
"'Action class (%s) constructor doesn\\'t take \"action_service\" argument, falling back to late assignment...'",
"%",
"action_cls",
".",
"__class__",
".",
"__name__",
")",
")",
"action_service",
"=",
"kwargs",
".",
"pop",
"(",
"'action_service'",
",",
"None",
")",
"action_instance",
"=",
"action_cls",
"(",
"**",
"kwargs",
")",
"action_instance",
".",
"action_service",
"=",
"action_service",
"return",
"action_instance"
] | instantiate and return action class instance . | train | false |
21,494 | def chordal_graph_treewidth(G):
if (not is_chordal(G)):
raise nx.NetworkXError('Input graph is not chordal.')
max_clique = (-1)
for clique in nx.chordal_graph_cliques(G):
max_clique = max(max_clique, len(clique))
return (max_clique - 1)
| [
"def",
"chordal_graph_treewidth",
"(",
"G",
")",
":",
"if",
"(",
"not",
"is_chordal",
"(",
"G",
")",
")",
":",
"raise",
"nx",
".",
"NetworkXError",
"(",
"'Input graph is not chordal.'",
")",
"max_clique",
"=",
"(",
"-",
"1",
")",
"for",
"clique",
"in",
"nx",
".",
"chordal_graph_cliques",
"(",
"G",
")",
":",
"max_clique",
"=",
"max",
"(",
"max_clique",
",",
"len",
"(",
"clique",
")",
")",
"return",
"(",
"max_clique",
"-",
"1",
")"
] | returns the treewidth of the chordal graph g . | train | false |
21,496 | def solexa_quality_from_phred(phred_quality):
if (phred_quality is None):
return None
elif (phred_quality > 0):
return max((-5.0), (10 * log(((10 ** (phred_quality / 10.0)) - 1), 10)))
elif (phred_quality == 0):
return (-5.0)
else:
raise ValueError(('PHRED qualities must be positive (or zero), not %r' % phred_quality))
| [
"def",
"solexa_quality_from_phred",
"(",
"phred_quality",
")",
":",
"if",
"(",
"phred_quality",
"is",
"None",
")",
":",
"return",
"None",
"elif",
"(",
"phred_quality",
">",
"0",
")",
":",
"return",
"max",
"(",
"(",
"-",
"5.0",
")",
",",
"(",
"10",
"*",
"log",
"(",
"(",
"(",
"10",
"**",
"(",
"phred_quality",
"/",
"10.0",
")",
")",
"-",
"1",
")",
",",
"10",
")",
")",
")",
"elif",
"(",
"phred_quality",
"==",
"0",
")",
":",
"return",
"(",
"-",
"5.0",
")",
"else",
":",
"raise",
"ValueError",
"(",
"(",
"'PHRED qualities must be positive (or zero), not %r'",
"%",
"phred_quality",
")",
")"
] | covert a phred quality to a solexa quality . | train | false |
21,497 | def effect_noise(size, sigma):
return Image()._new(core.effect_noise(size, sigma))
| [
"def",
"effect_noise",
"(",
"size",
",",
"sigma",
")",
":",
"return",
"Image",
"(",
")",
".",
"_new",
"(",
"core",
".",
"effect_noise",
"(",
"size",
",",
"sigma",
")",
")"
] | generate gaussian noise centered around 128 . | train | false |
21,498 | def test_max_similarity():
submission = SubmissionFactory.build(similarity=0, mt_similarity=0)
assert (submission.max_similarity == 0)
submission = SubmissionFactory.build(similarity=0.5, mt_similarity=0.6)
assert (submission.max_similarity == 0.6)
submission = SubmissionFactory.build(similarity=0.5, mt_similarity=None)
assert (submission.max_similarity == 0.5)
submission = SubmissionFactory.build(similarity=None, mt_similarity=None)
assert (submission.max_similarity == 0)
| [
"def",
"test_max_similarity",
"(",
")",
":",
"submission",
"=",
"SubmissionFactory",
".",
"build",
"(",
"similarity",
"=",
"0",
",",
"mt_similarity",
"=",
"0",
")",
"assert",
"(",
"submission",
".",
"max_similarity",
"==",
"0",
")",
"submission",
"=",
"SubmissionFactory",
".",
"build",
"(",
"similarity",
"=",
"0.5",
",",
"mt_similarity",
"=",
"0.6",
")",
"assert",
"(",
"submission",
".",
"max_similarity",
"==",
"0.6",
")",
"submission",
"=",
"SubmissionFactory",
".",
"build",
"(",
"similarity",
"=",
"0.5",
",",
"mt_similarity",
"=",
"None",
")",
"assert",
"(",
"submission",
".",
"max_similarity",
"==",
"0.5",
")",
"submission",
"=",
"SubmissionFactory",
".",
"build",
"(",
"similarity",
"=",
"None",
",",
"mt_similarity",
"=",
"None",
")",
"assert",
"(",
"submission",
".",
"max_similarity",
"==",
"0",
")"
] | tests that the maximum similarity is properly returned . | train | false |
21,500 | def assert_true_or_false_with_in(logical_line):
res = (asse_true_false_with_in_or_not_in.search(logical_line) or asse_true_false_with_in_or_not_in_spaces.search(logical_line))
if res:
(yield (0, 'N334: Use assertIn/NotIn(A, B) rather than assertTrue/False(A in/not in B) when checking collection contents.'))
| [
"def",
"assert_true_or_false_with_in",
"(",
"logical_line",
")",
":",
"res",
"=",
"(",
"asse_true_false_with_in_or_not_in",
".",
"search",
"(",
"logical_line",
")",
"or",
"asse_true_false_with_in_or_not_in_spaces",
".",
"search",
"(",
"logical_line",
")",
")",
"if",
"res",
":",
"(",
"yield",
"(",
"0",
",",
"'N334: Use assertIn/NotIn(A, B) rather than assertTrue/False(A in/not in B) when checking collection contents.'",
")",
")"
] | check for asserttrue/false . | train | false |
21,501 | def c_socialize(client):
cmds = ('ooc Hello!', 'ooc Testing ...', 'ooc Testing ... times 2', 'say Yo!', 'emote stands looking around.')
return cmds
| [
"def",
"c_socialize",
"(",
"client",
")",
":",
"cmds",
"=",
"(",
"'ooc Hello!'",
",",
"'ooc Testing ...'",
",",
"'ooc Testing ... times 2'",
",",
"'say Yo!'",
",",
"'emote stands looking around.'",
")",
"return",
"cmds"
] | socializechats on channel . | train | false |
21,502 | def check_libcloud_or_fail():
if (not HAS_LIBCLOUD):
raise LibcloudNotFound('apache-libcloud is required.')
| [
"def",
"check_libcloud_or_fail",
"(",
")",
":",
"if",
"(",
"not",
"HAS_LIBCLOUD",
")",
":",
"raise",
"LibcloudNotFound",
"(",
"'apache-libcloud is required.'",
")"
] | checks if libcloud is installed and fails if not . | train | false |
21,503 | def _relpath_from_file(archive_dir, from_file):
return os.path.relpath(archive_dir, os.path.dirname(from_file))
| [
"def",
"_relpath_from_file",
"(",
"archive_dir",
",",
"from_file",
")",
":",
"return",
"os",
".",
"path",
".",
"relpath",
"(",
"archive_dir",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"from_file",
")",
")"
] | path to a directory from a file . | train | false |
21,504 | def test_regression_4210():
crd = SkyCoord((0 * u.deg), (0 * u.deg), distance=(1 * u.AU))
ecl = crd.geocentrictrueecliptic
ecl.lon
from ..builtin_frames import ecliptic
for frame_name in ecliptic.__all__:
eclcls = getattr(ecliptic, frame_name)
eclobj = eclcls((1 * u.deg), (2 * u.deg), (3 * u.AU))
eclobj.lat
eclobj.lon
eclobj.distance
| [
"def",
"test_regression_4210",
"(",
")",
":",
"crd",
"=",
"SkyCoord",
"(",
"(",
"0",
"*",
"u",
".",
"deg",
")",
",",
"(",
"0",
"*",
"u",
".",
"deg",
")",
",",
"distance",
"=",
"(",
"1",
"*",
"u",
".",
"AU",
")",
")",
"ecl",
"=",
"crd",
".",
"geocentrictrueecliptic",
"ecl",
".",
"lon",
"from",
".",
".",
"builtin_frames",
"import",
"ecliptic",
"for",
"frame_name",
"in",
"ecliptic",
".",
"__all__",
":",
"eclcls",
"=",
"getattr",
"(",
"ecliptic",
",",
"frame_name",
")",
"eclobj",
"=",
"eclcls",
"(",
"(",
"1",
"*",
"u",
".",
"deg",
")",
",",
"(",
"2",
"*",
"u",
".",
"deg",
")",
",",
"(",
"3",
"*",
"u",
".",
"AU",
")",
")",
"eclobj",
".",
"lat",
"eclobj",
".",
"lon",
"eclobj",
".",
"distance"
] | issue: URL related pr with actual change: URL . | train | false |
21,506 | @pytest.fixture
def filesystem_loader():
here = os.path.dirname(os.path.abspath(__file__))
return loaders.FileSystemLoader((here + '/res/templates'))
| [
"@",
"pytest",
".",
"fixture",
"def",
"filesystem_loader",
"(",
")",
":",
"here",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"return",
"loaders",
".",
"FileSystemLoader",
"(",
"(",
"here",
"+",
"'/res/templates'",
")",
")"
] | returns filesystemloader initialized to res/templates directory . | train | false |
21,509 | def addConnectionVertexes(connectionVertexes, geometryOutput):
if (geometryOutput.__class__ == list):
for element in geometryOutput:
addConnectionVertexes(connectionVertexes, element)
return
if (geometryOutput.__class__ != dict):
return
for geometryOutputKey in geometryOutput.keys():
geometryOutputValue = geometryOutput[geometryOutputKey]
if ((geometryOutputKey == 'connectionStart') or (geometryOutputKey == 'connectionEnd')):
connectionVertexes.append(geometryOutputValue)
elif (geometryOutputKey == 'vertex'):
for vertex in geometryOutputValue:
connectionVertexes.append(vertex)
else:
addConnectionVertexes(connectionVertexes, geometryOutputValue)
| [
"def",
"addConnectionVertexes",
"(",
"connectionVertexes",
",",
"geometryOutput",
")",
":",
"if",
"(",
"geometryOutput",
".",
"__class__",
"==",
"list",
")",
":",
"for",
"element",
"in",
"geometryOutput",
":",
"addConnectionVertexes",
"(",
"connectionVertexes",
",",
"element",
")",
"return",
"if",
"(",
"geometryOutput",
".",
"__class__",
"!=",
"dict",
")",
":",
"return",
"for",
"geometryOutputKey",
"in",
"geometryOutput",
".",
"keys",
"(",
")",
":",
"geometryOutputValue",
"=",
"geometryOutput",
"[",
"geometryOutputKey",
"]",
"if",
"(",
"(",
"geometryOutputKey",
"==",
"'connectionStart'",
")",
"or",
"(",
"geometryOutputKey",
"==",
"'connectionEnd'",
")",
")",
":",
"connectionVertexes",
".",
"append",
"(",
"geometryOutputValue",
")",
"elif",
"(",
"geometryOutputKey",
"==",
"'vertex'",
")",
":",
"for",
"vertex",
"in",
"geometryOutputValue",
":",
"connectionVertexes",
".",
"append",
"(",
"vertex",
")",
"else",
":",
"addConnectionVertexes",
"(",
"connectionVertexes",
",",
"geometryOutputValue",
")"
] | add the connections and vertexes . | train | false |
21,510 | def _should_keep_module(name):
return ((name in ('__builtin__', 'sys', 'codecs', 'encodings', 'site', 'google', 'crontab', 'pwd')) or name.startswith('google.') or name.startswith('encodings.') or ('mysql' in name.lower()))
| [
"def",
"_should_keep_module",
"(",
"name",
")",
":",
"return",
"(",
"(",
"name",
"in",
"(",
"'__builtin__'",
",",
"'sys'",
",",
"'codecs'",
",",
"'encodings'",
",",
"'site'",
",",
"'google'",
",",
"'crontab'",
",",
"'pwd'",
")",
")",
"or",
"name",
".",
"startswith",
"(",
"'google.'",
")",
"or",
"name",
".",
"startswith",
"(",
"'encodings.'",
")",
"or",
"(",
"'mysql'",
"in",
"name",
".",
"lower",
"(",
")",
")",
")"
] | returns true if the module should be retained after sandboxing . | train | false |
21,511 | def _CanPlaceOnSingleLine(uwline):
indent_amt = (style.Get(u'INDENT_WIDTH') * uwline.depth)
last = uwline.last
last_index = (-1)
if (last.is_comment and re.search(u'^#+\\s+pylint:', last.value.strip(), re.IGNORECASE)):
last = last.previous_token
last_index = (-2)
if (last is None):
return True
return (((last.total_length + indent_amt) <= style.Get(u'COLUMN_LIMIT')) and (not any((tok.is_comment for tok in uwline.tokens[:last_index]))))
| [
"def",
"_CanPlaceOnSingleLine",
"(",
"uwline",
")",
":",
"indent_amt",
"=",
"(",
"style",
".",
"Get",
"(",
"u'INDENT_WIDTH'",
")",
"*",
"uwline",
".",
"depth",
")",
"last",
"=",
"uwline",
".",
"last",
"last_index",
"=",
"(",
"-",
"1",
")",
"if",
"(",
"last",
".",
"is_comment",
"and",
"re",
".",
"search",
"(",
"u'^#+\\\\s+pylint:'",
",",
"last",
".",
"value",
".",
"strip",
"(",
")",
",",
"re",
".",
"IGNORECASE",
")",
")",
":",
"last",
"=",
"last",
".",
"previous_token",
"last_index",
"=",
"(",
"-",
"2",
")",
"if",
"(",
"last",
"is",
"None",
")",
":",
"return",
"True",
"return",
"(",
"(",
"(",
"last",
".",
"total_length",
"+",
"indent_amt",
")",
"<=",
"style",
".",
"Get",
"(",
"u'COLUMN_LIMIT'",
")",
")",
"and",
"(",
"not",
"any",
"(",
"(",
"tok",
".",
"is_comment",
"for",
"tok",
"in",
"uwline",
".",
"tokens",
"[",
":",
"last_index",
"]",
")",
")",
")",
")"
] | determine if the unwrapped line can go on a single line . | train | false |
21,512 | def alert_history():
output = s3_rest_controller('cap', 'alert_history', rheader=s3db.cap_history_rheader)
return output
| [
"def",
"alert_history",
"(",
")",
":",
"output",
"=",
"s3_rest_controller",
"(",
"'cap'",
",",
"'alert_history'",
",",
"rheader",
"=",
"s3db",
".",
"cap_history_rheader",
")",
"return",
"output"
] | restful crud controller . | train | false |
21,513 | @user_merged.connect
def update_mailchimp_subscription(user, list_name, subscription, send_goodbye=True):
if subscription:
mailchimp_utils.subscribe_mailchimp(list_name, user._id)
else:
try:
mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id, username=user.username, send_goodbye=send_goodbye)
except mailchimp_utils.mailchimp.ListNotSubscribedError:
raise HTTPError(http.BAD_REQUEST, data=dict(message_short='ListNotSubscribedError', message_long='The user is already unsubscribed from this mailing list.', error_type='not_subscribed'))
| [
"@",
"user_merged",
".",
"connect",
"def",
"update_mailchimp_subscription",
"(",
"user",
",",
"list_name",
",",
"subscription",
",",
"send_goodbye",
"=",
"True",
")",
":",
"if",
"subscription",
":",
"mailchimp_utils",
".",
"subscribe_mailchimp",
"(",
"list_name",
",",
"user",
".",
"_id",
")",
"else",
":",
"try",
":",
"mailchimp_utils",
".",
"unsubscribe_mailchimp_async",
"(",
"list_name",
",",
"user",
".",
"_id",
",",
"username",
"=",
"user",
".",
"username",
",",
"send_goodbye",
"=",
"send_goodbye",
")",
"except",
"mailchimp_utils",
".",
"mailchimp",
".",
"ListNotSubscribedError",
":",
"raise",
"HTTPError",
"(",
"http",
".",
"BAD_REQUEST",
",",
"data",
"=",
"dict",
"(",
"message_short",
"=",
"'ListNotSubscribedError'",
",",
"message_long",
"=",
"'The user is already unsubscribed from this mailing list.'",
",",
"error_type",
"=",
"'not_subscribed'",
")",
")"
] | update mailing list subscription in mailchimp . | train | false |
21,516 | def do_default(value, default_value=u'', boolean=False):
if ((boolean and (not value)) or isinstance(value, Undefined)):
return default_value
return value
| [
"def",
"do_default",
"(",
"value",
",",
"default_value",
"=",
"u''",
",",
"boolean",
"=",
"False",
")",
":",
"if",
"(",
"(",
"boolean",
"and",
"(",
"not",
"value",
")",
")",
"or",
"isinstance",
"(",
"value",
",",
"Undefined",
")",
")",
":",
"return",
"default_value",
"return",
"value"
] | if the value is undefined it will return the passed default value . | train | true |
21,517 | @skip('silverlight')
def test_file_multiple_reads():
l = []
for i in xrange(10):
l.append(marshal.dumps({i: i}))
data = ''.join(l)
f = file('tempfile.txt', 'w')
f.write(data)
f.close()
f = file('tempfile.txt')
for i in xrange(10):
obj = marshal.load(f)
AreEqual(obj, {i: i})
f.close()
delete_files('tempfile.txt')
| [
"@",
"skip",
"(",
"'silverlight'",
")",
"def",
"test_file_multiple_reads",
"(",
")",
":",
"l",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"10",
")",
":",
"l",
".",
"append",
"(",
"marshal",
".",
"dumps",
"(",
"{",
"i",
":",
"i",
"}",
")",
")",
"data",
"=",
"''",
".",
"join",
"(",
"l",
")",
"f",
"=",
"file",
"(",
"'tempfile.txt'",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"data",
")",
"f",
".",
"close",
"(",
")",
"f",
"=",
"file",
"(",
"'tempfile.txt'",
")",
"for",
"i",
"in",
"xrange",
"(",
"10",
")",
":",
"obj",
"=",
"marshal",
".",
"load",
"(",
"f",
")",
"AreEqual",
"(",
"obj",
",",
"{",
"i",
":",
"i",
"}",
")",
"f",
".",
"close",
"(",
")",
"delete_files",
"(",
"'tempfile.txt'",
")"
] | calling load w/ a file should only advance the length of the file . | train | false |
21,518 | def _document_frequency(X):
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
| [
"def",
"_document_frequency",
"(",
"X",
")",
":",
"if",
"sp",
".",
"isspmatrix_csr",
"(",
"X",
")",
":",
"return",
"bincount",
"(",
"X",
".",
"indices",
",",
"minlength",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"else",
":",
"return",
"np",
".",
"diff",
"(",
"sp",
".",
"csc_matrix",
"(",
"X",
",",
"copy",
"=",
"False",
")",
".",
"indptr",
")"
] | count the number of non-zero values for each feature in sparse x . | train | true |
21,519 | def bridge_has_port(bridge, is_port_predicate):
try:
ifaces = bridge.get_iface_name_list()
except RuntimeError as e:
LOG.error(_LE('Cannot obtain interface list for bridge %(bridge)s: %(err)s'), {'bridge': bridge.br_name, 'err': e})
return False
return any((iface for iface in ifaces if is_port_predicate(iface)))
| [
"def",
"bridge_has_port",
"(",
"bridge",
",",
"is_port_predicate",
")",
":",
"try",
":",
"ifaces",
"=",
"bridge",
".",
"get_iface_name_list",
"(",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"LOG",
".",
"error",
"(",
"_LE",
"(",
"'Cannot obtain interface list for bridge %(bridge)s: %(err)s'",
")",
",",
"{",
"'bridge'",
":",
"bridge",
".",
"br_name",
",",
"'err'",
":",
"e",
"}",
")",
"return",
"False",
"return",
"any",
"(",
"(",
"iface",
"for",
"iface",
"in",
"ifaces",
"if",
"is_port_predicate",
"(",
"iface",
")",
")",
")"
] | true if there is an ovs port for which is_port_predicate is true . | train | false |
21,520 | def analyse_show(name):
job = SeriesSorter(None, name, None, None)
job.match(force=True)
if job.is_match():
job.get_values()
info = job.show_info
show_name = info.get('show_name', '').replace('.', ' ').replace('_', ' ')
show_name = show_name.replace(' ', ' ')
return (show_name, info.get('season_num', ''), info.get('episode_num', ''), info.get('ep_name', ''))
| [
"def",
"analyse_show",
"(",
"name",
")",
":",
"job",
"=",
"SeriesSorter",
"(",
"None",
",",
"name",
",",
"None",
",",
"None",
")",
"job",
".",
"match",
"(",
"force",
"=",
"True",
")",
"if",
"job",
".",
"is_match",
"(",
")",
":",
"job",
".",
"get_values",
"(",
")",
"info",
"=",
"job",
".",
"show_info",
"show_name",
"=",
"info",
".",
"get",
"(",
"'show_name'",
",",
"''",
")",
".",
"replace",
"(",
"'.'",
",",
"' '",
")",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
"show_name",
"=",
"show_name",
".",
"replace",
"(",
"' '",
",",
"' '",
")",
"return",
"(",
"show_name",
",",
"info",
".",
"get",
"(",
"'season_num'",
",",
"''",
")",
",",
"info",
".",
"get",
"(",
"'episode_num'",
",",
"''",
")",
",",
"info",
".",
"get",
"(",
"'ep_name'",
",",
"''",
")",
")"
] | do a quick seasonsort check and return basic facts . | train | false |
21,521 | def AddDiagnosticSyntaxMatch(line_num, column_num, line_end_num=None, column_end_num=None, is_error=True):
group = (u'YcmErrorSection' if is_error else u'YcmWarningSection')
(line_num, column_num) = LineAndColumnNumbersClamped(line_num, column_num)
if ((not line_end_num) or (not column_end_num)):
return GetIntValue(u"matchadd('{0}', '\\%{1}l\\%{2}c')".format(group, line_num, column_num))
(line_end_num, column_end_num) = LineAndColumnNumbersClamped(line_end_num, (column_end_num - 1))
column_end_num += 1
return GetIntValue(u"matchadd('{0}', '\\%{1}l\\%{2}c\\_.\\{{-}}\\%{3}l\\%{4}c')".format(group, line_num, column_num, line_end_num, column_end_num))
| [
"def",
"AddDiagnosticSyntaxMatch",
"(",
"line_num",
",",
"column_num",
",",
"line_end_num",
"=",
"None",
",",
"column_end_num",
"=",
"None",
",",
"is_error",
"=",
"True",
")",
":",
"group",
"=",
"(",
"u'YcmErrorSection'",
"if",
"is_error",
"else",
"u'YcmWarningSection'",
")",
"(",
"line_num",
",",
"column_num",
")",
"=",
"LineAndColumnNumbersClamped",
"(",
"line_num",
",",
"column_num",
")",
"if",
"(",
"(",
"not",
"line_end_num",
")",
"or",
"(",
"not",
"column_end_num",
")",
")",
":",
"return",
"GetIntValue",
"(",
"u\"matchadd('{0}', '\\\\%{1}l\\\\%{2}c')\"",
".",
"format",
"(",
"group",
",",
"line_num",
",",
"column_num",
")",
")",
"(",
"line_end_num",
",",
"column_end_num",
")",
"=",
"LineAndColumnNumbersClamped",
"(",
"line_end_num",
",",
"(",
"column_end_num",
"-",
"1",
")",
")",
"column_end_num",
"+=",
"1",
"return",
"GetIntValue",
"(",
"u\"matchadd('{0}', '\\\\%{1}l\\\\%{2}c\\\\_.\\\\{{-}}\\\\%{3}l\\\\%{4}c')\"",
".",
"format",
"(",
"group",
",",
"line_num",
",",
"column_num",
",",
"line_end_num",
",",
"column_end_num",
")",
")"
] | highlight a range in the current window starting from included to excluded . | train | false |
21,522 | def normalize_timestamp_to_query_time(df, time, tz, inplace=False, ts_field='timestamp'):
if (not inplace):
df = df.copy()
dtidx = pd.DatetimeIndex(df.loc[:, ts_field], tz='utc')
dtidx_local_time = dtidx.tz_convert(tz)
to_roll_forward = mask_between_time(dtidx_local_time, time, _midnight, include_end=False)
df.loc[(to_roll_forward, ts_field)] = (dtidx_local_time[to_roll_forward] + datetime.timedelta(days=1)).normalize().tz_localize(None).tz_localize('utc').normalize()
df.loc[((~ to_roll_forward), ts_field)] = dtidx[(~ to_roll_forward)].normalize()
return df
| [
"def",
"normalize_timestamp_to_query_time",
"(",
"df",
",",
"time",
",",
"tz",
",",
"inplace",
"=",
"False",
",",
"ts_field",
"=",
"'timestamp'",
")",
":",
"if",
"(",
"not",
"inplace",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"dtidx",
"=",
"pd",
".",
"DatetimeIndex",
"(",
"df",
".",
"loc",
"[",
":",
",",
"ts_field",
"]",
",",
"tz",
"=",
"'utc'",
")",
"dtidx_local_time",
"=",
"dtidx",
".",
"tz_convert",
"(",
"tz",
")",
"to_roll_forward",
"=",
"mask_between_time",
"(",
"dtidx_local_time",
",",
"time",
",",
"_midnight",
",",
"include_end",
"=",
"False",
")",
"df",
".",
"loc",
"[",
"(",
"to_roll_forward",
",",
"ts_field",
")",
"]",
"=",
"(",
"dtidx_local_time",
"[",
"to_roll_forward",
"]",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
")",
".",
"normalize",
"(",
")",
".",
"tz_localize",
"(",
"None",
")",
".",
"tz_localize",
"(",
"'utc'",
")",
".",
"normalize",
"(",
")",
"df",
".",
"loc",
"[",
"(",
"(",
"~",
"to_roll_forward",
")",
",",
"ts_field",
")",
"]",
"=",
"dtidx",
"[",
"(",
"~",
"to_roll_forward",
")",
"]",
".",
"normalize",
"(",
")",
"return",
"df"
] | update the timestamp field of a dataframe to normalize dates around some data query time/timezone . | train | false |
21,523 | def get_apps_root(*append):
return __get_root('apps', *append)
| [
"def",
"get_apps_root",
"(",
"*",
"append",
")",
":",
"return",
"__get_root",
"(",
"'apps'",
",",
"*",
"append",
")"
] | returns the directory for apps . | train | false |
21,524 | def check_rosdeps(name):
def check(n, filename):
nodes = get_nodes_by_name(n, name)
rosdeps = [e.attributes for e in nodes]
names = [d['name'].value for d in rosdeps]
return [ROSDep(n) for n in names]
return check
| [
"def",
"check_rosdeps",
"(",
"name",
")",
":",
"def",
"check",
"(",
"n",
",",
"filename",
")",
":",
"nodes",
"=",
"get_nodes_by_name",
"(",
"n",
",",
"name",
")",
"rosdeps",
"=",
"[",
"e",
".",
"attributes",
"for",
"e",
"in",
"nodes",
"]",
"names",
"=",
"[",
"d",
"[",
"'name'",
"]",
".",
"value",
"for",
"d",
"in",
"rosdeps",
"]",
"return",
"[",
"ROSDep",
"(",
"n",
")",
"for",
"n",
"in",
"names",
"]",
"return",
"check"
] | validator for stack rosdeps . | train | false |
21,526 | def set_aggregate_facts(facts):
all_hostnames = set()
internal_hostnames = set()
kube_svc_ip = first_ip(facts['common']['portal_net'])
if ('common' in facts):
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
all_hostnames.add(facts['common']['ip'])
all_hostnames.add(facts['common']['public_ip'])
facts['common']['kube_svc_ip'] = kube_svc_ip
internal_hostnames.add(facts['common']['hostname'])
internal_hostnames.add(facts['common']['ip'])
cluster_domain = facts['common']['dns_domain']
if ('master' in facts):
if ('cluster_hostname' in facts['master']):
all_hostnames.add(facts['master']['cluster_hostname'])
if ('cluster_public_hostname' in facts['master']):
all_hostnames.add(facts['master']['cluster_public_hostname'])
svc_names = ['openshift', 'openshift.default', 'openshift.default.svc', ('openshift.default.svc.' + cluster_domain), 'kubernetes', 'kubernetes.default', 'kubernetes.default.svc', ('kubernetes.default.svc.' + cluster_domain)]
all_hostnames.update(svc_names)
internal_hostnames.update(svc_names)
all_hostnames.add(kube_svc_ip)
internal_hostnames.add(kube_svc_ip)
facts['common']['all_hostnames'] = list(all_hostnames)
facts['common']['internal_hostnames'] = list(internal_hostnames)
return facts
| [
"def",
"set_aggregate_facts",
"(",
"facts",
")",
":",
"all_hostnames",
"=",
"set",
"(",
")",
"internal_hostnames",
"=",
"set",
"(",
")",
"kube_svc_ip",
"=",
"first_ip",
"(",
"facts",
"[",
"'common'",
"]",
"[",
"'portal_net'",
"]",
")",
"if",
"(",
"'common'",
"in",
"facts",
")",
":",
"all_hostnames",
".",
"add",
"(",
"facts",
"[",
"'common'",
"]",
"[",
"'hostname'",
"]",
")",
"all_hostnames",
".",
"add",
"(",
"facts",
"[",
"'common'",
"]",
"[",
"'public_hostname'",
"]",
")",
"all_hostnames",
".",
"add",
"(",
"facts",
"[",
"'common'",
"]",
"[",
"'ip'",
"]",
")",
"all_hostnames",
".",
"add",
"(",
"facts",
"[",
"'common'",
"]",
"[",
"'public_ip'",
"]",
")",
"facts",
"[",
"'common'",
"]",
"[",
"'kube_svc_ip'",
"]",
"=",
"kube_svc_ip",
"internal_hostnames",
".",
"add",
"(",
"facts",
"[",
"'common'",
"]",
"[",
"'hostname'",
"]",
")",
"internal_hostnames",
".",
"add",
"(",
"facts",
"[",
"'common'",
"]",
"[",
"'ip'",
"]",
")",
"cluster_domain",
"=",
"facts",
"[",
"'common'",
"]",
"[",
"'dns_domain'",
"]",
"if",
"(",
"'master'",
"in",
"facts",
")",
":",
"if",
"(",
"'cluster_hostname'",
"in",
"facts",
"[",
"'master'",
"]",
")",
":",
"all_hostnames",
".",
"add",
"(",
"facts",
"[",
"'master'",
"]",
"[",
"'cluster_hostname'",
"]",
")",
"if",
"(",
"'cluster_public_hostname'",
"in",
"facts",
"[",
"'master'",
"]",
")",
":",
"all_hostnames",
".",
"add",
"(",
"facts",
"[",
"'master'",
"]",
"[",
"'cluster_public_hostname'",
"]",
")",
"svc_names",
"=",
"[",
"'openshift'",
",",
"'openshift.default'",
",",
"'openshift.default.svc'",
",",
"(",
"'openshift.default.svc.'",
"+",
"cluster_domain",
")",
",",
"'kubernetes'",
",",
"'kubernetes.default'",
",",
"'kubernetes.default.svc'",
",",
"(",
"'kubernetes.default.svc.'",
"+",
"cluster_domain",
")",
"]",
"all_hostnames",
".",
"update",
"(",
"svc_names",
")",
"internal_hostnames",
".",
"update",
"(",
"svc_names",
")",
"all_hostnames",
".",
"add",
"(",
"kube_svc_ip",
")",
"internal_hostnames",
".",
"add",
"(",
"kube_svc_ip",
")",
"facts",
"[",
"'common'",
"]",
"[",
"'all_hostnames'",
"]",
"=",
"list",
"(",
"all_hostnames",
")",
"facts",
"[",
"'common'",
"]",
"[",
"'internal_hostnames'",
"]",
"=",
"list",
"(",
"internal_hostnames",
")",
"return",
"facts"
] | set aggregate facts args: facts : existing facts returns: dict: the facts dict updated with aggregated facts . | train | false |
21,527 | def processXMLElementByFunction(manipulationFunction, xmlElement):
if ('target' not in xmlElement.attributeDictionary):
print 'Warning, there was no target in processXMLElementByFunction in solid for:'
print xmlElement
return
target = evaluate.getEvaluatedLinkValue(str(xmlElement.attributeDictionary['target']).strip(), xmlElement)
if (target.__class__.__name__ == 'XMLElement'):
manipulationFunction(target, xmlElement)
return
lineation.processXMLElementByGeometry(target, xmlElement)
manipulationFunction(xmlElement, xmlElement)
| [
"def",
"processXMLElementByFunction",
"(",
"manipulationFunction",
",",
"xmlElement",
")",
":",
"if",
"(",
"'target'",
"not",
"in",
"xmlElement",
".",
"attributeDictionary",
")",
":",
"print",
"'Warning, there was no target in processXMLElementByFunction in solid for:'",
"print",
"xmlElement",
"return",
"target",
"=",
"evaluate",
".",
"getEvaluatedLinkValue",
"(",
"str",
"(",
"xmlElement",
".",
"attributeDictionary",
"[",
"'target'",
"]",
")",
".",
"strip",
"(",
")",
",",
"xmlElement",
")",
"if",
"(",
"target",
".",
"__class__",
".",
"__name__",
"==",
"'XMLElement'",
")",
":",
"manipulationFunction",
"(",
"target",
",",
"xmlElement",
")",
"return",
"lineation",
".",
"processXMLElementByGeometry",
"(",
"target",
",",
"xmlElement",
")",
"manipulationFunction",
"(",
"xmlElement",
",",
"xmlElement",
")"
] | process the xml element by manipulationfunction . | train | false |
21,529 | def _get_convergence_plans(project, service_names):
ret = {}
plans = project._get_convergence_plans(project.get_services(service_names), ConvergenceStrategy.changed)
for cont in plans:
(action, container) = plans[cont]
if (action == 'create'):
ret[cont] = 'Creating container'
elif (action == 'recreate'):
ret[cont] = 'Re-creating container'
elif (action == 'start'):
ret[cont] = 'Starting container'
elif (action == 'noop'):
ret[cont] = 'Container is up to date'
return ret
| [
"def",
"_get_convergence_plans",
"(",
"project",
",",
"service_names",
")",
":",
"ret",
"=",
"{",
"}",
"plans",
"=",
"project",
".",
"_get_convergence_plans",
"(",
"project",
".",
"get_services",
"(",
"service_names",
")",
",",
"ConvergenceStrategy",
".",
"changed",
")",
"for",
"cont",
"in",
"plans",
":",
"(",
"action",
",",
"container",
")",
"=",
"plans",
"[",
"cont",
"]",
"if",
"(",
"action",
"==",
"'create'",
")",
":",
"ret",
"[",
"cont",
"]",
"=",
"'Creating container'",
"elif",
"(",
"action",
"==",
"'recreate'",
")",
":",
"ret",
"[",
"cont",
"]",
"=",
"'Re-creating container'",
"elif",
"(",
"action",
"==",
"'start'",
")",
":",
"ret",
"[",
"cont",
"]",
"=",
"'Starting container'",
"elif",
"(",
"action",
"==",
"'noop'",
")",
":",
"ret",
"[",
"cont",
"]",
"=",
"'Container is up to date'",
"return",
"ret"
] | get action executed for each container . | train | true |
21,530 | def append_position(path, position, separator=''):
(filename, extension) = os.path.splitext(path)
return ''.join([filename, separator, str(position), extension])
| [
"def",
"append_position",
"(",
"path",
",",
"position",
",",
"separator",
"=",
"''",
")",
":",
"(",
"filename",
",",
"extension",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"return",
"''",
".",
"join",
"(",
"[",
"filename",
",",
"separator",
",",
"str",
"(",
"position",
")",
",",
"extension",
"]",
")"
] | concatenate a path and a position . | train | true |
21,532 | def is_eof_layer(layer):
if (int(layer[4], 16) is 254):
return True
return False
| [
"def",
"is_eof_layer",
"(",
"layer",
")",
":",
"if",
"(",
"int",
"(",
"layer",
"[",
"4",
"]",
",",
"16",
")",
"is",
"254",
")",
":",
"return",
"True",
"return",
"False"
] | check if layer is an eof layer . | train | false |
21,534 | def cdf(x, iterations=300):
product = 1.0
taylor_exp = [x]
for i in range(3, iterations, 2):
product *= i
taylor_exp.append((float((x ** i)) / product))
taylor_fact = sum(taylor_exp)
return (0.5 + (taylor_fact * std_normal_pdf.pdf(x, mean=0, std_dev=1)))
| [
"def",
"cdf",
"(",
"x",
",",
"iterations",
"=",
"300",
")",
":",
"product",
"=",
"1.0",
"taylor_exp",
"=",
"[",
"x",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
",",
"iterations",
",",
"2",
")",
":",
"product",
"*=",
"i",
"taylor_exp",
".",
"append",
"(",
"(",
"float",
"(",
"(",
"x",
"**",
"i",
")",
")",
"/",
"product",
")",
")",
"taylor_fact",
"=",
"sum",
"(",
"taylor_exp",
")",
"return",
"(",
"0.5",
"+",
"(",
"taylor_fact",
"*",
"std_normal_pdf",
".",
"pdf",
"(",
"x",
",",
"mean",
"=",
"0",
",",
"std_dev",
"=",
"1",
")",
")",
")"
] | returns the cumulative distribution function at x . | train | false |
21,537 | def split_sections(s):
section = None
content = []
for line in yield_lines(s):
if line.startswith('['):
if line.endswith(']'):
if (section or content):
(yield (section, content))
section = line[1:(-1)].strip()
content = []
else:
raise ValueError('Invalid section heading', line)
else:
content.append(line)
(yield (section, content))
| [
"def",
"split_sections",
"(",
"s",
")",
":",
"section",
"=",
"None",
"content",
"=",
"[",
"]",
"for",
"line",
"in",
"yield_lines",
"(",
"s",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'['",
")",
":",
"if",
"line",
".",
"endswith",
"(",
"']'",
")",
":",
"if",
"(",
"section",
"or",
"content",
")",
":",
"(",
"yield",
"(",
"section",
",",
"content",
")",
")",
"section",
"=",
"line",
"[",
"1",
":",
"(",
"-",
"1",
")",
"]",
".",
"strip",
"(",
")",
"content",
"=",
"[",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid section heading'",
",",
"line",
")",
"else",
":",
"content",
".",
"append",
"(",
"line",
")",
"(",
"yield",
"(",
"section",
",",
"content",
")",
")"
] | split a string or iterable thereof into pairs each section is a stripped version of the section header and each content is a list of stripped lines excluding blank lines and comment-only lines . | train | true |
21,538 | def user_id():
return random.randint(1, 100000000)
| [
"def",
"user_id",
"(",
")",
":",
"return",
"random",
".",
"randint",
"(",
"1",
",",
"100000000",
")"
] | fake user id . | train | false |
21,539 | def should_check():
filename = cache_file()
if (not filename):
return False
if (read(filename).strip() == 'never'):
return False
return (time.time() > (last_check() + update_freq))
| [
"def",
"should_check",
"(",
")",
":",
"filename",
"=",
"cache_file",
"(",
")",
"if",
"(",
"not",
"filename",
")",
":",
"return",
"False",
"if",
"(",
"read",
"(",
"filename",
")",
".",
"strip",
"(",
")",
"==",
"'never'",
")",
":",
"return",
"False",
"return",
"(",
"time",
".",
"time",
"(",
")",
">",
"(",
"last_check",
"(",
")",
"+",
"update_freq",
")",
")"
] | return true if we should check for an update . | train | false |
21,540 | def get_or_create_mrjob_service_role(conn):
for (role_name, role_document) in _yield_roles(conn):
if (role_document != _MRJOB_SERVICE_ROLE):
continue
policy_arns = list(_yield_attached_role_policies(conn, role_name))
if (policy_arns == [_EMR_SERVICE_ROLE_POLICY_ARN]):
return role_name
name = _create_mrjob_role_with_attached_policy(conn, _MRJOB_SERVICE_ROLE, _EMR_SERVICE_ROLE_POLICY_ARN)
log.info(('Auto-created service role %s' % name))
return name
| [
"def",
"get_or_create_mrjob_service_role",
"(",
"conn",
")",
":",
"for",
"(",
"role_name",
",",
"role_document",
")",
"in",
"_yield_roles",
"(",
"conn",
")",
":",
"if",
"(",
"role_document",
"!=",
"_MRJOB_SERVICE_ROLE",
")",
":",
"continue",
"policy_arns",
"=",
"list",
"(",
"_yield_attached_role_policies",
"(",
"conn",
",",
"role_name",
")",
")",
"if",
"(",
"policy_arns",
"==",
"[",
"_EMR_SERVICE_ROLE_POLICY_ARN",
"]",
")",
":",
"return",
"role_name",
"name",
"=",
"_create_mrjob_role_with_attached_policy",
"(",
"conn",
",",
"_MRJOB_SERVICE_ROLE",
",",
"_EMR_SERVICE_ROLE_POLICY_ARN",
")",
"log",
".",
"info",
"(",
"(",
"'Auto-created service role %s'",
"%",
"name",
")",
")",
"return",
"name"
] | look for a usable service role for emr . | train | false |
21,542 | def _parse_text_rule(rule):
if (not rule):
return TrueCheck()
state = ParseState()
for (tok, value) in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
LOG.exception((_('Failed to understand rule %(rule)r') % locals()))
return FalseCheck()
| [
"def",
"_parse_text_rule",
"(",
"rule",
")",
":",
"if",
"(",
"not",
"rule",
")",
":",
"return",
"TrueCheck",
"(",
")",
"state",
"=",
"ParseState",
"(",
")",
"for",
"(",
"tok",
",",
"value",
")",
"in",
"_parse_tokenize",
"(",
"rule",
")",
":",
"state",
".",
"shift",
"(",
"tok",
",",
"value",
")",
"try",
":",
"return",
"state",
".",
"result",
"except",
"ValueError",
":",
"LOG",
".",
"exception",
"(",
"(",
"_",
"(",
"'Failed to understand rule %(rule)r'",
")",
"%",
"locals",
"(",
")",
")",
")",
"return",
"FalseCheck",
"(",
")"
] | translates a policy written in the policy language into a tree of check objects . | train | false |
21,543 | def get_pricing_steps(context, product):
(mod, ctx) = _get_module_and_context(context)
steps = mod.get_pricing_steps(ctx, product)
for module in get_discount_modules():
steps = module.get_pricing_steps(ctx, product, steps)
return steps
| [
"def",
"get_pricing_steps",
"(",
"context",
",",
"product",
")",
":",
"(",
"mod",
",",
"ctx",
")",
"=",
"_get_module_and_context",
"(",
"context",
")",
"steps",
"=",
"mod",
".",
"get_pricing_steps",
"(",
"ctx",
",",
"product",
")",
"for",
"module",
"in",
"get_discount_modules",
"(",
")",
":",
"steps",
"=",
"module",
".",
"get_pricing_steps",
"(",
"ctx",
",",
"product",
",",
"steps",
")",
"return",
"steps"
] | get context-specific list pricing steps for the given product . | train | false |
21,544 | def shelve_server(servers_client, server_id, force_shelve_offload=False):
servers_client.shelve_server(server_id)
offload_time = CONF.compute.shelved_offload_time
if (offload_time >= 0):
waiters.wait_for_server_status(servers_client, server_id, 'SHELVED_OFFLOADED', extra_timeout=offload_time)
else:
waiters.wait_for_server_status(servers_client, server_id, 'SHELVED')
if force_shelve_offload:
servers_client.shelve_offload_server(server_id)
waiters.wait_for_server_status(servers_client, server_id, 'SHELVED_OFFLOADED')
| [
"def",
"shelve_server",
"(",
"servers_client",
",",
"server_id",
",",
"force_shelve_offload",
"=",
"False",
")",
":",
"servers_client",
".",
"shelve_server",
"(",
"server_id",
")",
"offload_time",
"=",
"CONF",
".",
"compute",
".",
"shelved_offload_time",
"if",
"(",
"offload_time",
">=",
"0",
")",
":",
"waiters",
".",
"wait_for_server_status",
"(",
"servers_client",
",",
"server_id",
",",
"'SHELVED_OFFLOADED'",
",",
"extra_timeout",
"=",
"offload_time",
")",
"else",
":",
"waiters",
".",
"wait_for_server_status",
"(",
"servers_client",
",",
"server_id",
",",
"'SHELVED'",
")",
"if",
"force_shelve_offload",
":",
"servers_client",
".",
"shelve_offload_server",
"(",
"server_id",
")",
"waiters",
".",
"wait_for_server_status",
"(",
"servers_client",
",",
"server_id",
",",
"'SHELVED_OFFLOADED'",
")"
] | common wrapper utility to shelve server . | train | false |
21,545 | @socketio.on('disconnect', namespace='/home')
def on_disconnect_home():
pass
| [
"@",
"socketio",
".",
"on",
"(",
"'disconnect'",
",",
"namespace",
"=",
"'/home'",
")",
"def",
"on_disconnect_home",
"(",
")",
":",
"pass"
] | somebody disconnected from the homepage . | train | false |
21,546 | def instance_extra_get_by_instance_uuid(context, instance_uuid, columns=None):
return IMPL.instance_extra_get_by_instance_uuid(context, instance_uuid, columns=columns)
| [
"def",
"instance_extra_get_by_instance_uuid",
"(",
"context",
",",
"instance_uuid",
",",
"columns",
"=",
"None",
")",
":",
"return",
"IMPL",
".",
"instance_extra_get_by_instance_uuid",
"(",
"context",
",",
"instance_uuid",
",",
"columns",
"=",
"columns",
")"
] | get the instance extra record . | train | false |
21,547 | def mutShuffleIndexes(individual, indpb):
size = len(individual)
for i in xrange(size):
if (random.random() < indpb):
swap_indx = random.randint(0, (size - 2))
if (swap_indx >= i):
swap_indx += 1
(individual[i], individual[swap_indx]) = (individual[swap_indx], individual[i])
return (individual,)
| [
"def",
"mutShuffleIndexes",
"(",
"individual",
",",
"indpb",
")",
":",
"size",
"=",
"len",
"(",
"individual",
")",
"for",
"i",
"in",
"xrange",
"(",
"size",
")",
":",
"if",
"(",
"random",
".",
"random",
"(",
")",
"<",
"indpb",
")",
":",
"swap_indx",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"(",
"size",
"-",
"2",
")",
")",
"if",
"(",
"swap_indx",
">=",
"i",
")",
":",
"swap_indx",
"+=",
"1",
"(",
"individual",
"[",
"i",
"]",
",",
"individual",
"[",
"swap_indx",
"]",
")",
"=",
"(",
"individual",
"[",
"swap_indx",
"]",
",",
"individual",
"[",
"i",
"]",
")",
"return",
"(",
"individual",
",",
")"
] | shuffle the attributes of the input individual and return the mutant . | train | false |
21,548 | def init_session_completion():
log.completion.debug('Initializing session completion.')
try:
_instances[usertypes.Completion.sessions].deleteLater()
except KeyError:
pass
model = miscmodels.SessionCompletionModel()
_instances[usertypes.Completion.sessions] = model
| [
"def",
"init_session_completion",
"(",
")",
":",
"log",
".",
"completion",
".",
"debug",
"(",
"'Initializing session completion.'",
")",
"try",
":",
"_instances",
"[",
"usertypes",
".",
"Completion",
".",
"sessions",
"]",
".",
"deleteLater",
"(",
")",
"except",
"KeyError",
":",
"pass",
"model",
"=",
"miscmodels",
".",
"SessionCompletionModel",
"(",
")",
"_instances",
"[",
"usertypes",
".",
"Completion",
".",
"sessions",
"]",
"=",
"model"
] | initialize session completion model . | train | false |
21,549 | def save_objects_to_file(file_name, data_dict):
if (file_name.count('.') == 1):
(_, out_format) = file_name.split('.')
else:
raise ValueError('Invalid file name: {0}'.format(file_name))
if (out_format == 'pkl'):
with open(file_name, 'w') as f:
pickle.dump(data_dict, f)
elif (out_format == 'yml'):
with open(file_name, 'w') as f:
f.write(yaml.dump(data_dict, default_flow_style=False))
elif (out_format == 'json'):
with open(file_name, 'w') as f:
json.dump(data_dict, f)
| [
"def",
"save_objects_to_file",
"(",
"file_name",
",",
"data_dict",
")",
":",
"if",
"(",
"file_name",
".",
"count",
"(",
"'.'",
")",
"==",
"1",
")",
":",
"(",
"_",
",",
"out_format",
")",
"=",
"file_name",
".",
"split",
"(",
"'.'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid file name: {0}'",
".",
"format",
"(",
"file_name",
")",
")",
"if",
"(",
"out_format",
"==",
"'pkl'",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'w'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"data_dict",
",",
"f",
")",
"elif",
"(",
"out_format",
"==",
"'yml'",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"yaml",
".",
"dump",
"(",
"data_dict",
",",
"default_flow_style",
"=",
"False",
")",
")",
"elif",
"(",
"out_format",
"==",
"'json'",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"data_dict",
",",
"f",
")"
] | write the network devices out to a file . | train | false |
21,551 | def printTrace(msg='', indent=4, prefix='|'):
trace = backtrace(1)
print(('[%s] %s\n' % (time.strftime('%H:%M:%S'), msg)))
print(((((' ' * indent) + prefix) + ('=' * 30)) + '>>'))
for line in trace.split('\n'):
print(((((' ' * indent) + prefix) + ' ') + line))
print(((((' ' * indent) + prefix) + ('=' * 30)) + '<<'))
| [
"def",
"printTrace",
"(",
"msg",
"=",
"''",
",",
"indent",
"=",
"4",
",",
"prefix",
"=",
"'|'",
")",
":",
"trace",
"=",
"backtrace",
"(",
"1",
")",
"print",
"(",
"(",
"'[%s] %s\\n'",
"%",
"(",
"time",
".",
"strftime",
"(",
"'%H:%M:%S'",
")",
",",
"msg",
")",
")",
")",
"print",
"(",
"(",
"(",
"(",
"(",
"' '",
"*",
"indent",
")",
"+",
"prefix",
")",
"+",
"(",
"'='",
"*",
"30",
")",
")",
"+",
"'>>'",
")",
")",
"for",
"line",
"in",
"trace",
".",
"split",
"(",
"'\\n'",
")",
":",
"print",
"(",
"(",
"(",
"(",
"(",
"' '",
"*",
"indent",
")",
"+",
"prefix",
")",
"+",
"' '",
")",
"+",
"line",
")",
")",
"print",
"(",
"(",
"(",
"(",
"(",
"' '",
"*",
"indent",
")",
"+",
"prefix",
")",
"+",
"(",
"'='",
"*",
"30",
")",
")",
"+",
"'<<'",
")",
")"
] | print an error message followed by an indented stack trace . | train | false |
21,552 | def _oauth_tokengetter(token=None):
token = session.get('oauth')
log.debug('Token Get: {0}'.format(token))
return token
| [
"def",
"_oauth_tokengetter",
"(",
"token",
"=",
"None",
")",
":",
"token",
"=",
"session",
".",
"get",
"(",
"'oauth'",
")",
"log",
".",
"debug",
"(",
"'Token Get: {0}'",
".",
"format",
"(",
"token",
")",
")",
"return",
"token"
] | default function to return the current user oauth token from session cookie . | train | true |
21,554 | def _fix_artifact(data, window, picks, first_samp, last_samp, mode):
from scipy.interpolate import interp1d
if (mode == 'linear'):
x = np.array([first_samp, last_samp])
f = interp1d(x, data[:, (first_samp, last_samp)][picks])
xnew = np.arange(first_samp, last_samp)
interp_data = f(xnew)
data[picks, first_samp:last_samp] = interp_data
if (mode == 'window'):
data[picks, first_samp:last_samp] = (data[picks, first_samp:last_samp] * window[np.newaxis, :])
| [
"def",
"_fix_artifact",
"(",
"data",
",",
"window",
",",
"picks",
",",
"first_samp",
",",
"last_samp",
",",
"mode",
")",
":",
"from",
"scipy",
".",
"interpolate",
"import",
"interp1d",
"if",
"(",
"mode",
"==",
"'linear'",
")",
":",
"x",
"=",
"np",
".",
"array",
"(",
"[",
"first_samp",
",",
"last_samp",
"]",
")",
"f",
"=",
"interp1d",
"(",
"x",
",",
"data",
"[",
":",
",",
"(",
"first_samp",
",",
"last_samp",
")",
"]",
"[",
"picks",
"]",
")",
"xnew",
"=",
"np",
".",
"arange",
"(",
"first_samp",
",",
"last_samp",
")",
"interp_data",
"=",
"f",
"(",
"xnew",
")",
"data",
"[",
"picks",
",",
"first_samp",
":",
"last_samp",
"]",
"=",
"interp_data",
"if",
"(",
"mode",
"==",
"'window'",
")",
":",
"data",
"[",
"picks",
",",
"first_samp",
":",
"last_samp",
"]",
"=",
"(",
"data",
"[",
"picks",
",",
"first_samp",
":",
"last_samp",
"]",
"*",
"window",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
")"
] | modify original data by using parameter data . | train | false |
21,556 | def fetch_or_load(spec_path):
headers = {}
try:
modified = datetime.utcfromtimestamp(os.path.getmtime(spec_path))
date = modified.strftime('%a, %d %b %Y %I:%M:%S UTC')
headers['If-Modified-Since'] = date
except OSError as error:
if (error.errno != errno.ENOENT):
raise
request = urllib.Request(VALIDATION_SPEC, headers=headers)
response = urllib.urlopen(request)
if (response.code == 200):
with open(spec_path, 'w+b') as spec:
spec.writelines(response)
spec.seek(0)
return html.parse(spec)
with open(spec_path) as spec:
return html.parse(spec)
| [
"def",
"fetch_or_load",
"(",
"spec_path",
")",
":",
"headers",
"=",
"{",
"}",
"try",
":",
"modified",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"spec_path",
")",
")",
"date",
"=",
"modified",
".",
"strftime",
"(",
"'%a, %d %b %Y %I:%M:%S UTC'",
")",
"headers",
"[",
"'If-Modified-Since'",
"]",
"=",
"date",
"except",
"OSError",
"as",
"error",
":",
"if",
"(",
"error",
".",
"errno",
"!=",
"errno",
".",
"ENOENT",
")",
":",
"raise",
"request",
"=",
"urllib",
".",
"Request",
"(",
"VALIDATION_SPEC",
",",
"headers",
"=",
"headers",
")",
"response",
"=",
"urllib",
".",
"urlopen",
"(",
"request",
")",
"if",
"(",
"response",
".",
"code",
"==",
"200",
")",
":",
"with",
"open",
"(",
"spec_path",
",",
"'w+b'",
")",
"as",
"spec",
":",
"spec",
".",
"writelines",
"(",
"response",
")",
"spec",
".",
"seek",
"(",
"0",
")",
"return",
"html",
".",
"parse",
"(",
"spec",
")",
"with",
"open",
"(",
"spec_path",
")",
"as",
"spec",
":",
"return",
"html",
".",
"parse",
"(",
"spec",
")"
] | fetch a new specification or use the cache if its current . | train | true |
21,557 | def has_admin_scope(request):
cookie = request.COOKIES.get(website_settings.COOKIE_NAME)
if cookie:
return bool(get_session_from_cookie(cookie))
token = request.auth
if ((token is None) or (not isinstance(token, CasResponse))):
return False
return set(ComposedScopes.ADMIN_LEVEL).issubset(normalize_scopes(token.attributes['accessTokenScope']))
| [
"def",
"has_admin_scope",
"(",
"request",
")",
":",
"cookie",
"=",
"request",
".",
"COOKIES",
".",
"get",
"(",
"website_settings",
".",
"COOKIE_NAME",
")",
"if",
"cookie",
":",
"return",
"bool",
"(",
"get_session_from_cookie",
"(",
"cookie",
")",
")",
"token",
"=",
"request",
".",
"auth",
"if",
"(",
"(",
"token",
"is",
"None",
")",
"or",
"(",
"not",
"isinstance",
"(",
"token",
",",
"CasResponse",
")",
")",
")",
":",
"return",
"False",
"return",
"set",
"(",
"ComposedScopes",
".",
"ADMIN_LEVEL",
")",
".",
"issubset",
"(",
"normalize_scopes",
"(",
"token",
".",
"attributes",
"[",
"'accessTokenScope'",
"]",
")",
")"
] | helper function to determine if a request should be treated as though it has the osf . | train | false |
21,558 | def is_multicast(text):
try:
first = ord(dns.ipv4.inet_aton(text)[0])
return ((first >= 224) and (first <= 239))
except:
try:
first = ord(dns.ipv6.inet_aton(text)[0])
return (first == 255)
except:
raise ValueError
| [
"def",
"is_multicast",
"(",
"text",
")",
":",
"try",
":",
"first",
"=",
"ord",
"(",
"dns",
".",
"ipv4",
".",
"inet_aton",
"(",
"text",
")",
"[",
"0",
"]",
")",
"return",
"(",
"(",
"first",
">=",
"224",
")",
"and",
"(",
"first",
"<=",
"239",
")",
")",
"except",
":",
"try",
":",
"first",
"=",
"ord",
"(",
"dns",
".",
"ipv6",
".",
"inet_aton",
"(",
"text",
")",
"[",
"0",
"]",
")",
"return",
"(",
"first",
"==",
"255",
")",
"except",
":",
"raise",
"ValueError"
] | is the textual-form network address a multicast address? . | train | true |
21,559 | def print_cli(msg):
try:
try:
print(msg)
except UnicodeEncodeError:
print(msg.encode('utf-8'))
except IOError as exc:
if (exc.errno != errno.EPIPE):
raise
| [
"def",
"print_cli",
"(",
"msg",
")",
":",
"try",
":",
"try",
":",
"print",
"(",
"msg",
")",
"except",
"UnicodeEncodeError",
":",
"print",
"(",
"msg",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"except",
"IOError",
"as",
"exc",
":",
"if",
"(",
"exc",
".",
"errno",
"!=",
"errno",
".",
"EPIPE",
")",
":",
"raise"
] | wrapper around print() that suppresses tracebacks on broken pipes . | train | false |
21,560 | def pars_of_set(wdir, setname):
list = []
for file in os.listdir(wdir):
m = FULLVOLPAR2_RE.search(file)
if (m and (m.group(1) == setname) and m.group(2)):
list.append(file)
return list
| [
"def",
"pars_of_set",
"(",
"wdir",
",",
"setname",
")",
":",
"list",
"=",
"[",
"]",
"for",
"file",
"in",
"os",
".",
"listdir",
"(",
"wdir",
")",
":",
"m",
"=",
"FULLVOLPAR2_RE",
".",
"search",
"(",
"file",
")",
"if",
"(",
"m",
"and",
"(",
"m",
".",
"group",
"(",
"1",
")",
"==",
"setname",
")",
"and",
"m",
".",
"group",
"(",
"2",
")",
")",
":",
"list",
".",
"append",
"(",
"file",
")",
"return",
"list"
] | return list of par2 files matching the set . | train | false |
21,561 | def find_stream(client, stream_name, check_mode=False):
err_msg = ''
success = False
params = {'StreamName': stream_name}
results = dict()
has_more_shards = True
shards = list()
try:
if (not check_mode):
while has_more_shards:
results = client.describe_stream(**params)['StreamDescription']
shards.extend(results.pop('Shards'))
has_more_shards = results['HasMoreShards']
results['Shards'] = shards
results['ShardsCount'] = len(shards)
else:
results = {'HasMoreShards': True, 'RetentionPeriodHours': 24, 'StreamName': stream_name, 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/{0}'.format(stream_name), 'StreamStatus': 'ACTIVE'}
success = True
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return (success, err_msg, results)
| [
"def",
"find_stream",
"(",
"client",
",",
"stream_name",
",",
"check_mode",
"=",
"False",
")",
":",
"err_msg",
"=",
"''",
"success",
"=",
"False",
"params",
"=",
"{",
"'StreamName'",
":",
"stream_name",
"}",
"results",
"=",
"dict",
"(",
")",
"has_more_shards",
"=",
"True",
"shards",
"=",
"list",
"(",
")",
"try",
":",
"if",
"(",
"not",
"check_mode",
")",
":",
"while",
"has_more_shards",
":",
"results",
"=",
"client",
".",
"describe_stream",
"(",
"**",
"params",
")",
"[",
"'StreamDescription'",
"]",
"shards",
".",
"extend",
"(",
"results",
".",
"pop",
"(",
"'Shards'",
")",
")",
"has_more_shards",
"=",
"results",
"[",
"'HasMoreShards'",
"]",
"results",
"[",
"'Shards'",
"]",
"=",
"shards",
"results",
"[",
"'ShardsCount'",
"]",
"=",
"len",
"(",
"shards",
")",
"else",
":",
"results",
"=",
"{",
"'HasMoreShards'",
":",
"True",
",",
"'RetentionPeriodHours'",
":",
"24",
",",
"'StreamName'",
":",
"stream_name",
",",
"'StreamARN'",
":",
"'arn:aws:kinesis:east-side:123456789:stream/{0}'",
".",
"format",
"(",
"stream_name",
")",
",",
"'StreamStatus'",
":",
"'ACTIVE'",
"}",
"success",
"=",
"True",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"e",
":",
"err_msg",
"=",
"str",
"(",
"e",
")",
"return",
"(",
"success",
",",
"err_msg",
",",
"results",
")"
] | retrieve a kinesis stream . | train | false |
21,564 | def validate_password(user, password):
err_msg = None
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_strength(password)
except ValidationError as err:
err_msg = (_('Password: ') + '; '.join(err.messages))
if (not PasswordHistory.is_allowable_password_reuse(user, password)):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
err_msg = ungettext('You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.', 'You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.', num_distinct).format(num=num_distinct)
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
err_msg = ungettext('You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.', 'You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.', num_days).format(num=num_days)
is_password_valid = (err_msg is None)
return (is_password_valid, err_msg)
| [
"def",
"validate_password",
"(",
"user",
",",
"password",
")",
":",
"err_msg",
"=",
"None",
"if",
"settings",
".",
"FEATURES",
".",
"get",
"(",
"'ENFORCE_PASSWORD_POLICY'",
",",
"False",
")",
":",
"try",
":",
"validate_password_strength",
"(",
"password",
")",
"except",
"ValidationError",
"as",
"err",
":",
"err_msg",
"=",
"(",
"_",
"(",
"'Password: '",
")",
"+",
"'; '",
".",
"join",
"(",
"err",
".",
"messages",
")",
")",
"if",
"(",
"not",
"PasswordHistory",
".",
"is_allowable_password_reuse",
"(",
"user",
",",
"password",
")",
")",
":",
"if",
"user",
".",
"is_staff",
":",
"num_distinct",
"=",
"settings",
".",
"ADVANCED_SECURITY_CONFIG",
"[",
"'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE'",
"]",
"else",
":",
"num_distinct",
"=",
"settings",
".",
"ADVANCED_SECURITY_CONFIG",
"[",
"'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE'",
"]",
"err_msg",
"=",
"ungettext",
"(",
"'You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.'",
",",
"'You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.'",
",",
"num_distinct",
")",
".",
"format",
"(",
"num",
"=",
"num_distinct",
")",
"if",
"PasswordHistory",
".",
"is_password_reset_too_soon",
"(",
"user",
")",
":",
"num_days",
"=",
"settings",
".",
"ADVANCED_SECURITY_CONFIG",
"[",
"'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS'",
"]",
"err_msg",
"=",
"ungettext",
"(",
"'You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.'",
",",
"'You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.'",
",",
"num_days",
")",
".",
"format",
"(",
"num",
"=",
"num_days",
")",
"is_password_valid",
"=",
"(",
"err_msg",
"is",
"None",
")",
"return",
"(",
"is_password_valid",
",",
"err_msg",
")"
] | validate whether the password meets all validator requirements . | train | false |
21,565 | def whitespace_around_named_parameter_equals(logical_line, tokens):
parens = 0
no_space = False
prev_end = None
annotated_func_arg = False
in_def = logical_line.startswith('def')
message = 'E251 unexpected spaces around keyword / parameter equals'
for (token_type, text, start, end, line) in tokens:
if (token_type == tokenize.NL):
continue
if no_space:
no_space = False
if (start != prev_end):
(yield (prev_end, message))
if (token_type == tokenize.OP):
if (text == '('):
parens += 1
elif (text == ')'):
parens -= 1
elif (in_def and (text == ':') and (parens == 1)):
annotated_func_arg = True
elif (parens and (text == ',') and (parens == 1)):
annotated_func_arg = False
elif (parens and (text == '=') and (not annotated_func_arg)):
no_space = True
if (start != prev_end):
(yield (prev_end, message))
if (not parens):
annotated_func_arg = False
prev_end = end
| [
"def",
"whitespace_around_named_parameter_equals",
"(",
"logical_line",
",",
"tokens",
")",
":",
"parens",
"=",
"0",
"no_space",
"=",
"False",
"prev_end",
"=",
"None",
"annotated_func_arg",
"=",
"False",
"in_def",
"=",
"logical_line",
".",
"startswith",
"(",
"'def'",
")",
"message",
"=",
"'E251 unexpected spaces around keyword / parameter equals'",
"for",
"(",
"token_type",
",",
"text",
",",
"start",
",",
"end",
",",
"line",
")",
"in",
"tokens",
":",
"if",
"(",
"token_type",
"==",
"tokenize",
".",
"NL",
")",
":",
"continue",
"if",
"no_space",
":",
"no_space",
"=",
"False",
"if",
"(",
"start",
"!=",
"prev_end",
")",
":",
"(",
"yield",
"(",
"prev_end",
",",
"message",
")",
")",
"if",
"(",
"token_type",
"==",
"tokenize",
".",
"OP",
")",
":",
"if",
"(",
"text",
"==",
"'('",
")",
":",
"parens",
"+=",
"1",
"elif",
"(",
"text",
"==",
"')'",
")",
":",
"parens",
"-=",
"1",
"elif",
"(",
"in_def",
"and",
"(",
"text",
"==",
"':'",
")",
"and",
"(",
"parens",
"==",
"1",
")",
")",
":",
"annotated_func_arg",
"=",
"True",
"elif",
"(",
"parens",
"and",
"(",
"text",
"==",
"','",
")",
"and",
"(",
"parens",
"==",
"1",
")",
")",
":",
"annotated_func_arg",
"=",
"False",
"elif",
"(",
"parens",
"and",
"(",
"text",
"==",
"'='",
")",
"and",
"(",
"not",
"annotated_func_arg",
")",
")",
":",
"no_space",
"=",
"True",
"if",
"(",
"start",
"!=",
"prev_end",
")",
":",
"(",
"yield",
"(",
"prev_end",
",",
"message",
")",
")",
"if",
"(",
"not",
"parens",
")",
":",
"annotated_func_arg",
"=",
"False",
"prev_end",
"=",
"end"
] | dont use spaces around the = sign in function arguments . | train | true |
21,568 | def count_calls(callers):
nc = 0
for calls in callers.itervalues():
nc += calls
return nc
| [
"def",
"count_calls",
"(",
"callers",
")",
":",
"nc",
"=",
"0",
"for",
"calls",
"in",
"callers",
".",
"itervalues",
"(",
")",
":",
"nc",
"+=",
"calls",
"return",
"nc"
] | sum the caller statistics to get total number of calls received . | train | false |
21,569 | def sanitize_html_id(html_id):
sanitized_html_id = re.sub('[:-]', '_', html_id)
return sanitized_html_id
| [
"def",
"sanitize_html_id",
"(",
"html_id",
")",
":",
"sanitized_html_id",
"=",
"re",
".",
"sub",
"(",
"'[:-]'",
",",
"'_'",
",",
"html_id",
")",
"return",
"sanitized_html_id"
] | template uses element_id in js function names . | train | false |
21,571 | def index_functional_areas(request):
query = Group.get_functional_areas()
template = 'groups/index_areas.html'
return _list_groups(request, template, query)
| [
"def",
"index_functional_areas",
"(",
"request",
")",
":",
"query",
"=",
"Group",
".",
"get_functional_areas",
"(",
")",
"template",
"=",
"'groups/index_areas.html'",
"return",
"_list_groups",
"(",
"request",
",",
"template",
",",
"query",
")"
] | lists all functional areas . | train | false |
21,572 | def libvlc_media_list_player_play_item_at_index(p_mlp, i_index):
f = (_Cfunctions.get('libvlc_media_list_player_play_item_at_index', None) or _Cfunction('libvlc_media_list_player_play_item_at_index', ((1,), (1,)), None, ctypes.c_int, MediaListPlayer, ctypes.c_int))
return f(p_mlp, i_index)
| [
"def",
"libvlc_media_list_player_play_item_at_index",
"(",
"p_mlp",
",",
"i_index",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_media_list_player_play_item_at_index'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_media_list_player_play_item_at_index'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
")",
",",
"None",
",",
"ctypes",
".",
"c_int",
",",
"MediaListPlayer",
",",
"ctypes",
".",
"c_int",
")",
")",
"return",
"f",
"(",
"p_mlp",
",",
"i_index",
")"
] | play media list item at position index . | train | true |
21,573 | def wrap_db_error(f):
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except sqlalchemy.exc.OperationalError as e:
if (not is_db_connection_error(e.args[0])):
raise
remaining_attempts = _MAX_RETRIES
while True:
LOG.warning(_('SQL connection failed. %d attempts left.'), remaining_attempts)
remaining_attempts -= 1
time.sleep(_RETRY_INTERVAL)
try:
return f(*args, **kwargs)
except sqlalchemy.exc.OperationalError as e:
if ((remaining_attempts == 0) or (not is_db_connection_error(e.args[0]))):
raise
except sqlalchemy.exc.DBAPIError:
raise
except sqlalchemy.exc.DBAPIError:
raise
_wrap.func_name = f.func_name
return _wrap
| [
"def",
"wrap_db_error",
"(",
"f",
")",
":",
"def",
"_wrap",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"try",
":",
"return",
"f",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"except",
"sqlalchemy",
".",
"exc",
".",
"OperationalError",
"as",
"e",
":",
"if",
"(",
"not",
"is_db_connection_error",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
")",
":",
"raise",
"remaining_attempts",
"=",
"_MAX_RETRIES",
"while",
"True",
":",
"LOG",
".",
"warning",
"(",
"_",
"(",
"'SQL connection failed. %d attempts left.'",
")",
",",
"remaining_attempts",
")",
"remaining_attempts",
"-=",
"1",
"time",
".",
"sleep",
"(",
"_RETRY_INTERVAL",
")",
"try",
":",
"return",
"f",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"except",
"sqlalchemy",
".",
"exc",
".",
"OperationalError",
"as",
"e",
":",
"if",
"(",
"(",
"remaining_attempts",
"==",
"0",
")",
"or",
"(",
"not",
"is_db_connection_error",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
")",
")",
":",
"raise",
"except",
"sqlalchemy",
".",
"exc",
".",
"DBAPIError",
":",
"raise",
"except",
"sqlalchemy",
".",
"exc",
".",
"DBAPIError",
":",
"raise",
"_wrap",
".",
"func_name",
"=",
"f",
".",
"func_name",
"return",
"_wrap"
] | retry db connection . | train | false |
21,574 | def _sub_func(expr, sub_dict):
if (expr in sub_dict):
return sub_dict[expr]
elif ((not expr.args) or expr.is_Derivative):
return expr
| [
"def",
"_sub_func",
"(",
"expr",
",",
"sub_dict",
")",
":",
"if",
"(",
"expr",
"in",
"sub_dict",
")",
":",
"return",
"sub_dict",
"[",
"expr",
"]",
"elif",
"(",
"(",
"not",
"expr",
".",
"args",
")",
"or",
"expr",
".",
"is_Derivative",
")",
":",
"return",
"expr"
] | perform direct matching substitution . | train | false |
21,576 | def get_pack_resource_file_abs_path(pack_ref, resource_type, file_path):
path_components = []
if (resource_type == 'action'):
path_components.append('actions/')
elif (resource_type == 'sensor'):
path_components.append('sensors/')
elif (resource_type == 'rule'):
path_components.append('rules/')
else:
raise ValueError(('Invalid resource type: %s' % resource_type))
path_components.append(file_path)
file_path = os.path.join(*path_components)
result = get_pack_file_abs_path(pack_ref=pack_ref, file_path=file_path)
return result
| [
"def",
"get_pack_resource_file_abs_path",
"(",
"pack_ref",
",",
"resource_type",
",",
"file_path",
")",
":",
"path_components",
"=",
"[",
"]",
"if",
"(",
"resource_type",
"==",
"'action'",
")",
":",
"path_components",
".",
"append",
"(",
"'actions/'",
")",
"elif",
"(",
"resource_type",
"==",
"'sensor'",
")",
":",
"path_components",
".",
"append",
"(",
"'sensors/'",
")",
"elif",
"(",
"resource_type",
"==",
"'rule'",
")",
":",
"path_components",
".",
"append",
"(",
"'rules/'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"(",
"'Invalid resource type: %s'",
"%",
"resource_type",
")",
")",
"path_components",
".",
"append",
"(",
"file_path",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"path_components",
")",
"result",
"=",
"get_pack_file_abs_path",
"(",
"pack_ref",
"=",
"pack_ref",
",",
"file_path",
"=",
"file_path",
")",
"return",
"result"
] | retrieve full absolute path to the pack resource file . | train | false |
21,578 | def test_get_syslog_facility_case_insensitive(monkeypatch):
for low_name in ([('local' + str(n)) for n in range(8)] + ['user']):
monkeypatch.setenv('WALE_SYSLOG_FACILITY', low_name)
(out, valid_facility) = log_help.get_syslog_facility()
assert (valid_facility is True)
monkeypatch.setenv('WALE_SYSLOG_FACILITY', low_name.upper())
(out, valid_facility) = log_help.get_syslog_facility()
assert (valid_facility is True)
| [
"def",
"test_get_syslog_facility_case_insensitive",
"(",
"monkeypatch",
")",
":",
"for",
"low_name",
"in",
"(",
"[",
"(",
"'local'",
"+",
"str",
"(",
"n",
")",
")",
"for",
"n",
"in",
"range",
"(",
"8",
")",
"]",
"+",
"[",
"'user'",
"]",
")",
":",
"monkeypatch",
".",
"setenv",
"(",
"'WALE_SYSLOG_FACILITY'",
",",
"low_name",
")",
"(",
"out",
",",
"valid_facility",
")",
"=",
"log_help",
".",
"get_syslog_facility",
"(",
")",
"assert",
"(",
"valid_facility",
"is",
"True",
")",
"monkeypatch",
".",
"setenv",
"(",
"'WALE_SYSLOG_FACILITY'",
",",
"low_name",
".",
"upper",
"(",
")",
")",
"(",
"out",
",",
"valid_facility",
")",
"=",
"log_help",
".",
"get_syslog_facility",
"(",
")",
"assert",
"(",
"valid_facility",
"is",
"True",
")"
] | wale_syslog_facility is case insensitive . | train | false |
21,580 | def update_block(block):
return modulestore().update_item(block, ModuleStoreEnum.UserID.test)
| [
"def",
"update_block",
"(",
"block",
")",
":",
"return",
"modulestore",
"(",
")",
".",
"update_item",
"(",
"block",
",",
"ModuleStoreEnum",
".",
"UserID",
".",
"test",
")"
] | helper method to update the block in the modulestore . | train | false |
21,582 | def _user_profile(user_profile):
return {'profile': {'image': user_profile['profile_image']}}
| [
"def",
"_user_profile",
"(",
"user_profile",
")",
":",
"return",
"{",
"'profile'",
":",
"{",
"'image'",
":",
"user_profile",
"[",
"'profile_image'",
"]",
"}",
"}"
] | returns the user profile object . | train | false |
21,583 | def get_footer(is_secure=True):
return {'copyright': _footer_copyright(), 'logo_image': _footer_logo_img(is_secure), 'social_links': _footer_social_links(), 'navigation_links': _footer_navigation_links(), 'mobile_links': _footer_mobile_links(is_secure), 'legal_links': _footer_legal_links(), 'openedx_link': _footer_openedx_link()}
| [
"def",
"get_footer",
"(",
"is_secure",
"=",
"True",
")",
":",
"return",
"{",
"'copyright'",
":",
"_footer_copyright",
"(",
")",
",",
"'logo_image'",
":",
"_footer_logo_img",
"(",
"is_secure",
")",
",",
"'social_links'",
":",
"_footer_social_links",
"(",
")",
",",
"'navigation_links'",
":",
"_footer_navigation_links",
"(",
")",
",",
"'mobile_links'",
":",
"_footer_mobile_links",
"(",
"is_secure",
")",
",",
"'legal_links'",
":",
"_footer_legal_links",
"(",
")",
",",
"'openedx_link'",
":",
"_footer_openedx_link",
"(",
")",
"}"
] | retrieve information used to render the footer . | train | false |
21,584 | def cbExamineMbox(result, proto):
return proto.fetchSpecific('1:*', headerType='HEADER.FIELDS', headerArgs=['SUBJECT']).addCallback(cbFetch, proto)
| [
"def",
"cbExamineMbox",
"(",
"result",
",",
"proto",
")",
":",
"return",
"proto",
".",
"fetchSpecific",
"(",
"'1:*'",
",",
"headerType",
"=",
"'HEADER.FIELDS'",
",",
"headerArgs",
"=",
"[",
"'SUBJECT'",
"]",
")",
".",
"addCallback",
"(",
"cbFetch",
",",
"proto",
")"
] | callback invoked when examine command completes . | train | false |
21,586 | def create_export_job(task_id, event_id):
export_job = ExportJob.query.filter_by(event_id=event_id).first()
task_url = url_for('api.extras_celery_task', task_id=task_id)
if export_job:
export_job.task = task_url
export_job.user_email = g.user.email
export_job.event = EventModel.query.get(event_id)
export_job.start_time = datetime.now()
else:
export_job = ExportJob(task=task_url, user_email=g.user.email, event=EventModel.query.get(event_id))
save_to_db(export_job, 'ExportJob saved')
| [
"def",
"create_export_job",
"(",
"task_id",
",",
"event_id",
")",
":",
"export_job",
"=",
"ExportJob",
".",
"query",
".",
"filter_by",
"(",
"event_id",
"=",
"event_id",
")",
".",
"first",
"(",
")",
"task_url",
"=",
"url_for",
"(",
"'api.extras_celery_task'",
",",
"task_id",
"=",
"task_id",
")",
"if",
"export_job",
":",
"export_job",
".",
"task",
"=",
"task_url",
"export_job",
".",
"user_email",
"=",
"g",
".",
"user",
".",
"email",
"export_job",
".",
"event",
"=",
"EventModel",
".",
"query",
".",
"get",
"(",
"event_id",
")",
"export_job",
".",
"start_time",
"=",
"datetime",
".",
"now",
"(",
")",
"else",
":",
"export_job",
"=",
"ExportJob",
"(",
"task",
"=",
"task_url",
",",
"user_email",
"=",
"g",
".",
"user",
".",
"email",
",",
"event",
"=",
"EventModel",
".",
"query",
".",
"get",
"(",
"event_id",
")",
")",
"save_to_db",
"(",
"export_job",
",",
"'ExportJob saved'",
")"
] | create export job for an export that is going to start . | train | false |
21,588 | def login_forbidden(view_func, template_name='login_forbidden.html', status=403):
# Decorator that allows only anonymous users to access the wrapped view.
# Authenticated users get `template_name` rendered with HTTP `status`
# (403 by default) instead of the view itself.
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
if (not request.user.is_authenticated()):
# Anonymous visitor: run the wrapped view normally.
return view_func(request, *args, **kwargs)
# Logged-in user: show the "forbidden" page in place of the view.
return render(request, template_name, status=status)
return _checklogin
| [
"def",
"login_forbidden",
"(",
"view_func",
",",
"template_name",
"=",
"'login_forbidden.html'",
",",
"status",
"=",
"403",
")",
":",
"@",
"wraps",
"(",
"view_func",
")",
"def",
"_checklogin",
"(",
"request",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"not",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
")",
":",
"return",
"view_func",
"(",
"request",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"render",
"(",
"request",
",",
"template_name",
",",
"status",
"=",
"status",
")",
"return",
"_checklogin"
] | only allow anonymous users to access this view . | train | false |
21,589 | def details():
# S3/Sahana-Eden REST controller endpoint for the tour_details resource.
tablename = 'tour_details'
# Force the table's model to be loaded into the registry before dispatch.
s3db.table(tablename)
# NOTE(review): `table` is assigned but never used afterwards — looks like
# leftover scaffolding; candidate for removal.
table = s3db.tour_details
return s3_rest_controller('tour', 'details')
| [
"def",
"details",
"(",
")",
":",
"tablename",
"=",
"'tour_details'",
"s3db",
".",
"table",
"(",
"tablename",
")",
"table",
"=",
"s3db",
".",
"tour_details",
"return",
"s3_rest_controller",
"(",
"'tour'",
",",
"'details'",
")"
] | return dict with details of first item found in output . | train | false |
21,590 | def add_routing(app, port):
# Register an AppServer with the AppController so it starts routing
# traffic for application `app` on this machine's `port`.
logging.info('Waiting for application {} on port {} to be active.'.format(str(app), str(port)))
if (not wait_on_app(port)):
# The AppServer never became reachable — give up without registering.
logging.warning('AppServer did not come up in time, for {}:{}.'.format(str(app), str(port)))
return
acc = appscale_info.get_appcontroller_client()
appserver_ip = appscale_info.get_private_ip()
while True:
result = acc.add_routing_for_appserver(app, appserver_ip, port)
if (result == AppControllerClient.NOT_READY):
# Controller not ready yet; back off and retry.
# NOTE(review): retries forever with no upper bound — confirm intended.
logging.info('AppController not yet ready to add routing.')
time.sleep(ROUTING_RETRY_INTERVAL)
else:
break
logging.info('Successfully established routing for {} on port {}'.format(app, port))
| [
"def",
"add_routing",
"(",
"app",
",",
"port",
")",
":",
"logging",
".",
"info",
"(",
"'Waiting for application {} on port {} to be active.'",
".",
"format",
"(",
"str",
"(",
"app",
")",
",",
"str",
"(",
"port",
")",
")",
")",
"if",
"(",
"not",
"wait_on_app",
"(",
"port",
")",
")",
":",
"logging",
".",
"warning",
"(",
"'AppServer did not come up in time, for {}:{}.'",
".",
"format",
"(",
"str",
"(",
"app",
")",
",",
"str",
"(",
"port",
")",
")",
")",
"return",
"acc",
"=",
"appscale_info",
".",
"get_appcontroller_client",
"(",
")",
"appserver_ip",
"=",
"appscale_info",
".",
"get_private_ip",
"(",
")",
"while",
"True",
":",
"result",
"=",
"acc",
".",
"add_routing_for_appserver",
"(",
"app",
",",
"appserver_ip",
",",
"port",
")",
"if",
"(",
"result",
"==",
"AppControllerClient",
".",
"NOT_READY",
")",
":",
"logging",
".",
"info",
"(",
"'AppController not yet ready to add routing.'",
")",
"time",
".",
"sleep",
"(",
"ROUTING_RETRY_INTERVAL",
")",
"else",
":",
"break",
"logging",
".",
"info",
"(",
"'Successfully established routing for {} on port {}'",
".",
"format",
"(",
"app",
",",
"port",
")",
")"
] | tells the appcontroller to begin routing traffic to an appserver . | train | false |
21,591 | @task
# Fabric task: verify that the code tree for `revision` (default: the
# current hg revision) is installed on the remote host, i.e. that a
# ~/viewfinder.<rev> path exists. Returns True when installed, False
# otherwise, including when an old-style (directory, not symlink) layout
# is detected, which requires manual intervention.
def code_verify(revision=None):
if is_old_code():
fprint('installed code is in the old style (directory instead of symlink). Manual intervention required')
return False
rev = (revision or hg_revision())
if exists(('~/viewfinder.%s' % rev)):
fprint(('Code at revision %s is installed' % rev))
return True
else:
fprint(('Code at revision %s is not installed' % rev))
return False
| [
"@",
"task",
"def",
"code_verify",
"(",
"revision",
"=",
"None",
")",
":",
"if",
"is_old_code",
"(",
")",
":",
"fprint",
"(",
"'installed code is in the old style (directory instead of symlink). Manual intervention required'",
")",
"return",
"False",
"rev",
"=",
"(",
"revision",
"or",
"hg_revision",
"(",
")",
")",
"if",
"exists",
"(",
"(",
"'~/viewfinder.%s'",
"%",
"rev",
")",
")",
":",
"fprint",
"(",
"(",
"'Code at revision %s is installed'",
"%",
"rev",
")",
")",
"return",
"True",
"else",
":",
"fprint",
"(",
"(",
"'Code at revision %s is not installed'",
"%",
"rev",
")",
")",
"return",
"False"
] | verify the code for a given revision . | train | false |
21,592 | def _toggle_labels(label, params):
# Toggle visibility of one category of axis labels / zero-lines in an
# epochs-browser figure, flipping the matching flag in params['settings']
# and redrawing the canvas(es). `label` is the checkbox caption clicked.
# NOTE(review): the inner `for label in labels` loops shadow the `label`
# parameter; harmless here because the if/elif branches are mutually
# exclusive, but worth renaming if this ever changes.
if (label == 'Channel names visible'):
params['settings'][0] = (not params['settings'][0])
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
elif (label == 'Event-id visible'):
params['settings'][1] = (not params['settings'][1])
labels = params['ax2'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][1])
elif (label == 'Epoch-id visible'):
params['settings'][2] = (not params['settings'][2])
labels = params['ax'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][2])
elif (label == 'Zeroline visible'):
params['settings'][3] = (not params['settings'][3])
# Zero-lines are drawn artists, not tick labels — re-plot them instead.
_plot_vert_lines(params)
params['fig'].canvas.draw()
if (params['fig_proj'] is not None):
# Keep the projector-selection figure in sync with the main canvas.
params['fig_proj'].canvas.draw()
| [
"def",
"_toggle_labels",
"(",
"label",
",",
"params",
")",
":",
"if",
"(",
"label",
"==",
"'Channel names visible'",
")",
":",
"params",
"[",
"'settings'",
"]",
"[",
"0",
"]",
"=",
"(",
"not",
"params",
"[",
"'settings'",
"]",
"[",
"0",
"]",
")",
"labels",
"=",
"params",
"[",
"'ax'",
"]",
".",
"yaxis",
".",
"get_ticklabels",
"(",
")",
"for",
"label",
"in",
"labels",
":",
"label",
".",
"set_visible",
"(",
"params",
"[",
"'settings'",
"]",
"[",
"0",
"]",
")",
"elif",
"(",
"label",
"==",
"'Event-id visible'",
")",
":",
"params",
"[",
"'settings'",
"]",
"[",
"1",
"]",
"=",
"(",
"not",
"params",
"[",
"'settings'",
"]",
"[",
"1",
"]",
")",
"labels",
"=",
"params",
"[",
"'ax2'",
"]",
".",
"xaxis",
".",
"get_ticklabels",
"(",
")",
"for",
"label",
"in",
"labels",
":",
"label",
".",
"set_visible",
"(",
"params",
"[",
"'settings'",
"]",
"[",
"1",
"]",
")",
"elif",
"(",
"label",
"==",
"'Epoch-id visible'",
")",
":",
"params",
"[",
"'settings'",
"]",
"[",
"2",
"]",
"=",
"(",
"not",
"params",
"[",
"'settings'",
"]",
"[",
"2",
"]",
")",
"labels",
"=",
"params",
"[",
"'ax'",
"]",
".",
"xaxis",
".",
"get_ticklabels",
"(",
")",
"for",
"label",
"in",
"labels",
":",
"label",
".",
"set_visible",
"(",
"params",
"[",
"'settings'",
"]",
"[",
"2",
"]",
")",
"elif",
"(",
"label",
"==",
"'Zeroline visible'",
")",
":",
"params",
"[",
"'settings'",
"]",
"[",
"3",
"]",
"=",
"(",
"not",
"params",
"[",
"'settings'",
"]",
"[",
"3",
"]",
")",
"_plot_vert_lines",
"(",
"params",
")",
"params",
"[",
"'fig'",
"]",
".",
"canvas",
".",
"draw",
"(",
")",
"if",
"(",
"params",
"[",
"'fig_proj'",
"]",
"is",
"not",
"None",
")",
":",
"params",
"[",
"'fig_proj'",
"]",
".",
"canvas",
".",
"draw",
"(",
")"
] | toggle axis labels . | train | false |
21,593 | def _iter_lines(byte_iter, line_separator):
# Re-chunk an iterable of string/byte chunks into complete lines.
# Partial data is buffered across chunk boundaries; each yielded value
# ends with `line_separator`, except possibly the final fragment.
chunks = []
for data in byte_iter:
while data:
(head, sep, data) = data.partition(line_separator)
if (not sep):
# No separator in the remainder — buffer it for the next chunk.
chunks.append(head)
break
chunks.append((head + sep))
(yield ''.join(chunks))
chunks = []
# Flush trailing data that lacked a final separator.
if chunks:
(yield ''.join(chunks))
| [
"def",
"_iter_lines",
"(",
"byte_iter",
",",
"line_separator",
")",
":",
"chunks",
"=",
"[",
"]",
"for",
"data",
"in",
"byte_iter",
":",
"while",
"data",
":",
"(",
"head",
",",
"sep",
",",
"data",
")",
"=",
"data",
".",
"partition",
"(",
"line_separator",
")",
"if",
"(",
"not",
"sep",
")",
":",
"chunks",
".",
"append",
"(",
"head",
")",
"break",
"chunks",
".",
"append",
"(",
"(",
"head",
"+",
"sep",
")",
")",
"(",
"yield",
"''",
".",
"join",
"(",
"chunks",
")",
")",
"chunks",
"=",
"[",
"]",
"if",
"chunks",
":",
"(",
"yield",
"''",
".",
"join",
"(",
"chunks",
")",
")"
] | iterate over the lines that make up content . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.