id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
18,883 | def is_in_subnet(ip, mask):
ipaddr = int(''.join([('%02x' % int(x)) for x in ip.split('.')]), 16)
(netstr, bits) = mask.split('/')
netaddr = int(''.join([('%02x' % int(x)) for x in netstr.split('.')]), 16)
mask = ((4294967295 << (32 - int(bits))) & 4294967295)
return ((ipaddr & mask) == (netaddr & mask))
| [
"def",
"is_in_subnet",
"(",
"ip",
",",
"mask",
")",
":",
"ipaddr",
"=",
"int",
"(",
"''",
".",
"join",
"(",
"[",
"(",
"'%02x'",
"%",
"int",
"(",
"x",
")",
")",
"for",
"x",
"in",
"ip",
".",
"split",
"(",
"'.'",
")",
"]",
")",
",",
"16",
")",
"(",
"netstr",
",",
"bits",
")",
"=",
"mask",
".",
"split",
"(",
"'/'",
")",
"netaddr",
"=",
"int",
"(",
"''",
".",
"join",
"(",
"[",
"(",
"'%02x'",
"%",
"int",
"(",
"x",
")",
")",
"for",
"x",
"in",
"netstr",
".",
"split",
"(",
"'.'",
")",
"]",
")",
",",
"16",
")",
"mask",
"=",
"(",
"(",
"4294967295",
"<<",
"(",
"32",
"-",
"int",
"(",
"bits",
")",
")",
")",
"&",
"4294967295",
")",
"return",
"(",
"(",
"ipaddr",
"&",
"mask",
")",
"==",
"(",
"netaddr",
"&",
"mask",
")",
")"
] | check if a given ip address is lies within the given netmask true if ip falls within mask false otherwise . | train | false |
18,884 | def import_buffer_to_hst(buf):
return tokenize((buf + '\n'))
| [
"def",
"import_buffer_to_hst",
"(",
"buf",
")",
":",
"return",
"tokenize",
"(",
"(",
"buf",
"+",
"'\\n'",
")",
")"
] | import content from buf and return a hy ast . | train | false |
18,885 | def win_find_executable(executable, env):
if os.path.dirname(executable):
return executable
path = env.get(u'PATH', u'')
pathext = (env.get(u'PATHEXT') or u'.EXE')
dirs = path.split(os.path.pathsep)
(base, ext) = os.path.splitext(executable)
if ext:
extensions = [ext]
else:
extensions = pathext.split(os.path.pathsep)
for directory in dirs:
for extension in extensions:
filepath = os.path.join(directory, (base + extension))
if os.path.exists(filepath):
return filepath
return None
| [
"def",
"win_find_executable",
"(",
"executable",
",",
"env",
")",
":",
"if",
"os",
".",
"path",
".",
"dirname",
"(",
"executable",
")",
":",
"return",
"executable",
"path",
"=",
"env",
".",
"get",
"(",
"u'PATH'",
",",
"u''",
")",
"pathext",
"=",
"(",
"env",
".",
"get",
"(",
"u'PATHEXT'",
")",
"or",
"u'.EXE'",
")",
"dirs",
"=",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
"(",
"base",
",",
"ext",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"executable",
")",
"if",
"ext",
":",
"extensions",
"=",
"[",
"ext",
"]",
"else",
":",
"extensions",
"=",
"pathext",
".",
"split",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
"for",
"directory",
"in",
"dirs",
":",
"for",
"extension",
"in",
"extensions",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"(",
"base",
"+",
"extension",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filepath",
")",
":",
"return",
"filepath",
"return",
"None"
] | explicetely looks for executable in env["path"] . | train | false |
18,886 | def ListBasename(filelist):
return [os.path.basename(f) for f in filelist]
| [
"def",
"ListBasename",
"(",
"filelist",
")",
":",
"return",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
"for",
"f",
"in",
"filelist",
"]"
] | turns a list of paths into a list of basenames for each entry . | train | false |
18,887 | def format_as_index(indices):
if (not indices):
return ''
return ('[%s]' % ']['.join((repr(index) for index in indices)))
| [
"def",
"format_as_index",
"(",
"indices",
")",
":",
"if",
"(",
"not",
"indices",
")",
":",
"return",
"''",
"return",
"(",
"'[%s]'",
"%",
"']['",
".",
"join",
"(",
"(",
"repr",
"(",
"index",
")",
"for",
"index",
"in",
"indices",
")",
")",
")"
] | construct a single string containing indexing operations for the indices . | train | false |
18,890 | def in_zoom(coord, range):
zooms = search('^(\\d+)-(\\d+)$|^(\\d+)$', range)
if (not zooms):
raise Core.KnownUnknown(('Bad zoom range in a Sandwich Layer: %s' % repr(range)))
(min_zoom, max_zoom, at_zoom) = zooms.groups()
if ((min_zoom is not None) and (max_zoom is not None)):
(min_zoom, max_zoom) = (int(min_zoom), int(max_zoom))
elif (at_zoom is not None):
(min_zoom, max_zoom) = (int(at_zoom), int(at_zoom))
else:
(min_zoom, max_zoom) = (0, float('inf'))
return ((min_zoom <= coord.zoom) and (coord.zoom <= max_zoom))
| [
"def",
"in_zoom",
"(",
"coord",
",",
"range",
")",
":",
"zooms",
"=",
"search",
"(",
"'^(\\\\d+)-(\\\\d+)$|^(\\\\d+)$'",
",",
"range",
")",
"if",
"(",
"not",
"zooms",
")",
":",
"raise",
"Core",
".",
"KnownUnknown",
"(",
"(",
"'Bad zoom range in a Sandwich Layer: %s'",
"%",
"repr",
"(",
"range",
")",
")",
")",
"(",
"min_zoom",
",",
"max_zoom",
",",
"at_zoom",
")",
"=",
"zooms",
".",
"groups",
"(",
")",
"if",
"(",
"(",
"min_zoom",
"is",
"not",
"None",
")",
"and",
"(",
"max_zoom",
"is",
"not",
"None",
")",
")",
":",
"(",
"min_zoom",
",",
"max_zoom",
")",
"=",
"(",
"int",
"(",
"min_zoom",
")",
",",
"int",
"(",
"max_zoom",
")",
")",
"elif",
"(",
"at_zoom",
"is",
"not",
"None",
")",
":",
"(",
"min_zoom",
",",
"max_zoom",
")",
"=",
"(",
"int",
"(",
"at_zoom",
")",
",",
"int",
"(",
"at_zoom",
")",
")",
"else",
":",
"(",
"min_zoom",
",",
"max_zoom",
")",
"=",
"(",
"0",
",",
"float",
"(",
"'inf'",
")",
")",
"return",
"(",
"(",
"min_zoom",
"<=",
"coord",
".",
"zoom",
")",
"and",
"(",
"coord",
".",
"zoom",
"<=",
"max_zoom",
")",
")"
] | return true if the coordinate zoom is within the textual range . | train | false |
18,892 | def _import_modules(imports, dglobals):
if (imports is not None):
for stmt in imports:
try:
exec stmt in dglobals
except TypeError:
raise TypeError(('invalid type: %s' % stmt))
except Exception:
continue
| [
"def",
"_import_modules",
"(",
"imports",
",",
"dglobals",
")",
":",
"if",
"(",
"imports",
"is",
"not",
"None",
")",
":",
"for",
"stmt",
"in",
"imports",
":",
"try",
":",
"exec",
"stmt",
"in",
"dglobals",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"(",
"'invalid type: %s'",
"%",
"stmt",
")",
")",
"except",
"Exception",
":",
"continue"
] | if given . | train | false |
18,894 | def _plain_bfs(G, source):
Gsucc = G.succ
Gpred = G.pred
seen = set()
nextlevel = {source}
while nextlevel:
thislevel = nextlevel
nextlevel = set()
for v in thislevel:
if (v not in seen):
(yield v)
seen.add(v)
nextlevel.update(Gsucc[v])
nextlevel.update(Gpred[v])
| [
"def",
"_plain_bfs",
"(",
"G",
",",
"source",
")",
":",
"Gsucc",
"=",
"G",
".",
"succ",
"Gpred",
"=",
"G",
".",
"pred",
"seen",
"=",
"set",
"(",
")",
"nextlevel",
"=",
"{",
"source",
"}",
"while",
"nextlevel",
":",
"thislevel",
"=",
"nextlevel",
"nextlevel",
"=",
"set",
"(",
")",
"for",
"v",
"in",
"thislevel",
":",
"if",
"(",
"v",
"not",
"in",
"seen",
")",
":",
"(",
"yield",
"v",
")",
"seen",
".",
"add",
"(",
"v",
")",
"nextlevel",
".",
"update",
"(",
"Gsucc",
"[",
"v",
"]",
")",
"nextlevel",
".",
"update",
"(",
"Gpred",
"[",
"v",
"]",
")"
] | a fast bfs node generator the direction of the edge between nodes is ignored . | train | false |
18,895 | def SortedRPCSummaries(urlstats, summary_percentile):
rpcsummary = {}
for (index, request) in enumerate(reversed(urlstats.urlrequestlist)):
for rpc in request.rpcstatslist:
label = rpc.GetLabel()
if (label not in rpcsummary):
rpcsummary[label] = RPCSummary()
summary = rpcsummary[label]
summary.requests += 1
summary.calls += rpc.numcalls
summary.times.append(rpc.time)
summary.indices.append(index)
successful_reads = (len(rpc.keys_read) - len(rpc.keys_failed_get))
summary.stats.append((rpc.numcalls, successful_reads, len(rpc.keys_written), len(rpc.keys_failed_get)))
for label in rpcsummary:
summary = _GetPercentile(sorted(rpcsummary[label].times), summary_percentile)
rpcsummary[label].summary_time = summary
rpcsummary_sort = sorted(rpcsummary.iteritems(), key=(lambda pair: pair[1].summary_time), reverse=True)
return rpcsummary_sort
| [
"def",
"SortedRPCSummaries",
"(",
"urlstats",
",",
"summary_percentile",
")",
":",
"rpcsummary",
"=",
"{",
"}",
"for",
"(",
"index",
",",
"request",
")",
"in",
"enumerate",
"(",
"reversed",
"(",
"urlstats",
".",
"urlrequestlist",
")",
")",
":",
"for",
"rpc",
"in",
"request",
".",
"rpcstatslist",
":",
"label",
"=",
"rpc",
".",
"GetLabel",
"(",
")",
"if",
"(",
"label",
"not",
"in",
"rpcsummary",
")",
":",
"rpcsummary",
"[",
"label",
"]",
"=",
"RPCSummary",
"(",
")",
"summary",
"=",
"rpcsummary",
"[",
"label",
"]",
"summary",
".",
"requests",
"+=",
"1",
"summary",
".",
"calls",
"+=",
"rpc",
".",
"numcalls",
"summary",
".",
"times",
".",
"append",
"(",
"rpc",
".",
"time",
")",
"summary",
".",
"indices",
".",
"append",
"(",
"index",
")",
"successful_reads",
"=",
"(",
"len",
"(",
"rpc",
".",
"keys_read",
")",
"-",
"len",
"(",
"rpc",
".",
"keys_failed_get",
")",
")",
"summary",
".",
"stats",
".",
"append",
"(",
"(",
"rpc",
".",
"numcalls",
",",
"successful_reads",
",",
"len",
"(",
"rpc",
".",
"keys_written",
")",
",",
"len",
"(",
"rpc",
".",
"keys_failed_get",
")",
")",
")",
"for",
"label",
"in",
"rpcsummary",
":",
"summary",
"=",
"_GetPercentile",
"(",
"sorted",
"(",
"rpcsummary",
"[",
"label",
"]",
".",
"times",
")",
",",
"summary_percentile",
")",
"rpcsummary",
"[",
"label",
"]",
".",
"summary_time",
"=",
"summary",
"rpcsummary_sort",
"=",
"sorted",
"(",
"rpcsummary",
".",
"iteritems",
"(",
")",
",",
"key",
"=",
"(",
"lambda",
"pair",
":",
"pair",
"[",
"1",
"]",
".",
"summary_time",
")",
",",
"reverse",
"=",
"True",
")",
"return",
"rpcsummary_sort"
] | summarize rpc statistics of requests for ui . | train | false |
18,896 | def _create_model(data):
model = base.BaseCompletionModel()
for catdata in data:
cat = model.new_category('')
for itemdata in catdata:
model.new_item(cat, *itemdata)
return model
| [
"def",
"_create_model",
"(",
"data",
")",
":",
"model",
"=",
"base",
".",
"BaseCompletionModel",
"(",
")",
"for",
"catdata",
"in",
"data",
":",
"cat",
"=",
"model",
".",
"new_category",
"(",
"''",
")",
"for",
"itemdata",
"in",
"catdata",
":",
"model",
".",
"new_item",
"(",
"cat",
",",
"*",
"itemdata",
")",
"return",
"model"
] | create a completion model populated with the given data . | train | false |
18,897 | def increment_trigger_ref_count(rule_api):
trigger_dict = _get_trigger_dict_given_rule(rule_api)
if trigger_dict.get('parameters', None):
trigger_db = _get_trigger_db(trigger_dict)
Trigger.update(trigger_db, inc__ref_count=1)
| [
"def",
"increment_trigger_ref_count",
"(",
"rule_api",
")",
":",
"trigger_dict",
"=",
"_get_trigger_dict_given_rule",
"(",
"rule_api",
")",
"if",
"trigger_dict",
".",
"get",
"(",
"'parameters'",
",",
"None",
")",
":",
"trigger_db",
"=",
"_get_trigger_db",
"(",
"trigger_dict",
")",
"Trigger",
".",
"update",
"(",
"trigger_db",
",",
"inc__ref_count",
"=",
"1",
")"
] | given the rule figures out the triggertype with parameter and increments reference count on the appropriate trigger . | train | false |
18,898 | def object_f(a, indices):
for idx in indices:
sleep((10 * sleep_factor))
object()
a[idx] = PyThread_get_thread_ident()
| [
"def",
"object_f",
"(",
"a",
",",
"indices",
")",
":",
"for",
"idx",
"in",
"indices",
":",
"sleep",
"(",
"(",
"10",
"*",
"sleep_factor",
")",
")",
"object",
"(",
")",
"a",
"[",
"idx",
"]",
"=",
"PyThread_get_thread_ident",
"(",
")"
] | same as f() . | train | false |
18,899 | def heawood_graph(create_using=None):
G = LCF_graph(14, [5, (-5)], 7, create_using)
G.name = 'Heawood Graph'
return G
| [
"def",
"heawood_graph",
"(",
"create_using",
"=",
"None",
")",
":",
"G",
"=",
"LCF_graph",
"(",
"14",
",",
"[",
"5",
",",
"(",
"-",
"5",
")",
"]",
",",
"7",
",",
"create_using",
")",
"G",
".",
"name",
"=",
"'Heawood Graph'",
"return",
"G"
] | return the heawood graph . | train | false |
18,900 | def _intersect(d1, d2):
res = {}
for (key, val) in d1.iteritems():
if ((key in d2) and (d2[key] == val)):
res[key] = val
return res
| [
"def",
"_intersect",
"(",
"d1",
",",
"d2",
")",
":",
"res",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"val",
")",
"in",
"d1",
".",
"iteritems",
"(",
")",
":",
"if",
"(",
"(",
"key",
"in",
"d2",
")",
"and",
"(",
"d2",
"[",
"key",
"]",
"==",
"val",
")",
")",
":",
"res",
"[",
"key",
"]",
"=",
"val",
"return",
"res"
] | create intersection of two dictionaries . | train | false |
18,901 | def get_router():
router = getattr(settings, 'RAPIDSMS_ROUTER', 'rapidsms.router.blocking.BlockingRouter')
if isinstance(router, string_types):
try:
router = import_class(router)()
except ImportError as e:
raise ImproperlyConfigured(e)
return router
| [
"def",
"get_router",
"(",
")",
":",
"router",
"=",
"getattr",
"(",
"settings",
",",
"'RAPIDSMS_ROUTER'",
",",
"'rapidsms.router.blocking.BlockingRouter'",
")",
"if",
"isinstance",
"(",
"router",
",",
"string_types",
")",
":",
"try",
":",
"router",
"=",
"import_class",
"(",
"router",
")",
"(",
")",
"except",
"ImportError",
"as",
"e",
":",
"raise",
"ImproperlyConfigured",
"(",
"e",
")",
"return",
"router"
] | return router defined by rapidsms_router setting . | train | false |
18,902 | def exec_script(script_filename, env=None, *args):
script_filename = os.path.basename(script_filename)
script_filename = os.path.join(os.path.dirname(__file__), 'subproc', script_filename)
if (not os.path.exists(script_filename)):
raise SystemError('To prevent misuse, the script passed to PyInstaller.utils.hooks.exec_script must be located in the `PyInstaller/utils/hooks/subproc` directory.')
cmd = [script_filename]
cmd.extend(args)
return __exec_python_cmd(cmd, env=env)
| [
"def",
"exec_script",
"(",
"script_filename",
",",
"env",
"=",
"None",
",",
"*",
"args",
")",
":",
"script_filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"script_filename",
")",
"script_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'subproc'",
",",
"script_filename",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"script_filename",
")",
")",
":",
"raise",
"SystemError",
"(",
"'To prevent misuse, the script passed to PyInstaller.utils.hooks.exec_script must be located in the `PyInstaller/utils/hooks/subproc` directory.'",
")",
"cmd",
"=",
"[",
"script_filename",
"]",
"cmd",
".",
"extend",
"(",
"args",
")",
"return",
"__exec_python_cmd",
"(",
"cmd",
",",
"env",
"=",
"env",
")"
] | executes a python script in an externally spawned interpreter . | train | false |
18,903 | def convert_datetime_for_arith(builder, dt_val, src_unit, dest_unit):
(dt_val, dt_unit) = reduce_datetime_for_unit(builder, dt_val, src_unit, dest_unit)
dt_factor = npdatetime.get_timedelta_conversion_factor(dt_unit, dest_unit)
if (dt_factor is None):
raise NotImplementedError(('cannot convert datetime64 from %r to %r' % (src_unit, dest_unit)))
return scale_by_constant(builder, dt_val, dt_factor)
| [
"def",
"convert_datetime_for_arith",
"(",
"builder",
",",
"dt_val",
",",
"src_unit",
",",
"dest_unit",
")",
":",
"(",
"dt_val",
",",
"dt_unit",
")",
"=",
"reduce_datetime_for_unit",
"(",
"builder",
",",
"dt_val",
",",
"src_unit",
",",
"dest_unit",
")",
"dt_factor",
"=",
"npdatetime",
".",
"get_timedelta_conversion_factor",
"(",
"dt_unit",
",",
"dest_unit",
")",
"if",
"(",
"dt_factor",
"is",
"None",
")",
":",
"raise",
"NotImplementedError",
"(",
"(",
"'cannot convert datetime64 from %r to %r'",
"%",
"(",
"src_unit",
",",
"dest_unit",
")",
")",
")",
"return",
"scale_by_constant",
"(",
"builder",
",",
"dt_val",
",",
"dt_factor",
")"
] | convert datetime *dt_val* from *src_unit* to *dest_unit* . | train | false |
18,904 | @log_call
def metadef_tag_get_by_id(context, namespace_name, id):
namespace = metadef_namespace_get(context, namespace_name)
_check_namespace_visibility(context, namespace, namespace_name)
for tag in DATA['metadef_tags']:
if ((tag['namespace_id'] == namespace['id']) and (tag['id'] == id)):
return tag
else:
msg = (_('Metadata definition tag not found for id=%s') % id)
LOG.warn(msg)
raise exception.MetadefTagNotFound(msg)
| [
"@",
"log_call",
"def",
"metadef_tag_get_by_id",
"(",
"context",
",",
"namespace_name",
",",
"id",
")",
":",
"namespace",
"=",
"metadef_namespace_get",
"(",
"context",
",",
"namespace_name",
")",
"_check_namespace_visibility",
"(",
"context",
",",
"namespace",
",",
"namespace_name",
")",
"for",
"tag",
"in",
"DATA",
"[",
"'metadef_tags'",
"]",
":",
"if",
"(",
"(",
"tag",
"[",
"'namespace_id'",
"]",
"==",
"namespace",
"[",
"'id'",
"]",
")",
"and",
"(",
"tag",
"[",
"'id'",
"]",
"==",
"id",
")",
")",
":",
"return",
"tag",
"else",
":",
"msg",
"=",
"(",
"_",
"(",
"'Metadata definition tag not found for id=%s'",
")",
"%",
"id",
")",
"LOG",
".",
"warn",
"(",
"msg",
")",
"raise",
"exception",
".",
"MetadefTagNotFound",
"(",
"msg",
")"
] | get a metadef tag . | train | false |
18,905 | def group_destroy(context, group_id):
return IMPL.group_destroy(context, group_id)
| [
"def",
"group_destroy",
"(",
"context",
",",
"group_id",
")",
":",
"return",
"IMPL",
".",
"group_destroy",
"(",
"context",
",",
"group_id",
")"
] | destroy the group or raise if it does not exist . | train | false |
18,906 | def _verify_padding(padding):
if (padding not in ALLOWED_PADDINGS):
raise ValueError("Padding must be member of '{}', not {}".format(ALLOWED_PADDINGS, padding))
return padding
| [
"def",
"_verify_padding",
"(",
"padding",
")",
":",
"if",
"(",
"padding",
"not",
"in",
"ALLOWED_PADDINGS",
")",
":",
"raise",
"ValueError",
"(",
"\"Padding must be member of '{}', not {}\"",
".",
"format",
"(",
"ALLOWED_PADDINGS",
",",
"padding",
")",
")",
"return",
"padding"
] | verifies that the provided padding is supported . | train | false |
18,907 | def _connect_client(trig_queue):
t0 = time.time()
while (((time.time() - t0) < _max_wait) and ((_server is None) or (not _server._running))):
time.sleep(0.01)
assert_true(((_server is not None) and _server._running))
stim_client = StimClient('localhost', port=4218)
t0 = time.time()
while (((time.time() - t0) < _max_wait) and (not _have_put_in_trigger)):
time.sleep(0.01)
assert_true(_have_put_in_trigger)
trig_queue.put(stim_client.get_trigger())
stim_client.close()
| [
"def",
"_connect_client",
"(",
"trig_queue",
")",
":",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"while",
"(",
"(",
"(",
"time",
".",
"time",
"(",
")",
"-",
"t0",
")",
"<",
"_max_wait",
")",
"and",
"(",
"(",
"_server",
"is",
"None",
")",
"or",
"(",
"not",
"_server",
".",
"_running",
")",
")",
")",
":",
"time",
".",
"sleep",
"(",
"0.01",
")",
"assert_true",
"(",
"(",
"(",
"_server",
"is",
"not",
"None",
")",
"and",
"_server",
".",
"_running",
")",
")",
"stim_client",
"=",
"StimClient",
"(",
"'localhost'",
",",
"port",
"=",
"4218",
")",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"while",
"(",
"(",
"(",
"time",
".",
"time",
"(",
")",
"-",
"t0",
")",
"<",
"_max_wait",
")",
"and",
"(",
"not",
"_have_put_in_trigger",
")",
")",
":",
"time",
".",
"sleep",
"(",
"0.01",
")",
"assert_true",
"(",
"_have_put_in_trigger",
")",
"trig_queue",
".",
"put",
"(",
"stim_client",
".",
"get_trigger",
"(",
")",
")",
"stim_client",
".",
"close",
"(",
")"
] | helper method that instantiates the stimclient . | train | false |
18,911 | def set_file(path, saltenv='base', **kwargs):
if ('__env__' in kwargs):
salt.utils.warn_until('Oxygen', "Parameter '__env__' has been detected in the argument list. This parameter is no longer used and has been replaced by 'saltenv' as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.")
kwargs.pop('__env__')
path = __salt__['cp.cache_file'](path, saltenv)
if path:
_set_file(path)
return True
return False
| [
"def",
"set_file",
"(",
"path",
",",
"saltenv",
"=",
"'base'",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"'__env__'",
"in",
"kwargs",
")",
":",
"salt",
".",
"utils",
".",
"warn_until",
"(",
"'Oxygen'",
",",
"\"Parameter '__env__' has been detected in the argument list. This parameter is no longer used and has been replaced by 'saltenv' as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.\"",
")",
"kwargs",
".",
"pop",
"(",
"'__env__'",
")",
"path",
"=",
"__salt__",
"[",
"'cp.cache_file'",
"]",
"(",
"path",
",",
"saltenv",
")",
"if",
"path",
":",
"_set_file",
"(",
"path",
")",
"return",
"True",
"return",
"False"
] | set debconf selections from a file or a template . | train | false |
18,912 | @require_POST
@login_required
def watch_thread(request, document_slug, thread_id):
doc = get_document(document_slug, request)
thread = get_object_or_404(Thread, pk=thread_id, document=doc)
if (request.POST.get('watch') == 'yes'):
NewPostEvent.notify(request.user, thread)
statsd.incr('kbforums.watches.thread')
else:
NewPostEvent.stop_notifying(request.user, thread)
return HttpResponseRedirect(reverse('wiki.discuss.posts', args=[document_slug, thread_id]))
| [
"@",
"require_POST",
"@",
"login_required",
"def",
"watch_thread",
"(",
"request",
",",
"document_slug",
",",
"thread_id",
")",
":",
"doc",
"=",
"get_document",
"(",
"document_slug",
",",
"request",
")",
"thread",
"=",
"get_object_or_404",
"(",
"Thread",
",",
"pk",
"=",
"thread_id",
",",
"document",
"=",
"doc",
")",
"if",
"(",
"request",
".",
"POST",
".",
"get",
"(",
"'watch'",
")",
"==",
"'yes'",
")",
":",
"NewPostEvent",
".",
"notify",
"(",
"request",
".",
"user",
",",
"thread",
")",
"statsd",
".",
"incr",
"(",
"'kbforums.watches.thread'",
")",
"else",
":",
"NewPostEvent",
".",
"stop_notifying",
"(",
"request",
".",
"user",
",",
"thread",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'wiki.discuss.posts'",
",",
"args",
"=",
"[",
"document_slug",
",",
"thread_id",
"]",
")",
")"
] | watch/unwatch a thread . | train | false |
18,913 | def _setup_dots(mode, coils, ch_type):
int_rad = 0.06
noise = _ad_hoc_noise(coils, ch_type)
if (mode == 'fast'):
n_coeff = 50
lut_fun = _get_legen_lut_fast
else:
n_coeff = 100
lut_fun = _get_legen_lut_accurate
(lut, n_fact) = _get_legen_table(ch_type, False, n_coeff, verbose=False)
lut_fun = partial(lut_fun, lut=lut)
return (int_rad, noise, lut_fun, n_fact)
| [
"def",
"_setup_dots",
"(",
"mode",
",",
"coils",
",",
"ch_type",
")",
":",
"int_rad",
"=",
"0.06",
"noise",
"=",
"_ad_hoc_noise",
"(",
"coils",
",",
"ch_type",
")",
"if",
"(",
"mode",
"==",
"'fast'",
")",
":",
"n_coeff",
"=",
"50",
"lut_fun",
"=",
"_get_legen_lut_fast",
"else",
":",
"n_coeff",
"=",
"100",
"lut_fun",
"=",
"_get_legen_lut_accurate",
"(",
"lut",
",",
"n_fact",
")",
"=",
"_get_legen_table",
"(",
"ch_type",
",",
"False",
",",
"n_coeff",
",",
"verbose",
"=",
"False",
")",
"lut_fun",
"=",
"partial",
"(",
"lut_fun",
",",
"lut",
"=",
"lut",
")",
"return",
"(",
"int_rad",
",",
"noise",
",",
"lut_fun",
",",
"n_fact",
")"
] | setup dot products . | train | false |
18,914 | def _state_description(vm_state, _shutdown_terminate):
name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
return {'code': inst_state.name_to_code(name), 'name': name}
| [
"def",
"_state_description",
"(",
"vm_state",
",",
"_shutdown_terminate",
")",
":",
"name",
"=",
"_STATE_DESCRIPTION_MAP",
".",
"get",
"(",
"vm_state",
",",
"vm_state",
")",
"return",
"{",
"'code'",
":",
"inst_state",
".",
"name_to_code",
"(",
"name",
")",
",",
"'name'",
":",
"name",
"}"
] | map the vm state to the server status string . | train | false |
18,915 | def libvlc_media_player_get_state(p_mi):
f = (_Cfunctions.get('libvlc_media_player_get_state', None) or _Cfunction('libvlc_media_player_get_state', ((1,),), None, State, MediaPlayer))
return f(p_mi)
| [
"def",
"libvlc_media_player_get_state",
"(",
"p_mi",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_media_player_get_state'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_media_player_get_state'",
",",
"(",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"State",
",",
"MediaPlayer",
")",
")",
"return",
"f",
"(",
"p_mi",
")"
] | get current movie state . | train | true |
18,918 | @command('url_file\\s(\\S+)')
def yt_url_file(file_name):
try:
with open(file_name, 'r') as fo:
output = ' '.join([line.strip() for line in fo if line.strip()])
except IOError:
g.message = ((c.r + 'Error while opening the file, check the validity of the path') + c.w)
g.content = (g.content or content.generate_songlist_display(zeromsg=g.message))
return
yt_url(output)
| [
"@",
"command",
"(",
"'url_file\\\\s(\\\\S+)'",
")",
"def",
"yt_url_file",
"(",
"file_name",
")",
":",
"try",
":",
"with",
"open",
"(",
"file_name",
",",
"'r'",
")",
"as",
"fo",
":",
"output",
"=",
"' '",
".",
"join",
"(",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"fo",
"if",
"line",
".",
"strip",
"(",
")",
"]",
")",
"except",
"IOError",
":",
"g",
".",
"message",
"=",
"(",
"(",
"c",
".",
"r",
"+",
"'Error while opening the file, check the validity of the path'",
")",
"+",
"c",
".",
"w",
")",
"g",
".",
"content",
"=",
"(",
"g",
".",
"content",
"or",
"content",
".",
"generate_songlist_display",
"(",
"zeromsg",
"=",
"g",
".",
"message",
")",
")",
"return",
"yt_url",
"(",
"output",
")"
] | access a list of urls in a text file . | train | false |
18,921 | def hunt_for_stacktrace(url):
req = urlreq.Request(url)
req.add_header('Accept-Encoding', 'gzip')
page = urlreq.urlopen(req)
buf = six.StringIO(page.read())
f = gzip.GzipFile(fileobj=buf)
content = f.read()
traces = []
trace = StackTrace()
for line in content.splitlines():
m = re.match(NOVA_REGEX, line)
if m:
data = m.groupdict()
if (trace.not_none() and trace.is_same(data)):
trace.append((data['msg'] + '\n'))
else:
trace = StackTrace(timestamp=data.get('timestamp'), pid=data.get('pid'), level=data.get('level'), module=data.get('module'), msg=data.get('msg'))
elif trace.not_none():
traces.append(trace)
trace = StackTrace()
if trace.not_none():
traces.append(trace)
return traces
| [
"def",
"hunt_for_stacktrace",
"(",
"url",
")",
":",
"req",
"=",
"urlreq",
".",
"Request",
"(",
"url",
")",
"req",
".",
"add_header",
"(",
"'Accept-Encoding'",
",",
"'gzip'",
")",
"page",
"=",
"urlreq",
".",
"urlopen",
"(",
"req",
")",
"buf",
"=",
"six",
".",
"StringIO",
"(",
"page",
".",
"read",
"(",
")",
")",
"f",
"=",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"buf",
")",
"content",
"=",
"f",
".",
"read",
"(",
")",
"traces",
"=",
"[",
"]",
"trace",
"=",
"StackTrace",
"(",
")",
"for",
"line",
"in",
"content",
".",
"splitlines",
"(",
")",
":",
"m",
"=",
"re",
".",
"match",
"(",
"NOVA_REGEX",
",",
"line",
")",
"if",
"m",
":",
"data",
"=",
"m",
".",
"groupdict",
"(",
")",
"if",
"(",
"trace",
".",
"not_none",
"(",
")",
"and",
"trace",
".",
"is_same",
"(",
"data",
")",
")",
":",
"trace",
".",
"append",
"(",
"(",
"data",
"[",
"'msg'",
"]",
"+",
"'\\n'",
")",
")",
"else",
":",
"trace",
"=",
"StackTrace",
"(",
"timestamp",
"=",
"data",
".",
"get",
"(",
"'timestamp'",
")",
",",
"pid",
"=",
"data",
".",
"get",
"(",
"'pid'",
")",
",",
"level",
"=",
"data",
".",
"get",
"(",
"'level'",
")",
",",
"module",
"=",
"data",
".",
"get",
"(",
"'module'",
")",
",",
"msg",
"=",
"data",
".",
"get",
"(",
"'msg'",
")",
")",
"elif",
"trace",
".",
"not_none",
"(",
")",
":",
"traces",
".",
"append",
"(",
"trace",
")",
"trace",
"=",
"StackTrace",
"(",
")",
"if",
"trace",
".",
"not_none",
"(",
")",
":",
"traces",
".",
"append",
"(",
"trace",
")",
"return",
"traces"
] | return trace or error lines out of logs . | train | false |
18,922 | def _encode_none(name, dummy0, dummy1, dummy2):
return ('\n' + name)
| [
"def",
"_encode_none",
"(",
"name",
",",
"dummy0",
",",
"dummy1",
",",
"dummy2",
")",
":",
"return",
"(",
"'\\n'",
"+",
"name",
")"
] | encode python none . | train | false |
18,923 | def expand_substates(states):
def nbits(n):
'number of bits set in n base 2'
nb = 0
while n:
(n, rem) = divmod(n, 2)
nb += rem
return nb
statelist = []
for state in states:
substates = list(set(((state & x) for x in states)))
substates.sort(key=nbits, reverse=True)
statelist.append(substates)
return statelist
| [
"def",
"expand_substates",
"(",
"states",
")",
":",
"def",
"nbits",
"(",
"n",
")",
":",
"nb",
"=",
"0",
"while",
"n",
":",
"(",
"n",
",",
"rem",
")",
"=",
"divmod",
"(",
"n",
",",
"2",
")",
"nb",
"+=",
"rem",
"return",
"nb",
"statelist",
"=",
"[",
"]",
"for",
"state",
"in",
"states",
":",
"substates",
"=",
"list",
"(",
"set",
"(",
"(",
"(",
"state",
"&",
"x",
")",
"for",
"x",
"in",
"states",
")",
")",
")",
"substates",
".",
"sort",
"(",
"key",
"=",
"nbits",
",",
"reverse",
"=",
"True",
")",
"statelist",
".",
"append",
"(",
"substates",
")",
"return",
"statelist"
] | for each item of states return a list containing all combinations of that item with individual bits reset . | train | false |
18,924 | @handle_response_format
@treeio_login_required
def weblink_add(request, response_format='html'):
if request.POST:
if ('cancel' not in request.POST):
link = WebLink()
form = WebLinkForm(request.user.profile, None, request.POST, instance=link)
if form.is_valid():
link = form.save()
link.set_user_from_request(request)
return HttpResponseRedirect(reverse('documents_weblink_view', args=[link.id]))
else:
return HttpResponseRedirect(reverse('document_index'))
else:
form = WebLinkForm(request.user.profile, None)
context = _get_default_context(request)
context.update({'form': form, 'file': file})
return render_to_response('documents/weblink_add', context, context_instance=RequestContext(request), response_format=response_format)
| [
"@",
"handle_response_format",
"@",
"treeio_login_required",
"def",
"weblink_add",
"(",
"request",
",",
"response_format",
"=",
"'html'",
")",
":",
"if",
"request",
".",
"POST",
":",
"if",
"(",
"'cancel'",
"not",
"in",
"request",
".",
"POST",
")",
":",
"link",
"=",
"WebLink",
"(",
")",
"form",
"=",
"WebLinkForm",
"(",
"request",
".",
"user",
".",
"profile",
",",
"None",
",",
"request",
".",
"POST",
",",
"instance",
"=",
"link",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"link",
"=",
"form",
".",
"save",
"(",
")",
"link",
".",
"set_user_from_request",
"(",
"request",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'documents_weblink_view'",
",",
"args",
"=",
"[",
"link",
".",
"id",
"]",
")",
")",
"else",
":",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'document_index'",
")",
")",
"else",
":",
"form",
"=",
"WebLinkForm",
"(",
"request",
".",
"user",
".",
"profile",
",",
"None",
")",
"context",
"=",
"_get_default_context",
"(",
"request",
")",
"context",
".",
"update",
"(",
"{",
"'form'",
":",
"form",
",",
"'file'",
":",
"file",
"}",
")",
"return",
"render_to_response",
"(",
"'documents/weblink_add'",
",",
"context",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
",",
"response_format",
"=",
"response_format",
")"
] | new web link form . | train | false |
18,925 | def run_doctest(target_dir=None, *args, **kwargs):
import doctest
default_kwargs = {'optionflags': doctest.ELLIPSIS}
kwargs.update(default_kwargs)
cur_dir = os.path.abspath(os.curdir)
print('Running doctests...')
try:
os.chdir(find_test_dir(target_dir))
doctest.testmod(*args, **kwargs)
finally:
os.chdir(cur_dir)
print('Done')
| [
"def",
"run_doctest",
"(",
"target_dir",
"=",
"None",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"import",
"doctest",
"default_kwargs",
"=",
"{",
"'optionflags'",
":",
"doctest",
".",
"ELLIPSIS",
"}",
"kwargs",
".",
"update",
"(",
"default_kwargs",
")",
"cur_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"curdir",
")",
"print",
"(",
"'Running doctests...'",
")",
"try",
":",
"os",
".",
"chdir",
"(",
"find_test_dir",
"(",
"target_dir",
")",
")",
"doctest",
".",
"testmod",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"cur_dir",
")",
"print",
"(",
"'Done'",
")"
] | run doctest on the given module . | train | false |
18,926 | def test_parent_with_slash(dir_layout, quteproc):
quteproc.open_url((dir_layout.file_url() + '/'))
page = parse(quteproc)
assert (page.parent == dir_layout.base_path())
| [
"def",
"test_parent_with_slash",
"(",
"dir_layout",
",",
"quteproc",
")",
":",
"quteproc",
".",
"open_url",
"(",
"(",
"dir_layout",
".",
"file_url",
"(",
")",
"+",
"'/'",
")",
")",
"page",
"=",
"parse",
"(",
"quteproc",
")",
"assert",
"(",
"page",
".",
"parent",
"==",
"dir_layout",
".",
"base_path",
"(",
")",
")"
] | test the parent link with a url that has a trailing slash . | train | false |
18,927 | def _get_folder_info(folder):
folder_info = ''
if (folder and folder.parent):
folder_info = _get_folder_info(folder.parent)
folder_info += (' %s %s' % (folder.name.replace('Unnamed folder', ''), (folder.description or '')))
return folder_info
| [
"def",
"_get_folder_info",
"(",
"folder",
")",
":",
"folder_info",
"=",
"''",
"if",
"(",
"folder",
"and",
"folder",
".",
"parent",
")",
":",
"folder_info",
"=",
"_get_folder_info",
"(",
"folder",
".",
"parent",
")",
"folder_info",
"+=",
"(",
"' %s %s'",
"%",
"(",
"folder",
".",
"name",
".",
"replace",
"(",
"'Unnamed folder'",
",",
"''",
")",
",",
"(",
"folder",
".",
"description",
"or",
"''",
")",
")",
")",
"return",
"folder_info"
] | get names and descriptions for all parent folders except top level . | train | false |
18,930 | def _getPersistentRSAKey(location, keySize=4096):
location.parent().makedirs(ignoreExistingDirectory=True)
if (not location.exists()):
privateKey = rsa.generate_private_key(public_exponent=65537, key_size=keySize, backend=default_backend())
pem = privateKey.private_bytes(encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption())
location.setContent(pem)
with location.open('rb') as keyFile:
privateKey = serialization.load_pem_private_key(keyFile.read(), password=None, backend=default_backend())
return Key(privateKey)
| [
"def",
"_getPersistentRSAKey",
"(",
"location",
",",
"keySize",
"=",
"4096",
")",
":",
"location",
".",
"parent",
"(",
")",
".",
"makedirs",
"(",
"ignoreExistingDirectory",
"=",
"True",
")",
"if",
"(",
"not",
"location",
".",
"exists",
"(",
")",
")",
":",
"privateKey",
"=",
"rsa",
".",
"generate_private_key",
"(",
"public_exponent",
"=",
"65537",
",",
"key_size",
"=",
"keySize",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
"pem",
"=",
"privateKey",
".",
"private_bytes",
"(",
"encoding",
"=",
"serialization",
".",
"Encoding",
".",
"PEM",
",",
"format",
"=",
"serialization",
".",
"PrivateFormat",
".",
"TraditionalOpenSSL",
",",
"encryption_algorithm",
"=",
"serialization",
".",
"NoEncryption",
"(",
")",
")",
"location",
".",
"setContent",
"(",
"pem",
")",
"with",
"location",
".",
"open",
"(",
"'rb'",
")",
"as",
"keyFile",
":",
"privateKey",
"=",
"serialization",
".",
"load_pem_private_key",
"(",
"keyFile",
".",
"read",
"(",
")",
",",
"password",
"=",
"None",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
"return",
"Key",
"(",
"privateKey",
")"
] | this function returns a persistent l{key} . | train | false |
18,932 | @validator
def fi_ssn(ssn):
if (not ssn):
return False
result = re.match(ssn_pattern, ssn)
if (not result):
return False
gd = result.groupdict()
checksum = int((gd['date'] + gd['serial']))
return (ssn_checkmarks[(checksum % len(ssn_checkmarks))] == gd['checksum'].upper())
| [
"@",
"validator",
"def",
"fi_ssn",
"(",
"ssn",
")",
":",
"if",
"(",
"not",
"ssn",
")",
":",
"return",
"False",
"result",
"=",
"re",
".",
"match",
"(",
"ssn_pattern",
",",
"ssn",
")",
"if",
"(",
"not",
"result",
")",
":",
"return",
"False",
"gd",
"=",
"result",
".",
"groupdict",
"(",
")",
"checksum",
"=",
"int",
"(",
"(",
"gd",
"[",
"'date'",
"]",
"+",
"gd",
"[",
"'serial'",
"]",
")",
")",
"return",
"(",
"ssn_checkmarks",
"[",
"(",
"checksum",
"%",
"len",
"(",
"ssn_checkmarks",
")",
")",
"]",
"==",
"gd",
"[",
"'checksum'",
"]",
".",
"upper",
"(",
")",
")"
] | validate a finnish social security number . | train | false |
18,934 | @environmentfilter
def restructuredtext(env, value):
try:
from docutils.core import publish_parts
except ImportError:
logger.error(u'Cannot load the docutils library.')
raise TemplateError(u'Cannot load the docutils library.')
highlight_source = False
if hasattr(env.config, 'restructuredtext'):
highlight_source = getattr(env.config.restructuredtext, 'highlight_source', False)
extensions = getattr(env.config.restructuredtext, 'extensions', [])
import imp
for extension in extensions:
imp.load_module(extension, *imp.find_module(extension))
if highlight_source:
import hyde.lib.pygments.rst_directive
parts = publish_parts(source=value, writer_name='html')
return parts['html_body']
| [
"@",
"environmentfilter",
"def",
"restructuredtext",
"(",
"env",
",",
"value",
")",
":",
"try",
":",
"from",
"docutils",
".",
"core",
"import",
"publish_parts",
"except",
"ImportError",
":",
"logger",
".",
"error",
"(",
"u'Cannot load the docutils library.'",
")",
"raise",
"TemplateError",
"(",
"u'Cannot load the docutils library.'",
")",
"highlight_source",
"=",
"False",
"if",
"hasattr",
"(",
"env",
".",
"config",
",",
"'restructuredtext'",
")",
":",
"highlight_source",
"=",
"getattr",
"(",
"env",
".",
"config",
".",
"restructuredtext",
",",
"'highlight_source'",
",",
"False",
")",
"extensions",
"=",
"getattr",
"(",
"env",
".",
"config",
".",
"restructuredtext",
",",
"'extensions'",
",",
"[",
"]",
")",
"import",
"imp",
"for",
"extension",
"in",
"extensions",
":",
"imp",
".",
"load_module",
"(",
"extension",
",",
"*",
"imp",
".",
"find_module",
"(",
"extension",
")",
")",
"if",
"highlight_source",
":",
"import",
"hyde",
".",
"lib",
".",
"pygments",
".",
"rst_directive",
"parts",
"=",
"publish_parts",
"(",
"source",
"=",
"value",
",",
"writer_name",
"=",
"'html'",
")",
"return",
"parts",
"[",
"'html_body'",
"]"
] | restructuredtext processing with optionnally custom settings . | train | false |
18,935 | def setUnjellyableFactoryForClass(classname, copyFactory):
global unjellyableFactoryRegistry
classname = _maybeClass(classname)
unjellyableFactoryRegistry[classname] = copyFactory
globalSecurity.allowTypes(classname)
| [
"def",
"setUnjellyableFactoryForClass",
"(",
"classname",
",",
"copyFactory",
")",
":",
"global",
"unjellyableFactoryRegistry",
"classname",
"=",
"_maybeClass",
"(",
"classname",
")",
"unjellyableFactoryRegistry",
"[",
"classname",
"]",
"=",
"copyFactory",
"globalSecurity",
".",
"allowTypes",
"(",
"classname",
")"
] | set the factory to construct a remote instance of a type:: jellier . | train | false |
18,937 | def resource_find(filename):
if (not filename):
return None
if (filename[:8] == 'atlas://'):
return filename
if exists(abspath(filename)):
return abspath(filename)
for path in reversed(resource_paths):
output = abspath(join(path, filename))
if exists(output):
return output
if (filename[:5] == 'data:'):
return filename
return None
| [
"def",
"resource_find",
"(",
"filename",
")",
":",
"if",
"(",
"not",
"filename",
")",
":",
"return",
"None",
"if",
"(",
"filename",
"[",
":",
"8",
"]",
"==",
"'atlas://'",
")",
":",
"return",
"filename",
"if",
"exists",
"(",
"abspath",
"(",
"filename",
")",
")",
":",
"return",
"abspath",
"(",
"filename",
")",
"for",
"path",
"in",
"reversed",
"(",
"resource_paths",
")",
":",
"output",
"=",
"abspath",
"(",
"join",
"(",
"path",
",",
"filename",
")",
")",
"if",
"exists",
"(",
"output",
")",
":",
"return",
"output",
"if",
"(",
"filename",
"[",
":",
"5",
"]",
"==",
"'data:'",
")",
":",
"return",
"filename",
"return",
"None"
] | search for a resource in the list of paths . | train | false |
18,939 | def wait_on_app(port):
retries = math.ceil((START_APP_TIMEOUT / BACKOFF_TIME))
private_ip = appscale_info.get_private_ip()
url = (((('http://' + private_ip) + ':') + str(port)) + FETCH_PATH)
while (retries > 0):
try:
opener = urllib2.build_opener(NoRedirection)
response = opener.open(url)
if (response.code != HTTP_OK):
logging.warning('{} returned {}. Headers: {}'.format(url, response.code, response.headers.headers))
return True
except IOError:
retries -= 1
time.sleep(BACKOFF_TIME)
logging.error('Application did not come up on {} after {} seconds'.format(url, START_APP_TIMEOUT))
return False
| [
"def",
"wait_on_app",
"(",
"port",
")",
":",
"retries",
"=",
"math",
".",
"ceil",
"(",
"(",
"START_APP_TIMEOUT",
"/",
"BACKOFF_TIME",
")",
")",
"private_ip",
"=",
"appscale_info",
".",
"get_private_ip",
"(",
")",
"url",
"=",
"(",
"(",
"(",
"(",
"'http://'",
"+",
"private_ip",
")",
"+",
"':'",
")",
"+",
"str",
"(",
"port",
")",
")",
"+",
"FETCH_PATH",
")",
"while",
"(",
"retries",
">",
"0",
")",
":",
"try",
":",
"opener",
"=",
"urllib2",
".",
"build_opener",
"(",
"NoRedirection",
")",
"response",
"=",
"opener",
".",
"open",
"(",
"url",
")",
"if",
"(",
"response",
".",
"code",
"!=",
"HTTP_OK",
")",
":",
"logging",
".",
"warning",
"(",
"'{} returned {}. Headers: {}'",
".",
"format",
"(",
"url",
",",
"response",
".",
"code",
",",
"response",
".",
"headers",
".",
"headers",
")",
")",
"return",
"True",
"except",
"IOError",
":",
"retries",
"-=",
"1",
"time",
".",
"sleep",
"(",
"BACKOFF_TIME",
")",
"logging",
".",
"error",
"(",
"'Application did not come up on {} after {} seconds'",
".",
"format",
"(",
"url",
",",
"START_APP_TIMEOUT",
")",
")",
"return",
"False"
] | waits for the application hosted on this machine . | train | false |
18,942 | def get_credential_storage(filename, client_id, user_agent, scope, warn_on_readonly=True):
filename = os.path.realpath(os.path.expanduser(filename))
_multistores_lock.acquire()
try:
multistore = _multistores.setdefault(filename, _MultiStore(filename, warn_on_readonly))
finally:
_multistores_lock.release()
if (type(scope) is list):
scope = ' '.join(scope)
return multistore._get_storage(client_id, user_agent, scope)
| [
"def",
"get_credential_storage",
"(",
"filename",
",",
"client_id",
",",
"user_agent",
",",
"scope",
",",
"warn_on_readonly",
"=",
"True",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
")",
"_multistores_lock",
".",
"acquire",
"(",
")",
"try",
":",
"multistore",
"=",
"_multistores",
".",
"setdefault",
"(",
"filename",
",",
"_MultiStore",
"(",
"filename",
",",
"warn_on_readonly",
")",
")",
"finally",
":",
"_multistores_lock",
".",
"release",
"(",
")",
"if",
"(",
"type",
"(",
"scope",
")",
"is",
"list",
")",
":",
"scope",
"=",
"' '",
".",
"join",
"(",
"scope",
")",
"return",
"multistore",
".",
"_get_storage",
"(",
"client_id",
",",
"user_agent",
",",
"scope",
")"
] | get a storage instance for a credential . | train | false |
18,945 | def ext_tunables(dev):
cmd = ('tune2fs -l %s' % dev)
try:
out = utils.system_output(cmd)
except error.CmdError:
tools_dir = os.path.join(os.environ['AUTODIR'], 'tools')
cmd = ('%s/tune2fs.ext4dev -l %s' % (tools_dir, dev))
out = utils.system_output(cmd)
tune2fs_dict = {}
for line in out.splitlines():
components = line.split(':', 1)
if (len(components) == 2):
value = components[1].strip()
option = components[0]
if value.isdigit():
tune2fs_dict[option] = int(value)
else:
tune2fs_dict[option] = value
return tune2fs_dict
| [
"def",
"ext_tunables",
"(",
"dev",
")",
":",
"cmd",
"=",
"(",
"'tune2fs -l %s'",
"%",
"dev",
")",
"try",
":",
"out",
"=",
"utils",
".",
"system_output",
"(",
"cmd",
")",
"except",
"error",
".",
"CmdError",
":",
"tools_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"'AUTODIR'",
"]",
",",
"'tools'",
")",
"cmd",
"=",
"(",
"'%s/tune2fs.ext4dev -l %s'",
"%",
"(",
"tools_dir",
",",
"dev",
")",
")",
"out",
"=",
"utils",
".",
"system_output",
"(",
"cmd",
")",
"tune2fs_dict",
"=",
"{",
"}",
"for",
"line",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"components",
"=",
"line",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"(",
"len",
"(",
"components",
")",
"==",
"2",
")",
":",
"value",
"=",
"components",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"option",
"=",
"components",
"[",
"0",
"]",
"if",
"value",
".",
"isdigit",
"(",
")",
":",
"tune2fs_dict",
"[",
"option",
"]",
"=",
"int",
"(",
"value",
")",
"else",
":",
"tune2fs_dict",
"[",
"option",
"]",
"=",
"value",
"return",
"tune2fs_dict"
] | call tune2fs -l and parse the result . | train | false |
18,946 | def create_container_with_mbytes_and_specific_cpus(name, mbytes, cpus=None, root=SUPER_ROOT, io={}, move_in=True, timeout=0):
need_mem_containers()
if (not container_exists(root)):
raise error.AutotestError(('Parent container "%s" does not exist' % root))
if (cpus is None):
cpus = get_cpus(root)
else:
cpus = set(cpus)
if (not cpus):
raise error.AutotestError('Creating container with no cpus')
name = os.path.join(root, name)
if os.path.exists(full_path(name)):
raise error.AutotestError(('Container %s already exists' % name))
create_container_directly(name, mbytes, cpus)
set_io_controls(name, **io)
if move_in:
move_self_into_container(name)
return name
| [
"def",
"create_container_with_mbytes_and_specific_cpus",
"(",
"name",
",",
"mbytes",
",",
"cpus",
"=",
"None",
",",
"root",
"=",
"SUPER_ROOT",
",",
"io",
"=",
"{",
"}",
",",
"move_in",
"=",
"True",
",",
"timeout",
"=",
"0",
")",
":",
"need_mem_containers",
"(",
")",
"if",
"(",
"not",
"container_exists",
"(",
"root",
")",
")",
":",
"raise",
"error",
".",
"AutotestError",
"(",
"(",
"'Parent container \"%s\" does not exist'",
"%",
"root",
")",
")",
"if",
"(",
"cpus",
"is",
"None",
")",
":",
"cpus",
"=",
"get_cpus",
"(",
"root",
")",
"else",
":",
"cpus",
"=",
"set",
"(",
"cpus",
")",
"if",
"(",
"not",
"cpus",
")",
":",
"raise",
"error",
".",
"AutotestError",
"(",
"'Creating container with no cpus'",
")",
"name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"full_path",
"(",
"name",
")",
")",
":",
"raise",
"error",
".",
"AutotestError",
"(",
"(",
"'Container %s already exists'",
"%",
"name",
")",
")",
"create_container_directly",
"(",
"name",
",",
"mbytes",
",",
"cpus",
")",
"set_io_controls",
"(",
"name",
",",
"**",
"io",
")",
"if",
"move_in",
":",
"move_self_into_container",
"(",
"name",
")",
"return",
"name"
] | create a cpuset container and move jobs current pid into it allocate the list "cpus" of cpus to that container . | train | false |
18,947 | @declared
def perf(obj_ref, metric_name):
obj = get_object(obj_ref)
p = PerfDatas(obj.perf_data)
if (metric_name in p):
logger.debug('[trigger] I found the perfdata')
return p[metric_name].value
logger.debug('[trigger] I am in perf command')
return None
| [
"@",
"declared",
"def",
"perf",
"(",
"obj_ref",
",",
"metric_name",
")",
":",
"obj",
"=",
"get_object",
"(",
"obj_ref",
")",
"p",
"=",
"PerfDatas",
"(",
"obj",
".",
"perf_data",
")",
"if",
"(",
"metric_name",
"in",
"p",
")",
":",
"logger",
".",
"debug",
"(",
"'[trigger] I found the perfdata'",
")",
"return",
"p",
"[",
"metric_name",
"]",
".",
"value",
"logger",
".",
"debug",
"(",
"'[trigger] I am in perf command'",
")",
"return",
"None"
] | get perf data from a service . | train | false |
18,948 | @contextmanager
def isolate_lru_cache(lru_cache_object):
lru_cache_object.cache_clear()
try:
(yield)
finally:
lru_cache_object.cache_clear()
| [
"@",
"contextmanager",
"def",
"isolate_lru_cache",
"(",
"lru_cache_object",
")",
":",
"lru_cache_object",
".",
"cache_clear",
"(",
")",
"try",
":",
"(",
"yield",
")",
"finally",
":",
"lru_cache_object",
".",
"cache_clear",
"(",
")"
] | clear the cache of an lru cache object on entering and exiting . | train | false |
18,949 | def verifyCryptedPassword(crypted, pw):
return (crypt.crypt(pw, crypted) == crypted)
| [
"def",
"verifyCryptedPassword",
"(",
"crypted",
",",
"pw",
")",
":",
"return",
"(",
"crypt",
".",
"crypt",
"(",
"pw",
",",
"crypted",
")",
"==",
"crypted",
")"
] | check that the password . | train | false |
18,950 | def stream_action(client, stream_name, shard_count=1, action='create', timeout=300, check_mode=False):
success = False
err_msg = ''
params = {'StreamName': stream_name}
try:
if (not check_mode):
if (action == 'create'):
params['ShardCount'] = shard_count
client.create_stream(**params)
success = True
elif (action == 'delete'):
client.delete_stream(**params)
success = True
else:
err_msg = 'Invalid action {0}'.format(action)
elif (action == 'create'):
success = True
elif (action == 'delete'):
success = True
else:
err_msg = 'Invalid action {0}'.format(action)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return (success, err_msg)
| [
"def",
"stream_action",
"(",
"client",
",",
"stream_name",
",",
"shard_count",
"=",
"1",
",",
"action",
"=",
"'create'",
",",
"timeout",
"=",
"300",
",",
"check_mode",
"=",
"False",
")",
":",
"success",
"=",
"False",
"err_msg",
"=",
"''",
"params",
"=",
"{",
"'StreamName'",
":",
"stream_name",
"}",
"try",
":",
"if",
"(",
"not",
"check_mode",
")",
":",
"if",
"(",
"action",
"==",
"'create'",
")",
":",
"params",
"[",
"'ShardCount'",
"]",
"=",
"shard_count",
"client",
".",
"create_stream",
"(",
"**",
"params",
")",
"success",
"=",
"True",
"elif",
"(",
"action",
"==",
"'delete'",
")",
":",
"client",
".",
"delete_stream",
"(",
"**",
"params",
")",
"success",
"=",
"True",
"else",
":",
"err_msg",
"=",
"'Invalid action {0}'",
".",
"format",
"(",
"action",
")",
"elif",
"(",
"action",
"==",
"'create'",
")",
":",
"success",
"=",
"True",
"elif",
"(",
"action",
"==",
"'delete'",
")",
":",
"success",
"=",
"True",
"else",
":",
"err_msg",
"=",
"'Invalid action {0}'",
".",
"format",
"(",
"action",
")",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"e",
":",
"err_msg",
"=",
"str",
"(",
"e",
")",
"return",
"(",
"success",
",",
"err_msg",
")"
] | create or delete an amazon kinesis stream . | train | false |
18,951 | def _enroll_user_in_pending_courses(student):
ceas = CourseEnrollmentAllowed.objects.filter(email=student.email)
for cea in ceas:
if cea.auto_enroll:
enrollment = CourseEnrollment.enroll(student, cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email)
if (manual_enrollment_audit is not None):
ManualEnrollmentAudit.create_manual_enrollment_audit(manual_enrollment_audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED, manual_enrollment_audit.reason, enrollment)
| [
"def",
"_enroll_user_in_pending_courses",
"(",
"student",
")",
":",
"ceas",
"=",
"CourseEnrollmentAllowed",
".",
"objects",
".",
"filter",
"(",
"email",
"=",
"student",
".",
"email",
")",
"for",
"cea",
"in",
"ceas",
":",
"if",
"cea",
".",
"auto_enroll",
":",
"enrollment",
"=",
"CourseEnrollment",
".",
"enroll",
"(",
"student",
",",
"cea",
".",
"course_id",
")",
"manual_enrollment_audit",
"=",
"ManualEnrollmentAudit",
".",
"get_manual_enrollment_by_email",
"(",
"student",
".",
"email",
")",
"if",
"(",
"manual_enrollment_audit",
"is",
"not",
"None",
")",
":",
"ManualEnrollmentAudit",
".",
"create_manual_enrollment_audit",
"(",
"manual_enrollment_audit",
".",
"enrolled_by",
",",
"student",
".",
"email",
",",
"ALLOWEDTOENROLL_TO_ENROLLED",
",",
"manual_enrollment_audit",
".",
"reason",
",",
"enrollment",
")"
] | enroll student in any pending courses he/she may have . | train | false |
18,952 | def is_meta_resource_type_mutable(context, meta_resource_type):
if context.is_admin:
return True
if (context.owner is None):
return False
if meta_resource_type.namespace:
return (meta_resource_type.namespace.owner == context.owner)
else:
return False
| [
"def",
"is_meta_resource_type_mutable",
"(",
"context",
",",
"meta_resource_type",
")",
":",
"if",
"context",
".",
"is_admin",
":",
"return",
"True",
"if",
"(",
"context",
".",
"owner",
"is",
"None",
")",
":",
"return",
"False",
"if",
"meta_resource_type",
".",
"namespace",
":",
"return",
"(",
"meta_resource_type",
".",
"namespace",
".",
"owner",
"==",
"context",
".",
"owner",
")",
"else",
":",
"return",
"False"
] | return true if the meta_resource_type is mutable in this context . | train | false |
18,953 | def get_header_name(name):
uc_name = re.sub('\\W+', '_', force_text(name)).upper()
if ((uc_name in ['CONTENT_LENGTH', 'CONTENT_TYPE']) or uc_name.startswith('HTTP_')):
return uc_name
return 'HTTP_{0}'.format(uc_name)
| [
"def",
"get_header_name",
"(",
"name",
")",
":",
"uc_name",
"=",
"re",
".",
"sub",
"(",
"'\\\\W+'",
",",
"'_'",
",",
"force_text",
"(",
"name",
")",
")",
".",
"upper",
"(",
")",
"if",
"(",
"(",
"uc_name",
"in",
"[",
"'CONTENT_LENGTH'",
",",
"'CONTENT_TYPE'",
"]",
")",
"or",
"uc_name",
".",
"startswith",
"(",
"'HTTP_'",
")",
")",
":",
"return",
"uc_name",
"return",
"'HTTP_{0}'",
".",
"format",
"(",
"uc_name",
")"
] | returns "http_header_name" for "header-name" or "header-name" . | train | false |
18,954 | def group_person_status():
return s3_rest_controller()
| [
"def",
"group_person_status",
"(",
")",
":",
"return",
"s3_rest_controller",
"(",
")"
] | restful crud controller . | train | false |
18,956 | def test_derivative_numerically(f, z, tol=1e-06, a=2, b=(-1), c=3, d=1):
from sympy.core.function import Derivative
z0 = random_complex_number(a, b, c, d)
f1 = f.diff(z).subs(z, z0)
f2 = Derivative(f, z).doit_numerically(z0)
return comp(f1.n(), f2.n(), tol)
| [
"def",
"test_derivative_numerically",
"(",
"f",
",",
"z",
",",
"tol",
"=",
"1e-06",
",",
"a",
"=",
"2",
",",
"b",
"=",
"(",
"-",
"1",
")",
",",
"c",
"=",
"3",
",",
"d",
"=",
"1",
")",
":",
"from",
"sympy",
".",
"core",
".",
"function",
"import",
"Derivative",
"z0",
"=",
"random_complex_number",
"(",
"a",
",",
"b",
",",
"c",
",",
"d",
")",
"f1",
"=",
"f",
".",
"diff",
"(",
"z",
")",
".",
"subs",
"(",
"z",
",",
"z0",
")",
"f2",
"=",
"Derivative",
"(",
"f",
",",
"z",
")",
".",
"doit_numerically",
"(",
"z0",
")",
"return",
"comp",
"(",
"f1",
".",
"n",
"(",
")",
",",
"f2",
".",
"n",
"(",
")",
",",
"tol",
")"
] | test numerically that the symbolically computed derivative of f with respect to z is correct . | train | false |
18,957 | def humanize_bytes(num, suffix=u'B', si_prefix=False):
if (num == 0):
return (u'0 ' + suffix)
div = (1000 if si_prefix else 1024)
exponent = min((int(log(num, div)) if num else 0), (len(unit_list) - 1))
quotient = (float(num) / (div ** exponent))
(unit, decimals) = unit_list[exponent]
if (unit and (not si_prefix)):
unit = (unit.upper() + u'i')
return u'{{quotient:.{decimals}f}} {{unit}}{{suffix}}'.format(decimals=decimals).format(quotient=quotient, unit=unit, suffix=suffix)
| [
"def",
"humanize_bytes",
"(",
"num",
",",
"suffix",
"=",
"u'B'",
",",
"si_prefix",
"=",
"False",
")",
":",
"if",
"(",
"num",
"==",
"0",
")",
":",
"return",
"(",
"u'0 '",
"+",
"suffix",
")",
"div",
"=",
"(",
"1000",
"if",
"si_prefix",
"else",
"1024",
")",
"exponent",
"=",
"min",
"(",
"(",
"int",
"(",
"log",
"(",
"num",
",",
"div",
")",
")",
"if",
"num",
"else",
"0",
")",
",",
"(",
"len",
"(",
"unit_list",
")",
"-",
"1",
")",
")",
"quotient",
"=",
"(",
"float",
"(",
"num",
")",
"/",
"(",
"div",
"**",
"exponent",
")",
")",
"(",
"unit",
",",
"decimals",
")",
"=",
"unit_list",
"[",
"exponent",
"]",
"if",
"(",
"unit",
"and",
"(",
"not",
"si_prefix",
")",
")",
":",
"unit",
"=",
"(",
"unit",
".",
"upper",
"(",
")",
"+",
"u'i'",
")",
"return",
"u'{{quotient:.{decimals}f}} {{unit}}{{suffix}}'",
".",
"format",
"(",
"decimals",
"=",
"decimals",
")",
".",
"format",
"(",
"quotient",
"=",
"quotient",
",",
"unit",
"=",
"unit",
",",
"suffix",
"=",
"suffix",
")"
] | return a humanized string representation of a number of bytes . | train | false |
18,958 | def _AbiTrimIterator(handle):
return AbiIterator(handle, trim=True)
| [
"def",
"_AbiTrimIterator",
"(",
"handle",
")",
":",
"return",
"AbiIterator",
"(",
"handle",
",",
"trim",
"=",
"True",
")"
] | iterator for the abi file format that yields trimmed seqrecord objects . | train | false |
18,959 | def resample_to_csv(barFeed, frequency, csvFile):
assert (frequency > 0), 'Invalid frequency'
resample_impl(barFeed, frequency, csvFile)
| [
"def",
"resample_to_csv",
"(",
"barFeed",
",",
"frequency",
",",
"csvFile",
")",
":",
"assert",
"(",
"frequency",
">",
"0",
")",
",",
"'Invalid frequency'",
"resample_impl",
"(",
"barFeed",
",",
"frequency",
",",
"csvFile",
")"
] | resample a barfeed into a csv file grouping bars by a certain frequency . | train | false |
18,960 | def end_tag(doc, name, namespace=None):
doc.endElementNS((namespace, name), name)
| [
"def",
"end_tag",
"(",
"doc",
",",
"name",
",",
"namespace",
"=",
"None",
")",
":",
"doc",
".",
"endElementNS",
"(",
"(",
"namespace",
",",
"name",
")",
",",
"name",
")"
] | the text representation of an end tag for a tag . | train | false |
18,961 | def get_codon_alphabet(alphabet, gap='-', stop='*'):
from Bio.Alphabet import NucleotideAlphabet
if isinstance(alphabet, NucleotideAlphabet):
alpha = alphabet
if gap:
alpha = Gapped(alpha, gap_char=gap)
if stop:
alpha = HasStopCodon(alpha, stop_symbol=stop)
else:
raise TypeError('Only Nuclteotide Alphabet is accepted!')
return alpha
| [
"def",
"get_codon_alphabet",
"(",
"alphabet",
",",
"gap",
"=",
"'-'",
",",
"stop",
"=",
"'*'",
")",
":",
"from",
"Bio",
".",
"Alphabet",
"import",
"NucleotideAlphabet",
"if",
"isinstance",
"(",
"alphabet",
",",
"NucleotideAlphabet",
")",
":",
"alpha",
"=",
"alphabet",
"if",
"gap",
":",
"alpha",
"=",
"Gapped",
"(",
"alpha",
",",
"gap_char",
"=",
"gap",
")",
"if",
"stop",
":",
"alpha",
"=",
"HasStopCodon",
"(",
"alpha",
",",
"stop_symbol",
"=",
"stop",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Only Nuclteotide Alphabet is accepted!'",
")",
"return",
"alpha"
] | gets alignment alphabet for codon alignment . | train | false |
18,962 | def archive_today(request, **kwargs):
today = datetime.date.today()
kwargs.update({'year': str(today.year), 'month': today.strftime('%b').lower(), 'day': str(today.day)})
return archive_day(request, **kwargs)
| [
"def",
"archive_today",
"(",
"request",
",",
"**",
"kwargs",
")",
":",
"today",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"kwargs",
".",
"update",
"(",
"{",
"'year'",
":",
"str",
"(",
"today",
".",
"year",
")",
",",
"'month'",
":",
"today",
".",
"strftime",
"(",
"'%b'",
")",
".",
"lower",
"(",
")",
",",
"'day'",
":",
"str",
"(",
"today",
".",
"day",
")",
"}",
")",
"return",
"archive_day",
"(",
"request",
",",
"**",
"kwargs",
")"
] | generic daily archive view for today . | train | false |
18,963 | def check_estimator(Estimator):
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
try:
check(name, Estimator)
except SkipTest as message:
warnings.warn(message, SkipTestWarning)
| [
"def",
"check_estimator",
"(",
"Estimator",
")",
":",
"name",
"=",
"Estimator",
".",
"__name__",
"check_parameters_default_constructible",
"(",
"name",
",",
"Estimator",
")",
"for",
"check",
"in",
"_yield_all_checks",
"(",
"name",
",",
"Estimator",
")",
":",
"try",
":",
"check",
"(",
"name",
",",
"Estimator",
")",
"except",
"SkipTest",
"as",
"message",
":",
"warnings",
".",
"warn",
"(",
"message",
",",
"SkipTestWarning",
")"
] | check if estimator adheres to scikit-learn conventions . | train | false |
18,964 | def iddr_id(A, k):
A = np.asfortranarray(A)
(idx, rnorms) = _id.iddr_id(A, k)
n = A.shape[1]
proj = A.T.ravel()[:(k * (n - k))].reshape((k, (n - k)), order='F')
return (idx, proj)
| [
"def",
"iddr_id",
"(",
"A",
",",
"k",
")",
":",
"A",
"=",
"np",
".",
"asfortranarray",
"(",
"A",
")",
"(",
"idx",
",",
"rnorms",
")",
"=",
"_id",
".",
"iddr_id",
"(",
"A",
",",
"k",
")",
"n",
"=",
"A",
".",
"shape",
"[",
"1",
"]",
"proj",
"=",
"A",
".",
"T",
".",
"ravel",
"(",
")",
"[",
":",
"(",
"k",
"*",
"(",
"n",
"-",
"k",
")",
")",
"]",
".",
"reshape",
"(",
"(",
"k",
",",
"(",
"n",
"-",
"k",
")",
")",
",",
"order",
"=",
"'F'",
")",
"return",
"(",
"idx",
",",
"proj",
")"
] | compute id of a real matrix to a specified rank . | train | false |
18,965 | @core_helper
def markdown_extract(text, extract_length=190):
if (not text):
return ''
plain = RE_MD_HTML_TAGS.sub('', markdown(text))
if ((not extract_length) or (len(plain) < extract_length)):
return literal(plain)
return literal(unicode(whtext.truncate(plain, length=extract_length, indicator='...', whole_word=True)))
| [
"@",
"core_helper",
"def",
"markdown_extract",
"(",
"text",
",",
"extract_length",
"=",
"190",
")",
":",
"if",
"(",
"not",
"text",
")",
":",
"return",
"''",
"plain",
"=",
"RE_MD_HTML_TAGS",
".",
"sub",
"(",
"''",
",",
"markdown",
"(",
"text",
")",
")",
"if",
"(",
"(",
"not",
"extract_length",
")",
"or",
"(",
"len",
"(",
"plain",
")",
"<",
"extract_length",
")",
")",
":",
"return",
"literal",
"(",
"plain",
")",
"return",
"literal",
"(",
"unicode",
"(",
"whtext",
".",
"truncate",
"(",
"plain",
",",
"length",
"=",
"extract_length",
",",
"indicator",
"=",
"'...'",
",",
"whole_word",
"=",
"True",
")",
")",
")"
] | return the plain text representation of markdown encoded text . | train | false |
18,966 | def _compose_meas_info(res4, coils, trans, eeg):
info = _empty_info(res4['sfreq'])
info['meas_id'] = get_new_file_id()
info['meas_id']['usecs'] = 0
info['meas_id']['secs'] = _convert_time(res4['data_date'], res4['data_time'])
info['experimenter'] = res4['nf_operator']
info['subject_info'] = dict(his_id=res4['nf_subject_id'])
for filt in res4['filters']:
if (filt['type'] in _filt_map):
info[_filt_map[filt['type']]] = filt['freq']
(info['dig'], info['hpi_results']) = _pick_isotrak_and_hpi_coils(res4, coils, trans)
if (trans is not None):
if (len(info['hpi_results']) > 0):
info['hpi_results'][0]['coord_trans'] = trans['t_ctf_head_head']
if (trans['t_dev_head'] is not None):
info['dev_head_t'] = trans['t_dev_head']
info['dev_ctf_t'] = combine_transforms(trans['t_dev_head'], invert_transform(trans['t_ctf_head_head']), FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_MNE_COORD_CTF_HEAD)
if (trans['t_ctf_head_head'] is not None):
info['ctf_head_t'] = trans['t_ctf_head_head']
info['chs'] = _convert_channel_info(res4, trans, (eeg is None))
info['comps'] = _convert_comp_data(res4)
if (eeg is None):
eeg = _pick_eeg_pos(info)
_add_eeg_pos(eeg, trans, info)
logger.info(' Measurement info composed.')
info._update_redundant()
return info
| [
"def",
"_compose_meas_info",
"(",
"res4",
",",
"coils",
",",
"trans",
",",
"eeg",
")",
":",
"info",
"=",
"_empty_info",
"(",
"res4",
"[",
"'sfreq'",
"]",
")",
"info",
"[",
"'meas_id'",
"]",
"=",
"get_new_file_id",
"(",
")",
"info",
"[",
"'meas_id'",
"]",
"[",
"'usecs'",
"]",
"=",
"0",
"info",
"[",
"'meas_id'",
"]",
"[",
"'secs'",
"]",
"=",
"_convert_time",
"(",
"res4",
"[",
"'data_date'",
"]",
",",
"res4",
"[",
"'data_time'",
"]",
")",
"info",
"[",
"'experimenter'",
"]",
"=",
"res4",
"[",
"'nf_operator'",
"]",
"info",
"[",
"'subject_info'",
"]",
"=",
"dict",
"(",
"his_id",
"=",
"res4",
"[",
"'nf_subject_id'",
"]",
")",
"for",
"filt",
"in",
"res4",
"[",
"'filters'",
"]",
":",
"if",
"(",
"filt",
"[",
"'type'",
"]",
"in",
"_filt_map",
")",
":",
"info",
"[",
"_filt_map",
"[",
"filt",
"[",
"'type'",
"]",
"]",
"]",
"=",
"filt",
"[",
"'freq'",
"]",
"(",
"info",
"[",
"'dig'",
"]",
",",
"info",
"[",
"'hpi_results'",
"]",
")",
"=",
"_pick_isotrak_and_hpi_coils",
"(",
"res4",
",",
"coils",
",",
"trans",
")",
"if",
"(",
"trans",
"is",
"not",
"None",
")",
":",
"if",
"(",
"len",
"(",
"info",
"[",
"'hpi_results'",
"]",
")",
">",
"0",
")",
":",
"info",
"[",
"'hpi_results'",
"]",
"[",
"0",
"]",
"[",
"'coord_trans'",
"]",
"=",
"trans",
"[",
"'t_ctf_head_head'",
"]",
"if",
"(",
"trans",
"[",
"'t_dev_head'",
"]",
"is",
"not",
"None",
")",
":",
"info",
"[",
"'dev_head_t'",
"]",
"=",
"trans",
"[",
"'t_dev_head'",
"]",
"info",
"[",
"'dev_ctf_t'",
"]",
"=",
"combine_transforms",
"(",
"trans",
"[",
"'t_dev_head'",
"]",
",",
"invert_transform",
"(",
"trans",
"[",
"'t_ctf_head_head'",
"]",
")",
",",
"FIFF",
".",
"FIFFV_COORD_DEVICE",
",",
"FIFF",
".",
"FIFFV_MNE_COORD_CTF_HEAD",
")",
"if",
"(",
"trans",
"[",
"'t_ctf_head_head'",
"]",
"is",
"not",
"None",
")",
":",
"info",
"[",
"'ctf_head_t'",
"]",
"=",
"trans",
"[",
"'t_ctf_head_head'",
"]",
"info",
"[",
"'chs'",
"]",
"=",
"_convert_channel_info",
"(",
"res4",
",",
"trans",
",",
"(",
"eeg",
"is",
"None",
")",
")",
"info",
"[",
"'comps'",
"]",
"=",
"_convert_comp_data",
"(",
"res4",
")",
"if",
"(",
"eeg",
"is",
"None",
")",
":",
"eeg",
"=",
"_pick_eeg_pos",
"(",
"info",
")",
"_add_eeg_pos",
"(",
"eeg",
",",
"trans",
",",
"info",
")",
"logger",
".",
"info",
"(",
"' Measurement info composed.'",
")",
"info",
".",
"_update_redundant",
"(",
")",
"return",
"info"
] | create meas info from ctf data . | train | false |
18,967 | @contextmanager
def patch_read_csv(url_map, module=pd, strict=False):
read_csv = pd.read_csv
def patched_read_csv(filepath_or_buffer, *args, **kwargs):
if (filepath_or_buffer in url_map):
return read_csv(url_map[filepath_or_buffer], *args, **kwargs)
elif (not strict):
return read_csv(filepath_or_buffer, *args, **kwargs)
else:
raise AssertionError(('attempted to call read_csv on %r which not in the url map' % filepath_or_buffer))
with patch.object(module, 'read_csv', patched_read_csv):
(yield)
| [
"@",
"contextmanager",
"def",
"patch_read_csv",
"(",
"url_map",
",",
"module",
"=",
"pd",
",",
"strict",
"=",
"False",
")",
":",
"read_csv",
"=",
"pd",
".",
"read_csv",
"def",
"patched_read_csv",
"(",
"filepath_or_buffer",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"filepath_or_buffer",
"in",
"url_map",
")",
":",
"return",
"read_csv",
"(",
"url_map",
"[",
"filepath_or_buffer",
"]",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"elif",
"(",
"not",
"strict",
")",
":",
"return",
"read_csv",
"(",
"filepath_or_buffer",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"(",
"'attempted to call read_csv on %r which not in the url map'",
"%",
"filepath_or_buffer",
")",
")",
"with",
"patch",
".",
"object",
"(",
"module",
",",
"'read_csv'",
",",
"patched_read_csv",
")",
":",
"(",
"yield",
")"
] | patch pandas . | train | false |
18,968 | def fix_encoding():
ret = True
if (sys.platform == 'win32'):
ret &= fix_win_codec()
ret &= fix_default_encoding()
if (sys.platform == 'win32'):
encoding = sys.getdefaultencoding()
ret &= fix_win_sys_argv(encoding)
ret &= fix_win_console(encoding)
return ret
| [
"def",
"fix_encoding",
"(",
")",
":",
"ret",
"=",
"True",
"if",
"(",
"sys",
".",
"platform",
"==",
"'win32'",
")",
":",
"ret",
"&=",
"fix_win_codec",
"(",
")",
"ret",
"&=",
"fix_default_encoding",
"(",
")",
"if",
"(",
"sys",
".",
"platform",
"==",
"'win32'",
")",
":",
"encoding",
"=",
"sys",
".",
"getdefaultencoding",
"(",
")",
"ret",
"&=",
"fix_win_sys_argv",
"(",
"encoding",
")",
"ret",
"&=",
"fix_win_console",
"(",
"encoding",
")",
"return",
"ret"
] | fixes various encoding problems on all platforms . | train | false |
18,969 | def inv_send_pdf_footer(r):
if r.record:
T = current.T
footer = DIV(TABLE(TR(TH(T('Commodities Loaded')), TH(T('Date')), TH(T('Function')), TH(T('Name')), TH(T('Signature')), TH(T('Location (Site)')), TH(T('Condition'))), TR(TD(T('Loaded By')), TD(), TD(), TD(), TD(), TD(), TD()), TR(TD(T('Transported By')), TD(), TD(), TD(), TD(), TD(), TD()), TR(TH(T('Reception')), TH(T('Date')), TH(T('Function')), TH(T('Name')), TH(T('Signature')), TH(T('Location (Site)')), TH(T('Condition'))), TR(TD(T('Received By')), TD(), TD(), TD(), TD(), TD(), TD())))
return footer
return None
| [
"def",
"inv_send_pdf_footer",
"(",
"r",
")",
":",
"if",
"r",
".",
"record",
":",
"T",
"=",
"current",
".",
"T",
"footer",
"=",
"DIV",
"(",
"TABLE",
"(",
"TR",
"(",
"TH",
"(",
"T",
"(",
"'Commodities Loaded'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Date'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Function'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Name'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Signature'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Location (Site)'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Condition'",
")",
")",
")",
",",
"TR",
"(",
"TD",
"(",
"T",
"(",
"'Loaded By'",
")",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
")",
",",
"TR",
"(",
"TD",
"(",
"T",
"(",
"'Transported By'",
")",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
")",
",",
"TR",
"(",
"TH",
"(",
"T",
"(",
"'Reception'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Date'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Function'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Name'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Signature'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Location (Site)'",
")",
")",
",",
"TH",
"(",
"T",
"(",
"'Condition'",
")",
")",
")",
",",
"TR",
"(",
"TD",
"(",
"T",
"(",
"'Received By'",
")",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
",",
"TD",
"(",
")",
")",
")",
")",
"return",
"footer",
"return",
"None"
] | footer for the waybill . | train | false |
18,971 | def _kernel_versions_redhat():
kernel_get_last = __salt__['cmd.run']('rpm -q --last kernel')
kernels = []
kernel_versions = []
for line in kernel_get_last.splitlines():
if ('kernel-' in line):
kernels.append(line)
kernel = kernels[0].split(' ', 1)[0]
kernel = kernel.strip('kernel-')
kernel_versions.append(kernel)
return kernel_versions
| [
"def",
"_kernel_versions_redhat",
"(",
")",
":",
"kernel_get_last",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'rpm -q --last kernel'",
")",
"kernels",
"=",
"[",
"]",
"kernel_versions",
"=",
"[",
"]",
"for",
"line",
"in",
"kernel_get_last",
".",
"splitlines",
"(",
")",
":",
"if",
"(",
"'kernel-'",
"in",
"line",
")",
":",
"kernels",
".",
"append",
"(",
"line",
")",
"kernel",
"=",
"kernels",
"[",
"0",
"]",
".",
"split",
"(",
"' '",
",",
"1",
")",
"[",
"0",
"]",
"kernel",
"=",
"kernel",
".",
"strip",
"(",
"'kernel-'",
")",
"kernel_versions",
".",
"append",
"(",
"kernel",
")",
"return",
"kernel_versions"
] | name of the last installed kernel . | train | true |
18,972 | def technical_404_response(request, exception):
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried):
return empty_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
c = Context({'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': request.path_info[1:], 'urlpatterns': tried, 'reason': smart_str(exception, errors='replace'), 'request': request, 'settings': get_safe_settings()})
return HttpResponseNotFound(t.render(c), mimetype='text/html')
| [
"def",
"technical_404_response",
"(",
"request",
",",
"exception",
")",
":",
"try",
":",
"tried",
"=",
"exception",
".",
"args",
"[",
"0",
"]",
"[",
"'tried'",
"]",
"except",
"(",
"IndexError",
",",
"TypeError",
",",
"KeyError",
")",
":",
"tried",
"=",
"[",
"]",
"else",
":",
"if",
"(",
"not",
"tried",
")",
":",
"return",
"empty_urlconf",
"(",
"request",
")",
"urlconf",
"=",
"getattr",
"(",
"request",
",",
"'urlconf'",
",",
"settings",
".",
"ROOT_URLCONF",
")",
"if",
"isinstance",
"(",
"urlconf",
",",
"types",
".",
"ModuleType",
")",
":",
"urlconf",
"=",
"urlconf",
".",
"__name__",
"t",
"=",
"Template",
"(",
"TECHNICAL_404_TEMPLATE",
",",
"name",
"=",
"'Technical 404 template'",
")",
"c",
"=",
"Context",
"(",
"{",
"'urlconf'",
":",
"urlconf",
",",
"'root_urlconf'",
":",
"settings",
".",
"ROOT_URLCONF",
",",
"'request_path'",
":",
"request",
".",
"path_info",
"[",
"1",
":",
"]",
",",
"'urlpatterns'",
":",
"tried",
",",
"'reason'",
":",
"smart_str",
"(",
"exception",
",",
"errors",
"=",
"'replace'",
")",
",",
"'request'",
":",
"request",
",",
"'settings'",
":",
"get_safe_settings",
"(",
")",
"}",
")",
"return",
"HttpResponseNotFound",
"(",
"t",
".",
"render",
"(",
"c",
")",
",",
"mimetype",
"=",
"'text/html'",
")"
] | create a technical 404 error response . | train | false |
18,974 | def format_track(track, show_url=True):
out = track['title']
out += ' by \x02{}\x02'.format(track['user']['username'])
if track['genre']:
out += ' - \x02{}\x02'.format(track['genre'])
out += ' - \x02{:,}\x02 plays, \x02{:,}\x02 favorites, \x02{:,}\x02 comments'.format(track['playback_count'], track['favoritings_count'], track['comment_count'])
if show_url:
out += ' - {}'.format(web.try_shorten(track['permalink_url']))
return out
| [
"def",
"format_track",
"(",
"track",
",",
"show_url",
"=",
"True",
")",
":",
"out",
"=",
"track",
"[",
"'title'",
"]",
"out",
"+=",
"' by \\x02{}\\x02'",
".",
"format",
"(",
"track",
"[",
"'user'",
"]",
"[",
"'username'",
"]",
")",
"if",
"track",
"[",
"'genre'",
"]",
":",
"out",
"+=",
"' - \\x02{}\\x02'",
".",
"format",
"(",
"track",
"[",
"'genre'",
"]",
")",
"out",
"+=",
"' - \\x02{:,}\\x02 plays, \\x02{:,}\\x02 favorites, \\x02{:,}\\x02 comments'",
".",
"format",
"(",
"track",
"[",
"'playback_count'",
"]",
",",
"track",
"[",
"'favoritings_count'",
"]",
",",
"track",
"[",
"'comment_count'",
"]",
")",
"if",
"show_url",
":",
"out",
"+=",
"' - {}'",
".",
"format",
"(",
"web",
".",
"try_shorten",
"(",
"track",
"[",
"'permalink_url'",
"]",
")",
")",
"return",
"out"
] | takes a soundcloud track item and returns a formatted string . | train | false |
18,976 | def _pretty_longstring(defstr, prefix=u'', wrap_at=65):
outstr = u''
for line in textwrap.fill(defstr, wrap_at).split(u'\n'):
outstr += ((prefix + line) + u'\n')
return outstr
| [
"def",
"_pretty_longstring",
"(",
"defstr",
",",
"prefix",
"=",
"u''",
",",
"wrap_at",
"=",
"65",
")",
":",
"outstr",
"=",
"u''",
"for",
"line",
"in",
"textwrap",
".",
"fill",
"(",
"defstr",
",",
"wrap_at",
")",
".",
"split",
"(",
"u'\\n'",
")",
":",
"outstr",
"+=",
"(",
"(",
"prefix",
"+",
"line",
")",
"+",
"u'\\n'",
")",
"return",
"outstr"
] | helper function for pretty-printing a long string . | train | false |
18,978 | def obtain(proxy):
if (not isproxy(proxy)):
raise TypeError('object must be a proxy')
if isinstance(proxy, function):
globals = getconn(proxy)._local_namespace
return _load_function(_dump_function(proxy), globals)
else:
return pickle.loads(getconn(proxy).modules.cPickle.dumps(proxy, pickle.HIGHEST_PROTOCOL))
| [
"def",
"obtain",
"(",
"proxy",
")",
":",
"if",
"(",
"not",
"isproxy",
"(",
"proxy",
")",
")",
":",
"raise",
"TypeError",
"(",
"'object must be a proxy'",
")",
"if",
"isinstance",
"(",
"proxy",
",",
"function",
")",
":",
"globals",
"=",
"getconn",
"(",
"proxy",
")",
".",
"_local_namespace",
"return",
"_load_function",
"(",
"_dump_function",
"(",
"proxy",
")",
",",
"globals",
")",
"else",
":",
"return",
"pickle",
".",
"loads",
"(",
"getconn",
"(",
"proxy",
")",
".",
"modules",
".",
"cPickle",
".",
"dumps",
"(",
"proxy",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
")"
] | obtains a remote object . | train | false |
18,979 | def patmatch(name, pat):
if (pat not in _pat_cache):
_pat_cache[pat] = re.compile(_translate_pattern(pat))
return _pat_cache[pat].match(name)
| [
"def",
"patmatch",
"(",
"name",
",",
"pat",
")",
":",
"if",
"(",
"pat",
"not",
"in",
"_pat_cache",
")",
":",
"_pat_cache",
"[",
"pat",
"]",
"=",
"re",
".",
"compile",
"(",
"_translate_pattern",
"(",
"pat",
")",
")",
"return",
"_pat_cache",
"[",
"pat",
"]",
".",
"match",
"(",
"name",
")"
] | return if name matches pat . | train | false |
18,981 | def zap(target=None, **kwargs):
if (target is not None):
log.warning('Depricated use of function, use kwargs')
target = kwargs.get('dev', target)
kwargs['dev'] = target
return ceph_cfg.zap(**kwargs)
| [
"def",
"zap",
"(",
"target",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"target",
"is",
"not",
"None",
")",
":",
"log",
".",
"warning",
"(",
"'Depricated use of function, use kwargs'",
")",
"target",
"=",
"kwargs",
".",
"get",
"(",
"'dev'",
",",
"target",
")",
"kwargs",
"[",
"'dev'",
"]",
"=",
"target",
"return",
"ceph_cfg",
".",
"zap",
"(",
"**",
"kwargs",
")"
] | destroy the partition table and content of a given disk . | train | true |
18,982 | def decode_TEXT(value):
atoms = decode_header(value)
decodedvalue = ''
for (atom, charset) in atoms:
if (charset is not None):
atom = atom.decode(charset)
decodedvalue += atom
return decodedvalue
| [
"def",
"decode_TEXT",
"(",
"value",
")",
":",
"atoms",
"=",
"decode_header",
"(",
"value",
")",
"decodedvalue",
"=",
"''",
"for",
"(",
"atom",
",",
"charset",
")",
"in",
"atoms",
":",
"if",
"(",
"charset",
"is",
"not",
"None",
")",
":",
"atom",
"=",
"atom",
".",
"decode",
"(",
"charset",
")",
"decodedvalue",
"+=",
"atom",
"return",
"decodedvalue"
] | decode :rfc:2047 text . | train | false |
18,983 | def totals(cls, interval):
time_points = get_time_points(interval)
q = Session.query(cls.date, sum(cls.pageview_count).label('sum')).filter((cls.interval == interval)).filter(cls.date.in_(time_points)).filter(cls.codename.startswith(Link._type_prefix)).group_by(cls.date).order_by(desc(cls.date))
return fill_gaps(time_points, q, 'sum')
| [
"def",
"totals",
"(",
"cls",
",",
"interval",
")",
":",
"time_points",
"=",
"get_time_points",
"(",
"interval",
")",
"q",
"=",
"Session",
".",
"query",
"(",
"cls",
".",
"date",
",",
"sum",
"(",
"cls",
".",
"pageview_count",
")",
".",
"label",
"(",
"'sum'",
")",
")",
".",
"filter",
"(",
"(",
"cls",
".",
"interval",
"==",
"interval",
")",
")",
".",
"filter",
"(",
"cls",
".",
"date",
".",
"in_",
"(",
"time_points",
")",
")",
".",
"filter",
"(",
"cls",
".",
"codename",
".",
"startswith",
"(",
"Link",
".",
"_type_prefix",
")",
")",
".",
"group_by",
"(",
"cls",
".",
"date",
")",
".",
"order_by",
"(",
"desc",
"(",
"cls",
".",
"date",
")",
")",
"return",
"fill_gaps",
"(",
"time_points",
",",
"q",
",",
"'sum'",
")"
] | aggregate sitewide totals for self-serve promotion traffic . | train | false |
18,984 | def p_expr_unary(p):
p[0] = ('UNARY', '-', p[2])
| [
"def",
"p_expr_unary",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"(",
"'UNARY'",
",",
"'-'",
",",
"p",
"[",
"2",
"]",
")"
] | expr : minus expr %prec uminus . | train | false |
18,985 | def avro_hexlify(reader):
bytes = []
current_byte = reader.read(1)
bytes.append(hexlify(current_byte))
while ((ord(current_byte) & 128) != 0):
current_byte = reader.read(1)
bytes.append(hexlify(current_byte))
return ' '.join(bytes)
| [
"def",
"avro_hexlify",
"(",
"reader",
")",
":",
"bytes",
"=",
"[",
"]",
"current_byte",
"=",
"reader",
".",
"read",
"(",
"1",
")",
"bytes",
".",
"append",
"(",
"hexlify",
"(",
"current_byte",
")",
")",
"while",
"(",
"(",
"ord",
"(",
"current_byte",
")",
"&",
"128",
")",
"!=",
"0",
")",
":",
"current_byte",
"=",
"reader",
".",
"read",
"(",
"1",
")",
"bytes",
".",
"append",
"(",
"hexlify",
"(",
"current_byte",
")",
")",
"return",
"' '",
".",
"join",
"(",
"bytes",
")"
] | return the hex value . | train | false |
18,986 | def db_decrypt_parameters_and_properties(ctxt, encryption_key, batch_size=50, verbose=False):
excs = []
excs.extend(_db_encrypt_or_decrypt_template_params(ctxt, encryption_key, False, batch_size, verbose))
excs.extend(_db_encrypt_or_decrypt_resource_prop_data(ctxt, encryption_key, False, batch_size, verbose))
return excs
| [
"def",
"db_decrypt_parameters_and_properties",
"(",
"ctxt",
",",
"encryption_key",
",",
"batch_size",
"=",
"50",
",",
"verbose",
"=",
"False",
")",
":",
"excs",
"=",
"[",
"]",
"excs",
".",
"extend",
"(",
"_db_encrypt_or_decrypt_template_params",
"(",
"ctxt",
",",
"encryption_key",
",",
"False",
",",
"batch_size",
",",
"verbose",
")",
")",
"excs",
".",
"extend",
"(",
"_db_encrypt_or_decrypt_resource_prop_data",
"(",
"ctxt",
",",
"encryption_key",
",",
"False",
",",
"batch_size",
",",
"verbose",
")",
")",
"return",
"excs"
] | decrypt parameters and properties for all templates in db . | train | false |
18,988 | def list_pool(hostname, username, password, name=None):
bigip_session = _build_session(username, password)
try:
if name:
response = bigip_session.get((BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool/{name}/?expandSubcollections=true'.format(name=name)))
else:
response = bigip_session.get((BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool'))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
| [
"def",
"list_pool",
"(",
"hostname",
",",
"username",
",",
"password",
",",
"name",
"=",
"None",
")",
":",
"bigip_session",
"=",
"_build_session",
"(",
"username",
",",
"password",
")",
"try",
":",
"if",
"name",
":",
"response",
"=",
"bigip_session",
".",
"get",
"(",
"(",
"BIG_IP_URL_BASE",
".",
"format",
"(",
"host",
"=",
"hostname",
")",
"+",
"'/ltm/pool/{name}/?expandSubcollections=true'",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
")",
"else",
":",
"response",
"=",
"bigip_session",
".",
"get",
"(",
"(",
"BIG_IP_URL_BASE",
".",
"format",
"(",
"host",
"=",
"hostname",
")",
"+",
"'/ltm/pool'",
")",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
"as",
"e",
":",
"return",
"_load_connection_error",
"(",
"hostname",
",",
"e",
")",
"return",
"_load_response",
"(",
"response",
")"
] | a function to connect to a bigip device and list a specific pool . | train | true |
18,990 | def updating_writer(a):
log.debug('updating the context')
context = a[0]
register = 3
slave_id = 0
address = 16
values = context[slave_id].getValues(register, address, count=5)
values = [(v + 1) for v in values]
log.debug(('new values: ' + str(values)))
context[slave_id].setValues(register, address, values)
| [
"def",
"updating_writer",
"(",
"a",
")",
":",
"log",
".",
"debug",
"(",
"'updating the context'",
")",
"context",
"=",
"a",
"[",
"0",
"]",
"register",
"=",
"3",
"slave_id",
"=",
"0",
"address",
"=",
"16",
"values",
"=",
"context",
"[",
"slave_id",
"]",
".",
"getValues",
"(",
"register",
",",
"address",
",",
"count",
"=",
"5",
")",
"values",
"=",
"[",
"(",
"v",
"+",
"1",
")",
"for",
"v",
"in",
"values",
"]",
"log",
".",
"debug",
"(",
"(",
"'new values: '",
"+",
"str",
"(",
"values",
")",
")",
")",
"context",
"[",
"slave_id",
"]",
".",
"setValues",
"(",
"register",
",",
"address",
",",
"values",
")"
] | a worker process that runs every so often and updates live values of the context . | train | false |
18,991 | def test_cache_config_disable_private_browsing(config_stub, tmpdir):
config_stub.data = {'storage': {'cache-size': 1024}, 'general': {'private-browsing': True}}
url = 'http://qutebrowser.org'
metadata = QNetworkCacheMetaData()
metadata.setUrl(QUrl(url))
assert metadata.isValid()
disk_cache = cache.DiskCache(str(tmpdir))
assert (disk_cache.prepare(metadata) is None)
config_stub.set('general', 'private-browsing', False)
content = 'cute'
preload_cache(disk_cache, url, content)
assert (disk_cache.data(QUrl(url)).readAll() == content)
| [
"def",
"test_cache_config_disable_private_browsing",
"(",
"config_stub",
",",
"tmpdir",
")",
":",
"config_stub",
".",
"data",
"=",
"{",
"'storage'",
":",
"{",
"'cache-size'",
":",
"1024",
"}",
",",
"'general'",
":",
"{",
"'private-browsing'",
":",
"True",
"}",
"}",
"url",
"=",
"'http://qutebrowser.org'",
"metadata",
"=",
"QNetworkCacheMetaData",
"(",
")",
"metadata",
".",
"setUrl",
"(",
"QUrl",
"(",
"url",
")",
")",
"assert",
"metadata",
".",
"isValid",
"(",
")",
"disk_cache",
"=",
"cache",
".",
"DiskCache",
"(",
"str",
"(",
"tmpdir",
")",
")",
"assert",
"(",
"disk_cache",
".",
"prepare",
"(",
"metadata",
")",
"is",
"None",
")",
"config_stub",
".",
"set",
"(",
"'general'",
",",
"'private-browsing'",
",",
"False",
")",
"content",
"=",
"'cute'",
"preload_cache",
"(",
"disk_cache",
",",
"url",
",",
"content",
")",
"assert",
"(",
"disk_cache",
".",
"data",
"(",
"QUrl",
"(",
"url",
")",
")",
".",
"readAll",
"(",
")",
"==",
"content",
")"
] | change private-browsing config to false and emit signal . | train | false |
18,993 | def get_cat_sample_groups(sam_cats):
cat_sam_groups = defaultdict(list)
for (k, v) in sam_cats.iteritems():
cat_sam_groups[v].append(k)
return cat_sam_groups
| [
"def",
"get_cat_sample_groups",
"(",
"sam_cats",
")",
":",
"cat_sam_groups",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"sam_cats",
".",
"iteritems",
"(",
")",
":",
"cat_sam_groups",
"[",
"v",
"]",
".",
"append",
"(",
"k",
")",
"return",
"cat_sam_groups"
] | create {category_value:[samples_with_that_value]} dict . | train | false |
18,997 | def get_mission_test_data_path(mission_type):
base_path = os.path.abspath(os.path.dirname(__file__))
new_path = os.path.join(base_path, '..', mission_type, 'test_data')
absolute_ified = os.path.abspath(new_path)
return absolute_ified
| [
"def",
"get_mission_test_data_path",
"(",
"mission_type",
")",
":",
"base_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"new_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"'..'",
",",
"mission_type",
",",
"'test_data'",
")",
"absolute_ified",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"new_path",
")",
"return",
"absolute_ified"
] | returns an absolute path to test data . | train | false |
18,998 | def get_uuid(app, user):
try:
return app.addonpurchase_set.get(user=user).uuid
except ObjectDoesNotExist:
return 'none'
| [
"def",
"get_uuid",
"(",
"app",
",",
"user",
")",
":",
"try",
":",
"return",
"app",
".",
"addonpurchase_set",
".",
"get",
"(",
"user",
"=",
"user",
")",
".",
"uuid",
"except",
"ObjectDoesNotExist",
":",
"return",
"'none'"
] | return a uuid for use in the payment flow . | train | false |
18,999 | @hug.startup()
def on_startup(api):
return
| [
"@",
"hug",
".",
"startup",
"(",
")",
"def",
"on_startup",
"(",
"api",
")",
":",
"return"
] | for testing . | train | false |
19,000 | @sync_performer
def perform_download_packages_from_repository(dispatcher, intent):
rpm_version = make_rpm_version(intent.flocker_version)
package_type = intent.distribution.package_type()
s = requests.Session()
s.mount('file://', FileAdapter())
downloaded_packages = set()
for package in intent.packages:
package_name = package_filename(package_type=package_type, package=package, architecture=PACKAGE_ARCHITECTURE[package], rpm_version=rpm_version)
url = ((intent.source_repo + '/') + package_name)
local_path = intent.target_path.child(package_name).path
download = s.get(url)
download.raise_for_status()
content = download.content
with open(local_path, 'wb') as local_file:
local_file.write(content)
downloaded_packages.add(package_name)
return downloaded_packages
| [
"@",
"sync_performer",
"def",
"perform_download_packages_from_repository",
"(",
"dispatcher",
",",
"intent",
")",
":",
"rpm_version",
"=",
"make_rpm_version",
"(",
"intent",
".",
"flocker_version",
")",
"package_type",
"=",
"intent",
".",
"distribution",
".",
"package_type",
"(",
")",
"s",
"=",
"requests",
".",
"Session",
"(",
")",
"s",
".",
"mount",
"(",
"'file://'",
",",
"FileAdapter",
"(",
")",
")",
"downloaded_packages",
"=",
"set",
"(",
")",
"for",
"package",
"in",
"intent",
".",
"packages",
":",
"package_name",
"=",
"package_filename",
"(",
"package_type",
"=",
"package_type",
",",
"package",
"=",
"package",
",",
"architecture",
"=",
"PACKAGE_ARCHITECTURE",
"[",
"package",
"]",
",",
"rpm_version",
"=",
"rpm_version",
")",
"url",
"=",
"(",
"(",
"intent",
".",
"source_repo",
"+",
"'/'",
")",
"+",
"package_name",
")",
"local_path",
"=",
"intent",
".",
"target_path",
".",
"child",
"(",
"package_name",
")",
".",
"path",
"download",
"=",
"s",
".",
"get",
"(",
"url",
")",
"download",
".",
"raise_for_status",
"(",
")",
"content",
"=",
"download",
".",
"content",
"with",
"open",
"(",
"local_path",
",",
"'wb'",
")",
"as",
"local_file",
":",
"local_file",
".",
"write",
"(",
"content",
")",
"downloaded_packages",
".",
"add",
"(",
"package_name",
")",
"return",
"downloaded_packages"
] | see :class:downloadpackagesfromrepository . | train | false |
19,001 | def distribute_or_over_and(expr):
return _distribute((expr, Or, And))
| [
"def",
"distribute_or_over_and",
"(",
"expr",
")",
":",
"return",
"_distribute",
"(",
"(",
"expr",
",",
"Or",
",",
"And",
")",
")"
] | given a sentence s consisting of conjunctions and disjunctions of literals . | train | false |
19,002 | def _num_cpus_unix():
return os.sysconf('SC_NPROCESSORS_ONLN')
| [
"def",
"_num_cpus_unix",
"(",
")",
":",
"return",
"os",
".",
"sysconf",
"(",
"'SC_NPROCESSORS_ONLN'",
")"
] | return the number of active cpus on a unix system . | train | false |
19,004 | def register_pkg(name, formula_def, conn=None):
if (conn is None):
conn = init()
conn.execute('INSERT INTO packages VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (name, formula_def['version'], formula_def['release'], datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT'), formula_def.get('os', None), formula_def.get('os_family', None), formula_def.get('dependencies', None), formula_def.get('os_dependencies', None), formula_def.get('os_family_dependencies', None), formula_def['summary'], formula_def['description']))
| [
"def",
"register_pkg",
"(",
"name",
",",
"formula_def",
",",
"conn",
"=",
"None",
")",
":",
"if",
"(",
"conn",
"is",
"None",
")",
":",
"conn",
"=",
"init",
"(",
")",
"conn",
".",
"execute",
"(",
"'INSERT INTO packages VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'",
",",
"(",
"name",
",",
"formula_def",
"[",
"'version'",
"]",
",",
"formula_def",
"[",
"'release'",
"]",
",",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"'%a, %d %b %Y %H:%M:%S GMT'",
")",
",",
"formula_def",
".",
"get",
"(",
"'os'",
",",
"None",
")",
",",
"formula_def",
".",
"get",
"(",
"'os_family'",
",",
"None",
")",
",",
"formula_def",
".",
"get",
"(",
"'dependencies'",
",",
"None",
")",
",",
"formula_def",
".",
"get",
"(",
"'os_dependencies'",
",",
"None",
")",
",",
"formula_def",
".",
"get",
"(",
"'os_family_dependencies'",
",",
"None",
")",
",",
"formula_def",
"[",
"'summary'",
"]",
",",
"formula_def",
"[",
"'description'",
"]",
")",
")"
] | register a package in the package database . | train | false |
19,005 | def get_ports(proto='tcp', direction='in'):
proto = proto.upper()
direction = direction.upper()
results = {}
_validate_direction_and_proto(direction, proto)
directions = build_directions(direction)
for direction in directions:
option = '{0}_{1}'.format(proto, direction)
results[direction] = _csf_to_list(option)
return results
| [
"def",
"get_ports",
"(",
"proto",
"=",
"'tcp'",
",",
"direction",
"=",
"'in'",
")",
":",
"proto",
"=",
"proto",
".",
"upper",
"(",
")",
"direction",
"=",
"direction",
".",
"upper",
"(",
")",
"results",
"=",
"{",
"}",
"_validate_direction_and_proto",
"(",
"direction",
",",
"proto",
")",
"directions",
"=",
"build_directions",
"(",
"direction",
")",
"for",
"direction",
"in",
"directions",
":",
"option",
"=",
"'{0}_{1}'",
".",
"format",
"(",
"proto",
",",
"direction",
")",
"results",
"[",
"direction",
"]",
"=",
"_csf_to_list",
"(",
"option",
")",
"return",
"results"
] | get a brocade specific port . | train | true |
19,006 | def EvalLognormalCdf(x, mu=0, sigma=1):
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
| [
"def",
"EvalLognormalCdf",
"(",
"x",
",",
"mu",
"=",
"0",
",",
"sigma",
"=",
"1",
")",
":",
"return",
"stats",
".",
"lognorm",
".",
"cdf",
"(",
"x",
",",
"loc",
"=",
"mu",
",",
"scale",
"=",
"sigma",
")"
] | evaluates the cdf of the lognormal distribution . | train | false |
19,007 | def getAllAttributeNames(object):
attrdict = {}
try:
key = type(object).__name__
except Exception:
key = 'anonymous'
wakeupcall = dir(object)
del wakeupcall
attributes = dir(object)
attrdict[(key, 'dir', len(attributes))] = attributes
try:
attributes = object.__dict__.keys()
attributes.sort()
except Exception:
pass
else:
attrdict[(key, '__dict__', len(attributes))] = attributes
try:
klass = object.__class__
except Exception:
pass
else:
if (klass is object):
pass
else:
attrdict.update(getAllAttributeNames(klass))
try:
bases = object.__bases__
except Exception:
pass
else:
if isinstance(bases, types.TupleType):
for base in bases:
if (type(base) is types.TypeType):
pass
else:
attrdict.update(getAllAttributeNames(base))
return attrdict
| [
"def",
"getAllAttributeNames",
"(",
"object",
")",
":",
"attrdict",
"=",
"{",
"}",
"try",
":",
"key",
"=",
"type",
"(",
"object",
")",
".",
"__name__",
"except",
"Exception",
":",
"key",
"=",
"'anonymous'",
"wakeupcall",
"=",
"dir",
"(",
"object",
")",
"del",
"wakeupcall",
"attributes",
"=",
"dir",
"(",
"object",
")",
"attrdict",
"[",
"(",
"key",
",",
"'dir'",
",",
"len",
"(",
"attributes",
")",
")",
"]",
"=",
"attributes",
"try",
":",
"attributes",
"=",
"object",
".",
"__dict__",
".",
"keys",
"(",
")",
"attributes",
".",
"sort",
"(",
")",
"except",
"Exception",
":",
"pass",
"else",
":",
"attrdict",
"[",
"(",
"key",
",",
"'__dict__'",
",",
"len",
"(",
"attributes",
")",
")",
"]",
"=",
"attributes",
"try",
":",
"klass",
"=",
"object",
".",
"__class__",
"except",
"Exception",
":",
"pass",
"else",
":",
"if",
"(",
"klass",
"is",
"object",
")",
":",
"pass",
"else",
":",
"attrdict",
".",
"update",
"(",
"getAllAttributeNames",
"(",
"klass",
")",
")",
"try",
":",
"bases",
"=",
"object",
".",
"__bases__",
"except",
"Exception",
":",
"pass",
"else",
":",
"if",
"isinstance",
"(",
"bases",
",",
"types",
".",
"TupleType",
")",
":",
"for",
"base",
"in",
"bases",
":",
"if",
"(",
"type",
"(",
"base",
")",
"is",
"types",
".",
"TypeType",
")",
":",
"pass",
"else",
":",
"attrdict",
".",
"update",
"(",
"getAllAttributeNames",
"(",
"base",
")",
")",
"return",
"attrdict"
] | return dict of all attributes . | train | false |
19,008 | def get_continuous_query(database, name, **client_args):
client = _client(**client_args)
try:
for (db, cqs) in client.query('SHOW CONTINUOUS QUERIES').items():
if (db[0] == database):
return next((cq for cq in cqs if (cq.get('name') == name)))
except StopIteration:
return {}
return {}
| [
"def",
"get_continuous_query",
"(",
"database",
",",
"name",
",",
"**",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"**",
"client_args",
")",
"try",
":",
"for",
"(",
"db",
",",
"cqs",
")",
"in",
"client",
".",
"query",
"(",
"'SHOW CONTINUOUS QUERIES'",
")",
".",
"items",
"(",
")",
":",
"if",
"(",
"db",
"[",
"0",
"]",
"==",
"database",
")",
":",
"return",
"next",
"(",
"(",
"cq",
"for",
"cq",
"in",
"cqs",
"if",
"(",
"cq",
".",
"get",
"(",
"'name'",
")",
"==",
"name",
")",
")",
")",
"except",
"StopIteration",
":",
"return",
"{",
"}",
"return",
"{",
"}"
] | get an existing continuous query . | train | true |
19,012 | def compile_dir(dfn):
return
subprocess.call([PYTHON, '-OO', '-m', 'compileall', '-f', dfn])
| [
"def",
"compile_dir",
"(",
"dfn",
")",
":",
"return",
"subprocess",
".",
"call",
"(",
"[",
"PYTHON",
",",
"'-OO'",
",",
"'-m'",
",",
"'compileall'",
",",
"'-f'",
",",
"dfn",
"]",
")"
] | compile * . | train | false |
19,013 | def get_authorization_header(request):
auth = request.META.get(u'HTTP_AUTHORIZATION', '')
if isinstance(auth, text_type):
auth = auth.encode(HTTP_HEADER_ENCODING)
return auth
| [
"def",
"get_authorization_header",
"(",
"request",
")",
":",
"auth",
"=",
"request",
".",
"META",
".",
"get",
"(",
"u'HTTP_AUTHORIZATION'",
",",
"''",
")",
"if",
"isinstance",
"(",
"auth",
",",
"text_type",
")",
":",
"auth",
"=",
"auth",
".",
"encode",
"(",
"HTTP_HEADER_ENCODING",
")",
"return",
"auth"
] | return requests authorization: header . | train | false |
19,014 | def _solve_cg(lap_sparse, B, tol, return_full_prob=False):
lap_sparse = lap_sparse.tocsc()
X = []
for i in range(len(B)):
x0 = cg(lap_sparse, (- B[i].todense()), tol=tol)[0]
X.append(x0)
if (not return_full_prob):
X = np.array(X)
X = np.argmax(X, axis=0)
return X
| [
"def",
"_solve_cg",
"(",
"lap_sparse",
",",
"B",
",",
"tol",
",",
"return_full_prob",
"=",
"False",
")",
":",
"lap_sparse",
"=",
"lap_sparse",
".",
"tocsc",
"(",
")",
"X",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"B",
")",
")",
":",
"x0",
"=",
"cg",
"(",
"lap_sparse",
",",
"(",
"-",
"B",
"[",
"i",
"]",
".",
"todense",
"(",
")",
")",
",",
"tol",
"=",
"tol",
")",
"[",
"0",
"]",
"X",
".",
"append",
"(",
"x0",
")",
"if",
"(",
"not",
"return_full_prob",
")",
":",
"X",
"=",
"np",
".",
"array",
"(",
"X",
")",
"X",
"=",
"np",
".",
"argmax",
"(",
"X",
",",
"axis",
"=",
"0",
")",
"return",
"X"
] | solves lap_sparse x_i = b_i for each phase i . | train | false |
19,015 | def p_const_value(p):
p[0] = p[1]
| [
"def",
"p_const_value",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]"
] | const_value : intconstant | dubconstant | literal | boolconstant | const_list | const_map | const_ref . | train | false |
19,016 | def no_filter(blast_subject_entry):
return True
| [
"def",
"no_filter",
"(",
"blast_subject_entry",
")",
":",
"return",
"True"
] | a placeholder filter function which always returns true . | train | false |
19,017 | def assert_is_valid_xml(output):
try:
to_xml(output)
except Exception as e:
raise AssertionError(('Expected valid XML, but could not parse output. %s' % str(e)))
| [
"def",
"assert_is_valid_xml",
"(",
"output",
")",
":",
"try",
":",
"to_xml",
"(",
"output",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"AssertionError",
"(",
"(",
"'Expected valid XML, but could not parse output. %s'",
"%",
"str",
"(",
"e",
")",
")",
")"
] | simple assertion that just verifies the specified output is valid xml . | train | false |
@reflect(types.Set)
def reflect_set(typ, val, c):
    """Copy a mutated native set's contents back into its backing Python set.

    Registered as the reflection handler for ``types.Set``. Does nothing for
    non-reflected set types. For reflected sets whose payload carries the
    ``dirty`` flag, the parent Python set is cleared and rebuilt from the
    native items, then the dirty flag is reset.

    NOTE(review): ``c`` is presumably a numba boxing context exposing
    ``context``, ``builder`` and ``pyapi`` -- confirm against the caller.
    """
    # Non-reflected sets have no Python object to synchronise with.
    if (not typ.reflected):
        return
    inst = setobj.SetInstance(c.context, c.builder, typ, val)
    payload = inst.payload
    # Only synchronise when the native payload was mutated; the branch is
    # marked unlikely to keep the common (clean) path cheap.
    with c.builder.if_then(payload.dirty, likely=False):
        # The original Python set object backing the native set.
        obj = inst.parent
        # Rebuild the Python set from scratch: clear it, then update it from
        # a temporary list built out of the native items.
        c.pyapi.set_clear(obj)
        (ok, listobj) = _native_set_to_python_list(typ, payload, c)
        with c.builder.if_then(ok, likely=True):
            c.pyapi.set_update(obj, listobj)
            # Drop our reference to the temporary list.
            c.pyapi.decref(listobj)
        # Mark the native side clean so repeated boxing skips this work.
        inst.set_dirty(False)
| [
"@",
"reflect",
"(",
"types",
".",
"Set",
")",
"def",
"reflect_set",
"(",
"typ",
",",
"val",
",",
"c",
")",
":",
"if",
"(",
"not",
"typ",
".",
"reflected",
")",
":",
"return",
"inst",
"=",
"setobj",
".",
"SetInstance",
"(",
"c",
".",
"context",
",",
"c",
".",
"builder",
",",
"typ",
",",
"val",
")",
"payload",
"=",
"inst",
".",
"payload",
"with",
"c",
".",
"builder",
".",
"if_then",
"(",
"payload",
".",
"dirty",
",",
"likely",
"=",
"False",
")",
":",
"obj",
"=",
"inst",
".",
"parent",
"c",
".",
"pyapi",
".",
"set_clear",
"(",
"obj",
")",
"(",
"ok",
",",
"listobj",
")",
"=",
"_native_set_to_python_list",
"(",
"typ",
",",
"payload",
",",
"c",
")",
"with",
"c",
".",
"builder",
".",
"if_then",
"(",
"ok",
",",
"likely",
"=",
"True",
")",
":",
"c",
".",
"pyapi",
".",
"set_update",
"(",
"obj",
",",
"listobj",
")",
"c",
".",
"pyapi",
".",
"decref",
"(",
"listobj",
")",
"inst",
".",
"set_dirty",
"(",
"False",
")"
] | reflect the native set's contents into the python object . | train | false
19,020 | def _configure_subclass_mapper(mapper, context, path, adapter):
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(('No such polymorphic_identity %r is defined' % discriminator))
if (sub_mapper is mapper):
return None
return instance_processor(sub_mapper, context, path, adapter, polymorphic_from=mapper)
return configure_subclass_mapper
| [
"def",
"_configure_subclass_mapper",
"(",
"mapper",
",",
"context",
",",
"path",
",",
"adapter",
")",
":",
"def",
"configure_subclass_mapper",
"(",
"discriminator",
")",
":",
"try",
":",
"sub_mapper",
"=",
"mapper",
".",
"polymorphic_map",
"[",
"discriminator",
"]",
"except",
"KeyError",
":",
"raise",
"AssertionError",
"(",
"(",
"'No such polymorphic_identity %r is defined'",
"%",
"discriminator",
")",
")",
"if",
"(",
"sub_mapper",
"is",
"mapper",
")",
":",
"return",
"None",
"return",
"instance_processor",
"(",
"sub_mapper",
",",
"context",
",",
"path",
",",
"adapter",
",",
"polymorphic_from",
"=",
"mapper",
")",
"return",
"configure_subclass_mapper"
] | produce a mapper level row processor callable factory for mappers inheriting this one . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.