| id_within_dataset<br>int64 1–55.5k | snippet<br>stringlengths 19–14.2k | tokens<br>listlengths 6–1.63k | nl<br>stringlengths 6–352 | split_within_dataset<br>stringclasses 1 value | is_duplicated<br>bool 2 classes |
|---|---|---|---|---|---|
6,881
|
def openHubFile(filepath, filename, mode):
global _hubFiles
hubFile = openFile(os.path.join(filepath, filename), mode)
_hubFiles.append(hubFile)
return hubFile
|
[
"def",
"openHubFile",
"(",
"filepath",
",",
"filename",
",",
"mode",
")",
":",
"global",
"_hubFiles",
"hubFile",
"=",
"openFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"filepath",
",",
"filename",
")",
",",
"mode",
")",
"_hubFiles",
".",
"append",
"(",
"hubFile",
")",
"return",
"hubFile"
] |
open an hdf5 datastore file and register it so that it is closed even on interpreter crash .
|
train
| false
|
6,883
|
def test_a_name():
html = '<a name="foo">bar</a>'
eq_(html, bleach.delinkify(html))
|
[
"def",
"test_a_name",
"(",
")",
":",
"html",
"=",
"'<a name=\"foo\">bar</a>'",
"eq_",
"(",
"html",
",",
"bleach",
".",
"delinkify",
"(",
"html",
")",
")"
] |
dont screw with non-link <a> tags .
|
train
| false
|
6,884
|
def get_line_col(index):
(line, col) = map(int, index.split('.'))
return (line, col)
|
[
"def",
"get_line_col",
"(",
"index",
")",
":",
"(",
"line",
",",
"col",
")",
"=",
"map",
"(",
"int",
",",
"index",
".",
"split",
"(",
"'.'",
")",
")",
"return",
"(",
"line",
",",
"col",
")"
] |
return tuple of ints from line .
|
train
| false
|
6,885
|
def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=[]):
lines = ((['// marker so line numbers and indices both start at 1'] + lines) + ['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = _NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if (file_extension == 'h'):
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
|
[
"def",
"ProcessFileData",
"(",
"filename",
",",
"file_extension",
",",
"lines",
",",
"error",
",",
"extra_check_functions",
"=",
"[",
"]",
")",
":",
"lines",
"=",
"(",
"(",
"[",
"'// marker so line numbers and indices both start at 1'",
"]",
"+",
"lines",
")",
"+",
"[",
"'// marker so line numbers end in a known way'",
"]",
")",
"include_state",
"=",
"_IncludeState",
"(",
")",
"function_state",
"=",
"_FunctionState",
"(",
")",
"nesting_state",
"=",
"_NestingState",
"(",
")",
"ResetNolintSuppressions",
"(",
")",
"CheckForCopyright",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"if",
"(",
"file_extension",
"==",
"'h'",
")",
":",
"CheckForHeaderGuard",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"RemoveMultiLineComments",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"clean_lines",
"=",
"CleansedLines",
"(",
"lines",
")",
"for",
"line",
"in",
"xrange",
"(",
"clean_lines",
".",
"NumLines",
"(",
")",
")",
":",
"ProcessLine",
"(",
"filename",
",",
"file_extension",
",",
"clean_lines",
",",
"line",
",",
"include_state",
",",
"function_state",
",",
"nesting_state",
",",
"error",
",",
"extra_check_functions",
")",
"nesting_state",
".",
"CheckCompletedBlocks",
"(",
"filename",
",",
"error",
")",
"CheckForIncludeWhatYouUse",
"(",
"filename",
",",
"clean_lines",
",",
"include_state",
",",
"error",
")",
"CheckForBadCharacters",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"CheckForNewlineAtEOF",
"(",
"filename",
",",
"lines",
",",
"error",
")"
] |
performs lint checks and reports any errors to the given error function .
|
train
| false
|
6,886
|
def random_distribution(min=(-5.0), max=5.0, total_items=50):
num_items = random.randrange(5, total_items)
all_info = []
for item in range(num_items):
new_item = random.uniform(min, max)
all_info.append(new_item)
return all_info
|
[
"def",
"random_distribution",
"(",
"min",
"=",
"(",
"-",
"5.0",
")",
",",
"max",
"=",
"5.0",
",",
"total_items",
"=",
"50",
")",
":",
"num_items",
"=",
"random",
".",
"randrange",
"(",
"5",
",",
"total_items",
")",
"all_info",
"=",
"[",
"]",
"for",
"item",
"in",
"range",
"(",
"num_items",
")",
":",
"new_item",
"=",
"random",
".",
"uniform",
"(",
"min",
",",
"max",
")",
"all_info",
".",
"append",
"(",
"new_item",
")",
"return",
"all_info"
] |
generate a random column of probabilities .
|
train
| false
|
6,887
|
def TimestampUTCToISO8601(timestamp):
utc_tuple = datetime.utcfromtimestamp(timestamp)
return ('%0.4d-%0.2d-%0.2d' % (utc_tuple.year, utc_tuple.month, utc_tuple.day))
|
[
"def",
"TimestampUTCToISO8601",
"(",
"timestamp",
")",
":",
"utc_tuple",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"timestamp",
")",
"return",
"(",
"'%0.4d-%0.2d-%0.2d'",
"%",
"(",
"utc_tuple",
".",
"year",
",",
"utc_tuple",
".",
"month",
",",
"utc_tuple",
".",
"day",
")",
")"
] |
return the timestamp iso 8601 format: yyyy-mm-dd .
|
train
| false
|
6,888
|
def c2f(r, i, ctype_name):
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name]((ftype(r) + (1j * ftype(i))))
|
[
"def",
"c2f",
"(",
"r",
",",
"i",
",",
"ctype_name",
")",
":",
"ftype",
"=",
"c2f_dict",
"[",
"ctype_name",
"]",
"return",
"np",
".",
"typeDict",
"[",
"ctype_name",
"]",
"(",
"(",
"ftype",
"(",
"r",
")",
"+",
"(",
"1j",
"*",
"ftype",
"(",
"i",
")",
")",
")",
")"
] |
convert strings to complex number instance with specified numpy type .
|
train
| true
|
6,889
|
def get_subplotspec_list(axes_list, grid_spec=None):
subplotspec_list = []
for ax in axes_list:
axes_or_locator = ax.get_axes_locator()
if (axes_or_locator is None):
axes_or_locator = ax
if hasattr(axes_or_locator, 'get_subplotspec'):
subplotspec = axes_or_locator.get_subplotspec()
subplotspec = subplotspec.get_topmost_subplotspec()
gs = subplotspec.get_gridspec()
if (grid_spec is not None):
if (gs != grid_spec):
subplotspec = None
elif gs.locally_modified_subplot_params():
subplotspec = None
else:
subplotspec = None
subplotspec_list.append(subplotspec)
return subplotspec_list
|
[
"def",
"get_subplotspec_list",
"(",
"axes_list",
",",
"grid_spec",
"=",
"None",
")",
":",
"subplotspec_list",
"=",
"[",
"]",
"for",
"ax",
"in",
"axes_list",
":",
"axes_or_locator",
"=",
"ax",
".",
"get_axes_locator",
"(",
")",
"if",
"(",
"axes_or_locator",
"is",
"None",
")",
":",
"axes_or_locator",
"=",
"ax",
"if",
"hasattr",
"(",
"axes_or_locator",
",",
"'get_subplotspec'",
")",
":",
"subplotspec",
"=",
"axes_or_locator",
".",
"get_subplotspec",
"(",
")",
"subplotspec",
"=",
"subplotspec",
".",
"get_topmost_subplotspec",
"(",
")",
"gs",
"=",
"subplotspec",
".",
"get_gridspec",
"(",
")",
"if",
"(",
"grid_spec",
"is",
"not",
"None",
")",
":",
"if",
"(",
"gs",
"!=",
"grid_spec",
")",
":",
"subplotspec",
"=",
"None",
"elif",
"gs",
".",
"locally_modified_subplot_params",
"(",
")",
":",
"subplotspec",
"=",
"None",
"else",
":",
"subplotspec",
"=",
"None",
"subplotspec_list",
".",
"append",
"(",
"subplotspec",
")",
"return",
"subplotspec_list"
] |
return a list of subplotspec from the given list of axes .
|
train
| false
|
6,890
|
def _convert_to_varsSOP(minterm, variables):
temp = []
for (i, m) in enumerate(minterm):
if (m == 0):
temp.append(Not(variables[i]))
elif (m == 1):
temp.append(variables[i])
else:
pass
return And(*temp)
|
[
"def",
"_convert_to_varsSOP",
"(",
"minterm",
",",
"variables",
")",
":",
"temp",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"m",
")",
"in",
"enumerate",
"(",
"minterm",
")",
":",
"if",
"(",
"m",
"==",
"0",
")",
":",
"temp",
".",
"append",
"(",
"Not",
"(",
"variables",
"[",
"i",
"]",
")",
")",
"elif",
"(",
"m",
"==",
"1",
")",
":",
"temp",
".",
"append",
"(",
"variables",
"[",
"i",
"]",
")",
"else",
":",
"pass",
"return",
"And",
"(",
"*",
"temp",
")"
] |
converts a term in the expansion of a function from binary to its variable form .
|
train
| false
|
6,891
|
def square_this(x):
return (x ** 2)
|
[
"def",
"square_this",
"(",
"x",
")",
":",
"return",
"(",
"x",
"**",
"2",
")"
] |
return the square of a number .
|
train
| false
|
6,892
|
def c_login(client):
cname = (DUMMY_NAME % client.gid)
cpwd = (DUMMY_PWD % client.gid)
roomname = (ROOM_TEMPLATE % client.counter())
exitname1 = (EXIT_TEMPLATE % client.counter())
exitname2 = (EXIT_TEMPLATE % client.counter())
client.exits.extend([exitname1, exitname2])
cmds = (('create %s %s' % (cname, cpwd)), ('connect %s %s' % (cname, cpwd)), (('@dig %s' % START_ROOM) % client.gid), (('@teleport %s' % START_ROOM) % client.gid), ('@dig %s = %s, %s' % (roomname, exitname1, exitname2)))
return cmds
|
[
"def",
"c_login",
"(",
"client",
")",
":",
"cname",
"=",
"(",
"DUMMY_NAME",
"%",
"client",
".",
"gid",
")",
"cpwd",
"=",
"(",
"DUMMY_PWD",
"%",
"client",
".",
"gid",
")",
"roomname",
"=",
"(",
"ROOM_TEMPLATE",
"%",
"client",
".",
"counter",
"(",
")",
")",
"exitname1",
"=",
"(",
"EXIT_TEMPLATE",
"%",
"client",
".",
"counter",
"(",
")",
")",
"exitname2",
"=",
"(",
"EXIT_TEMPLATE",
"%",
"client",
".",
"counter",
"(",
")",
")",
"client",
".",
"exits",
".",
"extend",
"(",
"[",
"exitname1",
",",
"exitname2",
"]",
")",
"cmds",
"=",
"(",
"(",
"'create %s %s'",
"%",
"(",
"cname",
",",
"cpwd",
")",
")",
",",
"(",
"'connect %s %s'",
"%",
"(",
"cname",
",",
"cpwd",
")",
")",
",",
"(",
"(",
"'@dig %s'",
"%",
"START_ROOM",
")",
"%",
"client",
".",
"gid",
")",
",",
"(",
"(",
"'@teleport %s'",
"%",
"START_ROOM",
")",
"%",
"client",
".",
"gid",
")",
",",
"(",
"'@dig %s = %s, %s'",
"%",
"(",
"roomname",
",",
"exitname1",
",",
"exitname2",
")",
")",
")",
"return",
"cmds"
] |
logins to the game .
|
train
| false
|
6,893
|
@pytest.fixture(params=['tests/fake-repo-pre/', 'tests/fake-repo-pre'])
def bake(request):
main.cookiecutter(request.param, no_input=True)
|
[
"@",
"pytest",
".",
"fixture",
"(",
"params",
"=",
"[",
"'tests/fake-repo-pre/'",
",",
"'tests/fake-repo-pre'",
"]",
")",
"def",
"bake",
"(",
"request",
")",
":",
"main",
".",
"cookiecutter",
"(",
"request",
".",
"param",
",",
"no_input",
"=",
"True",
")"
] |
run cookiecutter with the given input_dir path .
|
train
| false
|
6,894
|
def api_view(http_method_names=None, exclude_from_schema=False):
http_method_names = ([u'GET'] if (http_method_names is None) else http_method_names)
def decorator(func):
WrappedAPIView = type(((six.PY3 and u'WrappedAPIView') or 'WrappedAPIView'), (APIView,), {u'__doc__': func.__doc__})
assert (not isinstance(http_method_names, types.FunctionType)), u'@api_view missing list of allowed HTTP methods'
assert isinstance(http_method_names, (list, tuple)), (u'@api_view expected a list of strings, received %s' % type(http_method_names).__name__)
allowed_methods = (set(http_method_names) | set((u'options',)))
WrappedAPIView.http_method_names = [method.lower() for method in allowed_methods]
def handler(self, *args, **kwargs):
return func(*args, **kwargs)
for method in http_method_names:
setattr(WrappedAPIView, method.lower(), handler)
WrappedAPIView.__name__ = func.__name__
WrappedAPIView.__module__ = func.__module__
WrappedAPIView.renderer_classes = getattr(func, u'renderer_classes', APIView.renderer_classes)
WrappedAPIView.parser_classes = getattr(func, u'parser_classes', APIView.parser_classes)
WrappedAPIView.authentication_classes = getattr(func, u'authentication_classes', APIView.authentication_classes)
WrappedAPIView.throttle_classes = getattr(func, u'throttle_classes', APIView.throttle_classes)
WrappedAPIView.permission_classes = getattr(func, u'permission_classes', APIView.permission_classes)
WrappedAPIView.exclude_from_schema = exclude_from_schema
return WrappedAPIView.as_view()
return decorator
|
[
"def",
"api_view",
"(",
"http_method_names",
"=",
"None",
",",
"exclude_from_schema",
"=",
"False",
")",
":",
"http_method_names",
"=",
"(",
"[",
"u'GET'",
"]",
"if",
"(",
"http_method_names",
"is",
"None",
")",
"else",
"http_method_names",
")",
"def",
"decorator",
"(",
"func",
")",
":",
"WrappedAPIView",
"=",
"type",
"(",
"(",
"(",
"six",
".",
"PY3",
"and",
"u'WrappedAPIView'",
")",
"or",
"'WrappedAPIView'",
")",
",",
"(",
"APIView",
",",
")",
",",
"{",
"u'__doc__'",
":",
"func",
".",
"__doc__",
"}",
")",
"assert",
"(",
"not",
"isinstance",
"(",
"http_method_names",
",",
"types",
".",
"FunctionType",
")",
")",
",",
"u'@api_view missing list of allowed HTTP methods'",
"assert",
"isinstance",
"(",
"http_method_names",
",",
"(",
"list",
",",
"tuple",
")",
")",
",",
"(",
"u'@api_view expected a list of strings, received %s'",
"%",
"type",
"(",
"http_method_names",
")",
".",
"__name__",
")",
"allowed_methods",
"=",
"(",
"set",
"(",
"http_method_names",
")",
"|",
"set",
"(",
"(",
"u'options'",
",",
")",
")",
")",
"WrappedAPIView",
".",
"http_method_names",
"=",
"[",
"method",
".",
"lower",
"(",
")",
"for",
"method",
"in",
"allowed_methods",
"]",
"def",
"handler",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"for",
"method",
"in",
"http_method_names",
":",
"setattr",
"(",
"WrappedAPIView",
",",
"method",
".",
"lower",
"(",
")",
",",
"handler",
")",
"WrappedAPIView",
".",
"__name__",
"=",
"func",
".",
"__name__",
"WrappedAPIView",
".",
"__module__",
"=",
"func",
".",
"__module__",
"WrappedAPIView",
".",
"renderer_classes",
"=",
"getattr",
"(",
"func",
",",
"u'renderer_classes'",
",",
"APIView",
".",
"renderer_classes",
")",
"WrappedAPIView",
".",
"parser_classes",
"=",
"getattr",
"(",
"func",
",",
"u'parser_classes'",
",",
"APIView",
".",
"parser_classes",
")",
"WrappedAPIView",
".",
"authentication_classes",
"=",
"getattr",
"(",
"func",
",",
"u'authentication_classes'",
",",
"APIView",
".",
"authentication_classes",
")",
"WrappedAPIView",
".",
"throttle_classes",
"=",
"getattr",
"(",
"func",
",",
"u'throttle_classes'",
",",
"APIView",
".",
"throttle_classes",
")",
"WrappedAPIView",
".",
"permission_classes",
"=",
"getattr",
"(",
"func",
",",
"u'permission_classes'",
",",
"APIView",
".",
"permission_classes",
")",
"WrappedAPIView",
".",
"exclude_from_schema",
"=",
"exclude_from_schema",
"return",
"WrappedAPIView",
".",
"as_view",
"(",
")",
"return",
"decorator"
] |
wrapper for calling an api view .
|
train
| false
|
6,895
|
@jsonrpc_method('os.kldload')
def os_kldload(request, plugin_id, module):
plugin = Plugins.objects.filter(pk=plugin_id)
if (plugin.exists() and (not Kmod.objects.filter(plugin__id=plugin[0].id, module=module).exists())):
Kmod.objects.create(plugin=plugin[0], module=module, order=None)
pipe = Popen(['/sbin/kldstat', '-n', module], stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
pipe.communicate()
if (pipe.returncode == 0):
return True
pipe = Popen(['/sbin/kldload', module], stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
pipe.communicate()
return (pipe.returncode == 0)
|
[
"@",
"jsonrpc_method",
"(",
"'os.kldload'",
")",
"def",
"os_kldload",
"(",
"request",
",",
"plugin_id",
",",
"module",
")",
":",
"plugin",
"=",
"Plugins",
".",
"objects",
".",
"filter",
"(",
"pk",
"=",
"plugin_id",
")",
"if",
"(",
"plugin",
".",
"exists",
"(",
")",
"and",
"(",
"not",
"Kmod",
".",
"objects",
".",
"filter",
"(",
"plugin__id",
"=",
"plugin",
"[",
"0",
"]",
".",
"id",
",",
"module",
"=",
"module",
")",
".",
"exists",
"(",
")",
")",
")",
":",
"Kmod",
".",
"objects",
".",
"create",
"(",
"plugin",
"=",
"plugin",
"[",
"0",
"]",
",",
"module",
"=",
"module",
",",
"order",
"=",
"None",
")",
"pipe",
"=",
"Popen",
"(",
"[",
"'/sbin/kldstat'",
",",
"'-n'",
",",
"module",
"]",
",",
"stdin",
"=",
"PIPE",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"pipe",
".",
"communicate",
"(",
")",
"if",
"(",
"pipe",
".",
"returncode",
"==",
"0",
")",
":",
"return",
"True",
"pipe",
"=",
"Popen",
"(",
"[",
"'/sbin/kldload'",
",",
"module",
"]",
",",
"stdin",
"=",
"PIPE",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"pipe",
".",
"communicate",
"(",
")",
"return",
"(",
"pipe",
".",
"returncode",
"==",
"0",
")"
] |
load a kernel module returns: boolean .
|
train
| false
|
6,897
|
def connect_action(action, fn):
action.triggered[bool].connect((lambda x: fn()))
|
[
"def",
"connect_action",
"(",
"action",
",",
"fn",
")",
":",
"action",
".",
"triggered",
"[",
"bool",
"]",
".",
"connect",
"(",
"(",
"lambda",
"x",
":",
"fn",
"(",
")",
")",
")"
] |
connect an action to a function .
|
train
| false
|
6,898
|
def getRotatedComplexes(planeAngle, points):
rotatedComplexes = []
for point in points:
rotatedComplexes.append((planeAngle * point))
return rotatedComplexes
|
[
"def",
"getRotatedComplexes",
"(",
"planeAngle",
",",
"points",
")",
":",
"rotatedComplexes",
"=",
"[",
"]",
"for",
"point",
"in",
"points",
":",
"rotatedComplexes",
".",
"append",
"(",
"(",
"planeAngle",
"*",
"point",
")",
")",
"return",
"rotatedComplexes"
] |
get points rotated by the plane angle .
|
train
| false
|
6,899
|
def get_version_and_flavor(executable):
version_string = get_version_from_pycaffe()
if (version_string is None):
version_string = get_version_from_cmdline(executable)
if (version_string is None):
version_string = get_version_from_soname(executable)
if (version_string is None):
raise ValueError(('Could not find version information for Caffe build ' + ('at "%s". Upgrade your installation' % executable)))
version = parse_version(version_string)
if (parse_version(0, 99, 0) > version > parse_version(0, 9, 0)):
flavor = 'NVIDIA'
minimum_version = '0.11.0'
if (version < parse_version(minimum_version)):
raise ValueError(('Required version "%s" is greater than "%s". Upgrade your installation.' % (minimum_version, version_string)))
else:
flavor = 'BVLC'
return (version_string, flavor)
|
[
"def",
"get_version_and_flavor",
"(",
"executable",
")",
":",
"version_string",
"=",
"get_version_from_pycaffe",
"(",
")",
"if",
"(",
"version_string",
"is",
"None",
")",
":",
"version_string",
"=",
"get_version_from_cmdline",
"(",
"executable",
")",
"if",
"(",
"version_string",
"is",
"None",
")",
":",
"version_string",
"=",
"get_version_from_soname",
"(",
"executable",
")",
"if",
"(",
"version_string",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Could not find version information for Caffe build '",
"+",
"(",
"'at \"%s\". Upgrade your installation'",
"%",
"executable",
")",
")",
")",
"version",
"=",
"parse_version",
"(",
"version_string",
")",
"if",
"(",
"parse_version",
"(",
"0",
",",
"99",
",",
"0",
")",
">",
"version",
">",
"parse_version",
"(",
"0",
",",
"9",
",",
"0",
")",
")",
":",
"flavor",
"=",
"'NVIDIA'",
"minimum_version",
"=",
"'0.11.0'",
"if",
"(",
"version",
"<",
"parse_version",
"(",
"minimum_version",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Required version \"%s\" is greater than \"%s\". Upgrade your installation.'",
"%",
"(",
"minimum_version",
",",
"version_string",
")",
")",
")",
"else",
":",
"flavor",
"=",
"'BVLC'",
"return",
"(",
"version_string",
",",
"flavor",
")"
] |
returns should be called after import_pycaffe() .
|
train
| false
|
6,900
|
@pytest.fixture()
def link_headers(manager):
def headers(response):
links = {}
for link in requests.utils.parse_header_links(response.headers.get(u'link')):
url = link[u'url']
page = int(re.search(u'(?<!per_)page=(\\d)', url).group(1))
links[link[u'rel']] = dict(url=url, page=page)
return links
return headers
|
[
"@",
"pytest",
".",
"fixture",
"(",
")",
"def",
"link_headers",
"(",
"manager",
")",
":",
"def",
"headers",
"(",
"response",
")",
":",
"links",
"=",
"{",
"}",
"for",
"link",
"in",
"requests",
".",
"utils",
".",
"parse_header_links",
"(",
"response",
".",
"headers",
".",
"get",
"(",
"u'link'",
")",
")",
":",
"url",
"=",
"link",
"[",
"u'url'",
"]",
"page",
"=",
"int",
"(",
"re",
".",
"search",
"(",
"u'(?<!per_)page=(\\\\d)'",
",",
"url",
")",
".",
"group",
"(",
"1",
")",
")",
"links",
"[",
"link",
"[",
"u'rel'",
"]",
"]",
"=",
"dict",
"(",
"url",
"=",
"url",
",",
"page",
"=",
"page",
")",
"return",
"links",
"return",
"headers"
] |
parses link headers and return them in dict form .
|
train
| false
|
6,901
|
def dense_to_one_hot(labels, n_classes=2):
labels = np.array(labels)
n_labels = labels.shape[0]
index_offset = (np.arange(n_labels) * n_classes)
labels_one_hot = np.zeros((n_labels, n_classes), dtype=np.float32)
labels_one_hot.flat[(index_offset + labels.ravel())] = 1
return labels_one_hot
|
[
"def",
"dense_to_one_hot",
"(",
"labels",
",",
"n_classes",
"=",
"2",
")",
":",
"labels",
"=",
"np",
".",
"array",
"(",
"labels",
")",
"n_labels",
"=",
"labels",
".",
"shape",
"[",
"0",
"]",
"index_offset",
"=",
"(",
"np",
".",
"arange",
"(",
"n_labels",
")",
"*",
"n_classes",
")",
"labels_one_hot",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_labels",
",",
"n_classes",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"labels_one_hot",
".",
"flat",
"[",
"(",
"index_offset",
"+",
"labels",
".",
"ravel",
"(",
")",
")",
"]",
"=",
"1",
"return",
"labels_one_hot"
] |
convert class labels from scalars to one-hot vectors .
|
train
| false
|
6,902
|
def bulk_replace(values, existing_adapter, new_adapter):
assert isinstance(values, list)
idset = util.IdentitySet
existing_idset = idset((existing_adapter or ()))
constants = existing_idset.intersection((values or ()))
additions = idset((values or ())).difference(constants)
removals = existing_idset.difference(constants)
appender = new_adapter.bulk_appender()
for member in (values or ()):
if (member in additions):
appender(member)
elif (member in constants):
appender(member, _sa_initiator=False)
if existing_adapter:
remover = existing_adapter.bulk_remover()
for member in removals:
remover(member)
|
[
"def",
"bulk_replace",
"(",
"values",
",",
"existing_adapter",
",",
"new_adapter",
")",
":",
"assert",
"isinstance",
"(",
"values",
",",
"list",
")",
"idset",
"=",
"util",
".",
"IdentitySet",
"existing_idset",
"=",
"idset",
"(",
"(",
"existing_adapter",
"or",
"(",
")",
")",
")",
"constants",
"=",
"existing_idset",
".",
"intersection",
"(",
"(",
"values",
"or",
"(",
")",
")",
")",
"additions",
"=",
"idset",
"(",
"(",
"values",
"or",
"(",
")",
")",
")",
".",
"difference",
"(",
"constants",
")",
"removals",
"=",
"existing_idset",
".",
"difference",
"(",
"constants",
")",
"appender",
"=",
"new_adapter",
".",
"bulk_appender",
"(",
")",
"for",
"member",
"in",
"(",
"values",
"or",
"(",
")",
")",
":",
"if",
"(",
"member",
"in",
"additions",
")",
":",
"appender",
"(",
"member",
")",
"elif",
"(",
"member",
"in",
"constants",
")",
":",
"appender",
"(",
"member",
",",
"_sa_initiator",
"=",
"False",
")",
"if",
"existing_adapter",
":",
"remover",
"=",
"existing_adapter",
".",
"bulk_remover",
"(",
")",
"for",
"member",
"in",
"removals",
":",
"remover",
"(",
"member",
")"
] |
load a new collection .
|
train
| false
|
6,903
|
@register.tag('static_with_version')
def do_static_with_version(parser, token):
return StaticWithVersionNode.handle_token(parser, token)
|
[
"@",
"register",
".",
"tag",
"(",
"'static_with_version'",
")",
"def",
"do_static_with_version",
"(",
"parser",
",",
"token",
")",
":",
"return",
"StaticWithVersionNode",
".",
"handle_token",
"(",
"parser",
",",
"token",
")"
] |
joins the given path with the static_url setting and appends the cms version as a get parameter .
|
train
| false
|
6,904
|
def constrains(*args):
return attrsetter('_constrains', args)
|
[
"def",
"constrains",
"(",
"*",
"args",
")",
":",
"return",
"attrsetter",
"(",
"'_constrains'",
",",
"args",
")"
] |
decorates a constraint checker .
|
train
| false
|
6,905
|
def _usage_overall(raw):
data = {}
for line in raw.split('\n')[1:]:
keyset = [item.strip() for item in re.sub('\\s+', ' ', line).split(':', 1) if item.strip()]
if (len(keyset) == 2):
key = re.sub('[()]', '', keyset[0]).replace(' ', '_').lower()
if (key in ['free_estimated', 'global_reserve']):
subk = keyset[1].split('(')
data[key] = subk[0].strip()
subk = subk[1].replace(')', '').split(': ')
data['{0}_{1}'.format(key, subk[0])] = subk[1]
else:
data[key] = keyset[1]
return data
|
[
"def",
"_usage_overall",
"(",
"raw",
")",
":",
"data",
"=",
"{",
"}",
"for",
"line",
"in",
"raw",
".",
"split",
"(",
"'\\n'",
")",
"[",
"1",
":",
"]",
":",
"keyset",
"=",
"[",
"item",
".",
"strip",
"(",
")",
"for",
"item",
"in",
"re",
".",
"sub",
"(",
"'\\\\s+'",
",",
"' '",
",",
"line",
")",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"item",
".",
"strip",
"(",
")",
"]",
"if",
"(",
"len",
"(",
"keyset",
")",
"==",
"2",
")",
":",
"key",
"=",
"re",
".",
"sub",
"(",
"'[()]'",
",",
"''",
",",
"keyset",
"[",
"0",
"]",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
".",
"lower",
"(",
")",
"if",
"(",
"key",
"in",
"[",
"'free_estimated'",
",",
"'global_reserve'",
"]",
")",
":",
"subk",
"=",
"keyset",
"[",
"1",
"]",
".",
"split",
"(",
"'('",
")",
"data",
"[",
"key",
"]",
"=",
"subk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"subk",
"=",
"subk",
"[",
"1",
"]",
".",
"replace",
"(",
"')'",
",",
"''",
")",
".",
"split",
"(",
"': '",
")",
"data",
"[",
"'{0}_{1}'",
".",
"format",
"(",
"key",
",",
"subk",
"[",
"0",
"]",
")",
"]",
"=",
"subk",
"[",
"1",
"]",
"else",
":",
"data",
"[",
"key",
"]",
"=",
"keyset",
"[",
"1",
"]",
"return",
"data"
] |
parse usage/overall .
|
train
| true
|
6,906
|
def make_buffer_from_bit_pattern(pattern, on_freq, off_freq):
last_bit = pattern[(-1)]
output_buffer = []
offset = 0
for i in range(len(pattern)):
bit = pattern[i]
if (i < (len(pattern) - 1)):
next_bit = pattern[(i + 1)]
else:
next_bit = pattern[0]
freq = (on_freq if (bit == '1') else off_freq)
tone = quietnet.tone(freq, DATASIZE, offset=offset)
output_buffer += quietnet.envelope(tone, left=(last_bit == '0'), right=(next_bit == '0'))
offset += DATASIZE
last_bit = bit
return quietnet.pack_buffer(output_buffer)
|
[
"def",
"make_buffer_from_bit_pattern",
"(",
"pattern",
",",
"on_freq",
",",
"off_freq",
")",
":",
"last_bit",
"=",
"pattern",
"[",
"(",
"-",
"1",
")",
"]",
"output_buffer",
"=",
"[",
"]",
"offset",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"pattern",
")",
")",
":",
"bit",
"=",
"pattern",
"[",
"i",
"]",
"if",
"(",
"i",
"<",
"(",
"len",
"(",
"pattern",
")",
"-",
"1",
")",
")",
":",
"next_bit",
"=",
"pattern",
"[",
"(",
"i",
"+",
"1",
")",
"]",
"else",
":",
"next_bit",
"=",
"pattern",
"[",
"0",
"]",
"freq",
"=",
"(",
"on_freq",
"if",
"(",
"bit",
"==",
"'1'",
")",
"else",
"off_freq",
")",
"tone",
"=",
"quietnet",
".",
"tone",
"(",
"freq",
",",
"DATASIZE",
",",
"offset",
"=",
"offset",
")",
"output_buffer",
"+=",
"quietnet",
".",
"envelope",
"(",
"tone",
",",
"left",
"=",
"(",
"last_bit",
"==",
"'0'",
")",
",",
"right",
"=",
"(",
"next_bit",
"==",
"'0'",
")",
")",
"offset",
"+=",
"DATASIZE",
"last_bit",
"=",
"bit",
"return",
"quietnet",
".",
"pack_buffer",
"(",
"output_buffer",
")"
] |
takes a pattern and returns an audio buffer that encodes that pattern .
|
train
| false
|
6,907
|
def firstLine(s):
try:
i = s.index('\n')
return s[:i]
except ValueError:
return s
|
[
"def",
"firstLine",
"(",
"s",
")",
":",
"try",
":",
"i",
"=",
"s",
".",
"index",
"(",
"'\\n'",
")",
"return",
"s",
"[",
":",
"i",
"]",
"except",
"ValueError",
":",
"return",
"s"
] |
return the first line of the given string .
|
train
| false
|
6,908
|
def render_markdown(text):
if isinstance(text, bytes):
text = text.decode(u'utf-8')
return markdown(text, **MARKDOWN_KWARGS)
|
[
"def",
"render_markdown",
"(",
"text",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"bytes",
")",
":",
"text",
"=",
"text",
".",
"decode",
"(",
"u'utf-8'",
")",
"return",
"markdown",
"(",
"text",
",",
"**",
"MARKDOWN_KWARGS",
")"
] |
returns the data as rendered markdown .
|
train
| false
|
6,909
|
def validate_integer(value, name, min_value=None, max_value=None):
try:
value = int(value)
except (TypeError, ValueError, UnicodeEncodeError):
raise webob.exc.HTTPBadRequest(explanation=(_('%s must be an integer.') % name))
if ((min_value is not None) and (value < min_value)):
raise webob.exc.HTTPBadRequest(explanation=(_('%(value_name)s must be >= %(min_value)d') % {'value_name': name, 'min_value': min_value}))
if ((max_value is not None) and (value > max_value)):
raise webob.exc.HTTPBadRequest(explanation=(_('%(value_name)s must be <= %(max_value)d') % {'value_name': name, 'max_value': max_value}))
return value
|
[
"def",
"validate_integer",
"(",
"value",
",",
"name",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
")",
":",
"try",
":",
"value",
"=",
"int",
"(",
"value",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"UnicodeEncodeError",
")",
":",
"raise",
"webob",
".",
"exc",
".",
"HTTPBadRequest",
"(",
"explanation",
"=",
"(",
"_",
"(",
"'%s must be an integer.'",
")",
"%",
"name",
")",
")",
"if",
"(",
"(",
"min_value",
"is",
"not",
"None",
")",
"and",
"(",
"value",
"<",
"min_value",
")",
")",
":",
"raise",
"webob",
".",
"exc",
".",
"HTTPBadRequest",
"(",
"explanation",
"=",
"(",
"_",
"(",
"'%(value_name)s must be >= %(min_value)d'",
")",
"%",
"{",
"'value_name'",
":",
"name",
",",
"'min_value'",
":",
"min_value",
"}",
")",
")",
"if",
"(",
"(",
"max_value",
"is",
"not",
"None",
")",
"and",
"(",
"value",
">",
"max_value",
")",
")",
":",
"raise",
"webob",
".",
"exc",
".",
"HTTPBadRequest",
"(",
"explanation",
"=",
"(",
"_",
"(",
"'%(value_name)s must be <= %(max_value)d'",
")",
"%",
"{",
"'value_name'",
":",
"name",
",",
"'max_value'",
":",
"max_value",
"}",
")",
")",
"return",
"value"
] |
make sure that value is a valid integer .
|
train
| false
|
6,910
|
def update_schema_task(cursor=None, num_updated=0, batch_size=100):
reload(models_v2)
query = models_v2.Picture.query()
(pictures, next_cursor, more) = query.fetch_page(batch_size, start_cursor=cursor)
to_put = []
for picture in pictures:
picture.num_votes = 1
picture.avg_rating = 5
to_put.append(picture)
if to_put:
ndb.put_multi(to_put)
num_updated += len(to_put)
logging.info('Put {} entities to Datastore for a total of {}'.format(len(to_put), num_updated))
if more:
deferred.defer(update_schema_task, cursor=next_cursor, num_updated=num_updated)
else:
logging.debug('update_schema_task complete with {0} updates!'.format(num_updated))
|
[
"def",
"update_schema_task",
"(",
"cursor",
"=",
"None",
",",
"num_updated",
"=",
"0",
",",
"batch_size",
"=",
"100",
")",
":",
"reload",
"(",
"models_v2",
")",
"query",
"=",
"models_v2",
".",
"Picture",
".",
"query",
"(",
")",
"(",
"pictures",
",",
"next_cursor",
",",
"more",
")",
"=",
"query",
".",
"fetch_page",
"(",
"batch_size",
",",
"start_cursor",
"=",
"cursor",
")",
"to_put",
"=",
"[",
"]",
"for",
"picture",
"in",
"pictures",
":",
"picture",
".",
"num_votes",
"=",
"1",
"picture",
".",
"avg_rating",
"=",
"5",
"to_put",
".",
"append",
"(",
"picture",
")",
"if",
"to_put",
":",
"ndb",
".",
"put_multi",
"(",
"to_put",
")",
"num_updated",
"+=",
"len",
"(",
"to_put",
")",
"logging",
".",
"info",
"(",
"'Put {} entities to Datastore for a total of {}'",
".",
"format",
"(",
"len",
"(",
"to_put",
")",
",",
"num_updated",
")",
")",
"if",
"more",
":",
"deferred",
".",
"defer",
"(",
"update_schema_task",
",",
"cursor",
"=",
"next_cursor",
",",
"num_updated",
"=",
"num_updated",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"'update_schema_task complete with {0} updates!'",
".",
"format",
"(",
"num_updated",
")",
")"
] |
task that handles updating the models schema .
|
train
| false
|
6,911
|
def checkInt(s):
try:
int(s)
return True
except ValueError:
return False
|
[
"def",
"checkInt",
"(",
"s",
")",
":",
"try",
":",
"int",
"(",
"s",
")",
"return",
"True",
"except",
"ValueError",
":",
"return",
"False"
] |
check if input string is an int .
|
train
| false
|
6,912
|
def iscsi_login(target_name):
cmd = ('iscsiadm --mode node --login --targetname %s' % target_name)
output = utils.system_output(cmd)
target_login = ''
if ('successful' in output):
target_login = target_name
return target_login
|
[
"def",
"iscsi_login",
"(",
"target_name",
")",
":",
"cmd",
"=",
"(",
"'iscsiadm --mode node --login --targetname %s'",
"%",
"target_name",
")",
"output",
"=",
"utils",
".",
"system_output",
"(",
"cmd",
")",
"target_login",
"=",
"''",
"if",
"(",
"'successful'",
"in",
"output",
")",
":",
"target_login",
"=",
"target_name",
"return",
"target_login"
] |
login to a target with the target name .
|
train
| false
|
6,913
|
def character_ngrams(string='', n=3, top=None, threshold=0, exclude=[], **kwargs):
count = defaultdict(int)
if (n > 0):
for i in xrange(((len(string) - n) + 1)):
w = string[i:(i + n)]
if (w not in exclude):
count[w] += 1
if (threshold > 0):
count = dict(((k, v) for (k, v) in count.items() if (v > threshold)))
if (top is not None):
count = dict(heapq.nsmallest(top, count.items(), key=(lambda kv: ((- kv[1]), kv[0]))))
return kwargs.get('dict', dict)(count)
|
[
"def",
"character_ngrams",
"(",
"string",
"=",
"''",
",",
"n",
"=",
"3",
",",
"top",
"=",
"None",
",",
"threshold",
"=",
"0",
",",
"exclude",
"=",
"[",
"]",
",",
"**",
"kwargs",
")",
":",
"count",
"=",
"defaultdict",
"(",
"int",
")",
"if",
"(",
"n",
">",
"0",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"(",
"(",
"len",
"(",
"string",
")",
"-",
"n",
")",
"+",
"1",
")",
")",
":",
"w",
"=",
"string",
"[",
"i",
":",
"(",
"i",
"+",
"n",
")",
"]",
"if",
"(",
"w",
"not",
"in",
"exclude",
")",
":",
"count",
"[",
"w",
"]",
"+=",
"1",
"if",
"(",
"threshold",
">",
"0",
")",
":",
"count",
"=",
"dict",
"(",
"(",
"(",
"k",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"count",
".",
"items",
"(",
")",
"if",
"(",
"v",
">",
"threshold",
")",
")",
")",
"if",
"(",
"top",
"is",
"not",
"None",
")",
":",
"count",
"=",
"dict",
"(",
"heapq",
".",
"nsmallest",
"(",
"top",
",",
"count",
".",
"items",
"(",
")",
",",
"key",
"=",
"(",
"lambda",
"kv",
":",
"(",
"(",
"-",
"kv",
"[",
"1",
"]",
")",
",",
"kv",
"[",
"0",
"]",
")",
")",
")",
")",
"return",
"kwargs",
".",
"get",
"(",
"'dict'",
",",
"dict",
")",
"(",
"count",
")"
] |
returns a dictionary of -items .
|
train
| false
|
6,914
|
def output():
return s3_rest_controller()
|
[
"def",
"output",
"(",
")",
":",
"return",
"s3_rest_controller",
"(",
")"
] |
appends string_ to the response .
|
train
| false
|
6,915
|
def estimate_graph_size(old_chunks, new_chunks):
crossed_size = reduce(mul, ((len(oc) + len(nc)) for (oc, nc) in zip(old_chunks, new_chunks)))
return crossed_size
|
[
"def",
"estimate_graph_size",
"(",
"old_chunks",
",",
"new_chunks",
")",
":",
"crossed_size",
"=",
"reduce",
"(",
"mul",
",",
"(",
"(",
"len",
"(",
"oc",
")",
"+",
"len",
"(",
"nc",
")",
")",
"for",
"(",
"oc",
",",
"nc",
")",
"in",
"zip",
"(",
"old_chunks",
",",
"new_chunks",
")",
")",
")",
"return",
"crossed_size"
] |
estimate the graph size during a rechunk computation .
|
train
| false
|
6,916
|
def APITester(request, response):
print 'not yet implemented'
|
[
"def",
"APITester",
"(",
"request",
",",
"response",
")",
":",
"print",
"'not yet implemented'"
] |
api tester perform security testing on all api calls passed via the proxy .
|
train
| false
|
6,918
|
def run_bokchoy(options, passthrough_options):
test_suite = BokChoyTestSuite('bok-choy', passthrough_options=passthrough_options, **options)
msg = colorize('green', 'Running tests using {default_store} modulestore.'.format(default_store=test_suite.default_store))
print msg
test_suite.run()
|
[
"def",
"run_bokchoy",
"(",
"options",
",",
"passthrough_options",
")",
":",
"test_suite",
"=",
"BokChoyTestSuite",
"(",
"'bok-choy'",
",",
"passthrough_options",
"=",
"passthrough_options",
",",
"**",
"options",
")",
"msg",
"=",
"colorize",
"(",
"'green'",
",",
"'Running tests using {default_store} modulestore.'",
".",
"format",
"(",
"default_store",
"=",
"test_suite",
".",
"default_store",
")",
")",
"print",
"msg",
"test_suite",
".",
"run",
"(",
")"
] |
runs bokchoytestsuite with the given options .
|
train
| false
|
6,919
|
def find_git_command():
if (sys.platform == 'win32'):
try:
import win32api
except ImportError:
return ['cmd', '/c', 'git']
else:
(status, git) = win32api.FindExecutable('git')
return [git]
else:
return ['git']
|
[
"def",
"find_git_command",
"(",
")",
":",
"if",
"(",
"sys",
".",
"platform",
"==",
"'win32'",
")",
":",
"try",
":",
"import",
"win32api",
"except",
"ImportError",
":",
"return",
"[",
"'cmd'",
",",
"'/c'",
",",
"'git'",
"]",
"else",
":",
"(",
"status",
",",
"git",
")",
"=",
"win32api",
".",
"FindExecutable",
"(",
"'git'",
")",
"return",
"[",
"git",
"]",
"else",
":",
"return",
"[",
"'git'",
"]"
] |
find command to run for system git .
|
train
| false
|
6,920
|
@register.assignment_tag
def feed_list(slug, limit=10):
return BlogEntry.objects.filter(feed__feedaggregate__slug=slug).order_by('-pub_date')[:limit]
|
[
"@",
"register",
".",
"assignment_tag",
"def",
"feed_list",
"(",
"slug",
",",
"limit",
"=",
"10",
")",
":",
"return",
"BlogEntry",
".",
"objects",
".",
"filter",
"(",
"feed__feedaggregate__slug",
"=",
"slug",
")",
".",
"order_by",
"(",
"'-pub_date'",
")",
"[",
":",
"limit",
"]"
] |
returns a list of blog entries for the given feedaggregate slug .
|
train
| false
|
6,921
|
def askokcancel(title=None, message=None, **options):
s = _show(title, message, QUESTION, OKCANCEL, **options)
return (s == OK)
|
[
"def",
"askokcancel",
"(",
"title",
"=",
"None",
",",
"message",
"=",
"None",
",",
"**",
"options",
")",
":",
"s",
"=",
"_show",
"(",
"title",
",",
"message",
",",
"QUESTION",
",",
"OKCANCEL",
",",
"**",
"options",
")",
"return",
"(",
"s",
"==",
"OK",
")"
] |
ask if operation should proceed; return true if the answer is ok .
|
train
| false
|
6,922
|
def _transpose_vectorized(M):
ndim = M.ndim
assert (ndim == 3)
return np.transpose(M, [0, (ndim - 1), (ndim - 2)])
|
[
"def",
"_transpose_vectorized",
"(",
"M",
")",
":",
"ndim",
"=",
"M",
".",
"ndim",
"assert",
"(",
"ndim",
"==",
"3",
")",
"return",
"np",
".",
"transpose",
"(",
"M",
",",
"[",
"0",
",",
"(",
"ndim",
"-",
"1",
")",
",",
"(",
"ndim",
"-",
"2",
")",
"]",
")"
] |
transposition of an array of matrices *m* .
|
train
| false
|
6,924
|
def _move(src, dst):
try:
os.remove(os.path.join(dst, os.path.basename(src)))
except OSError as exc:
if (exc.errno != errno.ENOENT):
_abort(exc)
try:
shutil.move(src, dst)
except shutil.Error as exc:
_abort(exc)
|
[
"def",
"_move",
"(",
"src",
",",
"dst",
")",
":",
"try",
":",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dst",
",",
"os",
".",
"path",
".",
"basename",
"(",
"src",
")",
")",
")",
"except",
"OSError",
"as",
"exc",
":",
"if",
"(",
"exc",
".",
"errno",
"!=",
"errno",
".",
"ENOENT",
")",
":",
"_abort",
"(",
"exc",
")",
"try",
":",
"shutil",
".",
"move",
"(",
"src",
",",
"dst",
")",
"except",
"shutil",
".",
"Error",
"as",
"exc",
":",
"_abort",
"(",
"exc",
")"
] |
wrapper around shutil .
|
train
| false
|
6,926
|
def port_rule_masking(port_min, port_max):
if (port_max < port_min):
raise ValueError(_("'port_max' is smaller than 'port_min'"))
bitdiff = (port_min ^ port_max)
if (bitdiff == 0):
return [_hex_format(port_min)]
top_bit = 1
while (top_bit <= bitdiff):
top_bit <<= 1
if (((port_min & (top_bit - 1)) == 0) and ((port_max & (top_bit - 1)) == (top_bit - 1))):
return [_hex_format(port_min, (top_bit - 1))]
top_bit >>= 1
rules = []
rules.extend(_gen_rules_port_min(port_min, top_bit))
rules.extend(_gen_rules_port_max(port_max, top_bit))
return rules
|
[
"def",
"port_rule_masking",
"(",
"port_min",
",",
"port_max",
")",
":",
"if",
"(",
"port_max",
"<",
"port_min",
")",
":",
"raise",
"ValueError",
"(",
"_",
"(",
"\"'port_max' is smaller than 'port_min'\"",
")",
")",
"bitdiff",
"=",
"(",
"port_min",
"^",
"port_max",
")",
"if",
"(",
"bitdiff",
"==",
"0",
")",
":",
"return",
"[",
"_hex_format",
"(",
"port_min",
")",
"]",
"top_bit",
"=",
"1",
"while",
"(",
"top_bit",
"<=",
"bitdiff",
")",
":",
"top_bit",
"<<=",
"1",
"if",
"(",
"(",
"(",
"port_min",
"&",
"(",
"top_bit",
"-",
"1",
")",
")",
"==",
"0",
")",
"and",
"(",
"(",
"port_max",
"&",
"(",
"top_bit",
"-",
"1",
")",
")",
"==",
"(",
"top_bit",
"-",
"1",
")",
")",
")",
":",
"return",
"[",
"_hex_format",
"(",
"port_min",
",",
"(",
"top_bit",
"-",
"1",
")",
")",
"]",
"top_bit",
">>=",
"1",
"rules",
"=",
"[",
"]",
"rules",
".",
"extend",
"(",
"_gen_rules_port_min",
"(",
"port_min",
",",
"top_bit",
")",
")",
"rules",
".",
"extend",
"(",
"_gen_rules_port_max",
"(",
"port_max",
",",
"top_bit",
")",
")",
"return",
"rules"
] |
translate a range [port_min .
|
train
| false
|
6,928
|
def getCallerInfo(depth=2):
f = sys._getframe(depth)
method_name = f.f_code.co_name
filename = f.f_code.co_filename
arg_class = None
args = inspect.getargvalues(f)
if (len(args[0]) > 0):
arg_name = args[0][0]
arg_class = args[3][arg_name].__class__.__name__
return (method_name, filename, arg_class)
|
[
"def",
"getCallerInfo",
"(",
"depth",
"=",
"2",
")",
":",
"f",
"=",
"sys",
".",
"_getframe",
"(",
"depth",
")",
"method_name",
"=",
"f",
".",
"f_code",
".",
"co_name",
"filename",
"=",
"f",
".",
"f_code",
".",
"co_filename",
"arg_class",
"=",
"None",
"args",
"=",
"inspect",
".",
"getargvalues",
"(",
"f",
")",
"if",
"(",
"len",
"(",
"args",
"[",
"0",
"]",
")",
">",
"0",
")",
":",
"arg_name",
"=",
"args",
"[",
"0",
"]",
"[",
"0",
"]",
"arg_class",
"=",
"args",
"[",
"3",
"]",
"[",
"arg_name",
"]",
".",
"__class__",
".",
"__name__",
"return",
"(",
"method_name",
",",
"filename",
",",
"arg_class",
")"
] |
utility function to get information about function callers the information is the tuple the class will be none if the caller is just a function and not an object method .
|
train
| true
|
6,929
|
def change_working_directory(directory):
try:
os.chdir(directory)
except Exception as exc:
error = DaemonOSEnvironmentError(('Unable to change working directory (%(exc)s)' % vars()))
raise error
|
[
"def",
"change_working_directory",
"(",
"directory",
")",
":",
"try",
":",
"os",
".",
"chdir",
"(",
"directory",
")",
"except",
"Exception",
"as",
"exc",
":",
"error",
"=",
"DaemonOSEnvironmentError",
"(",
"(",
"'Unable to change working directory (%(exc)s)'",
"%",
"vars",
"(",
")",
")",
")",
"raise",
"error"
] |
change the working directory of this process .
|
train
| false
|
6,930
|
def _to_binary_string_py3(text):
if isinstance(text, six.binary_type):
return text
elif isinstance(text, six.string_types):
return six.b(text)
else:
raise Exception('only takes string types')
|
[
"def",
"_to_binary_string_py3",
"(",
"text",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"text",
"elif",
"isinstance",
"(",
"text",
",",
"six",
".",
"string_types",
")",
":",
"return",
"six",
".",
"b",
"(",
"text",
")",
"else",
":",
"raise",
"Exception",
"(",
"'only takes string types'",
")"
] |
converts a string to a binary string if it is not already one .
|
train
| false
|
6,932
|
def getmtime(path, use_sudo=False):
func = ((use_sudo and run_as_root) or run)
with settings(hide('running', 'stdout')):
return int(func(('stat -c %%Y "%(path)s" ' % locals())).strip())
|
[
"def",
"getmtime",
"(",
"path",
",",
"use_sudo",
"=",
"False",
")",
":",
"func",
"=",
"(",
"(",
"use_sudo",
"and",
"run_as_root",
")",
"or",
"run",
")",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
")",
":",
"return",
"int",
"(",
"func",
"(",
"(",
"'stat -c %%Y \"%(path)s\" '",
"%",
"locals",
"(",
")",
")",
")",
".",
"strip",
"(",
")",
")"
] |
return the last modification time of a file .
|
train
| true
|
6,933
|
def _modinv(e, m):
(x1, y1, x2, y2) = (1, 0, 0, 1)
(a, b) = (e, m)
while (b > 0):
(q, r) = divmod(a, b)
(xn, yn) = ((x1 - (q * x2)), (y1 - (q * y2)))
(a, b, x1, y1, x2, y2) = (b, r, x2, y2, xn, yn)
return (x1 % m)
|
[
"def",
"_modinv",
"(",
"e",
",",
"m",
")",
":",
"(",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")",
"=",
"(",
"1",
",",
"0",
",",
"0",
",",
"1",
")",
"(",
"a",
",",
"b",
")",
"=",
"(",
"e",
",",
"m",
")",
"while",
"(",
"b",
">",
"0",
")",
":",
"(",
"q",
",",
"r",
")",
"=",
"divmod",
"(",
"a",
",",
"b",
")",
"(",
"xn",
",",
"yn",
")",
"=",
"(",
"(",
"x1",
"-",
"(",
"q",
"*",
"x2",
")",
")",
",",
"(",
"y1",
"-",
"(",
"q",
"*",
"y2",
")",
")",
")",
"(",
"a",
",",
"b",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")",
"=",
"(",
"b",
",",
"r",
",",
"x2",
",",
"y2",
",",
"xn",
",",
"yn",
")",
"return",
"(",
"x1",
"%",
"m",
")"
] |
modular multiplicative inverse .
|
train
| false
|
6,934
|
def DocumentListEntryFromString(xml_string):
return atom.CreateClassFromXMLString(DocumentListEntry, xml_string)
|
[
"def",
"DocumentListEntryFromString",
"(",
"xml_string",
")",
":",
"return",
"atom",
".",
"CreateClassFromXMLString",
"(",
"DocumentListEntry",
",",
"xml_string",
")"
] |
converts an xml string into a documentlistentry object .
|
train
| false
|
6,936
|
def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None):
if all(((arg is None) for arg in (start, stop, num, endpoint))):
X = _expm_multiply_simple(A, B)
else:
(X, status) = _expm_multiply_interval(A, B, start, stop, num, endpoint)
return X
|
[
"def",
"expm_multiply",
"(",
"A",
",",
"B",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"num",
"=",
"None",
",",
"endpoint",
"=",
"None",
")",
":",
"if",
"all",
"(",
"(",
"(",
"arg",
"is",
"None",
")",
"for",
"arg",
"in",
"(",
"start",
",",
"stop",
",",
"num",
",",
"endpoint",
")",
")",
")",
":",
"X",
"=",
"_expm_multiply_simple",
"(",
"A",
",",
"B",
")",
"else",
":",
"(",
"X",
",",
"status",
")",
"=",
"_expm_multiply_interval",
"(",
"A",
",",
"B",
",",
"start",
",",
"stop",
",",
"num",
",",
"endpoint",
")",
"return",
"X"
] |
compute the action of the matrix exponential of a on b .
|
train
| false
|
6,937
|
def difference_delta(expr, n=None, step=1):
expr = sympify(expr)
if (n is None):
f = expr.free_symbols
if (len(f) == 1):
n = f.pop()
elif (len(f) == 0):
return S.Zero
else:
raise ValueError(('Since there is more than one variable in the expression, a variable must be supplied to take the difference of %s' % expr))
step = sympify(step)
if (step.is_number is False):
raise ValueError('Step should be a number.')
elif (step in [S.Infinity, (- S.Infinity)]):
raise ValueError('Step should be bounded.')
if hasattr(expr, '_eval_difference_delta'):
result = expr._eval_difference_delta(n, step)
if result:
return result
return (expr.subs(n, (n + step)) - expr)
|
[
"def",
"difference_delta",
"(",
"expr",
",",
"n",
"=",
"None",
",",
"step",
"=",
"1",
")",
":",
"expr",
"=",
"sympify",
"(",
"expr",
")",
"if",
"(",
"n",
"is",
"None",
")",
":",
"f",
"=",
"expr",
".",
"free_symbols",
"if",
"(",
"len",
"(",
"f",
")",
"==",
"1",
")",
":",
"n",
"=",
"f",
".",
"pop",
"(",
")",
"elif",
"(",
"len",
"(",
"f",
")",
"==",
"0",
")",
":",
"return",
"S",
".",
"Zero",
"else",
":",
"raise",
"ValueError",
"(",
"(",
"'Since there is more than one variable in the expression, a variable must be supplied to take the difference of %s'",
"%",
"expr",
")",
")",
"step",
"=",
"sympify",
"(",
"step",
")",
"if",
"(",
"step",
".",
"is_number",
"is",
"False",
")",
":",
"raise",
"ValueError",
"(",
"'Step should be a number.'",
")",
"elif",
"(",
"step",
"in",
"[",
"S",
".",
"Infinity",
",",
"(",
"-",
"S",
".",
"Infinity",
")",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'Step should be bounded.'",
")",
"if",
"hasattr",
"(",
"expr",
",",
"'_eval_difference_delta'",
")",
":",
"result",
"=",
"expr",
".",
"_eval_difference_delta",
"(",
"n",
",",
"step",
")",
"if",
"result",
":",
"return",
"result",
"return",
"(",
"expr",
".",
"subs",
"(",
"n",
",",
"(",
"n",
"+",
"step",
")",
")",
"-",
"expr",
")"
] |
difference operator .
|
train
| false
|
6,938
|
def _get_server(vm_, volumes, nics):
vm_size = _override_size(vm_)
availability_zone = config.get_cloud_config_value('availability_zone', vm_, __opts__, default=None, search_global=False)
cpu_family = config.get_cloud_config_value('cpu_family', vm_, __opts__, default=None, search_global=False)
return Server(name=vm_['name'], ram=vm_size['ram'], availability_zone=availability_zone, cores=vm_size['cores'], cpu_family=cpu_family, create_volumes=volumes, nics=nics)
|
[
"def",
"_get_server",
"(",
"vm_",
",",
"volumes",
",",
"nics",
")",
":",
"vm_size",
"=",
"_override_size",
"(",
"vm_",
")",
"availability_zone",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'availability_zone'",
",",
"vm_",
",",
"__opts__",
",",
"default",
"=",
"None",
",",
"search_global",
"=",
"False",
")",
"cpu_family",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'cpu_family'",
",",
"vm_",
",",
"__opts__",
",",
"default",
"=",
"None",
",",
"search_global",
"=",
"False",
")",
"return",
"Server",
"(",
"name",
"=",
"vm_",
"[",
"'name'",
"]",
",",
"ram",
"=",
"vm_size",
"[",
"'ram'",
"]",
",",
"availability_zone",
"=",
"availability_zone",
",",
"cores",
"=",
"vm_size",
"[",
"'cores'",
"]",
",",
"cpu_family",
"=",
"cpu_family",
",",
"create_volumes",
"=",
"volumes",
",",
"nics",
"=",
"nics",
")"
] |
construct server instance from cloud profile config .
|
train
| true
|
6,939
|
def TranslateXmlToYamlForDevAppServer(app_engine_web_xml, web_xml, war_root):
translator = AppYamlTranslatorForDevAppServer(app_engine_web_xml, web_xml, war_root)
return translator.GetYaml()
|
[
"def",
"TranslateXmlToYamlForDevAppServer",
"(",
"app_engine_web_xml",
",",
"web_xml",
",",
"war_root",
")",
":",
"translator",
"=",
"AppYamlTranslatorForDevAppServer",
"(",
"app_engine_web_xml",
",",
"web_xml",
",",
"war_root",
")",
"return",
"translator",
".",
"GetYaml",
"(",
")"
] |
does parsed-xml to yaml-string translation .
|
train
| false
|
6,940
|
def unroll_scan(fn, sequences, outputs_info, non_sequences, n_steps, go_backwards=False):
if (not isinstance(sequences, (list, tuple))):
sequences = [sequences]
counter = range(n_steps)
if go_backwards:
counter = counter[::(-1)]
output = []
prev_vals = outputs_info
for i in counter:
step_input = (([s[i] for s in sequences] + prev_vals) + non_sequences)
out_ = fn(*step_input)
if isinstance(out_, T.TensorVariable):
out_ = [out_]
if isinstance(out_, tuple):
out_ = list(out_)
output.append(out_)
prev_vals = output[(-1)]
output_scan = []
for i in range(len(output[0])):
l = map((lambda x: x[i]), output)
output_scan.append(T.stack(*l))
return output_scan
|
[
"def",
"unroll_scan",
"(",
"fn",
",",
"sequences",
",",
"outputs_info",
",",
"non_sequences",
",",
"n_steps",
",",
"go_backwards",
"=",
"False",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"sequences",
",",
"(",
"list",
",",
"tuple",
")",
")",
")",
":",
"sequences",
"=",
"[",
"sequences",
"]",
"counter",
"=",
"range",
"(",
"n_steps",
")",
"if",
"go_backwards",
":",
"counter",
"=",
"counter",
"[",
":",
":",
"(",
"-",
"1",
")",
"]",
"output",
"=",
"[",
"]",
"prev_vals",
"=",
"outputs_info",
"for",
"i",
"in",
"counter",
":",
"step_input",
"=",
"(",
"(",
"[",
"s",
"[",
"i",
"]",
"for",
"s",
"in",
"sequences",
"]",
"+",
"prev_vals",
")",
"+",
"non_sequences",
")",
"out_",
"=",
"fn",
"(",
"*",
"step_input",
")",
"if",
"isinstance",
"(",
"out_",
",",
"T",
".",
"TensorVariable",
")",
":",
"out_",
"=",
"[",
"out_",
"]",
"if",
"isinstance",
"(",
"out_",
",",
"tuple",
")",
":",
"out_",
"=",
"list",
"(",
"out_",
")",
"output",
".",
"append",
"(",
"out_",
")",
"prev_vals",
"=",
"output",
"[",
"(",
"-",
"1",
")",
"]",
"output_scan",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"output",
"[",
"0",
"]",
")",
")",
":",
"l",
"=",
"map",
"(",
"(",
"lambda",
"x",
":",
"x",
"[",
"i",
"]",
")",
",",
"output",
")",
"output_scan",
".",
"append",
"(",
"T",
".",
"stack",
"(",
"*",
"l",
")",
")",
"return",
"output_scan"
] |
helper function to unroll for loops .
|
train
| false
|
6,941
|
def _str(s):
try:
if isinstance(s, unicode):
return s.encode('utf-8', 'replace')
except NameError:
pass
return s
|
[
"def",
"_str",
"(",
"s",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"s",
",",
"unicode",
")",
":",
"return",
"s",
".",
"encode",
"(",
"'utf-8'",
",",
"'replace'",
")",
"except",
"NameError",
":",
"pass",
"return",
"s"
] |
wrap single quotes around strings .
|
train
| false
|
6,942
|
def BuildDefaultPythonKey():
return ('Software\\Python\\PythonCore\\' + sys.winver)
|
[
"def",
"BuildDefaultPythonKey",
"(",
")",
":",
"return",
"(",
"'Software\\\\Python\\\\PythonCore\\\\'",
"+",
"sys",
".",
"winver",
")"
] |
builds a string containing the path to the current registry key .
|
train
| false
|
6,943
|
def moveIntf(intf, dstNode, printError=True, retries=3, delaySecs=0.001):
retry(retries, delaySecs, moveIntfNoRetry, intf, dstNode, printError=printError)
|
[
"def",
"moveIntf",
"(",
"intf",
",",
"dstNode",
",",
"printError",
"=",
"True",
",",
"retries",
"=",
"3",
",",
"delaySecs",
"=",
"0.001",
")",
":",
"retry",
"(",
"retries",
",",
"delaySecs",
",",
"moveIntfNoRetry",
",",
"intf",
",",
"dstNode",
",",
"printError",
"=",
"printError",
")"
] |
move interface to node .
|
train
| false
|
6,944
|
def constrain_stationary_multivariate_python(unconstrained, error_variance, transform_variance=False, prefix=None):
use_list = (type(unconstrained) == list)
if (not use_list):
(k_endog, order) = unconstrained.shape
order //= k_endog
unconstrained = [unconstrained[:k_endog, (i * k_endog):((i + 1) * k_endog)] for i in range(order)]
order = len(unconstrained)
k_endog = unconstrained[0].shape[0]
sv_constrained = _constrain_sv_less_than_one_python(unconstrained, order, k_endog)
(constrained, var) = _compute_coefficients_from_multivariate_pacf_python(sv_constrained, error_variance, transform_variance, order, k_endog)
if (not use_list):
constrained = np.concatenate(constrained, axis=1).reshape(k_endog, (k_endog * order))
return (constrained, var)
|
[
"def",
"constrain_stationary_multivariate_python",
"(",
"unconstrained",
",",
"error_variance",
",",
"transform_variance",
"=",
"False",
",",
"prefix",
"=",
"None",
")",
":",
"use_list",
"=",
"(",
"type",
"(",
"unconstrained",
")",
"==",
"list",
")",
"if",
"(",
"not",
"use_list",
")",
":",
"(",
"k_endog",
",",
"order",
")",
"=",
"unconstrained",
".",
"shape",
"order",
"//=",
"k_endog",
"unconstrained",
"=",
"[",
"unconstrained",
"[",
":",
"k_endog",
",",
"(",
"i",
"*",
"k_endog",
")",
":",
"(",
"(",
"i",
"+",
"1",
")",
"*",
"k_endog",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"order",
")",
"]",
"order",
"=",
"len",
"(",
"unconstrained",
")",
"k_endog",
"=",
"unconstrained",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
"sv_constrained",
"=",
"_constrain_sv_less_than_one_python",
"(",
"unconstrained",
",",
"order",
",",
"k_endog",
")",
"(",
"constrained",
",",
"var",
")",
"=",
"_compute_coefficients_from_multivariate_pacf_python",
"(",
"sv_constrained",
",",
"error_variance",
",",
"transform_variance",
",",
"order",
",",
"k_endog",
")",
"if",
"(",
"not",
"use_list",
")",
":",
"constrained",
"=",
"np",
".",
"concatenate",
"(",
"constrained",
",",
"axis",
"=",
"1",
")",
".",
"reshape",
"(",
"k_endog",
",",
"(",
"k_endog",
"*",
"order",
")",
")",
"return",
"(",
"constrained",
",",
"var",
")"
] |
transform unconstrained parameters used by the optimizer to constrained parameters used in likelihood evaluation for a vector autoregression .
|
train
| false
|
6,946
|
def xmlrunner_wrapper(output):
def _runner(*args, **kwargs):
kwargs[u'output'] = output
return xmlrunner.XMLTestRunner(*args, **kwargs)
return _runner
|
[
"def",
"xmlrunner_wrapper",
"(",
"output",
")",
":",
"def",
"_runner",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"kwargs",
"[",
"u'output'",
"]",
"=",
"output",
"return",
"xmlrunner",
".",
"XMLTestRunner",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"_runner"
] |
convenience wrapper to keep method signature unchanged for xmltestrunner and texttestrunner .
|
train
| false
|
6,947
|
def GroupSizer(field_number, is_repeated, is_packed):
tag_size = (_TagSize(field_number) * 2)
assert (not is_packed)
if is_repeated:
def RepeatedFieldSize(value):
result = (tag_size * len(value))
for element in value:
result += element.ByteSize()
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return (tag_size + value.ByteSize())
return FieldSize
|
[
"def",
"GroupSizer",
"(",
"field_number",
",",
"is_repeated",
",",
"is_packed",
")",
":",
"tag_size",
"=",
"(",
"_TagSize",
"(",
"field_number",
")",
"*",
"2",
")",
"assert",
"(",
"not",
"is_packed",
")",
"if",
"is_repeated",
":",
"def",
"RepeatedFieldSize",
"(",
"value",
")",
":",
"result",
"=",
"(",
"tag_size",
"*",
"len",
"(",
"value",
")",
")",
"for",
"element",
"in",
"value",
":",
"result",
"+=",
"element",
".",
"ByteSize",
"(",
")",
"return",
"result",
"return",
"RepeatedFieldSize",
"else",
":",
"def",
"FieldSize",
"(",
"value",
")",
":",
"return",
"(",
"tag_size",
"+",
"value",
".",
"ByteSize",
"(",
")",
")",
"return",
"FieldSize"
] |
returns a sizer for a group field .
|
train
| true
|
6,948
|
def unique_by_id_and_kind_sort(seq):
seq.sort(key=(lambda x: x.get('sort_order', 0)))
seen = {}
result = []
for item in seq:
marker = (item.get('id') + item.get('kind'))
if (marker in seen):
continue
seen[marker] = 1
result.append(item)
return result
|
[
"def",
"unique_by_id_and_kind_sort",
"(",
"seq",
")",
":",
"seq",
".",
"sort",
"(",
"key",
"=",
"(",
"lambda",
"x",
":",
"x",
".",
"get",
"(",
"'sort_order'",
",",
"0",
")",
")",
")",
"seen",
"=",
"{",
"}",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"seq",
":",
"marker",
"=",
"(",
"item",
".",
"get",
"(",
"'id'",
")",
"+",
"item",
".",
"get",
"(",
"'kind'",
")",
")",
"if",
"(",
"marker",
"in",
"seen",
")",
":",
"continue",
"seen",
"[",
"marker",
"]",
"=",
"1",
"result",
".",
"append",
"(",
"item",
")",
"return",
"result"
] |
deduplicates the sequence by (id, kind) after sorting by sort_order, due to the fact that we have duplicate content items for the same content id in our topic tree .
|
train
| false
|
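A minimal usage sketch of the snippet above (the dicts are hypothetical; unique_by_id_and_kind_sort is assumed in scope):

items = [
    {'id': 'a', 'kind': 'video', 'sort_order': 2},
    {'id': 'a', 'kind': 'video', 'sort_order': 1},
    {'id': 'a', 'kind': 'exercise', 'sort_order': 3},
]
# sorts by sort_order, then keeps the first item per id+kind marker:
# the sort_order=1 video and the exercise survive, the duplicate video is dropped
result = unique_by_id_and_kind_sort(items)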
6,950
|
def expectedFailureIf(condition):
if condition:
return expectedFailure
return (lambda func: func)
|
[
"def",
"expectedFailureIf",
"(",
"condition",
")",
":",
"if",
"condition",
":",
"return",
"expectedFailure",
"return",
"(",
"lambda",
"func",
":",
"func",
")"
] |
marks a test as an expected failure if condition is met .
|
train
| false
|
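A hedged usage sketch (assumes the expectedFailure referenced by the snippet is unittest.expectedFailure; the test class is illustrative):

import os
import sys
import unittest

class PosixTests(unittest.TestCase):
    # marked as an expected failure only when running on Windows
    @expectedFailureIf(sys.platform == 'win32')
    def test_fork_is_available(self):
        self.assertTrue(hasattr(os, 'fork'))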
6,951
|
def slice_sampler(px, N=1, x=None):
values = np.zeros(N, dtype=np.int)
samples = np.arange(len(px))
px = (np.array(px) / (1.0 * sum(px)))
u = uniform(0, max(px))
for n in xrange(N):
included = (px >= u)
choice = random.sample(range(np.sum(included)), 1)[0]
values[n] = samples[included][choice]
u = uniform(0, px[included][choice])
if x:
if (len(x) == len(px)):
x = np.array(x)
values = x[values]
else:
print 'px and x are different lengths. Returning index locations for px.'
return values
|
[
"def",
"slice_sampler",
"(",
"px",
",",
"N",
"=",
"1",
",",
"x",
"=",
"None",
")",
":",
"values",
"=",
"np",
".",
"zeros",
"(",
"N",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"samples",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"px",
")",
")",
"px",
"=",
"(",
"np",
".",
"array",
"(",
"px",
")",
"/",
"(",
"1.0",
"*",
"sum",
"(",
"px",
")",
")",
")",
"u",
"=",
"uniform",
"(",
"0",
",",
"max",
"(",
"px",
")",
")",
"for",
"n",
"in",
"xrange",
"(",
"N",
")",
":",
"included",
"=",
"(",
"px",
">=",
"u",
")",
"choice",
"=",
"random",
".",
"sample",
"(",
"range",
"(",
"np",
".",
"sum",
"(",
"included",
")",
")",
",",
"1",
")",
"[",
"0",
"]",
"values",
"[",
"n",
"]",
"=",
"samples",
"[",
"included",
"]",
"[",
"choice",
"]",
"u",
"=",
"uniform",
"(",
"0",
",",
"px",
"[",
"included",
"]",
"[",
"choice",
"]",
")",
"if",
"x",
":",
"if",
"(",
"len",
"(",
"x",
")",
"==",
"len",
"(",
"px",
")",
")",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"values",
"=",
"x",
"[",
"values",
"]",
"else",
":",
"print",
"'px and x are different lengths. Returning index locations for px.'",
"return",
"values"
] |
provides samples from a user-defined distribution .
|
train
| false
|
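A minimal usage sketch (assumes the snippet's imports, numpy as np, random, and uniform, are in scope):

px = [0.1, 0.5, 0.4]                                # unnormalised weights are fine
idx = slice_sampler(px, N=5)                        # five sampled indices into px
labels = slice_sampler(px, N=5, x=['a', 'b', 'c'])  # five sampled labels instead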
6,952
|
def _after(node):
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
return [tree[x] for x in tree.treepositions() if (x[:len(pos)] > pos[:len(x)])]
|
[
"def",
"_after",
"(",
"node",
")",
":",
"try",
":",
"pos",
"=",
"node",
".",
"treeposition",
"(",
")",
"tree",
"=",
"node",
".",
"root",
"(",
")",
"except",
"AttributeError",
":",
"return",
"[",
"]",
"return",
"[",
"tree",
"[",
"x",
"]",
"for",
"x",
"in",
"tree",
".",
"treepositions",
"(",
")",
"if",
"(",
"x",
"[",
":",
"len",
"(",
"pos",
")",
"]",
">",
"pos",
"[",
":",
"len",
"(",
"x",
")",
"]",
")",
"]"
] |
returns the set of all nodes that are after the given node .
|
train
| false
|
6,954
|
@task
def jar(ctx, jython_version='2.7.0', pyyaml_version='3.11', remove_dist=False):
clean(ctx, remove_dist, create_dirs=True)
jython_jar = get_jython_jar(jython_version)
print('Using {0}'.format(jython_jar))
compile_java_files(ctx, jython_jar)
unzip_jar(jython_jar)
copy_robot_files()
pyaml_archive = get_pyyaml(pyyaml_version)
extract_and_copy_pyyaml_files(pyyaml_version, pyaml_archive)
compile_python_files(ctx, jython_jar)
filename = create_robot_jar(ctx, get_version_from_file())
announce()
return os.path.abspath(filename)
|
[
"@",
"task",
"def",
"jar",
"(",
"ctx",
",",
"jython_version",
"=",
"'2.7.0'",
",",
"pyyaml_version",
"=",
"'3.11'",
",",
"remove_dist",
"=",
"False",
")",
":",
"clean",
"(",
"ctx",
",",
"remove_dist",
",",
"create_dirs",
"=",
"True",
")",
"jython_jar",
"=",
"get_jython_jar",
"(",
"jython_version",
")",
"print",
"(",
"'Using {0}'",
".",
"format",
"(",
"jython_jar",
")",
")",
"compile_java_files",
"(",
"ctx",
",",
"jython_jar",
")",
"unzip_jar",
"(",
"jython_jar",
")",
"copy_robot_files",
"(",
")",
"pyaml_archive",
"=",
"get_pyyaml",
"(",
"pyyaml_version",
")",
"extract_and_copy_pyyaml_files",
"(",
"pyyaml_version",
",",
"pyaml_archive",
")",
"compile_python_files",
"(",
"ctx",
",",
"jython_jar",
")",
"filename",
"=",
"create_robot_jar",
"(",
"ctx",
",",
"get_version_from_file",
"(",
")",
")",
"announce",
"(",
")",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"filename",
")"
] |
create jar distribution .
|
train
| false
|
6,955
|
def saveAll():
for globalRepositoryDialogValue in getGlobalRepositoryDialogValues():
globalRepositoryDialogValue.save()
|
[
"def",
"saveAll",
"(",
")",
":",
"for",
"globalRepositoryDialogValue",
"in",
"getGlobalRepositoryDialogValues",
"(",
")",
":",
"globalRepositoryDialogValue",
".",
"save",
"(",
")"
] |
save all the dialogs .
|
train
| false
|
6,956
|
def set_changed(meth):
@wraps(meth)
def changed_wrapper(self, *args, **kwargs):
self._cache.clear()
return meth(self, *args, **kwargs)
return changed_wrapper
|
[
"def",
"set_changed",
"(",
"meth",
")",
":",
"@",
"wraps",
"(",
"meth",
")",
"def",
"changed_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"self",
".",
"_cache",
".",
"clear",
"(",
")",
"return",
"meth",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"changed_wrapper"
] |
function to decorate methods in order to empty the memoized cache .
|
train
| false
|
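A small sketch of the intended pattern (the class is hypothetical; it only needs a _cache dict for the decorator to clear):

class Tracker(object):
    def __init__(self):
        self._cache = {}   # memoized results live here
        self.items = []

    @set_changed
    def add(self, item):
        # changed_wrapper clears self._cache before this body runs
        self.items.append(item)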
6,957
|
def map_dict_to_lower(input_dict):
lower_case_dict = {}
for key in input_dict:
lower_case_dict[key.lower()] = input_dict[key]
return lower_case_dict
|
[
"def",
"map_dict_to_lower",
"(",
"input_dict",
")",
":",
"lower_case_dict",
"=",
"{",
"}",
"for",
"key",
"in",
"input_dict",
":",
"lower_case_dict",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"input_dict",
"[",
"key",
"]",
"return",
"lower_case_dict"
] |
return an equivalent to the input dictionary with lower-case keys .
|
train
| false
|
6,958
|
def t_KEGG_Map(testfiles):
for file in testfiles:
fh = open(os.path.join('KEGG', file))
print((('Testing Bio.KEGG.Map on ' + file) + '\n\n'))
reactions = Map.parse(fh)
system = System()
for reaction in reactions:
system.add_reaction(reaction)
rxs = system.reactions()
rxs.sort(key=(lambda x: str(x)))
for x in rxs:
print(str(x))
fh.close()
|
[
"def",
"t_KEGG_Map",
"(",
"testfiles",
")",
":",
"for",
"file",
"in",
"testfiles",
":",
"fh",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'KEGG'",
",",
"file",
")",
")",
"print",
"(",
"(",
"(",
"'Testing Bio.KEGG.Map on '",
"+",
"file",
")",
"+",
"'\\n\\n'",
")",
")",
"reactions",
"=",
"Map",
".",
"parse",
"(",
"fh",
")",
"system",
"=",
"System",
"(",
")",
"for",
"reaction",
"in",
"reactions",
":",
"system",
".",
"add_reaction",
"(",
"reaction",
")",
"rxs",
"=",
"system",
".",
"reactions",
"(",
")",
"rxs",
".",
"sort",
"(",
"key",
"=",
"(",
"lambda",
"x",
":",
"str",
"(",
"x",
")",
")",
")",
"for",
"x",
"in",
"rxs",
":",
"print",
"(",
"str",
"(",
"x",
")",
")",
"fh",
".",
"close",
"(",
")"
] |
tests Bio.KEGG.Map parsing on the given files .
|
train
| false
|
6,959
|
def get_prerequisites(course_key):
course_content_milestones = find_gating_milestones(course_key)
milestones_by_block_id = {}
block_ids = []
for milestone in course_content_milestones:
prereq_content_key = milestone['namespace'].replace(GATING_NAMESPACE_QUALIFIER, '')
block_id = UsageKey.from_string(prereq_content_key).block_id
block_ids.append(block_id)
milestones_by_block_id[block_id] = milestone
result = []
for block in modulestore().get_items(course_key, qualifiers={'name': block_ids}):
milestone = milestones_by_block_id.get(block.location.block_id)
if milestone:
milestone['block_display_name'] = block.display_name
milestone['block_usage_key'] = unicode(block.location)
result.append(milestone)
return result
|
[
"def",
"get_prerequisites",
"(",
"course_key",
")",
":",
"course_content_milestones",
"=",
"find_gating_milestones",
"(",
"course_key",
")",
"milestones_by_block_id",
"=",
"{",
"}",
"block_ids",
"=",
"[",
"]",
"for",
"milestone",
"in",
"course_content_milestones",
":",
"prereq_content_key",
"=",
"milestone",
"[",
"'namespace'",
"]",
".",
"replace",
"(",
"GATING_NAMESPACE_QUALIFIER",
",",
"''",
")",
"block_id",
"=",
"UsageKey",
".",
"from_string",
"(",
"prereq_content_key",
")",
".",
"block_id",
"block_ids",
".",
"append",
"(",
"block_id",
")",
"milestones_by_block_id",
"[",
"block_id",
"]",
"=",
"milestone",
"result",
"=",
"[",
"]",
"for",
"block",
"in",
"modulestore",
"(",
")",
".",
"get_items",
"(",
"course_key",
",",
"qualifiers",
"=",
"{",
"'name'",
":",
"block_ids",
"}",
")",
":",
"milestone",
"=",
"milestones_by_block_id",
".",
"get",
"(",
"block",
".",
"location",
".",
"block_id",
")",
"if",
"milestone",
":",
"milestone",
"[",
"'block_display_name'",
"]",
"=",
"block",
".",
"display_name",
"milestone",
"[",
"'block_usage_key'",
"]",
"=",
"unicode",
"(",
"block",
".",
"location",
")",
"result",
".",
"append",
"(",
"milestone",
")",
"return",
"result"
] |
find all the gating milestones associated with a course and the xblock info associated with those gating milestones .
|
train
| false
|
6,962
|
def chi2p(x2, df=1, tail=UPPER):
return gammai((df * 0.5), (x2 * 0.5), tail)
|
[
"def",
"chi2p",
"(",
"x2",
",",
"df",
"=",
"1",
",",
"tail",
"=",
"UPPER",
")",
":",
"return",
"gammai",
"(",
"(",
"df",
"*",
"0.5",
")",
",",
"(",
"x2",
"*",
"0.5",
")",
",",
"tail",
")"
] |
returns p-value for given x2 and degrees of freedom .
|
train
| false
|
6,963
|
def add_language(**kwargs):
global _LANGUAGE_HANDLER
if (not _LANGUAGE_HANDLER):
try:
_LANGUAGE_HANDLER = LanguageHandler.objects.get(db_key='language_handler')
except LanguageHandler.DoesNotExist:
if (not _LANGUAGE_HANDLER):
from evennia import create_script
_LANGUAGE_HANDLER = create_script(LanguageHandler)
_LANGUAGE_HANDLER.add(**kwargs)
|
[
"def",
"add_language",
"(",
"**",
"kwargs",
")",
":",
"global",
"_LANGUAGE_HANDLER",
"if",
"(",
"not",
"_LANGUAGE_HANDLER",
")",
":",
"try",
":",
"_LANGUAGE_HANDLER",
"=",
"LanguageHandler",
".",
"objects",
".",
"get",
"(",
"db_key",
"=",
"'language_handler'",
")",
"except",
"LanguageHandler",
".",
"DoesNotExist",
":",
"if",
"(",
"not",
"_LANGUAGE_HANDLER",
")",
":",
"from",
"evennia",
"import",
"create_script",
"_LANGUAGE_HANDLER",
"=",
"create_script",
"(",
"LanguageHandler",
")",
"_LANGUAGE_HANDLER",
".",
"add",
"(",
"**",
"kwargs",
")"
] |
access function for creating a new language .
|
train
| false
|
6,964
|
def convert_to_ajax(page, context_instance):
response = apply_rules(page)
return response
|
[
"def",
"convert_to_ajax",
"(",
"page",
",",
"context_instance",
")",
":",
"response",
"=",
"apply_rules",
"(",
"page",
")",
"return",
"response"
] |
converts django html response into ajax response represented by a dict() .
|
train
| false
|
6,965
|
def selinux_enforcing():
cmdresult = run('getenforce', ignore_status=True, verbose=False)
mobj = re.search('Enforcing', cmdresult.stdout)
return (mobj is not None)
|
[
"def",
"selinux_enforcing",
"(",
")",
":",
"cmdresult",
"=",
"run",
"(",
"'getenforce'",
",",
"ignore_status",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
"mobj",
"=",
"re",
".",
"search",
"(",
"'Enforcing'",
",",
"cmdresult",
".",
"stdout",
")",
"return",
"(",
"mobj",
"is",
"not",
"None",
")"
] |
returns true if selinux is in enforcing mode .
|
train
| false
|
6,966
|
def init_weight(weights, initializer, scale=1.0):
if (initializer is None):
initializer = HeNormal((1 / numpy.sqrt(2)))
elif numpy.isscalar(initializer):
initializer = Constant(initializer)
elif isinstance(initializer, numpy.ndarray):
initializer = Constant(initializer)
assert callable(initializer)
initializer(weights)
weights *= scale
|
[
"def",
"init_weight",
"(",
"weights",
",",
"initializer",
",",
"scale",
"=",
"1.0",
")",
":",
"if",
"(",
"initializer",
"is",
"None",
")",
":",
"initializer",
"=",
"HeNormal",
"(",
"(",
"1",
"/",
"numpy",
".",
"sqrt",
"(",
"2",
")",
")",
")",
"elif",
"numpy",
".",
"isscalar",
"(",
"initializer",
")",
":",
"initializer",
"=",
"Constant",
"(",
"initializer",
")",
"elif",
"isinstance",
"(",
"initializer",
",",
"numpy",
".",
"ndarray",
")",
":",
"initializer",
"=",
"Constant",
"(",
"initializer",
")",
"assert",
"callable",
"(",
"initializer",
")",
"initializer",
"(",
"weights",
")",
"weights",
"*=",
"scale"
] |
helper function for initialization of the weight tensor .
|
train
| false
|
6,967
|
def generatePattern(numCols=100, minOnes=21, maxOnes=25, colSet=[], prevPattern=numpy.array([])):
assert (minOnes < maxOnes)
assert (maxOnes < numCols)
nOnes = rgen.randint(minOnes, maxOnes)
candidates = list(colSet.difference(set(prevPattern.nonzero()[0])))
rgen.shuffle(candidates)
ind = candidates[:nOnes]
x = numpy.zeros(numCols, dtype='float32')
x[ind] = 1
return x
|
[
"def",
"generatePattern",
"(",
"numCols",
"=",
"100",
",",
"minOnes",
"=",
"21",
",",
"maxOnes",
"=",
"25",
",",
"colSet",
"=",
"[",
"]",
",",
"prevPattern",
"=",
"numpy",
".",
"array",
"(",
"[",
"]",
")",
")",
":",
"assert",
"(",
"minOnes",
"<",
"maxOnes",
")",
"assert",
"(",
"maxOnes",
"<",
"numCols",
")",
"nOnes",
"=",
"rgen",
".",
"randint",
"(",
"minOnes",
",",
"maxOnes",
")",
"candidates",
"=",
"list",
"(",
"colSet",
".",
"difference",
"(",
"set",
"(",
"prevPattern",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
")",
")",
")",
"rgen",
".",
"shuffle",
"(",
"candidates",
")",
"ind",
"=",
"candidates",
"[",
":",
"nOnes",
"]",
"x",
"=",
"numpy",
".",
"zeros",
"(",
"numCols",
",",
"dtype",
"=",
"'float32'",
")",
"x",
"[",
"ind",
"]",
"=",
"1",
"return",
"x"
] |
generate a single test pattern with given parameters .
|
train
| false
|
6,968
|
def test_require_setuptools():
from fabtools.require.python import setuptools
setuptools()
assert run('easy_install --version', warn_only=True).succeeded
|
[
"def",
"test_require_setuptools",
"(",
")",
":",
"from",
"fabtools",
".",
"require",
".",
"python",
"import",
"setuptools",
"setuptools",
"(",
")",
"assert",
"run",
"(",
"'easy_install --version'",
",",
"warn_only",
"=",
"True",
")",
".",
"succeeded"
] |
test python setuptools installation .
|
train
| false
|
6,969
|
def iddr_aid(A, k):
A = np.asfortranarray(A)
(m, n) = A.shape
w = iddr_aidi(m, n, k)
(idx, proj) = _id.iddr_aid(A, k, w)
if (k == n):
proj = np.array([], dtype='float64', order='F')
else:
proj = proj.reshape((k, (n - k)), order='F')
return (idx, proj)
|
[
"def",
"iddr_aid",
"(",
"A",
",",
"k",
")",
":",
"A",
"=",
"np",
".",
"asfortranarray",
"(",
"A",
")",
"(",
"m",
",",
"n",
")",
"=",
"A",
".",
"shape",
"w",
"=",
"iddr_aidi",
"(",
"m",
",",
"n",
",",
"k",
")",
"(",
"idx",
",",
"proj",
")",
"=",
"_id",
".",
"iddr_aid",
"(",
"A",
",",
"k",
",",
"w",
")",
"if",
"(",
"k",
"==",
"n",
")",
":",
"proj",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"'float64'",
",",
"order",
"=",
"'F'",
")",
"else",
":",
"proj",
"=",
"proj",
".",
"reshape",
"(",
"(",
"k",
",",
"(",
"n",
"-",
"k",
")",
")",
",",
"order",
"=",
"'F'",
")",
"return",
"(",
"idx",
",",
"proj",
")"
] |
compute id of a real matrix to a specified rank using random sampling .
|
train
| false
|
6,971
|
def skip_distribution(unsupported, reason):
def decorator(test_method):
'\n :param test_method: The test method that should be skipped.\n '
@wraps(test_method)
def wrapper(test_case, *args, **kwargs):
distribution = environ.get('FLOCKER_ACCEPTANCE_DISTRIBUTION')
if (distribution in unsupported):
raise SkipTest("Distribution not supported: '{distribution}' ({reason}).".format(distribution=distribution, reason=reason))
return test_method(test_case, *args, **kwargs)
return wrapper
return decorator
|
[
"def",
"skip_distribution",
"(",
"unsupported",
",",
"reason",
")",
":",
"def",
"decorator",
"(",
"test_method",
")",
":",
"@",
"wraps",
"(",
"test_method",
")",
"def",
"wrapper",
"(",
"test_case",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"distribution",
"=",
"environ",
".",
"get",
"(",
"'FLOCKER_ACCEPTANCE_DISTRIBUTION'",
")",
"if",
"(",
"distribution",
"in",
"unsupported",
")",
":",
"raise",
"SkipTest",
"(",
"\"Distribution not supported: '{distribution}' ({reason}).\"",
".",
"format",
"(",
"distribution",
"=",
"distribution",
",",
"reason",
"=",
"reason",
")",
")",
"return",
"test_method",
"(",
"test_case",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"wrapper",
"return",
"decorator"
] |
create decorator that skips a test if the distribution doesn't support the operations required by the test .
|
train
| false
|
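A hedged usage sketch (the distribution name and test body are illustrative, not taken from flocker):

import unittest

class ZFSBackendTests(unittest.TestCase):
    @skip_distribution(['centos-7'], 'ZFS packages unavailable')
    def test_create_volume(self):
        # skipped when FLOCKER_ACCEPTANCE_DISTRIBUTION is 'centos-7'
        pass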
6,972
|
def get_grid_shape(m):
r = int(np.sqrt(m))
c = (m // r)
while ((r * c) < m):
c += 1
return (r, c)
|
[
"def",
"get_grid_shape",
"(",
"m",
")",
":",
"r",
"=",
"int",
"(",
"np",
".",
"sqrt",
"(",
"m",
")",
")",
"c",
"=",
"(",
"m",
"//",
"r",
")",
"while",
"(",
"(",
"r",
"*",
"c",
")",
"<",
"m",
")",
":",
"c",
"+=",
"1",
"return",
"(",
"r",
",",
"c",
")"
] |
choose a (rows, cols) grid shape large enough to show m items .
|
train
| false
|
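A quick worked example (pure function, needs only numpy as np):

get_grid_shape(10)   # -> (3, 4): r = int(sqrt(10)) = 3, c grows until 3 * c >= 10
get_grid_shape(9)    # -> (3, 3)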
6,973
|
def _int2bytes(val):
MSB = int(((int(val) & 65280) / 256))
LSB = int((int(val) & 255))
return (LSB, MSB)
|
[
"def",
"_int2bytes",
"(",
"val",
")",
":",
"MSB",
"=",
"int",
"(",
"(",
"(",
"int",
"(",
"val",
")",
"&",
"65280",
")",
"/",
"256",
")",
")",
"LSB",
"=",
"int",
"(",
"(",
"int",
"(",
"val",
")",
"&",
"255",
")",
")",
"return",
"(",
"LSB",
",",
"MSB",
")"
] |
converts a 16-bit number to its (lsb, msb) pair of byte values .
|
train
| false
|
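A quick worked example (note the function returns a pair of ints, least-significant byte first):

_int2bytes(0x1234)   # -> (0x34, 0x12), i.e. (LSB, MSB)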
6,974
|
def permute_2d(m, p):
return m[p][:, p]
|
[
"def",
"permute_2d",
"(",
"m",
",",
"p",
")",
":",
"return",
"m",
"[",
"p",
"]",
"[",
":",
",",
"p",
"]"
] |
performs 2d permutation of matrix m according to p .
|
train
| false
|
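A quick worked example (assumes numpy is imported as np):

import numpy as np
m = np.arange(9).reshape(3, 3)
p = np.array([2, 0, 1])
permute_2d(m, p)   # rows and columns are both reordered to (2, 0, 1)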
6,976
|
def helper_functions():
helpers.load_plugin_helpers()
return dict(h=helpers.helper_functions)
|
[
"def",
"helper_functions",
"(",
")",
":",
"helpers",
".",
"load_plugin_helpers",
"(",
")",
"return",
"dict",
"(",
"h",
"=",
"helpers",
".",
"helper_functions",
")"
] |
make helper functions (h) available to flask templates .
|
train
| false
|
6,980
|
def update_week(sched, year, stype, week):
for game in week_schedule(year, stype, week):
sched[game['eid']] = game
|
[
"def",
"update_week",
"(",
"sched",
",",
"year",
",",
"stype",
",",
"week",
")",
":",
"for",
"game",
"in",
"week_schedule",
"(",
"year",
",",
"stype",
",",
"week",
")",
":",
"sched",
"[",
"game",
"[",
"'eid'",
"]",
"]",
"=",
"game"
] |
updates the schedule for the given week in place .
|
train
| false
|
6,981
|
def mvn_loglike_chol(x, sigma):
sigmainv = np.linalg.inv(sigma)
cholsigmainv = np.linalg.cholesky(sigmainv).T
x_whitened = np.dot(cholsigmainv, x)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
from scipy import stats
print('scipy.stats')
print(np.log(stats.norm.pdf(x_whitened)).sum())
llf = (- np.dot(x_whitened.T, x_whitened))
llf -= (nobs * np.log((2 * np.pi)))
llf -= logdetsigma
llf *= 0.5
return (llf, logdetsigma, (2 * np.sum(np.log(np.diagonal(cholsigmainv)))))
|
[
"def",
"mvn_loglike_chol",
"(",
"x",
",",
"sigma",
")",
":",
"sigmainv",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"sigma",
")",
"cholsigmainv",
"=",
"np",
".",
"linalg",
".",
"cholesky",
"(",
"sigmainv",
")",
".",
"T",
"x_whitened",
"=",
"np",
".",
"dot",
"(",
"cholsigmainv",
",",
"x",
")",
"logdetsigma",
"=",
"np",
".",
"log",
"(",
"np",
".",
"linalg",
".",
"det",
"(",
"sigma",
")",
")",
"nobs",
"=",
"len",
"(",
"x",
")",
"from",
"scipy",
"import",
"stats",
"print",
"(",
"'scipy.stats'",
")",
"print",
"(",
"np",
".",
"log",
"(",
"stats",
".",
"norm",
".",
"pdf",
"(",
"x_whitened",
")",
")",
".",
"sum",
"(",
")",
")",
"llf",
"=",
"(",
"-",
"np",
".",
"dot",
"(",
"x_whitened",
".",
"T",
",",
"x_whitened",
")",
")",
"llf",
"-=",
"(",
"nobs",
"*",
"np",
".",
"log",
"(",
"(",
"2",
"*",
"np",
".",
"pi",
")",
")",
")",
"llf",
"-=",
"logdetsigma",
"llf",
"*=",
"0.5",
"return",
"(",
"llf",
",",
"logdetsigma",
",",
"(",
"2",
"*",
"np",
".",
"sum",
"(",
"np",
".",
"log",
"(",
"np",
".",
"diagonal",
"(",
"cholsigmainv",
")",
")",
")",
")",
")"
] |
loglike multivariate normal assumes x is 1d .
|
train
| false
|
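A hedged sanity check of the returned log-likelihood against scipy (zero-mean density; sigma must be positive definite):

import numpy as np
from scipy import stats
x = np.array([0.5, -0.2])
sigma = np.array([[2.0, 0.3], [0.3, 1.0]])
llf = mvn_loglike_chol(x, sigma)[0]
ref = stats.multivariate_normal.logpdf(x, mean=np.zeros(2), cov=sigma)
# llf and ref agree up to floating-point error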
6,982
|
def get_idx_rect(index_list):
(rows, cols) = list(zip(*[(i.row(), i.column()) for i in index_list]))
return (min(rows), max(rows), min(cols), max(cols))
|
[
"def",
"get_idx_rect",
"(",
"index_list",
")",
":",
"(",
"rows",
",",
"cols",
")",
"=",
"list",
"(",
"zip",
"(",
"*",
"[",
"(",
"i",
".",
"row",
"(",
")",
",",
"i",
".",
"column",
"(",
")",
")",
"for",
"i",
"in",
"index_list",
"]",
")",
")",
"return",
"(",
"min",
"(",
"rows",
")",
",",
"max",
"(",
"rows",
")",
",",
"min",
"(",
"cols",
")",
",",
"max",
"(",
"cols",
")",
")"
] |
extract the boundaries from a list of indexes .
|
train
| true
|
6,983
|
def execvpe(file, args, env):
_execvpe(file, args, env)
|
[
"def",
"execvpe",
"(",
"file",
",",
"args",
",",
"env",
")",
":",
"_execvpe",
"(",
"file",
",",
"args",
",",
"env",
")"
] |
execute the executable file with argument list args and environment env, searching the path for the executable .
|
train
| false
|
6,984
|
def add_addon_author(original, copy):
author = original.listed_authors[0]
AddonUser.objects.create(addon=copy, user=author, listed=True)
return author
|
[
"def",
"add_addon_author",
"(",
"original",
",",
"copy",
")",
":",
"author",
"=",
"original",
".",
"listed_authors",
"[",
"0",
"]",
"AddonUser",
".",
"objects",
".",
"create",
"(",
"addon",
"=",
"copy",
",",
"user",
"=",
"author",
",",
"listed",
"=",
"True",
")",
"return",
"author"
] |
make both add-ons share an author .
|
train
| false
|
6,986
|
def _get_shells():
start = time.time()
if ('sh.last_shells' in __context__):
if ((start - __context__['sh.last_shells']) > 5):
__context__['sh.last_shells'] = start
else:
__context__['sh.shells'] = __salt__['cmd.shells']()
else:
__context__['sh.last_shells'] = start
__context__['sh.shells'] = __salt__['cmd.shells']()
return __context__['sh.shells']
|
[
"def",
"_get_shells",
"(",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"if",
"(",
"'sh.last_shells'",
"in",
"__context__",
")",
":",
"if",
"(",
"(",
"start",
"-",
"__context__",
"[",
"'sh.last_shells'",
"]",
")",
">",
"5",
")",
":",
"__context__",
"[",
"'sh.last_shells'",
"]",
"=",
"start",
"else",
":",
"__context__",
"[",
"'sh.shells'",
"]",
"=",
"__salt__",
"[",
"'cmd.shells'",
"]",
"(",
")",
"else",
":",
"__context__",
"[",
"'sh.last_shells'",
"]",
"=",
"start",
"__context__",
"[",
"'sh.shells'",
"]",
"=",
"__salt__",
"[",
"'cmd.shells'",
"]",
"(",
")",
"return",
"__context__",
"[",
"'sh.shells'",
"]"
] |
return the valid shells on this system .
|
train
| true
|
6,987
|
def update_account_password_policy(allow_users_to_change_password=None, hard_expiry=None, max_password_age=None, minimum_password_length=None, password_reuse_prevention=None, require_lowercase_characters=None, require_numbers=None, require_symbols=None, require_uppercase_characters=None, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.update_account_password_policy(allow_users_to_change_password, hard_expiry, max_password_age, minimum_password_length, password_reuse_prevention, require_lowercase_characters, require_numbers, require_symbols, require_uppercase_characters)
log.info('The password policy has been updated.')
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to update the password policy'
log.error(msg)
return False
|
[
"def",
"update_account_password_policy",
"(",
"allow_users_to_change_password",
"=",
"None",
",",
"hard_expiry",
"=",
"None",
",",
"max_password_age",
"=",
"None",
",",
"minimum_password_length",
"=",
"None",
",",
"password_reuse_prevention",
"=",
"None",
",",
"require_lowercase_characters",
"=",
"None",
",",
"require_numbers",
"=",
"None",
",",
"require_symbols",
"=",
"None",
",",
"require_uppercase_characters",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"try",
":",
"conn",
".",
"update_account_password_policy",
"(",
"allow_users_to_change_password",
",",
"hard_expiry",
",",
"max_password_age",
",",
"minimum_password_length",
",",
"password_reuse_prevention",
",",
"require_lowercase_characters",
",",
"require_numbers",
",",
"require_symbols",
",",
"require_uppercase_characters",
")",
"log",
".",
"info",
"(",
"'The password policy has been updated.'",
")",
"return",
"True",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"e",
")",
"msg",
"=",
"'Failed to update the password policy'",
"log",
".",
"error",
"(",
"msg",
")",
"return",
"False"
] |
update the password policy for the aws account .
|
train
| true
|
6,988
|
def get_fileobj(byte_io):
byte_io.seek(0)
if zipfile.is_zipfile(byte_io):
byte_io.seek(0)
zf = zipfile.ZipFile(byte_io)
filename = guess_zip_filename(zf)
byte_io = zf.open(filename, mode='r')
else:
byte_io.seek(0)
return io.TextIOWrapper(byte_io, encoding='utf-8')
|
[
"def",
"get_fileobj",
"(",
"byte_io",
")",
":",
"byte_io",
".",
"seek",
"(",
"0",
")",
"if",
"zipfile",
".",
"is_zipfile",
"(",
"byte_io",
")",
":",
"byte_io",
".",
"seek",
"(",
"0",
")",
"zf",
"=",
"zipfile",
".",
"ZipFile",
"(",
"byte_io",
")",
"filename",
"=",
"guess_zip_filename",
"(",
"zf",
")",
"byte_io",
"=",
"zf",
".",
"open",
"(",
"filename",
",",
"mode",
"=",
"'r'",
")",
"else",
":",
"byte_io",
".",
"seek",
"(",
"0",
")",
"return",
"io",
".",
"TextIOWrapper",
"(",
"byte_io",
",",
"encoding",
"=",
"'utf-8'",
")"
] |
get a usable file object to read the hosts file from .
|
train
| false
|
6,989
|
def nvlist(thelist, names=None):
for nvitem in thelist:
if isinstance(nvitem, dict):
(name, value) = next(six.iteritems(nvitem))
if ((names is None) or (name in names)):
(yield (nvitem, name, value))
|
[
"def",
"nvlist",
"(",
"thelist",
",",
"names",
"=",
"None",
")",
":",
"for",
"nvitem",
"in",
"thelist",
":",
"if",
"isinstance",
"(",
"nvitem",
",",
"dict",
")",
":",
"(",
"name",
",",
"value",
")",
"=",
"next",
"(",
"six",
".",
"iteritems",
"(",
"nvitem",
")",
")",
"if",
"(",
"(",
"names",
"is",
"None",
")",
"or",
"(",
"name",
"in",
"names",
")",
")",
":",
"(",
"yield",
"(",
"nvitem",
",",
"name",
",",
"value",
")",
")"
] |
given a list of items such as: - whatever, - name1: value1, - name2: [- key: value, - key: value], return a generator that yields an (item, name, value) tuple for each dict entry .
|
train
| true
|
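A minimal usage sketch matching the shape described above:

thelist = [
    'whatever',                       # non-dict entries are skipped
    {'name1': 'value1'},
    {'name2': [{'key': 'value'}]},
]
for nvitem, name, value in nvlist(thelist):
    pass   # yields ({'name1': 'value1'}, 'name1', 'value1'), then the name2 item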
6,992
|
def _glsa_list_process_output(output):
ret = dict()
for line in output:
try:
(glsa_id, status, desc) = line.split(None, 2)
if ('U' in status):
status += ' Not Affected'
elif ('N' in status):
status += ' Might be Affected'
elif ('A' in status):
status += ' Applied (injected)'
if ('CVE' in desc):
(desc, cves) = desc.rsplit(None, 1)
cves = cves.split(',')
else:
cves = list()
ret[glsa_id] = {'description': desc, 'status': status, 'CVEs': cves}
except ValueError:
pass
return ret
|
[
"def",
"_glsa_list_process_output",
"(",
"output",
")",
":",
"ret",
"=",
"dict",
"(",
")",
"for",
"line",
"in",
"output",
":",
"try",
":",
"(",
"glsa_id",
",",
"status",
",",
"desc",
")",
"=",
"line",
".",
"split",
"(",
"None",
",",
"2",
")",
"if",
"(",
"'U'",
"in",
"status",
")",
":",
"status",
"+=",
"' Not Affected'",
"elif",
"(",
"'N'",
"in",
"status",
")",
":",
"status",
"+=",
"' Might be Affected'",
"elif",
"(",
"'A'",
"in",
"status",
")",
":",
"status",
"+=",
"' Applied (injected)'",
"if",
"(",
"'CVE'",
"in",
"desc",
")",
":",
"(",
"desc",
",",
"cves",
")",
"=",
"desc",
".",
"rsplit",
"(",
"None",
",",
"1",
")",
"cves",
"=",
"cves",
".",
"split",
"(",
"','",
")",
"else",
":",
"cves",
"=",
"list",
"(",
")",
"ret",
"[",
"glsa_id",
"]",
"=",
"{",
"'description'",
":",
"desc",
",",
"'status'",
":",
"status",
",",
"'CVEs'",
":",
"cves",
"}",
"except",
"ValueError",
":",
"pass",
"return",
"ret"
] |
process output from glsa_check_list into a dict keyed by glsa id, containing the description, status and cves for each entry .
|
train
| true
|
6,994
|
def add_tools_to_manager(toolmanager, tools=default_tools):
for (name, tool) in six.iteritems(tools):
toolmanager.add_tool(name, tool)
|
[
"def",
"add_tools_to_manager",
"(",
"toolmanager",
",",
"tools",
"=",
"default_tools",
")",
":",
"for",
"(",
"name",
",",
"tool",
")",
"in",
"six",
".",
"iteritems",
"(",
"tools",
")",
":",
"toolmanager",
".",
"add_tool",
"(",
"name",
",",
"tool",
")"
] |
add multiple tools to toolmanager parameters toolmanager: toolmanager backend_managers .
|
train
| false
|
6,995
|
def getLogRecordFactory():
return _logRecordFactory
|
[
"def",
"getLogRecordFactory",
"(",
")",
":",
"return",
"_logRecordFactory"
] |
return the factory to be used when instantiating a log record .
|
train
| false
|
6,996
|
def get_tuple_coords(options, path):
l = []
(path, head) = split(path)
head = head.split('.')[0]
l.append(int(head))
for i in range((options.zoom_level - 1)):
(path, head) = split(path)
l.append(int(head))
l.reverse()
return tuple(l)
|
[
"def",
"get_tuple_coords",
"(",
"options",
",",
"path",
")",
":",
"l",
"=",
"[",
"]",
"(",
"path",
",",
"head",
")",
"=",
"split",
"(",
"path",
")",
"head",
"=",
"head",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"l",
".",
"append",
"(",
"int",
"(",
"head",
")",
")",
"for",
"i",
"in",
"range",
"(",
"(",
"options",
".",
"zoom_level",
"-",
"1",
")",
")",
":",
"(",
"path",
",",
"head",
")",
"=",
"split",
"(",
"path",
")",
"l",
".",
"append",
"(",
"int",
"(",
"head",
")",
")",
"l",
".",
"reverse",
"(",
")",
"return",
"tuple",
"(",
"l",
")"
] |
extracts the "quadtree coordinates" from an image path .
|
train
| false
|
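A hedged worked example (options is assumed to carry a zoom_level attribute, and split is assumed to be os.path.split as in the snippet's module):

class Opts(object):
    zoom_level = 3

get_tuple_coords(Opts(), 'World/3/1/2.png')   # -> (3, 1, 2)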
6,997
|
def get_host_info(node_info, host):
if (node_info.get('roles', []) == ['master']):
return None
return host
|
[
"def",
"get_host_info",
"(",
"node_info",
",",
"host",
")",
":",
"if",
"(",
"node_info",
".",
"get",
"(",
"'roles'",
",",
"[",
"]",
")",
"==",
"[",
"'master'",
"]",
")",
":",
"return",
"None",
"return",
"host"
] |
simple callback that takes the node info from /_cluster/nodes and the parsed connection information, and returns the connection information (or none for master-only nodes) .
|
train
| false
|
6,998
|
def check_inheritance(path, objectType, user=None):
ret = {'result': False, 'Inheritance': False, 'comment': ''}
sidRet = _getUserSid(user)
dc = daclConstants()
objectType = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectType)
try:
sd = win32security.GetNamedSecurityInfo(path, objectType, win32security.DACL_SECURITY_INFORMATION)
dacls = sd.GetSecurityDescriptorDacl()
except Exception as e:
ret['result'] = False
ret['comment'] = 'Error obtaining the Security Descriptor or DACL of the path: {0}.'.format(e)
return ret
for counter in range(0, dacls.GetAceCount()):
ace = dacls.GetAce(counter)
if ((ace[0][1] & win32security.INHERITED_ACE) == win32security.INHERITED_ACE):
if ((not sidRet['sid']) or (ace[2] == sidRet['sid'])):
ret['Inheritance'] = True
break
ret['result'] = True
return ret
|
[
"def",
"check_inheritance",
"(",
"path",
",",
"objectType",
",",
"user",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'result'",
":",
"False",
",",
"'Inheritance'",
":",
"False",
",",
"'comment'",
":",
"''",
"}",
"sidRet",
"=",
"_getUserSid",
"(",
"user",
")",
"dc",
"=",
"daclConstants",
"(",
")",
"objectType",
"=",
"dc",
".",
"getObjectTypeBit",
"(",
"objectType",
")",
"path",
"=",
"dc",
".",
"processPath",
"(",
"path",
",",
"objectType",
")",
"try",
":",
"sd",
"=",
"win32security",
".",
"GetNamedSecurityInfo",
"(",
"path",
",",
"objectType",
",",
"win32security",
".",
"DACL_SECURITY_INFORMATION",
")",
"dacls",
"=",
"sd",
".",
"GetSecurityDescriptorDacl",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Error obtaining the Security Descriptor or DACL of the path: {0}.'",
".",
"format",
"(",
"e",
")",
"return",
"ret",
"for",
"counter",
"in",
"range",
"(",
"0",
",",
"dacls",
".",
"GetAceCount",
"(",
")",
")",
":",
"ace",
"=",
"dacls",
".",
"GetAce",
"(",
"counter",
")",
"if",
"(",
"(",
"ace",
"[",
"0",
"]",
"[",
"1",
"]",
"&",
"win32security",
".",
"INHERITED_ACE",
")",
"==",
"win32security",
".",
"INHERITED_ACE",
")",
":",
"if",
"(",
"(",
"not",
"sidRet",
"[",
"'sid'",
"]",
")",
"or",
"(",
"ace",
"[",
"2",
"]",
"==",
"sidRet",
"[",
"'sid'",
"]",
")",
")",
":",
"ret",
"[",
"'Inheritance'",
"]",
"=",
"True",
"break",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"return",
"ret"
] |
check a specified path to verify if inheritance is enabled; args: path - path of the registry key or file system object to check, objecttype - the type of object, user - if provided, only inherited aces matching that user are considered .
|
train
| true
|
6,999
|
def str2intlist(data, intsize=4):
result = []
data = decode_string_escape(data)[::(-1)]
l = len(data)
data = ((('\x00' * (intsize - (l % intsize))) + data) if ((l % intsize) != 0) else data)
for i in range(0, l, intsize):
if (intsize == 8):
val = struct.unpack('>Q', data[i:(i + intsize)])[0]
else:
val = struct.unpack('>L', data[i:(i + intsize)])[0]
result = ([val] + result)
return result
|
[
"def",
"str2intlist",
"(",
"data",
",",
"intsize",
"=",
"4",
")",
":",
"result",
"=",
"[",
"]",
"data",
"=",
"decode_string_escape",
"(",
"data",
")",
"[",
":",
":",
"(",
"-",
"1",
")",
"]",
"l",
"=",
"len",
"(",
"data",
")",
"data",
"=",
"(",
"(",
"(",
"'\\x00'",
"*",
"(",
"intsize",
"-",
"(",
"l",
"%",
"intsize",
")",
")",
")",
"+",
"data",
")",
"if",
"(",
"(",
"l",
"%",
"intsize",
")",
"!=",
"0",
")",
"else",
"data",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"l",
",",
"intsize",
")",
":",
"if",
"(",
"intsize",
"==",
"8",
")",
":",
"val",
"=",
"struct",
".",
"unpack",
"(",
"'>Q'",
",",
"data",
"[",
"i",
":",
"(",
"i",
"+",
"intsize",
")",
"]",
")",
"[",
"0",
"]",
"else",
":",
"val",
"=",
"struct",
".",
"unpack",
"(",
"'>L'",
",",
"data",
"[",
"i",
":",
"(",
"i",
"+",
"intsize",
")",
"]",
")",
"[",
"0",
"]",
"result",
"=",
"(",
"[",
"val",
"]",
"+",
"result",
")",
"return",
"result"
] |
convert a string to list of int .
|
train
| false
|
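A worked example (assumes decode_string_escape from the same module; each 4-byte chunk becomes the little-endian integer of those bytes, chunks kept in order):

str2intlist('ABCD')       # -> [0x44434241]  ('ABCD' read as a little-endian dword)
str2intlist('ABCDEFGH')   # -> [0x44434241, 0x48474645]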
7,000
|
def _retrive_branch(k, trie):
if (not k):
return None
for c in k:
child_branch = _get_child_branch(trie, c)
if (not child_branch):
return None
trie = child_branch
return trie
|
[
"def",
"_retrive_branch",
"(",
"k",
",",
"trie",
")",
":",
"if",
"(",
"not",
"k",
")",
":",
"return",
"None",
"for",
"c",
"in",
"k",
":",
"child_branch",
"=",
"_get_child_branch",
"(",
"trie",
",",
"c",
")",
"if",
"(",
"not",
"child_branch",
")",
":",
"return",
"None",
"trie",
"=",
"child_branch",
"return",
"trie"
] |
get branch matching the key word .
|
train
| false
|
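A hedged sketch (assumes _get_child_branch(node, ch) from the same module resolves one character to a child node, e.g. node.get(ch) for a dict trie):

trie = {'c': {'a': {'t': {'end': True}}}}
# walks 'c' then 'a' and returns that branch, or None if any step is missing
branch = _retrive_branch('ca', trie)   # -> {'t': {'end': True}}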
7,001
|
def prep_for_subprocess(cmd, shell=False):
if shell:
if PY3:
cmd = decode(cmd)
else:
cmd = encode(cmd)
elif PY3:
cmd = [decode(c) for c in cmd]
else:
cmd = [encode(c) for c in cmd]
return cmd
|
[
"def",
"prep_for_subprocess",
"(",
"cmd",
",",
"shell",
"=",
"False",
")",
":",
"if",
"shell",
":",
"if",
"PY3",
":",
"cmd",
"=",
"decode",
"(",
"cmd",
")",
"else",
":",
"cmd",
"=",
"encode",
"(",
"cmd",
")",
"elif",
"PY3",
":",
"cmd",
"=",
"[",
"decode",
"(",
"c",
")",
"for",
"c",
"in",
"cmd",
"]",
"else",
":",
"cmd",
"=",
"[",
"encode",
"(",
"c",
")",
"for",
"c",
"in",
"cmd",
"]",
"return",
"cmd"
] |
decode the command on python 3 and encode it on python 2 so subprocess receives the expected string type .
|
train
| false
|
7,002
|
def plain(text):
return re.sub('.\x08', '', text)
|
[
"def",
"plain",
"(",
"text",
")",
":",
"return",
"re",
".",
"sub",
"(",
"'.\\x08'",
",",
"''",
",",
"text",
")"
] |
remove boldface formatting from text .
|
train
| false
|
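A quick worked example (strips man-page style overstrike, where a bold character is written as char + backspace + char):

plain('b\x08bo\x08ol\x08ld\x08d')   # -> 'bold'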