| id_within_dataset (int64, 1 to 55.5k) | snippet (string, 19 to 14.2k chars) | tokens (list, 6 to 1.63k items) | nl (string, 6 to 352 chars) | split_within_dataset (1 class) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|---|
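The rows below follow the six-column schema above, one multi-line cell group per record. As a quick illustration (not part of the dataset itself), here is a minimal sketch of one record as a plain Python dict, using values copied from row 13,109; the token list is truncated for brevity, and the final check simply confirms each lexer token occurs verbatim in the raw snippet.

```python
# Minimal sketch of one record under the schema above. Values are copied from
# row 13,109; `tokens` is truncated. This is a plain dict for illustration,
# not any particular loader's API.
row = {
    "id_within_dataset": 13109,
    "snippet": (
        "def get_version(package):\n"
        "    init_py = open(os.path.join(package, '__init__.py')).read()\n"
        "    return re.search('__version__ = [\\'\"]([^\\'\"]+)[\\'\"]', init_py).group(1)"
    ),
    "tokens": ["def", "get_version", "(", "package", ")", ":"],  # truncated
    "nl": "parse __init__ .",
    "split_within_dataset": "train",
    "is_duplicated": True,
}

# Each code token should appear verbatim in the raw snippet text.
assert all(tok in row["snippet"] for tok in row["tokens"])
print("%d: %s (%d tokens)" % (row["id_within_dataset"], row["nl"], len(row["tokens"])))
```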
13,109
|
def get_version(package):
    init_py = open(os.path.join(package, '__init__.py')).read()
    return re.search('__version__ = [\'"]([^\'"]+)[\'"]', init_py).group(1)
|
[
"def",
"get_version",
"(",
"package",
")",
":",
"init_py",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package",
",",
"'__init__.py'",
")",
")",
".",
"read",
"(",
")",
"return",
"re",
".",
"search",
"(",
"'__version__ = [\\'\"]([^\\'\"]+)[\\'\"]'",
",",
"init_py",
")",
".",
"group",
"(",
"1",
")"
] |
parse __init__ .
|
train
| true
|
13,110
|
def get_image(conn, vm_):
    vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode('ascii', 'salt-cloud-force-ascii')
    if (not vm_image):
        log.debug('No image set, must be boot from volume')
        return None
    image_list = conn.image_list()
    for img in image_list:
        if (vm_image in (image_list[img]['id'], img)):
            return image_list[img]['id']
    try:
        image = conn.image_show(vm_image)
        return image['id']
    except novaclient.exceptions.NotFound as exc:
        raise SaltCloudNotFound("The specified image, '{0}', could not be found: {1}".format(vm_image, str(exc)))
|
[
"def",
"get_image",
"(",
"conn",
",",
"vm_",
")",
":",
"vm_image",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'image'",
",",
"vm_",
",",
"__opts__",
",",
"default",
"=",
"''",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'salt-cloud-force-ascii'",
")",
"if",
"(",
"not",
"vm_image",
")",
":",
"log",
".",
"debug",
"(",
"'No image set, must be boot from volume'",
")",
"return",
"None",
"image_list",
"=",
"conn",
".",
"image_list",
"(",
")",
"for",
"img",
"in",
"image_list",
":",
"if",
"(",
"vm_image",
"in",
"(",
"image_list",
"[",
"img",
"]",
"[",
"'id'",
"]",
",",
"img",
")",
")",
":",
"return",
"image_list",
"[",
"img",
"]",
"[",
"'id'",
"]",
"try",
":",
"image",
"=",
"conn",
".",
"image_show",
"(",
"vm_image",
")",
"return",
"image",
"[",
"'id'",
"]",
"except",
"novaclient",
".",
"exceptions",
".",
"NotFound",
"as",
"exc",
":",
"raise",
"SaltCloudNotFound",
"(",
"\"The specified image, '{0}', could not be found: {1}\"",
".",
"format",
"(",
"vm_image",
",",
"str",
"(",
"exc",
")",
")",
")"
] |
return the image object to use .
|
train
| true
|
13,111
|
def get_xmlsec_binary(paths=None):
    if (os.name == 'posix'):
        bin_name = ['xmlsec1']
    elif (os.name == 'nt'):
        bin_name = ['xmlsec.exe', 'xmlsec1.exe']
    else:
        bin_name = ['xmlsec1']
    if paths:
        for bname in bin_name:
            for path in paths:
                fil = os.path.join(path, bname)
                try:
                    if os.lstat(fil):
                        return fil
                except OSError:
                    pass
    for path in os.environ['PATH'].split(os.pathsep):
        for bname in bin_name:
            fil = os.path.join(path, bname)
            try:
                if os.lstat(fil):
                    return fil
            except OSError:
                pass
    raise SigverError(("Can't find %s" % bin_name))
|
[
"def",
"get_xmlsec_binary",
"(",
"paths",
"=",
"None",
")",
":",
"if",
"(",
"os",
".",
"name",
"==",
"'posix'",
")",
":",
"bin_name",
"=",
"[",
"'xmlsec1'",
"]",
"elif",
"(",
"os",
".",
"name",
"==",
"'nt'",
")",
":",
"bin_name",
"=",
"[",
"'xmlsec.exe'",
",",
"'xmlsec1.exe'",
"]",
"else",
":",
"bin_name",
"=",
"[",
"'xmlsec1'",
"]",
"if",
"paths",
":",
"for",
"bname",
"in",
"bin_name",
":",
"for",
"path",
"in",
"paths",
":",
"fil",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"bname",
")",
"try",
":",
"if",
"os",
".",
"lstat",
"(",
"fil",
")",
":",
"return",
"fil",
"except",
"OSError",
":",
"pass",
"for",
"path",
"in",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"for",
"bname",
"in",
"bin_name",
":",
"fil",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"bname",
")",
"try",
":",
"if",
"os",
".",
"lstat",
"(",
"fil",
")",
":",
"return",
"fil",
"except",
"OSError",
":",
"pass",
"raise",
"SigverError",
"(",
"(",
"\"Can't find %s\"",
"%",
"bin_name",
")",
")"
] |
tries to find the xmlsec1 binary .
|
train
| true
|
13,112
|
def dnsrepr2names(x):
    res = []
    cur = ''
    while x:
        l = ord(x[0])
        x = x[1:]
        if (l == 0):
            if (cur and (cur[(-1)] == '.')):
                cur = cur[:(-1)]
            res.append(cur)
            cur = ''
            if (x and (ord(x[0]) == 0)):
                x = x[1:]
            continue
        if (l & 192):
            raise Exception("DNS message can't be compressed at this point!")
        else:
            cur += (x[:l] + '.')
            x = x[l:]
    return res
|
[
"def",
"dnsrepr2names",
"(",
"x",
")",
":",
"res",
"=",
"[",
"]",
"cur",
"=",
"''",
"while",
"x",
":",
"l",
"=",
"ord",
"(",
"x",
"[",
"0",
"]",
")",
"x",
"=",
"x",
"[",
"1",
":",
"]",
"if",
"(",
"l",
"==",
"0",
")",
":",
"if",
"(",
"cur",
"and",
"(",
"cur",
"[",
"(",
"-",
"1",
")",
"]",
"==",
"'.'",
")",
")",
":",
"cur",
"=",
"cur",
"[",
":",
"(",
"-",
"1",
")",
"]",
"res",
".",
"append",
"(",
"cur",
")",
"cur",
"=",
"''",
"if",
"(",
"x",
"and",
"(",
"ord",
"(",
"x",
"[",
"0",
"]",
")",
"==",
"0",
")",
")",
":",
"x",
"=",
"x",
"[",
"1",
":",
"]",
"continue",
"if",
"(",
"l",
"&",
"192",
")",
":",
"raise",
"Exception",
"(",
"\"DNS message can't be compressed at this point!\"",
")",
"else",
":",
"cur",
"+=",
"(",
"x",
"[",
":",
"l",
"]",
"+",
"'.'",
")",
"x",
"=",
"x",
"[",
"l",
":",
"]",
"return",
"res"
] |
take as input a dns encoded string and returns a list of dns names contained in it .
|
train
| false
|
13,113
|
def loggers():
    root = logging.root
    existing = root.manager.loggerDict.keys()
    return [logging.getLogger(name) for name in existing]
|
[
"def",
"loggers",
"(",
")",
":",
"root",
"=",
"logging",
".",
"root",
"existing",
"=",
"root",
".",
"manager",
".",
"loggerDict",
".",
"keys",
"(",
")",
"return",
"[",
"logging",
".",
"getLogger",
"(",
"name",
")",
"for",
"name",
"in",
"existing",
"]"
] |
get list of all loggers .
|
train
| false
|
13,114
|
def get_course_versions(course_key):
    course_locator = CourseKey.from_string(course_key)
    store = modulestore()._get_modulestore_for_courselike(course_locator)
    index_entry = store.get_course_index(course_locator)
    if (index_entry is not None):
        return {'draft-branch': index_entry['versions']['draft-branch'], 'published-branch': index_entry['versions']['published-branch']}
    return None
|
[
"def",
"get_course_versions",
"(",
"course_key",
")",
":",
"course_locator",
"=",
"CourseKey",
".",
"from_string",
"(",
"course_key",
")",
"store",
"=",
"modulestore",
"(",
")",
".",
"_get_modulestore_for_courselike",
"(",
"course_locator",
")",
"index_entry",
"=",
"store",
".",
"get_course_index",
"(",
"course_locator",
")",
"if",
"(",
"index_entry",
"is",
"not",
"None",
")",
":",
"return",
"{",
"'draft-branch'",
":",
"index_entry",
"[",
"'versions'",
"]",
"[",
"'draft-branch'",
"]",
",",
"'published-branch'",
":",
"index_entry",
"[",
"'versions'",
"]",
"[",
"'published-branch'",
"]",
"}",
"return",
"None"
] |
fetches the latest course versions .
|
train
| false
|
13,115
|
def extract_first_line(paragraph):
    lines = paragraph.splitlines()
    first = lines[0].strip()
    spaces = list(re.finditer('(\\s+)', first))
    if spaces:
        longest = max(spaces, key=(lambda m: (m.span()[1] - m.span()[0])))
        if (longest and (longest.start() > 1) and ((longest.end() - longest.start()) > 1)):
            first = first[:longest.start()]
    return first
|
[
"def",
"extract_first_line",
"(",
"paragraph",
")",
":",
"lines",
"=",
"paragraph",
".",
"splitlines",
"(",
")",
"first",
"=",
"lines",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"spaces",
"=",
"list",
"(",
"re",
".",
"finditer",
"(",
"'(\\\\s+)'",
",",
"first",
")",
")",
"if",
"spaces",
":",
"longest",
"=",
"max",
"(",
"spaces",
",",
"key",
"=",
"(",
"lambda",
"m",
":",
"(",
"m",
".",
"span",
"(",
")",
"[",
"1",
"]",
"-",
"m",
".",
"span",
"(",
")",
"[",
"0",
"]",
")",
")",
")",
"if",
"(",
"longest",
"and",
"(",
"longest",
".",
"start",
"(",
")",
">",
"1",
")",
"and",
"(",
"(",
"longest",
".",
"end",
"(",
")",
"-",
"longest",
".",
"start",
"(",
")",
")",
">",
"1",
")",
")",
":",
"first",
"=",
"first",
"[",
":",
"longest",
".",
"start",
"(",
")",
"]",
"return",
"first"
] |
extract the first line information from the function code text if available .
|
train
| false
|
13,117
|
def _parse_valid_types_from_validator(validator):
    if (not isinstance(validator, list)):
        return anglicize_json_type(validator)
    if (len(validator) == 1):
        return anglicize_json_type(validator[0])
    return u'{}, or {}'.format(u', '.join(([anglicize_json_type(validator[0])] + validator[1:(-1)])), anglicize_json_type(validator[(-1)]))
|
[
"def",
"_parse_valid_types_from_validator",
"(",
"validator",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"validator",
",",
"list",
")",
")",
":",
"return",
"anglicize_json_type",
"(",
"validator",
")",
"if",
"(",
"len",
"(",
"validator",
")",
"==",
"1",
")",
":",
"return",
"anglicize_json_type",
"(",
"validator",
"[",
"0",
"]",
")",
"return",
"u'{}, or {}'",
".",
"format",
"(",
"u', '",
".",
"join",
"(",
"(",
"[",
"anglicize_json_type",
"(",
"validator",
"[",
"0",
"]",
")",
"]",
"+",
"validator",
"[",
"1",
":",
"(",
"-",
"1",
")",
"]",
")",
")",
",",
"anglicize_json_type",
"(",
"validator",
"[",
"(",
"-",
"1",
")",
"]",
")",
")"
] |
a validator value can be either an array of valid types or a string of a valid type .
|
train
| false
|
13,118
|
def get_idx_from_sent(sent, word_idx_map, max_l=51, k=300, filter_h=5):
    x = []
    pad = (filter_h - 1)
    for i in xrange(pad):
        x.append(0)
    words = sent.split()
    for word in words:
        if (word in word_idx_map):
            x.append(word_idx_map[word])
    while (len(x) < (max_l + (2 * pad))):
        x.append(0)
    return x
|
[
"def",
"get_idx_from_sent",
"(",
"sent",
",",
"word_idx_map",
",",
"max_l",
"=",
"51",
",",
"k",
"=",
"300",
",",
"filter_h",
"=",
"5",
")",
":",
"x",
"=",
"[",
"]",
"pad",
"=",
"(",
"filter_h",
"-",
"1",
")",
"for",
"i",
"in",
"xrange",
"(",
"pad",
")",
":",
"x",
".",
"append",
"(",
"0",
")",
"words",
"=",
"sent",
".",
"split",
"(",
")",
"for",
"word",
"in",
"words",
":",
"if",
"(",
"word",
"in",
"word_idx_map",
")",
":",
"x",
".",
"append",
"(",
"word_idx_map",
"[",
"word",
"]",
")",
"while",
"(",
"len",
"(",
"x",
")",
"<",
"(",
"max_l",
"+",
"(",
"2",
"*",
"pad",
")",
")",
")",
":",
"x",
".",
"append",
"(",
"0",
")",
"return",
"x"
] |
transforms sentence into a list of indices .
|
train
| false
|
13,119
|
def decodeHexValue(value, raw=False):
    retVal = value
    def _(value):
        retVal = value
        if (value and isinstance(value, basestring) and ((len(value) % 2) == 0)):
            retVal = hexdecode(retVal)
            if ((not kb.binaryField) and (not raw)):
                if (Backend.isDbms(DBMS.MSSQL) and value.startswith('0x')):
                    try:
                        retVal = retVal.decode('utf-16-le')
                    except UnicodeDecodeError:
                        pass
                elif Backend.isDbms(DBMS.HSQLDB):
                    try:
                        retVal = retVal.decode('utf-16-be')
                    except UnicodeDecodeError:
                        pass
                if (not isinstance(retVal, unicode)):
                    retVal = getUnicode(retVal, 'utf8')
        return retVal
    try:
        retVal = applyFunctionRecursively(value, _)
    except:
        singleTimeWarnMessage(("there was a problem decoding value '%s' from expected hexadecimal form" % value))
    return retVal
|
[
"def",
"decodeHexValue",
"(",
"value",
",",
"raw",
"=",
"False",
")",
":",
"retVal",
"=",
"value",
"def",
"_",
"(",
"value",
")",
":",
"retVal",
"=",
"value",
"if",
"(",
"value",
"and",
"isinstance",
"(",
"value",
",",
"basestring",
")",
"and",
"(",
"(",
"len",
"(",
"value",
")",
"%",
"2",
")",
"==",
"0",
")",
")",
":",
"retVal",
"=",
"hexdecode",
"(",
"retVal",
")",
"if",
"(",
"(",
"not",
"kb",
".",
"binaryField",
")",
"and",
"(",
"not",
"raw",
")",
")",
":",
"if",
"(",
"Backend",
".",
"isDbms",
"(",
"DBMS",
".",
"MSSQL",
")",
"and",
"value",
".",
"startswith",
"(",
"'0x'",
")",
")",
":",
"try",
":",
"retVal",
"=",
"retVal",
".",
"decode",
"(",
"'utf-16-le'",
")",
"except",
"UnicodeDecodeError",
":",
"pass",
"elif",
"Backend",
".",
"isDbms",
"(",
"DBMS",
".",
"HSQLDB",
")",
":",
"try",
":",
"retVal",
"=",
"retVal",
".",
"decode",
"(",
"'utf-16-be'",
")",
"except",
"UnicodeDecodeError",
":",
"pass",
"if",
"(",
"not",
"isinstance",
"(",
"retVal",
",",
"unicode",
")",
")",
":",
"retVal",
"=",
"getUnicode",
"(",
"retVal",
",",
"'utf8'",
")",
"return",
"retVal",
"try",
":",
"retVal",
"=",
"applyFunctionRecursively",
"(",
"value",
",",
"_",
")",
"except",
":",
"singleTimeWarnMessage",
"(",
"(",
"\"there was a problem decoding value '%s' from expected hexadecimal form\"",
"%",
"value",
")",
")",
"return",
"retVal"
] |
returns value decoded from dbms specific hexadecimal representation .
|
train
| false
|
13,121
|
def gpu_flatten(x, outdim=1):
    x = as_cuda_ndarray_variable(x)
    if (outdim > 1):
        dims = (tuple(x.shape[:(outdim - 1)]) + ((-1),))
    else:
        dims = ((-1),)
    return GpuReshape(outdim)(x, dims)
|
[
"def",
"gpu_flatten",
"(",
"x",
",",
"outdim",
"=",
"1",
")",
":",
"x",
"=",
"as_cuda_ndarray_variable",
"(",
"x",
")",
"if",
"(",
"outdim",
">",
"1",
")",
":",
"dims",
"=",
"(",
"tuple",
"(",
"x",
".",
"shape",
"[",
":",
"(",
"outdim",
"-",
"1",
")",
"]",
")",
"+",
"(",
"(",
"-",
"1",
")",
",",
")",
")",
"else",
":",
"dims",
"=",
"(",
"(",
"-",
"1",
")",
",",
")",
"return",
"GpuReshape",
"(",
"outdim",
")",
"(",
"x",
",",
"dims",
")"
] |
implement flatten on the gpu .
|
train
| false
|
13,123
|
@register.function
def get_compat_counts(addon):
    return CompatReport.get_counts(addon.guid)
|
[
"@",
"register",
".",
"function",
"def",
"get_compat_counts",
"(",
"addon",
")",
":",
"return",
"CompatReport",
".",
"get_counts",
"(",
"addon",
".",
"guid",
")"
] |
get counts for add-on compatibility reports .
|
train
| false
|
13,124
|
def break_simplesub(ctext, startkey=None):
    ctext = re.sub('[^A-Z]', '', ctext.upper())
    (parentkey, parentscore) = ((startkey or list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')), (-9.9e+100))
    if (not startkey):
        random.shuffle(parentkey)
    parentscore = fitness.score(sub_decipher(ctext, parentkey))
    count = 0
    while (count < 1000):
        a = random.randint(0, 25)
        b = random.randint(0, 25)
        child = parentkey[:]
        (child[a], child[b]) = (child[b], child[a])
        score = fitness.score(sub_decipher(ctext, child))
        if (score > parentscore):
            (parentscore, parentkey) = (score, child[:])
            count = 0
        count += 1
    return (parentscore, parentkey)
|
[
"def",
"break_simplesub",
"(",
"ctext",
",",
"startkey",
"=",
"None",
")",
":",
"ctext",
"=",
"re",
".",
"sub",
"(",
"'[^A-Z]'",
",",
"''",
",",
"ctext",
".",
"upper",
"(",
")",
")",
"(",
"parentkey",
",",
"parentscore",
")",
"=",
"(",
"(",
"startkey",
"or",
"list",
"(",
"'ABCDEFGHIJKLMNOPQRSTUVWXYZ'",
")",
")",
",",
"(",
"-",
"9.9e+100",
")",
")",
"if",
"(",
"not",
"startkey",
")",
":",
"random",
".",
"shuffle",
"(",
"parentkey",
")",
"parentscore",
"=",
"fitness",
".",
"score",
"(",
"sub_decipher",
"(",
"ctext",
",",
"parentkey",
")",
")",
"count",
"=",
"0",
"while",
"(",
"count",
"<",
"1000",
")",
":",
"a",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"25",
")",
"b",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"25",
")",
"child",
"=",
"parentkey",
"[",
":",
"]",
"(",
"child",
"[",
"a",
"]",
",",
"child",
"[",
"b",
"]",
")",
"=",
"(",
"child",
"[",
"b",
"]",
",",
"child",
"[",
"a",
"]",
")",
"score",
"=",
"fitness",
".",
"score",
"(",
"sub_decipher",
"(",
"ctext",
",",
"child",
")",
")",
"if",
"(",
"score",
">",
"parentscore",
")",
":",
"(",
"parentscore",
",",
"parentkey",
")",
"=",
"(",
"score",
",",
"child",
"[",
":",
"]",
")",
"count",
"=",
"0",
"count",
"+=",
"1",
"return",
"(",
"parentscore",
",",
"parentkey",
")"
] |
perform hill-climbing with a single start .
|
train
| false
|
13,125
|
def _getWriters(reactor):
    if IReactorFDSet.providedBy(reactor):
        return reactor.getWriters()
    elif ('IOCP' in reactor.__class__.__name__):
        return reactor.handles
    else:
        raise Exception(('Cannot find writers on %r' % (reactor,)))
|
[
"def",
"_getWriters",
"(",
"reactor",
")",
":",
"if",
"IReactorFDSet",
".",
"providedBy",
"(",
"reactor",
")",
":",
"return",
"reactor",
".",
"getWriters",
"(",
")",
"elif",
"(",
"'IOCP'",
"in",
"reactor",
".",
"__class__",
".",
"__name__",
")",
":",
"return",
"reactor",
".",
"handles",
"else",
":",
"raise",
"Exception",
"(",
"(",
"'Cannot find writers on %r'",
"%",
"(",
"reactor",
",",
")",
")",
")"
] |
like l{ireactorfdset .
|
train
| false
|
13,126
|
@require_context
def mapped_array(shape, dtype=np.float, strides=None, order='C', stream=0, portable=False, wc=False):
    (shape, strides, dtype) = _prepare_shape_strides_dtype(shape, strides, dtype, order)
    bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)
    buffer = current_context().memhostalloc(bytesize, mapped=True)
    npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order, buffer=buffer)
    mappedview = np.ndarray.view(npary, type=devicearray.MappedNDArray)
    mappedview.device_setup(buffer, stream=stream)
    return mappedview
|
[
"@",
"require_context",
"def",
"mapped_array",
"(",
"shape",
",",
"dtype",
"=",
"np",
".",
"float",
",",
"strides",
"=",
"None",
",",
"order",
"=",
"'C'",
",",
"stream",
"=",
"0",
",",
"portable",
"=",
"False",
",",
"wc",
"=",
"False",
")",
":",
"(",
"shape",
",",
"strides",
",",
"dtype",
")",
"=",
"_prepare_shape_strides_dtype",
"(",
"shape",
",",
"strides",
",",
"dtype",
",",
"order",
")",
"bytesize",
"=",
"driver",
".",
"memory_size_from_info",
"(",
"shape",
",",
"strides",
",",
"dtype",
".",
"itemsize",
")",
"buffer",
"=",
"current_context",
"(",
")",
".",
"memhostalloc",
"(",
"bytesize",
",",
"mapped",
"=",
"True",
")",
"npary",
"=",
"np",
".",
"ndarray",
"(",
"shape",
"=",
"shape",
",",
"strides",
"=",
"strides",
",",
"dtype",
"=",
"dtype",
",",
"order",
"=",
"order",
",",
"buffer",
"=",
"buffer",
")",
"mappedview",
"=",
"np",
".",
"ndarray",
".",
"view",
"(",
"npary",
",",
"type",
"=",
"devicearray",
".",
"MappedNDArray",
")",
"mappedview",
".",
"device_setup",
"(",
"buffer",
",",
"stream",
"=",
"stream",
")",
"return",
"mappedview"
] |
mapped_array allocate a mapped ndarray with a buffer that is pinned and mapped on to the device .
|
train
| false
|
13,128
|
def tailProbability(x, distributionParams):
    if (('mean' not in distributionParams) or ('stdev' not in distributionParams)):
        raise RuntimeError('Insufficient parameters to specify the distribution.')
    if (x < distributionParams['mean']):
        xp = ((2 * distributionParams['mean']) - x)
        return tailProbability(xp, distributionParams)
    z = ((x - distributionParams['mean']) / distributionParams['stdev'])
    return (0.5 * math.erfc((z / 1.4142)))
|
[
"def",
"tailProbability",
"(",
"x",
",",
"distributionParams",
")",
":",
"if",
"(",
"(",
"'mean'",
"not",
"in",
"distributionParams",
")",
"or",
"(",
"'stdev'",
"not",
"in",
"distributionParams",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"'Insufficient parameters to specify the distribution.'",
")",
"if",
"(",
"x",
"<",
"distributionParams",
"[",
"'mean'",
"]",
")",
":",
"xp",
"=",
"(",
"(",
"2",
"*",
"distributionParams",
"[",
"'mean'",
"]",
")",
"-",
"x",
")",
"return",
"tailProbability",
"(",
"xp",
",",
"distributionParams",
")",
"z",
"=",
"(",
"(",
"x",
"-",
"distributionParams",
"[",
"'mean'",
"]",
")",
"/",
"distributionParams",
"[",
"'stdev'",
"]",
")",
"return",
"(",
"0.5",
"*",
"math",
".",
"erfc",
"(",
"(",
"z",
"/",
"1.4142",
")",
")",
")"
] |
given the normal distribution specified by the mean and standard deviation in distributionparams .
|
train
| true
|
13,129
|
def test_coil_trans():
    rng = np.random.RandomState(0)
    x = rng.randn(4, 4)
    x[3] = [0, 0, 0, 1]
    assert_allclose(_loc_to_coil_trans(_coil_trans_to_loc(x)), x)
    x = rng.randn(12)
    assert_allclose(_coil_trans_to_loc(_loc_to_coil_trans(x)), x)
|
[
"def",
"test_coil_trans",
"(",
")",
":",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"0",
")",
"x",
"=",
"rng",
".",
"randn",
"(",
"4",
",",
"4",
")",
"x",
"[",
"3",
"]",
"=",
"[",
"0",
",",
"0",
",",
"0",
",",
"1",
"]",
"assert_allclose",
"(",
"_loc_to_coil_trans",
"(",
"_coil_trans_to_loc",
"(",
"x",
")",
")",
",",
"x",
")",
"x",
"=",
"rng",
".",
"randn",
"(",
"12",
")",
"assert_allclose",
"(",
"_coil_trans_to_loc",
"(",
"_loc_to_coil_trans",
"(",
"x",
")",
")",
",",
"x",
")"
] |
test loc<->coil_trans functions .
|
train
| false
|
13,130
|
@task()
@timeit
def send_award_notification(award):
    @email_utils.safe_translation
    def _make_mail(locale, context, email):
        subject = _(u"You were awarded the '{title}' badge!").format(title=pgettext('DB: badger.Badge.title', award.badge.title))
        mail = email_utils.make_mail(subject=subject, text_template='kbadge/email/award_notification.ltxt', html_template='kbadge/email/award_notification.html', context_vars=context, from_email=settings.DEFAULT_FROM_EMAIL, to_email=email)
        return mail
    msg = _make_mail(locale=award.user.profile.locale, context={'host': Site.objects.get_current().domain, 'award': award, 'badge': award.badge}, email=award.user.email)
    email_utils.send_messages([msg])
|
[
"@",
"task",
"(",
")",
"@",
"timeit",
"def",
"send_award_notification",
"(",
"award",
")",
":",
"@",
"email_utils",
".",
"safe_translation",
"def",
"_make_mail",
"(",
"locale",
",",
"context",
",",
"email",
")",
":",
"subject",
"=",
"_",
"(",
"u\"You were awarded the '{title}' badge!\"",
")",
".",
"format",
"(",
"title",
"=",
"pgettext",
"(",
"'DB: badger.Badge.title'",
",",
"award",
".",
"badge",
".",
"title",
")",
")",
"mail",
"=",
"email_utils",
".",
"make_mail",
"(",
"subject",
"=",
"subject",
",",
"text_template",
"=",
"'kbadge/email/award_notification.ltxt'",
",",
"html_template",
"=",
"'kbadge/email/award_notification.html'",
",",
"context_vars",
"=",
"context",
",",
"from_email",
"=",
"settings",
".",
"DEFAULT_FROM_EMAIL",
",",
"to_email",
"=",
"email",
")",
"return",
"mail",
"msg",
"=",
"_make_mail",
"(",
"locale",
"=",
"award",
".",
"user",
".",
"profile",
".",
"locale",
",",
"context",
"=",
"{",
"'host'",
":",
"Site",
".",
"objects",
".",
"get_current",
"(",
")",
".",
"domain",
",",
"'award'",
":",
"award",
",",
"'badge'",
":",
"award",
".",
"badge",
"}",
",",
"email",
"=",
"award",
".",
"user",
".",
"email",
")",
"email_utils",
".",
"send_messages",
"(",
"[",
"msg",
"]",
")"
] |
sends the award notification email :arg award: the django-badger award instance .
|
train
| false
|
13,131
|
def _job_dir():
    return os.path.join(__opts__['cachedir'], 'jobs')
|
[
"def",
"_job_dir",
"(",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'cachedir'",
"]",
",",
"'jobs'",
")"
] |
return root of the jobs cache directory .
|
train
| false
|
13,132
|
def artifactory_maven(registry, xml_parent, data):
    artifactory = XML.SubElement(xml_parent, 'org.jfrog.hudson.maven3.ArtifactoryMaven3NativeConfigurator')
    details = XML.SubElement(artifactory, 'details')
    artifactory_common_details(details, data)
    if ('repo-key' in data):
        XML.SubElement(details, 'downloadRepositoryKey').text = data['repo-key']
    else:
        XML.SubElement(details, 'downloadSnapshotRepositoryKey').text = data.get('snapshot-repo-key', '')
        XML.SubElement(details, 'downloadReleaseRepositoryKey').text = data.get('release-repo-key', '')
|
[
"def",
"artifactory_maven",
"(",
"registry",
",",
"xml_parent",
",",
"data",
")",
":",
"artifactory",
"=",
"XML",
".",
"SubElement",
"(",
"xml_parent",
",",
"'org.jfrog.hudson.maven3.ArtifactoryMaven3NativeConfigurator'",
")",
"details",
"=",
"XML",
".",
"SubElement",
"(",
"artifactory",
",",
"'details'",
")",
"artifactory_common_details",
"(",
"details",
",",
"data",
")",
"if",
"(",
"'repo-key'",
"in",
"data",
")",
":",
"XML",
".",
"SubElement",
"(",
"details",
",",
"'downloadRepositoryKey'",
")",
".",
"text",
"=",
"data",
"[",
"'repo-key'",
"]",
"else",
":",
"XML",
".",
"SubElement",
"(",
"details",
",",
"'downloadSnapshotRepositoryKey'",
")",
".",
"text",
"=",
"data",
".",
"get",
"(",
"'snapshot-repo-key'",
",",
"''",
")",
"XML",
".",
"SubElement",
"(",
"details",
",",
"'downloadReleaseRepositoryKey'",
")",
".",
"text",
"=",
"data",
".",
"get",
"(",
"'release-repo-key'",
",",
"''",
")"
] |
yaml: artifactory-maven wrapper for non-maven projects .
|
train
| false
|
13,133
|
def _log_multivariate_normal_density_spherical(X, means, covars):
    cv = covars.copy()
    if (covars.ndim == 1):
        cv = cv[:, np.newaxis]
    if (cv.shape[1] == 1):
        cv = np.tile(cv, (1, X.shape[(-1)]))
    return _log_multivariate_normal_density_diag(X, means, cv)
|
[
"def",
"_log_multivariate_normal_density_spherical",
"(",
"X",
",",
"means",
",",
"covars",
")",
":",
"cv",
"=",
"covars",
".",
"copy",
"(",
")",
"if",
"(",
"covars",
".",
"ndim",
"==",
"1",
")",
":",
"cv",
"=",
"cv",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"if",
"(",
"cv",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
")",
":",
"cv",
"=",
"np",
".",
"tile",
"(",
"cv",
",",
"(",
"1",
",",
"X",
".",
"shape",
"[",
"(",
"-",
"1",
")",
"]",
")",
")",
"return",
"_log_multivariate_normal_density_diag",
"(",
"X",
",",
"means",
",",
"cv",
")"
] |
compute gaussian log-density at x for a spherical model .
|
train
| true
|
13,134
|
def _size_map(size):
    try:
        if (not isinstance(size, int)):
            if re.search('[Kk]', size):
                size = (1024 * float(re.sub('[Kk]', '', size)))
            elif re.search('[Mm]', size):
                size = ((1024 ** 2) * float(re.sub('[Mm]', '', size)))
            size = int(size)
        return size
    except:
        return None
|
[
"def",
"_size_map",
"(",
"size",
")",
":",
"try",
":",
"if",
"(",
"not",
"isinstance",
"(",
"size",
",",
"int",
")",
")",
":",
"if",
"re",
".",
"search",
"(",
"'[Kk]'",
",",
"size",
")",
":",
"size",
"=",
"(",
"1024",
"*",
"float",
"(",
"re",
".",
"sub",
"(",
"'[Kk]'",
",",
"''",
",",
"size",
")",
")",
")",
"elif",
"re",
".",
"search",
"(",
"'[Mm]'",
",",
"size",
")",
":",
"size",
"=",
"(",
"(",
"1024",
"**",
"2",
")",
"*",
"float",
"(",
"re",
".",
"sub",
"(",
"'[Mm]'",
",",
"''",
",",
"size",
")",
")",
")",
"size",
"=",
"int",
"(",
"size",
")",
"return",
"size",
"except",
":",
"return",
"None"
] |
map bcaches size strings to real bytes .
|
train
| false
|
13,135
|
def check_exclusive_options(**kwargs):
    if (not kwargs):
        return
    pretty_keys = kwargs.pop('pretty_keys', True)
    exclusive_options = {}
    for (k, v) in kwargs.items():
        if (v is not None):
            exclusive_options[k] = True
    if (len(exclusive_options) > 1):
        if pretty_keys:
            names = [k.replace('_', ' ') for k in kwargs.keys()]
        else:
            names = kwargs.keys()
        names = ', '.join(sorted(names))
        msg = (_('May specify only one of %s') % names)
        raise exception.InvalidInput(reason=msg)
|
[
"def",
"check_exclusive_options",
"(",
"**",
"kwargs",
")",
":",
"if",
"(",
"not",
"kwargs",
")",
":",
"return",
"pretty_keys",
"=",
"kwargs",
".",
"pop",
"(",
"'pretty_keys'",
",",
"True",
")",
"exclusive_options",
"=",
"{",
"}",
"for",
"(",
"k",
",",
"v",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"(",
"v",
"is",
"not",
"None",
")",
":",
"exclusive_options",
"[",
"k",
"]",
"=",
"True",
"if",
"(",
"len",
"(",
"exclusive_options",
")",
">",
"1",
")",
":",
"if",
"pretty_keys",
":",
"names",
"=",
"[",
"k",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
"for",
"k",
"in",
"kwargs",
".",
"keys",
"(",
")",
"]",
"else",
":",
"names",
"=",
"kwargs",
".",
"keys",
"(",
")",
"names",
"=",
"', '",
".",
"join",
"(",
"sorted",
"(",
"names",
")",
")",
"msg",
"=",
"(",
"_",
"(",
"'May specify only one of %s'",
")",
"%",
"names",
")",
"raise",
"exception",
".",
"InvalidInput",
"(",
"reason",
"=",
"msg",
")"
] |
checks that only one of the provided options is actually not-none .
|
train
| false
|
13,136
|
def validate_config_against_schema(config_schema, config_object, config_path, pack_name=None):
    pack_name = (pack_name or 'unknown')
    schema = util_schema.get_schema_for_resource_parameters(parameters_schema=config_schema, allow_additional_properties=True)
    instance = config_object
    try:
        cleaned = util_schema.validate(instance=instance, schema=schema, cls=util_schema.CustomValidator, use_default=True, allow_default_none=True)
    except jsonschema.ValidationError as e:
        attribute = getattr(e, 'path', [])
        attribute = '.'.join(attribute)
        msg = ('Failed validating attribute "%s" in config for pack "%s" (%s): %s' % (attribute, pack_name, config_path, str(e)))
        raise jsonschema.ValidationError(msg)
    return cleaned
|
[
"def",
"validate_config_against_schema",
"(",
"config_schema",
",",
"config_object",
",",
"config_path",
",",
"pack_name",
"=",
"None",
")",
":",
"pack_name",
"=",
"(",
"pack_name",
"or",
"'unknown'",
")",
"schema",
"=",
"util_schema",
".",
"get_schema_for_resource_parameters",
"(",
"parameters_schema",
"=",
"config_schema",
",",
"allow_additional_properties",
"=",
"True",
")",
"instance",
"=",
"config_object",
"try",
":",
"cleaned",
"=",
"util_schema",
".",
"validate",
"(",
"instance",
"=",
"instance",
",",
"schema",
"=",
"schema",
",",
"cls",
"=",
"util_schema",
".",
"CustomValidator",
",",
"use_default",
"=",
"True",
",",
"allow_default_none",
"=",
"True",
")",
"except",
"jsonschema",
".",
"ValidationError",
"as",
"e",
":",
"attribute",
"=",
"getattr",
"(",
"e",
",",
"'path'",
",",
"[",
"]",
")",
"attribute",
"=",
"'.'",
".",
"join",
"(",
"attribute",
")",
"msg",
"=",
"(",
"'Failed validating attribute \"%s\" in config for pack \"%s\" (%s): %s'",
"%",
"(",
"attribute",
",",
"pack_name",
",",
"config_path",
",",
"str",
"(",
"e",
")",
")",
")",
"raise",
"jsonschema",
".",
"ValidationError",
"(",
"msg",
")",
"return",
"cleaned"
] |
validate provided config dictionary against the provided config schema dictionary .
|
train
| false
|
13,137
|
def sfilter(pred, brule):
    def filtered_brl(expr):
        for x in filter(pred, brule(expr)):
            (yield x)
    return filtered_brl
|
[
"def",
"sfilter",
"(",
"pred",
",",
"brule",
")",
":",
"def",
"filtered_brl",
"(",
"expr",
")",
":",
"for",
"x",
"in",
"filter",
"(",
"pred",
",",
"brule",
"(",
"expr",
")",
")",
":",
"(",
"yield",
"x",
")",
"return",
"filtered_brl"
] |
yield only those results which satisfy the predicate .
|
train
| false
|
13,139
|
def _prepare_write_tfr(tfr, condition):
    return (condition, dict(times=tfr.times, freqs=tfr.freqs, data=tfr.data, info=tfr.info, nave=tfr.nave, comment=tfr.comment, method=tfr.method))
|
[
"def",
"_prepare_write_tfr",
"(",
"tfr",
",",
"condition",
")",
":",
"return",
"(",
"condition",
",",
"dict",
"(",
"times",
"=",
"tfr",
".",
"times",
",",
"freqs",
"=",
"tfr",
".",
"freqs",
",",
"data",
"=",
"tfr",
".",
"data",
",",
"info",
"=",
"tfr",
".",
"info",
",",
"nave",
"=",
"tfr",
".",
"nave",
",",
"comment",
"=",
"tfr",
".",
"comment",
",",
"method",
"=",
"tfr",
".",
"method",
")",
")"
] |
aux function .
|
train
| false
|
13,140
|
def apply_matrix_norm(m, v):
    (a, b, c, d, e, f) = m
    (p, q) = v
    return (((a * p) + (c * q)), ((b * p) + (d * q)))
|
[
"def",
"apply_matrix_norm",
"(",
"m",
",",
"v",
")",
":",
"(",
"a",
",",
"b",
",",
"c",
",",
"d",
",",
"e",
",",
"f",
")",
"=",
"m",
"(",
"p",
",",
"q",
")",
"=",
"v",
"return",
"(",
"(",
"(",
"a",
"*",
"p",
")",
"+",
"(",
"c",
"*",
"q",
")",
")",
",",
"(",
"(",
"b",
"*",
"p",
")",
"+",
"(",
"d",
"*",
"q",
")",
")",
")"
] |
equivalent to apply_matrix_pt(m .
|
train
| true
|
13,141
|
def handle_file_upload(path, file, site):
    uploadedfile = None
    try:
        file_path = os.path.join(path, file.name)
        uploadedfile = site.storage.save(file_path, file)
    except Exception as inst:
        raise inst
    return uploadedfile
|
[
"def",
"handle_file_upload",
"(",
"path",
",",
"file",
",",
"site",
")",
":",
"uploadedfile",
"=",
"None",
"try",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"file",
".",
"name",
")",
"uploadedfile",
"=",
"site",
".",
"storage",
".",
"save",
"(",
"file_path",
",",
"file",
")",
"except",
"Exception",
"as",
"inst",
":",
"raise",
"inst",
"return",
"uploadedfile"
] |
handle file upload .
|
train
| false
|
13,145
|
def rc_file_defaults():
    rcParams.update(rcParamsOrig)
|
[
"def",
"rc_file_defaults",
"(",
")",
":",
"rcParams",
".",
"update",
"(",
"rcParamsOrig",
")"
] |
restore the default rc params from the original matplotlib rc that was loaded .
|
train
| false
|
13,146
|
def get_channel_max_user_count(channel=14, **kwargs):
    access = get_user_access(channel=channel, uid=1, **kwargs)
    return access['channel_info']['max_user_count']
|
[
"def",
"get_channel_max_user_count",
"(",
"channel",
"=",
"14",
",",
"**",
"kwargs",
")",
":",
"access",
"=",
"get_user_access",
"(",
"channel",
"=",
"channel",
",",
"uid",
"=",
"1",
",",
"**",
"kwargs",
")",
"return",
"access",
"[",
"'channel_info'",
"]",
"[",
"'max_user_count'",
"]"
] |
get max users in channel .
|
train
| true
|
13,147
|
def get_model_root(model):
    if model._meta.parents:
        parent_model = list(model._meta.parents.items())[0][0]
        return get_model_root(parent_model)
    return model
|
[
"def",
"get_model_root",
"(",
"model",
")",
":",
"if",
"model",
".",
"_meta",
".",
"parents",
":",
"parent_model",
"=",
"list",
"(",
"model",
".",
"_meta",
".",
"parents",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"return",
"get_model_root",
"(",
"parent_model",
")",
"return",
"model"
] |
this function finds the root model for any given model .
|
train
| false
|
13,148
|
def remove_cert(name, thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):
    ret = {'name': name, 'changes': dict(), 'comment': str(), 'result': None}
    store_path = 'Cert:\\{0}\\{1}'.format(context, store)
    current_certs = __salt__['win_pki.get_certs'](context=context, store=store)
    if (thumbprint not in current_certs):
        ret['comment'] = "Certificate '{0}' already removed from store: {1}".format(thumbprint, store_path)
        ret['result'] = True
    elif __opts__['test']:
        ret['comment'] = "Certificate '{0}' will be removed from store: {1}".format(thumbprint, store_path)
        ret['changes'] = {'old': thumbprint, 'new': None}
    else:
        ret['changes'] = {'old': thumbprint, 'new': None}
        ret['result'] = __salt__['win_pki.remove_cert'](thumbprint=thumbprint, context=context, store=store)
        if ret['result']:
            ret['comment'] = "Certificate '{0}' removed from store: {1}".format(thumbprint, store_path)
        else:
            ret['comment'] = "Certificate '{0}' unable to be removed from store: {1}".format(thumbprint, store_path)
    return ret
|
[
"def",
"remove_cert",
"(",
"name",
",",
"thumbprint",
",",
"context",
"=",
"_DEFAULT_CONTEXT",
",",
"store",
"=",
"_DEFAULT_STORE",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"dict",
"(",
")",
",",
"'comment'",
":",
"str",
"(",
")",
",",
"'result'",
":",
"None",
"}",
"store_path",
"=",
"'Cert:\\\\{0}\\\\{1}'",
".",
"format",
"(",
"context",
",",
"store",
")",
"current_certs",
"=",
"__salt__",
"[",
"'win_pki.get_certs'",
"]",
"(",
"context",
"=",
"context",
",",
"store",
"=",
"store",
")",
"if",
"(",
"thumbprint",
"not",
"in",
"current_certs",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"\"Certificate '{0}' already removed from store: {1}\"",
".",
"format",
"(",
"thumbprint",
",",
"store_path",
")",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"elif",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"\"Certificate '{0}' will be removed from store: {1}\"",
".",
"format",
"(",
"thumbprint",
",",
"store_path",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"thumbprint",
",",
"'new'",
":",
"None",
"}",
"else",
":",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"thumbprint",
",",
"'new'",
":",
"None",
"}",
"ret",
"[",
"'result'",
"]",
"=",
"__salt__",
"[",
"'win_pki.remove_cert'",
"]",
"(",
"thumbprint",
"=",
"thumbprint",
",",
"context",
"=",
"context",
",",
"store",
"=",
"store",
")",
"if",
"ret",
"[",
"'result'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"\"Certificate '{0}' removed from store: {1}\"",
".",
"format",
"(",
"thumbprint",
",",
"store_path",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"\"Certificate '{0}' unable to be removed from store: {1}\"",
".",
"format",
"(",
"thumbprint",
",",
"store_path",
")",
"return",
"ret"
] |
remove the certificate from the given certificate store .
|
train
| false
|
13,149
|
def getTagBracketedProcedure(procedure):
    return getTagBracketedLine('procedureName', procedure)
|
[
"def",
"getTagBracketedProcedure",
"(",
"procedure",
")",
":",
"return",
"getTagBracketedLine",
"(",
"'procedureName'",
",",
"procedure",
")"
] |
get line with a begin procedure tag .
|
train
| false
|
13,151
|
def _expand_integer(ty):
    if isinstance(ty, types.Integer):
        if ty.signed:
            return max(types.intp, ty)
        else:
            return max(types.uintp, ty)
    elif isinstance(ty, types.Boolean):
        return types.intp
    else:
        return ty
|
[
"def",
"_expand_integer",
"(",
"ty",
")",
":",
"if",
"isinstance",
"(",
"ty",
",",
"types",
".",
"Integer",
")",
":",
"if",
"ty",
".",
"signed",
":",
"return",
"max",
"(",
"types",
".",
"intp",
",",
"ty",
")",
"else",
":",
"return",
"max",
"(",
"types",
".",
"uintp",
",",
"ty",
")",
"elif",
"isinstance",
"(",
"ty",
",",
"types",
".",
"Boolean",
")",
":",
"return",
"types",
".",
"intp",
"else",
":",
"return",
"ty"
] |
if *ty* is an integer .
|
train
| false
|
13,152
|
def imsiDetachIndication():
    a = TpPd(pd=5)
    b = MessageType(mesType=1)
    c = MobileStationClassmark1()
    d = MobileId()
    packet = (((a / b) / c) / d)
    return packet
|
[
"def",
"imsiDetachIndication",
"(",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"5",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"1",
")",
"c",
"=",
"MobileStationClassmark1",
"(",
")",
"d",
"=",
"MobileId",
"(",
")",
"packet",
"=",
"(",
"(",
"(",
"a",
"/",
"b",
")",
"/",
"c",
")",
"/",
"d",
")",
"return",
"packet"
] |
imsi detach indication section 9 .
|
train
| true
|
13,156
|
def save_config_value(request, response, key, value):
    request.session[key] = value
    response.set_cookie(key, value, expires=one_year_from_now())
    return response
|
[
"def",
"save_config_value",
"(",
"request",
",",
"response",
",",
"key",
",",
"value",
")",
":",
"request",
".",
"session",
"[",
"key",
"]",
"=",
"value",
"response",
".",
"set_cookie",
"(",
"key",
",",
"value",
",",
"expires",
"=",
"one_year_from_now",
"(",
")",
")",
"return",
"response"
] |
sets value of key key to value in both session and cookies .
|
train
| true
|
13,157
|
def test_hist_flush_with_store_stdout(hist, xonsh_builtins):
    hf = hist.flush()
    assert (hf is None)
    xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set()
    xonsh_builtins.__xonsh_env__['XONSH_STORE_STDOUT'] = True
    hist.append({'inp': 'still alive?', 'rtn': 0, 'out': 'yes'})
    hf = hist.flush()
    assert (hf is not None)
    while hf.is_alive():
        pass
    with LazyJSON(hist.filename) as lj:
        assert (len(lj['cmds']) == 1)
        assert (lj['cmds'][0]['inp'] == 'still alive?')
        assert (lj['cmds'][0]['out'].strip() == 'yes')
|
[
"def",
"test_hist_flush_with_store_stdout",
"(",
"hist",
",",
"xonsh_builtins",
")",
":",
"hf",
"=",
"hist",
".",
"flush",
"(",
")",
"assert",
"(",
"hf",
"is",
"None",
")",
"xonsh_builtins",
".",
"__xonsh_env__",
"[",
"'HISTCONTROL'",
"]",
"=",
"set",
"(",
")",
"xonsh_builtins",
".",
"__xonsh_env__",
"[",
"'XONSH_STORE_STDOUT'",
"]",
"=",
"True",
"hist",
".",
"append",
"(",
"{",
"'inp'",
":",
"'still alive?'",
",",
"'rtn'",
":",
"0",
",",
"'out'",
":",
"'yes'",
"}",
")",
"hf",
"=",
"hist",
".",
"flush",
"(",
")",
"assert",
"(",
"hf",
"is",
"not",
"None",
")",
"while",
"hf",
".",
"is_alive",
"(",
")",
":",
"pass",
"with",
"LazyJSON",
"(",
"hist",
".",
"filename",
")",
"as",
"lj",
":",
"assert",
"(",
"len",
"(",
"lj",
"[",
"'cmds'",
"]",
")",
"==",
"1",
")",
"assert",
"(",
"lj",
"[",
"'cmds'",
"]",
"[",
"0",
"]",
"[",
"'inp'",
"]",
"==",
"'still alive?'",
")",
"assert",
"(",
"lj",
"[",
"'cmds'",
"]",
"[",
"0",
"]",
"[",
"'out'",
"]",
".",
"strip",
"(",
")",
"==",
"'yes'",
")"
] |
verify explicit flushing of the history works .
|
train
| false
|
13,158
|
def _failhard():
    raise FileserverConfigError('Failed to load hg fileserver backend')
|
[
"def",
"_failhard",
"(",
")",
":",
"raise",
"FileserverConfigError",
"(",
"'Failed to load hg fileserver backend'",
")"
] |
fatal fileserver configuration issue .
|
train
| false
|
13,159
|
def truncate_too_long_number(numobj):
    if is_valid_number(numobj):
        return True
    numobj_copy = PhoneNumber()
    numobj_copy.merge_from(numobj)
    national_number = numobj.national_number
    while (not is_valid_number(numobj_copy)):
        national_number = (national_number // 10)
        numobj_copy.national_number = national_number
        validation_result = is_possible_number_with_reason(numobj_copy)
        if ((validation_result == ValidationResult.TOO_SHORT) or (national_number == 0)):
            return False
    numobj.national_number = national_number
    return True
|
[
"def",
"truncate_too_long_number",
"(",
"numobj",
")",
":",
"if",
"is_valid_number",
"(",
"numobj",
")",
":",
"return",
"True",
"numobj_copy",
"=",
"PhoneNumber",
"(",
")",
"numobj_copy",
".",
"merge_from",
"(",
"numobj",
")",
"national_number",
"=",
"numobj",
".",
"national_number",
"while",
"(",
"not",
"is_valid_number",
"(",
"numobj_copy",
")",
")",
":",
"national_number",
"=",
"(",
"national_number",
"//",
"10",
")",
"numobj_copy",
".",
"national_number",
"=",
"national_number",
"validation_result",
"=",
"is_possible_number_with_reason",
"(",
"numobj_copy",
")",
"if",
"(",
"(",
"validation_result",
"==",
"ValidationResult",
".",
"TOO_SHORT",
")",
"or",
"(",
"national_number",
"==",
"0",
")",
")",
":",
"return",
"False",
"numobj",
".",
"national_number",
"=",
"national_number",
"return",
"True"
] |
truncate a number object that is too long .
|
train
| true
|
13,161
|
def disable_dhcp():
    current = network()
    if (current['Network Settings']['DHCP_ENABLE']['VALUE'] == 'N'):
        return True
    _xml = '<RIBCL VERSION="2.0">\n <LOGIN USER_LOGIN="adminname" PASSWORD="password">\n <RIB_INFO MODE="write">\n <MOD_NETWORK_SETTINGS>\n <DHCP_ENABLE value="No"/>\n </MOD_NETWORK_SETTINGS>\n </RIB_INFO>\n </LOGIN>\n </RIBCL>'
    return __execute_cmd('Disable_DHCP', _xml)
|
[
"def",
"disable_dhcp",
"(",
")",
":",
"current",
"=",
"network",
"(",
")",
"if",
"(",
"current",
"[",
"'Network Settings'",
"]",
"[",
"'DHCP_ENABLE'",
"]",
"[",
"'VALUE'",
"]",
"==",
"'N'",
")",
":",
"return",
"True",
"_xml",
"=",
"'<RIBCL VERSION=\"2.0\">\\n <LOGIN USER_LOGIN=\"adminname\" PASSWORD=\"password\">\\n <RIB_INFO MODE=\"write\">\\n <MOD_NETWORK_SETTINGS>\\n <DHCP_ENABLE value=\"No\"/>\\n </MOD_NETWORK_SETTINGS>\\n </RIB_INFO>\\n </LOGIN>\\n </RIBCL>'",
"return",
"__execute_cmd",
"(",
"'Disable_DHCP'",
",",
"_xml",
")"
] |
disable dhcp cli example: .
|
train
| false
|
13,162
|
def reload_loadbalancers(group, load_balancers, exclude=None):
    exclude = (exclude or [])
    id_list = grouputils.get_member_refids(group, exclude=exclude)
    for (name, lb) in six.iteritems(load_balancers):
        props = copy.copy(lb.properties.data)
        if ('Instances' in lb.properties_schema):
            props['Instances'] = id_list
        elif ('members' in lb.properties_schema):
            props['members'] = id_list
        else:
            raise exception.Error((_("Unsupported resource '%s' in LoadBalancerNames") % name))
        lb_defn = rsrc_defn.ResourceDefinition(lb.name, lb.type(), properties=props, metadata=lb.t.metadata(), deletion_policy=lb.t.deletion_policy())
        scheduler.TaskRunner(lb.update, lb_defn)()
|
[
"def",
"reload_loadbalancers",
"(",
"group",
",",
"load_balancers",
",",
"exclude",
"=",
"None",
")",
":",
"exclude",
"=",
"(",
"exclude",
"or",
"[",
"]",
")",
"id_list",
"=",
"grouputils",
".",
"get_member_refids",
"(",
"group",
",",
"exclude",
"=",
"exclude",
")",
"for",
"(",
"name",
",",
"lb",
")",
"in",
"six",
".",
"iteritems",
"(",
"load_balancers",
")",
":",
"props",
"=",
"copy",
".",
"copy",
"(",
"lb",
".",
"properties",
".",
"data",
")",
"if",
"(",
"'Instances'",
"in",
"lb",
".",
"properties_schema",
")",
":",
"props",
"[",
"'Instances'",
"]",
"=",
"id_list",
"elif",
"(",
"'members'",
"in",
"lb",
".",
"properties_schema",
")",
":",
"props",
"[",
"'members'",
"]",
"=",
"id_list",
"else",
":",
"raise",
"exception",
".",
"Error",
"(",
"(",
"_",
"(",
"\"Unsupported resource '%s' in LoadBalancerNames\"",
")",
"%",
"name",
")",
")",
"lb_defn",
"=",
"rsrc_defn",
".",
"ResourceDefinition",
"(",
"lb",
".",
"name",
",",
"lb",
".",
"type",
"(",
")",
",",
"properties",
"=",
"props",
",",
"metadata",
"=",
"lb",
".",
"t",
".",
"metadata",
"(",
")",
",",
"deletion_policy",
"=",
"lb",
".",
"t",
".",
"deletion_policy",
"(",
")",
")",
"scheduler",
".",
"TaskRunner",
"(",
"lb",
".",
"update",
",",
"lb_defn",
")",
"(",
")"
] |
notify the loadbalancer to reload its config .
|
train
| false
|
13,164
|
def powerset(iterable, include_empty=True):
    s = list(iterable)
    i = chain.from_iterable((combinations(s, r) for r in range((len(s) + 1))))
    if (not include_empty):
        next(i)
    return i
|
[
"def",
"powerset",
"(",
"iterable",
",",
"include_empty",
"=",
"True",
")",
":",
"s",
"=",
"list",
"(",
"iterable",
")",
"i",
"=",
"chain",
".",
"from_iterable",
"(",
"(",
"combinations",
"(",
"s",
",",
"r",
")",
"for",
"r",
"in",
"range",
"(",
"(",
"len",
"(",
"s",
")",
"+",
"1",
")",
")",
")",
")",
"if",
"(",
"not",
"include_empty",
")",
":",
"next",
"(",
"i",
")",
"return",
"i"
] |
generates all subsets of a set or sequence u .
|
train
| false
|
13,165
|
def load_opts_from_mrjob_confs(runner_alias, conf_paths=None):
    if (conf_paths is None):
        return load_opts_from_mrjob_conf(runner_alias)
    else:
        already_loaded = []
        results = []
        for path in reversed(conf_paths):
            results = (load_opts_from_mrjob_conf(runner_alias, path, already_loaded=already_loaded) + results)
        return results
|
[
"def",
"load_opts_from_mrjob_confs",
"(",
"runner_alias",
",",
"conf_paths",
"=",
"None",
")",
":",
"if",
"(",
"conf_paths",
"is",
"None",
")",
":",
"return",
"load_opts_from_mrjob_conf",
"(",
"runner_alias",
")",
"else",
":",
"already_loaded",
"=",
"[",
"]",
"results",
"=",
"[",
"]",
"for",
"path",
"in",
"reversed",
"(",
"conf_paths",
")",
":",
"results",
"=",
"(",
"load_opts_from_mrjob_conf",
"(",
"runner_alias",
",",
"path",
",",
"already_loaded",
"=",
"already_loaded",
")",
"+",
"results",
")",
"return",
"results"
] |
load a list of dictionaries representing the options in a given list of mrjob config files for a specific runner .
|
train
| false
|
13,166
|
def instance_metadata_delete(context, instance_uuid, key):
    IMPL.instance_metadata_delete(context, instance_uuid, key)
|
[
"def",
"instance_metadata_delete",
"(",
"context",
",",
"instance_uuid",
",",
"key",
")",
":",
"IMPL",
".",
"instance_metadata_delete",
"(",
"context",
",",
"instance_uuid",
",",
"key",
")"
] |
delete the given metadata item .
|
train
| false
|
13,169
|
def zpkfreqd(z, p, k, worN=None):
    if ((worN is None) or isinstance(worN, int)):
        N = (worN or 512)
        ws = [((mpmath.pi * mpmath.mpf(j)) / N) for j in range(N)]
    else:
        ws = worN
    h = []
    for wk in ws:
        zm1 = mpmath.exp((1j * wk))
        numer = _prod([(zm1 - t) for t in z])
        denom = _prod([(zm1 - t) for t in p])
        hk = ((k * numer) / denom)
        h.append(hk)
    return (ws, h)
|
[
"def",
"zpkfreqd",
"(",
"z",
",",
"p",
",",
"k",
",",
"worN",
"=",
"None",
")",
":",
"if",
"(",
"(",
"worN",
"is",
"None",
")",
"or",
"isinstance",
"(",
"worN",
",",
"int",
")",
")",
":",
"N",
"=",
"(",
"worN",
"or",
"512",
")",
"ws",
"=",
"[",
"(",
"(",
"mpmath",
".",
"pi",
"*",
"mpmath",
".",
"mpf",
"(",
"j",
")",
")",
"/",
"N",
")",
"for",
"j",
"in",
"range",
"(",
"N",
")",
"]",
"else",
":",
"ws",
"=",
"worN",
"h",
"=",
"[",
"]",
"for",
"wk",
"in",
"ws",
":",
"zm1",
"=",
"mpmath",
".",
"exp",
"(",
"(",
"1j",
"*",
"wk",
")",
")",
"numer",
"=",
"_prod",
"(",
"[",
"(",
"zm1",
"-",
"t",
")",
"for",
"t",
"in",
"z",
"]",
")",
"denom",
"=",
"_prod",
"(",
"[",
"(",
"zm1",
"-",
"t",
")",
"for",
"t",
"in",
"p",
"]",
")",
"hk",
"=",
"(",
"(",
"k",
"*",
"numer",
")",
"/",
"denom",
")",
"h",
".",
"append",
"(",
"hk",
")",
"return",
"(",
"ws",
",",
"h",
")"
] |
frequency response of a filter in zpk format .
|
train
| false
|
13,170
|
def plot_layout(layout, show=True):
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
    ax.set_xticks([])
    ax.set_yticks([])
    pos = [((p[0] + (p[2] / 2.0)), (p[1] + (p[3] / 2.0))) for p in layout.pos]
    (pos, outlines) = _check_outlines(pos, 'head')
    _draw_outlines(ax, outlines)
    for (ii, (this_pos, ch_id)) in enumerate(zip(pos, layout.names)):
        ax.annotate(ch_id, xy=this_pos[:2], horizontalalignment='center', verticalalignment='center', size='x-small')
    plt_show(show)
    return fig
|
[
"def",
"plot_layout",
"(",
"layout",
",",
"show",
"=",
"True",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"fig",
".",
"subplots_adjust",
"(",
"left",
"=",
"0",
",",
"bottom",
"=",
"0",
",",
"right",
"=",
"1",
",",
"top",
"=",
"1",
",",
"wspace",
"=",
"None",
",",
"hspace",
"=",
"None",
")",
"ax",
".",
"set_xticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"pos",
"=",
"[",
"(",
"(",
"p",
"[",
"0",
"]",
"+",
"(",
"p",
"[",
"2",
"]",
"/",
"2.0",
")",
")",
",",
"(",
"p",
"[",
"1",
"]",
"+",
"(",
"p",
"[",
"3",
"]",
"/",
"2.0",
")",
")",
")",
"for",
"p",
"in",
"layout",
".",
"pos",
"]",
"(",
"pos",
",",
"outlines",
")",
"=",
"_check_outlines",
"(",
"pos",
",",
"'head'",
")",
"_draw_outlines",
"(",
"ax",
",",
"outlines",
")",
"for",
"(",
"ii",
",",
"(",
"this_pos",
",",
"ch_id",
")",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"pos",
",",
"layout",
".",
"names",
")",
")",
":",
"ax",
".",
"annotate",
"(",
"ch_id",
",",
"xy",
"=",
"this_pos",
"[",
":",
"2",
"]",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"size",
"=",
"'x-small'",
")",
"plt_show",
"(",
"show",
")",
"return",
"fig"
] |
plot the sensor positions .
|
train
| false
|
13,172
|
def initialize_events(article_generator):
    del events[:]
    localized_events.clear()
|
[
"def",
"initialize_events",
"(",
"article_generator",
")",
":",
"del",
"events",
"[",
":",
"]",
"localized_events",
".",
"clear",
"(",
")"
] |
clears the events list before generating articles to properly support plugins with multiple generation passes like i18n_subsites .
|
train
| false
|
13,173
|
def stroke_path(path, pen):
    stroker = QPainterPathStroker()
    stroker.setCapStyle(pen.capStyle())
    stroker.setJoinStyle(pen.joinStyle())
    stroker.setMiterLimit(pen.miterLimit())
    stroker.setWidth(max(pen.widthF(), 1e-09))
    return stroker.createStroke(path)
|
[
"def",
"stroke_path",
"(",
"path",
",",
"pen",
")",
":",
"stroker",
"=",
"QPainterPathStroker",
"(",
")",
"stroker",
".",
"setCapStyle",
"(",
"pen",
".",
"capStyle",
"(",
")",
")",
"stroker",
".",
"setJoinStyle",
"(",
"pen",
".",
"joinStyle",
"(",
")",
")",
"stroker",
".",
"setMiterLimit",
"(",
"pen",
".",
"miterLimit",
"(",
")",
")",
"stroker",
".",
"setWidth",
"(",
"max",
"(",
"pen",
".",
"widthF",
"(",
")",
",",
"1e-09",
")",
")",
"return",
"stroker",
".",
"createStroke",
"(",
"path",
")"
] |
create a qpainterpath stroke from the path drawn with pen .
|
train
| false
|
13,174
|
def run_ninja():
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    if ((not settings.IS_WINDOWS) and (not settings.IS_MAC_OS)):
        try:
            import ctypes
            libc = ctypes.CDLL('libc.so.6')
            procname = 'ninja-ide'
            libc.prctl(15, ('%s\x00' % procname), 0, 0, 0)
        except:
            print "The process couldn't be renamed'"
    (filenames, projects_path, extra_plugins, linenos, log_level, log_file) = cliparser.parse()
    resources.create_home_dir_structure()
    from ninja_ide.tools.logger import NinjaLogger
    NinjaLogger.argparse(log_level, log_file)
    settings.load_settings()
    app = QApplication(sys.argv)
    from ninja_ide import gui
    gui.start_ide(app, filenames, projects_path, extra_plugins, linenos)
    sys.exit(app.exec_())
|
[
"def",
"run_ninja",
"(",
")",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIG_DFL",
")",
"if",
"(",
"(",
"not",
"settings",
".",
"IS_WINDOWS",
")",
"and",
"(",
"not",
"settings",
".",
"IS_MAC_OS",
")",
")",
":",
"try",
":",
"import",
"ctypes",
"libc",
"=",
"ctypes",
".",
"CDLL",
"(",
"'libc.so.6'",
")",
"procname",
"=",
"'ninja-ide'",
"libc",
".",
"prctl",
"(",
"15",
",",
"(",
"'%s\\x00'",
"%",
"procname",
")",
",",
"0",
",",
"0",
",",
"0",
")",
"except",
":",
"print",
"\"The process couldn't be renamed'\"",
"(",
"filenames",
",",
"projects_path",
",",
"extra_plugins",
",",
"linenos",
",",
"log_level",
",",
"log_file",
")",
"=",
"cliparser",
".",
"parse",
"(",
")",
"resources",
".",
"create_home_dir_structure",
"(",
")",
"from",
"ninja_ide",
".",
"tools",
".",
"logger",
"import",
"NinjaLogger",
"NinjaLogger",
".",
"argparse",
"(",
"log_level",
",",
"log_file",
")",
"settings",
".",
"load_settings",
"(",
")",
"app",
"=",
"QApplication",
"(",
"sys",
".",
"argv",
")",
"from",
"ninja_ide",
"import",
"gui",
"gui",
".",
"start_ide",
"(",
"app",
",",
"filenames",
",",
"projects_path",
",",
"extra_plugins",
",",
"linenos",
")",
"sys",
".",
"exit",
"(",
"app",
".",
"exec_",
"(",
")",
")"
] |
first obtain the execution args and create the resources folder .
|
train
| false
|
13,176
|
def tenant_list(profile=None, **connection_args):
    kstone = auth(profile, **connection_args)
    ret = {}
    for tenant in getattr(kstone, _TENANTS, None).list():
        ret[tenant.name] = dict(((value, getattr(tenant, value)) for value in dir(tenant) if ((not value.startswith('_')) and isinstance(getattr(tenant, value), (six.text_type, dict, bool, str)))))
    return ret
|
[
"def",
"tenant_list",
"(",
"profile",
"=",
"None",
",",
"**",
"connection_args",
")",
":",
"kstone",
"=",
"auth",
"(",
"profile",
",",
"**",
"connection_args",
")",
"ret",
"=",
"{",
"}",
"for",
"tenant",
"in",
"getattr",
"(",
"kstone",
",",
"_TENANTS",
",",
"None",
")",
".",
"list",
"(",
")",
":",
"ret",
"[",
"tenant",
".",
"name",
"]",
"=",
"dict",
"(",
"(",
"(",
"value",
",",
"getattr",
"(",
"tenant",
",",
"value",
")",
")",
"for",
"value",
"in",
"dir",
"(",
"tenant",
")",
"if",
"(",
"(",
"not",
"value",
".",
"startswith",
"(",
"'_'",
")",
")",
"and",
"isinstance",
"(",
"getattr",
"(",
"tenant",
",",
"value",
")",
",",
"(",
"six",
".",
"text_type",
",",
"dict",
",",
"bool",
",",
"str",
")",
")",
")",
")",
")",
"return",
"ret"
] |
return a list of available tenants cli example: .
|
train
| true
|
13,180
|
def SearchClients(query_str, token=None, limit=1000):
    client_schema = aff4.AFF4Object.classes['VFSGRRClient'].SchemaCls
    index = client_index.CreateClientIndex(token=token)
    client_list = index.LookupClients([query_str])
    result_set = aff4.FACTORY.MultiOpen(client_list, token=token)
    results = []
    for result in result_set:
        results.append((result, str(result.Get(client_schema.HOSTNAME)), str(result.Get(client_schema.OS_VERSION)), str(result.Get(client_schema.PING))))
        if (len(results) >= limit):
            break
    return results
|
[
"def",
"SearchClients",
"(",
"query_str",
",",
"token",
"=",
"None",
",",
"limit",
"=",
"1000",
")",
":",
"client_schema",
"=",
"aff4",
".",
"AFF4Object",
".",
"classes",
"[",
"'VFSGRRClient'",
"]",
".",
"SchemaCls",
"index",
"=",
"client_index",
".",
"CreateClientIndex",
"(",
"token",
"=",
"token",
")",
"client_list",
"=",
"index",
".",
"LookupClients",
"(",
"[",
"query_str",
"]",
")",
"result_set",
"=",
"aff4",
".",
"FACTORY",
".",
"MultiOpen",
"(",
"client_list",
",",
"token",
"=",
"token",
")",
"results",
"=",
"[",
"]",
"for",
"result",
"in",
"result_set",
":",
"results",
".",
"append",
"(",
"(",
"result",
",",
"str",
"(",
"result",
".",
"Get",
"(",
"client_schema",
".",
"HOSTNAME",
")",
")",
",",
"str",
"(",
"result",
".",
"Get",
"(",
"client_schema",
".",
"OS_VERSION",
")",
")",
",",
"str",
"(",
"result",
".",
"Get",
"(",
"client_schema",
".",
"PING",
")",
")",
")",
")",
"if",
"(",
"len",
"(",
"results",
")",
">=",
"limit",
")",
":",
"break",
"return",
"results"
] |
search indexes for clients .
|
train
| true
|
13,181
|
def setDefaultClock(clock):
    global defaultClock
    defaultClock = clock
|
[
"def",
"setDefaultClock",
"(",
"clock",
")",
":",
"global",
"defaultClock",
"defaultClock",
"=",
"clock"
] |
set the default clock to be used to reference all logging times .
|
train
| false
|
13,182
|
def _bit_size(number):
    if (number < 0):
        raise ValueError(('Only nonnegative numbers possible: %s' % number))
    if (number == 0):
        return 0
    bits = 0
    while number:
        bits += 1
        number >>= 1
    return bits
|
[
"def",
"_bit_size",
"(",
"number",
")",
":",
"if",
"(",
"number",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Only nonnegative numbers possible: %s'",
"%",
"number",
")",
")",
"if",
"(",
"number",
"==",
"0",
")",
":",
"return",
"0",
"bits",
"=",
"0",
"while",
"number",
":",
"bits",
"+=",
"1",
"number",
">>=",
"1",
"return",
"bits"
] |
returns the number of bits required to hold a specific long number .
|
train
| false
|
13,183
|
def gmean(a, axis=0, dtype=None):
    if (not isinstance(a, np.ndarray)):
        log_a = np.log(np.array(a, dtype=dtype))
    elif dtype:
        if isinstance(a, np.ma.MaskedArray):
            log_a = np.log(np.ma.asarray(a, dtype=dtype))
        else:
            log_a = np.log(np.asarray(a, dtype=dtype))
    else:
        log_a = np.log(a)
    return np.exp(log_a.mean(axis=axis))
|
[
"def",
"gmean",
"(",
"a",
",",
"axis",
"=",
"0",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"a",
",",
"np",
".",
"ndarray",
")",
")",
":",
"log_a",
"=",
"np",
".",
"log",
"(",
"np",
".",
"array",
"(",
"a",
",",
"dtype",
"=",
"dtype",
")",
")",
"elif",
"dtype",
":",
"if",
"isinstance",
"(",
"a",
",",
"np",
".",
"ma",
".",
"MaskedArray",
")",
":",
"log_a",
"=",
"np",
".",
"log",
"(",
"np",
".",
"ma",
".",
"asarray",
"(",
"a",
",",
"dtype",
"=",
"dtype",
")",
")",
"else",
":",
"log_a",
"=",
"np",
".",
"log",
"(",
"np",
".",
"asarray",
"(",
"a",
",",
"dtype",
"=",
"dtype",
")",
")",
"else",
":",
"log_a",
"=",
"np",
".",
"log",
"(",
"a",
")",
"return",
"np",
".",
"exp",
"(",
"log_a",
".",
"mean",
"(",
"axis",
"=",
"axis",
")",
")"
] |
compute the geometric mean along the specified axis .
|
train
| false
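a minimal sketch of the same computation with plain numpy (assuming numpy is available): the geometric mean is the exp of the mean of logs, so the geometric mean of 1, 2, 4 is 2.
import numpy as np
# geometric mean via exp(mean(log(a))), as in the gmean snippet above
a = np.array([1.0, 2.0, 4.0])
assert np.isclose(np.exp(np.log(a).mean()), 2.0)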
|
13,184
|
def load_inline_module():
from cffi import FFI
defs = '\n double _numba_test_sin(double x);\n double _numba_test_cos(double x);\n double _numba_test_funcptr(double (*func)(double));\n '
ffi = FFI()
ffi.cdef(defs)
from numba import _helperlib
return (ffi, ffi.dlopen(_helperlib.__file__))
|
[
"def",
"load_inline_module",
"(",
")",
":",
"from",
"cffi",
"import",
"FFI",
"defs",
"=",
"'\\n double _numba_test_sin(double x);\\n double _numba_test_cos(double x);\\n double _numba_test_funcptr(double (*func)(double));\\n '",
"ffi",
"=",
"FFI",
"(",
")",
"ffi",
".",
"cdef",
"(",
"defs",
")",
"from",
"numba",
"import",
"_helperlib",
"return",
"(",
"ffi",
",",
"ffi",
".",
"dlopen",
"(",
"_helperlib",
".",
"__file__",
")",
")"
] |
create an inline module .
|
train
| false
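a hedged self-contained sketch of the same idea (assuming numba and cffi are installed; _numba_test_sin is one of the symbols declared in the snippet's own cdef string):
import math
from cffi import FFI
from numba import _helperlib
ffi = FFI()
ffi.cdef('double _numba_test_sin(double x);')
lib = ffi.dlopen(_helperlib.__file__)  # load numba's helper extension module
assert abs(lib._numba_test_sin(0.5) - math.sin(0.5)) < 1e-12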
|
13,185
|
def send_query_failure_email(recipient_id, query_id, query_params):
email_subject = ('Query %s has failed' % query_id)
email_body_template = 'Hi %s,<br>Your query with id %s has failed due to error during execution. Please check the query parameters and submit query again.<br><br>Thanks!<br><br>Best wishes,<br>The Oppia Team<br><br>%s'
recipient_user_settings = user_services.get_user_settings(recipient_id)
email_body = (email_body_template % (recipient_user_settings.username, query_id, EMAIL_FOOTER.value))
_send_email(recipient_id, feconf.SYSTEM_COMMITTER_ID, feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION, email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
admin_email_subject = 'Query job has failed.'
admin_email_body_template = 'Query job with %s query id has failed in its execution.\nQuery parameters:\n\n'
for key in sorted(query_params):
admin_email_body_template += ('%s: %s\n' % (key, query_params[key]))
admin_email_body = (admin_email_body_template % query_id)
send_mail_to_admin(admin_email_subject, admin_email_body)
|
[
"def",
"send_query_failure_email",
"(",
"recipient_id",
",",
"query_id",
",",
"query_params",
")",
":",
"email_subject",
"=",
"(",
"'Query %s has failed'",
"%",
"query_id",
")",
"email_body_template",
"=",
"'Hi %s,<br>Your query with id %s has failed due to error during execution. Please check the query parameters and submit query again.<br><br>Thanks!<br><br>Best wishes,<br>The Oppia Team<br><br>%s'",
"recipient_user_settings",
"=",
"user_services",
".",
"get_user_settings",
"(",
"recipient_id",
")",
"email_body",
"=",
"(",
"email_body_template",
"%",
"(",
"recipient_user_settings",
".",
"username",
",",
"query_id",
",",
"EMAIL_FOOTER",
".",
"value",
")",
")",
"_send_email",
"(",
"recipient_id",
",",
"feconf",
".",
"SYSTEM_COMMITTER_ID",
",",
"feconf",
".",
"EMAIL_INTENT_QUERY_STATUS_NOTIFICATION",
",",
"email_subject",
",",
"email_body",
",",
"feconf",
".",
"NOREPLY_EMAIL_ADDRESS",
")",
"admin_email_subject",
"=",
"'Query job has failed.'",
"admin_email_body_template",
"=",
"'Query job with %s query id has failed in its execution.\\nQuery parameters:\\n\\n'",
"for",
"key",
"in",
"sorted",
"(",
"query_params",
")",
":",
"admin_email_body_template",
"+=",
"(",
"'%s: %s\\n'",
"%",
"(",
"key",
",",
"query_params",
"[",
"key",
"]",
")",
")",
"admin_email_body",
"=",
"(",
"admin_email_body_template",
"%",
"query_id",
")",
"send_mail_to_admin",
"(",
"admin_email_subject",
",",
"admin_email_body",
")"
] |
send an email to the initiator of a failed bulk email query .
|
train
| false
|
13,186
|
def adjust(color, attribute, percent):
(r, g, b, a, type) = parse_color(color)
(r, g, b) = hsl_to_rgb(*_adjust(rgb_to_hsl(r, g, b), attribute, percent))
return unparse_color(r, g, b, a, type)
|
[
"def",
"adjust",
"(",
"color",
",",
"attribute",
",",
"percent",
")",
":",
"(",
"r",
",",
"g",
",",
"b",
",",
"a",
",",
"type",
")",
"=",
"parse_color",
"(",
"color",
")",
"(",
"r",
",",
"g",
",",
"b",
")",
"=",
"hsl_to_rgb",
"(",
"*",
"_adjust",
"(",
"rgb_to_hsl",
"(",
"r",
",",
"g",
",",
"b",
")",
",",
"attribute",
",",
"percent",
")",
")",
"return",
"unparse_color",
"(",
"r",
",",
"g",
",",
"b",
",",
"a",
",",
"type",
")"
] |
adjust an attribute of color by a percent .
|
train
| true
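parse_color, rgb_to_hsl and friends are project helpers not shown here; a self-contained analogue using the standard library's colorsys (which uses hls ordering rather than hsl):
import colorsys
# lighten a color by bumping the lightness channel, as adjust() does via hsl
r, g, b = 0.2, 0.4, 0.6
h, l, s = colorsys.rgb_to_hls(r, g, b)
l = min(1.0, l + 0.10)  # raise lightness by 10 percentage points
r2, g2, b2 = colorsys.hls_to_rgb(h, l, s)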
|
13,188
|
def _other_endian(typ):
try:
return getattr(typ, _OTHER_ENDIAN)
except AttributeError:
if (type(typ) == _array_type):
return (_other_endian(typ._type_) * typ._length_)
raise TypeError(('This type does not support other endian: %s' % typ))
|
[
"def",
"_other_endian",
"(",
"typ",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"typ",
",",
"_OTHER_ENDIAN",
")",
"except",
"AttributeError",
":",
"if",
"(",
"type",
"(",
"typ",
")",
"==",
"_array_type",
")",
":",
"return",
"(",
"_other_endian",
"(",
"typ",
".",
"_type_",
")",
"*",
"typ",
".",
"_length_",
")",
"raise",
"TypeError",
"(",
"(",
"'This type does not support other endian: %s'",
"%",
"typ",
")",
")"
] |
return the type with the other byte order .
|
train
| true
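a hedged illustration of what the _OTHER_ENDIAN attribute lookup resolves to in ctypes (the attribute name depends on the host byte order):
import ctypes
import sys
# on a little-endian host the 'other endian' variant is __ctype_be__, and vice versa
attr = '__ctype_be__' if sys.byteorder == 'little' else '__ctype_le__'
other = getattr(ctypes.c_uint32, attr)
print(other)  # the opposite-endian variant of c_uint32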
|
13,189
|
def load_json(filename):
try:
if PY2:
args = 'rb'
else:
args = 'r'
with open(filename, args) as fid:
data = json.load(fid)
return (data, None)
except Exception as err:
return (None, str(err))
|
[
"def",
"load_json",
"(",
"filename",
")",
":",
"try",
":",
"if",
"PY2",
":",
"args",
"=",
"'rb'",
"else",
":",
"args",
"=",
"'r'",
"with",
"open",
"(",
"filename",
",",
"args",
")",
"as",
"fid",
":",
"data",
"=",
"json",
".",
"load",
"(",
"fid",
")",
"return",
"(",
"data",
",",
"None",
")",
"except",
"Exception",
"as",
"err",
":",
"return",
"(",
"None",
",",
"str",
"(",
"err",
")",
")"
] |
load data from a json file given its filename .
|
train
| true
|
13,190
|
def readlineAvailable():
return (readline._readline is not None)
|
[
"def",
"readlineAvailable",
"(",
")",
":",
"return",
"(",
"readline",
".",
"_readline",
"is",
"not",
"None",
")"
] |
check if the readline is available .
|
train
| false
|
13,194
|
def format_isodate(value, timezone=None):
if (not value):
return ''
dt = dateutil.parser.parse(str(value))
if timezone:
dt = dt.astimezone(TZ(timezone))
value = format_dt(dt)
return value
|
[
"def",
"format_isodate",
"(",
"value",
",",
"timezone",
"=",
"None",
")",
":",
"if",
"(",
"not",
"value",
")",
":",
"return",
"''",
"dt",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"str",
"(",
"value",
")",
")",
"if",
"timezone",
":",
"dt",
"=",
"dt",
".",
"astimezone",
"(",
"TZ",
"(",
"timezone",
")",
")",
"value",
"=",
"format_dt",
"(",
"dt",
")",
"return",
"value"
] |
make an iso date time string human friendly .
|
train
| false
|
13,196
|
def organizations_with(username, number=(-1), etag=None):
return gh.organizations_with(username, number, etag)
|
[
"def",
"organizations_with",
"(",
"username",
",",
"number",
"=",
"(",
"-",
"1",
")",
",",
"etag",
"=",
"None",
")",
":",
"return",
"gh",
".",
"organizations_with",
"(",
"username",
",",
"number",
",",
"etag",
")"
] |
list the organizations with username as a member .
|
train
| false
|
13,197
|
def check_session_cookie(response):
for cookie in request.cookies:
if ((cookie == u'ckan') and (not getattr(g, u'user', None))):
is_valid_cookie_data = False
for (key, value) in session.items():
if ((not key.startswith(u'_')) and value):
is_valid_cookie_data = True
break
if (not is_valid_cookie_data):
if session.id:
log.debug(u'No valid session data - deleting session')
log.debug(u'Session: %r', session.items())
session.delete()
else:
log.debug(u'No session id - deleting session cookie')
response.delete_cookie(cookie)
elif ((cookie == u'auth_tkt') and (not session.id)):
response.delete_cookie(cookie)
return response
|
[
"def",
"check_session_cookie",
"(",
"response",
")",
":",
"for",
"cookie",
"in",
"request",
".",
"cookies",
":",
"if",
"(",
"(",
"cookie",
"==",
"u'ckan'",
")",
"and",
"(",
"not",
"getattr",
"(",
"g",
",",
"u'user'",
",",
"None",
")",
")",
")",
":",
"is_valid_cookie_data",
"=",
"False",
"for",
"(",
"key",
",",
"value",
")",
"in",
"session",
".",
"items",
"(",
")",
":",
"if",
"(",
"(",
"not",
"key",
".",
"startswith",
"(",
"u'_'",
")",
")",
"and",
"value",
")",
":",
"is_valid_cookie_data",
"=",
"True",
"break",
"if",
"(",
"not",
"is_valid_cookie_data",
")",
":",
"if",
"session",
".",
"id",
":",
"log",
".",
"debug",
"(",
"u'No valid session data - deleting session'",
")",
"log",
".",
"debug",
"(",
"u'Session: %r'",
",",
"session",
".",
"items",
"(",
")",
")",
"session",
".",
"delete",
"(",
")",
"else",
":",
"log",
".",
"debug",
"(",
"u'No session id - deleting session cookie'",
")",
"response",
".",
"delete_cookie",
"(",
"cookie",
")",
"elif",
"(",
"(",
"cookie",
"==",
"u'auth_tkt'",
")",
"and",
"(",
"not",
"session",
".",
"id",
")",
")",
":",
"response",
".",
"delete_cookie",
"(",
"cookie",
")",
"return",
"response"
] |
the cookies for auth and session are separate .
|
train
| false
|
13,198
|
def custom(command, user=None, conf_file=None, bin_env=None):
ret = __salt__['cmd.run_all'](_ctl_cmd(command, None, conf_file, bin_env), runas=user, python_shell=False)
return _get_return(ret)
|
[
"def",
"custom",
"(",
"command",
",",
"user",
"=",
"None",
",",
"conf_file",
"=",
"None",
",",
"bin_env",
"=",
"None",
")",
":",
"ret",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"_ctl_cmd",
"(",
"command",
",",
"None",
",",
"conf_file",
",",
"bin_env",
")",
",",
"runas",
"=",
"user",
",",
"python_shell",
"=",
"False",
")",
"return",
"_get_return",
"(",
"ret",
")"
] |
return a custom composite of status data and info for this minion .
|
train
| true
|
13,200
|
def turn_on_internet(verbose=False):
global INTERNET_OFF
global _orig_opener
if (not INTERNET_OFF):
return
INTERNET_OFF = False
if verbose:
print(u'Internet access enabled')
urllib.request.install_opener(_orig_opener)
socket.create_connection = socket_create_connection
socket.socket.bind = socket_bind
socket.socket.connect = socket_connect
return socket
|
[
"def",
"turn_on_internet",
"(",
"verbose",
"=",
"False",
")",
":",
"global",
"INTERNET_OFF",
"global",
"_orig_opener",
"if",
"(",
"not",
"INTERNET_OFF",
")",
":",
"return",
"INTERNET_OFF",
"=",
"False",
"if",
"verbose",
":",
"print",
"(",
"u'Internet access enabled'",
")",
"urllib",
".",
"request",
".",
"install_opener",
"(",
"_orig_opener",
")",
"socket",
".",
"create_connection",
"=",
"socket_create_connection",
"socket",
".",
"socket",
".",
"bind",
"=",
"socket_bind",
"socket",
".",
"socket",
".",
"connect",
"=",
"socket_connect",
"return",
"socket"
] |
restore internet access .
|
train
| false
|
13,201
|
def quickshift(image, ratio=1.0, kernel_size=5, max_dist=10, return_tree=False, sigma=0, convert2lab=True, random_seed=42):
image = img_as_float(np.atleast_3d(image))
if convert2lab:
if (image.shape[2] != 3):
raise ValueError('Only RGB images can be converted to Lab space.')
image = rgb2lab(image)
if (kernel_size < 1):
raise ValueError('`kernel_size` should be >= 1.')
image = ndi.gaussian_filter(image, [sigma, sigma, 0])
image = np.ascontiguousarray((image * ratio))
segment_mask = _quickshift_cython(image, kernel_size=kernel_size, max_dist=max_dist, return_tree=return_tree, random_seed=random_seed)
return segment_mask
|
[
"def",
"quickshift",
"(",
"image",
",",
"ratio",
"=",
"1.0",
",",
"kernel_size",
"=",
"5",
",",
"max_dist",
"=",
"10",
",",
"return_tree",
"=",
"False",
",",
"sigma",
"=",
"0",
",",
"convert2lab",
"=",
"True",
",",
"random_seed",
"=",
"42",
")",
":",
"image",
"=",
"img_as_float",
"(",
"np",
".",
"atleast_3d",
"(",
"image",
")",
")",
"if",
"convert2lab",
":",
"if",
"(",
"image",
".",
"shape",
"[",
"2",
"]",
"!=",
"3",
")",
":",
"ValueError",
"(",
"'Only RGB images can be converted to Lab space.'",
")",
"image",
"=",
"rgb2lab",
"(",
"image",
")",
"if",
"(",
"kernel_size",
"<",
"1",
")",
":",
"raise",
"ValueError",
"(",
"'`kernel_size` should be >= 1.'",
")",
"image",
"=",
"ndi",
".",
"gaussian_filter",
"(",
"image",
",",
"[",
"sigma",
",",
"sigma",
",",
"0",
"]",
")",
"image",
"=",
"np",
".",
"ascontiguousarray",
"(",
"(",
"image",
"*",
"ratio",
")",
")",
"segment_mask",
"=",
"_quickshift_cython",
"(",
"image",
",",
"kernel_size",
"=",
"kernel_size",
",",
"max_dist",
"=",
"max_dist",
",",
"return_tree",
"=",
"return_tree",
",",
"random_seed",
"=",
"random_seed",
")",
"return",
"segment_mask"
] |
segments image using quickshift clustering in color-(x, y) space .
|
train
| false
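a hedged usage sketch (assuming scikit-image is installed, which exports this function as skimage.segmentation.quickshift):
import numpy as np
from skimage.segmentation import quickshift
# label each pixel of a small random rgb image with a segment id
img = np.random.rand(32, 32, 3)
labels = quickshift(img, ratio=0.5, kernel_size=3, max_dist=6)
print(labels.shape, labels.max() + 1)  # (32, 32) and the number of segments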
|
13,202
|
def from_html(html_code, **kwargs):
parser = TableHandler(**kwargs)
parser.feed(html_code)
return parser.tables
|
[
"def",
"from_html",
"(",
"html_code",
",",
"**",
"kwargs",
")",
":",
"parser",
"=",
"TableHandler",
"(",
"**",
"kwargs",
")",
"parser",
".",
"feed",
"(",
"html_code",
")",
"return",
"parser",
".",
"tables"
] |
generates a list of prettytables from a string of html code .
|
train
| true
|
13,204
|
def get_key(postfix=''):
domain = getattr(settings, 'CURRENT_DOMAIN', 'default')
key = ('treeio_%s_chat_%s' % (domain, postfix))
return key
|
[
"def",
"get_key",
"(",
"postfix",
"=",
"''",
")",
":",
"domain",
"=",
"getattr",
"(",
"settings",
",",
"'CURRENT_DOMAIN'",
",",
"'default'",
")",
"key",
"=",
"(",
"'treeio_%s_chat_%s'",
"%",
"(",
"domain",
",",
"postfix",
")",
")",
"return",
"key"
] |
returns the cache key for chat data on the current domain .
|
train
| false
|
13,205
|
def assert_config_change(actual_result, expected_result):
change_diffs = assert_config_change_dict(actual_result, expected_result)
for file_change in change_diffs.values():
for line_change in file_change:
if (len(line_change) != 0):
return False
return True
|
[
"def",
"assert_config_change",
"(",
"actual_result",
",",
"expected_result",
")",
":",
"change_diffs",
"=",
"assert_config_change_dict",
"(",
"actual_result",
",",
"expected_result",
")",
"for",
"file_change",
"in",
"change_diffs",
".",
"values",
"(",
")",
":",
"for",
"line_change",
"in",
"file_change",
":",
"if",
"(",
"len",
"(",
"line_change",
")",
"!=",
"0",
")",
":",
"return",
"False",
"return",
"True"
] |
wrapper of the upper method returning boolean true if no config changes were detected .
|
train
| false
|
13,206
|
@scope.define
def callpipe1(fn_list, arg):
for f in fn_list:
arg = f(arg)
return arg
|
[
"@",
"scope",
".",
"define",
"def",
"callpipe1",
"(",
"fn_list",
",",
"arg",
")",
":",
"for",
"f",
"in",
"fn_list",
":",
"arg",
"=",
"f",
"(",
"arg",
")",
"return",
"arg"
] |
fn_list: a list of lambdas that return either pyll expressions or python values ; arg: the argument to the first function in the list ; return: fn_list[-1](...(fn_list[0](arg))) .
|
train
| false
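a self-contained sketch of the same left-to-right pipeline, independent of the pyll scope machinery:
# apply a list of functions in order: fn_list[-1](...(fn_list[0](arg)))
def callpipe(fn_list, arg):
    for f in fn_list:
        arg = f(arg)
    return arg
assert callpipe([lambda x: x + 1, lambda x: x * 2], 3) == 8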
|
13,208
|
def test_aligned_mem():
a = arange(804, dtype=np.uint8)
z = np.frombuffer(a.data, offset=4, count=100, dtype=float)
z.shape = (10, 10)
eig(z, overwrite_a=True)
eig(z.T, overwrite_a=True)
|
[
"def",
"test_aligned_mem",
"(",
")",
":",
"a",
"=",
"arange",
"(",
"804",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"z",
"=",
"np",
".",
"frombuffer",
"(",
"a",
".",
"data",
",",
"offset",
"=",
"4",
",",
"count",
"=",
"100",
",",
"dtype",
"=",
"float",
")",
"z",
".",
"shape",
"=",
"(",
"10",
",",
"10",
")",
"eig",
"(",
"z",
",",
"overwrite_a",
"=",
"True",
")",
"eig",
"(",
"z",
".",
"T",
",",
"overwrite_a",
"=",
"True",
")"
] |
check linalg works with non-aligned memory .
|
train
| false
|
13,209
|
def group_ranges(ranges):
return foldr(_combine, ranges, ())
|
[
"def",
"group_ranges",
"(",
"ranges",
")",
":",
"return",
"foldr",
"(",
"_combine",
",",
"ranges",
",",
"(",
")",
")"
] |
group any overlapping ranges into a single range .
|
train
| false
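foldr and _combine are project helpers not shown here; a self-contained sketch that merges overlapping (start, end) ranges with a sort-and-sweep instead:
# merge overlapping ranges into maximal non-overlapping ones
def merge_ranges(ranges):
    merged = []
    for start, end in sorted(ranges):
        if merged and start <= merged[-1][1]:
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged
assert merge_ranges([(5, 9), (1, 3), (2, 6)]) == [(1, 9)]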
|
13,210
|
def magma():
rc(u'image', cmap=u'magma')
im = gci()
if (im is not None):
im.set_cmap(cm.magma)
|
[
"def",
"magma",
"(",
")",
":",
"rc",
"(",
"u'image'",
",",
"cmap",
"=",
"u'magma'",
")",
"im",
"=",
"gci",
"(",
")",
"if",
"(",
"im",
"is",
"not",
"None",
")",
":",
"im",
".",
"set_cmap",
"(",
"cm",
".",
"magma",
")"
] |
set the default colormap to magma and apply to current image if any .
|
train
| false
|
13,211
|
def s_size(block_name, offset=0, length=4, endian='<', format='binary', inclusive=False, signed=False, math=None, fuzzable=False, name=None):
if (block_name in blocks.CURRENT.block_stack):
raise sex.SullyRuntimeError('CAN NOT ADD A SIZE FOR A BLOCK CURRENTLY IN THE STACK')
size = blocks.size(block_name, blocks.CURRENT, offset, length, endian, format, inclusive, signed, math, fuzzable, name)
blocks.CURRENT.push(size)
|
[
"def",
"s_size",
"(",
"block_name",
",",
"offset",
"=",
"0",
",",
"length",
"=",
"4",
",",
"endian",
"=",
"'<'",
",",
"format",
"=",
"'binary'",
",",
"inclusive",
"=",
"False",
",",
"signed",
"=",
"False",
",",
"math",
"=",
"None",
",",
"fuzzable",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"if",
"(",
"block_name",
"in",
"blocks",
".",
"CURRENT",
".",
"block_stack",
")",
":",
"raise",
"sex",
".",
"SullyRuntimeError",
"(",
"'CAN NOT ADD A SIZE FOR A BLOCK CURRENTLY IN THE STACK'",
")",
"size",
"=",
"blocks",
".",
"size",
"(",
"block_name",
",",
"blocks",
".",
"CURRENT",
",",
"offset",
",",
"length",
",",
"endian",
",",
"format",
",",
"inclusive",
",",
"signed",
",",
"math",
",",
"fuzzable",
",",
"name",
")",
"blocks",
".",
"CURRENT",
".",
"push",
"(",
"size",
")"
] |
create a sizer block bound to the block with the specified name .
|
train
| false
|
13,213
|
def deduplication(arg):
if (type(arg) is types.ListType):
return list(set(arg))
elif (type(arg) is types.TupleType):
return tuple(set(arg))
return arg
|
[
"def",
"deduplication",
"(",
"arg",
")",
":",
"if",
"(",
"type",
"(",
"arg",
")",
"is",
"types",
".",
"ListType",
")",
":",
"return",
"list",
"(",
"set",
"(",
"arg",
")",
")",
"elif",
"(",
"type",
"(",
"arg",
")",
"is",
"types",
".",
"TupleType",
")",
":",
"return",
"tuple",
"(",
"set",
"(",
"arg",
")",
")",
"return",
"arg"
] |
deduplicate the arg .
|
train
| false
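note that set() discards element order; a self-contained order-preserving alternative (python 3.7+ dict ordering):
# deduplicate while keeping first-seen order, via dict key uniqueness
def dedup_ordered(seq):
    return list(dict.fromkeys(seq))
assert dedup_ordered([3, 1, 3, 2, 1]) == [3, 1, 2]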
|
13,215
|
def create_warning(path, error_message, skip_file=True):
print_string = 'warning: '
if skip_file:
print_string = (((print_string + 'Skipping file ') + path) + '. ')
print_string = (print_string + error_message)
warning_message = WarningResult(message=print_string, error=False, warning=True)
return warning_message
|
[
"def",
"create_warning",
"(",
"path",
",",
"error_message",
",",
"skip_file",
"=",
"True",
")",
":",
"print_string",
"=",
"'warning: '",
"if",
"skip_file",
":",
"print_string",
"=",
"(",
"(",
"(",
"print_string",
"+",
"'Skipping file '",
")",
"+",
"path",
")",
"+",
"'. '",
")",
"print_string",
"=",
"(",
"print_string",
"+",
"error_message",
")",
"warning_message",
"=",
"WarningResult",
"(",
"message",
"=",
"print_string",
",",
"error",
"=",
"False",
",",
"warning",
"=",
"True",
")",
"return",
"warning_message"
] |
this creates a printtask for whenever a warning is to be thrown .
|
train
| false
|
13,218
|
def remove_hop_by_hop_headers(headers):
headers[:] = [(key, value) for (key, value) in headers if (not is_hop_by_hop_header(key))]
|
[
"def",
"remove_hop_by_hop_headers",
"(",
"headers",
")",
":",
"headers",
"[",
":",
"]",
"=",
"[",
"(",
"key",
",",
"value",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"headers",
"if",
"(",
"not",
"is_hop_by_hop_header",
"(",
"key",
")",
")",
"]"
] |
remove all http/1.1 hop-by-hop headers .
|
train
| true
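is_hop_by_hop_header is a helper not shown here; a self-contained sketch against the rfc 2616 hop-by-hop header set:
# in-place filter of hop-by-hop headers from a list of (key, value) pairs
HOP_BY_HOP = frozenset(['connection', 'keep-alive', 'proxy-authenticate',
                        'proxy-authorization', 'te', 'trailer',
                        'transfer-encoding', 'upgrade'])
def remove_hop_by_hop(headers):
    headers[:] = [(k, v) for k, v in headers if k.lower() not in HOP_BY_HOP]
hdrs = [('Connection', 'close'), ('Content-Type', 'text/html')]
remove_hop_by_hop(hdrs)
assert hdrs == [('Content-Type', 'text/html')]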
|
13,219
|
def make_blob_public(bucket_name, blob_name):
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_name)
blob.make_public()
print 'Blob {} is publicly accessible at {}'.format(blob.name, blob.public_url)
|
[
"def",
"make_blob_public",
"(",
"bucket_name",
",",
"blob_name",
")",
":",
"storage_client",
"=",
"storage",
".",
"Client",
"(",
")",
"bucket",
"=",
"storage_client",
".",
"get_bucket",
"(",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"blob",
"(",
"blob_name",
")",
"blob",
".",
"make_public",
"(",
")",
"print",
"'Blob {} is publicly accessible at {}'",
".",
"format",
"(",
"blob",
".",
"name",
",",
"blob",
".",
"public_url",
")"
] |
makes a blob publicly accessible .
|
train
| false
|
13,220
|
def _fd(f):
return (f.fileno() if hasattr(f, 'fileno') else f)
|
[
"def",
"_fd",
"(",
"f",
")",
":",
"return",
"(",
"f",
".",
"fileno",
"(",
")",
"if",
"hasattr",
"(",
"f",
",",
"'fileno'",
")",
"else",
"f",
")"
] |
get a filedescriptor from something which could be a file or an fd .
|
train
| false
|
13,221
|
def make_full_schema(data, schema):
flattened_schema = flatten_schema(schema)
key_combinations = get_all_key_combinations(data, flattened_schema)
full_schema = {}
for combination in key_combinations:
sub_schema = schema
for key in combination[::2]:
sub_schema = sub_schema[key]
for (key, value) in sub_schema.iteritems():
if isinstance(value, list):
full_schema[(combination + (key,))] = value
return full_schema
|
[
"def",
"make_full_schema",
"(",
"data",
",",
"schema",
")",
":",
"flattened_schema",
"=",
"flatten_schema",
"(",
"schema",
")",
"key_combinations",
"=",
"get_all_key_combinations",
"(",
"data",
",",
"flattened_schema",
")",
"full_schema",
"=",
"{",
"}",
"for",
"combination",
"in",
"key_combinations",
":",
"sub_schema",
"=",
"schema",
"for",
"key",
"in",
"combination",
"[",
":",
":",
"2",
"]",
":",
"sub_schema",
"=",
"sub_schema",
"[",
"key",
"]",
"for",
"(",
"key",
",",
"value",
")",
"in",
"sub_schema",
".",
"iteritems",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"full_schema",
"[",
"(",
"combination",
"+",
"(",
"key",
",",
")",
")",
"]",
"=",
"value",
"return",
"full_schema"
] |
make schema by getting all valid combinations and making sure that all keys are available .
|
train
| false
|
13,222
|
def write_theme():
return theme_xml
|
[
"def",
"write_theme",
"(",
")",
":",
"return",
"theme_xml"
] |
write the theme xml .
|
train
| false
|
13,223
|
@lower_builtin(types.NumberClass, types.Any)
def number_constructor(context, builder, sig, args):
if isinstance(sig.return_type, types.Array):
impl = context.get_function(np.array, sig)
return impl(builder, args)
else:
[val] = args
[valty] = sig.args
return context.cast(builder, val, valty, sig.return_type)
|
[
"@",
"lower_builtin",
"(",
"types",
".",
"NumberClass",
",",
"types",
".",
"Any",
")",
"def",
"number_constructor",
"(",
"context",
",",
"builder",
",",
"sig",
",",
"args",
")",
":",
"if",
"isinstance",
"(",
"sig",
".",
"return_type",
",",
"types",
".",
"Array",
")",
":",
"impl",
"=",
"context",
".",
"get_function",
"(",
"np",
".",
"array",
",",
"sig",
")",
"return",
"impl",
"(",
"builder",
",",
"args",
")",
"else",
":",
"[",
"val",
"]",
"=",
"args",
"[",
"valty",
"]",
"=",
"sig",
".",
"args",
"return",
"context",
".",
"cast",
"(",
"builder",
",",
"val",
",",
"valty",
",",
"sig",
".",
"return_type",
")"
] |
call a number class .
|
train
| false
|
13,224
|
def test_bootstrap_random_seed():
data = rs.randn(50)
seed = 42
boots1 = algo.bootstrap(data, random_seed=seed)
boots2 = algo.bootstrap(data, random_seed=seed)
assert_array_equal(boots1, boots2)
|
[
"def",
"test_bootstrap_random_seed",
"(",
")",
":",
"data",
"=",
"rs",
".",
"randn",
"(",
"50",
")",
"seed",
"=",
"42",
"boots1",
"=",
"algo",
".",
"bootstrap",
"(",
"data",
",",
"random_seed",
"=",
"seed",
")",
"boots2",
"=",
"algo",
".",
"bootstrap",
"(",
"data",
",",
"random_seed",
"=",
"seed",
")",
"assert_array_equal",
"(",
"boots1",
",",
"boots2",
")"
] |
test that we can get reproducible resamples by seeding the rng .
|
train
| false
|
13,225
|
def translation(language):
global _translations
t = _translations.get(language, None)
if (t is not None):
return t
from django.conf import settings
globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), u'locale')
def _fetch(lang, fallback=None):
global _translations
res = _translations.get(lang, None)
if (res is not None):
return res
loc = to_locale(lang)
def _translation(path):
try:
t = gettext_module.translation(u'django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError:
return None
res = _translation(globalpath)
base_lang = (lambda x: x.split(u'-', 1)[0])
if (base_lang(lang) in [base_lang(trans) for trans in _translations]):
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if (t is not None):
if (res is None):
return t
else:
res.merge(t)
return res
for appname in reversed(settings.INSTALLED_APPS):
app = import_module(appname)
apppath = os.path.join(os.path.dirname(upath(app.__file__)), u'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
for localepath in reversed(settings.LOCALE_PATHS):
if os.path.isdir(localepath):
res = _merge(localepath)
if (res is None):
if (fallback is not None):
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
|
[
"def",
"translation",
"(",
"language",
")",
":",
"global",
"_translations",
"t",
"=",
"_translations",
".",
"get",
"(",
"language",
",",
"None",
")",
"if",
"(",
"t",
"is",
"not",
"None",
")",
":",
"return",
"t",
"from",
"django",
".",
"conf",
"import",
"settings",
"globalpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"upath",
"(",
"sys",
".",
"modules",
"[",
"settings",
".",
"__module__",
"]",
".",
"__file__",
")",
")",
",",
"u'locale'",
")",
"def",
"_fetch",
"(",
"lang",
",",
"fallback",
"=",
"None",
")",
":",
"global",
"_translations",
"res",
"=",
"_translations",
".",
"get",
"(",
"lang",
",",
"None",
")",
"if",
"(",
"res",
"is",
"not",
"None",
")",
":",
"return",
"res",
"loc",
"=",
"to_locale",
"(",
"lang",
")",
"def",
"_translation",
"(",
"path",
")",
":",
"try",
":",
"t",
"=",
"gettext_module",
".",
"translation",
"(",
"u'django'",
",",
"path",
",",
"[",
"loc",
"]",
",",
"DjangoTranslation",
")",
"t",
".",
"set_language",
"(",
"lang",
")",
"return",
"t",
"except",
"IOError",
":",
"return",
"None",
"res",
"=",
"_translation",
"(",
"globalpath",
")",
"base_lang",
"=",
"(",
"lambda",
"x",
":",
"x",
".",
"split",
"(",
"u'-'",
",",
"1",
")",
"[",
"0",
"]",
")",
"if",
"(",
"base_lang",
"(",
"lang",
")",
"in",
"[",
"base_lang",
"(",
"trans",
")",
"for",
"trans",
"in",
"_translations",
"]",
")",
":",
"res",
".",
"_info",
"=",
"res",
".",
"_info",
".",
"copy",
"(",
")",
"res",
".",
"_catalog",
"=",
"res",
".",
"_catalog",
".",
"copy",
"(",
")",
"def",
"_merge",
"(",
"path",
")",
":",
"t",
"=",
"_translation",
"(",
"path",
")",
"if",
"(",
"t",
"is",
"not",
"None",
")",
":",
"if",
"(",
"res",
"is",
"None",
")",
":",
"return",
"t",
"else",
":",
"res",
".",
"merge",
"(",
"t",
")",
"return",
"res",
"for",
"appname",
"in",
"reversed",
"(",
"settings",
".",
"INSTALLED_APPS",
")",
":",
"app",
"=",
"import_module",
"(",
"appname",
")",
"apppath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"upath",
"(",
"app",
".",
"__file__",
")",
")",
",",
"u'locale'",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"apppath",
")",
":",
"res",
"=",
"_merge",
"(",
"apppath",
")",
"for",
"localepath",
"in",
"reversed",
"(",
"settings",
".",
"LOCALE_PATHS",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"localepath",
")",
":",
"res",
"=",
"_merge",
"(",
"localepath",
")",
"if",
"(",
"res",
"is",
"None",
")",
":",
"if",
"(",
"fallback",
"is",
"not",
"None",
")",
":",
"res",
"=",
"fallback",
"else",
":",
"return",
"gettext_module",
".",
"NullTranslations",
"(",
")",
"_translations",
"[",
"lang",
"]",
"=",
"res",
"return",
"res",
"default_translation",
"=",
"_fetch",
"(",
"settings",
".",
"LANGUAGE_CODE",
")",
"current_translation",
"=",
"_fetch",
"(",
"language",
",",
"fallback",
"=",
"default_translation",
")",
"return",
"current_translation"
] |
create an array with a translation matrix .
|
train
| false
|
13,227
|
def search_ohloh_project(project_name):
results = get_ohloh_api_request(u'https://www.ohloh.net/p.xml', api_key, {u'query': project_name, u'sort': u'relevance'})
if (results.find(u'result/project/id') is None):
raise Exception((u'Could not find project %s on Ohloh' % project_name))
project = results.find(u'result/project')
return {u'id': project.findtext(u'id'), u'name': project.findtext(u'name'), u'description': project.findtext(u'description'), u'analysis': project.findtext(u'analysis_id'), u'tags': [tag.text for tag in project.iterfind(u'tags/tag')]}
|
[
"def",
"search_ohloh_project",
"(",
"project_name",
")",
":",
"results",
"=",
"get_ohloh_api_request",
"(",
"u'https://www.ohloh.net/p.xml'",
",",
"api_key",
",",
"{",
"u'query'",
":",
"project_name",
",",
"u'sort'",
":",
"u'relevance'",
"}",
")",
"if",
"(",
"results",
".",
"find",
"(",
"u'result/project/id'",
")",
"is",
"None",
")",
":",
"raise",
"Exception",
"(",
"(",
"u'Could not find project %s on Ohloh'",
"%",
"project_name",
")",
")",
"project",
"=",
"results",
".",
"find",
"(",
"u'result/project'",
")",
"return",
"{",
"u'id'",
":",
"project",
".",
"findtext",
"(",
"u'id'",
")",
",",
"u'name'",
":",
"project",
".",
"findtext",
"(",
"u'name'",
")",
",",
"u'description'",
":",
"project",
".",
"findtext",
"(",
"u'description'",
")",
",",
"u'analysis'",
":",
"project",
".",
"findtext",
"(",
"u'analysis_id'",
")",
",",
"u'tags'",
":",
"[",
"tag",
".",
"text",
"for",
"tag",
"in",
"project",
".",
"iterfind",
"(",
"u'tags/tag'",
")",
"]",
"}"
] |
searches for an ohloh project by name .
|
train
| false
|
13,228
|
def convert_rgb_to_bokehrgba(img_data, downsample=1):
if (img_data.dtype != np.uint8):
raise NotImplementedError
if (img_data.ndim != 3):
raise NotImplementedError
img_data = img_data[::(- downsample), ::downsample, :]
(img_h, img_w, C) = img_data.shape
bokeh_img = np.dstack([img_data, (255 * np.ones((img_h, img_w), np.uint8))])
final_image = bokeh_img.reshape(img_h, (img_w * (C + 1))).view(np.uint32)
return final_image
|
[
"def",
"convert_rgb_to_bokehrgba",
"(",
"img_data",
",",
"downsample",
"=",
"1",
")",
":",
"if",
"(",
"img_data",
".",
"dtype",
"!=",
"np",
".",
"uint8",
")",
":",
"raise",
"NotImplementedError",
"if",
"(",
"img_data",
".",
"ndim",
"!=",
"3",
")",
":",
"raise",
"NotImplementedError",
"img_data",
"=",
"img_data",
"[",
":",
":",
"(",
"-",
"downsample",
")",
",",
":",
":",
"downsample",
",",
":",
"]",
"(",
"img_h",
",",
"img_w",
",",
"C",
")",
"=",
"img_data",
".",
"shape",
"bokeh_img",
"=",
"np",
".",
"dstack",
"(",
"[",
"img_data",
",",
"(",
"255",
"*",
"np",
".",
"ones",
"(",
"(",
"img_h",
",",
"img_w",
")",
",",
"np",
".",
"uint8",
")",
")",
"]",
")",
"final_image",
"=",
"bokeh_img",
".",
"reshape",
"(",
"img_h",
",",
"(",
"img_w",
"*",
"(",
"C",
"+",
"1",
")",
")",
")",
".",
"view",
"(",
"np",
".",
"uint32",
")",
"return",
"final_image"
] |
convert rgb image to two-dimensional array of rgba values .
|
train
| false
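a self-contained check of the uint8-to-packed-uint32 rgba trick used above (channel order in the packed word depends on host endianness):
import numpy as np
# pack h x w x 4 uint8 rgba into h x w uint32, as the snippet does via view()
rgb = np.zeros((2, 2, 3), np.uint8)
rgba = np.dstack([rgb, 255 * np.ones((2, 2), np.uint8)])
packed = rgba.reshape(2, 2 * 4).view(np.uint32)
assert packed.shape == (2, 2)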
|
13,229
|
def dict_with_str_keys(dct, encoding='utf-8'):
return {force_str(key, encoding): value for (key, value) in six.iteritems(dct)}
|
[
"def",
"dict_with_str_keys",
"(",
"dct",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"return",
"{",
"force_str",
"(",
"key",
",",
"encoding",
")",
":",
"value",
"for",
"(",
"key",
",",
"value",
")",
"in",
"six",
".",
"iteritems",
"(",
"dct",
")",
"}"
] |
applies force_str on the keys of a dict .
|
train
| false
|
13,231
|
def test_xml_filters_round_trip():
plot = Bar()
plot.add('A', [60, 75, 80, 78, 83, 90])
plot.add('B', [92, 87, 81, 73, 68, 55])
before = plot.render()
plot.add_xml_filter((lambda T: T))
after = plot.render()
assert (before == after)
|
[
"def",
"test_xml_filters_round_trip",
"(",
")",
":",
"plot",
"=",
"Bar",
"(",
")",
"plot",
".",
"add",
"(",
"'A'",
",",
"[",
"60",
",",
"75",
",",
"80",
",",
"78",
",",
"83",
",",
"90",
"]",
")",
"plot",
".",
"add",
"(",
"'B'",
",",
"[",
"92",
",",
"87",
",",
"81",
",",
"73",
",",
"68",
",",
"55",
"]",
")",
"before",
"=",
"plot",
".",
"render",
"(",
")",
"plot",
".",
"add_xml_filter",
"(",
"(",
"lambda",
"T",
":",
"T",
")",
")",
"after",
"=",
"plot",
".",
"render",
"(",
")",
"assert",
"(",
"before",
"==",
"after",
")"
] |
ensure doing nothing does nothing .
|
train
| false
|
13,233
|
def rescan_if_missed_blocks(store):
(bad,) = store.selectrow('\n SELECT COUNT(1)\n FROM block\n LEFT JOIN chain_candidate USING (block_id)\n WHERE chain_id IS NULL\n ')
if (bad > 0):
store.sql('UPDATE datadir SET blkfile_number = 1, blkfile_offset = 0')
|
[
"def",
"rescan_if_missed_blocks",
"(",
"store",
")",
":",
"(",
"bad",
",",
")",
"=",
"store",
".",
"selectrow",
"(",
"'\\n SELECT COUNT(1)\\n FROM block\\n LEFT JOIN chain_candidate USING (block_id)\\n WHERE chain_id IS NULL\\n '",
")",
"if",
"(",
"bad",
">",
"0",
")",
":",
"store",
".",
"sql",
"(",
"'UPDATE datadir SET blkfile_number = 1, blkfile_offset = 0'",
")"
] |
due to a bug .
|
train
| false
|
13,234
|
@verbose
def lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01, label=None, pick_ori=None, return_generator=False, picks=None, rank=None, verbose=None):
_check_reference(epochs)
info = epochs.info
tmin = epochs.times[0]
picks = _setup_picks(picks, info, forward, noise_cov)
data = epochs.get_data()[:, picks, :]
stcs = _apply_lcmv(data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov, data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank, pick_ori=pick_ori)
if (not return_generator):
stcs = [s for s in stcs]
return stcs
|
[
"@",
"verbose",
"def",
"lcmv_epochs",
"(",
"epochs",
",",
"forward",
",",
"noise_cov",
",",
"data_cov",
",",
"reg",
"=",
"0.01",
",",
"label",
"=",
"None",
",",
"pick_ori",
"=",
"None",
",",
"return_generator",
"=",
"False",
",",
"picks",
"=",
"None",
",",
"rank",
"=",
"None",
",",
"verbose",
"=",
"None",
")",
":",
"_check_reference",
"(",
"epochs",
")",
"info",
"=",
"epochs",
".",
"info",
"tmin",
"=",
"epochs",
".",
"times",
"[",
"0",
"]",
"picks",
"=",
"_setup_picks",
"(",
"picks",
",",
"info",
",",
"forward",
",",
"noise_cov",
")",
"data",
"=",
"epochs",
".",
"get_data",
"(",
")",
"[",
":",
",",
"picks",
",",
":",
"]",
"stcs",
"=",
"_apply_lcmv",
"(",
"data",
"=",
"data",
",",
"info",
"=",
"info",
",",
"tmin",
"=",
"tmin",
",",
"forward",
"=",
"forward",
",",
"noise_cov",
"=",
"noise_cov",
",",
"data_cov",
"=",
"data_cov",
",",
"reg",
"=",
"reg",
",",
"label",
"=",
"label",
",",
"picks",
"=",
"picks",
",",
"rank",
"=",
"rank",
",",
"pick_ori",
"=",
"pick_ori",
")",
"if",
"(",
"not",
"return_generator",
")",
":",
"stcs",
"=",
"[",
"s",
"for",
"s",
"in",
"stcs",
"]",
"return",
"stcs"
] |
linearly constrained minimum variance beamformer .
|
train
| false
|
13,235
|
def showInfo(text, parent=False, help='', type='info', title='Anki'):
if (parent is False):
parent = (aqt.mw.app.activeWindow() or aqt.mw)
if (type == 'warning'):
icon = QMessageBox.Warning
elif (type == 'critical'):
icon = QMessageBox.Critical
else:
icon = QMessageBox.Information
mb = QMessageBox(parent)
mb.setText(text)
mb.setIcon(icon)
mb.setWindowModality(Qt.WindowModal)
mb.setWindowTitle(title)
b = mb.addButton(QMessageBox.Ok)
b.setDefault(True)
if help:
b = mb.addButton(QMessageBox.Help)
b.clicked.connect((lambda : openHelp(help)))
b.setAutoDefault(False)
return mb.exec_()
|
[
"def",
"showInfo",
"(",
"text",
",",
"parent",
"=",
"False",
",",
"help",
"=",
"''",
",",
"type",
"=",
"'info'",
",",
"title",
"=",
"'Anki'",
")",
":",
"if",
"(",
"parent",
"is",
"False",
")",
":",
"parent",
"=",
"(",
"aqt",
".",
"mw",
".",
"app",
".",
"activeWindow",
"(",
")",
"or",
"aqt",
".",
"mw",
")",
"if",
"(",
"type",
"==",
"'warning'",
")",
":",
"icon",
"=",
"QMessageBox",
".",
"Warning",
"elif",
"(",
"type",
"==",
"'critical'",
")",
":",
"icon",
"=",
"QMessageBox",
".",
"Critical",
"else",
":",
"icon",
"=",
"QMessageBox",
".",
"Information",
"mb",
"=",
"QMessageBox",
"(",
"parent",
")",
"mb",
".",
"setText",
"(",
"text",
")",
"mb",
".",
"setIcon",
"(",
"icon",
")",
"mb",
".",
"setWindowModality",
"(",
"Qt",
".",
"WindowModal",
")",
"mb",
".",
"setWindowTitle",
"(",
"title",
")",
"b",
"=",
"mb",
".",
"addButton",
"(",
"QMessageBox",
".",
"Ok",
")",
"b",
".",
"setDefault",
"(",
"True",
")",
"if",
"help",
":",
"b",
"=",
"mb",
".",
"addButton",
"(",
"QMessageBox",
".",
"Help",
")",
"b",
".",
"clicked",
".",
"connect",
"(",
"(",
"lambda",
":",
"openHelp",
"(",
"help",
")",
")",
")",
"b",
".",
"setAutoDefault",
"(",
"False",
")",
"return",
"mb",
".",
"exec_",
"(",
")"
] |
show a small info window with an ok button .
|
train
| false
|
13,236
|
def _unit_file_changed(name):
return ("'systemctl daemon-reload'" in _systemctl_status(name)['stdout'].lower())
|
[
"def",
"_unit_file_changed",
"(",
"name",
")",
":",
"return",
"(",
"\"'systemctl daemon-reload'\"",
"in",
"_systemctl_status",
"(",
"name",
")",
"[",
"'stdout'",
"]",
".",
"lower",
"(",
")",
")"
] |
returns true if systemctl reports that the unit file has changed .
|
train
| false
|
13,237
|
def print_all_links(res, rules):
r = []
for host in res:
host_name = _apply_rules(host, rules)
print ('%s::esxhostname=%s' % (host_name, host_name))
print ('%s::isesxhost=1' % host_name)
for vm in res[host]:
vm_name = _apply_rules(vm, rules)
print ('%s::vmname=%s' % (vm_name, vm_name))
print ('%s::isesxvm=1' % vm_name)
print ('%s::esxhost=%s' % (vm_name, host_name))
return r
|
[
"def",
"print_all_links",
"(",
"res",
",",
"rules",
")",
":",
"r",
"=",
"[",
"]",
"for",
"host",
"in",
"res",
":",
"host_name",
"=",
"_apply_rules",
"(",
"host",
",",
"rules",
")",
"print",
"(",
"'%s::esxhostname=%s'",
"%",
"(",
"host_name",
",",
"host_name",
")",
")",
"print",
"(",
"'%s::isesxhost=1'",
"%",
"host_name",
")",
"for",
"vm",
"in",
"res",
"[",
"host",
"]",
":",
"vm_name",
"=",
"_apply_rules",
"(",
"vm",
",",
"rules",
")",
"print",
"(",
"'%s::vmname=%s'",
"%",
"(",
"vm_name",
",",
"vm_name",
")",
")",
"print",
"(",
"'%s::isesxvm=1'",
"%",
"vm_name",
")",
"print",
"(",
"'%s::esxhost=%s'",
"%",
"(",
"vm_name",
",",
"host_name",
")",
")",
"return",
"r"
] |
create all tuples of the links for the hosts .
|
train
| false
|
13,238
|
def list_directories():
tenant = show_tenant()
tenant_id = tenant.get('href', '').split('/')[(-1)]
(status, result) = _query(action='tenants', command='{0}/directories'.format(tenant_id))
return result
|
[
"def",
"list_directories",
"(",
")",
":",
"tenant",
"=",
"show_tenant",
"(",
")",
"tenant_id",
"=",
"tenant",
".",
"get",
"(",
"'href'",
",",
"''",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"(",
"-",
"1",
")",
"]",
"(",
"status",
",",
"result",
")",
"=",
"_query",
"(",
"action",
"=",
"'tenants'",
",",
"command",
"=",
"'{0}/directories'",
".",
"format",
"(",
"tenant_id",
")",
")",
"return",
"result"
] |
show all directories .
|
train
| false
|
13,239
|
def makeExtension(*args, **kwargs):
return HeaderAnchorExtension(*args, **kwargs)
|
[
"def",
"makeExtension",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"HeaderAnchorExtension",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] |
make extension .
|
train
| false
|
13,240
|
def find_libs():
from pyamf.util import get_module
types = []
mapping = {}
for mod in ETREE_MODULES:
try:
etree = get_module(mod)
except ImportError:
continue
t = _get_etree_type(etree)
types.append(t)
mapping[t] = etree
return (tuple(types), mapping)
|
[
"def",
"find_libs",
"(",
")",
":",
"from",
"pyamf",
".",
"util",
"import",
"get_module",
"types",
"=",
"[",
"]",
"mapping",
"=",
"{",
"}",
"for",
"mod",
"in",
"ETREE_MODULES",
":",
"try",
":",
"etree",
"=",
"get_module",
"(",
"mod",
")",
"except",
"ImportError",
":",
"continue",
"t",
"=",
"_get_etree_type",
"(",
"etree",
")",
"types",
".",
"append",
"(",
"t",
")",
"mapping",
"[",
"t",
"]",
"=",
"etree",
"return",
"(",
"tuple",
"(",
"types",
")",
",",
"mapping",
")"
] |
run through ETREE_MODULES and find ElementTree implementations so that any type can be encoded .
|
train
| true
|
13,242
|
def notnull(values):
return (~ isnull(values))
|
[
"def",
"notnull",
"(",
"values",
")",
":",
"return",
"(",
"~",
"isnull",
"(",
"values",
")",
")"
] |
replacement for numpy.isfinite / -numpy.isnan which is suitable for use on object arrays .
|
train
| false
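a hedged equivalence sketch with pandas (assuming pandas is installed), where notnull is likewise the complement of isnull:
import numpy as np
import pandas as pd
# pandas exposes the same complement relationship as the snippet above
arr = np.array([1.0, np.nan, 3.0])
assert (pd.notnull(arr) == ~pd.isnull(arr)).all()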
|
13,243
|
def autoload_server(model, app_path='/', session_id=None, url='default'):
coords = _SessionCoordinates(dict(url=url, session_id=session_id, app_path=app_path))
elementid = make_id()
model_id = ''
if (model is not None):
model_id = model._id
if (model_id and (session_id is None)):
raise ValueError("A specific model was passed to autoload_server() but no session_id; this doesn't work because the server will generate a fresh session which won't have the model in it.")
src_path = (((coords.server_url + '/autoload.js') + '?bokeh-autoload-element=') + elementid)
if (coords.session_id_allowing_none is not None):
src_path = ((src_path + '&bokeh-session-id=') + session_id)
tag = AUTOLOAD_TAG.render(src_path=src_path, elementid=elementid, modelid=model_id)
return encode_utf8(tag)
|
[
"def",
"autoload_server",
"(",
"model",
",",
"app_path",
"=",
"'/'",
",",
"session_id",
"=",
"None",
",",
"url",
"=",
"'default'",
")",
":",
"coords",
"=",
"_SessionCoordinates",
"(",
"dict",
"(",
"url",
"=",
"url",
",",
"session_id",
"=",
"session_id",
",",
"app_path",
"=",
"app_path",
")",
")",
"elementid",
"=",
"make_id",
"(",
")",
"model_id",
"=",
"''",
"if",
"(",
"model",
"is",
"not",
"None",
")",
":",
"model_id",
"=",
"model",
".",
"_id",
"if",
"(",
"model_id",
"and",
"(",
"session_id",
"is",
"None",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"A specific model was passed to autoload_server() but no session_id; this doesn't work because the server will generate a fresh session which won't have the model in it.\"",
")",
"src_path",
"=",
"(",
"(",
"(",
"coords",
".",
"server_url",
"+",
"'/autoload.js'",
")",
"+",
"'?bokeh-autoload-element='",
")",
"+",
"elementid",
")",
"if",
"(",
"coords",
".",
"session_id_allowing_none",
"is",
"not",
"None",
")",
":",
"src_path",
"=",
"(",
"(",
"src_path",
"+",
"'&bokeh-session-id='",
")",
"+",
"session_id",
")",
"tag",
"=",
"AUTOLOAD_TAG",
".",
"render",
"(",
"src_path",
"=",
"src_path",
",",
"elementid",
"=",
"elementid",
",",
"modelid",
"=",
"model_id",
")",
"return",
"encode_utf8",
"(",
"tag",
")"
] |
return a script tag that embeds the given model from a bokeh server session .
|
train
| false
|
13,244
|
def is_object_mutable(context, object):
if context.is_admin:
return True
if (context.owner is None):
return False
return (object.namespace.owner == context.owner)
|
[
"def",
"is_object_mutable",
"(",
"context",
",",
"object",
")",
":",
"if",
"context",
".",
"is_admin",
":",
"return",
"True",
"if",
"(",
"context",
".",
"owner",
"is",
"None",
")",
":",
"return",
"False",
"return",
"(",
"object",
".",
"namespace",
".",
"owner",
"==",
"context",
".",
"owner",
")"
] |
return true if the object is mutable in this context .
|
train
| false
|
13,245
|
def _function_pprint(obj, p, cycle):
name = _safe_getattr(obj, '__qualname__', obj.__name__)
mod = obj.__module__
if (mod and (mod not in ('__builtin__', 'builtins', 'exceptions'))):
name = ((mod + '.') + name)
p.text(('<function %s>' % name))
|
[
"def",
"_function_pprint",
"(",
"obj",
",",
"p",
",",
"cycle",
")",
":",
"name",
"=",
"_safe_getattr",
"(",
"obj",
",",
"'__qualname__'",
",",
"obj",
".",
"__name__",
")",
"mod",
"=",
"obj",
".",
"__module__",
"if",
"(",
"mod",
"and",
"(",
"mod",
"not",
"in",
"(",
"'__builtin__'",
",",
"'builtins'",
",",
"'exceptions'",
")",
")",
")",
":",
"name",
"=",
"(",
"(",
"mod",
"+",
"'.'",
")",
"+",
"name",
")",
"p",
".",
"text",
"(",
"(",
"'<function %s>'",
"%",
"name",
")",
")"
] |
base pprint for all functions and builtin functions .
|
train
| false
|