id_within_dataset
int64 1
55.5k
| snippet
stringlengths 19
14.2k
| tokens
listlengths 6
1.63k
| nl
stringlengths 6
352
| split_within_dataset
stringclasses 1
value | is_duplicated
bool 2
classes |
|---|---|---|---|---|---|
11,391
|
def getSegmentsFromXIntersectionIndexes(xIntersectionIndexList, y):
    """Get endpoint segments at height y from the x intersection indexes."""
    return getSegmentsFromXIntersections(
        getXIntersectionsFromIntersections(xIntersectionIndexList), y)
|
[
"def",
"getSegmentsFromXIntersectionIndexes",
"(",
"xIntersectionIndexList",
",",
"y",
")",
":",
"xIntersections",
"=",
"getXIntersectionsFromIntersections",
"(",
"xIntersectionIndexList",
")",
"return",
"getSegmentsFromXIntersections",
"(",
"xIntersections",
",",
"y",
")"
] |
get endpoint segments from the x intersection indexes .
|
train
| false
|
11,392
|
def _check_merge_epochs(epochs_list):
if (len(set((tuple(epochs.event_id.items()) for epochs in epochs_list))) != 1):
raise NotImplementedError('Epochs with unequal values for event_id')
if (len(set((epochs.tmin for epochs in epochs_list))) != 1):
raise NotImplementedError('Epochs with unequal values for tmin')
if (len(set((epochs.tmax for epochs in epochs_list))) != 1):
raise NotImplementedError('Epochs with unequal values for tmax')
if (len(set((epochs.baseline for epochs in epochs_list))) != 1):
raise NotImplementedError('Epochs with unequal values for baseline')
|
[
"def",
"_check_merge_epochs",
"(",
"epochs_list",
")",
":",
"if",
"(",
"len",
"(",
"set",
"(",
"(",
"tuple",
"(",
"epochs",
".",
"event_id",
".",
"items",
"(",
")",
")",
"for",
"epochs",
"in",
"epochs_list",
")",
")",
")",
"!=",
"1",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Epochs with unequal values for event_id'",
")",
"if",
"(",
"len",
"(",
"set",
"(",
"(",
"epochs",
".",
"tmin",
"for",
"epochs",
"in",
"epochs_list",
")",
")",
")",
"!=",
"1",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Epochs with unequal values for tmin'",
")",
"if",
"(",
"len",
"(",
"set",
"(",
"(",
"epochs",
".",
"tmax",
"for",
"epochs",
"in",
"epochs_list",
")",
")",
")",
"!=",
"1",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Epochs with unequal values for tmax'",
")",
"if",
"(",
"len",
"(",
"set",
"(",
"(",
"epochs",
".",
"baseline",
"for",
"epochs",
"in",
"epochs_list",
")",
")",
")",
"!=",
"1",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Epochs with unequal values for baseline'",
")"
] |
aux function .
|
train
| false
|
11,393
|
@register.tag('localize')
def localize_tag(parser, token):
    """Handle {% localize [on|off] %} ... {% endlocalize %}.

    With no argument, localization is forced on; 'on'/'off' set it
    explicitly; anything else is a template syntax error.
    """
    bits = list(token.split_contents())
    if len(bits) == 1:
        use_l10n = True
    elif len(bits) > 2 or bits[1] not in ('on', 'off'):
        raise TemplateSyntaxError("%r argument should be 'on' or 'off'" % bits[0])
    else:
        use_l10n = bits[1] == 'on'
    nodelist = parser.parse(('endlocalize',))
    parser.delete_first_token()
    return LocalizeNode(nodelist, use_l10n)
|
[
"@",
"register",
".",
"tag",
"(",
"'localize'",
")",
"def",
"localize_tag",
"(",
"parser",
",",
"token",
")",
":",
"use_l10n",
"=",
"None",
"bits",
"=",
"list",
"(",
"token",
".",
"split_contents",
"(",
")",
")",
"if",
"(",
"len",
"(",
"bits",
")",
"==",
"1",
")",
":",
"use_l10n",
"=",
"True",
"elif",
"(",
"(",
"len",
"(",
"bits",
")",
">",
"2",
")",
"or",
"(",
"bits",
"[",
"1",
"]",
"not",
"in",
"(",
"'on'",
",",
"'off'",
")",
")",
")",
":",
"raise",
"TemplateSyntaxError",
"(",
"(",
"\"%r argument should be 'on' or 'off'\"",
"%",
"bits",
"[",
"0",
"]",
")",
")",
"else",
":",
"use_l10n",
"=",
"(",
"bits",
"[",
"1",
"]",
"==",
"'on'",
")",
"nodelist",
"=",
"parser",
".",
"parse",
"(",
"(",
"'endlocalize'",
",",
")",
")",
"parser",
".",
"delete_first_token",
"(",
")",
"return",
"LocalizeNode",
"(",
"nodelist",
",",
"use_l10n",
")"
] |
forces or prevents localization of values .
|
train
| false
|
11,395
|
def _thumbnail_div(subdir, full_dir, fname, snippet):
thumb = os.path.join(full_dir, 'images', 'thumb', (fname[:(-3)] + '.png'))
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append(('.. figure:: %s\n' % thumb))
out.append(' :scale: 50\n')
if link_name.startswith('._'):
link_name = link_name[2:]
if (full_dir != '.'):
out.append((' :target: ./%s/%s.html\n\n' % (full_dir, fname[:(-3)])))
else:
out.append((' :target: ./%s.html\n\n' % link_name[:(-3)]))
out.append((' :ref:`example_%s`\n\n\n\n\n' % ref_name))
return ''.join(out)
|
[
"def",
"_thumbnail_div",
"(",
"subdir",
",",
"full_dir",
",",
"fname",
",",
"snippet",
")",
":",
"thumb",
"=",
"os",
".",
"path",
".",
"join",
"(",
"full_dir",
",",
"'images'",
",",
"'thumb'",
",",
"(",
"fname",
"[",
":",
"(",
"-",
"3",
")",
"]",
"+",
"'.png'",
")",
")",
"link_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"full_dir",
",",
"fname",
")",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'_'",
")",
"ref_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"subdir",
",",
"fname",
")",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'_'",
")",
"if",
"ref_name",
".",
"startswith",
"(",
"'._'",
")",
":",
"ref_name",
"=",
"ref_name",
"[",
"2",
":",
"]",
"out",
"=",
"[",
"]",
"out",
".",
"append",
"(",
"(",
"'.. figure:: %s\\n'",
"%",
"thumb",
")",
")",
"out",
".",
"append",
"(",
"' :scale: 50\\n'",
")",
"if",
"link_name",
".",
"startswith",
"(",
"'._'",
")",
":",
"link_name",
"=",
"link_name",
"[",
"2",
":",
"]",
"if",
"(",
"full_dir",
"!=",
"'.'",
")",
":",
"out",
".",
"append",
"(",
"(",
"' :target: ./%s/%s.html\\n\\n'",
"%",
"(",
"full_dir",
",",
"fname",
"[",
":",
"(",
"-",
"3",
")",
"]",
")",
")",
")",
"else",
":",
"out",
".",
"append",
"(",
"(",
"' :target: ./%s.html\\n\\n'",
"%",
"link_name",
"[",
":",
"(",
"-",
"3",
")",
"]",
")",
")",
"out",
".",
"append",
"(",
"(",
"' :ref:`example_%s`\\n\\n\\n\\n\\n'",
"%",
"ref_name",
")",
")",
"return",
"''",
".",
"join",
"(",
"out",
")"
] |
generates rst to place a thumbnail in a gallery .
|
train
| false
|
11,396
|
def siScale(x, minVal=1e-25, allowUnicode=True):
    """Return the recommended (scale factor, SI prefix string) for x."""
    if isinstance(x, decimal.Decimal):
        x = float(x)
    try:
        if np.isnan(x) or np.isinf(x):
            return (1, '')
    except:
        # Diagnostic breadcrumb for non-numeric input before re-raising.
        print (x, type(x))
        raise
    if abs(x) < minVal:
        m = 0
        x = 0
    else:
        # Engineering exponent: power of 1000, clamped to the +/-9 range.
        m = int(np.clip(np.floor(np.log(abs(x)) / np.log(1000)), -9.0, 9.0))
    if m == 0:
        pref = ''
    elif m < -8 or m > 8:
        # Outside the SI prefix table: use an explicit exponent string.
        pref = 'e%d' % (m * 3)
    elif allowUnicode:
        pref = SI_PREFIXES[m + 8]
    else:
        pref = SI_PREFIXES_ASCII[m + 8]
    return (0.001 ** m, pref)
|
[
"def",
"siScale",
"(",
"x",
",",
"minVal",
"=",
"1e-25",
",",
"allowUnicode",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"decimal",
".",
"Decimal",
")",
":",
"x",
"=",
"float",
"(",
"x",
")",
"try",
":",
"if",
"(",
"np",
".",
"isnan",
"(",
"x",
")",
"or",
"np",
".",
"isinf",
"(",
"x",
")",
")",
":",
"return",
"(",
"1",
",",
"''",
")",
"except",
":",
"print",
"(",
"x",
",",
"type",
"(",
"x",
")",
")",
"raise",
"if",
"(",
"abs",
"(",
"x",
")",
"<",
"minVal",
")",
":",
"m",
"=",
"0",
"x",
"=",
"0",
"else",
":",
"m",
"=",
"int",
"(",
"np",
".",
"clip",
"(",
"np",
".",
"floor",
"(",
"(",
"np",
".",
"log",
"(",
"abs",
"(",
"x",
")",
")",
"/",
"np",
".",
"log",
"(",
"1000",
")",
")",
")",
",",
"(",
"-",
"9.0",
")",
",",
"9.0",
")",
")",
"if",
"(",
"m",
"==",
"0",
")",
":",
"pref",
"=",
"''",
"elif",
"(",
"(",
"m",
"<",
"(",
"-",
"8",
")",
")",
"or",
"(",
"m",
">",
"8",
")",
")",
":",
"pref",
"=",
"(",
"'e%d'",
"%",
"(",
"m",
"*",
"3",
")",
")",
"elif",
"allowUnicode",
":",
"pref",
"=",
"SI_PREFIXES",
"[",
"(",
"m",
"+",
"8",
")",
"]",
"else",
":",
"pref",
"=",
"SI_PREFIXES_ASCII",
"[",
"(",
"m",
"+",
"8",
")",
"]",
"p",
"=",
"(",
"0.001",
"**",
"m",
")",
"return",
"(",
"p",
",",
"pref",
")"
] |
return the recommended scale factor and si prefix string for x .
|
train
| false
|
11,397
|
def _usage_specific(raw):
get_key = (lambda val: dict([tuple(val.split(':'))]))
raw = raw.split('\n')
(section, size, used) = raw[0].split(' ')
section = section.replace(',', '_').replace(':', '').lower()
data = {}
data[section] = {}
for val in [size, used]:
data[section].update(get_key(val.replace(',', '')))
for devices in raw[1:]:
data[section].update(get_key(re.sub('\\s+', ':', devices.strip())))
return data
|
[
"def",
"_usage_specific",
"(",
"raw",
")",
":",
"get_key",
"=",
"(",
"lambda",
"val",
":",
"dict",
"(",
"[",
"tuple",
"(",
"val",
".",
"split",
"(",
"':'",
")",
")",
"]",
")",
")",
"raw",
"=",
"raw",
".",
"split",
"(",
"'\\n'",
")",
"(",
"section",
",",
"size",
",",
"used",
")",
"=",
"raw",
"[",
"0",
"]",
".",
"split",
"(",
"' '",
")",
"section",
"=",
"section",
".",
"replace",
"(",
"','",
",",
"'_'",
")",
".",
"replace",
"(",
"':'",
",",
"''",
")",
".",
"lower",
"(",
")",
"data",
"=",
"{",
"}",
"data",
"[",
"section",
"]",
"=",
"{",
"}",
"for",
"val",
"in",
"[",
"size",
",",
"used",
"]",
":",
"data",
"[",
"section",
"]",
".",
"update",
"(",
"get_key",
"(",
"val",
".",
"replace",
"(",
"','",
",",
"''",
")",
")",
")",
"for",
"devices",
"in",
"raw",
"[",
"1",
":",
"]",
":",
"data",
"[",
"section",
"]",
".",
"update",
"(",
"get_key",
"(",
"re",
".",
"sub",
"(",
"'\\\\s+'",
",",
"':'",
",",
"devices",
".",
"strip",
"(",
")",
")",
")",
")",
"return",
"data"
] |
parse usage/specific .
|
train
| true
|
11,398
|
def deconvolution_2d(x, W, b=None, stride=1, pad=0, outsize=None, use_cudnn=True, deterministic=False):
    """Two-dimensional deconvolution (transposed convolution).

    Applies Deconvolution2DFunction to input x with filter W and optional
    bias b; all other arguments are forwarded to the function object.
    """
    func = Deconvolution2DFunction(stride, pad, outsize, use_cudnn, deterministic)
    inputs = (x, W) if b is None else (x, W, b)
    return func(*inputs)
|
[
"def",
"deconvolution_2d",
"(",
"x",
",",
"W",
",",
"b",
"=",
"None",
",",
"stride",
"=",
"1",
",",
"pad",
"=",
"0",
",",
"outsize",
"=",
"None",
",",
"use_cudnn",
"=",
"True",
",",
"deterministic",
"=",
"False",
")",
":",
"func",
"=",
"Deconvolution2DFunction",
"(",
"stride",
",",
"pad",
",",
"outsize",
",",
"use_cudnn",
",",
"deterministic",
")",
"if",
"(",
"b",
"is",
"None",
")",
":",
"return",
"func",
"(",
"x",
",",
"W",
")",
"else",
":",
"return",
"func",
"(",
"x",
",",
"W",
",",
"b",
")"
] |
two dimensional deconvolution function .
|
train
| false
|
11,399
|
def addBeginEndInnerXMLTag(attributeDictionary, className, depth, innerText, output, text=''):
    """Write an open/close tag pair around innerText, or a self-closed tag
    when innerText is empty."""
    if len(innerText) == 0:
        addClosedXMLTag(attributeDictionary, className, depth, output, text)
        return
    addBeginXMLTag(attributeDictionary, className, depth, output, text)
    output.write(innerText)
    addEndXMLTag(className, depth, output)
|
[
"def",
"addBeginEndInnerXMLTag",
"(",
"attributeDictionary",
",",
"className",
",",
"depth",
",",
"innerText",
",",
"output",
",",
"text",
"=",
"''",
")",
":",
"if",
"(",
"len",
"(",
"innerText",
")",
">",
"0",
")",
":",
"addBeginXMLTag",
"(",
"attributeDictionary",
",",
"className",
",",
"depth",
",",
"output",
",",
"text",
")",
"output",
".",
"write",
"(",
"innerText",
")",
"addEndXMLTag",
"(",
"className",
",",
"depth",
",",
"output",
")",
"else",
":",
"addClosedXMLTag",
"(",
"attributeDictionary",
",",
"className",
",",
"depth",
",",
"output",
",",
"text",
")"
] |
add the begin and end xml tag and the inner text if any .
|
train
| false
|
11,401
|
def logformatter_adapter(logkws):
    """Adapt a LogFormatter result dict into (level, message, args) suitable
    for positional logger calls, warning on deprecated/missing keys."""
    missing = {'level', 'msg', 'args'} - set(logkws)
    if missing:
        warnings.warn('Missing keys in LogFormatter method', ScrapyDeprecationWarning)
    if 'format' in logkws:
        warnings.warn('`format` key in LogFormatter methods has been deprecated, use `msg` instead', ScrapyDeprecationWarning)
    level = logkws.get('level', logging.INFO)
    message = logkws.get('format', logkws.get('msg'))
    # Fall back to the whole dict when no (truthy) args were supplied.
    if logkws.get('args'):
        args = logkws['args']
    else:
        args = logkws
    return (level, message, args)
|
[
"def",
"logformatter_adapter",
"(",
"logkws",
")",
":",
"if",
"(",
"not",
"(",
"{",
"'level'",
",",
"'msg'",
",",
"'args'",
"}",
"<=",
"set",
"(",
"logkws",
")",
")",
")",
":",
"warnings",
".",
"warn",
"(",
"'Missing keys in LogFormatter method'",
",",
"ScrapyDeprecationWarning",
")",
"if",
"(",
"'format'",
"in",
"logkws",
")",
":",
"warnings",
".",
"warn",
"(",
"'`format` key in LogFormatter methods has been deprecated, use `msg` instead'",
",",
"ScrapyDeprecationWarning",
")",
"level",
"=",
"logkws",
".",
"get",
"(",
"'level'",
",",
"logging",
".",
"INFO",
")",
"message",
"=",
"logkws",
".",
"get",
"(",
"'format'",
",",
"logkws",
".",
"get",
"(",
"'msg'",
")",
")",
"args",
"=",
"(",
"logkws",
"if",
"(",
"not",
"logkws",
".",
"get",
"(",
"'args'",
")",
")",
"else",
"logkws",
"[",
"'args'",
"]",
")",
"return",
"(",
"level",
",",
"message",
",",
"args",
")"
] |
helper that takes the dictionary output from the methods in logformatter and adapts it into a tuple of positional arguments for logger .
|
train
| false
|
11,403
|
def OpenUrlWithBasicAuth(url, user='root', pwd=''):
    """GET `url` with HTTP basic-auth credentials and return the response.

    TLS certificate verification is disabled (verify=False), so the
    server's certificate is never checked — NOTE(review): confirm this is
    acceptable for the target environment.
    """
    return requests.get(url, auth=HTTPBasicAuth(user, pwd), verify=False)
|
[
"def",
"OpenUrlWithBasicAuth",
"(",
"url",
",",
"user",
"=",
"'root'",
",",
"pwd",
"=",
"''",
")",
":",
"return",
"requests",
".",
"get",
"(",
"url",
",",
"auth",
"=",
"HTTPBasicAuth",
"(",
"user",
",",
"pwd",
")",
",",
"verify",
"=",
"False",
")"
] |
open the specified url .
|
train
| true
|
11,404
|
def _test_parse_factory(source):
filename = os.path.join('CDAO/', source)
def test_parse(self):
trees = list(bp._io.parse(filename, 'cdao'))
test_parse.__doc__ = ('Parse the phylogenies in %s.' % source)
return test_parse
|
[
"def",
"_test_parse_factory",
"(",
"source",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'CDAO/'",
",",
"source",
")",
"def",
"test_parse",
"(",
"self",
")",
":",
"trees",
"=",
"list",
"(",
"bp",
".",
"_io",
".",
"parse",
"(",
"filename",
",",
"'cdao'",
")",
")",
"test_parse",
".",
"__doc__",
"=",
"(",
"'Parse the phylogenies in %s.'",
"%",
"source",
")",
"return",
"test_parse"
] |
generate a test method for parse()ing the given source .
|
train
| false
|
11,405
|
def norm_read(path):
    """Return the content of the file at `path` with line endings normalized
    to '\\n'.

    The original used ``open(path, 'U')``; the 'U' mode flag was deprecated
    since Python 3.4 and removed in 3.11, where it raises ValueError.  Text
    mode with the default ``newline=None`` performs the same universal-newline
    translation.  The handle is now also closed deterministically instead of
    being leaked.
    """
    with open(path, 'r') as handle:
        return handle.read()
|
[
"def",
"norm_read",
"(",
"path",
")",
":",
"return",
"open",
"(",
"path",
",",
"'U'",
")",
".",
"read",
"(",
")"
] |
return the content of the file with normalized line feeds .
|
train
| false
|
11,406
|
@register.filter
def bleach_urlize(text):
    """Template filter: turn URLs in `text` into clickable links, then
    sanitize the result and mark it safe for template output.

    NOTE(review): relies on the Python 2 `unicode` builtin; would need
    `str` under Python 3 — confirm the target runtime.
    """
    linkified = bleach.linkify(unicode(text))
    # Strip all markup except this small whitelist ('spanb' looks like a
    # typo for 'span' — TODO confirm the intended tag list).
    cleaned = bleach.clean(linkified, tags=['a', 'p', 'div', 'br', 'spanb', 'strong', 'em', 'i'])
    return mark_safe(cleaned)
|
[
"@",
"register",
".",
"filter",
"def",
"bleach_urlize",
"(",
"text",
")",
":",
"linkified",
"=",
"bleach",
".",
"linkify",
"(",
"unicode",
"(",
"text",
")",
")",
"cleaned",
"=",
"bleach",
".",
"clean",
"(",
"linkified",
",",
"tags",
"=",
"[",
"'a'",
",",
"'p'",
",",
"'div'",
",",
"'br'",
",",
"'spanb'",
",",
"'strong'",
",",
"'em'",
",",
"'i'",
"]",
")",
"return",
"mark_safe",
"(",
"cleaned",
")"
] |
converts any urls in text into clickable links .
|
train
| false
|
11,408
|
def osd_prepare(**kwargs):
    """Prepare an OSD: thin wrapper forwarding all keyword arguments to
    ceph_cfg.osd_prepare and returning its result."""
    return ceph_cfg.osd_prepare(**kwargs)
|
[
"def",
"osd_prepare",
"(",
"**",
"kwargs",
")",
":",
"return",
"ceph_cfg",
".",
"osd_prepare",
"(",
"**",
"kwargs",
")"
] |
prepare an osd cli example: .
|
train
| false
|
11,410
|
def ambiguous_identifier(logical_line, tokens):
    """Yield (position, message) for identifiers named 'l', 'O' or 'I'.

    Emits E741 for assignments / as / global / nonlocal bindings, E742 for
    class names and E743 for function names.
    """
    idents_to_avoid = ('l', 'O', 'I')
    prev_text, prev_start = tokens[0][1], tokens[0][2]
    for token_type, text, start, end, line in tokens[1:]:
        ident = pos = None
        # Assignment (or any operator containing '='): previous token is
        # the binding target.
        if token_type == tokenize.OP and '=' in text and prev_text in idents_to_avoid:
            ident, pos = prev_text, prev_start
        # 'as name', 'global name', 'nonlocal name' bind the current token.
        if prev_text in ('as', 'global', 'nonlocal') and text in idents_to_avoid:
            ident, pos = text, start
        if text in idents_to_avoid:
            if prev_text == 'class':
                yield start, "E742 ambiguous class definition '%s'" % text
            if prev_text == 'def':
                yield start, "E743 ambiguous function definition '%s'" % text
        if ident:
            yield pos, "E741 ambiguous variable name '%s'" % ident
        prev_text = text
        prev_start = start
|
[
"def",
"ambiguous_identifier",
"(",
"logical_line",
",",
"tokens",
")",
":",
"idents_to_avoid",
"=",
"(",
"'l'",
",",
"'O'",
",",
"'I'",
")",
"(",
"prev_type",
",",
"prev_text",
",",
"prev_start",
",",
"prev_end",
",",
"__",
")",
"=",
"tokens",
"[",
"0",
"]",
"for",
"(",
"token_type",
",",
"text",
",",
"start",
",",
"end",
",",
"line",
")",
"in",
"tokens",
"[",
"1",
":",
"]",
":",
"ident",
"=",
"pos",
"=",
"None",
"if",
"(",
"(",
"token_type",
"==",
"tokenize",
".",
"OP",
")",
"and",
"(",
"'='",
"in",
"text",
")",
")",
":",
"if",
"(",
"prev_text",
"in",
"idents_to_avoid",
")",
":",
"ident",
"=",
"prev_text",
"pos",
"=",
"prev_start",
"if",
"(",
"prev_text",
"in",
"(",
"'as'",
",",
"'global'",
",",
"'nonlocal'",
")",
")",
":",
"if",
"(",
"text",
"in",
"idents_to_avoid",
")",
":",
"ident",
"=",
"text",
"pos",
"=",
"start",
"if",
"(",
"prev_text",
"==",
"'class'",
")",
":",
"if",
"(",
"text",
"in",
"idents_to_avoid",
")",
":",
"(",
"yield",
"(",
"start",
",",
"(",
"\"E742 ambiguous class definition '%s'\"",
"%",
"text",
")",
")",
")",
"if",
"(",
"prev_text",
"==",
"'def'",
")",
":",
"if",
"(",
"text",
"in",
"idents_to_avoid",
")",
":",
"(",
"yield",
"(",
"start",
",",
"(",
"\"E743 ambiguous function definition '%s'\"",
"%",
"text",
")",
")",
")",
"if",
"ident",
":",
"(",
"yield",
"(",
"pos",
",",
"(",
"\"E741 ambiguous variable name '%s'\"",
"%",
"ident",
")",
")",
")",
"prev_text",
"=",
"text",
"prev_start",
"=",
"start"
] |
never use the characters l .
|
train
| true
|
11,411
|
def signing_format_message(method, headers_dict, body_dict):
    """Compose the canonical message to sign: the method, a blank line,
    the serialized headers, then the serialized body."""
    header_part = '{}\n\n{}'.format(method, header_string(headers_dict))
    return header_part + body_string(body_dict)
|
[
"def",
"signing_format_message",
"(",
"method",
",",
"headers_dict",
",",
"body_dict",
")",
":",
"headers_str",
"=",
"'{}\\n\\n{}'",
".",
"format",
"(",
"method",
",",
"header_string",
"(",
"headers_dict",
")",
")",
"body_str",
"=",
"body_string",
"(",
"body_dict",
")",
"message",
"=",
"(",
"headers_str",
"+",
"body_str",
")",
"return",
"message"
] |
given a dictionary of headers and a dictionary of the json for the body .
|
train
| false
|
11,412
|
def calc_dinucleotide_counts(sequence):
    """Return the total (non-overlapping) count of same-letter dinucleotide
    repeats (AA, CC, TT, GG, UU in either case) in `sequence`."""
    return sum(sequence.count(2 * base) for base in 'ACTGUactgu')
|
[
"def",
"calc_dinucleotide_counts",
"(",
"sequence",
")",
":",
"total",
"=",
"0",
"for",
"letter",
"in",
"'ACTGUactgu'",
":",
"total",
"+=",
"sequence",
".",
"count",
"(",
"(",
"letter",
"+",
"letter",
")",
")",
"return",
"total"
] |
returns the total count of di-nucleotides repeats .
|
train
| false
|
11,413
|
def _load_serializers():
    """Register the built-in serializers, then any extras declared in
    settings.SERIALIZATION_MODULES (which may override built-ins)."""
    for fmt, serializer in BUILTIN_SERIALIZERS.items():
        register_serializer(fmt, serializer)
    if hasattr(settings, 'SERIALIZATION_MODULES'):
        custom = settings.SERIALIZATION_MODULES
        for fmt in custom:
            register_serializer(fmt, custom[fmt])
|
[
"def",
"_load_serializers",
"(",
")",
":",
"for",
"format",
"in",
"BUILTIN_SERIALIZERS",
":",
"register_serializer",
"(",
"format",
",",
"BUILTIN_SERIALIZERS",
"[",
"format",
"]",
")",
"if",
"hasattr",
"(",
"settings",
",",
"'SERIALIZATION_MODULES'",
")",
":",
"for",
"format",
"in",
"settings",
".",
"SERIALIZATION_MODULES",
":",
"register_serializer",
"(",
"format",
",",
"settings",
".",
"SERIALIZATION_MODULES",
"[",
"format",
"]",
")"
] |
register built-in and settings-defined serializers .
|
train
| false
|
11,414
|
def VerifyOTP(user, otp):
    """Verify `otp` for `user` against OTPs computed for a window of time
    slices centered on the current one.

    Returns None on success; raises OTPException on rate-limit breach,
    replayed OTP, or no match.  NOTE: Python 2 code (`long`, integer `/`).
    """
    timestamp = long(time.time())
    # Current time slice, and how many slices fit in the validity window.
    challenge = (timestamp / _GRANULARITY)
    units = (_TIMEOUT / _GRANULARITY)
    secret = _GetUserSecret(user)
    # Sorted history of (timestamp, otp) attempts, updated with this one.
    ts = _UpdateUserHistory(user, timestamp, otp)
    # Rate limit: count attempts in the trailing 60 seconds.
    if ((len(ts) - bisect.bisect_left(ts, ((timestamp - 60),))) > _ATTEMPTS_PER_MIN):
        raise OTPException('Too many OTP login attempts for {0} in past minute'.format(user))
    # Replay protection: reject an OTP seen in any earlier attempt
    # (ts[:-1] excludes the attempt recorded just above).
    if [True for x in ts[:(-1)] if (x[1] == otp)]:
        raise OTPException('Have already seen OTP {0} for {1}'.format(otp, user))
    # Accept if the OTP matches any slice in the window around `challenge`.
    for offset in range(((- (units - 1)) / 2), ((units / 2) + 1)):
        if (int(otp) == _ComputeOTP(secret, (challenge + offset))):
            return
    raise OTPException('Entered OTP invalid')
|
[
"def",
"VerifyOTP",
"(",
"user",
",",
"otp",
")",
":",
"timestamp",
"=",
"long",
"(",
"time",
".",
"time",
"(",
")",
")",
"challenge",
"=",
"(",
"timestamp",
"/",
"_GRANULARITY",
")",
"units",
"=",
"(",
"_TIMEOUT",
"/",
"_GRANULARITY",
")",
"secret",
"=",
"_GetUserSecret",
"(",
"user",
")",
"ts",
"=",
"_UpdateUserHistory",
"(",
"user",
",",
"timestamp",
",",
"otp",
")",
"if",
"(",
"(",
"len",
"(",
"ts",
")",
"-",
"bisect",
".",
"bisect_left",
"(",
"ts",
",",
"(",
"(",
"timestamp",
"-",
"60",
")",
",",
")",
")",
")",
">",
"_ATTEMPTS_PER_MIN",
")",
":",
"raise",
"OTPException",
"(",
"'Too many OTP login attempts for {0} in past minute'",
".",
"format",
"(",
"user",
")",
")",
"if",
"[",
"True",
"for",
"x",
"in",
"ts",
"[",
":",
"(",
"-",
"1",
")",
"]",
"if",
"(",
"x",
"[",
"1",
"]",
"==",
"otp",
")",
"]",
":",
"raise",
"OTPException",
"(",
"'Have already seen OTP {0} for {1}'",
".",
"format",
"(",
"otp",
",",
"user",
")",
")",
"for",
"offset",
"in",
"range",
"(",
"(",
"(",
"-",
"(",
"units",
"-",
"1",
")",
")",
"/",
"2",
")",
",",
"(",
"(",
"units",
"/",
"2",
")",
"+",
"1",
")",
")",
":",
"if",
"(",
"int",
"(",
"otp",
")",
"==",
"_ComputeOTP",
"(",
"secret",
",",
"(",
"challenge",
"+",
"offset",
")",
")",
")",
":",
"return",
"raise",
"OTPException",
"(",
"'Entered OTP invalid'",
")"
] |
verifies the provided otp for the user by comparing it to one generated right now .
|
train
| false
|
11,416
|
def test_constant_get_stabilized():
    """Check that log(1 + exp(x)) is stabilized to the softplus op.

    The second half exercises the constant-input case, which is known to be
    broken: constant folding runs before stabilization (Theano issue #504),
    so the constant graph overflows to inf and the test is skipped.
    """
    x2 = T.scalar()
    y2 = T.log((1 + T.exp(x2)))
    mode = theano.compile.get_default_mode()
    # Disable isfinite checks so the expected inf below does not abort.
    mode.check_isfinite = False
    f2 = theano.function([x2], y2, mode=mode)
    try:
        assert (len(f2.maker.fgraph.toposort()) == 1)
        assert (f2.maker.fgraph.toposort()[0].op == theano.tensor.nnet.sigm.softplus)
        assert (f2(800) == 800)
        x = T.as_tensor_variable(800)
        y = T.log((1 + T.exp(x)))
        f = theano.function([], y, mode=mode)
        assert (len(f.maker.fgraph.toposort()) == 0)
        # These two assertions conflict by design: the first documents the
        # current (broken) constant-folded result, the second the desired
        # stabilized result — hitting either mismatch triggers the skip.
        assert numpy.isinf(f())
        assert (f() == 800), f()
    except AssertionError:
        raise SkipTest('Theano optimizes constant before stabilization. This breaks stabilization optimization in some cases. See #504.')
|
[
"def",
"test_constant_get_stabilized",
"(",
")",
":",
"x2",
"=",
"T",
".",
"scalar",
"(",
")",
"y2",
"=",
"T",
".",
"log",
"(",
"(",
"1",
"+",
"T",
".",
"exp",
"(",
"x2",
")",
")",
")",
"mode",
"=",
"theano",
".",
"compile",
".",
"get_default_mode",
"(",
")",
"mode",
".",
"check_isfinite",
"=",
"False",
"f2",
"=",
"theano",
".",
"function",
"(",
"[",
"x2",
"]",
",",
"y2",
",",
"mode",
"=",
"mode",
")",
"try",
":",
"assert",
"(",
"len",
"(",
"f2",
".",
"maker",
".",
"fgraph",
".",
"toposort",
"(",
")",
")",
"==",
"1",
")",
"assert",
"(",
"f2",
".",
"maker",
".",
"fgraph",
".",
"toposort",
"(",
")",
"[",
"0",
"]",
".",
"op",
"==",
"theano",
".",
"tensor",
".",
"nnet",
".",
"sigm",
".",
"softplus",
")",
"assert",
"(",
"f2",
"(",
"800",
")",
"==",
"800",
")",
"x",
"=",
"T",
".",
"as_tensor_variable",
"(",
"800",
")",
"y",
"=",
"T",
".",
"log",
"(",
"(",
"1",
"+",
"T",
".",
"exp",
"(",
"x",
")",
")",
")",
"f",
"=",
"theano",
".",
"function",
"(",
"[",
"]",
",",
"y",
",",
"mode",
"=",
"mode",
")",
"assert",
"(",
"len",
"(",
"f",
".",
"maker",
".",
"fgraph",
".",
"toposort",
"(",
")",
")",
"==",
"0",
")",
"assert",
"numpy",
".",
"isinf",
"(",
"f",
"(",
")",
")",
"assert",
"(",
"f",
"(",
")",
"==",
"800",
")",
",",
"f",
"(",
")",
"except",
"AssertionError",
":",
"raise",
"SkipTest",
"(",
"'Theano optimizes constant before stabilization. This breaks stabilization optimization in some cases. See #504.'",
")"
] |
currently theano enable the constant_folding optimization before stabilization optimization .
|
train
| false
|
11,417
|
def _ica_explained_variance(ica, inst, normalize=False):
    """Compute per-component variance accounted for by an ICA decomposition.

    Parameters: `ica` (mne ICA instance), `inst` (Raw, Epochs or Evoked),
    `normalize` — if True the returned vector is scaled to sum to 1.
    Returns a 1-D array with one variance value per ICA component.
    Raises TypeError when either argument has the wrong type.
    """
    if (not isinstance(ica, ICA)):
        raise TypeError('first argument must be an instance of ICA.')
    if (not isinstance(inst, (BaseRaw, BaseEpochs, Evoked))):
        raise TypeError('second argument must an instance of either Raw, Epochs or Evoked.')
    source_data = _get_inst_data(ica.get_sources(inst))
    # Collapse epochs: (n_epochs, n_chan, n_samp) -> (n_chan, n_epochs * n_samp)
    if isinstance(inst, BaseEpochs):
        (n_epochs, n_chan, n_samp) = source_data.shape
        source_data = source_data.transpose(1, 0, 2).reshape((n_chan, (n_epochs * n_samp)))
    (n_chan, n_samp) = source_data.shape
    # Per-component variance of the back-projection: squared mixing-column
    # norm times the source power, with an unbiased (N - 1) denominator.
    var = ((np.sum((ica.mixing_matrix_ ** 2), axis=0) * np.sum((source_data ** 2), axis=1)) / ((n_chan * n_samp) - 1))
    if normalize:
        var /= var.sum()
    return var
|
[
"def",
"_ica_explained_variance",
"(",
"ica",
",",
"inst",
",",
"normalize",
"=",
"False",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"ica",
",",
"ICA",
")",
")",
":",
"raise",
"TypeError",
"(",
"'first argument must be an instance of ICA.'",
")",
"if",
"(",
"not",
"isinstance",
"(",
"inst",
",",
"(",
"BaseRaw",
",",
"BaseEpochs",
",",
"Evoked",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'second argument must an instance of either Raw, Epochs or Evoked.'",
")",
"source_data",
"=",
"_get_inst_data",
"(",
"ica",
".",
"get_sources",
"(",
"inst",
")",
")",
"if",
"isinstance",
"(",
"inst",
",",
"BaseEpochs",
")",
":",
"(",
"n_epochs",
",",
"n_chan",
",",
"n_samp",
")",
"=",
"source_data",
".",
"shape",
"source_data",
"=",
"source_data",
".",
"transpose",
"(",
"1",
",",
"0",
",",
"2",
")",
".",
"reshape",
"(",
"(",
"n_chan",
",",
"(",
"n_epochs",
"*",
"n_samp",
")",
")",
")",
"(",
"n_chan",
",",
"n_samp",
")",
"=",
"source_data",
".",
"shape",
"var",
"=",
"(",
"(",
"np",
".",
"sum",
"(",
"(",
"ica",
".",
"mixing_matrix_",
"**",
"2",
")",
",",
"axis",
"=",
"0",
")",
"*",
"np",
".",
"sum",
"(",
"(",
"source_data",
"**",
"2",
")",
",",
"axis",
"=",
"1",
")",
")",
"/",
"(",
"(",
"n_chan",
"*",
"n_samp",
")",
"-",
"1",
")",
")",
"if",
"normalize",
":",
"var",
"/=",
"var",
".",
"sum",
"(",
")",
"return",
"var"
] |
check variance accounted for by each component in supplied data .
|
train
| false
|
11,420
|
def identify_data(data):
    """Identify the image contained in `data`.

    Accepts raw image data or an already-wrapped StringIO; returns a
    (width, height, format) tuple as reported by PIL.  NOTE(review):
    Python 2 style — under Python 3 BytesIO would be required for binary
    image data.
    """
    if (not isinstance(data, StringIO)):
        data = StringIO(data)
    img = Image.open(data)
    (width, height) = img.size
    fmt = img.format
    return (width, height, fmt)
|
[
"def",
"identify_data",
"(",
"data",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"data",
",",
"StringIO",
")",
")",
":",
"data",
"=",
"StringIO",
"(",
"data",
")",
"img",
"=",
"Image",
".",
"open",
"(",
"data",
")",
"(",
"width",
",",
"height",
")",
"=",
"img",
".",
"size",
"fmt",
"=",
"img",
".",
"format",
"return",
"(",
"width",
",",
"height",
",",
"fmt",
")"
] |
identify the image in data .
|
train
| false
|
11,422
|
def startWorker(basedir, quiet, nodaemon):
    """Start the worker process from `basedir`.

    Launches in-process when quiet/nodaemon is requested or on win32
    (where fork is unavailable); otherwise forks — the parent follows the
    child's startup log and returns its status, while the child launches
    after a short delay.
    """
    os.chdir(basedir)
    if (quiet or nodaemon):
        return launch(nodaemon)
    # Imported lazily: only needed to detect win32, where os.fork is absent.
    from twisted.python.runtime import platformType
    if (platformType == 'win32'):
        return launch(nodaemon)
    if os.fork():
        # Parent: watch the child's startup via the log follower.
        rc = Follower().follow()
        return rc
    # Child: give the parent a moment to start following, then launch.
    time.sleep(0.2)
    launch(nodaemon)
|
[
"def",
"startWorker",
"(",
"basedir",
",",
"quiet",
",",
"nodaemon",
")",
":",
"os",
".",
"chdir",
"(",
"basedir",
")",
"if",
"(",
"quiet",
"or",
"nodaemon",
")",
":",
"return",
"launch",
"(",
"nodaemon",
")",
"from",
"twisted",
".",
"python",
".",
"runtime",
"import",
"platformType",
"if",
"(",
"platformType",
"==",
"'win32'",
")",
":",
"return",
"launch",
"(",
"nodaemon",
")",
"if",
"os",
".",
"fork",
"(",
")",
":",
"rc",
"=",
"Follower",
"(",
")",
".",
"follow",
"(",
")",
"return",
"rc",
"time",
".",
"sleep",
"(",
"0.2",
")",
"launch",
"(",
"nodaemon",
")"
] |
start worker process .
|
train
| true
|
11,423
|
def ovirt_facts_full_argument_spec(**kwargs):
    """Return the argument spec common to all oVirt facts modules, extended
    with any module-specific entries passed as keyword arguments."""
    base = {
        'auth': __get_auth_dict(),
        'fetch_nested': {'default': False, 'type': 'bool'},
        'nested_attributes': {'type': 'list'},
    }
    base.update(kwargs)
    return base
|
[
"def",
"ovirt_facts_full_argument_spec",
"(",
"**",
"kwargs",
")",
":",
"spec",
"=",
"dict",
"(",
"auth",
"=",
"__get_auth_dict",
"(",
")",
",",
"fetch_nested",
"=",
"dict",
"(",
"default",
"=",
"False",
",",
"type",
"=",
"'bool'",
")",
",",
"nested_attributes",
"=",
"dict",
"(",
"type",
"=",
"'list'",
")",
")",
"spec",
".",
"update",
"(",
"kwargs",
")",
"return",
"spec"
] |
extend parameters of facts module with parameters which are common to all ovirt facts modules .
|
train
| false
|
11,424
|
def truncate_seqs(fasta_seqs, qual_scores, base_pos):
    """Truncate sequences and their quality scores at index `base_pos`.

    fasta_seqs: {label: sequence string}
    qual_scores: {label: per-base scores, indexed like the sequence}
    Returns (truncated_fasta, truncated_quals), keyed by the fasta labels.
    """
    trunc_fasta = {label: seq[:base_pos] for label, seq in fasta_seqs.items()}
    trunc_qual = {label: qual_scores[label][:base_pos] for label in fasta_seqs}
    return (trunc_fasta, trunc_qual)
|
[
"def",
"truncate_seqs",
"(",
"fasta_seqs",
",",
"qual_scores",
",",
"base_pos",
")",
":",
"trunc_fasta_seqs",
"=",
"{",
"}",
"trunc_qual_scores",
"=",
"{",
"}",
"for",
"seq",
"in",
"fasta_seqs",
":",
"trunc_fasta_seqs",
"[",
"seq",
"]",
"=",
"fasta_seqs",
"[",
"seq",
"]",
"[",
":",
"base_pos",
"]",
"trunc_qual_scores",
"[",
"seq",
"]",
"=",
"qual_scores",
"[",
"seq",
"]",
"[",
":",
"base_pos",
"]",
"return",
"(",
"trunc_fasta_seqs",
",",
"trunc_qual_scores",
")"
] |
truncates sequences to base position specified with base_pos fasta_seqs: dict of seq label: seq string qual_scores: dict of seq label: numpy array of int scores base_pos: index in sequence to truncate at .
|
train
| false
|
11,425
|
def load_lang(lang, apps=None):
    """Combine all app CSV translations for `lang` into a single dict.

    English needs no translation, so returns {} immediately.  Results are
    cached under the shared 'lang_full_dict' cache key.  For a variant
    such as 'pt-BR', the parent language ('pt') dict is loaded first and
    overlaid with the variant's entries (the variant wins on conflicts).
    """
    if (lang == u'en'):
        return {}
    out = frappe.cache().hget(u'lang_full_dict', lang, shared=True)
    if (not out):
        # Cache miss: merge <app>/translations/<lang>.csv across all apps.
        out = {}
        for app in (apps or frappe.get_all_apps(True)):
            path = os.path.join(frappe.get_pymodule_path(app), u'translations', (lang + u'.csv'))
            out.update((get_translation_dict_from_file(path, lang, app) or {}))
        if (u'-' in lang):
            # Regional variant: start from the parent language, overlay ours.
            parent = lang.split(u'-')[0]
            parent_out = load_lang(parent)
            parent_out.update(out)
            out = parent_out
        frappe.cache().hset(u'lang_full_dict', lang, out, shared=True)
    return (out or {})
|
[
"def",
"load_lang",
"(",
"lang",
",",
"apps",
"=",
"None",
")",
":",
"if",
"(",
"lang",
"==",
"u'en'",
")",
":",
"return",
"{",
"}",
"out",
"=",
"frappe",
".",
"cache",
"(",
")",
".",
"hget",
"(",
"u'lang_full_dict'",
",",
"lang",
",",
"shared",
"=",
"True",
")",
"if",
"(",
"not",
"out",
")",
":",
"out",
"=",
"{",
"}",
"for",
"app",
"in",
"(",
"apps",
"or",
"frappe",
".",
"get_all_apps",
"(",
"True",
")",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"frappe",
".",
"get_pymodule_path",
"(",
"app",
")",
",",
"u'translations'",
",",
"(",
"lang",
"+",
"u'.csv'",
")",
")",
"out",
".",
"update",
"(",
"(",
"get_translation_dict_from_file",
"(",
"path",
",",
"lang",
",",
"app",
")",
"or",
"{",
"}",
")",
")",
"if",
"(",
"u'-'",
"in",
"lang",
")",
":",
"parent",
"=",
"lang",
".",
"split",
"(",
"u'-'",
")",
"[",
"0",
"]",
"parent_out",
"=",
"load_lang",
"(",
"parent",
")",
"parent_out",
".",
"update",
"(",
"out",
")",
"out",
"=",
"parent_out",
"frappe",
".",
"cache",
"(",
")",
".",
"hset",
"(",
"u'lang_full_dict'",
",",
"lang",
",",
"out",
",",
"shared",
"=",
"True",
")",
"return",
"(",
"out",
"or",
"{",
"}",
")"
] |
combine all translations from .
|
train
| false
|
11,426
|
def get_test_html_from_value(value):
    """Render a TableBlock value as an HTML <table> string for tests.

    Honors the value's 'first_row_is_table_header' and
    'first_col_is_header' flags; cell contents are escaped via tiny_escape.
    """
    rows = list(value[u'data'])
    parts = [u'<table>']
    if value[u'first_row_is_table_header']:
        header = rows.pop(0)
        parts.append(u'<thead><tr>')
        for cell in header:
            parts.append(u'<th>%s</th>' % tiny_escape(cell))
        parts.append(u'</tr></thead>')
    parts.append(u'<tbody>')
    for row in rows:
        parts.append(u'<tr>')
        for index, cell in enumerate(row):
            if value[u'first_col_is_header'] and index == 0:
                parts.append(u'<th>%s</th>' % tiny_escape(cell))
            else:
                parts.append(u'<td>%s</td>' % tiny_escape(cell))
        parts.append(u'</tr>')
    parts.append(u'</tbody></table>')
    return u''.join(parts)
|
[
"def",
"get_test_html_from_value",
"(",
"value",
")",
":",
"data",
"=",
"list",
"(",
"value",
"[",
"u'data'",
"]",
")",
"table",
"=",
"u'<table>'",
"if",
"value",
"[",
"u'first_row_is_table_header'",
"]",
":",
"row_header",
"=",
"data",
".",
"pop",
"(",
"0",
")",
"table",
"+=",
"u'<thead><tr>'",
"for",
"th",
"in",
"row_header",
":",
"table",
"+=",
"(",
"u'<th>%s</th>'",
"%",
"tiny_escape",
"(",
"th",
")",
")",
"table",
"+=",
"u'</tr></thead>'",
"table",
"+=",
"u'<tbody>'",
"for",
"row",
"in",
"data",
":",
"table",
"+=",
"u'<tr>'",
"first",
"=",
"True",
"for",
"col",
"in",
"row",
":",
"if",
"(",
"value",
"[",
"u'first_col_is_header'",
"]",
"and",
"first",
")",
":",
"table",
"+=",
"(",
"u'<th>%s</th>'",
"%",
"tiny_escape",
"(",
"col",
")",
")",
"else",
":",
"table",
"+=",
"(",
"u'<td>%s</td>'",
"%",
"tiny_escape",
"(",
"col",
")",
")",
"first",
"=",
"False",
"table",
"+=",
"u'</tr>'",
"table",
"+=",
"u'</tbody></table>'",
"return",
"table"
] |
generate a test html from a tableblock value .
|
train
| false
|
11,427
|
def relu_(x):
return (x * (x > 0))
|
[
"def",
"relu_",
"(",
"x",
")",
":",
"return",
"(",
"x",
"*",
"(",
"x",
">",
"0",
")",
")"
] |
alternative relu implementation parameters x: tensor variable .
|
train
| false
|
11,428
|
def is_prime(number):
"\n if not fermat_little_theorem(number) == 1:\n # Not prime, according to Fermat's little theorem\n return False\n "
if randomized_primality_testing(number, 5):
return True
return False
|
[
"def",
"is_prime",
"(",
"number",
")",
":",
"if",
"randomized_primality_testing",
"(",
"number",
",",
"5",
")",
":",
"return",
"True",
"return",
"False"
] |
check if a number is prime .
|
train
| false
|
11,429
|
def matchPreviousExpr(expr):
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s, l, t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s, l, t):
theseTokens = _flatten(t.asList())
if (theseTokens != matchTokens):
raise ParseException('', 0, '')
rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName(('(prev) ' + _ustr(expr)))
return rep
|
[
"def",
"matchPreviousExpr",
"(",
"expr",
")",
":",
"rep",
"=",
"Forward",
"(",
")",
"e2",
"=",
"expr",
".",
"copy",
"(",
")",
"rep",
"<<=",
"e2",
"def",
"copyTokenToRepeater",
"(",
"s",
",",
"l",
",",
"t",
")",
":",
"matchTokens",
"=",
"_flatten",
"(",
"t",
".",
"asList",
"(",
")",
")",
"def",
"mustMatchTheseTokens",
"(",
"s",
",",
"l",
",",
"t",
")",
":",
"theseTokens",
"=",
"_flatten",
"(",
"t",
".",
"asList",
"(",
")",
")",
"if",
"(",
"theseTokens",
"!=",
"matchTokens",
")",
":",
"raise",
"ParseException",
"(",
"''",
",",
"0",
",",
"''",
")",
"rep",
".",
"setParseAction",
"(",
"mustMatchTheseTokens",
",",
"callDuringTry",
"=",
"True",
")",
"expr",
".",
"addParseAction",
"(",
"copyTokenToRepeater",
",",
"callDuringTry",
"=",
"True",
")",
"rep",
".",
"setName",
"(",
"(",
"'(prev) '",
"+",
"_ustr",
"(",
"expr",
")",
")",
")",
"return",
"rep"
] |
helper to define an expression that is indirectly defined from the tokens matched in a previous expression .
|
train
| true
|
11,431
|
def group_has_group_snapshot_filter():
return IMPL.group_has_group_snapshot_filter()
|
[
"def",
"group_has_group_snapshot_filter",
"(",
")",
":",
"return",
"IMPL",
".",
"group_has_group_snapshot_filter",
"(",
")"
] |
return a filter that checks if a group has group snapshots .
|
train
| false
|
11,432
|
def test_pmf_hist_normalization():
(x, h, w) = utils.pmf_hist(a_norm)
nose.tools.assert_almost_equal(sum(h), 1)
nose.tools.assert_less_equal(h.max(), 1)
|
[
"def",
"test_pmf_hist_normalization",
"(",
")",
":",
"(",
"x",
",",
"h",
",",
"w",
")",
"=",
"utils",
".",
"pmf_hist",
"(",
"a_norm",
")",
"nose",
".",
"tools",
".",
"assert_almost_equal",
"(",
"sum",
"(",
"h",
")",
",",
"1",
")",
"nose",
".",
"tools",
".",
"assert_less_equal",
"(",
"h",
".",
"max",
"(",
")",
",",
"1",
")"
] |
test that output data behaves like a pmf .
|
train
| false
|
11,435
|
def _pack_asf_image(mime, data, type=3, description=''):
tag_data = struct.pack('<bi', type, len(data))
tag_data += (mime.encode('utf-16-le') + '\x00\x00')
tag_data += (description.encode('utf-16-le') + '\x00\x00')
tag_data += data
return tag_data
|
[
"def",
"_pack_asf_image",
"(",
"mime",
",",
"data",
",",
"type",
"=",
"3",
",",
"description",
"=",
"''",
")",
":",
"tag_data",
"=",
"struct",
".",
"pack",
"(",
"'<bi'",
",",
"type",
",",
"len",
"(",
"data",
")",
")",
"tag_data",
"+=",
"(",
"mime",
".",
"encode",
"(",
"'utf-16-le'",
")",
"+",
"'\\x00\\x00'",
")",
"tag_data",
"+=",
"(",
"description",
".",
"encode",
"(",
"'utf-16-le'",
")",
"+",
"'\\x00\\x00'",
")",
"tag_data",
"+=",
"data",
"return",
"tag_data"
] |
pack image data for a wm/picture tag .
|
train
| true
|
11,436
|
def test_purge_old_cookies(config_stub, fake_save_manager):
line_parser_stub = [COOKIE1, COOKIE2, SESSION_COOKIE, EXPIRED_COOKIE]
jar = cookies.CookieJar(line_parser=line_parser_stub)
assert (len(jar.allCookies()) == 4)
jar.purge_old_cookies()
raw_cookies = [cookie.toRawForm().data() for cookie in jar.allCookies()]
assert (raw_cookies == [COOKIE1, COOKIE2, SESSION_COOKIE])
|
[
"def",
"test_purge_old_cookies",
"(",
"config_stub",
",",
"fake_save_manager",
")",
":",
"line_parser_stub",
"=",
"[",
"COOKIE1",
",",
"COOKIE2",
",",
"SESSION_COOKIE",
",",
"EXPIRED_COOKIE",
"]",
"jar",
"=",
"cookies",
".",
"CookieJar",
"(",
"line_parser",
"=",
"line_parser_stub",
")",
"assert",
"(",
"len",
"(",
"jar",
".",
"allCookies",
"(",
")",
")",
"==",
"4",
")",
"jar",
".",
"purge_old_cookies",
"(",
")",
"raw_cookies",
"=",
"[",
"cookie",
".",
"toRawForm",
"(",
")",
".",
"data",
"(",
")",
"for",
"cookie",
"in",
"jar",
".",
"allCookies",
"(",
")",
"]",
"assert",
"(",
"raw_cookies",
"==",
"[",
"COOKIE1",
",",
"COOKIE2",
",",
"SESSION_COOKIE",
"]",
")"
] |
test that expired cookies are deleted .
|
train
| false
|
11,437
|
def verbose_check_run_complete_f(f):
filepaths = [l.strip() for l in f]
for fp in filepaths:
if (not exists(fp)):
print ("At least one fp doesn't exist: %s" % fp)
return False
print 'All filepaths exist.'
return True
|
[
"def",
"verbose_check_run_complete_f",
"(",
"f",
")",
":",
"filepaths",
"=",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"f",
"]",
"for",
"fp",
"in",
"filepaths",
":",
"if",
"(",
"not",
"exists",
"(",
"fp",
")",
")",
":",
"print",
"(",
"\"At least one fp doesn't exist: %s\"",
"%",
"fp",
")",
"return",
"False",
"print",
"'All filepaths exist.'",
"return",
"True"
] |
return true if all filepaths exist f: file containing list of filepaths example f: f1 .
|
train
| false
|
11,438
|
def find_open_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
return port
|
[
"def",
"find_open_port",
"(",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"s",
".",
"bind",
"(",
"(",
"''",
",",
"0",
")",
")",
"port",
"=",
"s",
".",
"getsockname",
"(",
")",
"[",
"1",
"]",
"s",
".",
"close",
"(",
")",
"return",
"port"
] |
ask the os for an open port .
|
train
| false
|
11,441
|
def create_policy_version(policy_name, policy_document, set_as_default=None, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if (not isinstance(policy_document, six.string_types)):
policy_document = json.dumps(policy_document)
params = {}
for arg in ('set_as_default',):
if (locals()[arg] is not None):
params[arg] = locals()[arg]
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
ret = conn.create_policy_version(policy_arn, policy_document, **params)
vid = ret.get('create_policy_version_response', {}).get('create_policy_version_result', {}).get('policy_version', {}).get('version_id')
log.info('Created {0} policy version {1}.'.format(policy_name, vid))
return {'created': True, 'version_id': vid}
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to create {0} policy version.'
log.error(msg.format(policy_name))
return {'created': False, 'error': __utils__['boto.get_error'](e)}
|
[
"def",
"create_policy_version",
"(",
"policy_name",
",",
"policy_document",
",",
"set_as_default",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"(",
"not",
"isinstance",
"(",
"policy_document",
",",
"six",
".",
"string_types",
")",
")",
":",
"policy_document",
"=",
"json",
".",
"dumps",
"(",
"policy_document",
")",
"params",
"=",
"{",
"}",
"for",
"arg",
"in",
"(",
"'set_as_default'",
",",
")",
":",
"if",
"(",
"locals",
"(",
")",
"[",
"arg",
"]",
"is",
"not",
"None",
")",
":",
"params",
"[",
"arg",
"]",
"=",
"locals",
"(",
")",
"[",
"arg",
"]",
"policy_arn",
"=",
"_get_policy_arn",
"(",
"policy_name",
",",
"region",
",",
"key",
",",
"keyid",
",",
"profile",
")",
"try",
":",
"ret",
"=",
"conn",
".",
"create_policy_version",
"(",
"policy_arn",
",",
"policy_document",
",",
"**",
"params",
")",
"vid",
"=",
"ret",
".",
"get",
"(",
"'create_policy_version_response'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'create_policy_version_result'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'policy_version'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'version_id'",
")",
"log",
".",
"info",
"(",
"'Created {0} policy version {1}.'",
".",
"format",
"(",
"policy_name",
",",
"vid",
")",
")",
"return",
"{",
"'created'",
":",
"True",
",",
"'version_id'",
":",
"vid",
"}",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"e",
")",
"msg",
"=",
"'Failed to create {0} policy version.'",
"log",
".",
"error",
"(",
"msg",
".",
"format",
"(",
"policy_name",
")",
")",
"return",
"{",
"'created'",
":",
"False",
",",
"'error'",
":",
"__utils__",
"[",
"'boto.get_error'",
"]",
"(",
"e",
")",
"}"
] |
given a valid config .
|
train
| true
|
11,442
|
def validate_notempty(root, value, default):
if value:
return (None, value)
else:
return (None, default)
|
[
"def",
"validate_notempty",
"(",
"root",
",",
"value",
",",
"default",
")",
":",
"if",
"value",
":",
"return",
"(",
"None",
",",
"value",
")",
"else",
":",
"return",
"(",
"None",
",",
"default",
")"
] |
if value is empty .
|
train
| false
|
11,443
|
def build_shed_tool_conf_select_field(app):
options = []
for dynamic_tool_conf_filename in app.toolbox.dynamic_conf_filenames():
if dynamic_tool_conf_filename.startswith('./'):
option_label = dynamic_tool_conf_filename.replace('./', '', 1)
else:
option_label = dynamic_tool_conf_filename
options.append((option_label, dynamic_tool_conf_filename))
select_field = SelectField(name='shed_tool_conf')
for option_tup in options:
select_field.add_option(option_tup[0], option_tup[1])
return select_field
|
[
"def",
"build_shed_tool_conf_select_field",
"(",
"app",
")",
":",
"options",
"=",
"[",
"]",
"for",
"dynamic_tool_conf_filename",
"in",
"app",
".",
"toolbox",
".",
"dynamic_conf_filenames",
"(",
")",
":",
"if",
"dynamic_tool_conf_filename",
".",
"startswith",
"(",
"'./'",
")",
":",
"option_label",
"=",
"dynamic_tool_conf_filename",
".",
"replace",
"(",
"'./'",
",",
"''",
",",
"1",
")",
"else",
":",
"option_label",
"=",
"dynamic_tool_conf_filename",
"options",
".",
"append",
"(",
"(",
"option_label",
",",
"dynamic_tool_conf_filename",
")",
")",
"select_field",
"=",
"SelectField",
"(",
"name",
"=",
"'shed_tool_conf'",
")",
"for",
"option_tup",
"in",
"options",
":",
"select_field",
".",
"add_option",
"(",
"option_tup",
"[",
"0",
"]",
",",
"option_tup",
"[",
"1",
"]",
")",
"return",
"select_field"
] |
build a selectfield whose options are the keys in app .
|
train
| false
|
11,444
|
def is_deleted(kev):
return (kev.fflags & select.KQ_NOTE_DELETE)
|
[
"def",
"is_deleted",
"(",
"kev",
")",
":",
"return",
"(",
"kev",
".",
"fflags",
"&",
"select",
".",
"KQ_NOTE_DELETE",
")"
] |
determines whether the given kevent represents deletion .
|
train
| false
|
11,445
|
def samplewise_norm(x, rescale=None, samplewise_center=False, samplewise_std_normalization=False, channel_index=2, epsilon=1e-07):
if rescale:
x *= rescale
if (x.shape[channel_index] == 1):
if samplewise_center:
x = (x - np.mean(x))
if samplewise_std_normalization:
x = (x / np.std(x))
return x
elif (x.shape[channel_index] == 3):
if samplewise_center:
x = (x - np.mean(x, axis=channel_index, keepdims=True))
if samplewise_std_normalization:
x = (x / (np.std(x, axis=channel_index, keepdims=True) + epsilon))
return x
else:
raise Exception(('Unsupported channels %d' % x.shape[channel_index]))
|
[
"def",
"samplewise_norm",
"(",
"x",
",",
"rescale",
"=",
"None",
",",
"samplewise_center",
"=",
"False",
",",
"samplewise_std_normalization",
"=",
"False",
",",
"channel_index",
"=",
"2",
",",
"epsilon",
"=",
"1e-07",
")",
":",
"if",
"rescale",
":",
"x",
"*=",
"rescale",
"if",
"(",
"x",
".",
"shape",
"[",
"channel_index",
"]",
"==",
"1",
")",
":",
"if",
"samplewise_center",
":",
"x",
"=",
"(",
"x",
"-",
"np",
".",
"mean",
"(",
"x",
")",
")",
"if",
"samplewise_std_normalization",
":",
"x",
"=",
"(",
"x",
"/",
"np",
".",
"std",
"(",
"x",
")",
")",
"return",
"x",
"elif",
"(",
"x",
".",
"shape",
"[",
"channel_index",
"]",
"==",
"3",
")",
":",
"if",
"samplewise_center",
":",
"x",
"=",
"(",
"x",
"-",
"np",
".",
"mean",
"(",
"x",
",",
"axis",
"=",
"channel_index",
",",
"keepdims",
"=",
"True",
")",
")",
"if",
"samplewise_std_normalization",
":",
"x",
"=",
"(",
"x",
"/",
"(",
"np",
".",
"std",
"(",
"x",
",",
"axis",
"=",
"channel_index",
",",
"keepdims",
"=",
"True",
")",
"+",
"epsilon",
")",
")",
"return",
"x",
"else",
":",
"raise",
"Exception",
"(",
"(",
"'Unsupported channels %d'",
"%",
"x",
".",
"shape",
"[",
"channel_index",
"]",
")",
")"
] |
normalize an image by rescale .
|
train
| true
|
11,446
|
def canon(*rules, **kwargs):
return exhaust(top_down(exhaust(do_one(*rules)), **kwargs))
|
[
"def",
"canon",
"(",
"*",
"rules",
",",
"**",
"kwargs",
")",
":",
"return",
"exhaust",
"(",
"top_down",
"(",
"exhaust",
"(",
"do_one",
"(",
"*",
"rules",
")",
")",
",",
"**",
"kwargs",
")",
")"
] |
strategy for canonicalization apply each rule in a bottom_up fashion through the tree .
|
train
| false
|
11,447
|
def make_object(cls, **attrs):
class TestObject(cls, ):
"Class that inherits from the given class, but without __slots__.\n\n Note that classes with __slots__ can't have arbitrary attributes monkey-\n patched in, so this is a class that is exactly the same only with a\n __dict__ instead of __slots__.\n "
pass
TestObject.__name__ = ('TestObject_' + cls.__name__)
obj = TestObject()
for (name, value) in attrs.items():
if (name == 'id'):
sha = FixedSha(value)
obj.sha = (lambda : sha)
else:
setattr(obj, name, value)
return obj
|
[
"def",
"make_object",
"(",
"cls",
",",
"**",
"attrs",
")",
":",
"class",
"TestObject",
"(",
"cls",
",",
")",
":",
"pass",
"TestObject",
".",
"__name__",
"=",
"(",
"'TestObject_'",
"+",
"cls",
".",
"__name__",
")",
"obj",
"=",
"TestObject",
"(",
")",
"for",
"(",
"name",
",",
"value",
")",
"in",
"attrs",
".",
"items",
"(",
")",
":",
"if",
"(",
"name",
"==",
"'id'",
")",
":",
"sha",
"=",
"FixedSha",
"(",
"value",
")",
"obj",
".",
"sha",
"=",
"(",
"lambda",
":",
"sha",
")",
"else",
":",
"setattr",
"(",
"obj",
",",
"name",
",",
"value",
")",
"return",
"obj"
] |
make an object for testing and assign some members .
|
train
| false
|
11,450
|
def _create_recent_enrollment_message(course_enrollments, course_modes):
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
enroll_messages = [{'course_id': enrollment.course_overview.id, 'course_name': enrollment.course_overview.display_name, 'allow_donation': _allow_donation(course_modes, enrollment.course_overview.id, enrollment)} for enrollment in recently_enrolled_courses]
platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string('enrollment/course_enrollment_message.html', {'course_enrollment_messages': enroll_messages, 'platform_name': platform_name})
|
[
"def",
"_create_recent_enrollment_message",
"(",
"course_enrollments",
",",
"course_modes",
")",
":",
"recently_enrolled_courses",
"=",
"_get_recently_enrolled_courses",
"(",
"course_enrollments",
")",
"if",
"recently_enrolled_courses",
":",
"enroll_messages",
"=",
"[",
"{",
"'course_id'",
":",
"enrollment",
".",
"course_overview",
".",
"id",
",",
"'course_name'",
":",
"enrollment",
".",
"course_overview",
".",
"display_name",
",",
"'allow_donation'",
":",
"_allow_donation",
"(",
"course_modes",
",",
"enrollment",
".",
"course_overview",
".",
"id",
",",
"enrollment",
")",
"}",
"for",
"enrollment",
"in",
"recently_enrolled_courses",
"]",
"platform_name",
"=",
"configuration_helpers",
".",
"get_value",
"(",
"'platform_name'",
",",
"settings",
".",
"PLATFORM_NAME",
")",
"return",
"render_to_string",
"(",
"'enrollment/course_enrollment_message.html'",
",",
"{",
"'course_enrollment_messages'",
":",
"enroll_messages",
",",
"'platform_name'",
":",
"platform_name",
"}",
")"
] |
builds a recent course enrollment message .
|
train
| false
|
11,451
|
def show_negative_chains(model_path):
model = serial.load(model_path)
try:
control.push_load_data(False)
dataset = yaml_parse.load(model.dataset_yaml_src)
finally:
control.pop_load_data()
try:
layer_to_chains = model.layer_to_chains
except AttributeError:
print("This model doesn't have negative chains.")
quit((-1))
vis_chains = get_vis_chains(layer_to_chains, model, dataset)
m = vis_chains.shape[0]
grid_shape = get_grid_shape(m)
return create_patch_viewer(grid_shape, vis_chains, m)
|
[
"def",
"show_negative_chains",
"(",
"model_path",
")",
":",
"model",
"=",
"serial",
".",
"load",
"(",
"model_path",
")",
"try",
":",
"control",
".",
"push_load_data",
"(",
"False",
")",
"dataset",
"=",
"yaml_parse",
".",
"load",
"(",
"model",
".",
"dataset_yaml_src",
")",
"finally",
":",
"control",
".",
"pop_load_data",
"(",
")",
"try",
":",
"layer_to_chains",
"=",
"model",
".",
"layer_to_chains",
"except",
"AttributeError",
":",
"print",
"(",
"\"This model doesn't have negative chains.\"",
")",
"quit",
"(",
"(",
"-",
"1",
")",
")",
"vis_chains",
"=",
"get_vis_chains",
"(",
"layer_to_chains",
",",
"model",
",",
"dataset",
")",
"m",
"=",
"vis_chains",
".",
"shape",
"[",
"0",
"]",
"grid_shape",
"=",
"get_grid_shape",
"(",
"m",
")",
"return",
"create_patch_viewer",
"(",
"grid_shape",
",",
"vis_chains",
",",
"m",
")"
] |
display negative chains .
|
train
| false
|
11,452
|
def glsa_check_list(glsa_list):
cmd = 'glsa-check --quiet --nocolor --cve --list '
if isinstance(glsa_list, list):
for glsa in glsa_list:
cmd += (glsa + ' ')
elif ((glsa_list == 'all') or (glsa_list == 'affected')):
cmd += glsa_list
ret = dict()
out = __salt__['cmd.run'](cmd, python_shell=False).split('\n')
ret = _glsa_list_process_output(out)
return ret
|
[
"def",
"glsa_check_list",
"(",
"glsa_list",
")",
":",
"cmd",
"=",
"'glsa-check --quiet --nocolor --cve --list '",
"if",
"isinstance",
"(",
"glsa_list",
",",
"list",
")",
":",
"for",
"glsa",
"in",
"glsa_list",
":",
"cmd",
"+=",
"(",
"glsa",
"+",
"' '",
")",
"elif",
"(",
"(",
"glsa_list",
"==",
"'all'",
")",
"or",
"(",
"glsa_list",
"==",
"'affected'",
")",
")",
":",
"cmd",
"+=",
"glsa_list",
"ret",
"=",
"dict",
"(",
")",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
".",
"split",
"(",
"'\\n'",
")",
"ret",
"=",
"_glsa_list_process_output",
"(",
"out",
")",
"return",
"ret"
] |
list the status of gentoo linux security advisories glsa_list can contain an arbitrary number of glsa ids .
|
train
| true
|
11,454
|
def includeme(config):
settings = config.get_settings()
config.include('pyramid_tm')
session_factory = get_session_factory(get_engine(settings))
config.registry['dbsession_factory'] = session_factory
config.add_request_method((lambda r: get_tm_session(session_factory, r.tm)), 'dbsession', reify=True)
|
[
"def",
"includeme",
"(",
"config",
")",
":",
"settings",
"=",
"config",
".",
"get_settings",
"(",
")",
"config",
".",
"include",
"(",
"'pyramid_tm'",
")",
"session_factory",
"=",
"get_session_factory",
"(",
"get_engine",
"(",
"settings",
")",
")",
"config",
".",
"registry",
"[",
"'dbsession_factory'",
"]",
"=",
"session_factory",
"config",
".",
"add_request_method",
"(",
"(",
"lambda",
"r",
":",
"get_tm_session",
"(",
"session_factory",
",",
"r",
".",
"tm",
")",
")",
",",
"'dbsession'",
",",
"reify",
"=",
"True",
")"
] |
initialize the model for a pyramid app .
|
train
| false
|
11,456
|
def _base_url():
base_url = 'http://locahost:4400'
if ('proxy' in __opts__):
base_url = __opts__['proxy'].get('base_url', base_url)
return base_url
|
[
"def",
"_base_url",
"(",
")",
":",
"base_url",
"=",
"'http://locahost:4400'",
"if",
"(",
"'proxy'",
"in",
"__opts__",
")",
":",
"base_url",
"=",
"__opts__",
"[",
"'proxy'",
"]",
".",
"get",
"(",
"'base_url'",
",",
"base_url",
")",
"return",
"base_url"
] |
return the proxy configured base url .
|
train
| false
|
11,457
|
def view_splitter(request, su=None, adm=None):
if is_role_request(request, 'super'):
return su(request)
elif is_role_request(request, 'admin'):
return adm(request)
else:
return HttpResponseRedirect(reverse('login'))
|
[
"def",
"view_splitter",
"(",
"request",
",",
"su",
"=",
"None",
",",
"adm",
"=",
"None",
")",
":",
"if",
"is_role_request",
"(",
"request",
",",
"'super'",
")",
":",
"return",
"su",
"(",
"request",
")",
"elif",
"is_role_request",
"(",
"request",
",",
"'admin'",
")",
":",
"return",
"adm",
"(",
"request",
")",
"else",
":",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'login'",
")",
")"
] |
for different user use different view .
|
train
| false
|
11,458
|
def powMod(x, y, mod):
x = mpz(x)
y = mpz(y)
mod = mpz(mod)
return pow(x, y, mod)
|
[
"def",
"powMod",
"(",
"x",
",",
"y",
",",
"mod",
")",
":",
"x",
"=",
"mpz",
"(",
"x",
")",
"y",
"=",
"mpz",
"(",
"y",
")",
"mod",
"=",
"mpz",
"(",
"mod",
")",
"return",
"pow",
"(",
"x",
",",
"y",
",",
"mod",
")"
] |
calculate and return x to the power of y mod mod .
|
train
| false
|
11,459
|
def _symbols(name, n):
try:
lsyms = _symbols_cache[name]
except KeyError:
lsyms = []
_symbols_cache[name] = lsyms
while (len(lsyms) < n):
lsyms.append(Dummy(('%s%i' % (name, len(lsyms)))))
return lsyms[:n]
|
[
"def",
"_symbols",
"(",
"name",
",",
"n",
")",
":",
"try",
":",
"lsyms",
"=",
"_symbols_cache",
"[",
"name",
"]",
"except",
"KeyError",
":",
"lsyms",
"=",
"[",
"]",
"_symbols_cache",
"[",
"name",
"]",
"=",
"lsyms",
"while",
"(",
"len",
"(",
"lsyms",
")",
"<",
"n",
")",
":",
"lsyms",
".",
"append",
"(",
"Dummy",
"(",
"(",
"'%s%i'",
"%",
"(",
"name",
",",
"len",
"(",
"lsyms",
")",
")",
")",
")",
")",
"return",
"lsyms",
"[",
":",
"n",
"]"
] |
get vector of symbols local to this module .
|
train
| false
|
11,460
|
@testing.requires_testing_data
def test_bem_model():
tempdir = _TempDir()
fname_temp = op.join(tempdir, 'temp-bem.fif')
for (kwargs, fname) in zip((dict(), dict(conductivity=[0.3])), [fname_bem_3, fname_bem_1]):
model = make_bem_model('sample', ico=2, subjects_dir=subjects_dir, **kwargs)
model_c = read_bem_surfaces(fname)
_compare_bem_surfaces(model, model_c)
write_bem_surfaces(fname_temp, model)
model_read = read_bem_surfaces(fname_temp)
_compare_bem_surfaces(model, model_c)
_compare_bem_surfaces(model_read, model_c)
assert_raises(ValueError, make_bem_model, 'sample', conductivity=[0.3, 0.006], subjects_dir=subjects_dir)
|
[
"@",
"testing",
".",
"requires_testing_data",
"def",
"test_bem_model",
"(",
")",
":",
"tempdir",
"=",
"_TempDir",
"(",
")",
"fname_temp",
"=",
"op",
".",
"join",
"(",
"tempdir",
",",
"'temp-bem.fif'",
")",
"for",
"(",
"kwargs",
",",
"fname",
")",
"in",
"zip",
"(",
"(",
"dict",
"(",
")",
",",
"dict",
"(",
"conductivity",
"=",
"[",
"0.3",
"]",
")",
")",
",",
"[",
"fname_bem_3",
",",
"fname_bem_1",
"]",
")",
":",
"model",
"=",
"make_bem_model",
"(",
"'sample'",
",",
"ico",
"=",
"2",
",",
"subjects_dir",
"=",
"subjects_dir",
",",
"**",
"kwargs",
")",
"model_c",
"=",
"read_bem_surfaces",
"(",
"fname",
")",
"_compare_bem_surfaces",
"(",
"model",
",",
"model_c",
")",
"write_bem_surfaces",
"(",
"fname_temp",
",",
"model",
")",
"model_read",
"=",
"read_bem_surfaces",
"(",
"fname_temp",
")",
"_compare_bem_surfaces",
"(",
"model",
",",
"model_c",
")",
"_compare_bem_surfaces",
"(",
"model_read",
",",
"model_c",
")",
"assert_raises",
"(",
"ValueError",
",",
"make_bem_model",
",",
"'sample'",
",",
"conductivity",
"=",
"[",
"0.3",
",",
"0.006",
"]",
",",
"subjects_dir",
"=",
"subjects_dir",
")"
] |
test bem model creation from python with i/o .
|
train
| false
|
11,461
|
def project_versions(request, project_slug):
project = get_object_or_404(Project.objects.protected(request.user), slug=project_slug)
versions = Version.objects.public(user=request.user, project=project, only_active=False)
active_versions = versions.filter(active=True)
inactive_versions = versions.filter(active=False)
inactive_filter = VersionSlugFilter(request.GET, queryset=inactive_versions)
active_filter = VersionSlugFilter(request.GET, queryset=active_versions)
wiped = request.GET.get('wipe', '')
wiped_version = versions.filter(slug=wiped)
if (wiped and wiped_version.count()):
messages.success(request, ('Version wiped: ' + wiped))
return render_to_response('projects/project_version_list.html', {'inactive_filter': inactive_filter, 'active_filter': active_filter, 'project': project}, context_instance=RequestContext(request))
|
[
"def",
"project_versions",
"(",
"request",
",",
"project_slug",
")",
":",
"project",
"=",
"get_object_or_404",
"(",
"Project",
".",
"objects",
".",
"protected",
"(",
"request",
".",
"user",
")",
",",
"slug",
"=",
"project_slug",
")",
"versions",
"=",
"Version",
".",
"objects",
".",
"public",
"(",
"user",
"=",
"request",
".",
"user",
",",
"project",
"=",
"project",
",",
"only_active",
"=",
"False",
")",
"active_versions",
"=",
"versions",
".",
"filter",
"(",
"active",
"=",
"True",
")",
"inactive_versions",
"=",
"versions",
".",
"filter",
"(",
"active",
"=",
"False",
")",
"inactive_filter",
"=",
"VersionSlugFilter",
"(",
"request",
".",
"GET",
",",
"queryset",
"=",
"inactive_versions",
")",
"active_filter",
"=",
"VersionSlugFilter",
"(",
"request",
".",
"GET",
",",
"queryset",
"=",
"active_versions",
")",
"wiped",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'wipe'",
",",
"''",
")",
"wiped_version",
"=",
"versions",
".",
"filter",
"(",
"slug",
"=",
"wiped",
")",
"if",
"(",
"wiped",
"and",
"wiped_version",
".",
"count",
"(",
")",
")",
":",
"messages",
".",
"success",
"(",
"request",
",",
"(",
"'Version wiped: '",
"+",
"wiped",
")",
")",
"return",
"render_to_response",
"(",
"'projects/project_version_list.html'",
",",
"{",
"'inactive_filter'",
":",
"inactive_filter",
",",
"'active_filter'",
":",
"active_filter",
",",
"'project'",
":",
"project",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
] |
project versions view shows the available versions and lets the user choose which ones he would like to have built .
|
train
| false
|
11,462
|
def is_attached_console_visible():
return IsWindowVisible(console_window_handle)
|
[
"def",
"is_attached_console_visible",
"(",
")",
":",
"return",
"IsWindowVisible",
"(",
"console_window_handle",
")"
] |
return true if attached console window is visible .
|
train
| false
|
11,463
|
def is_isomorphism_possible(a, b):
n = a.minpoly.degree()
m = b.minpoly.degree()
if ((m % n) != 0):
return False
if (n == m):
return True
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
(i, k, half) = (1, (m // n), (db // 2))
while True:
p = sieve[i]
P = (p ** k)
if (P > half):
break
if (((da % p) % 2) and (not (db % P))):
return False
i += 1
return True
|
[
"def",
"is_isomorphism_possible",
"(",
"a",
",",
"b",
")",
":",
"n",
"=",
"a",
".",
"minpoly",
".",
"degree",
"(",
")",
"m",
"=",
"b",
".",
"minpoly",
".",
"degree",
"(",
")",
"if",
"(",
"(",
"m",
"%",
"n",
")",
"!=",
"0",
")",
":",
"return",
"False",
"if",
"(",
"n",
"==",
"m",
")",
":",
"return",
"True",
"da",
"=",
"a",
".",
"minpoly",
".",
"discriminant",
"(",
")",
"db",
"=",
"b",
".",
"minpoly",
".",
"discriminant",
"(",
")",
"(",
"i",
",",
"k",
",",
"half",
")",
"=",
"(",
"1",
",",
"(",
"m",
"//",
"n",
")",
",",
"(",
"db",
"//",
"2",
")",
")",
"while",
"True",
":",
"p",
"=",
"sieve",
"[",
"i",
"]",
"P",
"=",
"(",
"p",
"**",
"k",
")",
"if",
"(",
"P",
">",
"half",
")",
":",
"break",
"if",
"(",
"(",
"(",
"da",
"%",
"p",
")",
"%",
"2",
")",
"and",
"(",
"not",
"(",
"db",
"%",
"P",
")",
")",
")",
":",
"return",
"False",
"i",
"+=",
"1",
"return",
"True"
] |
returns true if there is a chance for isomorphism .
|
train
| false
|
11,464
|
@debug
@timeit
@cacheit
def mrv_leadterm(e, x):
Omega = SubsSet()
if (not e.has(x)):
return (e, S.Zero)
if (Omega == SubsSet()):
(Omega, exps) = mrv(e, x)
if (not Omega):
series = calculate_series(e, x)
(c0, e0) = series.leadterm(x)
if (e0 != 0):
raise ValueError('e0 should be 0')
return (c0, e0)
if (x in Omega):
Omega_up = moveup2(Omega, x)
e_up = moveup([e], x)[0]
exps_up = moveup([exps], x)[0]
e = e_up
Omega = Omega_up
exps = exps_up
w = Dummy('w', real=True, positive=True, finite=True)
(f, logw) = rewrite(exps, Omega, x, w)
series = calculate_series(f, w, logx=logw)
return series.leadterm(w)
|
[
"@",
"debug",
"@",
"timeit",
"@",
"cacheit",
"def",
"mrv_leadterm",
"(",
"e",
",",
"x",
")",
":",
"Omega",
"=",
"SubsSet",
"(",
")",
"if",
"(",
"not",
"e",
".",
"has",
"(",
"x",
")",
")",
":",
"return",
"(",
"e",
",",
"S",
".",
"Zero",
")",
"if",
"(",
"Omega",
"==",
"SubsSet",
"(",
")",
")",
":",
"(",
"Omega",
",",
"exps",
")",
"=",
"mrv",
"(",
"e",
",",
"x",
")",
"if",
"(",
"not",
"Omega",
")",
":",
"series",
"=",
"calculate_series",
"(",
"e",
",",
"x",
")",
"(",
"c0",
",",
"e0",
")",
"=",
"series",
".",
"leadterm",
"(",
"x",
")",
"if",
"(",
"e0",
"!=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'e0 should be 0'",
")",
"return",
"(",
"c0",
",",
"e0",
")",
"if",
"(",
"x",
"in",
"Omega",
")",
":",
"Omega_up",
"=",
"moveup2",
"(",
"Omega",
",",
"x",
")",
"e_up",
"=",
"moveup",
"(",
"[",
"e",
"]",
",",
"x",
")",
"[",
"0",
"]",
"exps_up",
"=",
"moveup",
"(",
"[",
"exps",
"]",
",",
"x",
")",
"[",
"0",
"]",
"e",
"=",
"e_up",
"Omega",
"=",
"Omega_up",
"exps",
"=",
"exps_up",
"w",
"=",
"Dummy",
"(",
"'w'",
",",
"real",
"=",
"True",
",",
"positive",
"=",
"True",
",",
"finite",
"=",
"True",
")",
"(",
"f",
",",
"logw",
")",
"=",
"rewrite",
"(",
"exps",
",",
"Omega",
",",
"x",
",",
"w",
")",
"series",
"=",
"calculate_series",
"(",
"f",
",",
"w",
",",
"logx",
"=",
"logw",
")",
"return",
"series",
".",
"leadterm",
"(",
"w",
")"
] |
returns for e .
|
train
| false
|
11,465
|
def dense(width, height):
d = {('x', 0, i): i for i in range(width)}
for j in range(1, height):
d.update({('x', j, i): (noop, [('x', (j - 1), k) for k in range(width)]) for i in range(width)})
return (d, [('x', (height - 1), i) for i in range(width)])
|
[
"def",
"dense",
"(",
"width",
",",
"height",
")",
":",
"d",
"=",
"{",
"(",
"'x'",
",",
"0",
",",
"i",
")",
":",
"i",
"for",
"i",
"in",
"range",
"(",
"width",
")",
"}",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"height",
")",
":",
"d",
".",
"update",
"(",
"{",
"(",
"'x'",
",",
"j",
",",
"i",
")",
":",
"(",
"noop",
",",
"[",
"(",
"'x'",
",",
"(",
"j",
"-",
"1",
")",
",",
"k",
")",
"for",
"k",
"in",
"range",
"(",
"width",
")",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"width",
")",
"}",
")",
"return",
"(",
"d",
",",
"[",
"(",
"'x'",
",",
"(",
"height",
"-",
"1",
")",
",",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"width",
")",
"]",
")"
] |
full barriers between each step .
|
train
| false
|
11,466
|
def vm_stat(field):
out = sh('vm_stat')
for line in out.split('\n'):
if (field in line):
break
else:
raise ValueError('line not found')
return (int(re.search('\\d+', line).group(0)) * PAGESIZE)
|
[
"def",
"vm_stat",
"(",
"field",
")",
":",
"out",
"=",
"sh",
"(",
"'vm_stat'",
")",
"for",
"line",
"in",
"out",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"(",
"field",
"in",
"line",
")",
":",
"break",
"else",
":",
"raise",
"ValueError",
"(",
"'line not found'",
")",
"return",
"(",
"int",
"(",
"re",
".",
"search",
"(",
"'\\\\d+'",
",",
"line",
")",
".",
"group",
"(",
"0",
")",
")",
"*",
"PAGESIZE",
")"
] |
wrapper around vm_stat cmdline utility .
|
train
| false
|
11,468
|
def validate_int(s):
try:
return int(s)
except ValueError:
raise ValueError(('Could not convert "%s" to int' % s))
|
[
"def",
"validate_int",
"(",
"s",
")",
":",
"try",
":",
"return",
"int",
"(",
"s",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"(",
"'Could not convert \"%s\" to int'",
"%",
"s",
")",
")"
] |
convert s to int or raise .
|
train
| false
|
11,471
|
def count_from_n_factory(start):
def f(index, collection):
return (index + start)
try:
f.__name__ = ('count_from_%i' % start)
except TypeError:
pass
return f
|
[
"def",
"count_from_n_factory",
"(",
"start",
")",
":",
"def",
"f",
"(",
"index",
",",
"collection",
")",
":",
"return",
"(",
"index",
"+",
"start",
")",
"try",
":",
"f",
".",
"__name__",
"=",
"(",
"'count_from_%i'",
"%",
"start",
")",
"except",
"TypeError",
":",
"pass",
"return",
"f"
] |
numbering function: consecutive integers starting at arbitrary start .
|
train
| false
|
11,472
|
def get_collection_info(collection):
return RESOURCE_ATTRIBUTE_MAP.get(collection)
|
[
"def",
"get_collection_info",
"(",
"collection",
")",
":",
"return",
"RESOURCE_ATTRIBUTE_MAP",
".",
"get",
"(",
"collection",
")"
] |
helper function to retrieve attribute info .
|
train
| false
|
11,474
|
def getCraftSequence():
return 'chop preface outset mill multiply drill lift flow feed home lash fillet limit unpause alteration export'.split()
|
[
"def",
"getCraftSequence",
"(",
")",
":",
"return",
"'chop preface outset mill multiply drill lift flow feed home lash fillet limit unpause alteration export'",
".",
"split",
"(",
")"
] |
get the cutting craft sequence .
|
train
| false
|
11,476
|
def preferredencoding():
try:
pref = locale.getpreferredencoding()
u'TEST'.encode(pref)
except:
pref = u'UTF-8'
return pref
|
[
"def",
"preferredencoding",
"(",
")",
":",
"try",
":",
"pref",
"=",
"locale",
".",
"getpreferredencoding",
"(",
")",
".",
"encode",
"(",
"pref",
")",
"except",
":",
"pref",
"=",
"u'UTF-8'",
"return",
"pref"
] |
get preferred encoding .
|
train
| false
|
11,477
|
def init(mpstate):
return SerialModule(mpstate)
|
[
"def",
"init",
"(",
"mpstate",
")",
":",
"return",
"SerialModule",
"(",
"mpstate",
")"
] |
initialise module .
|
train
| false
|
11,478
|
def _get_model(model_identifier):
try:
Model = models.get_model(*model_identifier.split('.'))
except TypeError:
Model = None
if (Model is None):
raise base.DeserializationError(("Invalid model identifier: '%s'" % model_identifier))
return Model
|
[
"def",
"_get_model",
"(",
"model_identifier",
")",
":",
"try",
":",
"Model",
"=",
"models",
".",
"get_model",
"(",
"*",
"model_identifier",
".",
"split",
"(",
"'.'",
")",
")",
"except",
"TypeError",
":",
"Model",
"=",
"None",
"if",
"(",
"Model",
"is",
"None",
")",
":",
"raise",
"base",
".",
"DeserializationError",
"(",
"(",
"\"Invalid model identifier: '%s'\"",
"%",
"model_identifier",
")",
")",
"return",
"Model"
] |
helper to look up a model from an "app_label .
|
train
| false
|
11,479
|
def get_enabled():
return _get_svc_list('YES')
|
[
"def",
"get_enabled",
"(",
")",
":",
"return",
"_get_svc_list",
"(",
"'YES'",
")"
] |
return a list of all enabled services cli example: .
|
train
| false
|
11,480
|
@pytest.mark.posix
def test_custom_environment_system_env(monkeypatch, pyproc):
monkeypatch.setenv('QUTE_TEST_ENV', 'blubb')
pyproc.code = 'import os; print(os.environ["QUTE_TEST_ENV"])'
pyproc.start(env={})
pyproc.wait_for(data='blubb')
|
[
"@",
"pytest",
".",
"mark",
".",
"posix",
"def",
"test_custom_environment_system_env",
"(",
"monkeypatch",
",",
"pyproc",
")",
":",
"monkeypatch",
".",
"setenv",
"(",
"'QUTE_TEST_ENV'",
",",
"'blubb'",
")",
"pyproc",
".",
"code",
"=",
"'import os; print(os.environ[\"QUTE_TEST_ENV\"])'",
"pyproc",
".",
"start",
"(",
"env",
"=",
"{",
"}",
")",
"pyproc",
".",
"wait_for",
"(",
"data",
"=",
"'blubb'",
")"
] |
when env= .
|
train
| false
|
11,481
|
def queryOutputLength(expression, payload):
infoMsg = 'retrieving the length of query output'
logger.info(infoMsg)
start = time.time()
lengthExprUnescaped = agent.forgeQueryOutputLength(expression)
(count, length) = bisection(payload, lengthExprUnescaped, charsetType=CHARSET_TYPE.DIGITS)
debugMsg = ('performed %d queries in %.2f seconds' % (count, calculateDeltaSeconds(start)))
logger.debug(debugMsg)
if (length == ' '):
length = 0
return length
|
[
"def",
"queryOutputLength",
"(",
"expression",
",",
"payload",
")",
":",
"infoMsg",
"=",
"'retrieving the length of query output'",
"logger",
".",
"info",
"(",
"infoMsg",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"lengthExprUnescaped",
"=",
"agent",
".",
"forgeQueryOutputLength",
"(",
"expression",
")",
"(",
"count",
",",
"length",
")",
"=",
"bisection",
"(",
"payload",
",",
"lengthExprUnescaped",
",",
"charsetType",
"=",
"CHARSET_TYPE",
".",
"DIGITS",
")",
"debugMsg",
"=",
"(",
"'performed %d queries in %.2f seconds'",
"%",
"(",
"count",
",",
"calculateDeltaSeconds",
"(",
"start",
")",
")",
")",
"logger",
".",
"debug",
"(",
"debugMsg",
")",
"if",
"(",
"length",
"==",
"' '",
")",
":",
"length",
"=",
"0",
"return",
"length"
] |
returns the query output length .
|
train
| false
|
11,482
|
def anon_url(*url):
url = u''.join(map(str, url))
uri_pattern = u'^https?://'
unicode_uri_pattern = re.compile(uri_pattern, re.UNICODE)
if (not re.search(unicode_uri_pattern, url)):
url = (u'http://' + url)
return u'{}{}'.format(sickrage.srCore.srConfig.ANON_REDIRECT, url)
|
[
"def",
"anon_url",
"(",
"*",
"url",
")",
":",
"url",
"=",
"u''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"url",
")",
")",
"uri_pattern",
"=",
"u'^https?://'",
"unicode_uri_pattern",
"=",
"re",
".",
"compile",
"(",
"uri_pattern",
",",
"re",
".",
"UNICODE",
")",
"if",
"(",
"not",
"re",
".",
"search",
"(",
"unicode_uri_pattern",
",",
"url",
")",
")",
":",
"url",
"=",
"(",
"u'http://'",
"+",
"url",
")",
"return",
"u'{}{}'",
".",
"format",
"(",
"sickrage",
".",
"srCore",
".",
"srConfig",
".",
"ANON_REDIRECT",
",",
"url",
")"
] |
return a url string consisting of the anonymous redirect url and an arbitrary number of values appended .
|
train
| false
|
11,483
|
def make_secure_stub(credentials, user_agent, stub_class, host, extra_options=None):
channel = make_secure_channel(credentials, user_agent, host, extra_options=extra_options)
return stub_class(channel)
|
[
"def",
"make_secure_stub",
"(",
"credentials",
",",
"user_agent",
",",
"stub_class",
",",
"host",
",",
"extra_options",
"=",
"None",
")",
":",
"channel",
"=",
"make_secure_channel",
"(",
"credentials",
",",
"user_agent",
",",
"host",
",",
"extra_options",
"=",
"extra_options",
")",
"return",
"stub_class",
"(",
"channel",
")"
] |
makes a secure stub for an rpc service .
|
train
| true
|
11,484
|
def ignore_deprecation_warnings(func):
def wrapper(*args, **kw):
with check_warnings(quiet=True, *_deprecations):
return func(*args, **kw)
return wrapper
|
[
"def",
"ignore_deprecation_warnings",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"**",
"kw",
")",
":",
"with",
"check_warnings",
"(",
"quiet",
"=",
"True",
",",
"*",
"_deprecations",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"**",
"kw",
")",
"return",
"wrapper"
] |
ignore deprecation warnings for the wrapped testcase or test method this is useful as the test runner can be set to raise an exception on a deprecation warning .
|
train
| false
|
11,485
|
def findFreePort(interface='127.0.0.1', type=socket.SOCK_STREAM):
family = socket.AF_INET
probe = socket.socket(family, type)
try:
probe.bind((interface, 0))
return probe.getsockname()
finally:
probe.close()
|
[
"def",
"findFreePort",
"(",
"interface",
"=",
"'127.0.0.1'",
",",
"type",
"=",
"socket",
".",
"SOCK_STREAM",
")",
":",
"family",
"=",
"socket",
".",
"AF_INET",
"probe",
"=",
"socket",
".",
"socket",
"(",
"family",
",",
"type",
")",
"try",
":",
"probe",
".",
"bind",
"(",
"(",
"interface",
",",
"0",
")",
")",
"return",
"probe",
".",
"getsockname",
"(",
")",
"finally",
":",
"probe",
".",
"close",
"(",
")"
] |
ask the platform to allocate a free port on the specified interface .
|
train
| false
|
11,486
|
def check_user_permission(user, permission_codename, directory, check_default=True):
if user.is_superuser:
return True
permissions = get_matching_permissions(user, directory, check_default)
return (('administrate' in permissions) or (permission_codename in permissions))
|
[
"def",
"check_user_permission",
"(",
"user",
",",
"permission_codename",
",",
"directory",
",",
"check_default",
"=",
"True",
")",
":",
"if",
"user",
".",
"is_superuser",
":",
"return",
"True",
"permissions",
"=",
"get_matching_permissions",
"(",
"user",
",",
"directory",
",",
"check_default",
")",
"return",
"(",
"(",
"'administrate'",
"in",
"permissions",
")",
"or",
"(",
"permission_codename",
"in",
"permissions",
")",
")"
] |
checks if the current user has the permission to perform permission_codename .
|
train
| false
|
11,488
|
def is_config_valid(config):
for ii in REQUIRED_CONFIG_FIELDS:
try:
if config[ii]:
pass
except KeyError:
logging.error((('Unable to find ' + str(ii)) + ' in configuration'))
return False
return True
|
[
"def",
"is_config_valid",
"(",
"config",
")",
":",
"for",
"ii",
"in",
"REQUIRED_CONFIG_FIELDS",
":",
"try",
":",
"if",
"config",
"[",
"ii",
"]",
":",
"pass",
"except",
"KeyError",
":",
"logging",
".",
"error",
"(",
"(",
"(",
"'Unable to find '",
"+",
"str",
"(",
"ii",
")",
")",
"+",
"' in configuration'",
")",
")",
"return",
"False",
"return",
"True"
] |
takes a configuration and checks to make sure all required properties are present .
|
train
| false
|
11,489
|
def get_lms_link_for_certificate_web_view(user_id, course_key, mode):
assert isinstance(course_key, CourseKey)
lms_base = SiteConfiguration.get_value_for_org(course_key.org, 'LMS_BASE', settings.LMS_BASE)
if (lms_base is None):
return None
return u'//{certificate_web_base}/certificates/user/{user_id}/course/{course_id}?preview={mode}'.format(certificate_web_base=lms_base, user_id=user_id, course_id=unicode(course_key), mode=mode)
|
[
"def",
"get_lms_link_for_certificate_web_view",
"(",
"user_id",
",",
"course_key",
",",
"mode",
")",
":",
"assert",
"isinstance",
"(",
"course_key",
",",
"CourseKey",
")",
"lms_base",
"=",
"SiteConfiguration",
".",
"get_value_for_org",
"(",
"course_key",
".",
"org",
",",
"'LMS_BASE'",
",",
"settings",
".",
"LMS_BASE",
")",
"if",
"(",
"lms_base",
"is",
"None",
")",
":",
"return",
"None",
"return",
"u'//{certificate_web_base}/certificates/user/{user_id}/course/{course_id}?preview={mode}'",
".",
"format",
"(",
"certificate_web_base",
"=",
"lms_base",
",",
"user_id",
"=",
"user_id",
",",
"course_id",
"=",
"unicode",
"(",
"course_key",
")",
",",
"mode",
"=",
"mode",
")"
] |
returns the url to the certificate web view .
|
train
| false
|
11,490
|
@task(bind=True)
def send_activation_email(self, subject, message, from_address, dest_addr):
max_retries = settings.RETRY_ACTIVATION_EMAIL_MAX_ATTEMPTS
retries = self.request.retries
try:
mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
log.info('Activation Email has been sent to User {user_email}'.format(user_email=dest_addr))
except NoAuthHandlerFound:
log.info('Retrying sending email to user {dest_addr}, attempt # {attempt} of {max_attempts}'.format(dest_addr=dest_addr, attempt=retries, max_attempts=max_retries))
try:
self.retry(countdown=settings.RETRY_ACTIVATION_EMAIL_TIMEOUT, max_retries=max_retries)
except MaxRetriesExceededError:
log.error('Unable to send activation email to user from "%s" to "%s"', from_address, dest_addr, exc_info=True)
except Exception:
log.exception('Unable to send activation email to user from "%s" to "%s"', from_address, dest_addr, exc_info=True)
raise Exception
|
[
"@",
"task",
"(",
"bind",
"=",
"True",
")",
"def",
"send_activation_email",
"(",
"self",
",",
"subject",
",",
"message",
",",
"from_address",
",",
"dest_addr",
")",
":",
"max_retries",
"=",
"settings",
".",
"RETRY_ACTIVATION_EMAIL_MAX_ATTEMPTS",
"retries",
"=",
"self",
".",
"request",
".",
"retries",
"try",
":",
"mail",
".",
"send_mail",
"(",
"subject",
",",
"message",
",",
"from_address",
",",
"[",
"dest_addr",
"]",
",",
"fail_silently",
"=",
"False",
")",
"log",
".",
"info",
"(",
"'Activation Email has been sent to User {user_email}'",
".",
"format",
"(",
"user_email",
"=",
"dest_addr",
")",
")",
"except",
"NoAuthHandlerFound",
":",
"log",
".",
"info",
"(",
"'Retrying sending email to user {dest_addr}, attempt # {attempt} of {max_attempts}'",
".",
"format",
"(",
"dest_addr",
"=",
"dest_addr",
",",
"attempt",
"=",
"retries",
",",
"max_attempts",
"=",
"max_retries",
")",
")",
"try",
":",
"self",
".",
"retry",
"(",
"countdown",
"=",
"settings",
".",
"RETRY_ACTIVATION_EMAIL_TIMEOUT",
",",
"max_retries",
"=",
"max_retries",
")",
"except",
"MaxRetriesExceededError",
":",
"log",
".",
"error",
"(",
"'Unable to send activation email to user from \"%s\" to \"%s\"'",
",",
"from_address",
",",
"dest_addr",
",",
"exc_info",
"=",
"True",
")",
"except",
"Exception",
":",
"log",
".",
"exception",
"(",
"'Unable to send activation email to user from \"%s\" to \"%s\"'",
",",
"from_address",
",",
"dest_addr",
",",
"exc_info",
"=",
"True",
")",
"raise",
"Exception"
] |
sending an activation email to the users .
|
train
| false
|
11,491
|
def getNewMouseTool():
return ViewpointRotate()
|
[
"def",
"getNewMouseTool",
"(",
")",
":",
"return",
"ViewpointRotate",
"(",
")"
] |
get a new mouse tool .
|
train
| false
|
11,492
|
def view_execution_permitted(context, request, name=''):
reg = _get_registry(request)
provides = ([IViewClassifier] + map_(providedBy, (request, context)))
view = reg.adapters.lookup(provides, ISecuredView, name=name)
if (view is None):
view = reg.adapters.lookup(provides, IView, name=name)
if (view is None):
raise TypeError('No registered view satisfies the constraints. It would not make sense to claim that this view "is" or "is not" permitted.')
return Allowed(('Allowed: view name %r in context %r (no permission defined)' % (name, context)))
return view.__permitted__(context, request)
|
[
"def",
"view_execution_permitted",
"(",
"context",
",",
"request",
",",
"name",
"=",
"''",
")",
":",
"reg",
"=",
"_get_registry",
"(",
"request",
")",
"provides",
"=",
"(",
"[",
"IViewClassifier",
"]",
"+",
"map_",
"(",
"providedBy",
",",
"(",
"request",
",",
"context",
")",
")",
")",
"view",
"=",
"reg",
".",
"adapters",
".",
"lookup",
"(",
"provides",
",",
"ISecuredView",
",",
"name",
"=",
"name",
")",
"if",
"(",
"view",
"is",
"None",
")",
":",
"view",
"=",
"reg",
".",
"adapters",
".",
"lookup",
"(",
"provides",
",",
"IView",
",",
"name",
"=",
"name",
")",
"if",
"(",
"view",
"is",
"None",
")",
":",
"raise",
"TypeError",
"(",
"'No registered view satisfies the constraints. It would not make sense to claim that this view \"is\" or \"is not\" permitted.'",
")",
"return",
"Allowed",
"(",
"(",
"'Allowed: view name %r in context %r (no permission defined)'",
"%",
"(",
"name",
",",
"context",
")",
")",
")",
"return",
"view",
".",
"__permitted__",
"(",
"context",
",",
"request",
")"
] |
if the view specified by context and name is protected by a :term:permission .
|
train
| false
|
11,493
|
@app.route('/events/<public_id>', methods=['GET'])
def event_read_api(public_id):
valid_public_id(public_id)
try:
event = g.db_session.query(Event).filter((Event.namespace_id == g.namespace.id), (Event.public_id == public_id), (Event.deleted_at == None)).one()
except NoResultFound:
raise NotFoundError("Couldn't find event id {0}".format(public_id))
return g.encoder.jsonify(event)
|
[
"@",
"app",
".",
"route",
"(",
"'/events/<public_id>'",
",",
"methods",
"=",
"[",
"'GET'",
"]",
")",
"def",
"event_read_api",
"(",
"public_id",
")",
":",
"valid_public_id",
"(",
"public_id",
")",
"try",
":",
"event",
"=",
"g",
".",
"db_session",
".",
"query",
"(",
"Event",
")",
".",
"filter",
"(",
"(",
"Event",
".",
"namespace_id",
"==",
"g",
".",
"namespace",
".",
"id",
")",
",",
"(",
"Event",
".",
"public_id",
"==",
"public_id",
")",
",",
"(",
"Event",
".",
"deleted_at",
"==",
"None",
")",
")",
".",
"one",
"(",
")",
"except",
"NoResultFound",
":",
"raise",
"NotFoundError",
"(",
"\"Couldn't find event id {0}\"",
".",
"format",
"(",
"public_id",
")",
")",
"return",
"g",
".",
"encoder",
".",
"jsonify",
"(",
"event",
")"
] |
get all data for an existing event .
|
train
| false
|
11,494
|
def _find_script(script_name):
if os.path.isfile(script_name):
return script_name
path = os.getenv('PATH', os.defpath).split(os.pathsep)
for folder in path:
if (folder == ''):
continue
fn = os.path.join(folder, script_name)
if os.path.isfile(fn):
return fn
sys.stderr.write('Could not find script {0}\n'.format(script_name))
raise SystemExit(1)
|
[
"def",
"_find_script",
"(",
"script_name",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"script_name",
")",
":",
"return",
"script_name",
"path",
"=",
"os",
".",
"getenv",
"(",
"'PATH'",
",",
"os",
".",
"defpath",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"for",
"folder",
"in",
"path",
":",
"if",
"(",
"folder",
"==",
"''",
")",
":",
"continue",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"script_name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fn",
")",
":",
"return",
"fn",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Could not find script {0}\\n'",
".",
"format",
"(",
"script_name",
")",
")",
"raise",
"SystemExit",
"(",
"1",
")"
] |
find the script .
|
train
| true
|
11,496
|
def postBuildPage(page):
pass
|
[
"def",
"postBuildPage",
"(",
"page",
")",
":",
"pass"
] |
called after building a page .
|
train
| false
|
11,497
|
def make_esc(esc_chars):
return (lambda s: u''.join([((u'\\' + c) if (c in esc_chars) else c) for c in s]))
|
[
"def",
"make_esc",
"(",
"esc_chars",
")",
":",
"return",
"(",
"lambda",
"s",
":",
"u''",
".",
"join",
"(",
"[",
"(",
"(",
"u'\\\\'",
"+",
"c",
")",
"if",
"(",
"c",
"in",
"esc_chars",
")",
"else",
"c",
")",
"for",
"c",
"in",
"s",
"]",
")",
")"
] |
function generator for escaping special characters .
|
train
| false
|
11,499
|
def _shell_split(s):
try:
return shlex.split(core.encode(s))
except ValueError:
return [core.encode(s)]
|
[
"def",
"_shell_split",
"(",
"s",
")",
":",
"try",
":",
"return",
"shlex",
".",
"split",
"(",
"core",
".",
"encode",
"(",
"s",
")",
")",
"except",
"ValueError",
":",
"return",
"[",
"core",
".",
"encode",
"(",
"s",
")",
"]"
] |
split string apart into utf-8 encoded words using shell syntax .
|
train
| false
|
11,500
|
@pytest.mark.network
def test_pip_second_command_line_interface_works(script, data):
kwargs = {}
if (pyversion_tuple < (2, 7, 9)):
kwargs['expect_stderr'] = True
args = [('pip%s' % pyversion)]
args.extend(['install', 'INITools==0.2'])
args.extend(['-f', data.packages])
result = script.run(*args, **kwargs)
egg_info_folder = ((script.site_packages / 'INITools-0.2-py%s.egg-info') % pyversion)
initools_folder = (script.site_packages / 'initools')
assert (egg_info_folder in result.files_created), str(result)
assert (initools_folder in result.files_created), str(result)
|
[
"@",
"pytest",
".",
"mark",
".",
"network",
"def",
"test_pip_second_command_line_interface_works",
"(",
"script",
",",
"data",
")",
":",
"kwargs",
"=",
"{",
"}",
"if",
"(",
"pyversion_tuple",
"<",
"(",
"2",
",",
"7",
",",
"9",
")",
")",
":",
"kwargs",
"[",
"'expect_stderr'",
"]",
"=",
"True",
"args",
"=",
"[",
"(",
"'pip%s'",
"%",
"pyversion",
")",
"]",
"args",
".",
"extend",
"(",
"[",
"'install'",
",",
"'INITools==0.2'",
"]",
")",
"args",
".",
"extend",
"(",
"[",
"'-f'",
",",
"data",
".",
"packages",
"]",
")",
"result",
"=",
"script",
".",
"run",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"egg_info_folder",
"=",
"(",
"(",
"script",
".",
"site_packages",
"/",
"'INITools-0.2-py%s.egg-info'",
")",
"%",
"pyversion",
")",
"initools_folder",
"=",
"(",
"script",
".",
"site_packages",
"/",
"'initools'",
")",
"assert",
"(",
"egg_info_folder",
"in",
"result",
".",
"files_created",
")",
",",
"str",
"(",
"result",
")",
"assert",
"(",
"initools_folder",
"in",
"result",
".",
"files_created",
")",
",",
"str",
"(",
"result",
")"
] |
check if pip<pyversion> commands behaves equally .
|
train
| false
|
11,501
|
def parse_future_flags(fp, encoding='latin-1'):
import __future__
pos = fp.tell()
fp.seek(0)
flags = 0
try:
body = fp.read().decode(encoding)
for m in PYTHON_FUTURE_IMPORT_re.finditer(body):
names = [x.strip() for x in m.group(1).split(',')]
for name in names:
flags |= getattr(__future__, name).compiler_flag
finally:
fp.seek(pos)
return flags
|
[
"def",
"parse_future_flags",
"(",
"fp",
",",
"encoding",
"=",
"'latin-1'",
")",
":",
"import",
"__future__",
"pos",
"=",
"fp",
".",
"tell",
"(",
")",
"fp",
".",
"seek",
"(",
"0",
")",
"flags",
"=",
"0",
"try",
":",
"body",
"=",
"fp",
".",
"read",
"(",
")",
".",
"decode",
"(",
"encoding",
")",
"for",
"m",
"in",
"PYTHON_FUTURE_IMPORT_re",
".",
"finditer",
"(",
"body",
")",
":",
"names",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"m",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"','",
")",
"]",
"for",
"name",
"in",
"names",
":",
"flags",
"|=",
"getattr",
"(",
"__future__",
",",
"name",
")",
".",
"compiler_flag",
"finally",
":",
"fp",
".",
"seek",
"(",
"pos",
")",
"return",
"flags"
] |
parse the compiler flags by :mod:__future__ from the given python code .
|
train
| false
|
11,502
|
def generate_random_string(length):
r = random.SystemRandom()
str = ''
chars = (string.letters + string.digits)
while (length > 0):
str += r.choice(chars)
length -= 1
return str
|
[
"def",
"generate_random_string",
"(",
"length",
")",
":",
"r",
"=",
"random",
".",
"SystemRandom",
"(",
")",
"str",
"=",
"''",
"chars",
"=",
"(",
"string",
".",
"letters",
"+",
"string",
".",
"digits",
")",
"while",
"(",
"length",
">",
"0",
")",
":",
"str",
"+=",
"r",
".",
"choice",
"(",
"chars",
")",
"length",
"-=",
"1",
"return",
"str"
] |
return a random string using alphanumeric characters .
|
train
| false
|
11,503
|
def scrub(zpool, stop=False):
ret = {}
ret[zpool] = {}
if exists(zpool):
zpool_cmd = _check_zpool()
cmd = '{zpool_cmd} scrub {stop}{zpool}'.format(zpool_cmd=zpool_cmd, stop=('-s ' if stop else ''), zpool=zpool)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
ret[zpool] = {}
if (res['retcode'] != 0):
ret[zpool]['scrubbing'] = False
if ('stderr' in res):
if ('currently scrubbing' in res['stderr']):
ret[zpool]['scrubbing'] = True
elif ('no active scrub' not in res['stderr']):
ret[zpool]['error'] = res['stderr']
else:
ret[zpool]['error'] = res['stdout']
else:
ret[zpool]['scrubbing'] = (True if (not stop) else False)
else:
ret[zpool] = 'storage pool does not exist'
return ret
|
[
"def",
"scrub",
"(",
"zpool",
",",
"stop",
"=",
"False",
")",
":",
"ret",
"=",
"{",
"}",
"ret",
"[",
"zpool",
"]",
"=",
"{",
"}",
"if",
"exists",
"(",
"zpool",
")",
":",
"zpool_cmd",
"=",
"_check_zpool",
"(",
")",
"cmd",
"=",
"'{zpool_cmd} scrub {stop}{zpool}'",
".",
"format",
"(",
"zpool_cmd",
"=",
"zpool_cmd",
",",
"stop",
"=",
"(",
"'-s '",
"if",
"stop",
"else",
"''",
")",
",",
"zpool",
"=",
"zpool",
")",
"res",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"ret",
"[",
"zpool",
"]",
"=",
"{",
"}",
"if",
"(",
"res",
"[",
"'retcode'",
"]",
"!=",
"0",
")",
":",
"ret",
"[",
"zpool",
"]",
"[",
"'scrubbing'",
"]",
"=",
"False",
"if",
"(",
"'stderr'",
"in",
"res",
")",
":",
"if",
"(",
"'currently scrubbing'",
"in",
"res",
"[",
"'stderr'",
"]",
")",
":",
"ret",
"[",
"zpool",
"]",
"[",
"'scrubbing'",
"]",
"=",
"True",
"elif",
"(",
"'no active scrub'",
"not",
"in",
"res",
"[",
"'stderr'",
"]",
")",
":",
"ret",
"[",
"zpool",
"]",
"[",
"'error'",
"]",
"=",
"res",
"[",
"'stderr'",
"]",
"else",
":",
"ret",
"[",
"zpool",
"]",
"[",
"'error'",
"]",
"=",
"res",
"[",
"'stdout'",
"]",
"else",
":",
"ret",
"[",
"zpool",
"]",
"[",
"'scrubbing'",
"]",
"=",
"(",
"True",
"if",
"(",
"not",
"stop",
")",
"else",
"False",
")",
"else",
":",
"ret",
"[",
"zpool",
"]",
"=",
"'storage pool does not exist'",
"return",
"ret"
] |
returns sluggified string .
|
train
| false
|
11,506
|
def init_worker(log_queue):
signal.signal(signal.SIGINT, signal.SIG_IGN)
log_sink_factory(log_queue)
start_profiling_no_core()
|
[
"def",
"init_worker",
"(",
"log_queue",
")",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIG_IGN",
")",
"log_sink_factory",
"(",
"log_queue",
")",
"start_profiling_no_core",
"(",
")"
] |
this function is called right after each process in the processpool is created .
|
train
| false
|
11,507
|
def updates_dict(*args):
result = {}
for d in args:
result.update(d)
return result
|
[
"def",
"updates_dict",
"(",
"*",
"args",
")",
":",
"result",
"=",
"{",
"}",
"for",
"d",
"in",
"args",
":",
"result",
".",
"update",
"(",
"d",
")",
"return",
"result"
] |
surport update multi dict .
|
train
| false
|
11,508
|
def noop_load(*args, **kwargs):
return (None, None)
|
[
"def",
"noop_load",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"(",
"None",
",",
"None",
")"
] |
a method that can be substituted in as the load method in a tradingenvironment to prevent it from loading benchmarks .
|
train
| false
|
11,509
|
def BoolCheck(value):
bool_val = truthy(value, default=None)
if (bool_val is None):
raise ValueError((_('Invalid boolean: %s') % value))
return bool_val
|
[
"def",
"BoolCheck",
"(",
"value",
")",
":",
"bool_val",
"=",
"truthy",
"(",
"value",
",",
"default",
"=",
"None",
")",
"if",
"(",
"bool_val",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"(",
"_",
"(",
"'Invalid boolean: %s'",
")",
"%",
"value",
")",
")",
"return",
"bool_val"
] |
convert common yes/no strings into boolean values .
|
train
| false
|
11,510
|
def create_user_partition_json(partition_id, name, description, groups, scheme='random'):
return UserPartition(partition_id, name, description, groups, MockUserPartitionScheme(scheme)).to_json()
|
[
"def",
"create_user_partition_json",
"(",
"partition_id",
",",
"name",
",",
"description",
",",
"groups",
",",
"scheme",
"=",
"'random'",
")",
":",
"return",
"UserPartition",
"(",
"partition_id",
",",
"name",
",",
"description",
",",
"groups",
",",
"MockUserPartitionScheme",
"(",
"scheme",
")",
")",
".",
"to_json",
"(",
")"
] |
helper method to create user partition json .
|
train
| false
|
11,511
|
def addressable_list(type_constraint):
return _addressable_wrapper(AddressableList, type_constraint)
|
[
"def",
"addressable_list",
"(",
"type_constraint",
")",
":",
"return",
"_addressable_wrapper",
"(",
"AddressableList",
",",
"type_constraint",
")"
] |
marks a lists values as satisfying a given type constraint .
|
train
| false
|
11,514
|
def FakeRename(src, dst):
raise OSError(errno.EPERM, 'Operation not permitted', src)
|
[
"def",
"FakeRename",
"(",
"src",
",",
"dst",
")",
":",
"raise",
"OSError",
"(",
"errno",
".",
"EPERM",
",",
"'Operation not permitted'",
",",
"src",
")"
] |
fake version of os .
|
train
| false
|
11,517
|
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def get_sale_order_records(request, course_id):
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
query_features = [('id', 'Order Id'), ('company_name', 'Company Name'), ('company_contact_name', 'Company Contact Name'), ('company_contact_email', 'Company Contact Email'), ('logged_in_username', 'Login Username'), ('logged_in_email', 'Login User Email'), ('purchase_time', 'Date of Sale'), ('customer_reference_number', 'Customer Reference Number'), ('recipient_name', 'Recipient Name'), ('recipient_email', 'Recipient Email'), ('bill_to_street1', 'Street 1'), ('bill_to_street2', 'Street 2'), ('bill_to_city', 'City'), ('bill_to_state', 'State'), ('bill_to_postalcode', 'Postal Code'), ('bill_to_country', 'Country'), ('order_type', 'Order Type'), ('status', 'Order Item Status'), ('coupon_code', 'Coupon Code'), ('list_price', 'List Price'), ('unit_cost', 'Unit Price'), ('quantity', 'Quantity'), ('total_discount', 'Total Discount'), ('total_amount', 'Total Amount Paid')]
db_columns = [x[0] for x in query_features]
csv_columns = [x[1] for x in query_features]
sale_data = instructor_analytics.basic.sale_order_record_features(course_id, db_columns)
(__, datarows) = instructor_analytics.csvs.format_dictlist(sale_data, db_columns)
return instructor_analytics.csvs.create_csv_response('e-commerce_sale_order_records.csv', csv_columns, datarows)
|
[
"@",
"ensure_csrf_cookie",
"@",
"cache_control",
"(",
"no_cache",
"=",
"True",
",",
"no_store",
"=",
"True",
",",
"must_revalidate",
"=",
"True",
")",
"@",
"require_level",
"(",
"'staff'",
")",
"def",
"get_sale_order_records",
"(",
"request",
",",
"course_id",
")",
":",
"course_id",
"=",
"SlashSeparatedCourseKey",
".",
"from_deprecated_string",
"(",
"course_id",
")",
"query_features",
"=",
"[",
"(",
"'id'",
",",
"'Order Id'",
")",
",",
"(",
"'company_name'",
",",
"'Company Name'",
")",
",",
"(",
"'company_contact_name'",
",",
"'Company Contact Name'",
")",
",",
"(",
"'company_contact_email'",
",",
"'Company Contact Email'",
")",
",",
"(",
"'logged_in_username'",
",",
"'Login Username'",
")",
",",
"(",
"'logged_in_email'",
",",
"'Login User Email'",
")",
",",
"(",
"'purchase_time'",
",",
"'Date of Sale'",
")",
",",
"(",
"'customer_reference_number'",
",",
"'Customer Reference Number'",
")",
",",
"(",
"'recipient_name'",
",",
"'Recipient Name'",
")",
",",
"(",
"'recipient_email'",
",",
"'Recipient Email'",
")",
",",
"(",
"'bill_to_street1'",
",",
"'Street 1'",
")",
",",
"(",
"'bill_to_street2'",
",",
"'Street 2'",
")",
",",
"(",
"'bill_to_city'",
",",
"'City'",
")",
",",
"(",
"'bill_to_state'",
",",
"'State'",
")",
",",
"(",
"'bill_to_postalcode'",
",",
"'Postal Code'",
")",
",",
"(",
"'bill_to_country'",
",",
"'Country'",
")",
",",
"(",
"'order_type'",
",",
"'Order Type'",
")",
",",
"(",
"'status'",
",",
"'Order Item Status'",
")",
",",
"(",
"'coupon_code'",
",",
"'Coupon Code'",
")",
",",
"(",
"'list_price'",
",",
"'List Price'",
")",
",",
"(",
"'unit_cost'",
",",
"'Unit Price'",
")",
",",
"(",
"'quantity'",
",",
"'Quantity'",
")",
",",
"(",
"'total_discount'",
",",
"'Total Discount'",
")",
",",
"(",
"'total_amount'",
",",
"'Total Amount Paid'",
")",
"]",
"db_columns",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"query_features",
"]",
"csv_columns",
"=",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"query_features",
"]",
"sale_data",
"=",
"instructor_analytics",
".",
"basic",
".",
"sale_order_record_features",
"(",
"course_id",
",",
"db_columns",
")",
"(",
"__",
",",
"datarows",
")",
"=",
"instructor_analytics",
".",
"csvs",
".",
"format_dictlist",
"(",
"sale_data",
",",
"db_columns",
")",
"return",
"instructor_analytics",
".",
"csvs",
".",
"create_csv_response",
"(",
"'e-commerce_sale_order_records.csv'",
",",
"csv_columns",
",",
"datarows",
")"
] |
return the summary of all sales records for a particular course .
|
train
| false
|
11,518
|
@contextmanager
def for_range_slice_generic(builder, start, stop, step):
intp = start.type
is_pos_step = builder.icmp_signed('>=', step, ir.Constant(intp, 0))
pos_for_range = for_range_slice(builder, start, stop, step, intp, inc=True)
neg_for_range = for_range_slice(builder, start, stop, step, intp, inc=False)
@contextmanager
def cm_cond(cond, inner_cm):
with cond:
with inner_cm as value:
(yield value)
with builder.if_else(is_pos_step, likely=True) as (then, otherwise):
(yield (cm_cond(then, pos_for_range), cm_cond(otherwise, neg_for_range)))
|
[
"@",
"contextmanager",
"def",
"for_range_slice_generic",
"(",
"builder",
",",
"start",
",",
"stop",
",",
"step",
")",
":",
"intp",
"=",
"start",
".",
"type",
"is_pos_step",
"=",
"builder",
".",
"icmp_signed",
"(",
"'>='",
",",
"step",
",",
"ir",
".",
"Constant",
"(",
"intp",
",",
"0",
")",
")",
"pos_for_range",
"=",
"for_range_slice",
"(",
"builder",
",",
"start",
",",
"stop",
",",
"step",
",",
"intp",
",",
"inc",
"=",
"True",
")",
"neg_for_range",
"=",
"for_range_slice",
"(",
"builder",
",",
"start",
",",
"stop",
",",
"step",
",",
"intp",
",",
"inc",
"=",
"False",
")",
"@",
"contextmanager",
"def",
"cm_cond",
"(",
"cond",
",",
"inner_cm",
")",
":",
"with",
"cond",
":",
"with",
"inner_cm",
"as",
"value",
":",
"(",
"yield",
"value",
")",
"with",
"builder",
".",
"if_else",
"(",
"is_pos_step",
",",
"likely",
"=",
"True",
")",
"as",
"(",
"then",
",",
"otherwise",
")",
":",
"(",
"yield",
"(",
"cm_cond",
"(",
"then",
",",
"pos_for_range",
")",
",",
"cm_cond",
"(",
"otherwise",
",",
"neg_for_range",
")",
")",
")"
] |
a helper wrapper for for_range_slice() .
|
train
| false
|
11,521
|
def _float_from_json(value, field):
if _not_null(value, field):
return float(value)
|
[
"def",
"_float_from_json",
"(",
"value",
",",
"field",
")",
":",
"if",
"_not_null",
"(",
"value",
",",
"field",
")",
":",
"return",
"float",
"(",
"value",
")"
] |
coerce value to a float .
|
train
| false
|
11,523
|
def dict_to_numpy_array1(d, mapping=None):
import numpy
if (mapping is None):
s = set(d.keys())
mapping = dict(zip(s, range(len(s))))
n = len(mapping)
a = numpy.zeros(n)
for (k1, i) in mapping.items():
i = mapping[k1]
a[i] = d[k1]
return a
|
[
"def",
"dict_to_numpy_array1",
"(",
"d",
",",
"mapping",
"=",
"None",
")",
":",
"import",
"numpy",
"if",
"(",
"mapping",
"is",
"None",
")",
":",
"s",
"=",
"set",
"(",
"d",
".",
"keys",
"(",
")",
")",
"mapping",
"=",
"dict",
"(",
"zip",
"(",
"s",
",",
"range",
"(",
"len",
"(",
"s",
")",
")",
")",
")",
"n",
"=",
"len",
"(",
"mapping",
")",
"a",
"=",
"numpy",
".",
"zeros",
"(",
"n",
")",
"for",
"(",
"k1",
",",
"i",
")",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"i",
"=",
"mapping",
"[",
"k1",
"]",
"a",
"[",
"i",
"]",
"=",
"d",
"[",
"k1",
"]",
"return",
"a"
] |
convert a dictionary of numbers to a 1d numpy array with optional mapping .
|
train
| false
|
11,524
|
def marshall_now(now=None):
if (not now):
now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond)
|
[
"def",
"marshall_now",
"(",
"now",
"=",
"None",
")",
":",
"if",
"(",
"not",
"now",
")",
":",
"now",
"=",
"utcnow",
"(",
")",
"return",
"dict",
"(",
"day",
"=",
"now",
".",
"day",
",",
"month",
"=",
"now",
".",
"month",
",",
"year",
"=",
"now",
".",
"year",
",",
"hour",
"=",
"now",
".",
"hour",
",",
"minute",
"=",
"now",
".",
"minute",
",",
"second",
"=",
"now",
".",
"second",
",",
"microsecond",
"=",
"now",
".",
"microsecond",
")"
] |
make an rpc-safe datetime with microseconds .
|
train
| true
|
11,525
|
def build_html_document(body, title=None):
title_html = ('<h1>{0}</h1>\n'.format(title) if title else '')
html = ['<html>', '<head>', '<title>{title}</title>'.format(title=title), '<style>', '\n body { font-size:10pt }\n table { font-size:10pt;border-width:none;\n border-spacing:0px;border-color:#F8F8F8;border-style:solid }\n th, td { padding:2px;vertical-align:top;\n border-width:1px;border-color:#F8F8F8;border-style:solid; }\n th { font-weight:bold;text-align:left;font-family:times }\n th { color:#666666 }\n a, a.toggle:link, a.toggle:visited {\n background-color:#FFFFFF;color:#000099 }\n a.toggle:hover, a.toggle:active {\n color:#FFFFFF;background-color:#000099 }\n</style></head>', '<body>', title_html, body, '</body>', '</html>']
return '\n'.join(html)
|
[
"def",
"build_html_document",
"(",
"body",
",",
"title",
"=",
"None",
")",
":",
"title_html",
"=",
"(",
"'<h1>{0}</h1>\\n'",
".",
"format",
"(",
"title",
")",
"if",
"title",
"else",
"''",
")",
"html",
"=",
"[",
"'<html>'",
",",
"'<head>'",
",",
"'<title>{title}</title>'",
".",
"format",
"(",
"title",
"=",
"title",
")",
",",
"'<style>'",
",",
"'\\n body { font-size:10pt }\\n table { font-size:10pt;border-width:none;\\n border-spacing:0px;border-color:#F8F8F8;border-style:solid }\\n th, td { padding:2px;vertical-align:top;\\n border-width:1px;border-color:#F8F8F8;border-style:solid; }\\n th { font-weight:bold;text-align:left;font-family:times }\\n th { color:#666666 }\\n a, a.toggle:link, a.toggle:visited {\\n background-color:#FFFFFF;color:#000099 }\\n a.toggle:hover, a.toggle:active {\\n color:#FFFFFF;background-color:#000099 }\\n</style></head>'",
",",
"'<body>'",
",",
"title_html",
",",
"body",
",",
"'</body>'",
",",
"'</html>'",
"]",
"return",
"'\\n'",
".",
"join",
"(",
"html",
")"
] |
produces the html document wrapper for a text/html response .
|
train
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.