id_within_dataset
int64 1
55.5k
| snippet
stringlengths 19
14.2k
| tokens
listlengths 6
1.63k
| nl
stringlengths 6
352
| split_within_dataset
stringclasses 1
value | is_duplicated
bool 2
classes |
|---|---|---|---|---|---|
12,700
|
def do_wordcount(s):
    """Count the words in the string *s* using the module-level word regex."""
    matches = _word_re.findall(s)
    return len(matches)
|
[
"def",
"do_wordcount",
"(",
"s",
")",
":",
"return",
"len",
"(",
"_word_re",
".",
"findall",
"(",
"s",
")",
")"
] |
count the words in that string .
|
train
| false
|
12,701
|
def reap_threads(func):
    """Decorator: clean up any threads *func* leaves behind.

    If the ``thread`` module is unavailable, *func* is returned unchanged.
    Otherwise the wrapper snapshots thread state before the call and
    restores it afterwards, even when *func* raises.
    """
    if not thread:
        return func

    @functools.wraps(func)
    def decorator(*args):
        state = threading_setup()
        try:
            return func(*args)
        finally:
            threading_cleanup(*state)

    return decorator
|
[
"def",
"reap_threads",
"(",
"func",
")",
":",
"if",
"(",
"not",
"thread",
")",
":",
"return",
"func",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"decorator",
"(",
"*",
"args",
")",
":",
"key",
"=",
"threading_setup",
"(",
")",
"try",
":",
"return",
"func",
"(",
"*",
"args",
")",
"finally",
":",
"threading_cleanup",
"(",
"*",
"key",
")",
"return",
"decorator"
] |
use this function when threads are being used .
|
train
| false
|
12,702
|
def modClearRefs(s, titlesRefs, namesRefs, charactersRefs):
    """Strip title, character and name references from *s*.

    The three *Refs* arguments are accepted for interface compatibility
    but are not consulted; fresh empty mappings are passed to the helper
    functions instead.
    """
    cleared = modClearTitleRefs(s, {}, {}, {})
    cleared = modClearCharacterRefs(cleared, {}, {}, {})
    return modClearNameRefs(cleared, {}, {}, {})
|
[
"def",
"modClearRefs",
"(",
"s",
",",
"titlesRefs",
",",
"namesRefs",
",",
"charactersRefs",
")",
":",
"s",
"=",
"modClearTitleRefs",
"(",
"s",
",",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
")",
"s",
"=",
"modClearCharacterRefs",
"(",
"s",
",",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
")",
"return",
"modClearNameRefs",
"(",
"s",
",",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
")"
] |
remove titles .
|
train
| false
|
12,703
|
def module_content(page, response):
    """Extract the ``module_content`` HTML-comment block from *page*.

    Stores the stripped text of the block (the last one, if several occur)
    under ``response['module_content']`` and returns *response*; *response*
    is returned unchanged when no block is present.
    """
    pattern = '<!-- module_content -->(?P<module_content>.*?)<!-- /module_content -->'
    for match in re.finditer(pattern, page, re.DOTALL):
        response['module_content'] = match.group('module_content').strip()
    return response
|
[
"def",
"module_content",
"(",
"page",
",",
"response",
")",
":",
"regexp",
"=",
"'<!-- module_content -->(?P<module_content>.*?)<!-- /module_content -->'",
"blocks",
"=",
"re",
".",
"finditer",
"(",
"regexp",
",",
"page",
",",
"re",
".",
"DOTALL",
")",
"for",
"block",
"in",
"blocks",
":",
"response",
"[",
"'module_content'",
"]",
"=",
"block",
".",
"group",
"(",
"'module_content'",
")",
".",
"strip",
"(",
")",
"return",
"response"
] |
extract module_content .
|
train
| false
|
12,706
|
def _prefix_description_for_number(data, longest_prefix, numobj, lang, script=None, region=None):
    """Return a text description of a phone number for the given language.

    Looks up the longest matching prefix of the number's E.164 form in
    *data* and returns the description for *lang*/*script*/*region*, or
    U_EMPTY_STRING when no prefix (or no matching language) is found.

    Raises Exception if format_number() yields a string not starting with '+'.
    """
    e164_num = format_number(numobj, PhoneNumberFormat.E164)
    if (not e164_num.startswith(U_PLUS)):
        raise Exception('Expect E164 number to start with +')
    # Try prefixes from longest to shortest; the first hit in *data* wins.
    for prefix_len in range(longest_prefix, 0, (-1)):
        prefix = e164_num[1:(1 + prefix_len)]
        if (prefix in data):
            name = _find_lang(data[prefix], lang, script, region)
            if (name is not None):
                return name
            else:
                # The longest matching prefix had no entry for this language;
                # deliberately stop rather than fall back to shorter prefixes.
                return U_EMPTY_STRING
    return U_EMPTY_STRING
|
[
"def",
"_prefix_description_for_number",
"(",
"data",
",",
"longest_prefix",
",",
"numobj",
",",
"lang",
",",
"script",
"=",
"None",
",",
"region",
"=",
"None",
")",
":",
"e164_num",
"=",
"format_number",
"(",
"numobj",
",",
"PhoneNumberFormat",
".",
"E164",
")",
"if",
"(",
"not",
"e164_num",
".",
"startswith",
"(",
"U_PLUS",
")",
")",
":",
"raise",
"Exception",
"(",
"'Expect E164 number to start with +'",
")",
"for",
"prefix_len",
"in",
"range",
"(",
"longest_prefix",
",",
"0",
",",
"(",
"-",
"1",
")",
")",
":",
"prefix",
"=",
"e164_num",
"[",
"1",
":",
"(",
"1",
"+",
"prefix_len",
")",
"]",
"if",
"(",
"prefix",
"in",
"data",
")",
":",
"name",
"=",
"_find_lang",
"(",
"data",
"[",
"prefix",
"]",
",",
"lang",
",",
"script",
",",
"region",
")",
"if",
"(",
"name",
"is",
"not",
"None",
")",
":",
"return",
"name",
"else",
":",
"return",
"U_EMPTY_STRING",
"return",
"U_EMPTY_STRING"
] |
return a text description of a phonenumber for the given language .
|
train
| true
|
12,707
|
def _namespace_to_ord(namespace):
    """Convert a namespace string into an int giving its lexicographic order.

    Depends on the module-level _LEX_DISTANCE table, the
    NAMESPACE_CHARACTERS alphabet and the MAX_NAMESPACE_LENGTH limit.
    """
    n = 0
    for (i, c) in enumerate(namespace):
        # Each position contributes the character's alphabet index scaled by
        # the _LEX_DISTANCE weight for the remaining length, plus 1
        # (presumably accounting for the string that ends at this position
        # — confirm against _LEX_DISTANCE's construction).
        n += ((_LEX_DISTANCE[((MAX_NAMESPACE_LENGTH - i) - 1)] * NAMESPACE_CHARACTERS.index(c)) + 1)
    return n
|
[
"def",
"_namespace_to_ord",
"(",
"namespace",
")",
":",
"n",
"=",
"0",
"for",
"(",
"i",
",",
"c",
")",
"in",
"enumerate",
"(",
"namespace",
")",
":",
"n",
"+=",
"(",
"(",
"_LEX_DISTANCE",
"[",
"(",
"(",
"MAX_NAMESPACE_LENGTH",
"-",
"i",
")",
"-",
"1",
")",
"]",
"*",
"NAMESPACE_CHARACTERS",
".",
"index",
"(",
"c",
")",
")",
"+",
"1",
")",
"return",
"n"
] |
converts a namespace string into an int representing its lexographic order .
|
train
| true
|
12,708
|
def dict_to_numpy_array2(d, mapping=None):
    """Convert a dict of dicts *d* into a dense 2D numpy array.

    *mapping* maps keys to row/column indices. When omitted, one is built
    from the union of the outer keys and all inner keys. Entries absent
    from *d* remain 0.
    """
    import numpy
    if mapping is None:
        keys = set(d.keys())
        for inner in d.values():
            keys.update(inner.keys())
        mapping = {key: idx for idx, key in enumerate(keys)}
    size = len(mapping)
    result = numpy.zeros((size, size))
    for row_key, row in mapping.items():
        for col_key, col in mapping.items():
            try:
                result[row, col] = d[row_key][col_key]
            except KeyError:
                # Missing entries simply stay zero.
                pass
    return result
|
[
"def",
"dict_to_numpy_array2",
"(",
"d",
",",
"mapping",
"=",
"None",
")",
":",
"import",
"numpy",
"if",
"(",
"mapping",
"is",
"None",
")",
":",
"s",
"=",
"set",
"(",
"d",
".",
"keys",
"(",
")",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"d",
".",
"items",
"(",
")",
":",
"s",
".",
"update",
"(",
"v",
".",
"keys",
"(",
")",
")",
"mapping",
"=",
"dict",
"(",
"zip",
"(",
"s",
",",
"range",
"(",
"len",
"(",
"s",
")",
")",
")",
")",
"n",
"=",
"len",
"(",
"mapping",
")",
"a",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"for",
"(",
"k1",
",",
"i",
")",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"for",
"(",
"k2",
",",
"j",
")",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"try",
":",
"a",
"[",
"(",
"i",
",",
"j",
")",
"]",
"=",
"d",
"[",
"k1",
"]",
"[",
"k2",
"]",
"except",
"KeyError",
":",
"pass",
"return",
"a"
] |
convert a dictionary of dictionaries to a 2d numpy array with optional mapping .
|
train
| false
|
12,710
|
def list_states(saltenv='base'):
    """List all available state modules in the given salt environment."""
    client = __context__['fileclient']
    return client.list_states(saltenv)
|
[
"def",
"list_states",
"(",
"saltenv",
"=",
"'base'",
")",
":",
"return",
"__context__",
"[",
"'fileclient'",
"]",
".",
"list_states",
"(",
"saltenv",
")"
] |
list all the available state modules in an environment .
|
train
| false
|
12,713
|
def close_cover_tilt(hass, entity_id=None):
    """Close all cover tilts, or only the one named by *entity_id*."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = None
    hass.services.call(DOMAIN, SERVICE_CLOSE_COVER_TILT, data)
|
[
"def",
"close_cover_tilt",
"(",
"hass",
",",
"entity_id",
"=",
"None",
")",
":",
"data",
"=",
"(",
"{",
"ATTR_ENTITY_ID",
":",
"entity_id",
"}",
"if",
"entity_id",
"else",
"None",
")",
"hass",
".",
"services",
".",
"call",
"(",
"DOMAIN",
",",
"SERVICE_CLOSE_COVER_TILT",
",",
"data",
")"
] |
close all or specified cover tilt .
|
train
| false
|
12,716
|
def asset_spec_from_abspath(abspath, package):
    """Convert *abspath* into a ``package:relpath`` asset specification.

    The conversion happens only when *abspath* lies inside *package*;
    otherwise (or for the ``__main__`` package) *abspath* is returned
    unchanged.
    """
    if getattr(package, '__name__', None) == '__main__':
        return abspath
    prefix = package_path(package) + os.path.sep
    if not abspath.startswith(prefix):
        return abspath
    relpath = abspath[len(prefix):]
    # Asset specs always use forward slashes, regardless of the OS.
    return '%s:%s' % (package_name(package), relpath.replace(os.path.sep, '/'))
|
[
"def",
"asset_spec_from_abspath",
"(",
"abspath",
",",
"package",
")",
":",
"if",
"(",
"getattr",
"(",
"package",
",",
"'__name__'",
",",
"None",
")",
"==",
"'__main__'",
")",
":",
"return",
"abspath",
"pp",
"=",
"(",
"package_path",
"(",
"package",
")",
"+",
"os",
".",
"path",
".",
"sep",
")",
"if",
"abspath",
".",
"startswith",
"(",
"pp",
")",
":",
"relpath",
"=",
"abspath",
"[",
"len",
"(",
"pp",
")",
":",
"]",
"return",
"(",
"'%s:%s'",
"%",
"(",
"package_name",
"(",
"package",
")",
",",
"relpath",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'/'",
")",
")",
")",
"return",
"abspath"
] |
try to convert an absolute path to a resource in a package to a resource specification if possible; otherwise return the absolute path .
|
train
| false
|
12,718
|
@cache_permission
def can_add_comment(user, project):
    """Check whether *user* may add a comment to *project* (result cached)."""
    return check_permission(user, project, 'trans.add_comment')
|
[
"@",
"cache_permission",
"def",
"can_add_comment",
"(",
"user",
",",
"project",
")",
":",
"return",
"check_permission",
"(",
"user",
",",
"project",
",",
"'trans.add_comment'",
")"
] |
checks whether user can add comment for given project .
|
train
| false
|
12,720
|
def Download(url):
    """Download *url* and return an (error, filename, content) tuple.

    *error* is '' on success, else one of 'too large', 'download failed'
    or 'not resuming'. The filename comes from a realurl handler, the
    downloader, or the URL path (falling back to 'NoName').
    """
    FilenameFromUrl = (lambda url: urlparse.urlparse(url)[2].split('/')[(-1)])
    FilenameFromHandler = None
    # Let registered handlers rewrite the URL; a handler may return either
    # a bare URL or a (filename, url) tuple. The first hit wins.
    for handler in _realurl_handlers:
        url4dnld = handler(url)
        if (not url4dnld):
            continue
        if isinstance(url4dnld, tuple):
            (FilenameFromHandler, url) = url4dnld
        else:
            url = url4dnld
        break
    dl = FileDownload()
    resp = dl.open(url)
    # Filename preference: handler > downloader > URL path > fallback.
    if FilenameFromHandler:
        filename = FilenameFromHandler
    elif dl.filename:
        filename = dl.filename
    else:
        filename = FilenameFromUrl(dl.realurl)
    if (not filename):
        filename = 'NoName'
    # Map the HTTP status to the tuple's error slot; 206 (partial content)
    # is also accepted as success.
    if (resp.status_code == 413):
        return ('too large', filename, '')
    elif (resp.status_code not in (200, 206)):
        return ('download failed', filename, '')
    elif (not resp.content):
        return ('not resuming', filename, '')
    else:
        return ('', filename, resp.content)
|
[
"def",
"Download",
"(",
"url",
")",
":",
"FilenameFromUrl",
"=",
"(",
"lambda",
"url",
":",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"[",
"2",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"(",
"-",
"1",
")",
"]",
")",
"FilenameFromHandler",
"=",
"None",
"for",
"handler",
"in",
"_realurl_handlers",
":",
"url4dnld",
"=",
"handler",
"(",
"url",
")",
"if",
"(",
"not",
"url4dnld",
")",
":",
"continue",
"if",
"isinstance",
"(",
"url4dnld",
",",
"tuple",
")",
":",
"(",
"FilenameFromHandler",
",",
"url",
")",
"=",
"url4dnld",
"else",
":",
"url",
"=",
"url4dnld",
"break",
"dl",
"=",
"FileDownload",
"(",
")",
"resp",
"=",
"dl",
".",
"open",
"(",
"url",
")",
"if",
"FilenameFromHandler",
":",
"filename",
"=",
"FilenameFromHandler",
"elif",
"dl",
".",
"filename",
":",
"filename",
"=",
"dl",
".",
"filename",
"else",
":",
"filename",
"=",
"FilenameFromUrl",
"(",
"dl",
".",
"realurl",
")",
"if",
"(",
"not",
"filename",
")",
":",
"filename",
"=",
"'NoName'",
"if",
"(",
"resp",
".",
"status_code",
"==",
"413",
")",
":",
"return",
"(",
"'too large'",
",",
"filename",
",",
"''",
")",
"elif",
"(",
"resp",
".",
"status_code",
"not",
"in",
"(",
"200",
",",
"206",
")",
")",
":",
"return",
"(",
"'download failed'",
",",
"filename",
",",
"''",
")",
"elif",
"(",
"not",
"resp",
".",
"content",
")",
":",
"return",
"(",
"'not resuming'",
",",
"filename",
",",
"''",
")",
"else",
":",
"return",
"(",
"''",
",",
"filename",
",",
"resp",
".",
"content",
")"
] |
filedownload工具函数,简化文件下载工作 返回一个元组 .
|
train
| false
|
12,721
|
def compare_torrents(torrent_1, torrent_2):
    """Compare two torrent dicts ("swarms") by their large files.

    Only files larger than 1 MiB are considered; each entry from
    get_files_with_length() is a (name, length) pair.
    """
    files1 = [files for files in torrent_1['metainfo'].get_files_with_length() if (files[1] > (1024 * 1024))]
    files2 = [files for files in torrent_2['metainfo'].get_files_with_length() if (files[1] > (1024 * 1024))]
    if (len(files1) == len(files2)):
        # NOTE(review): this requires EVERY pair (ft1, ft2) across the two
        # lists to match in length and name similarity — not just some
        # one-to-one pairing. Confirm the all-pairs behavior is intended.
        for ft1 in files1:
            for ft2 in files2:
                if ((ft1[1] != ft2[1]) or (levenshtein_dist(ft1[0], ft2[0]) > SIMILARITY_TRESHOLD)):
                    return False
        return True
    return False
|
[
"def",
"compare_torrents",
"(",
"torrent_1",
",",
"torrent_2",
")",
":",
"files1",
"=",
"[",
"files",
"for",
"files",
"in",
"torrent_1",
"[",
"'metainfo'",
"]",
".",
"get_files_with_length",
"(",
")",
"if",
"(",
"files",
"[",
"1",
"]",
">",
"(",
"1024",
"*",
"1024",
")",
")",
"]",
"files2",
"=",
"[",
"files",
"for",
"files",
"in",
"torrent_2",
"[",
"'metainfo'",
"]",
".",
"get_files_with_length",
"(",
")",
"if",
"(",
"files",
"[",
"1",
"]",
">",
"(",
"1024",
"*",
"1024",
")",
")",
"]",
"if",
"(",
"len",
"(",
"files1",
")",
"==",
"len",
"(",
"files2",
")",
")",
":",
"for",
"ft1",
"in",
"files1",
":",
"for",
"ft2",
"in",
"files2",
":",
"if",
"(",
"(",
"ft1",
"[",
"1",
"]",
"!=",
"ft2",
"[",
"1",
"]",
")",
"or",
"(",
"levenshtein_dist",
"(",
"ft1",
"[",
"0",
"]",
",",
"ft2",
"[",
"0",
"]",
")",
">",
"SIMILARITY_TRESHOLD",
")",
")",
":",
"return",
"False",
"return",
"True",
"return",
"False"
] |
comparing swarms .
|
train
| false
|
12,724
|
def get_current_traceback(ignore_system_exceptions=False, show_hidden_frames=False, skip=0):
    """Return the current exception info wrapped in a Traceback object.

    ignore_system_exceptions: re-raise exceptions listed in
        ``system_exceptions`` instead of wrapping them.
    show_hidden_frames: keep frames that filter_hidden_frames() would drop.
    skip: number of leading traceback frames to discard (stops early if
        the traceback is shorter).
    """
    (exc_type, exc_value, tb) = sys.exc_info()
    if (ignore_system_exceptions and (exc_type in system_exceptions)):
        raise
    # Walk past the first `skip` frames without running off the end.
    for x in range_type(skip):
        if (tb.tb_next is None):
            break
        tb = tb.tb_next
    tb = Traceback(exc_type, exc_value, tb)
    if (not show_hidden_frames):
        tb.filter_hidden_frames()
    return tb
|
[
"def",
"get_current_traceback",
"(",
"ignore_system_exceptions",
"=",
"False",
",",
"show_hidden_frames",
"=",
"False",
",",
"skip",
"=",
"0",
")",
":",
"(",
"exc_type",
",",
"exc_value",
",",
"tb",
")",
"=",
"sys",
".",
"exc_info",
"(",
")",
"if",
"(",
"ignore_system_exceptions",
"and",
"(",
"exc_type",
"in",
"system_exceptions",
")",
")",
":",
"raise",
"for",
"x",
"in",
"range_type",
"(",
"skip",
")",
":",
"if",
"(",
"tb",
".",
"tb_next",
"is",
"None",
")",
":",
"break",
"tb",
"=",
"tb",
".",
"tb_next",
"tb",
"=",
"Traceback",
"(",
"exc_type",
",",
"exc_value",
",",
"tb",
")",
"if",
"(",
"not",
"show_hidden_frames",
")",
":",
"tb",
".",
"filter_hidden_frames",
"(",
")",
"return",
"tb"
] |
get the current exception info as traceback object .
|
train
| true
|
12,728
|
def _consteq(str1, str2):
return ((len(str1) == len(str2)) and (sum(((ord(x) ^ ord(y)) for (x, y) in zip(str1, str2))) == 0))
|
[
"def",
"_consteq",
"(",
"str1",
",",
"str2",
")",
":",
"return",
"(",
"(",
"len",
"(",
"str1",
")",
"==",
"len",
"(",
"str2",
")",
")",
"and",
"(",
"sum",
"(",
"(",
"(",
"ord",
"(",
"x",
")",
"^",
"ord",
"(",
"y",
")",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"zip",
"(",
"str1",
",",
"str2",
")",
")",
")",
"==",
"0",
")",
")"
] |
constant-time string comparison .
|
train
| false
|
12,731
|
def multi_file_load_config(*filenames):
    """Load and combine multiple INI configs (with profiles).

    Files that raise ConfigNotFound are skipped. The non-profile sections
    and the 'profiles' sections are merged separately (via
    _merge_list_of_dicts) and the merged profiles are re-attached under
    the 'profiles' key of the result.
    """
    configs = []
    profiles = []
    for filename in filenames:
        try:
            loaded = load_config(filename)
        except botocore.exceptions.ConfigNotFound:
            # Missing files are simply ignored.
            continue
        # Separate profiles from the rest so they can be merged independently.
        profiles.append(loaded.pop('profiles'))
        configs.append(loaded)
    merged_config = _merge_list_of_dicts(configs)
    merged_profiles = _merge_list_of_dicts(profiles)
    merged_config['profiles'] = merged_profiles
    return merged_config
|
[
"def",
"multi_file_load_config",
"(",
"*",
"filenames",
")",
":",
"configs",
"=",
"[",
"]",
"profiles",
"=",
"[",
"]",
"for",
"filename",
"in",
"filenames",
":",
"try",
":",
"loaded",
"=",
"load_config",
"(",
"filename",
")",
"except",
"botocore",
".",
"exceptions",
".",
"ConfigNotFound",
":",
"continue",
"profiles",
".",
"append",
"(",
"loaded",
".",
"pop",
"(",
"'profiles'",
")",
")",
"configs",
".",
"append",
"(",
"loaded",
")",
"merged_config",
"=",
"_merge_list_of_dicts",
"(",
"configs",
")",
"merged_profiles",
"=",
"_merge_list_of_dicts",
"(",
"profiles",
")",
"merged_config",
"[",
"'profiles'",
"]",
"=",
"merged_profiles",
"return",
"merged_config"
] |
load and combine multiple ini configs with profiles .
|
train
| false
|
12,732
|
@sync_performer
def perform_update_s3_error_page(dispatcher, intent):
    """Performer for UpdateS3ErrorPage: set the bucket's website error key.

    Returns the previous error key when it changes, or None when the
    bucket's website configuration already uses the requested key.
    """
    s3 = boto.connect_s3()
    bucket = s3.get_bucket(intent.bucket)
    config = bucket.get_website_configuration_obj()
    new_error_key = intent.error_key
    old_error_key = config.error_key
    if (old_error_key == new_error_key):
        # Nothing to do; avoid a redundant configuration write.
        return None
    else:
        config.error_key = new_error_key
        bucket.set_website_configuration(config)
        return old_error_key
|
[
"@",
"sync_performer",
"def",
"perform_update_s3_error_page",
"(",
"dispatcher",
",",
"intent",
")",
":",
"s3",
"=",
"boto",
".",
"connect_s3",
"(",
")",
"bucket",
"=",
"s3",
".",
"get_bucket",
"(",
"intent",
".",
"bucket",
")",
"config",
"=",
"bucket",
".",
"get_website_configuration_obj",
"(",
")",
"new_error_key",
"=",
"intent",
".",
"error_key",
"old_error_key",
"=",
"config",
".",
"error_key",
"if",
"(",
"old_error_key",
"==",
"new_error_key",
")",
":",
"return",
"None",
"else",
":",
"config",
".",
"error_key",
"=",
"new_error_key",
"bucket",
".",
"set_website_configuration",
"(",
"config",
")",
"return",
"old_error_key"
] |
see :class:updates3errorpage .
|
train
| false
|
12,733
|
def test_reason(monkeypatch):
    """Verify the 'reason' kwarg is forwarded to the sign_addons task."""
    def has_reason(ids, force, reason):
        # Stub replacing SIGN_ADDONS; only checks the forwarded reason.
        assert (reason == 'expiry')
    monkeypatch.setattr(SIGN_ADDONS, has_reason)
    call_command('sign_addons', 123, reason='expiry')
|
[
"def",
"test_reason",
"(",
"monkeypatch",
")",
":",
"def",
"has_reason",
"(",
"ids",
",",
"force",
",",
"reason",
")",
":",
"assert",
"(",
"reason",
"==",
"'expiry'",
")",
"monkeypatch",
".",
"setattr",
"(",
"SIGN_ADDONS",
",",
"has_reason",
")",
"call_command",
"(",
"'sign_addons'",
",",
"123",
",",
"reason",
"=",
"'expiry'",
")"
] |
you can pass a reason .
|
train
| false
|
12,734
|
def skip_if_fake(func):
    """Decorator that skips the wrapped test when running in fake mode."""
    def _skipper(*args, **kw):
        """Run *func* unless FLAGS.fake_tests is set."""
        if not FLAGS.fake_tests:
            return func(*args, **kw)
        raise unittest.SkipTest('Test cannot be run in fake mode')
    return _skipper
|
[
"def",
"skip_if_fake",
"(",
"func",
")",
":",
"def",
"_skipper",
"(",
"*",
"args",
",",
"**",
"kw",
")",
":",
"if",
"FLAGS",
".",
"fake_tests",
":",
"raise",
"unittest",
".",
"SkipTest",
"(",
"'Test cannot be run in fake mode'",
")",
"else",
":",
"return",
"func",
"(",
"*",
"args",
",",
"**",
"kw",
")",
"return",
"_skipper"
] |
decorator that skips a test if running in fake mode .
|
train
| false
|
12,735
|
@public
def vfield(symbols, domain, order=lex):
    """Construct a rational function field and inject its generators into
    the global namespace (via pollute())."""
    _field = FracField(symbols, domain, order)
    pollute([sym.name for sym in _field.symbols], _field.gens)
    return _field
|
[
"@",
"public",
"def",
"vfield",
"(",
"symbols",
",",
"domain",
",",
"order",
"=",
"lex",
")",
":",
"_field",
"=",
"FracField",
"(",
"symbols",
",",
"domain",
",",
"order",
")",
"pollute",
"(",
"[",
"sym",
".",
"name",
"for",
"sym",
"in",
"_field",
".",
"symbols",
"]",
",",
"_field",
".",
"gens",
")",
"return",
"_field"
] |
construct new rational function field and inject generators into global namespace .
|
train
| false
|
12,738
|
def _use_accelerator(state):
    """Enable or disable the wxAgg accelerator.

    Rebinds the module-level agg->wx conversion functions. For wx < 2.8
    the compiled _wxagg accelerator is used when *state* is truthy and
    available, else the pure-Python fallbacks; for wx >= 2.8 the
    WX28-specific pure-Python converters are always used.
    """
    global _convert_agg_to_wx_image
    global _convert_agg_to_wx_bitmap
    # String comparison of the 'x.y' version prefix, e.g. '2.6' < '2.8'.
    if (getattr(wx, '__version__', '0.0')[0:3] < '2.8'):
        if (state and (_wxagg is not None)):
            _convert_agg_to_wx_image = _wxagg.convert_agg_to_wx_image
            _convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap
        else:
            _convert_agg_to_wx_image = _py_convert_agg_to_wx_image
            _convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap
    else:
        # No accelerator path for wx >= 2.8; *state* is ignored here.
        _convert_agg_to_wx_image = _py_WX28_convert_agg_to_wx_image
        _convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap
|
[
"def",
"_use_accelerator",
"(",
"state",
")",
":",
"global",
"_convert_agg_to_wx_image",
"global",
"_convert_agg_to_wx_bitmap",
"if",
"(",
"getattr",
"(",
"wx",
",",
"'__version__'",
",",
"'0.0'",
")",
"[",
"0",
":",
"3",
"]",
"<",
"'2.8'",
")",
":",
"if",
"(",
"state",
"and",
"(",
"_wxagg",
"is",
"not",
"None",
")",
")",
":",
"_convert_agg_to_wx_image",
"=",
"_wxagg",
".",
"convert_agg_to_wx_image",
"_convert_agg_to_wx_bitmap",
"=",
"_wxagg",
".",
"convert_agg_to_wx_bitmap",
"else",
":",
"_convert_agg_to_wx_image",
"=",
"_py_convert_agg_to_wx_image",
"_convert_agg_to_wx_bitmap",
"=",
"_py_convert_agg_to_wx_bitmap",
"else",
":",
"_convert_agg_to_wx_image",
"=",
"_py_WX28_convert_agg_to_wx_image",
"_convert_agg_to_wx_bitmap",
"=",
"_py_WX28_convert_agg_to_wx_bitmap"
] |
enable or disable the wxagg accelerator .
|
train
| false
|
12,739
|
def is_db_connection_error(args):
    """Return True if *args* (an error-message string) indicates a database
    connection failure.

    Matches the MySQL client error codes 2002 (can't connect through
    socket), 2003 (can't connect to server) and 2006 (server has gone
    away) as substrings of the message.
    """
    conn_err_codes = ('2002', '2003', '2006')
    # Substring membership ('in') is the idiomatic equivalent of the
    # original find() != -1 loop.
    return any(err_code in args for err_code in conn_err_codes)
|
[
"def",
"is_db_connection_error",
"(",
"args",
")",
":",
"conn_err_codes",
"=",
"(",
"'2002'",
",",
"'2003'",
",",
"'2006'",
")",
"for",
"err_code",
"in",
"conn_err_codes",
":",
"if",
"(",
"args",
".",
"find",
"(",
"err_code",
")",
"!=",
"(",
"-",
"1",
")",
")",
":",
"return",
"True",
"return",
"False"
] |
return true if error in connecting to db .
|
train
| false
|
12,740
|
def _resolveIPv6(ip, port):
    """Resolve an IPv6 literal into a sockaddr tuple.

    Returns the sockaddr of the first getaddrinfo() result — for IPv6 a
    (host, port, flowinfo, scopeid) 4-tuple.

    NOTE(review): _NUMERIC_ONLY presumably carries AI_NUMERICHOST-style
    flags so no DNS lookup occurs — confirm where it is defined.
    """
    return socket.getaddrinfo(ip, port, 0, 0, 0, _NUMERIC_ONLY)[0][4]
|
[
"def",
"_resolveIPv6",
"(",
"ip",
",",
"port",
")",
":",
"return",
"socket",
".",
"getaddrinfo",
"(",
"ip",
",",
"port",
",",
"0",
",",
"0",
",",
"0",
",",
"_NUMERIC_ONLY",
")",
"[",
"0",
"]",
"[",
"4",
"]"
] |
resolve an ipv6 literal into an ipv6 address .
|
train
| false
|
12,741
|
def create_tag(name=u'', ref=u'', sign=False, settings=None):
    """Entry point for external callers: show a create-tag dialog.

    Builds the dialog parented to the active window, shows and raises it,
    and returns the view.
    """
    parent = qtutils.active_window()
    view = new_create_tag(name=name, ref=ref, sign=sign,
                          settings=settings, parent=parent)
    view.show()
    view.raise_()
    return view
|
[
"def",
"create_tag",
"(",
"name",
"=",
"u''",
",",
"ref",
"=",
"u''",
",",
"sign",
"=",
"False",
",",
"settings",
"=",
"None",
")",
":",
"view",
"=",
"new_create_tag",
"(",
"name",
"=",
"name",
",",
"ref",
"=",
"ref",
",",
"sign",
"=",
"sign",
",",
"settings",
"=",
"settings",
",",
"parent",
"=",
"qtutils",
".",
"active_window",
"(",
")",
")",
"view",
".",
"show",
"(",
")",
"view",
".",
"raise_",
"(",
")",
"return",
"view"
] |
entry point for external callers .
|
train
| false
|
12,742
|
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
    """Return the transformation matrix from Sagittarius to Galactic
    coordinates: the transpose of SGR_MATRIX (the inverse rotation)."""
    return matrix_transpose(SGR_MATRIX)
|
[
"@",
"frame_transform_graph",
".",
"transform",
"(",
"coord",
".",
"StaticMatrixTransform",
",",
"Sagittarius",
",",
"coord",
".",
"Galactic",
")",
"def",
"sgr_to_galactic",
"(",
")",
":",
"return",
"matrix_transpose",
"(",
"SGR_MATRIX",
")"
] |
compute the transformation matrix from heliocentric sgr coordinates to spherical galactic .
|
train
| false
|
12,744
|
def locate_profile(profile='default'):
    """Find the path to the folder associated with an IPython profile.

    Raises IOError when the profile cannot be found.
    """
    from IPython.core.profiledir import ProfileDir, ProfileDirError
    try:
        pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
    except ProfileDirError:
        # Re-raise as IOError so callers need not know ProfileDirError.
        raise IOError(("Couldn't find profile %r" % profile))
    return pd.location
|
[
"def",
"locate_profile",
"(",
"profile",
"=",
"'default'",
")",
":",
"from",
"IPython",
".",
"core",
".",
"profiledir",
"import",
"ProfileDir",
",",
"ProfileDirError",
"try",
":",
"pd",
"=",
"ProfileDir",
".",
"find_profile_dir_by_name",
"(",
"get_ipython_dir",
"(",
")",
",",
"profile",
")",
"except",
"ProfileDirError",
":",
"raise",
"IOError",
"(",
"(",
"\"Couldn't find profile %r\"",
"%",
"profile",
")",
")",
"return",
"pd",
".",
"location"
] |
find the path to the folder associated with a given profile .
|
train
| true
|
12,746
|
def example_certificates_status(course_key):
    """Check the status of the latest example certificates for a course."""
    return ExampleCertificateSet.latest_status(course_key)
|
[
"def",
"example_certificates_status",
"(",
"course_key",
")",
":",
"return",
"ExampleCertificateSet",
".",
"latest_status",
"(",
"course_key",
")"
] |
check the status of example certificates for a course .
|
train
| false
|
12,747
|
def dummy_deepcopy(*arg):
    """No-op replacement for deepcopy(); always returns None.

    Prevents deepcopy() on an anonymous-user object that holds a
    reference to the request.
    """
    return None
|
[
"def",
"dummy_deepcopy",
"(",
"*",
"arg",
")",
":",
"return",
"None"
] |
this is necessary to prevent deepcopy() on anonymous user object that now contains reference to request .
|
train
| false
|
12,748
|
def validate_backup_retention_period(days):
    """Validate BackupRetentionPeriod for a DBInstance.

    The value must pass positive_integer() and be at most 35 days;
    otherwise ValueError is raised. Returns the validated value.
    """
    days = positive_integer(days)
    if int(days) <= 35:
        return days
    raise ValueError('DBInstance BackupRetentionPeriod cannot be larger than 35 days.')
|
[
"def",
"validate_backup_retention_period",
"(",
"days",
")",
":",
"days",
"=",
"positive_integer",
"(",
"days",
")",
"if",
"(",
"int",
"(",
"days",
")",
">",
"35",
")",
":",
"raise",
"ValueError",
"(",
"'DBInstance BackupRetentionPeriod cannot be larger than 35 days.'",
")",
"return",
"days"
] |
validate backupretentionperiod for dbinstance .
|
train
| false
|
12,750
|
def make_long_description():
    """Generate the reST long_description for setup() from source files.

    Concatenates the README, history and license Markdown (HTML comments
    stripped), writes the result to a temporary file, converts it to reST
    and prepends RST_LONG_DESCRIPTION_INTRO.
    """
    readme_path = README_PATH
    # Strip HTML comments from the Markdown sources before combining them.
    readme_md = strip_html_comments(read(readme_path))
    history_md = strip_html_comments(read(HISTORY_PATH))
    license_md = ('License\n=======\n\n' + read(LICENSE_PATH))
    sections = [readme_md, history_md, license_md]
    md_description = '\n\n'.join(sections)
    # Reuse the README's extension for the temporary Markdown file.
    md_ext = os.path.splitext(readme_path)[1]
    md_description_path = make_temp_path(RST_DESCRIPTION_PATH, new_ext=md_ext)
    write(md_description, md_description_path)
    rst_temp_path = make_temp_path(RST_DESCRIPTION_PATH)
    long_description = convert_md_to_rst(md_path=md_description_path, rst_temp_path=rst_temp_path)
    return '\n'.join([RST_LONG_DESCRIPTION_INTRO, long_description])
|
[
"def",
"make_long_description",
"(",
")",
":",
"readme_path",
"=",
"README_PATH",
"readme_md",
"=",
"strip_html_comments",
"(",
"read",
"(",
"readme_path",
")",
")",
"history_md",
"=",
"strip_html_comments",
"(",
"read",
"(",
"HISTORY_PATH",
")",
")",
"license_md",
"=",
"(",
"'License\\n=======\\n\\n'",
"+",
"read",
"(",
"LICENSE_PATH",
")",
")",
"sections",
"=",
"[",
"readme_md",
",",
"history_md",
",",
"license_md",
"]",
"md_description",
"=",
"'\\n\\n'",
".",
"join",
"(",
"sections",
")",
"md_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"readme_path",
")",
"[",
"1",
"]",
"md_description_path",
"=",
"make_temp_path",
"(",
"RST_DESCRIPTION_PATH",
",",
"new_ext",
"=",
"md_ext",
")",
"write",
"(",
"md_description",
",",
"md_description_path",
")",
"rst_temp_path",
"=",
"make_temp_path",
"(",
"RST_DESCRIPTION_PATH",
")",
"long_description",
"=",
"convert_md_to_rst",
"(",
"md_path",
"=",
"md_description_path",
",",
"rst_temp_path",
"=",
"rst_temp_path",
")",
"return",
"'\\n'",
".",
"join",
"(",
"[",
"RST_LONG_DESCRIPTION_INTRO",
",",
"long_description",
"]",
")"
] |
generate the rest long_description for setup() from source files .
|
train
| true
|
12,751
|
def _untested_error(where):
raise RuntimeError(('Unknown %s failure' % (where,)))
|
[
"def",
"_untested_error",
"(",
"where",
")",
":",
"raise",
"RuntimeError",
"(",
"(",
"'Unknown %s failure'",
"%",
"(",
"where",
",",
")",
")",
")"
] |
an openssl api failed somehow .
|
train
| false
|
12,754
|
def royal_road1(individual, order):
    """Royal Road function R1, as presented by Melanie Mitchell in
    "An Introduction to Genetic Algorithms".

    Splits *individual* (a bit sequence) into blocks of *order* bits;
    because of the integer division `value / max_value`, only an all-ones
    block contributes, adding *order* to the fitness. Returns a 1-tuple.

    NOTE: Python 2 code — relies on xrange() and on integer division in
    both `len(individual) / order` and `value / max_value`.
    """
    nelem = (len(individual) / order)
    max_value = int(((2 ** order) - 1))
    total = 0
    for i in xrange(nelem):
        # Interpret the i-th block of bits as a binary integer.
        value = int(''.join(map(str, individual[(i * order):((i * order) + order)])), 2)
        # Integer division: contributes `order` only when value == max_value.
        total += (int(order) * int((value / max_value)))
    return (total,)
|
[
"def",
"royal_road1",
"(",
"individual",
",",
"order",
")",
":",
"nelem",
"=",
"(",
"len",
"(",
"individual",
")",
"/",
"order",
")",
"max_value",
"=",
"int",
"(",
"(",
"(",
"2",
"**",
"order",
")",
"-",
"1",
")",
")",
"total",
"=",
"0",
"for",
"i",
"in",
"xrange",
"(",
"nelem",
")",
":",
"value",
"=",
"int",
"(",
"''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"individual",
"[",
"(",
"i",
"*",
"order",
")",
":",
"(",
"(",
"i",
"*",
"order",
")",
"+",
"order",
")",
"]",
")",
")",
",",
"2",
")",
"total",
"+=",
"(",
"int",
"(",
"order",
")",
"*",
"int",
"(",
"(",
"value",
"/",
"max_value",
")",
")",
")",
"return",
"(",
"total",
",",
")"
] |
royal road function r1 as presented by melanie mitchell in : "an introduction to genetic algorithms" .
|
train
| false
|
12,756
|
def verifyResults(lines, testRegex):
    """Assert that a set of output lines matches a regular expression.

    Prompts are removed, the last character of each line is dropped
    (presumably the trailing newline — confirm), the remainder is joined,
    and the result must match *testRegex* from the start.
    """
    lines = removePrompts(lines)
    chopped = ''.join([line[:(-1)] for line in lines])
    Assert(re.match(testRegex, chopped), ((('Expected Regular Expression=' + testRegex) + '\nActual Lines=') + chopped))
|
[
"def",
"verifyResults",
"(",
"lines",
",",
"testRegex",
")",
":",
"lines",
"=",
"removePrompts",
"(",
"lines",
")",
"chopped",
"=",
"''",
".",
"join",
"(",
"[",
"line",
"[",
":",
"(",
"-",
"1",
")",
"]",
"for",
"line",
"in",
"lines",
"]",
")",
"Assert",
"(",
"re",
".",
"match",
"(",
"testRegex",
",",
"chopped",
")",
",",
"(",
"(",
"(",
"'Expected Regular Expression='",
"+",
"testRegex",
")",
"+",
"'\\nActual Lines='",
")",
"+",
"chopped",
")",
")"
] |
verifies that a set of lines match a regular expression .
|
train
| false
|
12,757
|
def _NewFacetFromPb(pb):
    """Construct an Atom or Number facet from a document_pb facet protobuf."""
    name = _DecodeUTF8(pb.name())
    val_type = pb.value().type()
    value = _DecodeValue(_GetValue(pb.value()), val_type)
    if (val_type == document_pb.FacetValue.ATOM):
        return AtomFacet(name, value)
    elif (val_type == document_pb.FacetValue.NUMBER):
        return NumberFacet(name, value)
    # NOTE(review): an unknown value type RETURNS an InvalidRequest
    # instance instead of raising it — confirm this is intentional.
    return InvalidRequest(('Unknown facet value type %d' % val_type))
|
[
"def",
"_NewFacetFromPb",
"(",
"pb",
")",
":",
"name",
"=",
"_DecodeUTF8",
"(",
"pb",
".",
"name",
"(",
")",
")",
"val_type",
"=",
"pb",
".",
"value",
"(",
")",
".",
"type",
"(",
")",
"value",
"=",
"_DecodeValue",
"(",
"_GetValue",
"(",
"pb",
".",
"value",
"(",
")",
")",
",",
"val_type",
")",
"if",
"(",
"val_type",
"==",
"document_pb",
".",
"FacetValue",
".",
"ATOM",
")",
":",
"return",
"AtomFacet",
"(",
"name",
",",
"value",
")",
"elif",
"(",
"val_type",
"==",
"document_pb",
".",
"FacetValue",
".",
"NUMBER",
")",
":",
"return",
"NumberFacet",
"(",
"name",
",",
"value",
")",
"return",
"InvalidRequest",
"(",
"(",
"'Unknown facet value type %d'",
"%",
"val_type",
")",
")"
] |
constructs a facet from a document_pb .
|
train
| false
|
12,758
|
@register.simple_tag
def crossorigin():
    """Template tag: return ' crossorigin="anonymous"' for <script> tags
    when static assets are served from a different origin than the
    system URL prefix; otherwise return ''."""
    if absolute_uri(settings.STATIC_URL).startswith(options.get('system.url-prefix')):
        # Same-origin assets need no crossorigin attribute.
        return ''
    return ' crossorigin="anonymous"'
|
[
"@",
"register",
".",
"simple_tag",
"def",
"crossorigin",
"(",
")",
":",
"if",
"absolute_uri",
"(",
"settings",
".",
"STATIC_URL",
")",
".",
"startswith",
"(",
"options",
".",
"get",
"(",
"'system.url-prefix'",
")",
")",
":",
"return",
"''",
"return",
"' crossorigin=\"anonymous\"'"
] |
returns an additional crossorigin="anonymous" snippet for use in a <script> tag if our asset urls are from a different domain than the system .
|
train
| false
|
12,759
|
def construct_pagination_urls(request, course_id, api_next_url, api_previous_url):
    """Construct next/previous LMS 'notes' URLs from the API pagination URLs.

    Returns a (next_url, previous_url) tuple; an element is None when the
    corresponding API URL is None.
    """
    def lms_url(url):
        """Rebuild an API pagination URL as an absolute LMS 'notes' URL,
        keeping only the page/page_size/text query parameters."""
        if (url is None):
            return None
        keys = ('page', 'page_size', 'text')
        parsed = urlparse.urlparse(url)
        query_params = urlparse.parse_qs(parsed.query)
        # parse_qs yields lists; take each parameter's first value.
        encoded_query_params = urlencode({key: query_params.get(key)[0] for key in keys if (key in query_params)})
        return '{}?{}'.format(request.build_absolute_uri(base_url), encoded_query_params)
    base_url = reverse('notes', kwargs={'course_id': course_id})
    next_url = lms_url(api_next_url)
    previous_url = lms_url(api_previous_url)
    return (next_url, previous_url)
|
[
"def",
"construct_pagination_urls",
"(",
"request",
",",
"course_id",
",",
"api_next_url",
",",
"api_previous_url",
")",
":",
"def",
"lms_url",
"(",
"url",
")",
":",
"if",
"(",
"url",
"is",
"None",
")",
":",
"return",
"None",
"keys",
"=",
"(",
"'page'",
",",
"'page_size'",
",",
"'text'",
")",
"parsed",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"query_params",
"=",
"urlparse",
".",
"parse_qs",
"(",
"parsed",
".",
"query",
")",
"encoded_query_params",
"=",
"urlencode",
"(",
"{",
"key",
":",
"query_params",
".",
"get",
"(",
"key",
")",
"[",
"0",
"]",
"for",
"key",
"in",
"keys",
"if",
"(",
"key",
"in",
"query_params",
")",
"}",
")",
"return",
"'{}?{}'",
".",
"format",
"(",
"request",
".",
"build_absolute_uri",
"(",
"base_url",
")",
",",
"encoded_query_params",
")",
"base_url",
"=",
"reverse",
"(",
"'notes'",
",",
"kwargs",
"=",
"{",
"'course_id'",
":",
"course_id",
"}",
")",
"next_url",
"=",
"lms_url",
"(",
"api_next_url",
")",
"previous_url",
"=",
"lms_url",
"(",
"api_previous_url",
")",
"return",
"(",
"next_url",
",",
"previous_url",
")"
] |
construct next and previous urls for lms .
|
train
| false
|
12,760
|
def validate_license_model(license_model):
    """Validate LicenseModel for a DBInstance.

    Raises ValueError unless the value is in VALID_LICENSE_MODELS;
    returns the validated value.
    """
    if license_model in VALID_LICENSE_MODELS:
        return license_model
    raise ValueError(('DBInstance LicenseModel must be one of: %s' % ', '.join(VALID_LICENSE_MODELS)))
|
[
"def",
"validate_license_model",
"(",
"license_model",
")",
":",
"if",
"(",
"license_model",
"not",
"in",
"VALID_LICENSE_MODELS",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'DBInstance LicenseModel must be one of: %s'",
"%",
"', '",
".",
"join",
"(",
"VALID_LICENSE_MODELS",
")",
")",
")",
"return",
"license_model"
] |
validate licensemodel for dbinstance .
|
train
| false
|
12,761
|
def unique_on(*groups):
    """Decorator for StoredObject subclasses: declare unique indices.

    Each *group* is a sequence of keys; for every group an ascending
    unique index spec is appended to the class's __indices__ list
    (created if absent).
    """
    def wrapper(cls):
        cls.__indices__ = getattr(cls, '__indices__', [])
        cls.__indices__.extend([{'key_or_list': [(key, pymongo.ASCENDING) for key in group], 'unique': True} for group in groups])
        return cls
    return wrapper
|
[
"def",
"unique_on",
"(",
"*",
"groups",
")",
":",
"def",
"wrapper",
"(",
"cls",
")",
":",
"cls",
".",
"__indices__",
"=",
"getattr",
"(",
"cls",
",",
"'__indices__'",
",",
"[",
"]",
")",
"cls",
".",
"__indices__",
".",
"extend",
"(",
"[",
"{",
"'key_or_list'",
":",
"[",
"(",
"key",
",",
"pymongo",
".",
"ASCENDING",
")",
"for",
"key",
"in",
"group",
"]",
",",
"'unique'",
":",
"True",
"}",
"for",
"group",
"in",
"groups",
"]",
")",
"return",
"cls",
"return",
"wrapper"
] |
decorator for subclasses of storedobject .
|
train
| false
|
12,762
|
def coerce_put_post(request):
if (request.method == 'PUT'):
if hasattr(request, '_post'):
del request._post
del request._files
try:
request.method = 'POST'
request._load_post_and_files()
request.method = 'PUT'
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = 'PUT'
request.PUT = request.POST
|
[
"def",
"coerce_put_post",
"(",
"request",
")",
":",
"if",
"(",
"request",
".",
"method",
"==",
"'PUT'",
")",
":",
"if",
"hasattr",
"(",
"request",
",",
"'_post'",
")",
":",
"del",
"request",
".",
"_post",
"del",
"request",
".",
"_files",
"try",
":",
"request",
".",
"method",
"=",
"'POST'",
"request",
".",
"_load_post_and_files",
"(",
")",
"request",
".",
"method",
"=",
"'PUT'",
"except",
"AttributeError",
":",
"request",
".",
"META",
"[",
"'REQUEST_METHOD'",
"]",
"=",
"'POST'",
"request",
".",
"_load_post_and_files",
"(",
")",
"request",
".",
"META",
"[",
"'REQUEST_METHOD'",
"]",
"=",
"'PUT'",
"request",
".",
"PUT",
"=",
"request",
".",
"POST"
] |
django doesnt particularly understand rest .
|
train
| true
|
12,763
|
def return_probe_from_definition(df, types):
args = df['args']
retval_type = df['retval_type']
printf_specifier = type_description(retval_type, types)['printf_specifier']
template = Template(RETURN_PROBE_TEMPLATE)
mapping = {'__LIBRARY__': df.get('library', ''), '__NAME__': df['api'], '__ARGS_FORMAT_STRING__': arguments_format_string(args, types), '__RETVAL_FORMAT_SPECIFIER__': printf_specifier, '__ARGUMENTS__': arguments_section(args, types), '__RETVAL__': retval_section(retval_type, types), '__ARGUMENTS_POP_FROM_STACK__': pop_from_stack_section(args)}
return template.substitute(mapping)
|
[
"def",
"return_probe_from_definition",
"(",
"df",
",",
"types",
")",
":",
"args",
"=",
"df",
"[",
"'args'",
"]",
"retval_type",
"=",
"df",
"[",
"'retval_type'",
"]",
"printf_specifier",
"=",
"type_description",
"(",
"retval_type",
",",
"types",
")",
"[",
"'printf_specifier'",
"]",
"template",
"=",
"Template",
"(",
"RETURN_PROBE_TEMPLATE",
")",
"mapping",
"=",
"{",
"'__LIBRARY__'",
":",
"df",
".",
"get",
"(",
"'library'",
",",
"''",
")",
",",
"'__NAME__'",
":",
"df",
"[",
"'api'",
"]",
",",
"'__ARGS_FORMAT_STRING__'",
":",
"arguments_format_string",
"(",
"args",
",",
"types",
")",
",",
"'__RETVAL_FORMAT_SPECIFIER__'",
":",
"printf_specifier",
",",
"'__ARGUMENTS__'",
":",
"arguments_section",
"(",
"args",
",",
"types",
")",
",",
"'__RETVAL__'",
":",
"retval_section",
"(",
"retval_type",
",",
"types",
")",
",",
"'__ARGUMENTS_POP_FROM_STACK__'",
":",
"pop_from_stack_section",
"(",
"args",
")",
"}",
"return",
"template",
".",
"substitute",
"(",
"mapping",
")"
] |
generates a return dtrace probe from the given api definition .
|
train
| false
|
12,764
|
def shift_series(s):
s0 = s.copy()
s0 = s0.shift(1)
s0.iloc[0] = 0.0
return s0
|
[
"def",
"shift_series",
"(",
"s",
")",
":",
"s0",
"=",
"s",
".",
"copy",
"(",
")",
"s0",
"=",
"s0",
".",
"shift",
"(",
"1",
")",
"s0",
".",
"iloc",
"[",
"0",
"]",
"=",
"0.0",
"return",
"s0"
] |
produces a copy of the provided series shifted by one .
|
train
| false
|
12,765
|
def tensorsymmetry(*args):
from sympy.combinatorics import Permutation
def tableau2bsgs(a):
if (len(a) == 1):
n = a[0]
bsgs = get_symmetric_group_sgs(n, 1)
elif all(((x == 1) for x in a)):
n = len(a)
bsgs = get_symmetric_group_sgs(n)
elif (a == [2, 2]):
bsgs = riemann_bsgs
else:
raise NotImplementedError
return bsgs
if (not args):
return TensorSymmetry(Tuple(), Tuple(Permutation(1)))
if ((len(args) == 2) and isinstance(args[1][0], Permutation)):
return TensorSymmetry(args)
(base, sgs) = tableau2bsgs(args[0])
for a in args[1:]:
(basex, sgsx) = tableau2bsgs(a)
(base, sgs) = bsgs_direct_product(base, sgs, basex, sgsx)
return TensorSymmetry(Tuple(base, sgs))
|
[
"def",
"tensorsymmetry",
"(",
"*",
"args",
")",
":",
"from",
"sympy",
".",
"combinatorics",
"import",
"Permutation",
"def",
"tableau2bsgs",
"(",
"a",
")",
":",
"if",
"(",
"len",
"(",
"a",
")",
"==",
"1",
")",
":",
"n",
"=",
"a",
"[",
"0",
"]",
"bsgs",
"=",
"get_symmetric_group_sgs",
"(",
"n",
",",
"1",
")",
"elif",
"all",
"(",
"(",
"(",
"x",
"==",
"1",
")",
"for",
"x",
"in",
"a",
")",
")",
":",
"n",
"=",
"len",
"(",
"a",
")",
"bsgs",
"=",
"get_symmetric_group_sgs",
"(",
"n",
")",
"elif",
"(",
"a",
"==",
"[",
"2",
",",
"2",
"]",
")",
":",
"bsgs",
"=",
"riemann_bsgs",
"else",
":",
"raise",
"NotImplementedError",
"return",
"bsgs",
"if",
"(",
"not",
"args",
")",
":",
"return",
"TensorSymmetry",
"(",
"Tuple",
"(",
")",
",",
"Tuple",
"(",
"Permutation",
"(",
"1",
")",
")",
")",
"if",
"(",
"(",
"len",
"(",
"args",
")",
"==",
"2",
")",
"and",
"isinstance",
"(",
"args",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"Permutation",
")",
")",
":",
"return",
"TensorSymmetry",
"(",
"args",
")",
"(",
"base",
",",
"sgs",
")",
"=",
"tableau2bsgs",
"(",
"args",
"[",
"0",
"]",
")",
"for",
"a",
"in",
"args",
"[",
"1",
":",
"]",
":",
"(",
"basex",
",",
"sgsx",
")",
"=",
"tableau2bsgs",
"(",
"a",
")",
"(",
"base",
",",
"sgs",
")",
"=",
"bsgs_direct_product",
"(",
"base",
",",
"sgs",
",",
"basex",
",",
"sgsx",
")",
"return",
"TensorSymmetry",
"(",
"Tuple",
"(",
"base",
",",
"sgs",
")",
")"
] |
return a tensorsymmetry object .
|
train
| false
|
12,766
|
def getInfillDictionary(arounds, aroundWidth, infillInset, infillWidth, pixelTable, rotatedLoops, testLoops=None):
slightlyGreaterThanInfillInset = (intercircle.globalIntercircleMultiplier * infillInset)
allPoints = intercircle.getPointsFromLoops(rotatedLoops, infillInset, 0.7)
centers = intercircle.getCentersFromPoints(allPoints, slightlyGreaterThanInfillInset)
infillDictionary = {}
for center in centers:
insetCenter = intercircle.getSimplifiedInsetFromClockwiseLoop(center, infillInset)
insetPoint = insetCenter[0]
if ((len(insetCenter) > 2) and intercircle.getIsLarge(insetCenter, infillInset) and euclidean.getIsInFilledRegion(rotatedLoops, insetPoint)):
around = euclidean.getSimplifiedLoop(center, infillInset)
euclidean.addLoopToPixelTable(around, pixelTable, aroundWidth)
arounds.append(around)
insetLoop = intercircle.getSimplifiedInsetFromClockwiseLoop(center, infillInset)
euclidean.addXIntersectionsFromLoopForTable(insetLoop, infillDictionary, infillWidth)
if (testLoops != None):
testLoops.append(insetLoop)
return infillDictionary
|
[
"def",
"getInfillDictionary",
"(",
"arounds",
",",
"aroundWidth",
",",
"infillInset",
",",
"infillWidth",
",",
"pixelTable",
",",
"rotatedLoops",
",",
"testLoops",
"=",
"None",
")",
":",
"slightlyGreaterThanInfillInset",
"=",
"(",
"intercircle",
".",
"globalIntercircleMultiplier",
"*",
"infillInset",
")",
"allPoints",
"=",
"intercircle",
".",
"getPointsFromLoops",
"(",
"rotatedLoops",
",",
"infillInset",
",",
"0.7",
")",
"centers",
"=",
"intercircle",
".",
"getCentersFromPoints",
"(",
"allPoints",
",",
"slightlyGreaterThanInfillInset",
")",
"infillDictionary",
"=",
"{",
"}",
"for",
"center",
"in",
"centers",
":",
"insetCenter",
"=",
"intercircle",
".",
"getSimplifiedInsetFromClockwiseLoop",
"(",
"center",
",",
"infillInset",
")",
"insetPoint",
"=",
"insetCenter",
"[",
"0",
"]",
"if",
"(",
"(",
"len",
"(",
"insetCenter",
")",
">",
"2",
")",
"and",
"intercircle",
".",
"getIsLarge",
"(",
"insetCenter",
",",
"infillInset",
")",
"and",
"euclidean",
".",
"getIsInFilledRegion",
"(",
"rotatedLoops",
",",
"insetPoint",
")",
")",
":",
"around",
"=",
"euclidean",
".",
"getSimplifiedLoop",
"(",
"center",
",",
"infillInset",
")",
"euclidean",
".",
"addLoopToPixelTable",
"(",
"around",
",",
"pixelTable",
",",
"aroundWidth",
")",
"arounds",
".",
"append",
"(",
"around",
")",
"insetLoop",
"=",
"intercircle",
".",
"getSimplifiedInsetFromClockwiseLoop",
"(",
"center",
",",
"infillInset",
")",
"euclidean",
".",
"addXIntersectionsFromLoopForTable",
"(",
"insetLoop",
",",
"infillDictionary",
",",
"infillWidth",
")",
"if",
"(",
"testLoops",
"!=",
"None",
")",
":",
"testLoops",
".",
"append",
"(",
"insetLoop",
")",
"return",
"infillDictionary"
] |
get combined fill loops which include most of the points .
|
train
| false
|
12,767
|
def GetQueryNodeTextUnicode(node):
if ((node.getType() == QueryParser.VALUE) and (len(node.children) >= 2)):
return u''.join((c.getText() for c in node.children[1:]))
elif (node.getType() == QueryParser.VALUE):
return None
return node.getText()
|
[
"def",
"GetQueryNodeTextUnicode",
"(",
"node",
")",
":",
"if",
"(",
"(",
"node",
".",
"getType",
"(",
")",
"==",
"QueryParser",
".",
"VALUE",
")",
"and",
"(",
"len",
"(",
"node",
".",
"children",
")",
">=",
"2",
")",
")",
":",
"return",
"u''",
".",
"join",
"(",
"(",
"c",
".",
"getText",
"(",
")",
"for",
"c",
"in",
"node",
".",
"children",
"[",
"1",
":",
"]",
")",
")",
"elif",
"(",
"node",
".",
"getType",
"(",
")",
"==",
"QueryParser",
".",
"VALUE",
")",
":",
"return",
"None",
"return",
"node",
".",
"getText",
"(",
")"
] |
returns the unicode text from node .
|
train
| false
|
12,768
|
def assert_request_user_has_resource_api_permission(request, resource_api, permission_type):
has_permission = request_user_has_resource_api_permission(request=request, resource_api=resource_api, permission_type=permission_type)
if (not has_permission):
user_db = get_user_db_from_request(request=request)
raise ResourceAccessDeniedError(user_db=user_db, resource_db=resource_api, permission_type=permission_type)
|
[
"def",
"assert_request_user_has_resource_api_permission",
"(",
"request",
",",
"resource_api",
",",
"permission_type",
")",
":",
"has_permission",
"=",
"request_user_has_resource_api_permission",
"(",
"request",
"=",
"request",
",",
"resource_api",
"=",
"resource_api",
",",
"permission_type",
"=",
"permission_type",
")",
"if",
"(",
"not",
"has_permission",
")",
":",
"user_db",
"=",
"get_user_db_from_request",
"(",
"request",
"=",
"request",
")",
"raise",
"ResourceAccessDeniedError",
"(",
"user_db",
"=",
"user_db",
",",
"resource_db",
"=",
"resource_api",
",",
"permission_type",
"=",
"permission_type",
")"
] |
check that currently logged-in user has specified permission for the resource which is to be created .
|
train
| false
|
12,769
|
def processSVGElementg(elementNode, svgReader):
if ('id' not in elementNode.attributes):
return
idString = elementNode.attributes['id']
if ('beginningOfControlSection' in elementNode.attributes):
if (elementNode.attributes['beginningOfControlSection'].lower()[:1] == 't'):
svgReader.stopProcessing = True
return
idStringLower = idString.lower()
zIndex = idStringLower.find('z:')
if (zIndex < 0):
idStringLower = getLabelString(elementNode.attributes)
zIndex = idStringLower.find('z:')
if (zIndex < 0):
return
floatFromValue = euclidean.getFloatFromValue(idStringLower[(zIndex + len('z:')):].strip())
if (floatFromValue != None):
svgReader.z = floatFromValue
|
[
"def",
"processSVGElementg",
"(",
"elementNode",
",",
"svgReader",
")",
":",
"if",
"(",
"'id'",
"not",
"in",
"elementNode",
".",
"attributes",
")",
":",
"return",
"idString",
"=",
"elementNode",
".",
"attributes",
"[",
"'id'",
"]",
"if",
"(",
"'beginningOfControlSection'",
"in",
"elementNode",
".",
"attributes",
")",
":",
"if",
"(",
"elementNode",
".",
"attributes",
"[",
"'beginningOfControlSection'",
"]",
".",
"lower",
"(",
")",
"[",
":",
"1",
"]",
"==",
"'t'",
")",
":",
"svgReader",
".",
"stopProcessing",
"=",
"True",
"return",
"idStringLower",
"=",
"idString",
".",
"lower",
"(",
")",
"zIndex",
"=",
"idStringLower",
".",
"find",
"(",
"'z:'",
")",
"if",
"(",
"zIndex",
"<",
"0",
")",
":",
"idStringLower",
"=",
"getLabelString",
"(",
"elementNode",
".",
"attributes",
")",
"zIndex",
"=",
"idStringLower",
".",
"find",
"(",
"'z:'",
")",
"if",
"(",
"zIndex",
"<",
"0",
")",
":",
"return",
"floatFromValue",
"=",
"euclidean",
".",
"getFloatFromValue",
"(",
"idStringLower",
"[",
"(",
"zIndex",
"+",
"len",
"(",
"'z:'",
")",
")",
":",
"]",
".",
"strip",
"(",
")",
")",
"if",
"(",
"floatFromValue",
"!=",
"None",
")",
":",
"svgReader",
".",
"z",
"=",
"floatFromValue"
] |
process elementnode by svgreader .
|
train
| false
|
12,770
|
def CDLMATHOLD(barDs, count, penetration=(-4e+37)):
return call_talib_with_ohlc(barDs, count, talib.CDLMATHOLD, penetration)
|
[
"def",
"CDLMATHOLD",
"(",
"barDs",
",",
"count",
",",
"penetration",
"=",
"(",
"-",
"4e+37",
")",
")",
":",
"return",
"call_talib_with_ohlc",
"(",
"barDs",
",",
"count",
",",
"talib",
".",
"CDLMATHOLD",
",",
"penetration",
")"
] |
mat hold .
|
train
| false
|
12,771
|
@aborts
def test_require_single_missing_key():
require('blah')
|
[
"@",
"aborts",
"def",
"test_require_single_missing_key",
"(",
")",
":",
"require",
"(",
"'blah'",
")"
] |
when given a single non-existent key .
|
train
| false
|
12,772
|
def csm_shape(csm):
return csm_properties(csm)[3]
|
[
"def",
"csm_shape",
"(",
"csm",
")",
":",
"return",
"csm_properties",
"(",
"csm",
")",
"[",
"3",
"]"
] |
return the shape field of the sparse variable .
|
train
| false
|
12,773
|
def rss_warnings():
rss = RSS()
rss.channel.title = 'SABnzbd Warnings'
rss.channel.description = 'Overview of warnings/errors'
rss.channel.link = 'http://sabnzbd.org/'
rss.channel.language = 'en'
for warn in sabnzbd.GUIHANDLER.content():
item = Item()
item.title = warn
rss.addItem(item)
rss.channel.lastBuildDate = std_time(time.time())
rss.channel.pubDate = rss.channel.lastBuildDate
return rss.write()
|
[
"def",
"rss_warnings",
"(",
")",
":",
"rss",
"=",
"RSS",
"(",
")",
"rss",
".",
"channel",
".",
"title",
"=",
"'SABnzbd Warnings'",
"rss",
".",
"channel",
".",
"description",
"=",
"'Overview of warnings/errors'",
"rss",
".",
"channel",
".",
"link",
"=",
"'http://sabnzbd.org/'",
"rss",
".",
"channel",
".",
"language",
"=",
"'en'",
"for",
"warn",
"in",
"sabnzbd",
".",
"GUIHANDLER",
".",
"content",
"(",
")",
":",
"item",
"=",
"Item",
"(",
")",
"item",
".",
"title",
"=",
"warn",
"rss",
".",
"addItem",
"(",
"item",
")",
"rss",
".",
"channel",
".",
"lastBuildDate",
"=",
"std_time",
"(",
"time",
".",
"time",
"(",
")",
")",
"rss",
".",
"channel",
".",
"pubDate",
"=",
"rss",
".",
"channel",
".",
"lastBuildDate",
"return",
"rss",
".",
"write",
"(",
")"
] |
return an rss feed with last warnings/errors .
|
train
| false
|
12,774
|
def model_format_dict(obj):
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {'verbose_name': force_unicode(opts.verbose_name), 'verbose_name_plural': force_unicode(opts.verbose_name_plural)}
|
[
"def",
"model_format_dict",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"models",
".",
"Model",
",",
"models",
".",
"base",
".",
"ModelBase",
")",
")",
":",
"opts",
"=",
"obj",
".",
"_meta",
"elif",
"isinstance",
"(",
"obj",
",",
"models",
".",
"query",
".",
"QuerySet",
")",
":",
"opts",
"=",
"obj",
".",
"model",
".",
"_meta",
"else",
":",
"opts",
"=",
"obj",
"return",
"{",
"'verbose_name'",
":",
"force_unicode",
"(",
"opts",
".",
"verbose_name",
")",
",",
"'verbose_name_plural'",
":",
"force_unicode",
"(",
"opts",
".",
"verbose_name_plural",
")",
"}"
] |
return a dict with keys verbose_name and verbose_name_plural .
|
train
| false
|
12,775
|
def load_nietzsche_dataset(path='data/nietzsche/'):
print 'Load or Download nietzsche dataset > {}'.format(path)
filename = 'nietzsche.txt'
url = 'https://s3.amazonaws.com/text-datasets/'
filepath = maybe_download_and_extract(filename, path, url)
with open(filepath, 'r') as f:
words = f.read()
return words
|
[
"def",
"load_nietzsche_dataset",
"(",
"path",
"=",
"'data/nietzsche/'",
")",
":",
"print",
"'Load or Download nietzsche dataset > {}'",
".",
"format",
"(",
"path",
")",
"filename",
"=",
"'nietzsche.txt'",
"url",
"=",
"'https://s3.amazonaws.com/text-datasets/'",
"filepath",
"=",
"maybe_download_and_extract",
"(",
"filename",
",",
"path",
",",
"url",
")",
"with",
"open",
"(",
"filepath",
",",
"'r'",
")",
"as",
"f",
":",
"words",
"=",
"f",
".",
"read",
"(",
")",
"return",
"words"
] |
load nietzsche dataset .
|
train
| false
|
12,776
|
def get_custom_metric(client, project_id, custom_metric_type):
request = client.projects().metricDescriptors().list(name=project_id, filter='metric.type=starts_with("{}")'.format(custom_metric_type))
response = request.execute()
print 'ListCustomMetrics response:'
pprint.pprint(response)
try:
return response['metricDescriptors']
except KeyError:
return None
|
[
"def",
"get_custom_metric",
"(",
"client",
",",
"project_id",
",",
"custom_metric_type",
")",
":",
"request",
"=",
"client",
".",
"projects",
"(",
")",
".",
"metricDescriptors",
"(",
")",
".",
"list",
"(",
"name",
"=",
"project_id",
",",
"filter",
"=",
"'metric.type=starts_with(\"{}\")'",
".",
"format",
"(",
"custom_metric_type",
")",
")",
"response",
"=",
"request",
".",
"execute",
"(",
")",
"print",
"'ListCustomMetrics response:'",
"pprint",
".",
"pprint",
"(",
"response",
")",
"try",
":",
"return",
"response",
"[",
"'metricDescriptors'",
"]",
"except",
"KeyError",
":",
"return",
"None"
] |
retrieve the custom metric we created .
|
train
| false
|
12,779
|
def _CheckText(value, name='value', empty_ok=True):
return _ValidateString(value, name, MAXIMUM_FIELD_VALUE_LENGTH, empty_ok)
|
[
"def",
"_CheckText",
"(",
"value",
",",
"name",
"=",
"'value'",
",",
"empty_ok",
"=",
"True",
")",
":",
"return",
"_ValidateString",
"(",
"value",
",",
"name",
",",
"MAXIMUM_FIELD_VALUE_LENGTH",
",",
"empty_ok",
")"
] |
checks the field text is a valid string .
|
train
| false
|
12,780
|
def get_system_date_time(utc_offset=None):
offset_time = _get_offset_time(utc_offset)
return datetime.strftime(offset_time, '%Y-%m-%d %H:%M:%S')
|
[
"def",
"get_system_date_time",
"(",
"utc_offset",
"=",
"None",
")",
":",
"offset_time",
"=",
"_get_offset_time",
"(",
"utc_offset",
")",
"return",
"datetime",
".",
"strftime",
"(",
"offset_time",
",",
"'%Y-%m-%d %H:%M:%S'",
")"
] |
get the system date/time .
|
train
| false
|
12,781
|
def Arcsin(name, a=0, b=1):
return rv(name, ArcsinDistribution, (a, b))
|
[
"def",
"Arcsin",
"(",
"name",
",",
"a",
"=",
"0",
",",
"b",
"=",
"1",
")",
":",
"return",
"rv",
"(",
"name",
",",
"ArcsinDistribution",
",",
"(",
"a",
",",
"b",
")",
")"
] |
create a continuous random variable with an arcsin distribution .
|
train
| false
|
12,782
|
def delete_virtual_disk_spec(client_factory, device):
virtual_device_config = client_factory.create('ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = 'remove'
virtual_device_config.fileOperation = 'destroy'
virtual_device_config.device = device
return virtual_device_config
|
[
"def",
"delete_virtual_disk_spec",
"(",
"client_factory",
",",
"device",
")",
":",
"virtual_device_config",
"=",
"client_factory",
".",
"create",
"(",
"'ns0:VirtualDeviceConfigSpec'",
")",
"virtual_device_config",
".",
"operation",
"=",
"'remove'",
"virtual_device_config",
".",
"fileOperation",
"=",
"'destroy'",
"virtual_device_config",
".",
"device",
"=",
"device",
"return",
"virtual_device_config"
] |
builds spec for the deletion of an already existing virtual disk from vm .
|
train
| false
|
12,784
|
def unarchive(host, source_material):
if (source_material.endswith('.gz') or source_material.endswith('.gzip')):
host.run(('gunzip "%s"' % utils.sh_escape(source_material)))
source_material = '.'.join(source_material.split('.')[:(-1)])
elif source_material.endswith('bz2'):
host.run(('bunzip2 "%s"' % utils.sh_escape(source_material)))
source_material = '.'.join(source_material.split('.')[:(-1)])
if source_material.endswith('.tar'):
retval = host.run(('tar -C "%s" -xvf "%s"' % (utils.sh_escape(os.path.dirname(source_material)), utils.sh_escape(source_material))))
source_material = os.path.join(os.path.dirname(source_material), retval.stdout.split()[0])
return source_material
|
[
"def",
"unarchive",
"(",
"host",
",",
"source_material",
")",
":",
"if",
"(",
"source_material",
".",
"endswith",
"(",
"'.gz'",
")",
"or",
"source_material",
".",
"endswith",
"(",
"'.gzip'",
")",
")",
":",
"host",
".",
"run",
"(",
"(",
"'gunzip \"%s\"'",
"%",
"utils",
".",
"sh_escape",
"(",
"source_material",
")",
")",
")",
"source_material",
"=",
"'.'",
".",
"join",
"(",
"source_material",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"(",
"-",
"1",
")",
"]",
")",
"elif",
"source_material",
".",
"endswith",
"(",
"'bz2'",
")",
":",
"host",
".",
"run",
"(",
"(",
"'bunzip2 \"%s\"'",
"%",
"utils",
".",
"sh_escape",
"(",
"source_material",
")",
")",
")",
"source_material",
"=",
"'.'",
".",
"join",
"(",
"source_material",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"(",
"-",
"1",
")",
"]",
")",
"if",
"source_material",
".",
"endswith",
"(",
"'.tar'",
")",
":",
"retval",
"=",
"host",
".",
"run",
"(",
"(",
"'tar -C \"%s\" -xvf \"%s\"'",
"%",
"(",
"utils",
".",
"sh_escape",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"source_material",
")",
")",
",",
"utils",
".",
"sh_escape",
"(",
"source_material",
")",
")",
")",
")",
"source_material",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"source_material",
")",
",",
"retval",
".",
"stdout",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"return",
"source_material"
] |
extract the contents of a tar or zip file at *archive_path* into the directory *dest* .
|
train
| false
|
12,785
|
@utils.arg('network', metavar='<network>', help=_('UUID or label of network.'))
@deprecated_network
def do_network_show(cs, args):
network = utils.find_resource(cs.networks, args.network)
utils.print_dict(network.to_dict())
|
[
"@",
"utils",
".",
"arg",
"(",
"'network'",
",",
"metavar",
"=",
"'<network>'",
",",
"help",
"=",
"_",
"(",
"'UUID or label of network.'",
")",
")",
"@",
"deprecated_network",
"def",
"do_network_show",
"(",
"cs",
",",
"args",
")",
":",
"network",
"=",
"utils",
".",
"find_resource",
"(",
"cs",
".",
"networks",
",",
"args",
".",
"network",
")",
"utils",
".",
"print_dict",
"(",
"network",
".",
"to_dict",
"(",
")",
")"
] |
show details about the given network .
|
train
| false
|
12,788
|
def _filter_discardable_metadata(metadata):
return {name: val for (name, val) in metadata.items() if (val is not _DISCARD)}
|
[
"def",
"_filter_discardable_metadata",
"(",
"metadata",
")",
":",
"return",
"{",
"name",
":",
"val",
"for",
"(",
"name",
",",
"val",
")",
"in",
"metadata",
".",
"items",
"(",
")",
"if",
"(",
"val",
"is",
"not",
"_DISCARD",
")",
"}"
] |
return a copy of a dict .
|
train
| false
|
12,789
|
@conf.commands.register
def wrpcap(filename, pkt, *args, **kargs):
with PcapWriter(filename, *args, **kargs) as fdesc:
fdesc.write(pkt)
|
[
"@",
"conf",
".",
"commands",
".",
"register",
"def",
"wrpcap",
"(",
"filename",
",",
"pkt",
",",
"*",
"args",
",",
"**",
"kargs",
")",
":",
"with",
"PcapWriter",
"(",
"filename",
",",
"*",
"args",
",",
"**",
"kargs",
")",
"as",
"fdesc",
":",
"fdesc",
".",
"write",
"(",
"pkt",
")"
] |
write a list of packets to a pcap file gz: set to 1 to save a gzipped capture linktype: force linktype value endianness: "<" or ">" .
|
train
| false
|
12,790
|
def make_conditional(response, last_modified=None, etag=None, max_age=0):
response.cache_control.must_revalidate = True
response.cache_control.max_age = max_age
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(request.httprequest)
|
[
"def",
"make_conditional",
"(",
"response",
",",
"last_modified",
"=",
"None",
",",
"etag",
"=",
"None",
",",
"max_age",
"=",
"0",
")",
":",
"response",
".",
"cache_control",
".",
"must_revalidate",
"=",
"True",
"response",
".",
"cache_control",
".",
"max_age",
"=",
"max_age",
"if",
"last_modified",
":",
"response",
".",
"last_modified",
"=",
"last_modified",
"if",
"etag",
":",
"response",
".",
"set_etag",
"(",
"etag",
")",
"return",
"response",
".",
"make_conditional",
"(",
"request",
".",
"httprequest",
")"
] |
makes the provided response conditional based upon the request .
|
train
| false
|
12,791
|
def register_assert_rewrite(*names):
for name in names:
if (not isinstance(name, str)):
msg = 'expected module names as *args, got {0} instead'
raise TypeError(msg.format(repr(names)))
for hook in sys.meta_path:
if isinstance(hook, rewrite.AssertionRewritingHook):
importhook = hook
break
else:
importhook = DummyRewriteHook()
importhook.mark_rewrite(*names)
|
[
"def",
"register_assert_rewrite",
"(",
"*",
"names",
")",
":",
"for",
"name",
"in",
"names",
":",
"if",
"(",
"not",
"isinstance",
"(",
"name",
",",
"str",
")",
")",
":",
"msg",
"=",
"'expected module names as *args, got {0} instead'",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"repr",
"(",
"names",
")",
")",
")",
"for",
"hook",
"in",
"sys",
".",
"meta_path",
":",
"if",
"isinstance",
"(",
"hook",
",",
"rewrite",
".",
"AssertionRewritingHook",
")",
":",
"importhook",
"=",
"hook",
"break",
"else",
":",
"importhook",
"=",
"DummyRewriteHook",
"(",
")",
"importhook",
".",
"mark_rewrite",
"(",
"*",
"names",
")"
] |
register one or more module names to be rewritten on import .
|
train
| false
|
12,792
|
def onSelectAccountDBInterface(accountName):
return 'default'
|
[
"def",
"onSelectAccountDBInterface",
"(",
"accountName",
")",
":",
"return",
"'default'"
] |
kbengine method .
|
train
| false
|
12,793
|
def LoadCheckFromFile(file_path, check_id, overwrite_if_exists=True):
configs = LoadConfigsFromFile(file_path)
conf = configs.get(check_id)
check = Check(**conf)
check.Validate()
CheckRegistry.RegisterCheck(check, source=('file:%s' % file_path), overwrite_if_exists=overwrite_if_exists)
logging.debug('Loaded check %s from %s', check.check_id, file_path)
return check
|
[
"def",
"LoadCheckFromFile",
"(",
"file_path",
",",
"check_id",
",",
"overwrite_if_exists",
"=",
"True",
")",
":",
"configs",
"=",
"LoadConfigsFromFile",
"(",
"file_path",
")",
"conf",
"=",
"configs",
".",
"get",
"(",
"check_id",
")",
"check",
"=",
"Check",
"(",
"**",
"conf",
")",
"check",
".",
"Validate",
"(",
")",
"CheckRegistry",
".",
"RegisterCheck",
"(",
"check",
",",
"source",
"=",
"(",
"'file:%s'",
"%",
"file_path",
")",
",",
"overwrite_if_exists",
"=",
"overwrite_if_exists",
")",
"logging",
".",
"debug",
"(",
"'Loaded check %s from %s'",
",",
"check",
".",
"check_id",
",",
"file_path",
")",
"return",
"check"
] |
load a single check from a file .
|
train
| true
|
12,794
|
@pytest.mark.parametrize('url1, url2', [('http://example.com', ''), ('', 'http://example.com')])
def test_same_domain_invalid_url(url1, url2):
with pytest.raises(urlutils.InvalidUrlError):
urlutils.same_domain(QUrl(url1), QUrl(url2))
|
[
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"'url1, url2'",
",",
"[",
"(",
"'http://example.com'",
",",
"''",
")",
",",
"(",
"''",
",",
"'http://example.com'",
")",
"]",
")",
"def",
"test_same_domain_invalid_url",
"(",
"url1",
",",
"url2",
")",
":",
"with",
"pytest",
".",
"raises",
"(",
"urlutils",
".",
"InvalidUrlError",
")",
":",
"urlutils",
".",
"same_domain",
"(",
"QUrl",
"(",
"url1",
")",
",",
"QUrl",
"(",
"url2",
")",
")"
] |
test same_domain with invalid urls .
|
train
| false
|
12,795
|
@register.tag
def permission_request_form(parser, token):
return PermissionFormNode.handle_token(parser, token, approved=False)
|
[
"@",
"register",
".",
"tag",
"def",
"permission_request_form",
"(",
"parser",
",",
"token",
")",
":",
"return",
"PermissionFormNode",
".",
"handle_token",
"(",
"parser",
",",
"token",
",",
"approved",
"=",
"False",
")"
] |
renders an "add permissions" form for the given object .
|
train
| false
|
12,797
|
def convert_BooleanProperty(model, prop, kwargs):
return f.BooleanField(**kwargs)
|
[
"def",
"convert_BooleanProperty",
"(",
"model",
",",
"prop",
",",
"kwargs",
")",
":",
"return",
"f",
".",
"BooleanField",
"(",
"**",
"kwargs",
")"
] |
returns a form field for a db .
|
train
| false
|
12,798
|
def grad_sum(x, y, z):
raise NotImplementedError('TODO: implement this function.')
|
[
"def",
"grad_sum",
"(",
"x",
",",
"y",
",",
"z",
")",
":",
"raise",
"NotImplementedError",
"(",
"'TODO: implement this function.'",
")"
] |
x: a theano variable y: a theano variable z: a theano expression involving x and y returns dz / dx + dz / dy .
|
train
| false
|
12,799
|
def preserve_spaces(txt):
txt = re.sub('(?P<space>[ ]{2,})', (lambda mo: (' ' + (' ' * (len(mo.group('space')) - 1)))), txt)
txt = txt.replace(' DCTB ', ' ')
return txt
|
[
"def",
"preserve_spaces",
"(",
"txt",
")",
":",
"txt",
"=",
"re",
".",
"sub",
"(",
"'(?P<space>[ ]{2,})'",
",",
"(",
"lambda",
"mo",
":",
"(",
"' '",
"+",
"(",
"' '",
"*",
"(",
"len",
"(",
"mo",
".",
"group",
"(",
"'space'",
")",
")",
"-",
"1",
")",
")",
")",
")",
",",
"txt",
")",
"txt",
"=",
"txt",
".",
"replace",
"(",
"' DCTB '",
",",
"' '",
")",
"return",
"txt"
] |
replaces spaces multiple spaces with entities .
|
train
| false
|
12,800
|
def _is_valid_path(path, urlconf=None):
try:
urlresolvers.resolve(path, urlconf)
return True
except urlresolvers.Resolver404:
return False
|
[
"def",
"_is_valid_path",
"(",
"path",
",",
"urlconf",
"=",
"None",
")",
":",
"try",
":",
"urlresolvers",
".",
"resolve",
"(",
"path",
",",
"urlconf",
")",
"return",
"True",
"except",
"urlresolvers",
".",
"Resolver404",
":",
"return",
"False"
] |
returns true if the given path resolves against the default url resolver .
|
train
| false
|
12,801
|
def extract_entities(template):
if ((template is None) or _RE_NONE_ENTITIES.search(template)):
return MATCH_ALL
extraction = _RE_GET_ENTITIES.findall(template)
if (len(extraction) > 0):
return list(set(extraction))
return MATCH_ALL
|
[
"def",
"extract_entities",
"(",
"template",
")",
":",
"if",
"(",
"(",
"template",
"is",
"None",
")",
"or",
"_RE_NONE_ENTITIES",
".",
"search",
"(",
"template",
")",
")",
":",
"return",
"MATCH_ALL",
"extraction",
"=",
"_RE_GET_ENTITIES",
".",
"findall",
"(",
"template",
")",
"if",
"(",
"len",
"(",
"extraction",
")",
">",
"0",
")",
":",
"return",
"list",
"(",
"set",
"(",
"extraction",
")",
")",
"return",
"MATCH_ALL"
] |
extract all entities for state_changed listener from template string .
|
train
| false
|
12,802
|
def p_unary_expression_6(t):
pass
|
[
"def",
"p_unary_expression_6",
"(",
"t",
")",
":",
"pass"
] |
unary_expression : sizeof lparen type_name rparen .
|
train
| false
|
12,804
|
def _create_char_spinner():
while True:
for c in '|/-\\':
(yield c)
|
[
"def",
"_create_char_spinner",
"(",
")",
":",
"while",
"True",
":",
"for",
"c",
"in",
"'|/-\\\\'",
":",
"(",
"yield",
"c",
")"
] |
creates a generator yielding a char based spinner .
|
train
| false
|
12,805
|
def _setRandomEncoderResolution(minResolution=0.001):
encoder = model_params.MODEL_PARAMS['modelParams']['sensorParams']['encoders']['value']
if (encoder['type'] == 'RandomDistributedScalarEncoder'):
rangePadding = (abs((_INPUT_MAX - _INPUT_MIN)) * 0.2)
minValue = (_INPUT_MIN - rangePadding)
maxValue = (_INPUT_MAX + rangePadding)
resolution = max(minResolution, ((maxValue - minValue) / encoder.pop('numBuckets')))
encoder['resolution'] = resolution
|
[
"def",
"_setRandomEncoderResolution",
"(",
"minResolution",
"=",
"0.001",
")",
":",
"encoder",
"=",
"model_params",
".",
"MODEL_PARAMS",
"[",
"'modelParams'",
"]",
"[",
"'sensorParams'",
"]",
"[",
"'encoders'",
"]",
"[",
"'value'",
"]",
"if",
"(",
"encoder",
"[",
"'type'",
"]",
"==",
"'RandomDistributedScalarEncoder'",
")",
":",
"rangePadding",
"=",
"(",
"abs",
"(",
"(",
"_INPUT_MAX",
"-",
"_INPUT_MIN",
")",
")",
"*",
"0.2",
")",
"minValue",
"=",
"(",
"_INPUT_MIN",
"-",
"rangePadding",
")",
"maxValue",
"=",
"(",
"_INPUT_MAX",
"+",
"rangePadding",
")",
"resolution",
"=",
"max",
"(",
"minResolution",
",",
"(",
"(",
"maxValue",
"-",
"minValue",
")",
"/",
"encoder",
".",
"pop",
"(",
"'numBuckets'",
")",
")",
")",
"encoder",
"[",
"'resolution'",
"]",
"=",
"resolution"
] |
given model params .
|
train
| true
|
12,807
|
def condor_stop(external_id):
failure_message = None
try:
check_call(('condor_rm', external_id))
except CalledProcessError:
failure_message = 'condor_rm failed'
except Exception as e:
('error encountered calling condor_rm: %s' % e)
return failure_message
|
[
"def",
"condor_stop",
"(",
"external_id",
")",
":",
"failure_message",
"=",
"None",
"try",
":",
"check_call",
"(",
"(",
"'condor_rm'",
",",
"external_id",
")",
")",
"except",
"CalledProcessError",
":",
"failure_message",
"=",
"'condor_rm failed'",
"except",
"Exception",
"as",
"e",
":",
"(",
"'error encountered calling condor_rm: %s'",
"%",
"e",
")",
"return",
"failure_message"
] |
stop running condor job and return a failure_message if this fails .
|
train
| true
|
12,808
|
def mimedata_from_paths(paths):
abspaths = [core.abspath(path) for path in paths]
urls = [QtCore.QUrl.fromLocalFile(path) for path in abspaths]
mimedata = QtCore.QMimeData()
mimedata.setUrls(urls)
paths_text = core.list2cmdline(abspaths)
encoding = gitcfg.current().get(u'cola.dragencoding', u'utf-16')
moz_text = core.encode(paths_text, encoding=encoding)
mimedata.setData(u'text/x-moz-url', moz_text)
return mimedata
|
[
"def",
"mimedata_from_paths",
"(",
"paths",
")",
":",
"abspaths",
"=",
"[",
"core",
".",
"abspath",
"(",
"path",
")",
"for",
"path",
"in",
"paths",
"]",
"urls",
"=",
"[",
"QtCore",
".",
"QUrl",
".",
"fromLocalFile",
"(",
"path",
")",
"for",
"path",
"in",
"abspaths",
"]",
"mimedata",
"=",
"QtCore",
".",
"QMimeData",
"(",
")",
"mimedata",
".",
"setUrls",
"(",
"urls",
")",
"paths_text",
"=",
"core",
".",
"list2cmdline",
"(",
"abspaths",
")",
"encoding",
"=",
"gitcfg",
".",
"current",
"(",
")",
".",
"get",
"(",
"u'cola.dragencoding'",
",",
"u'utf-16'",
")",
"moz_text",
"=",
"core",
".",
"encode",
"(",
"paths_text",
",",
"encoding",
"=",
"encoding",
")",
"mimedata",
".",
"setData",
"(",
"u'text/x-moz-url'",
",",
"moz_text",
")",
"return",
"mimedata"
] |
return mimedata with a list of absolute path urls .
|
train
| false
|
12,809
|
@click.command(u'reset-perms')
@pass_context
def reset_perms(context):
from frappe.permissions import reset_perms
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
for d in frappe.db.sql_list(u'select name from `tabDocType`\n DCTB DCTB DCTB DCTB where istable=0 and custom=0'):
frappe.clear_cache(doctype=d)
reset_perms(d)
finally:
frappe.destroy()
|
[
"@",
"click",
".",
"command",
"(",
"u'reset-perms'",
")",
"@",
"pass_context",
"def",
"reset_perms",
"(",
"context",
")",
":",
"from",
"frappe",
".",
"permissions",
"import",
"reset_perms",
"for",
"site",
"in",
"context",
".",
"sites",
":",
"try",
":",
"frappe",
".",
"init",
"(",
"site",
"=",
"site",
")",
"frappe",
".",
"connect",
"(",
")",
"for",
"d",
"in",
"frappe",
".",
"db",
".",
"sql_list",
"(",
"u'select name from `tabDocType`\\n DCTB DCTB DCTB DCTB where istable=0 and custom=0'",
")",
":",
"frappe",
".",
"clear_cache",
"(",
"doctype",
"=",
"d",
")",
"reset_perms",
"(",
"d",
")",
"finally",
":",
"frappe",
".",
"destroy",
"(",
")"
] |
reset permissions for given doctype .
|
train
| false
|
12,810
|
def treetypes(root):
w = NodeTypeWriter()
w.visit(root)
return u'\n'.join((([u''] + w.result) + [u'']))
|
[
"def",
"treetypes",
"(",
"root",
")",
":",
"w",
"=",
"NodeTypeWriter",
"(",
")",
"w",
".",
"visit",
"(",
"root",
")",
"return",
"u'\\n'",
".",
"join",
"(",
"(",
"(",
"[",
"u''",
"]",
"+",
"w",
".",
"result",
")",
"+",
"[",
"u''",
"]",
")",
")"
] |
returns a string representing the tree by class names .
|
train
| false
|
12,811
|
def _quote_name(name):
return connection.ops.quote_name(name)
|
[
"def",
"_quote_name",
"(",
"name",
")",
":",
"return",
"connection",
".",
"ops",
".",
"quote_name",
"(",
"name",
")"
] |
shorthand for connection .
|
train
| false
|
12,812
|
def _log_error_with_data(msg, post):
id = random.randint(0, 99999999)
msg = ('[%s] %s (dumping data)' % (id, msg))
paypal_log.error(msg)
logme = {'txn_id': post.get('txn_id'), 'txn_type': post.get('txn_type'), 'payer_email': post.get('payer_email'), 'receiver_email': post.get('receiver_email'), 'payment_status': post.get('payment_status'), 'payment_type': post.get('payment_type'), 'mc_gross': post.get('mc_gross'), 'item_number': post.get('item_number')}
paypal_log.error(('[%s] PayPal Data: %s' % (id, logme)))
|
[
"def",
"_log_error_with_data",
"(",
"msg",
",",
"post",
")",
":",
"id",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"99999999",
")",
"msg",
"=",
"(",
"'[%s] %s (dumping data)'",
"%",
"(",
"id",
",",
"msg",
")",
")",
"paypal_log",
".",
"error",
"(",
"msg",
")",
"logme",
"=",
"{",
"'txn_id'",
":",
"post",
".",
"get",
"(",
"'txn_id'",
")",
",",
"'txn_type'",
":",
"post",
".",
"get",
"(",
"'txn_type'",
")",
",",
"'payer_email'",
":",
"post",
".",
"get",
"(",
"'payer_email'",
")",
",",
"'receiver_email'",
":",
"post",
".",
"get",
"(",
"'receiver_email'",
")",
",",
"'payment_status'",
":",
"post",
".",
"get",
"(",
"'payment_status'",
")",
",",
"'payment_type'",
":",
"post",
".",
"get",
"(",
"'payment_type'",
")",
",",
"'mc_gross'",
":",
"post",
".",
"get",
"(",
"'mc_gross'",
")",
",",
"'item_number'",
":",
"post",
".",
"get",
"(",
"'item_number'",
")",
"}",
"paypal_log",
".",
"error",
"(",
"(",
"'[%s] PayPal Data: %s'",
"%",
"(",
"id",
",",
"logme",
")",
")",
")"
] |
log a message along with some of the post info from paypal .
|
train
| false
|
12,813
|
def rsa_sign(xml, ref_uri, private_key, password=None, cert=None, c14n_exc=True, sign_template=SIGN_REF_TMPL, key_info_template=KEY_INFO_RSA_TMPL):
ref_xml = canonicalize(xml, c14n_exc)
signed_info = (sign_template % {'ref_uri': ref_uri, 'digest_value': sha1_hash_digest(ref_xml)})
signed_info = canonicalize(signed_info, c14n_exc)
pkey = RSA.load_key(private_key, (lambda *args, **kwargs: password))
signature = pkey.sign(hashlib.sha1(signed_info).digest())
return {'ref_xml': ref_xml, 'ref_uri': ref_uri, 'signed_info': signed_info, 'signature_value': base64.b64encode(signature), 'key_info': key_info(pkey, cert, key_info_template)}
|
[
"def",
"rsa_sign",
"(",
"xml",
",",
"ref_uri",
",",
"private_key",
",",
"password",
"=",
"None",
",",
"cert",
"=",
"None",
",",
"c14n_exc",
"=",
"True",
",",
"sign_template",
"=",
"SIGN_REF_TMPL",
",",
"key_info_template",
"=",
"KEY_INFO_RSA_TMPL",
")",
":",
"ref_xml",
"=",
"canonicalize",
"(",
"xml",
",",
"c14n_exc",
")",
"signed_info",
"=",
"(",
"sign_template",
"%",
"{",
"'ref_uri'",
":",
"ref_uri",
",",
"'digest_value'",
":",
"sha1_hash_digest",
"(",
"ref_xml",
")",
"}",
")",
"signed_info",
"=",
"canonicalize",
"(",
"signed_info",
",",
"c14n_exc",
")",
"pkey",
"=",
"RSA",
".",
"load_key",
"(",
"private_key",
",",
"(",
"lambda",
"*",
"args",
",",
"**",
"kwargs",
":",
"password",
")",
")",
"signature",
"=",
"pkey",
".",
"sign",
"(",
"hashlib",
".",
"sha1",
"(",
"signed_info",
")",
".",
"digest",
"(",
")",
")",
"return",
"{",
"'ref_xml'",
":",
"ref_xml",
",",
"'ref_uri'",
":",
"ref_uri",
",",
"'signed_info'",
":",
"signed_info",
",",
"'signature_value'",
":",
"base64",
".",
"b64encode",
"(",
"signature",
")",
",",
"'key_info'",
":",
"key_info",
"(",
"pkey",
",",
"cert",
",",
"key_info_template",
")",
"}"
] |
sign an xml document usign rsa .
|
train
| false
|
12,814
|
def OutHeader1(text):
OutHeader(text, '=')
|
[
"def",
"OutHeader1",
"(",
"text",
")",
":",
"OutHeader",
"(",
"text",
",",
"'='",
")"
] |
output a level 1 header comment .
|
train
| false
|
12,816
|
def _read_pfid_ed(fid):
out = {'comment_size': read_int32(fid), 'name': read_str(fid, 17)}
fid.seek(9, 1)
out.update({'pdf_number': read_int16(fid), 'total_events': read_int32(fid), 'timestamp': read_int32(fid), 'flags': read_int32(fid), 'de_process': read_int32(fid), 'checksum': read_int32(fid), 'ed_id': read_int32(fid), 'win_width': read_float(fid), 'win_offset': read_float(fid)})
fid.seek(8, 1)
return out
|
[
"def",
"_read_pfid_ed",
"(",
"fid",
")",
":",
"out",
"=",
"{",
"'comment_size'",
":",
"read_int32",
"(",
"fid",
")",
",",
"'name'",
":",
"read_str",
"(",
"fid",
",",
"17",
")",
"}",
"fid",
".",
"seek",
"(",
"9",
",",
"1",
")",
"out",
".",
"update",
"(",
"{",
"'pdf_number'",
":",
"read_int16",
"(",
"fid",
")",
",",
"'total_events'",
":",
"read_int32",
"(",
"fid",
")",
",",
"'timestamp'",
":",
"read_int32",
"(",
"fid",
")",
",",
"'flags'",
":",
"read_int32",
"(",
"fid",
")",
",",
"'de_process'",
":",
"read_int32",
"(",
"fid",
")",
",",
"'checksum'",
":",
"read_int32",
"(",
"fid",
")",
",",
"'ed_id'",
":",
"read_int32",
"(",
"fid",
")",
",",
"'win_width'",
":",
"read_float",
"(",
"fid",
")",
",",
"'win_offset'",
":",
"read_float",
"(",
"fid",
")",
"}",
")",
"fid",
".",
"seek",
"(",
"8",
",",
"1",
")",
"return",
"out"
] |
read pdf ed file .
|
train
| false
|
12,820
|
def create_realistic_servicepair(test):
from_pool = StoragePool(reactor, create_zfs_pool(test), FilePath(test.mktemp()))
from_config = FilePath(test.mktemp())
from_service = VolumeService(from_config, from_pool, reactor=Clock())
from_service.startService()
test.addCleanup(from_service.stopService)
to_pool = StoragePool(reactor, create_zfs_pool(test), FilePath(test.mktemp()))
to_config = FilePath(test.mktemp())
to_service = VolumeService(to_config, to_pool, reactor=Clock())
to_service.startService()
test.addCleanup(to_service.stopService)
remote = RemoteVolumeManager(MutatingProcessNode(to_service), to_config)
origin_remote = LocalVolumeManager(from_service)
return ServicePair(from_service=from_service, to_service=to_service, remote=remote, origin_remote=origin_remote)
|
[
"def",
"create_realistic_servicepair",
"(",
"test",
")",
":",
"from_pool",
"=",
"StoragePool",
"(",
"reactor",
",",
"create_zfs_pool",
"(",
"test",
")",
",",
"FilePath",
"(",
"test",
".",
"mktemp",
"(",
")",
")",
")",
"from_config",
"=",
"FilePath",
"(",
"test",
".",
"mktemp",
"(",
")",
")",
"from_service",
"=",
"VolumeService",
"(",
"from_config",
",",
"from_pool",
",",
"reactor",
"=",
"Clock",
"(",
")",
")",
"from_service",
".",
"startService",
"(",
")",
"test",
".",
"addCleanup",
"(",
"from_service",
".",
"stopService",
")",
"to_pool",
"=",
"StoragePool",
"(",
"reactor",
",",
"create_zfs_pool",
"(",
"test",
")",
",",
"FilePath",
"(",
"test",
".",
"mktemp",
"(",
")",
")",
")",
"to_config",
"=",
"FilePath",
"(",
"test",
".",
"mktemp",
"(",
")",
")",
"to_service",
"=",
"VolumeService",
"(",
"to_config",
",",
"to_pool",
",",
"reactor",
"=",
"Clock",
"(",
")",
")",
"to_service",
".",
"startService",
"(",
")",
"test",
".",
"addCleanup",
"(",
"to_service",
".",
"stopService",
")",
"remote",
"=",
"RemoteVolumeManager",
"(",
"MutatingProcessNode",
"(",
"to_service",
")",
",",
"to_config",
")",
"origin_remote",
"=",
"LocalVolumeManager",
"(",
"from_service",
")",
"return",
"ServicePair",
"(",
"from_service",
"=",
"from_service",
",",
"to_service",
"=",
"to_service",
",",
"remote",
"=",
"remote",
",",
"origin_remote",
"=",
"origin_remote",
")"
] |
create a servicepair that uses zfs for testing remotevolumemanager .
|
train
| false
|
12,821
|
def receiver(signal, **kwargs):
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
|
[
"def",
"receiver",
"(",
"signal",
",",
"**",
"kwargs",
")",
":",
"def",
"_decorator",
"(",
"func",
")",
":",
"if",
"isinstance",
"(",
"signal",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"s",
"in",
"signal",
":",
"s",
".",
"connect",
"(",
"func",
",",
"**",
"kwargs",
")",
"else",
":",
"signal",
".",
"connect",
"(",
"func",
",",
"**",
"kwargs",
")",
"return",
"func",
"return",
"_decorator"
] |
a decorator for connecting receivers to signals .
|
train
| true
|
12,823
|
def atol(*args):
try:
s = args[0]
except IndexError:
raise TypeError(('function requires at least 1 argument: %d given' % len(args)))
if (type(s) == _StringType):
return _apply(_long, args)
else:
raise TypeError(('argument 1: expected string, %s found' % type(s).__name__))
|
[
"def",
"atol",
"(",
"*",
"args",
")",
":",
"try",
":",
"s",
"=",
"args",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"TypeError",
"(",
"(",
"'function requires at least 1 argument: %d given'",
"%",
"len",
"(",
"args",
")",
")",
")",
"if",
"(",
"type",
"(",
"s",
")",
"==",
"_StringType",
")",
":",
"return",
"_apply",
"(",
"_long",
",",
"args",
")",
"else",
":",
"raise",
"TypeError",
"(",
"(",
"'argument 1: expected string, %s found'",
"%",
"type",
"(",
"s",
")",
".",
"__name__",
")",
")"
] |
atol -> long return the long integer represented by the string s in the given base .
|
train
| false
|
12,824
|
def generate_mtime_map(opts, path_map):
file_map = {}
for (saltenv, path_list) in six.iteritems(path_map):
for path in path_list:
for (directory, dirnames, filenames) in os.walk(path):
dirnames[:] = [d for d in dirnames if (not is_file_ignored(opts, d))]
for item in filenames:
try:
file_path = os.path.join(directory, item)
file_map[file_path] = os.path.getmtime(file_path)
except (OSError, IOError):
log.info('Failed to get mtime on {0}, dangling symlink ?'.format(file_path))
continue
return file_map
|
[
"def",
"generate_mtime_map",
"(",
"opts",
",",
"path_map",
")",
":",
"file_map",
"=",
"{",
"}",
"for",
"(",
"saltenv",
",",
"path_list",
")",
"in",
"six",
".",
"iteritems",
"(",
"path_map",
")",
":",
"for",
"path",
"in",
"path_list",
":",
"for",
"(",
"directory",
",",
"dirnames",
",",
"filenames",
")",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"dirnames",
"[",
":",
"]",
"=",
"[",
"d",
"for",
"d",
"in",
"dirnames",
"if",
"(",
"not",
"is_file_ignored",
"(",
"opts",
",",
"d",
")",
")",
"]",
"for",
"item",
"in",
"filenames",
":",
"try",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"item",
")",
"file_map",
"[",
"file_path",
"]",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"file_path",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
":",
"log",
".",
"info",
"(",
"'Failed to get mtime on {0}, dangling symlink ?'",
".",
"format",
"(",
"file_path",
")",
")",
"continue",
"return",
"file_map"
] |
generate a dict of filename -> mtime .
|
train
| false
|
12,825
|
def test_replace_tuple():
replaced = replace_hy_obj((long_type(0),), HyInteger(13))
assert (type(replaced) == HyList)
assert (type(replaced[0]) == HyInteger)
assert (replaced == HyList([HyInteger(0)]))
|
[
"def",
"test_replace_tuple",
"(",
")",
":",
"replaced",
"=",
"replace_hy_obj",
"(",
"(",
"long_type",
"(",
"0",
")",
",",
")",
",",
"HyInteger",
"(",
"13",
")",
")",
"assert",
"(",
"type",
"(",
"replaced",
")",
"==",
"HyList",
")",
"assert",
"(",
"type",
"(",
"replaced",
"[",
"0",
"]",
")",
"==",
"HyInteger",
")",
"assert",
"(",
"replaced",
"==",
"HyList",
"(",
"[",
"HyInteger",
"(",
"0",
")",
"]",
")",
")"
] |
test replacing tuples .
|
train
| false
|
12,826
|
def _get_count(queryset):
try:
return queryset.count()
except (AttributeError, TypeError):
return len(queryset)
|
[
"def",
"_get_count",
"(",
"queryset",
")",
":",
"try",
":",
"return",
"queryset",
".",
"count",
"(",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
")",
":",
"return",
"len",
"(",
"queryset",
")"
] |
determine an object count .
|
train
| false
|
12,827
|
def read_cache_entry(f):
beginoffset = f.tell()
ctime = read_cache_time(f)
mtime = read_cache_time(f)
(dev, ino, mode, uid, gid, size, sha, flags) = struct.unpack('>LLLLLL20sH', f.read(((20 + (4 * 6)) + 2)))
name = f.read((flags & 4095))
real_size = (((f.tell() - beginoffset) + 8) & (~ 7))
f.read(((beginoffset + real_size) - f.tell()))
return (name, ctime, mtime, dev, ino, mode, uid, gid, size, sha_to_hex(sha), (flags & (~ 4095)))
|
[
"def",
"read_cache_entry",
"(",
"f",
")",
":",
"beginoffset",
"=",
"f",
".",
"tell",
"(",
")",
"ctime",
"=",
"read_cache_time",
"(",
"f",
")",
"mtime",
"=",
"read_cache_time",
"(",
"f",
")",
"(",
"dev",
",",
"ino",
",",
"mode",
",",
"uid",
",",
"gid",
",",
"size",
",",
"sha",
",",
"flags",
")",
"=",
"struct",
".",
"unpack",
"(",
"'>LLLLLL20sH'",
",",
"f",
".",
"read",
"(",
"(",
"(",
"20",
"+",
"(",
"4",
"*",
"6",
")",
")",
"+",
"2",
")",
")",
")",
"name",
"=",
"f",
".",
"read",
"(",
"(",
"flags",
"&",
"4095",
")",
")",
"real_size",
"=",
"(",
"(",
"(",
"f",
".",
"tell",
"(",
")",
"-",
"beginoffset",
")",
"+",
"8",
")",
"&",
"(",
"~",
"7",
")",
")",
"f",
".",
"read",
"(",
"(",
"(",
"beginoffset",
"+",
"real_size",
")",
"-",
"f",
".",
"tell",
"(",
")",
")",
")",
"return",
"(",
"name",
",",
"ctime",
",",
"mtime",
",",
"dev",
",",
"ino",
",",
"mode",
",",
"uid",
",",
"gid",
",",
"size",
",",
"sha_to_hex",
"(",
"sha",
")",
",",
"(",
"flags",
"&",
"(",
"~",
"4095",
")",
")",
")"
] |
read an entry from a cache file .
|
train
| false
|
12,829
|
def _cast_params_from(params, context, schemas):
result = {}
for name in params:
param_schema = {}
for schema in schemas:
if (name in schema):
param_schema = schema[name]
result[name] = _cast(context[name], param_schema)
return result
|
[
"def",
"_cast_params_from",
"(",
"params",
",",
"context",
",",
"schemas",
")",
":",
"result",
"=",
"{",
"}",
"for",
"name",
"in",
"params",
":",
"param_schema",
"=",
"{",
"}",
"for",
"schema",
"in",
"schemas",
":",
"if",
"(",
"name",
"in",
"schema",
")",
":",
"param_schema",
"=",
"schema",
"[",
"name",
"]",
"result",
"[",
"name",
"]",
"=",
"_cast",
"(",
"context",
"[",
"name",
"]",
",",
"param_schema",
")",
"return",
"result"
] |
pick a list of parameters from context and cast each of them according to the schemas provided .
|
train
| false
|
12,833
|
def flat_user_answer(user_answer):
def parse_user_answer(answer):
key = answer.keys()[0]
value = answer.values()[0]
if isinstance(value, dict):
complex_value_list = []
v_value = value
while isinstance(v_value, dict):
v_key = v_value.keys()[0]
v_value = v_value.values()[0]
complex_value_list.append(v_key)
complex_value = '{0}'.format(v_value)
for i in reversed(complex_value_list):
complex_value = '{0}[{1}]'.format(complex_value, i)
res = {key: complex_value}
return res
else:
return answer
result = []
for answer in user_answer:
parse_answer = parse_user_answer(answer)
result.append(parse_answer)
return result
|
[
"def",
"flat_user_answer",
"(",
"user_answer",
")",
":",
"def",
"parse_user_answer",
"(",
"answer",
")",
":",
"key",
"=",
"answer",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"value",
"=",
"answer",
".",
"values",
"(",
")",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"complex_value_list",
"=",
"[",
"]",
"v_value",
"=",
"value",
"while",
"isinstance",
"(",
"v_value",
",",
"dict",
")",
":",
"v_key",
"=",
"v_value",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"v_value",
"=",
"v_value",
".",
"values",
"(",
")",
"[",
"0",
"]",
"complex_value_list",
".",
"append",
"(",
"v_key",
")",
"complex_value",
"=",
"'{0}'",
".",
"format",
"(",
"v_value",
")",
"for",
"i",
"in",
"reversed",
"(",
"complex_value_list",
")",
":",
"complex_value",
"=",
"'{0}[{1}]'",
".",
"format",
"(",
"complex_value",
",",
"i",
")",
"res",
"=",
"{",
"key",
":",
"complex_value",
"}",
"return",
"res",
"else",
":",
"return",
"answer",
"result",
"=",
"[",
"]",
"for",
"answer",
"in",
"user_answer",
":",
"parse_answer",
"=",
"parse_user_answer",
"(",
"answer",
")",
"result",
".",
"append",
"(",
"parse_answer",
")",
"return",
"result"
] |
convert nested user_answer to flat format .
|
train
| false
|
12,836
|
def closed_issue(issue, after=None):
if (issue['state'] == 'closed'):
if ((after is None) or (parse_timestamp(issue['closed_at']) > after)):
return True
return False
|
[
"def",
"closed_issue",
"(",
"issue",
",",
"after",
"=",
"None",
")",
":",
"if",
"(",
"issue",
"[",
"'state'",
"]",
"==",
"'closed'",
")",
":",
"if",
"(",
"(",
"after",
"is",
"None",
")",
"or",
"(",
"parse_timestamp",
"(",
"issue",
"[",
"'closed_at'",
"]",
")",
">",
"after",
")",
")",
":",
"return",
"True",
"return",
"False"
] |
returns true iff this issue was closed after given date .
|
train
| true
|
12,837
|
def segment(sequence, aliases):
if (not (sequence or aliases)):
return
for (alias, parts) in aliases.items():
variants = {alias: OrderByTuple(parts), OrderBy(alias).opposite: OrderByTuple(parts).opposite}
for (valias, vparts) in variants.items():
if (list(sequence[:len(vparts)]) == list(vparts)):
tail_aliases = dict(aliases)
del tail_aliases[alias]
tail_sequence = sequence[len(vparts):]
if tail_sequence:
for tail in segment(tail_sequence, tail_aliases):
(yield tuple(chain([valias], tail)))
else:
continue
else:
(yield tuple([valias]))
|
[
"def",
"segment",
"(",
"sequence",
",",
"aliases",
")",
":",
"if",
"(",
"not",
"(",
"sequence",
"or",
"aliases",
")",
")",
":",
"return",
"for",
"(",
"alias",
",",
"parts",
")",
"in",
"aliases",
".",
"items",
"(",
")",
":",
"variants",
"=",
"{",
"alias",
":",
"OrderByTuple",
"(",
"parts",
")",
",",
"OrderBy",
"(",
"alias",
")",
".",
"opposite",
":",
"OrderByTuple",
"(",
"parts",
")",
".",
"opposite",
"}",
"for",
"(",
"valias",
",",
"vparts",
")",
"in",
"variants",
".",
"items",
"(",
")",
":",
"if",
"(",
"list",
"(",
"sequence",
"[",
":",
"len",
"(",
"vparts",
")",
"]",
")",
"==",
"list",
"(",
"vparts",
")",
")",
":",
"tail_aliases",
"=",
"dict",
"(",
"aliases",
")",
"del",
"tail_aliases",
"[",
"alias",
"]",
"tail_sequence",
"=",
"sequence",
"[",
"len",
"(",
"vparts",
")",
":",
"]",
"if",
"tail_sequence",
":",
"for",
"tail",
"in",
"segment",
"(",
"tail_sequence",
",",
"tail_aliases",
")",
":",
"(",
"yield",
"tuple",
"(",
"chain",
"(",
"[",
"valias",
"]",
",",
"tail",
")",
")",
")",
"else",
":",
"continue",
"else",
":",
"(",
"yield",
"tuple",
"(",
"[",
"valias",
"]",
")",
")"
] |
translates a flat sequence of items into a set of prefixed aliases .
|
train
| false
|
12,838
|
def pretty_use_unicode(flag=None):
global _use_unicode
global unicode_warnings
if (flag is None):
return _use_unicode
if unicode_warnings:
known = [('LATIN SUBSCRIPT SMALL LETTER %s' % i) for i in 'HKLMNPST']
unicode_warnings = '\n'.join([l for l in unicode_warnings.splitlines() if (not any(((i in l) for i in known)))])
if (flag and unicode_warnings):
warnings.warn(unicode_warnings)
unicode_warnings = ''
use_unicode_prev = _use_unicode
_use_unicode = flag
return use_unicode_prev
|
[
"def",
"pretty_use_unicode",
"(",
"flag",
"=",
"None",
")",
":",
"global",
"_use_unicode",
"global",
"unicode_warnings",
"if",
"(",
"flag",
"is",
"None",
")",
":",
"return",
"_use_unicode",
"if",
"unicode_warnings",
":",
"known",
"=",
"[",
"(",
"'LATIN SUBSCRIPT SMALL LETTER %s'",
"%",
"i",
")",
"for",
"i",
"in",
"'HKLMNPST'",
"]",
"unicode_warnings",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"l",
"for",
"l",
"in",
"unicode_warnings",
".",
"splitlines",
"(",
")",
"if",
"(",
"not",
"any",
"(",
"(",
"(",
"i",
"in",
"l",
")",
"for",
"i",
"in",
"known",
")",
")",
")",
"]",
")",
"if",
"(",
"flag",
"and",
"unicode_warnings",
")",
":",
"warnings",
".",
"warn",
"(",
"unicode_warnings",
")",
"unicode_warnings",
"=",
"''",
"use_unicode_prev",
"=",
"_use_unicode",
"_use_unicode",
"=",
"flag",
"return",
"use_unicode_prev"
] |
set whether pretty-printer should use unicode by default .
|
train
| false
|
12,839
|
def None2NULL(o, d):
return NULL
|
[
"def",
"None2NULL",
"(",
"o",
",",
"d",
")",
":",
"return",
"NULL"
] |
convert none to null .
|
train
| false
|
12,840
|
def add_channel_arguments(parser):
parser.add_argument('--repo-data', dest='repo_data', default=None, help='Published repository data (will be fetched from --channel if not available and written). Defaults to [channel_name]-repodata.json.')
parser.add_argument('--diff-hours', dest='diff_hours', default='25', help='If finding all recently changed recipes, use this number of hours.')
parser.add_argument('--recipes-dir', dest='recipes_dir', default='./bioconda-recipes')
|
[
"def",
"add_channel_arguments",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'--repo-data'",
",",
"dest",
"=",
"'repo_data'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'Published repository data (will be fetched from --channel if not available and written). Defaults to [channel_name]-repodata.json.'",
")",
"parser",
".",
"add_argument",
"(",
"'--diff-hours'",
",",
"dest",
"=",
"'diff_hours'",
",",
"default",
"=",
"'25'",
",",
"help",
"=",
"'If finding all recently changed recipes, use this number of hours.'",
")",
"parser",
".",
"add_argument",
"(",
"'--recipes-dir'",
",",
"dest",
"=",
"'recipes_dir'",
",",
"default",
"=",
"'./bioconda-recipes'",
")"
] |
add arguments only used if running mulled over a whole conda channel .
|
train
| false
|
12,841
|
@contextlib.contextmanager
def obj_target_cell(obj, cell):
with try_target_cell(obj._context, cell) as target:
with obj.obj_alternate_context(target):
(yield)
|
[
"@",
"contextlib",
".",
"contextmanager",
"def",
"obj_target_cell",
"(",
"obj",
",",
"cell",
")",
":",
"with",
"try_target_cell",
"(",
"obj",
".",
"_context",
",",
"cell",
")",
"as",
"target",
":",
"with",
"obj",
".",
"obj_alternate_context",
"(",
"target",
")",
":",
"(",
"yield",
")"
] |
run with objects context set to a specific cell .
|
train
| false
|
12,842
|
def _list_certs(certificatestore='My'):
ret = dict()
pscmd = list()
blacklist_keys = ['DnsNameList', 'Thumbprint']
cert_path = 'Cert:\\LocalMachine\\{0}'.format(certificatestore)
pscmd.append("Get-ChildItem -Path '{0}' | Select-Object".format(cert_path))
pscmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version')
cmd_ret = _srvmgr(func=str().join(pscmd), as_json=True)
try:
items = json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
_LOG.error('Unable to parse return data as Json.')
for item in items:
cert_info = dict()
for key in item:
if (key not in blacklist_keys):
cert_info[key.lower()] = item[key]
cert_info['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']]
ret[item['Thumbprint']] = cert_info
return ret
|
[
"def",
"_list_certs",
"(",
"certificatestore",
"=",
"'My'",
")",
":",
"ret",
"=",
"dict",
"(",
")",
"pscmd",
"=",
"list",
"(",
")",
"blacklist_keys",
"=",
"[",
"'DnsNameList'",
",",
"'Thumbprint'",
"]",
"cert_path",
"=",
"'Cert:\\\\LocalMachine\\\\{0}'",
".",
"format",
"(",
"certificatestore",
")",
"pscmd",
".",
"append",
"(",
"\"Get-ChildItem -Path '{0}' | Select-Object\"",
".",
"format",
"(",
"cert_path",
")",
")",
"pscmd",
".",
"append",
"(",
"' DnsNameList, SerialNumber, Subject, Thumbprint, Version'",
")",
"cmd_ret",
"=",
"_srvmgr",
"(",
"func",
"=",
"str",
"(",
")",
".",
"join",
"(",
"pscmd",
")",
",",
"as_json",
"=",
"True",
")",
"try",
":",
"items",
"=",
"json",
".",
"loads",
"(",
"cmd_ret",
"[",
"'stdout'",
"]",
",",
"strict",
"=",
"False",
")",
"except",
"ValueError",
":",
"_LOG",
".",
"error",
"(",
"'Unable to parse return data as Json.'",
")",
"for",
"item",
"in",
"items",
":",
"cert_info",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"item",
":",
"if",
"(",
"key",
"not",
"in",
"blacklist_keys",
")",
":",
"cert_info",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"item",
"[",
"key",
"]",
"cert_info",
"[",
"'dnsnames'",
"]",
"=",
"[",
"name",
"[",
"'Unicode'",
"]",
"for",
"name",
"in",
"item",
"[",
"'DnsNameList'",
"]",
"]",
"ret",
"[",
"item",
"[",
"'Thumbprint'",
"]",
"]",
"=",
"cert_info",
"return",
"ret"
] |
list details of available certificates .
|
train
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.