id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
23,362 | def _search_lineages(cli_config, func, initial_rv):
configs_dir = cli_config.renewal_configs_dir
util.make_or_verify_dir(configs_dir, mode=493, uid=os.geteuid())
rv = initial_rv
for renewal_file in storage.renewal_conf_files(cli_config):
try:
candidate_lineage = storage.RenewableCert(renewal_file, cli_config)
except (errors.CertStorageError, IOError):
logger.debug('Renewal conf file %s is broken. Skipping.', renewal_file)
logger.debug('Traceback was:\n%s', traceback.format_exc())
continue
rv = func(candidate_lineage, rv)
return rv
| [
"def",
"_search_lineages",
"(",
"cli_config",
",",
"func",
",",
"initial_rv",
")",
":",
"configs_dir",
"=",
"cli_config",
".",
"renewal_configs_dir",
"util",
".",
"make_or_verify_dir",
"(",
"configs_dir",
",",
"mode",
"=",
"493",
",",
"uid",
"=",
"os",
".",
"geteuid",
"(",
")",
")",
"rv",
"=",
"initial_rv",
"for",
"renewal_file",
"in",
"storage",
".",
"renewal_conf_files",
"(",
"cli_config",
")",
":",
"try",
":",
"candidate_lineage",
"=",
"storage",
".",
"RenewableCert",
"(",
"renewal_file",
",",
"cli_config",
")",
"except",
"(",
"errors",
".",
"CertStorageError",
",",
"IOError",
")",
":",
"logger",
".",
"debug",
"(",
"'Renewal conf file %s is broken. Skipping.'",
",",
"renewal_file",
")",
"logger",
".",
"debug",
"(",
"'Traceback was:\\n%s'",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"continue",
"rv",
"=",
"func",
"(",
"candidate_lineage",
",",
"rv",
")",
"return",
"rv"
] | iterate func over unbroken lineages . | train | false |
23,364 | def test_classification_report_imbalanced_multiclass_with_string_label():
(y_true, y_pred, _) = make_prediction(binary=False)
y_true = np.array(['blue', 'green', 'red'])[y_true]
y_pred = np.array(['blue', 'green', 'red'])[y_pred]
expected_report = 'pre rec spe f1 geo iba sup blue 0.83 0.79 0.92 0.81 0.86 0.74 24 green 0.33 0.10 0.86 0.15 0.44 0.19 31 red 0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total 0.51 0.53 0.80 0.47 0.62 0.41 75'
report = classification_report_imbalanced(y_true, y_pred)
assert_equal(_format_report(report), expected_report)
expected_report = 'pre rec spe f1 geo iba sup a 0.83 0.79 0.92 0.81 0.86 0.74 24 b 0.33 0.10 0.86 0.15 0.44 0.19 31 c 0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total 0.51 0.53 0.80 0.47 0.62 0.41 75'
report = classification_report_imbalanced(y_true, y_pred, target_names=['a', 'b', 'c'])
assert_equal(_format_report(report), expected_report)
| [
"def",
"test_classification_report_imbalanced_multiclass_with_string_label",
"(",
")",
":",
"(",
"y_true",
",",
"y_pred",
",",
"_",
")",
"=",
"make_prediction",
"(",
"binary",
"=",
"False",
")",
"y_true",
"=",
"np",
".",
"array",
"(",
"[",
"'blue'",
",",
"'green'",
",",
"'red'",
"]",
")",
"[",
"y_true",
"]",
"y_pred",
"=",
"np",
".",
"array",
"(",
"[",
"'blue'",
",",
"'green'",
",",
"'red'",
"]",
")",
"[",
"y_pred",
"]",
"expected_report",
"=",
"'pre rec spe f1 geo iba sup blue 0.83 0.79 0.92 0.81 0.86 0.74 24 green 0.33 0.10 0.86 0.15 0.44 0.19 31 red 0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total 0.51 0.53 0.80 0.47 0.62 0.41 75'",
"report",
"=",
"classification_report_imbalanced",
"(",
"y_true",
",",
"y_pred",
")",
"assert_equal",
"(",
"_format_report",
"(",
"report",
")",
",",
"expected_report",
")",
"expected_report",
"=",
"'pre rec spe f1 geo iba sup a 0.83 0.79 0.92 0.81 0.86 0.74 24 b 0.33 0.10 0.86 0.15 0.44 0.19 31 c 0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total 0.51 0.53 0.80 0.47 0.62 0.41 75'",
"report",
"=",
"classification_report_imbalanced",
"(",
"y_true",
",",
"y_pred",
",",
"target_names",
"=",
"[",
"'a'",
",",
"'b'",
",",
"'c'",
"]",
")",
"assert_equal",
"(",
"_format_report",
"(",
"report",
")",
",",
"expected_report",
")"
] | test the report with string label . | train | false |
23,365 | def _win32_getvalue(key, name, default=''):
try:
from win32api import RegQueryValueEx
except ImportError:
import _winreg
RegQueryValueEx = _winreg.QueryValueEx
try:
return RegQueryValueEx(key, name)
except:
return default
| [
"def",
"_win32_getvalue",
"(",
"key",
",",
"name",
",",
"default",
"=",
"''",
")",
":",
"try",
":",
"from",
"win32api",
"import",
"RegQueryValueEx",
"except",
"ImportError",
":",
"import",
"_winreg",
"RegQueryValueEx",
"=",
"_winreg",
".",
"QueryValueEx",
"try",
":",
"return",
"RegQueryValueEx",
"(",
"key",
",",
"name",
")",
"except",
":",
"return",
"default"
] | read a value for name from the registry key . | train | false |
23,366 | def do_ScpCreate(po):
global g_createdSCP
scp = ScpCreate(_get_option(po, 'binding_string'), _get_option(po, 'service_class'), _get_option(po, 'account_name_sam', None), keywords=_get_option(po, 'keywords', None))
g_createdSCP = scp
return scp.distinguishedName
| [
"def",
"do_ScpCreate",
"(",
"po",
")",
":",
"global",
"g_createdSCP",
"scp",
"=",
"ScpCreate",
"(",
"_get_option",
"(",
"po",
",",
"'binding_string'",
")",
",",
"_get_option",
"(",
"po",
",",
"'service_class'",
")",
",",
"_get_option",
"(",
"po",
",",
"'account_name_sam'",
",",
"None",
")",
",",
"keywords",
"=",
"_get_option",
"(",
"po",
",",
"'keywords'",
",",
"None",
")",
")",
"g_createdSCP",
"=",
"scp",
"return",
"scp",
".",
"distinguishedName"
] | create a service connection point . | train | false |
23,367 | def test_model_table():
class OccupationTable(tables.Table, ):
class Meta:
model = Occupation
expected = ['id', 'name', 'region', 'boolean', 'boolean_with_choices']
assert (expected == list(OccupationTable.base_columns.keys()))
class OccupationTable2(tables.Table, ):
extra = tables.Column()
class Meta:
model = Occupation
expected.append('extra')
assert (expected == list(OccupationTable2.base_columns.keys()))
from django.db import models
class ComplexModel(models.Model, ):
char = models.CharField(max_length=200)
fk = models.ForeignKey('self', on_delete=models.CASCADE)
m2m = models.ManyToManyField('self')
class Meta:
app_label = 'django_tables2_test'
class ComplexTable(tables.Table, ):
class Meta:
model = ComplexModel
assert (['id', 'char', 'fk'] == list(ComplexTable.base_columns.keys()))
| [
"def",
"test_model_table",
"(",
")",
":",
"class",
"OccupationTable",
"(",
"tables",
".",
"Table",
",",
")",
":",
"class",
"Meta",
":",
"model",
"=",
"Occupation",
"expected",
"=",
"[",
"'id'",
",",
"'name'",
",",
"'region'",
",",
"'boolean'",
",",
"'boolean_with_choices'",
"]",
"assert",
"(",
"expected",
"==",
"list",
"(",
"OccupationTable",
".",
"base_columns",
".",
"keys",
"(",
")",
")",
")",
"class",
"OccupationTable2",
"(",
"tables",
".",
"Table",
",",
")",
":",
"extra",
"=",
"tables",
".",
"Column",
"(",
")",
"class",
"Meta",
":",
"model",
"=",
"Occupation",
"expected",
".",
"append",
"(",
"'extra'",
")",
"assert",
"(",
"expected",
"==",
"list",
"(",
"OccupationTable2",
".",
"base_columns",
".",
"keys",
"(",
")",
")",
")",
"from",
"django",
".",
"db",
"import",
"models",
"class",
"ComplexModel",
"(",
"models",
".",
"Model",
",",
")",
":",
"char",
"=",
"models",
".",
"CharField",
"(",
"max_length",
"=",
"200",
")",
"fk",
"=",
"models",
".",
"ForeignKey",
"(",
"'self'",
",",
"on_delete",
"=",
"models",
".",
"CASCADE",
")",
"m2m",
"=",
"models",
".",
"ManyToManyField",
"(",
"'self'",
")",
"class",
"Meta",
":",
"app_label",
"=",
"'django_tables2_test'",
"class",
"ComplexTable",
"(",
"tables",
".",
"Table",
",",
")",
":",
"class",
"Meta",
":",
"model",
"=",
"ComplexModel",
"assert",
"(",
"[",
"'id'",
",",
"'char'",
",",
"'fk'",
"]",
"==",
"list",
"(",
"ComplexTable",
".",
"base_columns",
".",
"keys",
"(",
")",
")",
")"
] | the model option on a table causes the table to dynamically add columns based on the fields . | train | false |
23,368 | def heuristic_log_sanitize(data, no_log_values=None):
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
try:
end = data.rindex('@', 0, begin)
except ValueError:
output.insert(0, data[0:begin])
break
sep = None
sep_search_end = end
while (not sep):
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
begin = 0
try:
sep = data.index(':', (begin + 3), end)
except ValueError:
if (begin == 0):
output.insert(0, data[0:begin])
break
sep_search_end = begin
continue
if sep:
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:(sep + 1)])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
| [
"def",
"heuristic_log_sanitize",
"(",
"data",
",",
"no_log_values",
"=",
"None",
")",
":",
"data",
"=",
"to_native",
"(",
"data",
")",
"output",
"=",
"[",
"]",
"begin",
"=",
"len",
"(",
"data",
")",
"prev_begin",
"=",
"begin",
"sep",
"=",
"1",
"while",
"sep",
":",
"try",
":",
"end",
"=",
"data",
".",
"rindex",
"(",
"'@'",
",",
"0",
",",
"begin",
")",
"except",
"ValueError",
":",
"output",
".",
"insert",
"(",
"0",
",",
"data",
"[",
"0",
":",
"begin",
"]",
")",
"break",
"sep",
"=",
"None",
"sep_search_end",
"=",
"end",
"while",
"(",
"not",
"sep",
")",
":",
"try",
":",
"begin",
"=",
"data",
".",
"rindex",
"(",
"'://'",
",",
"0",
",",
"sep_search_end",
")",
"except",
"ValueError",
":",
"begin",
"=",
"0",
"try",
":",
"sep",
"=",
"data",
".",
"index",
"(",
"':'",
",",
"(",
"begin",
"+",
"3",
")",
",",
"end",
")",
"except",
"ValueError",
":",
"if",
"(",
"begin",
"==",
"0",
")",
":",
"output",
".",
"insert",
"(",
"0",
",",
"data",
"[",
"0",
":",
"begin",
"]",
")",
"break",
"sep_search_end",
"=",
"begin",
"continue",
"if",
"sep",
":",
"output",
".",
"insert",
"(",
"0",
",",
"data",
"[",
"end",
":",
"prev_begin",
"]",
")",
"output",
".",
"insert",
"(",
"0",
",",
"'********'",
")",
"output",
".",
"insert",
"(",
"0",
",",
"data",
"[",
"begin",
":",
"(",
"sep",
"+",
"1",
")",
"]",
")",
"prev_begin",
"=",
"begin",
"output",
"=",
"''",
".",
"join",
"(",
"output",
")",
"if",
"no_log_values",
":",
"output",
"=",
"remove_values",
"(",
"output",
",",
"no_log_values",
")",
"return",
"output"
] | remove strings that look like passwords from log messages . | train | false |
23,369 | def memoizedproperty(func):
inner_attname = ('__%s' % func.__name__)
def new_fget(self):
if (not hasattr(self, '_cache_')):
self._cache_ = dict()
cache = self._cache_
if (inner_attname not in cache):
cache[inner_attname] = func(self)
return cache[inner_attname]
return property(new_fget)
| [
"def",
"memoizedproperty",
"(",
"func",
")",
":",
"inner_attname",
"=",
"(",
"'__%s'",
"%",
"func",
".",
"__name__",
")",
"def",
"new_fget",
"(",
"self",
")",
":",
"if",
"(",
"not",
"hasattr",
"(",
"self",
",",
"'_cache_'",
")",
")",
":",
"self",
".",
"_cache_",
"=",
"dict",
"(",
")",
"cache",
"=",
"self",
".",
"_cache_",
"if",
"(",
"inner_attname",
"not",
"in",
"cache",
")",
":",
"cache",
"[",
"inner_attname",
"]",
"=",
"func",
"(",
"self",
")",
"return",
"cache",
"[",
"inner_attname",
"]",
"return",
"property",
"(",
"new_fget",
")"
] | decorator to cause a method to cache its results in self for each combination of inputs and return the cached result on subsequent calls . | train | true |
23,370 | def intcurve_diffequ(vector_field, param, start_point, coord_sys=None):
if ((contravariant_order(vector_field) != 1) or covariant_order(vector_field)):
raise ValueError('The supplied field was not a vector field.')
coord_sys = (coord_sys if coord_sys else start_point._coord_sys)
gammas = [Function(('f_%d' % i))(param) for i in range(start_point._coord_sys.dim)]
arbitrary_p = Point(coord_sys, gammas)
coord_functions = coord_sys.coord_functions()
equations = [simplify((diff(cf.rcall(arbitrary_p), param) - vector_field.rcall(cf).rcall(arbitrary_p))) for cf in coord_functions]
init_cond = [simplify((cf.rcall(arbitrary_p).subs(param, 0) - cf.rcall(start_point))) for cf in coord_functions]
return (equations, init_cond)
| [
"def",
"intcurve_diffequ",
"(",
"vector_field",
",",
"param",
",",
"start_point",
",",
"coord_sys",
"=",
"None",
")",
":",
"if",
"(",
"(",
"contravariant_order",
"(",
"vector_field",
")",
"!=",
"1",
")",
"or",
"covariant_order",
"(",
"vector_field",
")",
")",
":",
"raise",
"ValueError",
"(",
"'The supplied field was not a vector field.'",
")",
"coord_sys",
"=",
"(",
"coord_sys",
"if",
"coord_sys",
"else",
"start_point",
".",
"_coord_sys",
")",
"gammas",
"=",
"[",
"Function",
"(",
"(",
"'f_%d'",
"%",
"i",
")",
")",
"(",
"param",
")",
"for",
"i",
"in",
"range",
"(",
"start_point",
".",
"_coord_sys",
".",
"dim",
")",
"]",
"arbitrary_p",
"=",
"Point",
"(",
"coord_sys",
",",
"gammas",
")",
"coord_functions",
"=",
"coord_sys",
".",
"coord_functions",
"(",
")",
"equations",
"=",
"[",
"simplify",
"(",
"(",
"diff",
"(",
"cf",
".",
"rcall",
"(",
"arbitrary_p",
")",
",",
"param",
")",
"-",
"vector_field",
".",
"rcall",
"(",
"cf",
")",
".",
"rcall",
"(",
"arbitrary_p",
")",
")",
")",
"for",
"cf",
"in",
"coord_functions",
"]",
"init_cond",
"=",
"[",
"simplify",
"(",
"(",
"cf",
".",
"rcall",
"(",
"arbitrary_p",
")",
".",
"subs",
"(",
"param",
",",
"0",
")",
"-",
"cf",
".",
"rcall",
"(",
"start_point",
")",
")",
")",
"for",
"cf",
"in",
"coord_functions",
"]",
"return",
"(",
"equations",
",",
"init_cond",
")"
] | return the differential equation for an integral curve of the field . | train | false |
23,371 | def _BeforeBlockOffsets(single, with_body):
return {single: SINGLE_LINE, with_body: WITH_BODY}
| [
"def",
"_BeforeBlockOffsets",
"(",
"single",
",",
"with_body",
")",
":",
"return",
"{",
"single",
":",
"SINGLE_LINE",
",",
"with_body",
":",
"WITH_BODY",
"}"
] | valid alternative indent offsets for continued lines before blocks . | train | false |
23,372 | def default_latex_docclass(config):
if (config.language == 'ja'):
return {'manual': 'jsbook', 'howto': 'jreport'}
else:
return {}
| [
"def",
"default_latex_docclass",
"(",
"config",
")",
":",
"if",
"(",
"config",
".",
"language",
"==",
"'ja'",
")",
":",
"return",
"{",
"'manual'",
":",
"'jsbook'",
",",
"'howto'",
":",
"'jreport'",
"}",
"else",
":",
"return",
"{",
"}"
] | better default latex_docclass settings for specific languages . | train | false |
23,373 | def _load_internal_django(path, debug):
import google.appengine._internal.django.conf
import google.appengine._internal.django.template.loader
from google.appengine._internal import django
abspath = os.path.abspath(path)
if (not debug):
template = template_cache.get(abspath, None)
else:
template = None
if (not template):
(directory, file_name) = os.path.split(abspath)
settings = dict(TEMPLATE_LOADERS=('google.appengine._internal.django.template.loaders.filesystem.load_template_source',), TEMPLATE_DIRS=(directory,), TEMPLATE_DEBUG=debug, DEBUG=debug)
django.conf.settings.configure(**settings)
template = django.template.loader.get_template(file_name)
if (not debug):
template_cache[abspath] = template
def wrap_render(context, orig_render=template.render):
django.conf.settings.configure(**settings)
return orig_render(context)
template.render = wrap_render
return template
| [
"def",
"_load_internal_django",
"(",
"path",
",",
"debug",
")",
":",
"import",
"google",
".",
"appengine",
".",
"_internal",
".",
"django",
".",
"conf",
"import",
"google",
".",
"appengine",
".",
"_internal",
".",
"django",
".",
"template",
".",
"loader",
"from",
"google",
".",
"appengine",
".",
"_internal",
"import",
"django",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"if",
"(",
"not",
"debug",
")",
":",
"template",
"=",
"template_cache",
".",
"get",
"(",
"abspath",
",",
"None",
")",
"else",
":",
"template",
"=",
"None",
"if",
"(",
"not",
"template",
")",
":",
"(",
"directory",
",",
"file_name",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"abspath",
")",
"settings",
"=",
"dict",
"(",
"TEMPLATE_LOADERS",
"=",
"(",
"'google.appengine._internal.django.template.loaders.filesystem.load_template_source'",
",",
")",
",",
"TEMPLATE_DIRS",
"=",
"(",
"directory",
",",
")",
",",
"TEMPLATE_DEBUG",
"=",
"debug",
",",
"DEBUG",
"=",
"debug",
")",
"django",
".",
"conf",
".",
"settings",
".",
"configure",
"(",
"**",
"settings",
")",
"template",
"=",
"django",
".",
"template",
".",
"loader",
".",
"get_template",
"(",
"file_name",
")",
"if",
"(",
"not",
"debug",
")",
":",
"template_cache",
"[",
"abspath",
"]",
"=",
"template",
"def",
"wrap_render",
"(",
"context",
",",
"orig_render",
"=",
"template",
".",
"render",
")",
":",
"django",
".",
"conf",
".",
"settings",
".",
"configure",
"(",
"**",
"settings",
")",
"return",
"orig_render",
"(",
"context",
")",
"template",
".",
"render",
"=",
"wrap_render",
"return",
"template"
] | load the given template using the django found in apphosting . | train | false |
23,374 | def rule_match(component, cmd):
if (component == cmd):
return True
expanded = rule_expand(component, cmd)
if (cmd in expanded):
return True
return False
| [
"def",
"rule_match",
"(",
"component",
",",
"cmd",
")",
":",
"if",
"(",
"component",
"==",
"cmd",
")",
":",
"return",
"True",
"expanded",
"=",
"rule_expand",
"(",
"component",
",",
"cmd",
")",
"if",
"(",
"cmd",
"in",
"expanded",
")",
":",
"return",
"True",
"return",
"False"
] | see if one rule component matches . | train | true |
23,375 | def cleanup_instance(xenapi, instance, vm_ref, vm_rec):
xenapi._vmops._destroy(instance, vm_ref)
| [
"def",
"cleanup_instance",
"(",
"xenapi",
",",
"instance",
",",
"vm_ref",
",",
"vm_rec",
")",
":",
"xenapi",
".",
"_vmops",
".",
"_destroy",
"(",
"instance",
",",
"vm_ref",
")"
] | delete orphaned instances . | train | false |
23,376 | def RenderTokenApprovalTemplate(oauth_callback):
template_dict = {'oauth_callback': cgi.escape(oauth_callback, quote=True)}
return (TOKEN_APPROVAL_TEMPLATE % template_dict)
| [
"def",
"RenderTokenApprovalTemplate",
"(",
"oauth_callback",
")",
":",
"template_dict",
"=",
"{",
"'oauth_callback'",
":",
"cgi",
".",
"escape",
"(",
"oauth_callback",
",",
"quote",
"=",
"True",
")",
"}",
"return",
"(",
"TOKEN_APPROVAL_TEMPLATE",
"%",
"template_dict",
")"
] | renders the token approval page . | train | false |
23,377 | def _dot0(a, b):
if (b.ndim <= 2):
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert((-1), 0)
axes.pop(0)
return dot(a, b.transpose(axes))
| [
"def",
"_dot0",
"(",
"a",
",",
"b",
")",
":",
"if",
"(",
"b",
".",
"ndim",
"<=",
"2",
")",
":",
"return",
"dot",
"(",
"a",
",",
"b",
")",
"else",
":",
"axes",
"=",
"list",
"(",
"range",
"(",
"b",
".",
"ndim",
")",
")",
"axes",
".",
"insert",
"(",
"(",
"-",
"1",
")",
",",
"0",
")",
"axes",
".",
"pop",
"(",
"0",
")",
"return",
"dot",
"(",
"a",
",",
"b",
".",
"transpose",
"(",
"axes",
")",
")"
] | similar to numpy . | train | false |
23,378 | def flush_cached_instance(sender, instance, **kwargs):
if (not hasattr(instance, 'flush_cached_instance')):
return
sender.flush_cached_instance(instance, force=True)
| [
"def",
"flush_cached_instance",
"(",
"sender",
",",
"instance",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"not",
"hasattr",
"(",
"instance",
",",
"'flush_cached_instance'",
")",
")",
":",
"return",
"sender",
".",
"flush_cached_instance",
"(",
"instance",
",",
"force",
"=",
"True",
")"
] | flush the idmapper cache only for a given instance . | train | false |
23,379 | def parseCDDA(self):
(yield UInt16(self, 'cda_version', 'CD file version (currently 1)'))
(yield UInt16(self, 'track_no', 'Number of track'))
(yield textHandler(UInt32(self, 'disc_serial', 'Disc serial number'), formatSerialNumber))
(yield UInt32(self, 'hsg_offset', 'Track offset (HSG format)'))
(yield UInt32(self, 'hsg_length', 'Track length (HSG format)'))
(yield RedBook(self, 'rb_offset', 'Track offset (Red-book format)'))
(yield RedBook(self, 'rb_length', 'Track length (Red-book format)'))
| [
"def",
"parseCDDA",
"(",
"self",
")",
":",
"(",
"yield",
"UInt16",
"(",
"self",
",",
"'cda_version'",
",",
"'CD file version (currently 1)'",
")",
")",
"(",
"yield",
"UInt16",
"(",
"self",
",",
"'track_no'",
",",
"'Number of track'",
")",
")",
"(",
"yield",
"textHandler",
"(",
"UInt32",
"(",
"self",
",",
"'disc_serial'",
",",
"'Disc serial number'",
")",
",",
"formatSerialNumber",
")",
")",
"(",
"yield",
"UInt32",
"(",
"self",
",",
"'hsg_offset'",
",",
"'Track offset (HSG format)'",
")",
")",
"(",
"yield",
"UInt32",
"(",
"self",
",",
"'hsg_length'",
",",
"'Track length (HSG format)'",
")",
")",
"(",
"yield",
"RedBook",
"(",
"self",
",",
"'rb_offset'",
",",
"'Track offset (Red-book format)'",
")",
")",
"(",
"yield",
"RedBook",
"(",
"self",
",",
"'rb_length'",
",",
"'Track length (Red-book format)'",
")",
")"
] | hsg address format: number of 1/75 second hsg offset = *75 + frame + 150 hsg length = *75 + frame . | train | false |
23,380 | def _get_modpkg_path(dotted_name, pathlist=None):
parts = dotted_name.split('.', 1)
if (len(parts) > 1):
try:
(file, pathname, description) = imp.find_module(parts[0], pathlist)
if file:
file.close()
except ImportError:
return None
if (description[2] == imp.PKG_DIRECTORY):
pathname = _get_modpkg_path(parts[1], [pathname])
else:
pathname = None
else:
try:
(file, pathname, description) = imp.find_module(dotted_name, pathlist)
if file:
file.close()
if (description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]):
pathname = None
except ImportError:
pathname = None
return pathname
| [
"def",
"_get_modpkg_path",
"(",
"dotted_name",
",",
"pathlist",
"=",
"None",
")",
":",
"parts",
"=",
"dotted_name",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"if",
"(",
"len",
"(",
"parts",
")",
">",
"1",
")",
":",
"try",
":",
"(",
"file",
",",
"pathname",
",",
"description",
")",
"=",
"imp",
".",
"find_module",
"(",
"parts",
"[",
"0",
"]",
",",
"pathlist",
")",
"if",
"file",
":",
"file",
".",
"close",
"(",
")",
"except",
"ImportError",
":",
"return",
"None",
"if",
"(",
"description",
"[",
"2",
"]",
"==",
"imp",
".",
"PKG_DIRECTORY",
")",
":",
"pathname",
"=",
"_get_modpkg_path",
"(",
"parts",
"[",
"1",
"]",
",",
"[",
"pathname",
"]",
")",
"else",
":",
"pathname",
"=",
"None",
"else",
":",
"try",
":",
"(",
"file",
",",
"pathname",
",",
"description",
")",
"=",
"imp",
".",
"find_module",
"(",
"dotted_name",
",",
"pathlist",
")",
"if",
"file",
":",
"file",
".",
"close",
"(",
")",
"if",
"(",
"description",
"[",
"2",
"]",
"not",
"in",
"[",
"imp",
".",
"PY_SOURCE",
",",
"imp",
".",
"PKG_DIRECTORY",
"]",
")",
":",
"pathname",
"=",
"None",
"except",
"ImportError",
":",
"pathname",
"=",
"None",
"return",
"pathname"
] | get the filesystem path for a module or a package . | train | true |
23,381 | def timestamp_to_datetime(timestamp):
ts = datetime.datetime.strptime(timestamp[:(-10)], '%Y-%m-%dT%H:%M:%S')
tz_hours = int(timestamp[(-5):(-3)])
tz_mins = (int(timestamp[(-2):]) * int((timestamp[(-6):(-5)] + '1')))
tz_delta = datetime.timedelta(hours=tz_hours, minutes=tz_mins)
return (ts + tz_delta)
| [
"def",
"timestamp_to_datetime",
"(",
"timestamp",
")",
":",
"ts",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"timestamp",
"[",
":",
"(",
"-",
"10",
")",
"]",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"tz_hours",
"=",
"int",
"(",
"timestamp",
"[",
"(",
"-",
"5",
")",
":",
"(",
"-",
"3",
")",
"]",
")",
"tz_mins",
"=",
"(",
"int",
"(",
"timestamp",
"[",
"(",
"-",
"2",
")",
":",
"]",
")",
"*",
"int",
"(",
"(",
"timestamp",
"[",
"(",
"-",
"6",
")",
":",
"(",
"-",
"5",
")",
"]",
"+",
"'1'",
")",
")",
")",
"tz_delta",
"=",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"tz_hours",
",",
"minutes",
"=",
"tz_mins",
")",
"return",
"(",
"ts",
"+",
"tz_delta",
")"
] | converts a utc timestamp to a datetime . | train | false |
23,382 | def import_from_string(val, setting_name):
try:
parts = val.split(u'.')
(module_path, class_name) = (u'.'.join(parts[:(-1)]), parts[(-1)])
module = importlib.import_module(module_path)
return getattr(module, class_name)
except (ImportError, AttributeError) as e:
msg = (u"Could not import '%s' for API setting '%s'. %s: %s." % (val, setting_name, e.__class__.__name__, e))
raise ImportError(msg)
| [
"def",
"import_from_string",
"(",
"val",
",",
"setting_name",
")",
":",
"try",
":",
"parts",
"=",
"val",
".",
"split",
"(",
"u'.'",
")",
"(",
"module_path",
",",
"class_name",
")",
"=",
"(",
"u'.'",
".",
"join",
"(",
"parts",
"[",
":",
"(",
"-",
"1",
")",
"]",
")",
",",
"parts",
"[",
"(",
"-",
"1",
")",
"]",
")",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"module_path",
")",
"return",
"getattr",
"(",
"module",
",",
"class_name",
")",
"except",
"(",
"ImportError",
",",
"AttributeError",
")",
"as",
"e",
":",
"msg",
"=",
"(",
"u\"Could not import '%s' for API setting '%s'. %s: %s.\"",
"%",
"(",
"val",
",",
"setting_name",
",",
"e",
".",
"__class__",
".",
"__name__",
",",
"e",
")",
")",
"raise",
"ImportError",
"(",
"msg",
")"
] | attempt to import a class from a string representation . | train | true |
23,383 | def usesFlushLoggedErrors(test):
if ((sys.version_info[:2] == (2, 7)) and (twisted.version <= versions.Version('twisted', 9, 0, 0))):
test.skip = 'flushLoggedErrors is broken on Python==2.7 and Twisted<=9.0.0'
return test
| [
"def",
"usesFlushLoggedErrors",
"(",
"test",
")",
":",
"if",
"(",
"(",
"sys",
".",
"version_info",
"[",
":",
"2",
"]",
"==",
"(",
"2",
",",
"7",
")",
")",
"and",
"(",
"twisted",
".",
"version",
"<=",
"versions",
".",
"Version",
"(",
"'twisted'",
",",
"9",
",",
"0",
",",
"0",
")",
")",
")",
":",
"test",
".",
"skip",
"=",
"'flushLoggedErrors is broken on Python==2.7 and Twisted<=9.0.0'",
"return",
"test"
] | decorate a test method that uses flushloggederrors with this decorator . | train | false |
23,384 | def tokenize_snippet_text(snippet_instance, text, indent, allowed_tokens_in_text, allowed_tokens_in_tabstops, token_to_textobject):
seen_ts = {}
all_tokens = []
def _do_parse(parent, text, allowed_tokens):
'Recursive function that actually creates the objects.'
tokens = list(tokenize(text, indent, parent.start, allowed_tokens))
for token in tokens:
all_tokens.append((parent, token))
if isinstance(token, TabStopToken):
ts = TabStop(parent, token)
seen_ts[token.number] = ts
_do_parse(ts, token.initial_text, allowed_tokens_in_tabstops)
else:
klass = token_to_textobject.get(token.__class__, None)
if (klass is not None):
klass(parent, token)
_do_parse(snippet_instance, text, allowed_tokens_in_text)
return (all_tokens, seen_ts)
| [
"def",
"tokenize_snippet_text",
"(",
"snippet_instance",
",",
"text",
",",
"indent",
",",
"allowed_tokens_in_text",
",",
"allowed_tokens_in_tabstops",
",",
"token_to_textobject",
")",
":",
"seen_ts",
"=",
"{",
"}",
"all_tokens",
"=",
"[",
"]",
"def",
"_do_parse",
"(",
"parent",
",",
"text",
",",
"allowed_tokens",
")",
":",
"tokens",
"=",
"list",
"(",
"tokenize",
"(",
"text",
",",
"indent",
",",
"parent",
".",
"start",
",",
"allowed_tokens",
")",
")",
"for",
"token",
"in",
"tokens",
":",
"all_tokens",
".",
"append",
"(",
"(",
"parent",
",",
"token",
")",
")",
"if",
"isinstance",
"(",
"token",
",",
"TabStopToken",
")",
":",
"ts",
"=",
"TabStop",
"(",
"parent",
",",
"token",
")",
"seen_ts",
"[",
"token",
".",
"number",
"]",
"=",
"ts",
"_do_parse",
"(",
"ts",
",",
"token",
".",
"initial_text",
",",
"allowed_tokens_in_tabstops",
")",
"else",
":",
"klass",
"=",
"token_to_textobject",
".",
"get",
"(",
"token",
".",
"__class__",
",",
"None",
")",
"if",
"(",
"klass",
"is",
"not",
"None",
")",
":",
"klass",
"(",
"parent",
",",
"token",
")",
"_do_parse",
"(",
"snippet_instance",
",",
"text",
",",
"allowed_tokens_in_text",
")",
"return",
"(",
"all_tokens",
",",
"seen_ts",
")"
] | turns text into a stream of tokens and creates the text objects from those tokens that are mentioned in token_to_textobject assuming the current indent . | train | false |
23,385 | def service_exists(s_name, **connection_args):
return (_service_get(s_name, **connection_args) is not None)
| [
"def",
"service_exists",
"(",
"s_name",
",",
"**",
"connection_args",
")",
":",
"return",
"(",
"_service_get",
"(",
"s_name",
",",
"**",
"connection_args",
")",
"is",
"not",
"None",
")"
] | checks if a service exists cli example: . | train | false |
23,386 | def mobile_view(is_user=False):
return view_auth_classes(is_user)
| [
"def",
"mobile_view",
"(",
"is_user",
"=",
"False",
")",
":",
"return",
"view_auth_classes",
"(",
"is_user",
")"
] | function and class decorator that abstracts the authentication and permission checks for mobile api views . | train | false |
23,387 | def sendStayAwake():
return False
| [
"def",
"sendStayAwake",
"(",
")",
":",
"return",
"False"
] | sends a signal to your system to indicate that the computer is in use and should not sleep . | train | false |
23,388 | def calcHA2(algo, pszMethod, pszDigestUri, pszQop, pszHEntity):
m = algorithms[algo]()
m.update(pszMethod)
m.update(':')
m.update(pszDigestUri)
if (pszQop == 'auth-int'):
m.update(':')
m.update(pszHEntity)
return m.digest().encode('hex')
| [
"def",
"calcHA2",
"(",
"algo",
",",
"pszMethod",
",",
"pszDigestUri",
",",
"pszQop",
",",
"pszHEntity",
")",
":",
"m",
"=",
"algorithms",
"[",
"algo",
"]",
"(",
")",
"m",
".",
"update",
"(",
"pszMethod",
")",
"m",
".",
"update",
"(",
"':'",
")",
"m",
".",
"update",
"(",
"pszDigestUri",
")",
"if",
"(",
"pszQop",
"==",
"'auth-int'",
")",
":",
"m",
".",
"update",
"(",
"':'",
")",
"m",
".",
"update",
"(",
"pszHEntity",
")",
"return",
"m",
".",
"digest",
"(",
")",
".",
"encode",
"(",
"'hex'",
")"
] | compute h from rfc 2617 . | train | false |
23,389 | def _get_vm_ref_from_vm_uuid(session, instance_uuid):
vm_refs = session._call_method(session.vim, 'FindAllByUuid', session.vim.service_content.searchIndex, uuid=instance_uuid, vmSearch=True, instanceUuid=True)
if vm_refs:
return vm_refs[0]
| [
"def",
"_get_vm_ref_from_vm_uuid",
"(",
"session",
",",
"instance_uuid",
")",
":",
"vm_refs",
"=",
"session",
".",
"_call_method",
"(",
"session",
".",
"vim",
",",
"'FindAllByUuid'",
",",
"session",
".",
"vim",
".",
"service_content",
".",
"searchIndex",
",",
"uuid",
"=",
"instance_uuid",
",",
"vmSearch",
"=",
"True",
",",
"instanceUuid",
"=",
"True",
")",
"if",
"vm_refs",
":",
"return",
"vm_refs",
"[",
"0",
"]"
] | get reference to the vm . | train | false |
23,390 | def ensure_tenant_absent(keystone, tenant, check_mode):
if (not tenant_exists(keystone, tenant)):
return False
if check_mode:
return True
| [
"def",
"ensure_tenant_absent",
"(",
"keystone",
",",
"tenant",
",",
"check_mode",
")",
":",
"if",
"(",
"not",
"tenant_exists",
"(",
"keystone",
",",
"tenant",
")",
")",
":",
"return",
"False",
"if",
"check_mode",
":",
"return",
"True"
] | ensure that a tenant does not exist return true if the tenant was removed . | train | false |
23,391 | @pytest.mark.django_db
def test_convert(project0_nongnu, store0):
store0.sync()
for db_unit in store0.units.iterator():
store_unit = store0.file.store.findid(db_unit.getid())
newunit = db_unit.convert(store0.file.store.UnitClass)
assert (str(newunit) == str(store_unit))
| [
"@",
"pytest",
".",
"mark",
".",
"django_db",
"def",
"test_convert",
"(",
"project0_nongnu",
",",
"store0",
")",
":",
"store0",
".",
"sync",
"(",
")",
"for",
"db_unit",
"in",
"store0",
".",
"units",
".",
"iterator",
"(",
")",
":",
"store_unit",
"=",
"store0",
".",
"file",
".",
"store",
".",
"findid",
"(",
"db_unit",
".",
"getid",
"(",
")",
")",
"newunit",
"=",
"db_unit",
".",
"convert",
"(",
"store0",
".",
"file",
".",
"store",
".",
"UnitClass",
")",
"assert",
"(",
"str",
"(",
"newunit",
")",
"==",
"str",
"(",
"store_unit",
")",
")"
] | tests that in-db and on-disk units match after format conversion . | train | false |
23,392 | def average_pooling_nd(x, ksize, stride=None, pad=0, use_cudnn=True):
ndim = len(x.shape[2:])
return AveragePoolingND(ndim, ksize, stride=stride, pad=pad, use_cudnn=use_cudnn)(x)
| [
"def",
"average_pooling_nd",
"(",
"x",
",",
"ksize",
",",
"stride",
"=",
"None",
",",
"pad",
"=",
"0",
",",
"use_cudnn",
"=",
"True",
")",
":",
"ndim",
"=",
"len",
"(",
"x",
".",
"shape",
"[",
"2",
":",
"]",
")",
"return",
"AveragePoolingND",
"(",
"ndim",
",",
"ksize",
",",
"stride",
"=",
"stride",
",",
"pad",
"=",
"pad",
",",
"use_cudnn",
"=",
"use_cudnn",
")",
"(",
"x",
")"
] | n-dimensionally spatial average pooling function . | train | false |
23,395 | def _logstash(url, data):
result = salt.utils.http.query(url, 'POST', header_dict=_HEADERS, data=json.dumps(data), decode=True, status=True, opts=__opts__)
return result
| [
"def",
"_logstash",
"(",
"url",
",",
"data",
")",
":",
"result",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"url",
",",
"'POST'",
",",
"header_dict",
"=",
"_HEADERS",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"decode",
"=",
"True",
",",
"status",
"=",
"True",
",",
"opts",
"=",
"__opts__",
")",
"return",
"result"
] | issues http queries to the logstash server . | train | true |
23,396 | def do_lzop_put(creds, url, local_path, gpg_key):
assert url.endswith('.lzo')
blobstore = get_blobstore(storage.StorageLayout(url))
with tempfile.NamedTemporaryFile(mode='r+b', buffering=pipebuf.PIPE_BUF_BYTES) as tf:
with pipeline.get_upload_pipeline(open(local_path, 'rb'), tf, gpg_key=gpg_key):
pass
tf.flush()
clock_start = time.time()
tf.seek(0)
k = blobstore.uri_put_file(creds, url, tf)
clock_finish = time.time()
kib_per_second = format_kib_per_second(clock_start, clock_finish, k.size)
return kib_per_second
| [
"def",
"do_lzop_put",
"(",
"creds",
",",
"url",
",",
"local_path",
",",
"gpg_key",
")",
":",
"assert",
"url",
".",
"endswith",
"(",
"'.lzo'",
")",
"blobstore",
"=",
"get_blobstore",
"(",
"storage",
".",
"StorageLayout",
"(",
"url",
")",
")",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'r+b'",
",",
"buffering",
"=",
"pipebuf",
".",
"PIPE_BUF_BYTES",
")",
"as",
"tf",
":",
"with",
"pipeline",
".",
"get_upload_pipeline",
"(",
"open",
"(",
"local_path",
",",
"'rb'",
")",
",",
"tf",
",",
"gpg_key",
"=",
"gpg_key",
")",
":",
"pass",
"tf",
".",
"flush",
"(",
")",
"clock_start",
"=",
"time",
".",
"time",
"(",
")",
"tf",
".",
"seek",
"(",
"0",
")",
"k",
"=",
"blobstore",
".",
"uri_put_file",
"(",
"creds",
",",
"url",
",",
"tf",
")",
"clock_finish",
"=",
"time",
".",
"time",
"(",
")",
"kib_per_second",
"=",
"format_kib_per_second",
"(",
"clock_start",
",",
"clock_finish",
",",
"k",
".",
"size",
")",
"return",
"kib_per_second"
] | compress and upload a given local path . | train | true |
23,397 | def create_capture(source):
try:
source = int(source)
except ValueError:
pass
else:
return cv2.VideoCapture(source)
source = str(source).strip()
if source.startswith('synth'):
ss = filter(None, source.split(':'))
params = dict((s.split('=') for s in ss[1:]))
try:
Class = classes[params['class']]
except:
Class = VideoSynthBase
return Class(**params)
return cv2.VideoCapture(source)
| [
"def",
"create_capture",
"(",
"source",
")",
":",
"try",
":",
"source",
"=",
"int",
"(",
"source",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"return",
"cv2",
".",
"VideoCapture",
"(",
"source",
")",
"source",
"=",
"str",
"(",
"source",
")",
".",
"strip",
"(",
")",
"if",
"source",
".",
"startswith",
"(",
"'synth'",
")",
":",
"ss",
"=",
"filter",
"(",
"None",
",",
"source",
".",
"split",
"(",
"':'",
")",
")",
"params",
"=",
"dict",
"(",
"(",
"s",
".",
"split",
"(",
"'='",
")",
"for",
"s",
"in",
"ss",
"[",
"1",
":",
"]",
")",
")",
"try",
":",
"Class",
"=",
"classes",
"[",
"params",
"[",
"'class'",
"]",
"]",
"except",
":",
"Class",
"=",
"VideoSynthBase",
"return",
"Class",
"(",
"**",
"params",
")",
"return",
"cv2",
".",
"VideoCapture",
"(",
"source",
")"
] | source: <int> or <int> or <filename> or synth:<params> . | train | false |
23,398 | def load_bytes(value, units):
magnitudes = {'KiB': 1, 'MiB': 2, 'GiB': 3, 'TiB': 4}
if (units not in magnitudes):
raise InvalidUnits('{} not a recognized unit'.format(units))
return int((value * (1024 ** magnitudes[units])))
| [
"def",
"load_bytes",
"(",
"value",
",",
"units",
")",
":",
"magnitudes",
"=",
"{",
"'KiB'",
":",
"1",
",",
"'MiB'",
":",
"2",
",",
"'GiB'",
":",
"3",
",",
"'TiB'",
":",
"4",
"}",
"if",
"(",
"units",
"not",
"in",
"magnitudes",
")",
":",
"raise",
"InvalidUnits",
"(",
"'{} not a recognized unit'",
".",
"format",
"(",
"units",
")",
")",
"return",
"int",
"(",
"(",
"value",
"*",
"(",
"1024",
"**",
"magnitudes",
"[",
"units",
"]",
")",
")",
")"
] | load sequence of bytes . | train | false |
23,399 | def start_client(config, args):
logger.info('Start client mode')
global client
from glances.client import GlancesClient
client = GlancesClient(config=config, args=args)
if (not client.login()):
logger.critical('The server version is not compatible with the client')
sys.exit(2)
client.serve_forever()
client.end()
| [
"def",
"start_client",
"(",
"config",
",",
"args",
")",
":",
"logger",
".",
"info",
"(",
"'Start client mode'",
")",
"global",
"client",
"from",
"glances",
".",
"client",
"import",
"GlancesClient",
"client",
"=",
"GlancesClient",
"(",
"config",
"=",
"config",
",",
"args",
"=",
"args",
")",
"if",
"(",
"not",
"client",
".",
"login",
"(",
")",
")",
":",
"logger",
".",
"critical",
"(",
"'The server version is not compatible with the client'",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"client",
".",
"serve_forever",
"(",
")",
"client",
".",
"end",
"(",
")"
] | start the client mode . | train | false |
23,400 | def make_path_bkrcache(r):
return (((AUTOTEST_CACHE_DIR + '/recipes/') + r) + '/beaker_recipe.cache')
| [
"def",
"make_path_bkrcache",
"(",
"r",
")",
":",
"return",
"(",
"(",
"(",
"AUTOTEST_CACHE_DIR",
"+",
"'/recipes/'",
")",
"+",
"r",
")",
"+",
"'/beaker_recipe.cache'",
")"
] | converts a recipe id into an internal path for cacheing recipe . | train | false |
23,402 | def generate_valid_slug(source, parent, language):
if parent:
qs = Title.objects.filter(language=language, page__parent=parent)
else:
qs = Title.objects.filter(language=language, page__parent__isnull=True)
used = list(qs.values_list('slug', flat=True))
baseslug = slugify(source)
slug = baseslug
i = 1
if used:
while (slug in used):
slug = ('%s-%s' % (baseslug, i))
i += 1
return slug
| [
"def",
"generate_valid_slug",
"(",
"source",
",",
"parent",
",",
"language",
")",
":",
"if",
"parent",
":",
"qs",
"=",
"Title",
".",
"objects",
".",
"filter",
"(",
"language",
"=",
"language",
",",
"page__parent",
"=",
"parent",
")",
"else",
":",
"qs",
"=",
"Title",
".",
"objects",
".",
"filter",
"(",
"language",
"=",
"language",
",",
"page__parent__isnull",
"=",
"True",
")",
"used",
"=",
"list",
"(",
"qs",
".",
"values_list",
"(",
"'slug'",
",",
"flat",
"=",
"True",
")",
")",
"baseslug",
"=",
"slugify",
"(",
"source",
")",
"slug",
"=",
"baseslug",
"i",
"=",
"1",
"if",
"used",
":",
"while",
"(",
"slug",
"in",
"used",
")",
":",
"slug",
"=",
"(",
"'%s-%s'",
"%",
"(",
"baseslug",
",",
"i",
")",
")",
"i",
"+=",
"1",
"return",
"slug"
] | generate a valid slug for a page from source for the given language . | train | false |
23,403 | def unpackbits(myarray):
if (myarray.dtype != cupy.uint8):
raise TypeError('Expected an input array of unsigned byte data type')
unpacked = cupy.ndarray((myarray.size * 8), dtype=cupy.uint8)
cupy.ElementwiseKernel('raw uint8 myarray', 'T unpacked', 'unpacked = (myarray[i / 8] >> (7 - i % 8)) & 1;', 'unpackbits_kernel')(myarray, unpacked)
return unpacked
| [
"def",
"unpackbits",
"(",
"myarray",
")",
":",
"if",
"(",
"myarray",
".",
"dtype",
"!=",
"cupy",
".",
"uint8",
")",
":",
"raise",
"TypeError",
"(",
"'Expected an input array of unsigned byte data type'",
")",
"unpacked",
"=",
"cupy",
".",
"ndarray",
"(",
"(",
"myarray",
".",
"size",
"*",
"8",
")",
",",
"dtype",
"=",
"cupy",
".",
"uint8",
")",
"cupy",
".",
"ElementwiseKernel",
"(",
"'raw uint8 myarray'",
",",
"'T unpacked'",
",",
"'unpacked = (myarray[i / 8] >> (7 - i % 8)) & 1;'",
",",
"'unpackbits_kernel'",
")",
"(",
"myarray",
",",
"unpacked",
")",
"return",
"unpacked"
] | unpacks elements of a uint8 array into a binary-valued output array . | train | false |
23,406 | def get_translation_dict_from_file(path, lang, app):
cleaned = {}
if os.path.exists(path):
csv_content = read_csv_file(path)
for item in csv_content:
if (len(item) == 3):
cleaned[item[1]] = strip(item[2])
elif (len(item) == 2):
cleaned[item[0]] = strip(item[1])
else:
raise Exception(u"Bad translation in '{app}' for language '{lang}': {values}".format(app=app, lang=lang, values=repr(item).encode(u'utf-8')))
return cleaned
| [
"def",
"get_translation_dict_from_file",
"(",
"path",
",",
"lang",
",",
"app",
")",
":",
"cleaned",
"=",
"{",
"}",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"csv_content",
"=",
"read_csv_file",
"(",
"path",
")",
"for",
"item",
"in",
"csv_content",
":",
"if",
"(",
"len",
"(",
"item",
")",
"==",
"3",
")",
":",
"cleaned",
"[",
"item",
"[",
"1",
"]",
"]",
"=",
"strip",
"(",
"item",
"[",
"2",
"]",
")",
"elif",
"(",
"len",
"(",
"item",
")",
"==",
"2",
")",
":",
"cleaned",
"[",
"item",
"[",
"0",
"]",
"]",
"=",
"strip",
"(",
"item",
"[",
"1",
"]",
")",
"else",
":",
"raise",
"Exception",
"(",
"u\"Bad translation in '{app}' for language '{lang}': {values}\"",
".",
"format",
"(",
"app",
"=",
"app",
",",
"lang",
"=",
"lang",
",",
"values",
"=",
"repr",
"(",
"item",
")",
".",
"encode",
"(",
"u'utf-8'",
")",
")",
")",
"return",
"cleaned"
] | load translation dict from given path . | train | false |
23,408 | def check_async(paths, options, rootdir=None):
LOGGER.info('Async code checking is enabled.')
path_queue = Queue.Queue()
result_queue = Queue.Queue()
for num in range(CPU_COUNT):
worker = Worker(path_queue, result_queue)
worker.setDaemon(True)
LOGGER.info('Start worker #%s', (num + 1))
worker.start()
for path in paths:
path_queue.put((path, dict(options=options, rootdir=rootdir)))
path_queue.join()
errors = []
while True:
try:
errors += result_queue.get(False)
except Queue.Empty:
break
return errors
| [
"def",
"check_async",
"(",
"paths",
",",
"options",
",",
"rootdir",
"=",
"None",
")",
":",
"LOGGER",
".",
"info",
"(",
"'Async code checking is enabled.'",
")",
"path_queue",
"=",
"Queue",
".",
"Queue",
"(",
")",
"result_queue",
"=",
"Queue",
".",
"Queue",
"(",
")",
"for",
"num",
"in",
"range",
"(",
"CPU_COUNT",
")",
":",
"worker",
"=",
"Worker",
"(",
"path_queue",
",",
"result_queue",
")",
"worker",
".",
"setDaemon",
"(",
"True",
")",
"LOGGER",
".",
"info",
"(",
"'Start worker #%s'",
",",
"(",
"num",
"+",
"1",
")",
")",
"worker",
".",
"start",
"(",
")",
"for",
"path",
"in",
"paths",
":",
"path_queue",
".",
"put",
"(",
"(",
"path",
",",
"dict",
"(",
"options",
"=",
"options",
",",
"rootdir",
"=",
"rootdir",
")",
")",
")",
"path_queue",
".",
"join",
"(",
")",
"errors",
"=",
"[",
"]",
"while",
"True",
":",
"try",
":",
"errors",
"+=",
"result_queue",
".",
"get",
"(",
"False",
")",
"except",
"Queue",
".",
"Empty",
":",
"break",
"return",
"errors"
] | check given paths asynchronously . | train | true |
23,409 | def astroid_wrapper(func, modname):
print(('parsing %s...' % modname))
try:
return func(modname)
except AstroidBuildingException as exc:
print(exc)
except Exception as exc:
import traceback
traceback.print_exc()
| [
"def",
"astroid_wrapper",
"(",
"func",
",",
"modname",
")",
":",
"print",
"(",
"(",
"'parsing %s...'",
"%",
"modname",
")",
")",
"try",
":",
"return",
"func",
"(",
"modname",
")",
"except",
"AstroidBuildingException",
"as",
"exc",
":",
"print",
"(",
"exc",
")",
"except",
"Exception",
"as",
"exc",
":",
"import",
"traceback",
"traceback",
".",
"print_exc",
"(",
")"
] | wrapper to give to astroidmanager . | train | false |
23,412 | def _escape_xref(xref_match):
xref = xref_match.group()
xref = xref.replace('/', '%2F')
xref = xref.replace('?', '%3F')
xref = xref.replace('#', '%23')
return xref
| [
"def",
"_escape_xref",
"(",
"xref_match",
")",
":",
"xref",
"=",
"xref_match",
".",
"group",
"(",
")",
"xref",
"=",
"xref",
".",
"replace",
"(",
"'/'",
",",
"'%2F'",
")",
"xref",
"=",
"xref",
".",
"replace",
"(",
"'?'",
",",
"'%3F'",
")",
"xref",
"=",
"xref",
".",
"replace",
"(",
"'#'",
",",
"'%23'",
")",
"return",
"xref"
] | escape things that need to be escaped if theyre in a cross-reference . | train | true |
23,413 | def unpack_file_url(link, location, download_dir=None, hashes=None):
link_path = url_to_path(link.url_without_fragment)
if is_dir_url(link):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
if hashes:
hashes.check_against_path(link_path)
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link, download_dir, hashes)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
unpack_file(from_path, location, content_type, link)
if (download_dir and (not already_downloaded_path)):
_copy_file(from_path, download_dir, link)
| [
"def",
"unpack_file_url",
"(",
"link",
",",
"location",
",",
"download_dir",
"=",
"None",
",",
"hashes",
"=",
"None",
")",
":",
"link_path",
"=",
"url_to_path",
"(",
"link",
".",
"url_without_fragment",
")",
"if",
"is_dir_url",
"(",
"link",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"location",
")",
":",
"rmtree",
"(",
"location",
")",
"shutil",
".",
"copytree",
"(",
"link_path",
",",
"location",
",",
"symlinks",
"=",
"True",
")",
"if",
"download_dir",
":",
"logger",
".",
"info",
"(",
"'Link is a directory, ignoring download_dir'",
")",
"return",
"if",
"hashes",
":",
"hashes",
".",
"check_against_path",
"(",
"link_path",
")",
"already_downloaded_path",
"=",
"None",
"if",
"download_dir",
":",
"already_downloaded_path",
"=",
"_check_download_dir",
"(",
"link",
",",
"download_dir",
",",
"hashes",
")",
"if",
"already_downloaded_path",
":",
"from_path",
"=",
"already_downloaded_path",
"else",
":",
"from_path",
"=",
"link_path",
"content_type",
"=",
"mimetypes",
".",
"guess_type",
"(",
"from_path",
")",
"[",
"0",
"]",
"unpack_file",
"(",
"from_path",
",",
"location",
",",
"content_type",
",",
"link",
")",
"if",
"(",
"download_dir",
"and",
"(",
"not",
"already_downloaded_path",
")",
")",
":",
"_copy_file",
"(",
"from_path",
",",
"download_dir",
",",
"link",
")"
] | unpack link into location . | train | true |
23,414 | def nobody_uid():
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return (-1)
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = (1 + max((x[2] for x in pwd.getpwall())))
return nobody
| [
"def",
"nobody_uid",
"(",
")",
":",
"global",
"nobody",
"if",
"nobody",
":",
"return",
"nobody",
"try",
":",
"import",
"pwd",
"except",
"ImportError",
":",
"return",
"(",
"-",
"1",
")",
"try",
":",
"nobody",
"=",
"pwd",
".",
"getpwnam",
"(",
"'nobody'",
")",
"[",
"2",
"]",
"except",
"KeyError",
":",
"nobody",
"=",
"(",
"1",
"+",
"max",
"(",
"(",
"x",
"[",
"2",
"]",
"for",
"x",
"in",
"pwd",
".",
"getpwall",
"(",
")",
")",
")",
")",
"return",
"nobody"
] | internal routine to get nobodys uid . | train | true |
23,415 | def haproxy_pid():
try:
pid = subprocess.check_output(['pidof', 'haproxy'])
except subprocess.CalledProcessError:
return None
return pid.rstrip()
| [
"def",
"haproxy_pid",
"(",
")",
":",
"try",
":",
"pid",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'pidof'",
",",
"'haproxy'",
"]",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"return",
"None",
"return",
"pid",
".",
"rstrip",
"(",
")"
] | finds out the pid of haproxy process . | train | false |
23,416 | def on_log(request, page_name):
page = Page.query.filter_by(name=page_name).first()
if (page is None):
return page_missing(request, page_name, False)
return Response(generate_template('action_log.html', page=page))
| [
"def",
"on_log",
"(",
"request",
",",
"page_name",
")",
":",
"page",
"=",
"Page",
".",
"query",
".",
"filter_by",
"(",
"name",
"=",
"page_name",
")",
".",
"first",
"(",
")",
"if",
"(",
"page",
"is",
"None",
")",
":",
"return",
"page_missing",
"(",
"request",
",",
"page_name",
",",
"False",
")",
"return",
"Response",
"(",
"generate_template",
"(",
"'action_log.html'",
",",
"page",
"=",
"page",
")",
")"
] | show the list of recent changes . | train | true |
23,417 | def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
if (rcond is not None):
cond = rcond
if (cond in [None, (-1)]):
t = spectrum.dtype.char.lower()
factor = {'f': 1000.0, 'd': 1000000.0}
cond = (factor[t] * np.finfo(t).eps)
eps = (cond * np.max(abs(spectrum)))
return eps
| [
"def",
"_eigvalsh_to_eps",
"(",
"spectrum",
",",
"cond",
"=",
"None",
",",
"rcond",
"=",
"None",
")",
":",
"if",
"(",
"rcond",
"is",
"not",
"None",
")",
":",
"cond",
"=",
"rcond",
"if",
"(",
"cond",
"in",
"[",
"None",
",",
"(",
"-",
"1",
")",
"]",
")",
":",
"t",
"=",
"spectrum",
".",
"dtype",
".",
"char",
".",
"lower",
"(",
")",
"factor",
"=",
"{",
"'f'",
":",
"1000.0",
",",
"'d'",
":",
"1000000.0",
"}",
"cond",
"=",
"(",
"factor",
"[",
"t",
"]",
"*",
"np",
".",
"finfo",
"(",
"t",
")",
".",
"eps",
")",
"eps",
"=",
"(",
"cond",
"*",
"np",
".",
"max",
"(",
"abs",
"(",
"spectrum",
")",
")",
")",
"return",
"eps"
] | determine which eigenvalues are "small" given the spectrum . | train | false |
23,420 | def require_foreign(namespace, symbol=None):
try:
if (symbol is None):
get_foreign_module(namespace)
else:
get_foreign_struct(namespace, symbol)
except ForeignError as e:
raise ImportError(e)
| [
"def",
"require_foreign",
"(",
"namespace",
",",
"symbol",
"=",
"None",
")",
":",
"try",
":",
"if",
"(",
"symbol",
"is",
"None",
")",
":",
"get_foreign_module",
"(",
"namespace",
")",
"else",
":",
"get_foreign_struct",
"(",
"namespace",
",",
"symbol",
")",
"except",
"ForeignError",
"as",
"e",
":",
"raise",
"ImportError",
"(",
"e",
")"
] | raises importerror if the specified foreign module isnt supported or the needed dependencies arent installed . | train | true |
23,421 | def save_indent(TokenClass, start=False):
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if (context.next_indent < context.indent):
while (context.next_indent < context.indent):
context.indent = context.indent_stack.pop()
if (context.next_indent > context.indent):
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
(yield (match.start(), TokenClass, text))
if extra:
(yield ((match.start() + len(text)), TokenClass.Error, extra))
context.pos = match.end()
return callback
| [
"def",
"save_indent",
"(",
"TokenClass",
",",
"start",
"=",
"False",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"context",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"extra",
"=",
"''",
"if",
"start",
":",
"context",
".",
"next_indent",
"=",
"len",
"(",
"text",
")",
"if",
"(",
"context",
".",
"next_indent",
"<",
"context",
".",
"indent",
")",
":",
"while",
"(",
"context",
".",
"next_indent",
"<",
"context",
".",
"indent",
")",
":",
"context",
".",
"indent",
"=",
"context",
".",
"indent_stack",
".",
"pop",
"(",
")",
"if",
"(",
"context",
".",
"next_indent",
">",
"context",
".",
"indent",
")",
":",
"extra",
"=",
"text",
"[",
"context",
".",
"indent",
":",
"]",
"text",
"=",
"text",
"[",
":",
"context",
".",
"indent",
"]",
"else",
":",
"context",
".",
"next_indent",
"+=",
"len",
"(",
"text",
")",
"if",
"text",
":",
"(",
"yield",
"(",
"match",
".",
"start",
"(",
")",
",",
"TokenClass",
",",
"text",
")",
")",
"if",
"extra",
":",
"(",
"yield",
"(",
"(",
"match",
".",
"start",
"(",
")",
"+",
"len",
"(",
"text",
")",
")",
",",
"TokenClass",
".",
"Error",
",",
"extra",
")",
")",
"context",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"callback"
] | save a possible indentation level . | train | true |
23,422 | def show_dbs(*dbs):
if dbs:
log.debug('get dbs from pillar: {0}'.format(dbs))
result = {}
for db in dbs:
result[db] = __salt__['pillar.get'](('oracle:dbs:' + db))
return result
else:
pillar_dbs = __salt__['pillar.get']('oracle:dbs')
log.debug('get all ({0}) dbs from pillar'.format(len(pillar_dbs)))
return pillar_dbs
| [
"def",
"show_dbs",
"(",
"*",
"dbs",
")",
":",
"if",
"dbs",
":",
"log",
".",
"debug",
"(",
"'get dbs from pillar: {0}'",
".",
"format",
"(",
"dbs",
")",
")",
"result",
"=",
"{",
"}",
"for",
"db",
"in",
"dbs",
":",
"result",
"[",
"db",
"]",
"=",
"__salt__",
"[",
"'pillar.get'",
"]",
"(",
"(",
"'oracle:dbs:'",
"+",
"db",
")",
")",
"return",
"result",
"else",
":",
"pillar_dbs",
"=",
"__salt__",
"[",
"'pillar.get'",
"]",
"(",
"'oracle:dbs'",
")",
"log",
".",
"debug",
"(",
"'get all ({0}) dbs from pillar'",
".",
"format",
"(",
"len",
"(",
"pillar_dbs",
")",
")",
")",
"return",
"pillar_dbs"
] | show databases configuration from pillar . | train | true |
23,424 | def check_render_pipe_str(pipestr, renderers, blacklist, whitelist):
parts = [r.strip() for r in pipestr.split('|')]
results = []
try:
if ((parts[0] == pipestr) and (pipestr in OLD_STYLE_RENDERERS)):
parts = OLD_STYLE_RENDERERS[pipestr].split('|')
for part in parts:
(name, argline) = (part + ' ').split(' ', 1)
if ((whitelist and (name not in whitelist)) or (blacklist and (name in blacklist))):
log.warning('The renderer "{0}" is disallowed by cofiguration and will be skipped.'.format(name))
continue
results.append((renderers[name], argline.strip()))
return results
except KeyError:
log.error('The renderer "{0}" is not available'.format(pipestr))
return []
| [
"def",
"check_render_pipe_str",
"(",
"pipestr",
",",
"renderers",
",",
"blacklist",
",",
"whitelist",
")",
":",
"parts",
"=",
"[",
"r",
".",
"strip",
"(",
")",
"for",
"r",
"in",
"pipestr",
".",
"split",
"(",
"'|'",
")",
"]",
"results",
"=",
"[",
"]",
"try",
":",
"if",
"(",
"(",
"parts",
"[",
"0",
"]",
"==",
"pipestr",
")",
"and",
"(",
"pipestr",
"in",
"OLD_STYLE_RENDERERS",
")",
")",
":",
"parts",
"=",
"OLD_STYLE_RENDERERS",
"[",
"pipestr",
"]",
".",
"split",
"(",
"'|'",
")",
"for",
"part",
"in",
"parts",
":",
"(",
"name",
",",
"argline",
")",
"=",
"(",
"part",
"+",
"' '",
")",
".",
"split",
"(",
"' '",
",",
"1",
")",
"if",
"(",
"(",
"whitelist",
"and",
"(",
"name",
"not",
"in",
"whitelist",
")",
")",
"or",
"(",
"blacklist",
"and",
"(",
"name",
"in",
"blacklist",
")",
")",
")",
":",
"log",
".",
"warning",
"(",
"'The renderer \"{0}\" is disallowed by cofiguration and will be skipped.'",
".",
"format",
"(",
"name",
")",
")",
"continue",
"results",
".",
"append",
"(",
"(",
"renderers",
"[",
"name",
"]",
",",
"argline",
".",
"strip",
"(",
")",
")",
")",
"return",
"results",
"except",
"KeyError",
":",
"log",
".",
"error",
"(",
"'The renderer \"{0}\" is not available'",
".",
"format",
"(",
"pipestr",
")",
")",
"return",
"[",
"]"
] | check that all renderers specified in the pipe string are available . | train | true |
23,427 | def to_size_in_mb(data_size, unit):
if (unit == 'G'):
return (data_size * 1024)
elif (unit == 'B'):
return (data_size / (1024 * 1024.0))
else:
return data_size
| [
"def",
"to_size_in_mb",
"(",
"data_size",
",",
"unit",
")",
":",
"if",
"(",
"unit",
"==",
"'G'",
")",
":",
"return",
"(",
"data_size",
"*",
"1024",
")",
"elif",
"(",
"unit",
"==",
"'B'",
")",
":",
"return",
"(",
"data_size",
"/",
"(",
"1024",
"*",
"1024.0",
")",
")",
"else",
":",
"return",
"data_size"
] | convert size in given unit: gb or b to size in mb . | train | false |
23,428 | def default_output_format(content_type='application/json', apply_globally=False, api=None):
def decorator(formatter):
formatter = hug.output_format.content_type(content_type)(formatter)
if apply_globally:
hug.defaults.output_format = formatter
else:
apply_to_api = (hug.API(api) if api else hug.api.from_object(formatter))
apply_to_api.http.output_format = formatter
return formatter
return decorator
| [
"def",
"default_output_format",
"(",
"content_type",
"=",
"'application/json'",
",",
"apply_globally",
"=",
"False",
",",
"api",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"formatter",
")",
":",
"formatter",
"=",
"hug",
".",
"output_format",
".",
"content_type",
"(",
"content_type",
")",
"(",
"formatter",
")",
"if",
"apply_globally",
":",
"hug",
".",
"defaults",
".",
"output_format",
"=",
"formatter",
"else",
":",
"apply_to_api",
"=",
"(",
"hug",
".",
"API",
"(",
"api",
")",
"if",
"api",
"else",
"hug",
".",
"api",
".",
"from_object",
"(",
"formatter",
")",
")",
"apply_to_api",
".",
"http",
".",
"output_format",
"=",
"formatter",
"return",
"formatter",
"return",
"decorator"
] | a decorator that allows you to override the default output format for an api . | train | false |
23,429 | def GetTestNames():
test_name = options.testname
test_path = os.path.join(_BASE_PATH, 'tests')
files = os.listdir(test_path)
_LOCAL = {}
for file in files:
if (file.startswith('test') and file.endswith('js')):
_LOCAL[file[:(-3)]] = {'name': file[:(-3)], 'images': [], 'warnings': {}, 'alert': None}
if options.list:
return _LOCAL.keys()
if test_name:
if (not (test_name in _LOCAL.keys())):
print 'Invalid testname provided. Use --list for a list of all available tests.'
exit()
_LOCAL = {}
_LOCAL[test_name] = {'name': test_name, 'images': [], 'warnings': {}, 'alert': None}
global _SUMMARY
_SUMMARY = _LOCAL
return _LOCAL.keys()
| [
"def",
"GetTestNames",
"(",
")",
":",
"test_name",
"=",
"options",
".",
"testname",
"test_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_BASE_PATH",
",",
"'tests'",
")",
"files",
"=",
"os",
".",
"listdir",
"(",
"test_path",
")",
"_LOCAL",
"=",
"{",
"}",
"for",
"file",
"in",
"files",
":",
"if",
"(",
"file",
".",
"startswith",
"(",
"'test'",
")",
"and",
"file",
".",
"endswith",
"(",
"'js'",
")",
")",
":",
"_LOCAL",
"[",
"file",
"[",
":",
"(",
"-",
"3",
")",
"]",
"]",
"=",
"{",
"'name'",
":",
"file",
"[",
":",
"(",
"-",
"3",
")",
"]",
",",
"'images'",
":",
"[",
"]",
",",
"'warnings'",
":",
"{",
"}",
",",
"'alert'",
":",
"None",
"}",
"if",
"options",
".",
"list",
":",
"return",
"_LOCAL",
".",
"keys",
"(",
")",
"if",
"test_name",
":",
"if",
"(",
"not",
"(",
"test_name",
"in",
"_LOCAL",
".",
"keys",
"(",
")",
")",
")",
":",
"print",
"'Invalid testname provided. Use --list for a list of all available tests.'",
"exit",
"(",
")",
"_LOCAL",
"=",
"{",
"}",
"_LOCAL",
"[",
"test_name",
"]",
"=",
"{",
"'name'",
":",
"test_name",
",",
"'images'",
":",
"[",
"]",
",",
"'warnings'",
":",
"{",
"}",
",",
"'alert'",
":",
"None",
"}",
"global",
"_SUMMARY",
"_SUMMARY",
"=",
"_LOCAL",
"return",
"_LOCAL",
".",
"keys",
"(",
")"
] | grab the test files in the js directory and store the filenames in a test name global dict . | train | false |
23,430 | def add_svn_segment(powerline):
try:
_add_svn_segment(powerline)
except OSError:
pass
except subprocess.CalledProcessError:
pass
| [
"def",
"add_svn_segment",
"(",
"powerline",
")",
":",
"try",
":",
"_add_svn_segment",
"(",
"powerline",
")",
"except",
"OSError",
":",
"pass",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"pass"
] | wraps _add_svn_segment in exception handling . | train | false |
23,431 | def py_library(name, srcs=[], deps=[], base=None, visibility=None, **kwargs):
target = PythonLibrary(name, srcs, deps, base, visibility, kwargs)
blade.blade.register_target(target)
| [
"def",
"py_library",
"(",
"name",
",",
"srcs",
"=",
"[",
"]",
",",
"deps",
"=",
"[",
"]",
",",
"base",
"=",
"None",
",",
"visibility",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"target",
"=",
"PythonLibrary",
"(",
"name",
",",
"srcs",
",",
"deps",
",",
"base",
",",
"visibility",
",",
"kwargs",
")",
"blade",
".",
"blade",
".",
"register_target",
"(",
"target",
")"
] | python library . | train | false |
23,432 | def render_number(children):
children_latex = [k.latex for k in children]
suffix = ''
if (children_latex[(-1)] in SUFFIXES):
suffix = children_latex.pop()
suffix = u'\\text{{{s}}}'.format(s=suffix)
if ('E' in children_latex):
pos = children_latex.index('E')
mantissa = ''.join(children_latex[:pos])
exponent = ''.join(children_latex[(pos + 1):])
latex = u'{m}\\!\\times\\!10^{{{e}}}{s}'.format(m=mantissa, e=exponent, s=suffix)
return LatexRendered(latex, tall=True)
else:
easy_number = ''.join(children_latex)
return LatexRendered((easy_number + suffix))
| [
"def",
"render_number",
"(",
"children",
")",
":",
"children_latex",
"=",
"[",
"k",
".",
"latex",
"for",
"k",
"in",
"children",
"]",
"suffix",
"=",
"''",
"if",
"(",
"children_latex",
"[",
"(",
"-",
"1",
")",
"]",
"in",
"SUFFIXES",
")",
":",
"suffix",
"=",
"children_latex",
".",
"pop",
"(",
")",
"suffix",
"=",
"u'\\\\text{{{s}}}'",
".",
"format",
"(",
"s",
"=",
"suffix",
")",
"if",
"(",
"'E'",
"in",
"children_latex",
")",
":",
"pos",
"=",
"children_latex",
".",
"index",
"(",
"'E'",
")",
"mantissa",
"=",
"''",
".",
"join",
"(",
"children_latex",
"[",
":",
"pos",
"]",
")",
"exponent",
"=",
"''",
".",
"join",
"(",
"children_latex",
"[",
"(",
"pos",
"+",
"1",
")",
":",
"]",
")",
"latex",
"=",
"u'{m}\\\\!\\\\times\\\\!10^{{{e}}}{s}'",
".",
"format",
"(",
"m",
"=",
"mantissa",
",",
"e",
"=",
"exponent",
",",
"s",
"=",
"suffix",
")",
"return",
"LatexRendered",
"(",
"latex",
",",
"tall",
"=",
"True",
")",
"else",
":",
"easy_number",
"=",
"''",
".",
"join",
"(",
"children_latex",
")",
"return",
"LatexRendered",
"(",
"(",
"easy_number",
"+",
"suffix",
")",
")"
] | combine the elements forming the number . | train | false |
23,433 | def dssp_dict_from_pdb_file(in_file, DSSP='dssp'):
p = subprocess.Popen([DSSP, in_file], universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
if err.strip():
warnings.warn(err)
if (not out.strip()):
raise Exception('DSSP failed to produce an output')
(out_dict, keys) = _make_dssp_dict(StringIO(out))
return (out_dict, keys)
| [
"def",
"dssp_dict_from_pdb_file",
"(",
"in_file",
",",
"DSSP",
"=",
"'dssp'",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"DSSP",
",",
"in_file",
"]",
",",
"universal_newlines",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"(",
"out",
",",
"err",
")",
"=",
"p",
".",
"communicate",
"(",
")",
"if",
"err",
".",
"strip",
"(",
")",
":",
"warnings",
".",
"warn",
"(",
"err",
")",
"if",
"(",
"not",
"out",
".",
"strip",
"(",
")",
")",
":",
"raise",
"Exception",
"(",
"'DSSP failed to produce an output'",
")",
"(",
"out_dict",
",",
"keys",
")",
"=",
"_make_dssp_dict",
"(",
"StringIO",
"(",
"out",
")",
")",
"return",
"(",
"out_dict",
",",
"keys",
")"
] | create a dssp dictionary from a pdb file . | train | false |
23,434 | def get_internal_tenant_context():
project_id = CONF.cinder_internal_tenant_project_id
user_id = CONF.cinder_internal_tenant_user_id
if (project_id and user_id):
return RequestContext(user_id=user_id, project_id=project_id, is_admin=True)
else:
LOG.warning(_LW('Unable to get internal tenant context: Missing required config parameters.'))
return None
| [
"def",
"get_internal_tenant_context",
"(",
")",
":",
"project_id",
"=",
"CONF",
".",
"cinder_internal_tenant_project_id",
"user_id",
"=",
"CONF",
".",
"cinder_internal_tenant_user_id",
"if",
"(",
"project_id",
"and",
"user_id",
")",
":",
"return",
"RequestContext",
"(",
"user_id",
"=",
"user_id",
",",
"project_id",
"=",
"project_id",
",",
"is_admin",
"=",
"True",
")",
"else",
":",
"LOG",
".",
"warning",
"(",
"_LW",
"(",
"'Unable to get internal tenant context: Missing required config parameters.'",
")",
")",
"return",
"None"
] | build and return the cinder internal tenant context object this request context will only work for internal cinder operations . | train | false |
23,437 | def UnpickleFromFile(datafile):
encoded_records = pickle.load(datafile)
records = []
for encoded_record in encoded_records:
record = datamodel_pb.RequestStatProto(encoded_record)
records.append(record)
datafile.close()
return records
| [
"def",
"UnpickleFromFile",
"(",
"datafile",
")",
":",
"encoded_records",
"=",
"pickle",
".",
"load",
"(",
"datafile",
")",
"records",
"=",
"[",
"]",
"for",
"encoded_record",
"in",
"encoded_records",
":",
"record",
"=",
"datamodel_pb",
".",
"RequestStatProto",
"(",
"encoded_record",
")",
"records",
".",
"append",
"(",
"record",
")",
"datafile",
".",
"close",
"(",
")",
"return",
"records"
] | reads appstats data from file . | train | false |
23,438 | def vtk_output(obj):
if vtk_old():
return obj.output
return obj.get_output()
| [
"def",
"vtk_output",
"(",
"obj",
")",
":",
"if",
"vtk_old",
"(",
")",
":",
"return",
"obj",
".",
"output",
"return",
"obj",
".",
"get_output",
"(",
")"
] | configure the input data for vtk pipeline object obj . | train | false |
23,439 | def test_lex_line_counting():
entry = tokenize('(foo (one two))')[0]
assert (entry.start_line == 1)
assert (entry.start_column == 1)
assert (entry.end_line == 1)
assert (entry.end_column == 15)
entry = entry[1]
assert (entry.start_line == 1)
assert (entry.start_column == 6)
assert (entry.end_line == 1)
assert (entry.end_column == 14)
| [
"def",
"test_lex_line_counting",
"(",
")",
":",
"entry",
"=",
"tokenize",
"(",
"'(foo (one two))'",
")",
"[",
"0",
"]",
"assert",
"(",
"entry",
".",
"start_line",
"==",
"1",
")",
"assert",
"(",
"entry",
".",
"start_column",
"==",
"1",
")",
"assert",
"(",
"entry",
".",
"end_line",
"==",
"1",
")",
"assert",
"(",
"entry",
".",
"end_column",
"==",
"15",
")",
"entry",
"=",
"entry",
"[",
"1",
"]",
"assert",
"(",
"entry",
".",
"start_line",
"==",
"1",
")",
"assert",
"(",
"entry",
".",
"start_column",
"==",
"6",
")",
"assert",
"(",
"entry",
".",
"end_line",
"==",
"1",
")",
"assert",
"(",
"entry",
".",
"end_column",
"==",
"14",
")"
] | make sure we can count lines / columns . | train | false |
23,441 | def _im_func(f):
if hasattr(f, '__func__'):
return f.__func__
else:
return f
| [
"def",
"_im_func",
"(",
"f",
")",
":",
"if",
"hasattr",
"(",
"f",
",",
"'__func__'",
")",
":",
"return",
"f",
".",
"__func__",
"else",
":",
"return",
"f"
] | wrapper to get at the underlying function belonging to a method . | train | false |
23,443 | def EncodeControlTuples(ldapControls):
if (ldapControls is None):
return None
else:
result = [c.getEncodedTuple() for c in ldapControls]
return result
| [
"def",
"EncodeControlTuples",
"(",
"ldapControls",
")",
":",
"if",
"(",
"ldapControls",
"is",
"None",
")",
":",
"return",
"None",
"else",
":",
"result",
"=",
"[",
"c",
".",
"getEncodedTuple",
"(",
")",
"for",
"c",
"in",
"ldapControls",
"]",
"return",
"result"
] | return list of readily encoded 3-tuples which can be directly passed to c module _ldap . | train | false |
23,444 | def MININDEX(ds, count, timeperiod=(- (2 ** 31))):
return call_talib_with_ds(ds, count, talib.MININDEX, timeperiod)
| [
"def",
"MININDEX",
"(",
"ds",
",",
"count",
",",
"timeperiod",
"=",
"(",
"-",
"(",
"2",
"**",
"31",
")",
")",
")",
":",
"return",
"call_talib_with_ds",
"(",
"ds",
",",
"count",
",",
"talib",
".",
"MININDEX",
",",
"timeperiod",
")"
] | index of lowest value over a specified period . | train | false |
23,445 | def evalcontextfunction(f):
f.evalcontextfunction = True
return f
| [
"def",
"evalcontextfunction",
"(",
"f",
")",
":",
"f",
".",
"evalcontextfunction",
"=",
"True",
"return",
"f"
] | this decoraotr can be used to mark a function or method as an eval context callable . | train | false |
23,446 | def splithost(url):
global _hostprog
if (_hostprog is None):
import re
_hostprog = re.compile('^//([^/?]*)(.*)$')
match = _hostprog.match(url)
if match:
host_port = match.group(1)
path = match.group(2)
if (path and (not path.startswith('/'))):
path = ('/' + path)
return (host_port, path)
return (None, url)
| [
"def",
"splithost",
"(",
"url",
")",
":",
"global",
"_hostprog",
"if",
"(",
"_hostprog",
"is",
"None",
")",
":",
"import",
"re",
"_hostprog",
"=",
"re",
".",
"compile",
"(",
"'^//([^/?]*)(.*)$'",
")",
"match",
"=",
"_hostprog",
".",
"match",
"(",
"url",
")",
"if",
"match",
":",
"host_port",
"=",
"match",
".",
"group",
"(",
"1",
")",
"path",
"=",
"match",
".",
"group",
"(",
"2",
")",
"if",
"(",
"path",
"and",
"(",
"not",
"path",
".",
"startswith",
"(",
"'/'",
")",
")",
")",
":",
"path",
"=",
"(",
"'/'",
"+",
"path",
")",
"return",
"(",
"host_port",
",",
"path",
")",
"return",
"(",
"None",
",",
"url",
")"
] | splithost --> host[:port] . | train | true |
23,447 | def remove_last_xmltag_in_file(fname, tag=None):
fh = open(fname, 'r+')
i = (-1)
last_tag = []
while True:
fh.seek(i, 2)
char = fh.read(1)
if (not char.isspace()):
last_tag.append(char)
if (char == '<'):
break
i -= 1
last_tag = ''.join(last_tag[::(-1)])
last_tag = last_tag.rstrip('>').lstrip('</')
if ((tag is not None) and (tag != last_tag)):
etxt = ('The given xml tag (%s) was not the last one in the file' % tag)
raise RuntimeError(etxt)
i -= 1
while True:
fh.seek(i, 2)
char = fh.read(1)
if ((not (char == ' ')) and (not (char == ' DCTB '))):
break
if (fh.tell() == 1):
break
i -= 1
fh.truncate()
fh.close()
return last_tag
| [
"def",
"remove_last_xmltag_in_file",
"(",
"fname",
",",
"tag",
"=",
"None",
")",
":",
"fh",
"=",
"open",
"(",
"fname",
",",
"'r+'",
")",
"i",
"=",
"(",
"-",
"1",
")",
"last_tag",
"=",
"[",
"]",
"while",
"True",
":",
"fh",
".",
"seek",
"(",
"i",
",",
"2",
")",
"char",
"=",
"fh",
".",
"read",
"(",
"1",
")",
"if",
"(",
"not",
"char",
".",
"isspace",
"(",
")",
")",
":",
"last_tag",
".",
"append",
"(",
"char",
")",
"if",
"(",
"char",
"==",
"'<'",
")",
":",
"break",
"i",
"-=",
"1",
"last_tag",
"=",
"''",
".",
"join",
"(",
"last_tag",
"[",
":",
":",
"(",
"-",
"1",
")",
"]",
")",
"last_tag",
"=",
"last_tag",
".",
"rstrip",
"(",
"'>'",
")",
".",
"lstrip",
"(",
"'</'",
")",
"if",
"(",
"(",
"tag",
"is",
"not",
"None",
")",
"and",
"(",
"tag",
"!=",
"last_tag",
")",
")",
":",
"etxt",
"=",
"(",
"'The given xml tag (%s) was not the last one in the file'",
"%",
"tag",
")",
"raise",
"RuntimeError",
"(",
"etxt",
")",
"i",
"-=",
"1",
"while",
"True",
":",
"fh",
".",
"seek",
"(",
"i",
",",
"2",
")",
"char",
"=",
"fh",
".",
"read",
"(",
"1",
")",
"if",
"(",
"(",
"not",
"(",
"char",
"==",
"' '",
")",
")",
"and",
"(",
"not",
"(",
"char",
"==",
"' DCTB '",
")",
")",
")",
":",
"break",
"if",
"(",
"fh",
".",
"tell",
"(",
")",
"==",
"1",
")",
":",
"break",
"i",
"-=",
"1",
"fh",
".",
"truncate",
"(",
")",
"fh",
".",
"close",
"(",
")",
"return",
"last_tag"
] | given an xml file name and a tag . | train | false |
23,449 | def filetype(mode):
if stat.S_ISLNK(mode):
return 'link'
elif stat.S_ISDIR(mode):
return 'dir'
elif stat.S_ISREG(mode):
return 'file'
else:
return 'unknown'
| [
"def",
"filetype",
"(",
"mode",
")",
":",
"if",
"stat",
".",
"S_ISLNK",
"(",
"mode",
")",
":",
"return",
"'link'",
"elif",
"stat",
".",
"S_ISDIR",
"(",
"mode",
")",
":",
"return",
"'dir'",
"elif",
"stat",
".",
"S_ISREG",
"(",
"mode",
")",
":",
"return",
"'file'",
"else",
":",
"return",
"'unknown'"
] | returns "dir" or "file" according to what type path is . | train | false |
23,452 | def get_readme_file_names(repository_name):
readme_files = ['readme', 'read_me', 'install']
valid_filenames = [('%s.txt' % f) for f in readme_files]
valid_filenames.extend([('%s.rst' % f) for f in readme_files])
valid_filenames.extend(readme_files)
valid_filenames.append(('%s.txt' % repository_name))
valid_filenames.append(('%s.rst' % repository_name))
return valid_filenames
| [
"def",
"get_readme_file_names",
"(",
"repository_name",
")",
":",
"readme_files",
"=",
"[",
"'readme'",
",",
"'read_me'",
",",
"'install'",
"]",
"valid_filenames",
"=",
"[",
"(",
"'%s.txt'",
"%",
"f",
")",
"for",
"f",
"in",
"readme_files",
"]",
"valid_filenames",
".",
"extend",
"(",
"[",
"(",
"'%s.rst'",
"%",
"f",
")",
"for",
"f",
"in",
"readme_files",
"]",
")",
"valid_filenames",
".",
"extend",
"(",
"readme_files",
")",
"valid_filenames",
".",
"append",
"(",
"(",
"'%s.txt'",
"%",
"repository_name",
")",
")",
"valid_filenames",
".",
"append",
"(",
"(",
"'%s.rst'",
"%",
"repository_name",
")",
")",
"return",
"valid_filenames"
] | return a list of file names that will be categorized as readme files for the received repository_name . | train | false |
23,453 | def _get_init_fn():
if (FLAGS.checkpoint_path is None):
return None
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(('Ignoring --checkpoint_path because a checkpoint already exists in %s' % FLAGS.train_dir))
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip() for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if (not excluded):
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info(('Fine-tuning from %s' % checkpoint_path))
return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore, ignore_missing_vars=FLAGS.ignore_missing_vars)
| [
"def",
"_get_init_fn",
"(",
")",
":",
"if",
"(",
"FLAGS",
".",
"checkpoint_path",
"is",
"None",
")",
":",
"return",
"None",
"if",
"tf",
".",
"train",
".",
"latest_checkpoint",
"(",
"FLAGS",
".",
"train_dir",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"(",
"'Ignoring --checkpoint_path because a checkpoint already exists in %s'",
"%",
"FLAGS",
".",
"train_dir",
")",
")",
"return",
"None",
"exclusions",
"=",
"[",
"]",
"if",
"FLAGS",
".",
"checkpoint_exclude_scopes",
":",
"exclusions",
"=",
"[",
"scope",
".",
"strip",
"(",
")",
"for",
"scope",
"in",
"FLAGS",
".",
"checkpoint_exclude_scopes",
".",
"split",
"(",
"','",
")",
"]",
"variables_to_restore",
"=",
"[",
"]",
"for",
"var",
"in",
"slim",
".",
"get_model_variables",
"(",
")",
":",
"excluded",
"=",
"False",
"for",
"exclusion",
"in",
"exclusions",
":",
"if",
"var",
".",
"op",
".",
"name",
".",
"startswith",
"(",
"exclusion",
")",
":",
"excluded",
"=",
"True",
"break",
"if",
"(",
"not",
"excluded",
")",
":",
"variables_to_restore",
".",
"append",
"(",
"var",
")",
"if",
"tf",
".",
"gfile",
".",
"IsDirectory",
"(",
"FLAGS",
".",
"checkpoint_path",
")",
":",
"checkpoint_path",
"=",
"tf",
".",
"train",
".",
"latest_checkpoint",
"(",
"FLAGS",
".",
"checkpoint_path",
")",
"else",
":",
"checkpoint_path",
"=",
"FLAGS",
".",
"checkpoint_path",
"tf",
".",
"logging",
".",
"info",
"(",
"(",
"'Fine-tuning from %s'",
"%",
"checkpoint_path",
")",
")",
"return",
"slim",
".",
"assign_from_checkpoint_fn",
"(",
"checkpoint_path",
",",
"variables_to_restore",
",",
"ignore_missing_vars",
"=",
"FLAGS",
".",
"ignore_missing_vars",
")"
] | returns a function run by the chief worker to warm-start the training . | train | false |
23,458 | def get_closed_threads(exploration_id, has_suggestion):
threads = get_threads(exploration_id)
closed_threads = []
for thread in threads:
if ((thread.has_suggestion == has_suggestion) and (thread.status != feedback_models.STATUS_CHOICES_OPEN)):
closed_threads.append(thread)
return closed_threads
| [
"def",
"get_closed_threads",
"(",
"exploration_id",
",",
"has_suggestion",
")",
":",
"threads",
"=",
"get_threads",
"(",
"exploration_id",
")",
"closed_threads",
"=",
"[",
"]",
"for",
"thread",
"in",
"threads",
":",
"if",
"(",
"(",
"thread",
".",
"has_suggestion",
"==",
"has_suggestion",
")",
"and",
"(",
"thread",
".",
"status",
"!=",
"feedback_models",
".",
"STATUS_CHOICES_OPEN",
")",
")",
":",
"closed_threads",
".",
"append",
"(",
"thread",
")",
"return",
"closed_threads"
] | fetches all closed threads of the given exploration id . | train | false |
23,459 | def make_error_series(rare_mat, groups, std_type):
err_ser = dict()
collapsed_ser = dict()
seen = set()
pre_err = {}
ops = [k for k in groups]
notfound = []
for o in ops:
pre_err[o] = []
for samID in groups[o]:
pre_err[o].append(rare_mat[samID])
min_len = min([(len(i) - i.count('nan')) for i in pre_err[o]])
pre_err[o] = [x[:min_len] for x in pre_err[o]]
for o in ops:
opsarray = array(pre_err[o])
mn = mean(opsarray, 0)
collapsed_ser[o] = mn.tolist()
if (std_type == 'stderr'):
stderr_result = stderr(opsarray, 0)
err_ser[o] = stderr_result.tolist()
else:
stddev = std(opsarray, 0)
err_ser[o] = stddev.tolist()
return (collapsed_ser, err_ser, ops)
| [
"def",
"make_error_series",
"(",
"rare_mat",
",",
"groups",
",",
"std_type",
")",
":",
"err_ser",
"=",
"dict",
"(",
")",
"collapsed_ser",
"=",
"dict",
"(",
")",
"seen",
"=",
"set",
"(",
")",
"pre_err",
"=",
"{",
"}",
"ops",
"=",
"[",
"k",
"for",
"k",
"in",
"groups",
"]",
"notfound",
"=",
"[",
"]",
"for",
"o",
"in",
"ops",
":",
"pre_err",
"[",
"o",
"]",
"=",
"[",
"]",
"for",
"samID",
"in",
"groups",
"[",
"o",
"]",
":",
"pre_err",
"[",
"o",
"]",
".",
"append",
"(",
"rare_mat",
"[",
"samID",
"]",
")",
"min_len",
"=",
"min",
"(",
"[",
"(",
"len",
"(",
"i",
")",
"-",
"i",
".",
"count",
"(",
"'nan'",
")",
")",
"for",
"i",
"in",
"pre_err",
"[",
"o",
"]",
"]",
")",
"pre_err",
"[",
"o",
"]",
"=",
"[",
"x",
"[",
":",
"min_len",
"]",
"for",
"x",
"in",
"pre_err",
"[",
"o",
"]",
"]",
"for",
"o",
"in",
"ops",
":",
"opsarray",
"=",
"array",
"(",
"pre_err",
"[",
"o",
"]",
")",
"mn",
"=",
"mean",
"(",
"opsarray",
",",
"0",
")",
"collapsed_ser",
"[",
"o",
"]",
"=",
"mn",
".",
"tolist",
"(",
")",
"if",
"(",
"std_type",
"==",
"'stderr'",
")",
":",
"stderr_result",
"=",
"stderr",
"(",
"opsarray",
",",
"0",
")",
"err_ser",
"[",
"o",
"]",
"=",
"stderr_result",
".",
"tolist",
"(",
")",
"else",
":",
"stddev",
"=",
"std",
"(",
"opsarray",
",",
"0",
")",
"err_ser",
"[",
"o",
"]",
"=",
"stddev",
".",
"tolist",
"(",
")",
"return",
"(",
"collapsed_ser",
",",
"err_ser",
",",
"ops",
")"
] | create mean and error bar series for the supplied mapping category . | train | false |
23,460 | def _rows_to_ndarray(cursor):
return np.squeeze(np.array([row[3:] for row in cursor.fetchall()]))
| [
"def",
"_rows_to_ndarray",
"(",
"cursor",
")",
":",
"return",
"np",
".",
"squeeze",
"(",
"np",
".",
"array",
"(",
"[",
"row",
"[",
"3",
":",
"]",
"for",
"row",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"]",
")",
")"
] | convert sql row to ndarray . | train | false |
23,461 | def top_contributors_kb(start=None, end=None, product=None, count=10, page=1):
return top_contributors_l10n(start, end, settings.WIKI_DEFAULT_LANGUAGE, product, count)
| [
"def",
"top_contributors_kb",
"(",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"product",
"=",
"None",
",",
"count",
"=",
"10",
",",
"page",
"=",
"1",
")",
":",
"return",
"top_contributors_l10n",
"(",
"start",
",",
"end",
",",
"settings",
".",
"WIKI_DEFAULT_LANGUAGE",
",",
"product",
",",
"count",
")"
] | get the top kb editors . | train | false |
23,462 | def webob_factory(url):
base_url = url
def web_request(url, method=None, body=None):
req = webob.Request.blank(('%s%s' % (base_url, url)))
if method:
req.content_type = 'application/json'
req.method = method
if body:
req.body = jsonutils.dumps(body)
return req
return web_request
| [
"def",
"webob_factory",
"(",
"url",
")",
":",
"base_url",
"=",
"url",
"def",
"web_request",
"(",
"url",
",",
"method",
"=",
"None",
",",
"body",
"=",
"None",
")",
":",
"req",
"=",
"webob",
".",
"Request",
".",
"blank",
"(",
"(",
"'%s%s'",
"%",
"(",
"base_url",
",",
"url",
")",
")",
")",
"if",
"method",
":",
"req",
".",
"content_type",
"=",
"'application/json'",
"req",
".",
"method",
"=",
"method",
"if",
"body",
":",
"req",
".",
"body",
"=",
"jsonutils",
".",
"dumps",
"(",
"body",
")",
"return",
"req",
"return",
"web_request"
] | factory for removing duplicate webob code from tests . | train | false |
23,463 | def make_env(*packages):
if (not os.path.exists(env_root)):
os.makedirs(env_root)
env = os.path.join(env_root, os.path.basename(py_exe))
py = pjoin(env, 'bin', 'python')
if (not os.path.exists(py)):
run('python -m virtualenv {} -p {}'.format(pipes.quote(env), pipes.quote(py_exe)))
py = pjoin(env, 'bin', 'python')
run([py, '-V'])
install(py, 'pip', 'setuptools')
if packages:
install(py, *packages)
return py
| [
"def",
"make_env",
"(",
"*",
"packages",
")",
":",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"env_root",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"env_root",
")",
"env",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env_root",
",",
"os",
".",
"path",
".",
"basename",
"(",
"py_exe",
")",
")",
"py",
"=",
"pjoin",
"(",
"env",
",",
"'bin'",
",",
"'python'",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"py",
")",
")",
":",
"run",
"(",
"'python -m virtualenv {} -p {}'",
".",
"format",
"(",
"pipes",
".",
"quote",
"(",
"env",
")",
",",
"pipes",
".",
"quote",
"(",
"py_exe",
")",
")",
")",
"py",
"=",
"pjoin",
"(",
"env",
",",
"'bin'",
",",
"'python'",
")",
"run",
"(",
"[",
"py",
",",
"'-V'",
"]",
")",
"install",
"(",
"py",
",",
"'pip'",
",",
"'setuptools'",
")",
"if",
"packages",
":",
"install",
"(",
"py",
",",
"*",
"packages",
")",
"return",
"py"
] | make a virtualenv assumes which python has the virtualenv package . | train | false |
23,464 | def affine_map(points1, points2):
A = np.ones((4, 4))
A[:, :3] = points1
B = np.ones((4, 4))
B[:, :3] = points2
matrix = np.eye(4)
for i in range(3):
matrix[i] = np.linalg.solve(A, B[:, i])
return matrix
| [
"def",
"affine_map",
"(",
"points1",
",",
"points2",
")",
":",
"A",
"=",
"np",
".",
"ones",
"(",
"(",
"4",
",",
"4",
")",
")",
"A",
"[",
":",
",",
":",
"3",
"]",
"=",
"points1",
"B",
"=",
"np",
".",
"ones",
"(",
"(",
"4",
",",
"4",
")",
")",
"B",
"[",
":",
",",
":",
"3",
"]",
"=",
"points2",
"matrix",
"=",
"np",
".",
"eye",
"(",
"4",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"matrix",
"[",
"i",
"]",
"=",
"np",
".",
"linalg",
".",
"solve",
"(",
"A",
",",
"B",
"[",
":",
",",
"i",
"]",
")",
"return",
"matrix"
] | find a 3d transformation matrix that maps points1 onto points2 . | train | true |
23,465 | def callWithLogger(logger, func, *args, **kw):
try:
lp = logger.logPrefix()
except KeyboardInterrupt:
raise
except:
lp = '(buggy logPrefix method)'
err(system=lp)
try:
return callWithContext({'system': lp}, func, *args, **kw)
except KeyboardInterrupt:
raise
except:
err(system=lp)
| [
"def",
"callWithLogger",
"(",
"logger",
",",
"func",
",",
"*",
"args",
",",
"**",
"kw",
")",
":",
"try",
":",
"lp",
"=",
"logger",
".",
"logPrefix",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"raise",
"except",
":",
"lp",
"=",
"'(buggy logPrefix method)'",
"err",
"(",
"system",
"=",
"lp",
")",
"try",
":",
"return",
"callWithContext",
"(",
"{",
"'system'",
":",
"lp",
"}",
",",
"func",
",",
"*",
"args",
",",
"**",
"kw",
")",
"except",
"KeyboardInterrupt",
":",
"raise",
"except",
":",
"err",
"(",
"system",
"=",
"lp",
")"
] | utility method which wraps a function in a try:/except: . | train | false |
23,466 | def CreateTasksFilter(pc, tasks):
if (not tasks):
return None
objspecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task) for task in tasks]
propspec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task, pathSet=[], all=True)
filterspec = vmodl.query.PropertyCollector.FilterSpec()
filterspec.objectSet = objspecs
filterspec.propSet = [propspec]
return pc.CreateFilter(filterspec, True)
| [
"def",
"CreateTasksFilter",
"(",
"pc",
",",
"tasks",
")",
":",
"if",
"(",
"not",
"tasks",
")",
":",
"return",
"None",
"objspecs",
"=",
"[",
"vmodl",
".",
"query",
".",
"PropertyCollector",
".",
"ObjectSpec",
"(",
"obj",
"=",
"task",
")",
"for",
"task",
"in",
"tasks",
"]",
"propspec",
"=",
"vmodl",
".",
"query",
".",
"PropertyCollector",
".",
"PropertySpec",
"(",
"type",
"=",
"vim",
".",
"Task",
",",
"pathSet",
"=",
"[",
"]",
",",
"all",
"=",
"True",
")",
"filterspec",
"=",
"vmodl",
".",
"query",
".",
"PropertyCollector",
".",
"FilterSpec",
"(",
")",
"filterspec",
".",
"objectSet",
"=",
"objspecs",
"filterspec",
".",
"propSet",
"=",
"[",
"propspec",
"]",
"return",
"pc",
".",
"CreateFilter",
"(",
"filterspec",
",",
"True",
")"
] | create property collector filter for tasks . | train | true |
23,469 | def verify_email(user, dest=None):
from r2.lib.pages import VerifyEmail
user.email_verified = False
user._commit()
Award.take_away('verified_email', user)
token = EmailVerificationToken._new(user)
base = (g.https_endpoint or g.origin)
emaillink = ((base + '/verification/') + token._id)
if dest:
emaillink += ('?dest=%s' % dest)
g.log.debug(('Generated email verification link: ' + emaillink))
_system_email(user.email, VerifyEmail(user=user, emaillink=emaillink).render(style='email'), Email.Kind.VERIFY_EMAIL)
| [
"def",
"verify_email",
"(",
"user",
",",
"dest",
"=",
"None",
")",
":",
"from",
"r2",
".",
"lib",
".",
"pages",
"import",
"VerifyEmail",
"user",
".",
"email_verified",
"=",
"False",
"user",
".",
"_commit",
"(",
")",
"Award",
".",
"take_away",
"(",
"'verified_email'",
",",
"user",
")",
"token",
"=",
"EmailVerificationToken",
".",
"_new",
"(",
"user",
")",
"base",
"=",
"(",
"g",
".",
"https_endpoint",
"or",
"g",
".",
"origin",
")",
"emaillink",
"=",
"(",
"(",
"base",
"+",
"'/verification/'",
")",
"+",
"token",
".",
"_id",
")",
"if",
"dest",
":",
"emaillink",
"+=",
"(",
"'?dest=%s'",
"%",
"dest",
")",
"g",
".",
"log",
".",
"debug",
"(",
"(",
"'Generated email verification link: '",
"+",
"emaillink",
")",
")",
"_system_email",
"(",
"user",
".",
"email",
",",
"VerifyEmail",
"(",
"user",
"=",
"user",
",",
"emaillink",
"=",
"emaillink",
")",
".",
"render",
"(",
"style",
"=",
"'email'",
")",
",",
"Email",
".",
"Kind",
".",
"VERIFY_EMAIL",
")"
] | for verifying an email address . | train | false |
23,470 | @task
def get_sympy_short_version():
version = get_sympy_version()
parts = version.split('.')
non_rc_parts = [i for i in parts if i.isdigit()]
return '.'.join(non_rc_parts)
| [
"@",
"task",
"def",
"get_sympy_short_version",
"(",
")",
":",
"version",
"=",
"get_sympy_version",
"(",
")",
"parts",
"=",
"version",
".",
"split",
"(",
"'.'",
")",
"non_rc_parts",
"=",
"[",
"i",
"for",
"i",
"in",
"parts",
"if",
"i",
".",
"isdigit",
"(",
")",
"]",
"return",
"'.'",
".",
"join",
"(",
"non_rc_parts",
")"
] | get the short version of sympy being released . | train | false |
23,473 | def tailLines(filename, linesback):
avgcharsperline = 150
file = open(filename, 'r')
while 1:
try:
file.seek((((-1) * avgcharsperline) * linesback), 2)
except IOError:
file.seek(0)
if (file.tell() == 0):
atstart = 1
else:
atstart = 0
lines = file.read().split('\n')
if ((len(lines) > (linesback + 1)) or atstart):
break
avgcharsperline = (avgcharsperline * 1.3)
file.close()
if (len(lines) > linesback):
start = ((len(lines) - linesback) - 1)
else:
start = 0
return lines[start:(len(lines) - 1)]
| [
"def",
"tailLines",
"(",
"filename",
",",
"linesback",
")",
":",
"avgcharsperline",
"=",
"150",
"file",
"=",
"open",
"(",
"filename",
",",
"'r'",
")",
"while",
"1",
":",
"try",
":",
"file",
".",
"seek",
"(",
"(",
"(",
"(",
"-",
"1",
")",
"*",
"avgcharsperline",
")",
"*",
"linesback",
")",
",",
"2",
")",
"except",
"IOError",
":",
"file",
".",
"seek",
"(",
"0",
")",
"if",
"(",
"file",
".",
"tell",
"(",
")",
"==",
"0",
")",
":",
"atstart",
"=",
"1",
"else",
":",
"atstart",
"=",
"0",
"lines",
"=",
"file",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"(",
"(",
"len",
"(",
"lines",
")",
">",
"(",
"linesback",
"+",
"1",
")",
")",
"or",
"atstart",
")",
":",
"break",
"avgcharsperline",
"=",
"(",
"avgcharsperline",
"*",
"1.3",
")",
"file",
".",
"close",
"(",
")",
"if",
"(",
"len",
"(",
"lines",
")",
">",
"linesback",
")",
":",
"start",
"=",
"(",
"(",
"len",
"(",
"lines",
")",
"-",
"linesback",
")",
"-",
"1",
")",
"else",
":",
"start",
"=",
"0",
"return",
"lines",
"[",
"start",
":",
"(",
"len",
"(",
"lines",
")",
"-",
"1",
")",
"]"
] | python tail - modified from recipe at URL returns list of [linesback] lines from end of [filename] . | train | false |
23,476 | def get_dependencies():
deps = {'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova()}
return config.check_driver_dependencies(__virtualname__, deps)
| [
"def",
"get_dependencies",
"(",
")",
":",
"deps",
"=",
"{",
"'netaddr'",
":",
"HAS_NETADDR",
",",
"'python-novaclient'",
":",
"nova",
".",
"check_nova",
"(",
")",
"}",
"return",
"config",
".",
"check_driver_dependencies",
"(",
"__virtualname__",
",",
"deps",
")"
] | warn if dependencies arent met . | train | false |
23,477 | def packb(o, **kwargs):
return Packer(**kwargs).pack(o)
| [
"def",
"packb",
"(",
"o",
",",
"**",
"kwargs",
")",
":",
"return",
"Packer",
"(",
"**",
"kwargs",
")",
".",
"pack",
"(",
"o",
")"
] | pack object o and return packed bytes see :class:packer for options . | train | false |
23,478 | def _is_arraylike(x):
return (hasattr(x, '__len__') or hasattr(x, 'shape') or hasattr(x, '__array__'))
| [
"def",
"_is_arraylike",
"(",
"x",
")",
":",
"return",
"(",
"hasattr",
"(",
"x",
",",
"'__len__'",
")",
"or",
"hasattr",
"(",
"x",
",",
"'shape'",
")",
"or",
"hasattr",
"(",
"x",
",",
"'__array__'",
")",
")"
] | returns whether the input is array-like . | train | false |
23,479 | def parse_metric_family(buf):
n = 0
while (n < len(buf)):
(msg_len, new_pos) = _DecodeVarint32(buf, n)
n = new_pos
msg_buf = buf[n:(n + msg_len)]
n += msg_len
message = metrics_pb2.MetricFamily()
message.ParseFromString(msg_buf)
(yield message)
| [
"def",
"parse_metric_family",
"(",
"buf",
")",
":",
"n",
"=",
"0",
"while",
"(",
"n",
"<",
"len",
"(",
"buf",
")",
")",
":",
"(",
"msg_len",
",",
"new_pos",
")",
"=",
"_DecodeVarint32",
"(",
"buf",
",",
"n",
")",
"n",
"=",
"new_pos",
"msg_buf",
"=",
"buf",
"[",
"n",
":",
"(",
"n",
"+",
"msg_len",
")",
"]",
"n",
"+=",
"msg_len",
"message",
"=",
"metrics_pb2",
".",
"MetricFamily",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"msg_buf",
")",
"(",
"yield",
"message",
")"
] | parse the binary buffer in input . | train | true |
23,480 | @frappe.whitelist()
def add_column(board_name, column_title):
doc = frappe.get_doc(u'Kanban Board', board_name)
for col in doc.columns:
if (column_title == col.column_name):
frappe.throw(_(u'Column <b>{0}</b> already exist.').format(column_title))
doc.append(u'columns', dict(column_name=column_title))
doc.save()
return doc.columns
| [
"@",
"frappe",
".",
"whitelist",
"(",
")",
"def",
"add_column",
"(",
"board_name",
",",
"column_title",
")",
":",
"doc",
"=",
"frappe",
".",
"get_doc",
"(",
"u'Kanban Board'",
",",
"board_name",
")",
"for",
"col",
"in",
"doc",
".",
"columns",
":",
"if",
"(",
"column_title",
"==",
"col",
".",
"column_name",
")",
":",
"frappe",
".",
"throw",
"(",
"_",
"(",
"u'Column <b>{0}</b> already exist.'",
")",
".",
"format",
"(",
"column_title",
")",
")",
"doc",
".",
"append",
"(",
"u'columns'",
",",
"dict",
"(",
"column_name",
"=",
"column_title",
")",
")",
"doc",
".",
"save",
"(",
")",
"return",
"doc",
".",
"columns"
] | add field to the given spyne object also mapped as a sqlalchemy object to a sqlalchemy table . | train | false |
23,482 | def _marker_symbol(tagname, value, namespace=None):
if (namespace is not None):
tagname = ('{%s}%s' % (namespace, tagname))
return Element(tagname, val=safe_string(value))
| [
"def",
"_marker_symbol",
"(",
"tagname",
",",
"value",
",",
"namespace",
"=",
"None",
")",
":",
"if",
"(",
"namespace",
"is",
"not",
"None",
")",
":",
"tagname",
"=",
"(",
"'{%s}%s'",
"%",
"(",
"namespace",
",",
"tagname",
")",
")",
"return",
"Element",
"(",
"tagname",
",",
"val",
"=",
"safe_string",
"(",
"value",
")",
")"
] | override serialisation because explicit none required . | train | false |
23,483 | def _setup_fd(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, (flags | os.O_NONBLOCK))
set_buf_size(fd)
| [
"def",
"_setup_fd",
"(",
"fd",
")",
":",
"flags",
"=",
"fcntl",
".",
"fcntl",
"(",
"fd",
",",
"fcntl",
".",
"F_GETFL",
")",
"fcntl",
".",
"fcntl",
"(",
"fd",
",",
"fcntl",
".",
"F_SETFL",
",",
"(",
"flags",
"|",
"os",
".",
"O_NONBLOCK",
")",
")",
"set_buf_size",
"(",
"fd",
")"
] | common set-up code for initializing a file descriptor . | train | true |
23,484 | def test_scenario_outline_representation_without_colors():
feature_file = ojoin('..', 'simple_features', '1st_feature_dir', 'some.feature')
feature = Feature.from_file(feature_file)
assert_equals(feature.scenarios[0].represented(), ' Scenario Outline: Add two numbers # tests/functional/simple_features/1st_feature_dir/some.feature:10\n')
| [
"def",
"test_scenario_outline_representation_without_colors",
"(",
")",
":",
"feature_file",
"=",
"ojoin",
"(",
"'..'",
",",
"'simple_features'",
",",
"'1st_feature_dir'",
",",
"'some.feature'",
")",
"feature",
"=",
"Feature",
".",
"from_file",
"(",
"feature_file",
")",
"assert_equals",
"(",
"feature",
".",
"scenarios",
"[",
"0",
"]",
".",
"represented",
"(",
")",
",",
"' Scenario Outline: Add two numbers # tests/functional/simple_features/1st_feature_dir/some.feature:10\\n'",
")"
] | scenario outline represented without colors . | train | false |
23,485 | def delete_api_integration_response(restApiId, resourcePath, httpMethod, statusCode, region=None, key=None, keyid=None, profile=None):
try:
resource = describe_api_resource(restApiId, resourcePath, region=region, key=key, keyid=keyid, profile=profile).get('resource')
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_integration_response(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod, statusCode=statusCode)
return {'deleted': True}
return {'deleted': False, 'error': 'no such resource'}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
| [
"def",
"delete_api_integration_response",
"(",
"restApiId",
",",
"resourcePath",
",",
"httpMethod",
",",
"statusCode",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"resource",
"=",
"describe_api_resource",
"(",
"restApiId",
",",
"resourcePath",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
".",
"get",
"(",
"'resource'",
")",
"if",
"resource",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"conn",
".",
"delete_integration_response",
"(",
"restApiId",
"=",
"restApiId",
",",
"resourceId",
"=",
"resource",
"[",
"'id'",
"]",
",",
"httpMethod",
"=",
"httpMethod",
",",
"statusCode",
"=",
"statusCode",
")",
"return",
"{",
"'deleted'",
":",
"True",
"}",
"return",
"{",
"'deleted'",
":",
"False",
",",
"'error'",
":",
"'no such resource'",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'deleted'",
":",
"False",
",",
"'error'",
":",
"salt",
".",
"utils",
".",
"boto3",
".",
"get_error",
"(",
"e",
")",
"}"
] | deletes an integration response for a given method in a given api cli example: . | train | true |
23,486 | @profiler.trace
def firewall_create(request, **kwargs):
body = {'firewall': kwargs}
firewall = neutronclient(request).create_firewall(body).get('firewall')
return Firewall(firewall)
| [
"@",
"profiler",
".",
"trace",
"def",
"firewall_create",
"(",
"request",
",",
"**",
"kwargs",
")",
":",
"body",
"=",
"{",
"'firewall'",
":",
"kwargs",
"}",
"firewall",
"=",
"neutronclient",
"(",
"request",
")",
".",
"create_firewall",
"(",
"body",
")",
".",
"get",
"(",
"'firewall'",
")",
"return",
"Firewall",
"(",
"firewall",
")"
] | create a firewall for specified policy . | train | false |
def list_with_non_imports(lst):
    """Fixture mixing ordinary statements with an import statement.

    Exercises import extraction when an ``import`` appears between
    non-import expressions (attribute access and subscripting).
    """
    # Attribute access on a list element — not an import.
    lst[0].seed
    # The import under test, sandwiched between other statements.
    import collections as col
    # Subscript expression — also not an import.
    lst[1][10]
| [
"def",
"list_with_non_imports",
"(",
"lst",
")",
":",
"lst",
"[",
"0",
"]",
".",
"seed",
"import",
"collections",
"as",
"col",
"lst",
"[",
"1",
"]",
"[",
"10",
"]"
] | should be able to work with tuples and lists and still import stuff . | train | false |
def symlinks_supported():
    """Return True if the host platform supports (and permits) creating symlinks."""
    workdir = tempfile.mkdtemp()
    target = os.path.join(workdir, 'original')
    link = os.path.join(workdir, 'symlink')
    os.makedirs(target)
    try:
        try:
            os.symlink(target, link)
        except (OSError, NotImplementedError, AttributeError):
            # Unsupported platform, or insufficient privileges (e.g. Windows).
            return False
        os.remove(link)
        return True
    finally:
        # Always clean up the scratch directories.
        os.rmdir(target)
        os.rmdir(workdir)
| [
"def",
"symlinks_supported",
"(",
")",
":",
"tmpdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"original_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmpdir",
",",
"'original'",
")",
"symlink_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmpdir",
",",
"'symlink'",
")",
"os",
".",
"makedirs",
"(",
"original_path",
")",
"try",
":",
"os",
".",
"symlink",
"(",
"original_path",
",",
"symlink_path",
")",
"supported",
"=",
"True",
"except",
"(",
"OSError",
",",
"NotImplementedError",
",",
"AttributeError",
")",
":",
"supported",
"=",
"False",
"else",
":",
"os",
".",
"remove",
"(",
"symlink_path",
")",
"finally",
":",
"os",
".",
"rmdir",
"(",
"original_path",
")",
"os",
".",
"rmdir",
"(",
"tmpdir",
")",
"return",
"supported"
] | a function to check if creating symlinks are supported in the host platform and/or if they are allowed to be created . | train | false |
@real_memoize
def is_smartos_zone():
    """Return True if the host is a SmartOS zone (i.e. SmartOS but not the GZ).

    Runs ``zonename`` and reports True only when the command succeeds and the
    zone is not the global one.  Returns False on non-SmartOS hosts or when
    ``zonename`` cannot be executed.
    """
    if (not is_smartos()):
        return False
    else:
        cmd = ['zonename']
        try:
            zonename = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError:
            return False
        # Bug fix: wait for the process to finish before inspecting
        # returncode — Popen.returncode is None (falsy) until the process
        # has been reaped, so the original check could never fire.
        stdout, _ = zonename.communicate()
        if zonename.returncode:
            return False
        # Bug fix: pipe output is bytes on Python 3, so compare against a
        # bytes literal (equivalent to str on Python 2).
        if (stdout.strip() == b'global'):
            return False
        return True
| [
"@",
"real_memoize",
"def",
"is_smartos_zone",
"(",
")",
":",
"if",
"(",
"not",
"is_smartos",
"(",
")",
")",
":",
"return",
"False",
"else",
":",
"cmd",
"=",
"[",
"'zonename'",
"]",
"try",
":",
"zonename",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"False",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"except",
"OSError",
":",
"return",
"False",
"if",
"zonename",
".",
"returncode",
":",
"return",
"False",
"if",
"(",
"zonename",
".",
"stdout",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"==",
"'global'",
")",
":",
"return",
"False",
"return",
"True"
] | function to return if host is smartos and not the gz . | train | true |
def gep(builder, ptr, *inds, **kws):
    """Emit a getelementptr instruction for *ptr* with the given indices.

    Plain Python integers are promoted to 32-bit LLVM constants; any other
    index value is passed through unchanged.  Recognized keyword arguments
    are ``name`` and ``inbounds``.
    """
    name = kws.pop('name', '')
    inbounds = kws.pop('inbounds', False)
    assert (not kws)
    indices = [int32_t(i) if isinstance(i, utils.INT_TYPES) else i
               for i in inds]
    return builder.gep(ptr, indices, name=name, inbounds=inbounds)
| [
"def",
"gep",
"(",
"builder",
",",
"ptr",
",",
"*",
"inds",
",",
"**",
"kws",
")",
":",
"name",
"=",
"kws",
".",
"pop",
"(",
"'name'",
",",
"''",
")",
"inbounds",
"=",
"kws",
".",
"pop",
"(",
"'inbounds'",
",",
"False",
")",
"assert",
"(",
"not",
"kws",
")",
"idx",
"=",
"[",
"]",
"for",
"i",
"in",
"inds",
":",
"if",
"isinstance",
"(",
"i",
",",
"utils",
".",
"INT_TYPES",
")",
":",
"ind",
"=",
"int32_t",
"(",
"i",
")",
"else",
":",
"ind",
"=",
"i",
"idx",
".",
"append",
"(",
"ind",
")",
"return",
"builder",
".",
"gep",
"(",
"ptr",
",",
"idx",
",",
"name",
"=",
"name",
",",
"inbounds",
"=",
"inbounds",
")"
] | emit a getelementptr instruction for the given pointer and indices . | train | false |
def _api_config_set_colorscheme(output, kwargs):
    """API handler: set the primary and/or secondary web color scheme.

    Reads ``value`` (primary) and ``value2`` (secondary) from *kwargs*;
    reports an error when neither is supplied.
    """
    primary = kwargs.get('value')
    secondary = kwargs.get('value2')
    if primary:
        cfg.web_color.set(primary)
    if secondary:
        cfg.web_color2.set(secondary)
    if not (primary or secondary):
        return report(output, _MSG_NO_VALUE)
    return report(output)
| [
"def",
"_api_config_set_colorscheme",
"(",
"output",
",",
"kwargs",
")",
":",
"value",
"=",
"kwargs",
".",
"get",
"(",
"'value'",
")",
"value2",
"=",
"kwargs",
".",
"get",
"(",
"'value2'",
")",
"if",
"value",
":",
"cfg",
".",
"web_color",
".",
"set",
"(",
"value",
")",
"if",
"value2",
":",
"cfg",
".",
"web_color2",
".",
"set",
"(",
"value2",
")",
"if",
"(",
"value",
"or",
"value2",
")",
":",
"return",
"report",
"(",
"output",
")",
"else",
":",
"return",
"report",
"(",
"output",
",",
"_MSG_NO_VALUE",
")"
] | api: accepts output . | train | false |
def expand_ipv6_address(address):
    """Expand an abbreviated IPv6 address to its full colon-separated hex form.

    Raises ValueError if *address* is not a valid IPv6 address.
    """
    if (not is_valid_ipv6_address(address)):
        raise ValueError(("'%s' isn't a valid IPv6 address" % address))
    if ('::' in address):
        # Fill the '::' gap with enough empty groups to reach eight total.
        omitted = 7 - address.count(':')
        address = address.replace('::', '::' + ':' * omitted)
    # Left-pad every group to four hex digits.
    return ':'.join(group.zfill(4) for group in address.split(':'))
| [
"def",
"expand_ipv6_address",
"(",
"address",
")",
":",
"if",
"(",
"not",
"is_valid_ipv6_address",
"(",
"address",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"\"'%s' isn't a valid IPv6 address\"",
"%",
"address",
")",
")",
"if",
"(",
"'::'",
"in",
"address",
")",
":",
"missing_groups",
"=",
"(",
"7",
"-",
"address",
".",
"count",
"(",
"':'",
")",
")",
"address",
"=",
"address",
".",
"replace",
"(",
"'::'",
",",
"(",
"'::'",
"+",
"(",
"':'",
"*",
"missing_groups",
")",
")",
")",
"for",
"index",
"in",
"range",
"(",
"8",
")",
":",
"start",
"=",
"(",
"index",
"*",
"5",
")",
"end",
"=",
"(",
"address",
".",
"index",
"(",
"':'",
",",
"start",
")",
"if",
"(",
"index",
"!=",
"7",
")",
"else",
"len",
"(",
"address",
")",
")",
"missing_zeros",
"=",
"(",
"4",
"-",
"(",
"end",
"-",
"start",
")",
")",
"if",
"(",
"missing_zeros",
">",
"0",
")",
":",
"address",
"=",
"(",
"(",
"address",
"[",
":",
"start",
"]",
"+",
"(",
"'0'",
"*",
"missing_zeros",
")",
")",
"+",
"address",
"[",
"start",
":",
"]",
")",
"return",
"address"
] | expands abbreviated ipv6 addresses to their full colon separated hex format . | train | false |
def associate_dhcp_options_to_vpc(dhcp_options_id, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None):
    """Associate an existing DHCP options set with a VPC (given by id or name).

    Returns ``{'associated': True}`` on success, or ``{'associated': False,
    'error': ...}`` when the VPC is missing, the association fails, or boto
    raises.
    """
    try:
        vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
        if not vpc_id:
            missing = 'VPC {0} does not exist.'.format((vpc_name or vpc_id))
            return {'associated': False, 'error': {'message': missing}}
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if not conn.associate_dhcp_options(dhcp_options_id, vpc_id):
            log.warning('DHCP options with id {0} were not associated with VPC {1}'.format(dhcp_options_id, vpc_id))
            return {'associated': False, 'error': {'message': 'DHCP options could not be associated.'}}
        log.info('DHCP options with id {0} were associated with VPC {1}'.format(dhcp_options_id, vpc_id))
        return {'associated': True}
    except BotoServerError as e:
        return {'associated': False, 'error': salt.utils.boto.get_error(e)}
| [
"def",
"associate_dhcp_options_to_vpc",
"(",
"dhcp_options_id",
",",
"vpc_id",
"=",
"None",
",",
"vpc_name",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"vpc_id",
"=",
"check_vpc",
"(",
"vpc_id",
",",
"vpc_name",
",",
"region",
",",
"key",
",",
"keyid",
",",
"profile",
")",
"if",
"(",
"not",
"vpc_id",
")",
":",
"return",
"{",
"'associated'",
":",
"False",
",",
"'error'",
":",
"{",
"'message'",
":",
"'VPC {0} does not exist.'",
".",
"format",
"(",
"(",
"vpc_name",
"or",
"vpc_id",
")",
")",
"}",
"}",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"conn",
".",
"associate_dhcp_options",
"(",
"dhcp_options_id",
",",
"vpc_id",
")",
":",
"log",
".",
"info",
"(",
"'DHCP options with id {0} were associated with VPC {1}'",
".",
"format",
"(",
"dhcp_options_id",
",",
"vpc_id",
")",
")",
"return",
"{",
"'associated'",
":",
"True",
"}",
"else",
":",
"log",
".",
"warning",
"(",
"'DHCP options with id {0} were not associated with VPC {1}'",
".",
"format",
"(",
"dhcp_options_id",
",",
"vpc_id",
")",
")",
"return",
"{",
"'associated'",
":",
"False",
",",
"'error'",
":",
"{",
"'message'",
":",
"'DHCP options could not be associated.'",
"}",
"}",
"except",
"BotoServerError",
"as",
"e",
":",
"return",
"{",
"'associated'",
":",
"False",
",",
"'error'",
":",
"salt",
".",
"utils",
".",
"boto",
".",
"get_error",
"(",
"e",
")",
"}"
] | given valid dhcp options id and a valid vpc id . | train | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.