id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
20,764 | def load_auto_suggestion_bindings():
    registry = Registry()
    handle = registry.add_binding
    suggestion_available = Condition((lambda cli: ((cli.current_buffer.suggestion is not None) and cli.current_buffer.document.is_cursor_at_the_end)))

    @handle(Keys.ControlF, filter=suggestion_available)
    @handle(Keys.ControlE, filter=suggestion_available)
    @handle(Keys.Right, filter=suggestion_available)
    def _(event):
        u' Accept suggestion. '
        b = event.current_buffer
        suggestion = b.suggestion
        if suggestion:
            b.insert_text(suggestion.text)
    return registry
| [
"def",
"load_auto_suggestion_bindings",
"(",
")",
":",
"registry",
"=",
"Registry",
"(",
")",
"handle",
"=",
"registry",
".",
"add_binding",
"suggestion_available",
"=",
"Condition",
"(",
"(",
"lambda",
"cli",
":",
"(",
"(",
"cli",
".",
"current_buffer",
".",
"suggestion",
"is",
"not",
"None",
")",
"and",
"cli",
".",
"current_buffer",
".",
"document",
".",
"is_cursor_at_the_end",
")",
")",
")",
"@",
"handle",
"(",
"Keys",
".",
"ControlF",
",",
"filter",
"=",
"suggestion_available",
")",
"@",
"handle",
"(",
"Keys",
".",
"ControlE",
",",
"filter",
"=",
"suggestion_available",
")",
"@",
"handle",
"(",
"Keys",
".",
"Right",
",",
"filter",
"=",
"suggestion_available",
")",
"def",
"_",
"(",
"event",
")",
":",
"b",
"=",
"event",
".",
"current_buffer",
"suggestion",
"=",
"b",
".",
"suggestion",
"if",
"suggestion",
":",
"b",
".",
"insert_text",
"(",
"suggestion",
".",
"text",
")",
"return",
"registry"
] | key bindings for accepting auto suggestion text . | train | true |
20,765 | def is_backend_frozen(context, host, cluster_name):
    return IMPL.is_backend_frozen(context, host, cluster_name)
| [
"def",
"is_backend_frozen",
"(",
"context",
",",
"host",
",",
"cluster_name",
")",
":",
"return",
"IMPL",
".",
"is_backend_frozen",
"(",
"context",
",",
"host",
",",
"cluster_name",
")"
] | check if a storage backend is frozen based on host and cluster_name . | train | false |
20,766 | def make_cookie(name, value, max_age=None, path='/', domain=None, secure=False, httponly=False, comment=None):
    if (value is None):
        value = ''
        max_age = 0
        expires = 'Wed, 31-Dec-97 23:59:59 GMT'
    elif isinstance(max_age, timedelta):
        max_age = ((((max_age.days * 60) * 60) * 24) + max_age.seconds)
        expires = max_age
    else:
        expires = max_age
    morsel = Morsel(name, value)
    if (domain is not None):
        morsel.domain = bytes_(domain)
    if (path is not None):
        morsel.path = bytes_(path)
    if httponly:
        morsel.httponly = True
    if secure:
        morsel.secure = True
    if (max_age is not None):
        morsel.max_age = max_age
    if (expires is not None):
        morsel.expires = expires
    if (comment is not None):
        morsel.comment = bytes_(comment)
    return morsel.serialize()
| [
"def",
"make_cookie",
"(",
"name",
",",
"value",
",",
"max_age",
"=",
"None",
",",
"path",
"=",
"'/'",
",",
"domain",
"=",
"None",
",",
"secure",
"=",
"False",
",",
"httponly",
"=",
"False",
",",
"comment",
"=",
"None",
")",
":",
"if",
"(",
"value",
"is",
"None",
")",
":",
"value",
"=",
"''",
"max_age",
"=",
"0",
"expires",
"=",
"'Wed, 31-Dec-97 23:59:59 GMT'",
"elif",
"isinstance",
"(",
"max_age",
",",
"timedelta",
")",
":",
"max_age",
"=",
"(",
"(",
"(",
"(",
"max_age",
".",
"days",
"*",
"60",
")",
"*",
"60",
")",
"*",
"24",
")",
"+",
"max_age",
".",
"seconds",
")",
"expires",
"=",
"max_age",
"else",
":",
"expires",
"=",
"max_age",
"morsel",
"=",
"Morsel",
"(",
"name",
",",
"value",
")",
"if",
"(",
"domain",
"is",
"not",
"None",
")",
":",
"morsel",
".",
"domain",
"=",
"bytes_",
"(",
"domain",
")",
"if",
"(",
"path",
"is",
"not",
"None",
")",
":",
"morsel",
".",
"path",
"=",
"bytes_",
"(",
"path",
")",
"if",
"httponly",
":",
"morsel",
".",
"httponly",
"=",
"True",
"if",
"secure",
":",
"morsel",
".",
"secure",
"=",
"True",
"if",
"(",
"max_age",
"is",
"not",
"None",
")",
":",
"morsel",
".",
"max_age",
"=",
"max_age",
"if",
"(",
"expires",
"is",
"not",
"None",
")",
":",
"morsel",
".",
"expires",
"=",
"expires",
"if",
"(",
"comment",
"is",
"not",
"None",
")",
":",
"morsel",
".",
"comment",
"=",
"bytes_",
"(",
"comment",
")",
"return",
"morsel",
".",
"serialize",
"(",
")"
] | create and return a cookie . | train | false |
20,768 | def delete_app_tars(location):
    for (dir_path, _, filenames) in os.walk(location):
        for filename in filenames:
            if (not remove('{0}/{1}'.format(dir_path, filename))):
                return False
    return True
| [
"def",
"delete_app_tars",
"(",
"location",
")",
":",
"for",
"(",
"dir_path",
",",
"_",
",",
"filenames",
")",
"in",
"os",
".",
"walk",
"(",
"location",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"if",
"(",
"not",
"remove",
"(",
"'{0}/{1}'",
".",
"format",
"(",
"dir_path",
",",
"filename",
")",
")",
")",
":",
"return",
"False",
"return",
"True"
] | deletes applications tars from the designated location . | train | false |
20,769 | @library.global_function
def is_watching_discussion_locale(user, locale):
    return NewPostInLocaleEvent.is_notifying(user, locale=locale)
| [
"@",
"library",
".",
"global_function",
"def",
"is_watching_discussion_locale",
"(",
"user",
",",
"locale",
")",
":",
"return",
"NewPostInLocaleEvent",
".",
"is_notifying",
"(",
"user",
",",
"locale",
"=",
"locale",
")"
] | return true if user is watching the discussion for locale and false . | train | false |
20,771 | def _autocomplete(client, url_part, input_text, offset=None, location=None, radius=None, language=None, type=None, components=None):
    params = {'input': input_text}
    if offset:
        params['offset'] = offset
    if location:
        params['location'] = convert.latlng(location)
    if radius:
        params['radius'] = radius
    if language:
        params['language'] = language
    if type:
        params['type'] = type
    if components:
        params['components'] = convert.components(components)
    url = ('/maps/api/place/%sautocomplete/json' % url_part)
    return client._get(url, params)['predictions']
| [
"def",
"_autocomplete",
"(",
"client",
",",
"url_part",
",",
"input_text",
",",
"offset",
"=",
"None",
",",
"location",
"=",
"None",
",",
"radius",
"=",
"None",
",",
"language",
"=",
"None",
",",
"type",
"=",
"None",
",",
"components",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'input'",
":",
"input_text",
"}",
"if",
"offset",
":",
"params",
"[",
"'offset'",
"]",
"=",
"offset",
"if",
"location",
":",
"params",
"[",
"'location'",
"]",
"=",
"convert",
".",
"latlng",
"(",
"location",
")",
"if",
"radius",
":",
"params",
"[",
"'radius'",
"]",
"=",
"radius",
"if",
"language",
":",
"params",
"[",
"'language'",
"]",
"=",
"language",
"if",
"type",
":",
"params",
"[",
"'type'",
"]",
"=",
"type",
"if",
"components",
":",
"params",
"[",
"'components'",
"]",
"=",
"convert",
".",
"components",
"(",
"components",
")",
"url",
"=",
"(",
"'/maps/api/place/%sautocomplete/json'",
"%",
"url_part",
")",
"return",
"client",
".",
"_get",
"(",
"url",
",",
"params",
")",
"[",
"'predictions'",
"]"
] | internal handler for autocomplete and autocomplete_query . | train | false |
20,772 | def get_time_in_millisecs(datetime_obj):
    seconds = (time.mktime(datetime_obj.timetuple()) * 1000)
    return (seconds + (datetime_obj.microsecond / 1000.0))
| [
"def",
"get_time_in_millisecs",
"(",
"datetime_obj",
")",
":",
"seconds",
"=",
"(",
"time",
".",
"mktime",
"(",
"datetime_obj",
".",
"timetuple",
"(",
")",
")",
"*",
"1000",
")",
"return",
"(",
"seconds",
"+",
"(",
"datetime_obj",
".",
"microsecond",
"/",
"1000.0",
")",
")"
] | returns time in milliseconds since the epoch . | train | false |
20,773 | def test_iforest():
    X_train = np.array([[0, 1], [1, 2]])
    X_test = np.array([[2, 1], [1, 1]])
    grid = ParameterGrid({'n_estimators': [3], 'max_samples': [0.5, 1.0, 3], 'bootstrap': [True, False]})
    with ignore_warnings():
        for params in grid:
            IsolationForest(random_state=rng, **params).fit(X_train).predict(X_test)
| [
"def",
"test_iforest",
"(",
")",
":",
"X_train",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"1",
"]",
",",
"[",
"1",
",",
"2",
"]",
"]",
")",
"X_test",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"2",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
"]",
"]",
")",
"grid",
"=",
"ParameterGrid",
"(",
"{",
"'n_estimators'",
":",
"[",
"3",
"]",
",",
"'max_samples'",
":",
"[",
"0.5",
",",
"1.0",
",",
"3",
"]",
",",
"'bootstrap'",
":",
"[",
"True",
",",
"False",
"]",
"}",
")",
"with",
"ignore_warnings",
"(",
")",
":",
"for",
"params",
"in",
"grid",
":",
"IsolationForest",
"(",
"random_state",
"=",
"rng",
",",
"**",
"params",
")",
".",
"fit",
"(",
"X_train",
")",
".",
"predict",
"(",
"X_test",
")"
] | check isolation forest for various parameter settings . | train | false |
20,774 | def localtime(value=None, timezone=None):
    if (value is None):
        value = now()
    if (timezone is None):
        timezone = get_current_timezone()
    if is_naive(value):
        raise ValueError('localtime() cannot be applied to a naive datetime')
    value = value.astimezone(timezone)
    if hasattr(timezone, 'normalize'):
        value = timezone.normalize(value)
    return value
| [
"def",
"localtime",
"(",
"value",
"=",
"None",
",",
"timezone",
"=",
"None",
")",
":",
"if",
"(",
"value",
"is",
"None",
")",
":",
"value",
"=",
"now",
"(",
")",
"if",
"(",
"timezone",
"is",
"None",
")",
":",
"timezone",
"=",
"get_current_timezone",
"(",
")",
"if",
"is_naive",
"(",
"value",
")",
":",
"raise",
"ValueError",
"(",
"'localtime() cannot be applied to a naive datetime'",
")",
"value",
"=",
"value",
".",
"astimezone",
"(",
"timezone",
")",
"if",
"hasattr",
"(",
"timezone",
",",
"'normalize'",
")",
":",
"value",
"=",
"timezone",
".",
"normalize",
"(",
"value",
")",
"return",
"value"
] | converts a datetime to local time in the active time zone . | train | false |
20,775 | def _serialize_dependencies(artifact):
    dependencies = {}
    for relation in artifact.metadata.attributes.dependencies.values():
        serialized_dependency = []
        if isinstance(relation, declarative.ListAttributeDefinition):
            for dep in relation.get_value(artifact):
                serialized_dependency.append(dep.id)
        else:
            relation_data = relation.get_value(artifact)
            if relation_data:
                serialized_dependency.append(relation.get_value(artifact).id)
        dependencies[relation.name] = serialized_dependency
    return dependencies
| [
"def",
"_serialize_dependencies",
"(",
"artifact",
")",
":",
"dependencies",
"=",
"{",
"}",
"for",
"relation",
"in",
"artifact",
".",
"metadata",
".",
"attributes",
".",
"dependencies",
".",
"values",
"(",
")",
":",
"serialized_dependency",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"relation",
",",
"declarative",
".",
"ListAttributeDefinition",
")",
":",
"for",
"dep",
"in",
"relation",
".",
"get_value",
"(",
"artifact",
")",
":",
"serialized_dependency",
".",
"append",
"(",
"dep",
".",
"id",
")",
"else",
":",
"relation_data",
"=",
"relation",
".",
"get_value",
"(",
"artifact",
")",
"if",
"relation_data",
":",
"serialized_dependency",
".",
"append",
"(",
"relation",
".",
"get_value",
"(",
"artifact",
")",
".",
"id",
")",
"dependencies",
"[",
"relation",
".",
"name",
"]",
"=",
"serialized_dependency",
"return",
"dependencies"
] | returns a dict of serialized dependencies for given artifact . | train | false |
20,779 | def timeparse(sval):
    match = re.match(((u'\\s*' + TIMEFORMAT) + u'\\s*$'), sval, re.I)
    if ((not match) or (not match.group(0).strip())):
        return
    mdict = match.groupdict()
    return sum(((MULTIPLIERS[k] * cast(v)) for (k, v) in mdict.items() if (v is not None)))
| [
"def",
"timeparse",
"(",
"sval",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"(",
"(",
"u'\\\\s*'",
"+",
"TIMEFORMAT",
")",
"+",
"u'\\\\s*$'",
")",
",",
"sval",
",",
"re",
".",
"I",
")",
"if",
"(",
"(",
"not",
"match",
")",
"or",
"(",
"not",
"match",
".",
"group",
"(",
"0",
")",
".",
"strip",
"(",
")",
")",
")",
":",
"return",
"mdict",
"=",
"match",
".",
"groupdict",
"(",
")",
"return",
"sum",
"(",
"(",
"(",
"MULTIPLIERS",
"[",
"k",
"]",
"*",
"cast",
"(",
"v",
")",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"mdict",
".",
"items",
"(",
")",
"if",
"(",
"v",
"is",
"not",
"None",
")",
")",
")"
] | parse a time expression . | train | true |
20,780 | def next_host():
    host = _determine_host(query(quiet=True))
    print(host)
    return host
| [
"def",
"next_host",
"(",
")",
":",
"host",
"=",
"_determine_host",
"(",
"query",
"(",
"quiet",
"=",
"True",
")",
")",
"print",
"(",
"host",
")",
"return",
"host"
] | return the host to use for the next autodeployed vm . | train | false |
20,781 | def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples, n_components):
    X_embedded = params.reshape(n_samples, n_components)
    n = pdist(X_embedded, 'sqeuclidean')
    n += 1.0
    n /= degrees_of_freedom
    n **= ((degrees_of_freedom + 1.0) / (-2.0))
    Q = np.maximum((n / (2.0 * np.sum(n))), MACHINE_EPSILON)
    if (len(P.shape) == 2):
        P = squareform(P)
    kl_divergence = (2.0 * np.dot(P, np.log((P / Q))))
    return kl_divergence
| [
"def",
"_kl_divergence_error",
"(",
"params",
",",
"P",
",",
"neighbors",
",",
"degrees_of_freedom",
",",
"n_samples",
",",
"n_components",
")",
":",
"X_embedded",
"=",
"params",
".",
"reshape",
"(",
"n_samples",
",",
"n_components",
")",
"n",
"=",
"pdist",
"(",
"X_embedded",
",",
"'sqeuclidean'",
")",
"n",
"+=",
"1.0",
"n",
"/=",
"degrees_of_freedom",
"n",
"**=",
"(",
"(",
"degrees_of_freedom",
"+",
"1.0",
")",
"/",
"(",
"-",
"2.0",
")",
")",
"Q",
"=",
"np",
".",
"maximum",
"(",
"(",
"n",
"/",
"(",
"2.0",
"*",
"np",
".",
"sum",
"(",
"n",
")",
")",
")",
",",
"MACHINE_EPSILON",
")",
"if",
"(",
"len",
"(",
"P",
".",
"shape",
")",
"==",
"2",
")",
":",
"P",
"=",
"squareform",
"(",
"P",
")",
"kl_divergence",
"=",
"(",
"2.0",
"*",
"np",
".",
"dot",
"(",
"P",
",",
"np",
".",
"log",
"(",
"(",
"P",
"/",
"Q",
")",
")",
")",
")",
"return",
"kl_divergence"
] | t-sne objective function: the absolute error of the kl divergence of p_ijs and q_ijs . | train | false |
20,782 | def universal_path(path):
    if (_os.name != 'posix'):
        path = path.replace(_os.path.sep, '/')
    return path
| [
"def",
"universal_path",
"(",
"path",
")",
":",
"if",
"(",
"_os",
".",
"name",
"!=",
"'posix'",
")",
":",
"path",
"=",
"path",
".",
"replace",
"(",
"_os",
".",
"path",
".",
"sep",
",",
"'/'",
")",
"return",
"path"
] | converts a path name from its operating system-specific format to a universal path notation . | train | false |
20,783 | def scipy_exponweib_sucks(value, alpha, beta):
    pdf = np.log(sp.exponweib.pdf(value, 1, alpha, scale=beta))
    if np.isinf(pdf):
        return sp.exponweib.logpdf(value, 1, alpha, scale=beta)
    return pdf
| [
"def",
"scipy_exponweib_sucks",
"(",
"value",
",",
"alpha",
",",
"beta",
")",
":",
"pdf",
"=",
"np",
".",
"log",
"(",
"sp",
".",
"exponweib",
".",
"pdf",
"(",
"value",
",",
"1",
",",
"alpha",
",",
"scale",
"=",
"beta",
")",
")",
"if",
"np",
".",
"isinf",
"(",
"pdf",
")",
":",
"return",
"sp",
".",
"exponweib",
".",
"logpdf",
"(",
"value",
",",
"1",
",",
"alpha",
",",
"scale",
"=",
"beta",
")",
"return",
"pdf"
] | this function is required because scipys implementation of the weibull pdf fails for some valid combinations of parameters . | train | false |
20,784 | @ajax_required
@get_unit_context('view')
def get_more_context(request, unit, **kwargs_):
    store = request.store
    json = {}
    gap = int(request.GET.get('gap', 0))
    qty = int(request.GET.get('qty', 1))
    json['ctx'] = _filter_ctx_units(store.units, unit, qty, gap)
    return JsonResponse(json)
| [
"@",
"ajax_required",
"@",
"get_unit_context",
"(",
"'view'",
")",
"def",
"get_more_context",
"(",
"request",
",",
"unit",
",",
"**",
"kwargs_",
")",
":",
"store",
"=",
"request",
".",
"store",
"json",
"=",
"{",
"}",
"gap",
"=",
"int",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"'gap'",
",",
"0",
")",
")",
"qty",
"=",
"int",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"'qty'",
",",
"1",
")",
")",
"json",
"[",
"'ctx'",
"]",
"=",
"_filter_ctx_units",
"(",
"store",
".",
"units",
",",
"unit",
",",
"qty",
",",
"gap",
")",
"return",
"JsonResponse",
"(",
"json",
")"
] | retrieves more context units . | train | false |
20,785 | def test_periodic_command_fixed_delay():
    fd = schedule.PeriodicCommandFixedDelay.at_time(at=datetime.datetime.now(), delay=datetime.timedelta(seconds=2), function=(lambda : None))
    assert (fd.due() is True)
    assert (fd.next().due() is False)
| [
"def",
"test_periodic_command_fixed_delay",
"(",
")",
":",
"fd",
"=",
"schedule",
".",
"PeriodicCommandFixedDelay",
".",
"at_time",
"(",
"at",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
",",
"delay",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"2",
")",
",",
"function",
"=",
"(",
"lambda",
":",
"None",
")",
")",
"assert",
"(",
"fd",
".",
"due",
"(",
")",
"is",
"True",
")",
"assert",
"(",
"fd",
".",
"next",
"(",
")",
".",
"due",
"(",
")",
"is",
"False",
")"
] | test that we can construct a periodic command with a fixed initial delay . | train | false |
20,786 | def test_tl_sk_estimator():
    check_estimator(TomekLinks)
| [
"def",
"test_tl_sk_estimator",
"(",
")",
":",
"check_estimator",
"(",
"TomekLinks",
")"
] | test the sklearn estimator compatibility . | train | false |
20,787 | def _callable_repr(method):
    try:
        return fullyQualifiedName(method)
    except AttributeError:
        return safe_repr(method)
| [
"def",
"_callable_repr",
"(",
"method",
")",
":",
"try",
":",
"return",
"fullyQualifiedName",
"(",
"method",
")",
"except",
"AttributeError",
":",
"return",
"safe_repr",
"(",
"method",
")"
] | get a useful representation method . | train | false |
20,788 | def test_min_zero():
    mlp = MLP(input_space=VectorSpace(1), layers=[Maxout(layer_name='test_layer', num_units=1, num_pieces=2, irange=0.05, min_zero=True)])
    X = T.matrix()
    output = mlp.fprop(X)
    f = function([X], output, mode='DEBUG_MODE')
    f(np.zeros((1, 1)).astype(X.dtype))
| [
"def",
"test_min_zero",
"(",
")",
":",
"mlp",
"=",
"MLP",
"(",
"input_space",
"=",
"VectorSpace",
"(",
"1",
")",
",",
"layers",
"=",
"[",
"Maxout",
"(",
"layer_name",
"=",
"'test_layer'",
",",
"num_units",
"=",
"1",
",",
"num_pieces",
"=",
"2",
",",
"irange",
"=",
"0.05",
",",
"min_zero",
"=",
"True",
")",
"]",
")",
"X",
"=",
"T",
".",
"matrix",
"(",
")",
"output",
"=",
"mlp",
".",
"fprop",
"(",
"X",
")",
"f",
"=",
"function",
"(",
"[",
"X",
"]",
",",
"output",
",",
"mode",
"=",
"'DEBUG_MODE'",
")",
"f",
"(",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"1",
")",
")",
".",
"astype",
"(",
"X",
".",
"dtype",
")",
")"
] | this test guards against a bug where the size of the zero buffer used with the min_zero flag was specified to have the wrong size . | train | false |
20,789 | def words(string, filter=(lambda w: w.strip("'").isalnum()), punctuation=PUNCTUATION, **kwargs):
    string = decode_utf8(string)
    string = re.sub("([a-z|A-Z])'(m|s|ve|re|ll|d)", u'\\1 <QUOTE/>\\2', string)
    string = re.sub("(c|d|gl|j|l|m|n|s|t|un)'([a-z|A-Z])", u'\\1<QUOTE/> \\2', string)
    words = (w.strip(punctuation).replace(u'<QUOTE/>', "'", 1) for w in string.split())
    words = (w for w in words if ((filter is None) or (filter(w) is not False)))
    words = [w for w in words if w]
    return words
| [
"def",
"words",
"(",
"string",
",",
"filter",
"=",
"(",
"lambda",
"w",
":",
"w",
".",
"strip",
"(",
"\"'\"",
")",
".",
"isalnum",
"(",
")",
")",
",",
"punctuation",
"=",
"PUNCTUATION",
",",
"**",
"kwargs",
")",
":",
"string",
"=",
"decode_utf8",
"(",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"\"([a-z|A-Z])'(m|s|ve|re|ll|d)\"",
",",
"u'\\\\1 <QUOTE/>\\\\2'",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"\"(c|d|gl|j|l|m|n|s|t|un)'([a-z|A-Z])\"",
",",
"u'\\\\1<QUOTE/> \\\\2'",
",",
"string",
")",
"words",
"=",
"(",
"w",
".",
"strip",
"(",
"punctuation",
")",
".",
"replace",
"(",
"u'<QUOTE/>'",
",",
"\"'\"",
",",
"1",
")",
"for",
"w",
"in",
"string",
".",
"split",
"(",
")",
")",
"words",
"=",
"(",
"w",
"for",
"w",
"in",
"words",
"if",
"(",
"(",
"filter",
"is",
"None",
")",
"or",
"(",
"filter",
"(",
"w",
")",
"is",
"not",
"False",
")",
")",
")",
"words",
"=",
"[",
"w",
"for",
"w",
"in",
"words",
"if",
"w",
"]",
"return",
"words"
] | an iterator over tokens in text . | train | false |
20,790 | def run_gdb(*args, **env_vars):
    if env_vars:
        env = os.environ.copy()
        env.update(env_vars)
    else:
        env = None
    base_cmd = ('gdb', '--batch', '-nx')
    if ((gdb_major_version, gdb_minor_version) >= (7, 4)):
        base_cmd += ('-iex', ('add-auto-load-safe-path ' + checkout_hook_path))
    (out, err) = subprocess.Popen((base_cmd + args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env).communicate()
    return (out.decode('utf-8', 'replace'), err.decode('utf-8', 'replace'))
| [
"def",
"run_gdb",
"(",
"*",
"args",
",",
"**",
"env_vars",
")",
":",
"if",
"env_vars",
":",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"env",
".",
"update",
"(",
"env_vars",
")",
"else",
":",
"env",
"=",
"None",
"base_cmd",
"=",
"(",
"'gdb'",
",",
"'--batch'",
",",
"'-nx'",
")",
"if",
"(",
"(",
"gdb_major_version",
",",
"gdb_minor_version",
")",
">=",
"(",
"7",
",",
"4",
")",
")",
":",
"base_cmd",
"+=",
"(",
"'-iex'",
",",
"(",
"'add-auto-load-safe-path '",
"+",
"checkout_hook_path",
")",
")",
"(",
"out",
",",
"err",
")",
"=",
"subprocess",
".",
"Popen",
"(",
"(",
"base_cmd",
"+",
"args",
")",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"env",
"=",
"env",
")",
".",
"communicate",
"(",
")",
"return",
"(",
"out",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
",",
"err",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")"
] | runs gdb in --batch mode with the additional arguments given by *args . | train | false |
20,792 | def fixed_ip_cidrs(fixed_ips):
    return [ip_to_cidr(fixed_ip['ip_address'], fixed_ip.get('prefixlen')) for fixed_ip in fixed_ips]
| [
"def",
"fixed_ip_cidrs",
"(",
"fixed_ips",
")",
":",
"return",
"[",
"ip_to_cidr",
"(",
"fixed_ip",
"[",
"'ip_address'",
"]",
",",
"fixed_ip",
".",
"get",
"(",
"'prefixlen'",
")",
")",
"for",
"fixed_ip",
"in",
"fixed_ips",
"]"
] | create a list of a ports fixed ips in cidr notation . | train | false |
20,794 | def check_rpc_status(status):
    status.check_initialized()
    error_class = RpcError.from_state(status.state)
    if (error_class is not None):
        if (error_class is ApplicationError):
            raise error_class(status.error_message, status.error_name)
        else:
            raise error_class(status.error_message)
| [
"def",
"check_rpc_status",
"(",
"status",
")",
":",
"status",
".",
"check_initialized",
"(",
")",
"error_class",
"=",
"RpcError",
".",
"from_state",
"(",
"status",
".",
"state",
")",
"if",
"(",
"error_class",
"is",
"not",
"None",
")",
":",
"if",
"(",
"error_class",
"is",
"ApplicationError",
")",
":",
"raise",
"error_class",
"(",
"status",
".",
"error_message",
",",
"status",
".",
"error_name",
")",
"else",
":",
"raise",
"error_class",
"(",
"status",
".",
"error_message",
")"
] | function converts an error status to a raised exception . | train | false |
20,795 | def _ensure_tuple_or_list(arg_name, tuple_or_list):
    if (not isinstance(tuple_or_list, (tuple, list))):
        raise TypeError(('Expected %s to be a tuple or list. Received %r' % (arg_name, tuple_or_list)))
    return list(tuple_or_list)
| [
"def",
"_ensure_tuple_or_list",
"(",
"arg_name",
",",
"tuple_or_list",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"tuple_or_list",
",",
"(",
"tuple",
",",
"list",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'Expected %s to be a tuple or list. Received %r'",
"%",
"(",
"arg_name",
",",
"tuple_or_list",
")",
")",
")",
"return",
"list",
"(",
"tuple_or_list",
")"
] | ensures an input is a tuple or list . | train | true |
20,796 | def get_security_checks():
    _populate_security_checks()
    return _security_checks
| [
"def",
"get_security_checks",
"(",
")",
":",
"_populate_security_checks",
"(",
")",
"return",
"_security_checks"
] | return the list of security checks . | train | false |
20,797 | def Interface(ys, xs):
    if isinstance(ys, (list, tuple)):
        queue = list(ys)
    else:
        queue = [ys]
    out = OrderedDict()
    if isinstance(xs, (list, tuple)):
        for x in xs:
            out[x] = []
    else:
        out[xs] = []
    done = set()
    while queue:
        y = queue.pop()
        if (y in done):
            continue
        done = done.union(set([y]))
        for x in y.op.inputs:
            if (x in out):
                out[x].append(y)
            else:
                assert (id(x) not in [id(foo) for foo in out])
        queue.extend(y.op.inputs)
    return out
| [
"def",
"Interface",
"(",
"ys",
",",
"xs",
")",
":",
"if",
"isinstance",
"(",
"ys",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"queue",
"=",
"list",
"(",
"ys",
")",
"else",
":",
"queue",
"=",
"[",
"ys",
"]",
"out",
"=",
"OrderedDict",
"(",
")",
"if",
"isinstance",
"(",
"xs",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"x",
"in",
"xs",
":",
"out",
"[",
"x",
"]",
"=",
"[",
"]",
"else",
":",
"out",
"[",
"xs",
"]",
"=",
"[",
"]",
"done",
"=",
"set",
"(",
")",
"while",
"queue",
":",
"y",
"=",
"queue",
".",
"pop",
"(",
")",
"if",
"(",
"y",
"in",
"done",
")",
":",
"continue",
"done",
"=",
"done",
".",
"union",
"(",
"set",
"(",
"[",
"y",
"]",
")",
")",
"for",
"x",
"in",
"y",
".",
"op",
".",
"inputs",
":",
"if",
"(",
"x",
"in",
"out",
")",
":",
"out",
"[",
"x",
"]",
".",
"append",
"(",
"y",
")",
"else",
":",
"assert",
"(",
"id",
"(",
"x",
")",
"not",
"in",
"[",
"id",
"(",
"foo",
")",
"for",
"foo",
"in",
"out",
"]",
")",
"queue",
".",
"extend",
"(",
"y",
".",
"op",
".",
"inputs",
")",
"return",
"out"
] | maps xs to consumers . | train | false |
20,798 | def jaccard_coefficient(X, Y):
    if (X is Y):
        X = Y = np.asanyarray(X)
    else:
        X = np.asanyarray(X)
        Y = np.asanyarray(Y)
    result = []
    i = 0
    for arrayX in X:
        result.append([])
        for arrayY in Y:
            n_XY = np.intersect1d(arrayY, arrayX).size
            result[i].append((n_XY / ((float(len(arrayX)) + len(arrayY)) - n_XY)))
        result[i] = np.array(result[i])
        i += 1
    return np.array(result)
| [
"def",
"jaccard_coefficient",
"(",
"X",
",",
"Y",
")",
":",
"if",
"(",
"X",
"is",
"Y",
")",
":",
"X",
"=",
"Y",
"=",
"np",
".",
"asanyarray",
"(",
"X",
")",
"else",
":",
"X",
"=",
"np",
".",
"asanyarray",
"(",
"X",
")",
"Y",
"=",
"np",
".",
"asanyarray",
"(",
"Y",
")",
"result",
"=",
"[",
"]",
"i",
"=",
"0",
"for",
"arrayX",
"in",
"X",
":",
"result",
".",
"append",
"(",
"[",
"]",
")",
"for",
"arrayY",
"in",
"Y",
":",
"n_XY",
"=",
"np",
".",
"intersect1d",
"(",
"arrayY",
",",
"arrayX",
")",
".",
"size",
"result",
"[",
"i",
"]",
".",
"append",
"(",
"(",
"n_XY",
"/",
"(",
"(",
"float",
"(",
"len",
"(",
"arrayX",
")",
")",
"+",
"len",
"(",
"arrayY",
")",
")",
"-",
"n_XY",
")",
")",
")",
"result",
"[",
"i",
"]",
"=",
"np",
".",
"array",
"(",
"result",
"[",
"i",
"]",
")",
"i",
"+=",
"1",
"return",
"np",
".",
"array",
"(",
"result",
")"
] | compute the jaccard coefficient of all node pairs in ebunch . | train | false |
20,799 | def full_rary_tree(r, n, create_using=None):
    G = nx.empty_graph(n, create_using)
    G.add_edges_from(_tree_edges(n, r))
    return G
| [
"def",
"full_rary_tree",
"(",
"r",
",",
"n",
",",
"create_using",
"=",
"None",
")",
":",
"G",
"=",
"nx",
".",
"empty_graph",
"(",
"n",
",",
"create_using",
")",
"G",
".",
"add_edges_from",
"(",
"_tree_edges",
"(",
"n",
",",
"r",
")",
")",
"return",
"G"
] | creates a full r-ary tree of n vertices . | train | false |
20,801 | def norm01c(arr, center):
    arr = arr.copy()
    arr -= center
    arr /= (max((2 * arr.max()), ((-2) * arr.min())) + 1e-10)
    arr += 0.5
    assert (arr.min() >= 0)
    assert (arr.max() <= 1)
    return arr
| [
"def",
"norm01c",
"(",
"arr",
",",
"center",
")",
":",
"arr",
"=",
"arr",
".",
"copy",
"(",
")",
"arr",
"-=",
"center",
"arr",
"/=",
"(",
"max",
"(",
"(",
"2",
"*",
"arr",
".",
"max",
"(",
")",
")",
",",
"(",
"(",
"-",
"2",
")",
"*",
"arr",
".",
"min",
"(",
")",
")",
")",
"+",
"1e-10",
")",
"arr",
"+=",
"0.5",
"assert",
"(",
"arr",
".",
"min",
"(",
")",
">=",
"0",
")",
"assert",
"(",
"arr",
".",
"max",
"(",
")",
"<=",
"1",
")",
"return",
"arr"
] | maps the input range to [0 . | train | false |
20,802 | def getRandomString(slength):
    s = u''.join((random.choice(char_choices) for i in range(slength)))
    ns = u''
    lns = len(ns)
    while (lns < slength):
        ns += (s[lns:(lns + random.choice([1, 3, 5, 7, 9]))] + ' ')
        lns = len(ns)
    return ns[:slength]
| [
"def",
"getRandomString",
"(",
"slength",
")",
":",
"s",
"=",
"u''",
".",
"join",
"(",
"(",
"random",
".",
"choice",
"(",
"char_choices",
")",
"for",
"i",
"in",
"range",
"(",
"slength",
")",
")",
")",
"ns",
"=",
"u''",
"lns",
"=",
"len",
"(",
"ns",
")",
"while",
"(",
"lns",
"<",
"slength",
")",
":",
"ns",
"+=",
"(",
"s",
"[",
"lns",
":",
"(",
"lns",
"+",
"random",
".",
"choice",
"(",
"[",
"1",
",",
"3",
",",
"5",
",",
"7",
",",
"9",
"]",
")",
")",
"]",
"+",
"' '",
")",
"lns",
"=",
"len",
"(",
"ns",
")",
"return",
"ns",
"[",
":",
"slength",
"]"
] | create a random text string of length slength from the unichar values in char_choices; then split the random text into words of random length from [1 . | train | false |
20,803 | def df_from_json(data, rename=True, **kwargs):
    parsed = None
    if isinstance(data, str):
        with open(data) as data_file:
            data = json.load(data_file)
    if isinstance(data, list):
        parsed = json_normalize(data)
    elif isinstance(data, dict):
        for (k, v) in iteritems(data):
            if isinstance(v, list):
                parsed = json_normalize(v)
    if (rename and (parsed is not None)):
        parsed = denormalize_column_names(parsed)
    return parsed
| [
"def",
"df_from_json",
"(",
"data",
",",
"rename",
"=",
"True",
",",
"**",
"kwargs",
")",
":",
"parsed",
"=",
"None",
"if",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"with",
"open",
"(",
"data",
")",
"as",
"data_file",
":",
"data",
"=",
"json",
".",
"load",
"(",
"data_file",
")",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"parsed",
"=",
"json_normalize",
"(",
"data",
")",
"elif",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"for",
"(",
"k",
",",
"v",
")",
"in",
"iteritems",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"parsed",
"=",
"json_normalize",
"(",
"v",
")",
"if",
"(",
"rename",
"and",
"(",
"parsed",
"is",
"not",
"None",
")",
")",
":",
"parsed",
"=",
"denormalize_column_names",
"(",
"parsed",
")",
"return",
"parsed"
] | attempt to produce :class:pandas . | train | false |
20,804 | def _reverse_path(path):
    path_parts = path.split(_PATH_CELL_SEP)
    path_parts.reverse()
    return _PATH_CELL_SEP.join(path_parts)
| [
"def",
"_reverse_path",
"(",
"path",
")",
":",
"path_parts",
"=",
"path",
".",
"split",
"(",
"_PATH_CELL_SEP",
")",
"path_parts",
".",
"reverse",
"(",
")",
"return",
"_PATH_CELL_SEP",
".",
"join",
"(",
"path_parts",
")"
] | reverse a path . | train | false |
20,805 | def checkCoherenceMeasure(topics1, topics2, coherence):
    if (coherence in boolean_document_based):
        cm1 = CoherenceModel(topics=topics1, corpus=corpus, dictionary=dictionary, coherence=coherence)
        cm2 = CoherenceModel(topics=topics2, corpus=corpus, dictionary=dictionary, coherence=coherence)
    else:
        cm1 = CoherenceModel(topics=topics1, texts=texts, dictionary=dictionary, coherence=coherence)
        cm2 = CoherenceModel(topics=topics2, texts=texts, dictionary=dictionary, coherence=coherence)
    return (cm1.get_coherence() > cm2.get_coherence())
| [
"def",
"checkCoherenceMeasure",
"(",
"topics1",
",",
"topics2",
",",
"coherence",
")",
":",
"if",
"(",
"coherence",
"in",
"boolean_document_based",
")",
":",
"cm1",
"=",
"CoherenceModel",
"(",
"topics",
"=",
"topics1",
",",
"corpus",
"=",
"corpus",
",",
"dictionary",
"=",
"dictionary",
",",
"coherence",
"=",
"coherence",
")",
"cm2",
"=",
"CoherenceModel",
"(",
"topics",
"=",
"topics2",
",",
"corpus",
"=",
"corpus",
",",
"dictionary",
"=",
"dictionary",
",",
"coherence",
"=",
"coherence",
")",
"else",
":",
"cm1",
"=",
"CoherenceModel",
"(",
"topics",
"=",
"topics1",
",",
"texts",
"=",
"texts",
",",
"dictionary",
"=",
"dictionary",
",",
"coherence",
"=",
"coherence",
")",
"cm2",
"=",
"CoherenceModel",
"(",
"topics",
"=",
"topics2",
",",
"texts",
"=",
"texts",
",",
"dictionary",
"=",
"dictionary",
",",
"coherence",
"=",
"coherence",
")",
"return",
"(",
"cm1",
".",
"get_coherence",
"(",
")",
">",
"cm2",
".",
"get_coherence",
"(",
")",
")"
] | check provided topic coherence algorithm on given topics . | train | false |
20,806 | def coord_type_from_ctype(ctype):
    if ((ctype[:4] in ['RA--']) or (ctype[1:4] == 'LON')):
        return ('longitude', None)
    elif (ctype[:4] in ['HPLN']):
        return ('longitude', 180.0)
    elif ((ctype[:4] in ['DEC-', 'HPLT']) or (ctype[1:4] == 'LAT')):
        return ('latitude', None)
    else:
        return ('scalar', None)
| [
"def",
"coord_type_from_ctype",
"(",
"ctype",
")",
":",
"if",
"(",
"(",
"ctype",
"[",
":",
"4",
"]",
"in",
"[",
"'RA--'",
"]",
")",
"or",
"(",
"ctype",
"[",
"1",
":",
"4",
"]",
"==",
"'LON'",
")",
")",
":",
"return",
"(",
"'longitude'",
",",
"None",
")",
"elif",
"(",
"ctype",
"[",
":",
"4",
"]",
"in",
"[",
"'HPLN'",
"]",
")",
":",
"return",
"(",
"'longitude'",
",",
"180.0",
")",
"elif",
"(",
"(",
"ctype",
"[",
":",
"4",
"]",
"in",
"[",
"'DEC-'",
",",
"'HPLT'",
"]",
")",
"or",
"(",
"ctype",
"[",
"1",
":",
"4",
"]",
"==",
"'LAT'",
")",
")",
":",
"return",
"(",
"'latitude'",
",",
"None",
")",
"else",
":",
"return",
"(",
"'scalar'",
",",
"None",
")"
] | determine whether a particular wcs ctype corresponds to an angle or scalar coordinate . | train | false |
20,807 | def _register_assert_equal_wrapper(type_, assert_eq):
    @assert_equal.register(type_, type_)
    def assert_ndframe_equal(result, expected, path=(), msg='', **kwargs):
        try:
            assert_eq(result, expected, **filter_kwargs(assert_eq, kwargs))
        except AssertionError as e:
            raise AssertionError((_fmt_msg(msg) + '\n'.join((str(e), _fmt_path(path)))))
    return assert_ndframe_equal
| [
"def",
"_register_assert_equal_wrapper",
"(",
"type_",
",",
"assert_eq",
")",
":",
"@",
"assert_equal",
".",
"register",
"(",
"type_",
",",
"type_",
")",
"def",
"assert_ndframe_equal",
"(",
"result",
",",
"expected",
",",
"path",
"=",
"(",
")",
",",
"msg",
"=",
"''",
",",
"**",
"kwargs",
")",
":",
"try",
":",
"assert_eq",
"(",
"result",
",",
"expected",
",",
"**",
"filter_kwargs",
"(",
"assert_eq",
",",
"kwargs",
")",
")",
"except",
"AssertionError",
"as",
"e",
":",
"raise",
"AssertionError",
"(",
"(",
"_fmt_msg",
"(",
"msg",
")",
"+",
"'\\n'",
".",
"join",
"(",
"(",
"str",
"(",
"e",
")",
",",
"_fmt_path",
"(",
"path",
")",
")",
")",
")",
")",
"return",
"assert_ndframe_equal"
] | register a new check for an ndframe object . | train | false |
20,808 | def CDLMATCHINGLOW(barDs, count):
    return call_talib_with_ohlc(barDs, count, talib.CDLMATCHINGLOW)
| [
"def",
"CDLMATCHINGLOW",
"(",
"barDs",
",",
"count",
")",
":",
"return",
"call_talib_with_ohlc",
"(",
"barDs",
",",
"count",
",",
"talib",
".",
"CDLMATCHINGLOW",
")"
] | matching low . | train | false |
20,809 | @pytest.fixture
def replay_file(replay_test_dir, template_name):
    file_name = '{}.json'.format(template_name)
    return os.path.join(replay_test_dir, file_name)
| [
"@",
"pytest",
".",
"fixture",
"def",
"replay_file",
"(",
"replay_test_dir",
",",
"template_name",
")",
":",
"file_name",
"=",
"'{}.json'",
".",
"format",
"(",
"template_name",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"replay_test_dir",
",",
"file_name",
")"
] | fixture to return a actual file name of the dump . | train | false |
20,810 | def ricker_function(resolution, center, width):
    x = np.linspace(0, (resolution - 1), resolution)
    x = (((2 / ((np.sqrt((3 * width)) * (np.pi ** 1)) / 4)) * (1 - (((x - center) ** 2) / (width ** 2)))) * np.exp(((- ((x - center) ** 2)) / (2 * (width ** 2)))))
    return x
| [
"def",
"ricker_function",
"(",
"resolution",
",",
"center",
",",
"width",
")",
":",
"x",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"(",
"resolution",
"-",
"1",
")",
",",
"resolution",
")",
"x",
"=",
"(",
"(",
"(",
"2",
"/",
"(",
"(",
"np",
".",
"sqrt",
"(",
"(",
"3",
"*",
"width",
")",
")",
"*",
"(",
"np",
".",
"pi",
"**",
"1",
")",
")",
"/",
"4",
")",
")",
"*",
"(",
"1",
"-",
"(",
"(",
"(",
"x",
"-",
"center",
")",
"**",
"2",
")",
"/",
"(",
"width",
"**",
"2",
")",
")",
")",
")",
"*",
"np",
".",
"exp",
"(",
"(",
"(",
"-",
"(",
"(",
"x",
"-",
"center",
")",
"**",
"2",
")",
")",
"/",
"(",
"2",
"*",
"(",
"width",
"**",
"2",
")",
")",
")",
")",
")",
"return",
"x"
] | discrete sub-sampled ricker wavelet . | train | false |
20,812 | def _int0oo(g1, g2, x):
    (eta, _) = _get_coeff_exp(g1.argument, x)
    (omega, _) = _get_coeff_exp(g2.argument, x)

    def neg(l):
        return [(- x) for x in l]
    a1 = (neg(g1.bm) + list(g2.an))
    a2 = (list(g2.aother) + neg(g1.bother))
    b1 = (neg(g1.an) + list(g2.bm))
    b2 = (list(g2.bother) + neg(g1.aother))
    return (meijerg(a1, a2, b1, b2, (omega / eta)) / eta)
| [
"def",
"_int0oo",
"(",
"g1",
",",
"g2",
",",
"x",
")",
":",
"(",
"eta",
",",
"_",
")",
"=",
"_get_coeff_exp",
"(",
"g1",
".",
"argument",
",",
"x",
")",
"(",
"omega",
",",
"_",
")",
"=",
"_get_coeff_exp",
"(",
"g2",
".",
"argument",
",",
"x",
")",
"def",
"neg",
"(",
"l",
")",
":",
"return",
"[",
"(",
"-",
"x",
")",
"for",
"x",
"in",
"l",
"]",
"a1",
"=",
"(",
"neg",
"(",
"g1",
".",
"bm",
")",
"+",
"list",
"(",
"g2",
".",
"an",
")",
")",
"a2",
"=",
"(",
"list",
"(",
"g2",
".",
"aother",
")",
"+",
"neg",
"(",
"g1",
".",
"bother",
")",
")",
"b1",
"=",
"(",
"neg",
"(",
"g1",
".",
"an",
")",
"+",
"list",
"(",
"g2",
".",
"bm",
")",
")",
"b2",
"=",
"(",
"list",
"(",
"g2",
".",
"bother",
")",
"+",
"neg",
"(",
"g1",
".",
"aother",
")",
")",
"return",
"(",
"meijerg",
"(",
"a1",
",",
"a2",
",",
"b1",
",",
"b2",
",",
"(",
"omega",
"/",
"eta",
")",
")",
"/",
"eta",
")"
] | express integral from zero to infinity g1*g2 using a g function . | train | false |
20,813 | def sp_zeros_like(x):
    (_, _, indptr, shape) = csm_properties(x)
    return CSM(format=x.format)(data=np.array([], dtype=x.type.dtype), indices=np.array([], dtype='int32'), indptr=tensor.zeros_like(indptr), shape=shape)
| [
"def",
"sp_zeros_like",
"(",
"x",
")",
":",
"(",
"_",
",",
"_",
",",
"indptr",
",",
"shape",
")",
"=",
"csm_properties",
"(",
"x",
")",
"return",
"CSM",
"(",
"format",
"=",
"x",
".",
"format",
")",
"(",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"x",
".",
"type",
".",
"dtype",
")",
",",
"indices",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"'int32'",
")",
",",
"indptr",
"=",
"tensor",
".",
"zeros_like",
"(",
"indptr",
")",
",",
"shape",
"=",
"shape",
")"
] | construct a sparse matrix of zeros . | train | false |
20,814 | def EscapeXcodeDefine(s):
    return re.sub(_xcode_define_re, '\\\\\\1', s)
| [
"def",
"EscapeXcodeDefine",
"(",
"s",
")",
":",
"return",
"re",
".",
"sub",
"(",
"_xcode_define_re",
",",
"'\\\\\\\\\\\\1'",
",",
"s",
")"
] | we must escape the defines that we give to xcode so that it knows not to split on spaces and to respect backslash and quote literals . | train | false |
20,816 | def _bdev(dev=None):
    if (dev is None):
        dev = _fssys('cache0')
    else:
        dev = _bcpath(dev)
    if (not dev):
        return False
    else:
        return _devbase(os.path.realpath(os.path.join(dev, '../')))
| [
"def",
"_bdev",
"(",
"dev",
"=",
"None",
")",
":",
"if",
"(",
"dev",
"is",
"None",
")",
":",
"dev",
"=",
"_fssys",
"(",
"'cache0'",
")",
"else",
":",
"dev",
"=",
"_bcpath",
"(",
"dev",
")",
"if",
"(",
"not",
"dev",
")",
":",
"return",
"False",
"else",
":",
"return",
"_devbase",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dev",
",",
"'../'",
")",
")",
")"
] | resolve a bcachex or cache to a real dev :return: basename of bcache dev . | train | false |
20,817 | def _validate_credentials_file(module, credentials_file, require_valid_json=True, check_libcloud=False):
    try:
        with open(credentials_file) as credentials:
            json.loads(credentials.read())
            if (check_libcloud and (LooseVersion(libcloud.__version__) < '0.17.0')):
                module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. Upgrade to libcloud>=0.17.0.')
            return True
    except IOError as e:
        module.fail_json(msg=('GCP Credentials File %s not found.' % credentials_file), changed=False)
        return False
    except ValueError as e:
        if require_valid_json:
            module.fail_json(msg=('GCP Credentials File %s invalid. Must be valid JSON.' % credentials_file), changed=False)
        else:
            display.deprecated(msg='Non-JSON credentials file provided. This format is deprecated. Please generate a new JSON key from the Google Cloud console', version=2.5)
            return True
| [
"def",
"_validate_credentials_file",
"(",
"module",
",",
"credentials_file",
",",
"require_valid_json",
"=",
"True",
",",
"check_libcloud",
"=",
"False",
")",
":",
"try",
":",
"with",
"open",
"(",
"credentials_file",
")",
"as",
"credentials",
":",
"json",
".",
"loads",
"(",
"credentials",
".",
"read",
"(",
")",
")",
"if",
"(",
"check_libcloud",
"and",
"(",
"LooseVersion",
"(",
"libcloud",
".",
"__version__",
")",
"<",
"'0.17.0'",
")",
")",
":",
"module",
".",
"fail_json",
"(",
"msg",
"=",
"'Using JSON credentials but libcloud minimum version not met. Upgrade to libcloud>=0.17.0.'",
")",
"return",
"True",
"except",
"IOError",
"as",
"e",
":",
"module",
".",
"fail_json",
"(",
"msg",
"=",
"(",
"'GCP Credentials File %s not found.'",
"%",
"credentials_file",
")",
",",
"changed",
"=",
"False",
")",
"return",
"False",
"except",
"ValueError",
"as",
"e",
":",
"if",
"require_valid_json",
":",
"module",
".",
"fail_json",
"(",
"msg",
"=",
"(",
"'GCP Credentials File %s invalid. Must be valid JSON.'",
"%",
"credentials_file",
")",
",",
"changed",
"=",
"False",
")",
"else",
":",
"display",
".",
"deprecated",
"(",
"msg",
"=",
"'Non-JSON credentials file provided. This format is deprecated. Please generate a new JSON key from the Google Cloud console'",
",",
"version",
"=",
"2.5",
")",
"return",
"True"
] | check for valid credentials file . | train | false |
20,820 | def _bytesRepr(bytestring):
    if (not isinstance(bytestring, bytes)):
        raise TypeError(('Expected bytes not %r' % (bytestring,)))
    if _PY3:
        return repr(bytestring)
    else:
        return ('b' + repr(bytestring))
| [
"def",
"_bytesRepr",
"(",
"bytestring",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"bytestring",
",",
"bytes",
")",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'Expected bytes not %r'",
"%",
"(",
"bytestring",
",",
")",
")",
")",
"if",
"_PY3",
":",
"return",
"repr",
"(",
"bytestring",
")",
"else",
":",
"return",
"(",
"'b'",
"+",
"repr",
"(",
"bytestring",
")",
")"
] | provide a repr for a byte string that begins with b on both python 2 and 3 . | train | false |
20,821 | def get_subtitle_file_path(lang_code=None, youtube_id=None):
    srt_path = os.path.join(settings.STATIC_ROOT, 'srt')
    if lang_code:
        srt_path = os.path.join(srt_path, lcode_to_django_dir(lang_code), 'subtitles')
    if youtube_id:
        srt_path = os.path.join(srt_path, (youtube_id + '.vtt'))
    return srt_path
| [
"def",
"get_subtitle_file_path",
"(",
"lang_code",
"=",
"None",
",",
"youtube_id",
"=",
"None",
")",
":",
"srt_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"STATIC_ROOT",
",",
"'srt'",
")",
"if",
"lang_code",
":",
"srt_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"srt_path",
",",
"lcode_to_django_dir",
"(",
"lang_code",
")",
",",
"'subtitles'",
")",
"if",
"youtube_id",
":",
"srt_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"srt_path",
",",
"(",
"youtube_id",
"+",
"'.vtt'",
")",
")",
"return",
"srt_path"
] | both central and distributed servers must make these available at a web-accessible location . | train | false |
20,822 | def show_disk(name, call=None):
    if (call != 'action'):
        raise SaltCloudSystemExit('The show_disks action must be called with -a or --action.')
    ret = {}
    params = {'Action': 'DescribeInstanceDisks', 'InstanceId': name}
    items = query(params=params)
    for disk in items['Disks']['Disk']:
        ret[disk['DiskId']] = {}
        for item in disk:
            ret[disk['DiskId']][item] = str(disk[item])
    return ret
| [
"def",
"show_disk",
"(",
"name",
",",
"call",
"=",
"None",
")",
":",
"if",
"(",
"call",
"!=",
"'action'",
")",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The show_disks action must be called with -a or --action.'",
")",
"ret",
"=",
"{",
"}",
"params",
"=",
"{",
"'Action'",
":",
"'DescribeInstanceDisks'",
",",
"'InstanceId'",
":",
"name",
"}",
"items",
"=",
"query",
"(",
"params",
"=",
"params",
")",
"for",
"disk",
"in",
"items",
"[",
"'Disks'",
"]",
"[",
"'Disk'",
"]",
":",
"ret",
"[",
"disk",
"[",
"'DiskId'",
"]",
"]",
"=",
"{",
"}",
"for",
"item",
"in",
"disk",
":",
"ret",
"[",
"disk",
"[",
"'DiskId'",
"]",
"]",
"[",
"item",
"]",
"=",
"str",
"(",
"disk",
"[",
"item",
"]",
")",
"return",
"ret"
] | show the details of an existing disk . | train | false |
20,823 | def resolve_addon_path(addon_config, file_name):
    source_path = os.path.join(settings.ADDON_PATH, addon_config.short_name, 'static', file_name)
    if os.path.exists(source_path):
        return os.path.join('/', 'static', 'public', 'js', addon_config.short_name, file_name)
    return None
| [
"def",
"resolve_addon_path",
"(",
"addon_config",
",",
"file_name",
")",
":",
"source_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"ADDON_PATH",
",",
"addon_config",
".",
"short_name",
",",
"'static'",
",",
"file_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"source_path",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"'/'",
",",
"'static'",
",",
"'public'",
",",
"'js'",
",",
"addon_config",
".",
"short_name",
",",
"file_name",
")",
"return",
"None"
] | check for addon asset in source directory ; if file is found . | train | false |
20,824 | def _undo_op(arg, string, strict=False):
    if hasattr(arg.owner, 'op'):
        owner = arg.owner
        if (string in str(owner.op)):
            return owner.inputs[0]
        elif strict:
            raise ValueError((((string + ' not found in op ') + str(owner.op)) + '.'))
    elif strict:
        raise ValueError((((string + ' op not found in variable ') + str(arg)) + '.'))
    return arg
| [
"def",
"_undo_op",
"(",
"arg",
",",
"string",
",",
"strict",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"arg",
".",
"owner",
",",
"'op'",
")",
":",
"owner",
"=",
"arg",
".",
"owner",
"if",
"(",
"string",
"in",
"str",
"(",
"owner",
".",
"op",
")",
")",
":",
"return",
"owner",
".",
"inputs",
"[",
"0",
"]",
"elif",
"strict",
":",
"raise",
"ValueError",
"(",
"(",
"(",
"(",
"string",
"+",
"' not found in op '",
")",
"+",
"str",
"(",
"owner",
".",
"op",
")",
")",
"+",
"'.'",
")",
")",
"elif",
"strict",
":",
"raise",
"ValueError",
"(",
"(",
"(",
"(",
"string",
"+",
"' op not found in variable '",
")",
"+",
"str",
"(",
"arg",
")",
")",
"+",
"'.'",
")",
")",
"return",
"arg"
] | undo symbolic op if string is in str . | train | false |
20,826 | @frappe.whitelist(allow_guest=True)
def add_comment(args=None):
    if (not args):
        args = frappe.local.form_dict
    route = args.get(u'route')
    doc = frappe.get_doc(args[u'reference_doctype'], args[u'reference_name'])
    comment = doc.add_comment(u'Comment', args[u'comment'], comment_by=args[u'comment_by'])
    comment.flags.ignore_permissions = True
    comment.sender_full_name = args[u'comment_by_fullname']
    comment.save()
    clear_cache(route)
    commentors = [d[0] for d in frappe.db.sql(u"select sender from `tabCommunication`\n DCTB DCTB where\n DCTB DCTB DCTB communication_type = 'Comment' and comment_type = 'Comment'\n DCTB DCTB DCTB and reference_doctype=%s\n DCTB DCTB DCTB and reference_name=%s", (comment.reference_doctype, comment.reference_name))]
    owner = frappe.db.get_value(doc.doctype, doc.name, u'owner')
    recipients = list(set((commentors if (owner == u'Administrator') else (commentors + [owner]))))
    message = _(u'{0} by {1}').format(frappe.utils.markdown(args.get(u'comment')), comment.sender_full_name)
    message += u"<p><a href='{0}/{1}' style='font-size: 80%'>{2}</a></p>".format(frappe.utils.get_request_site_address(), route, _(u'View it in your browser'))
    from frappe.email.queue import send
    send(recipients=recipients, subject=_(u'New comment on {0} {1}').format(doc.doctype, doc.name), message=message, reference_doctype=doc.doctype, reference_name=doc.name)
    template = frappe.get_template(u'templates/includes/comments/comment.html')
    return template.render({u'comment': comment.as_dict()})
| [
"@",
"frappe",
".",
"whitelist",
"(",
"allow_guest",
"=",
"True",
")",
"def",
"add_comment",
"(",
"args",
"=",
"None",
")",
":",
"if",
"(",
"not",
"args",
")",
":",
"args",
"=",
"frappe",
".",
"local",
".",
"form_dict",
"route",
"=",
"args",
".",
"get",
"(",
"u'route'",
")",
"doc",
"=",
"frappe",
".",
"get_doc",
"(",
"args",
"[",
"u'reference_doctype'",
"]",
",",
"args",
"[",
"u'reference_name'",
"]",
")",
"comment",
"=",
"doc",
".",
"add_comment",
"(",
"u'Comment'",
",",
"args",
"[",
"u'comment'",
"]",
",",
"comment_by",
"=",
"args",
"[",
"u'comment_by'",
"]",
")",
"comment",
".",
"flags",
".",
"ignore_permissions",
"=",
"True",
"comment",
".",
"sender_full_name",
"=",
"args",
"[",
"u'comment_by_fullname'",
"]",
"comment",
".",
"save",
"(",
")",
"clear_cache",
"(",
"route",
")",
"commentors",
"=",
"[",
"d",
"[",
"0",
"]",
"for",
"d",
"in",
"frappe",
".",
"db",
".",
"sql",
"(",
"u\"select sender from `tabCommunication`\\n DCTB DCTB where\\n DCTB DCTB DCTB communication_type = 'Comment' and comment_type = 'Comment'\\n DCTB DCTB DCTB and reference_doctype=%s\\n DCTB DCTB DCTB and reference_name=%s\"",
",",
"(",
"comment",
".",
"reference_doctype",
",",
"comment",
".",
"reference_name",
")",
")",
"]",
"owner",
"=",
"frappe",
".",
"db",
".",
"get_value",
"(",
"doc",
".",
"doctype",
",",
"doc",
".",
"name",
",",
"u'owner'",
")",
"recipients",
"=",
"list",
"(",
"set",
"(",
"(",
"commentors",
"if",
"(",
"owner",
"==",
"u'Administrator'",
")",
"else",
"(",
"commentors",
"+",
"[",
"owner",
"]",
")",
")",
")",
")",
"message",
"=",
"_",
"(",
"u'{0} by {1}'",
")",
".",
"format",
"(",
"frappe",
".",
"utils",
".",
"markdown",
"(",
"args",
".",
"get",
"(",
"u'comment'",
")",
")",
",",
"comment",
".",
"sender_full_name",
")",
"message",
"+=",
"u\"<p><a href='{0}/{1}' style='font-size: 80%'>{2}</a></p>\"",
".",
"format",
"(",
"frappe",
".",
"utils",
".",
"get_request_site_address",
"(",
")",
",",
"route",
",",
"_",
"(",
"u'View it in your browser'",
")",
")",
"from",
"frappe",
".",
"email",
".",
"queue",
"import",
"send",
"send",
"(",
"recipients",
"=",
"recipients",
",",
"subject",
"=",
"_",
"(",
"u'New comment on {0} {1}'",
")",
".",
"format",
"(",
"doc",
".",
"doctype",
",",
"doc",
".",
"name",
")",
",",
"message",
"=",
"message",
",",
"reference_doctype",
"=",
"doc",
".",
"doctype",
",",
"reference_name",
"=",
"doc",
".",
"name",
")",
"template",
"=",
"frappe",
".",
"get_template",
"(",
"u'templates/includes/comments/comment.html'",
")",
"return",
"template",
".",
"render",
"(",
"{",
"u'comment'",
":",
"comment",
".",
"as_dict",
"(",
")",
"}",
")"
] | allow any logged user to post a comment . | train | false |
20,827 | def _unpickle_app(cls, pickler, *args):
    return pickler()(cls, *args)
| [
"def",
"_unpickle_app",
"(",
"cls",
",",
"pickler",
",",
"*",
"args",
")",
":",
"return",
"pickler",
"(",
")",
"(",
"cls",
",",
"*",
"args",
")"
] | rebuild app for versions 2 . | train | false |
20,829 | @calculator(542180608)
def calculate_perf_100nsec_timer(previous, current, property_name):
    n0 = previous[property_name]
    n1 = current[property_name]
    d0 = previous['Timestamp_Sys100NS']
    d1 = current['Timestamp_Sys100NS']
    if ((n0 is None) or (n1 is None)):
        return
    return (((n1 - n0) / (d1 - d0)) * 100)
| [
"@",
"calculator",
"(",
"542180608",
")",
"def",
"calculate_perf_100nsec_timer",
"(",
"previous",
",",
"current",
",",
"property_name",
")",
":",
"n0",
"=",
"previous",
"[",
"property_name",
"]",
"n1",
"=",
"current",
"[",
"property_name",
"]",
"d0",
"=",
"previous",
"[",
"'Timestamp_Sys100NS'",
"]",
"d1",
"=",
"current",
"[",
"'Timestamp_Sys100NS'",
"]",
"if",
"(",
"(",
"n0",
"is",
"None",
")",
"or",
"(",
"n1",
"is",
"None",
")",
")",
":",
"return",
"return",
"(",
"(",
"(",
"n1",
"-",
"n0",
")",
"/",
"(",
"d1",
"-",
"d0",
")",
")",
"*",
"100",
")"
] | perf_100nsec_timer URL . | train | true |
20,830 | def set_enterprise_branding_filter_param(request, provider_id):
    ec_uuid = request.GET.get('ec_src', None)
    if provider_id:
        LOGGER.info("Session key 'ENTERPRISE_CUSTOMER_BRANDING_OVERRIDE_DETAILS' has been set with provider_id '%s'", provider_id)
        request.session[ENTERPRISE_CUSTOMER_BRANDING_OVERRIDE_DETAILS] = {'provider_id': provider_id}
    elif ec_uuid:
        LOGGER.info("Session key 'ENTERPRISE_CUSTOMER_BRANDING_OVERRIDE_DETAILS' has been set with ec_uuid '%s'", ec_uuid)
        request.session[ENTERPRISE_CUSTOMER_BRANDING_OVERRIDE_DETAILS] = {'ec_uuid': ec_uuid}
| [
"def",
"set_enterprise_branding_filter_param",
"(",
"request",
",",
"provider_id",
")",
":",
"ec_uuid",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'ec_src'",
",",
"None",
")",
"if",
"provider_id",
":",
"LOGGER",
".",
"info",
"(",
"\"Session key 'ENTERPRISE_CUSTOMER_BRANDING_OVERRIDE_DETAILS' has been set with provider_id '%s'\"",
",",
"provider_id",
")",
"request",
".",
"session",
"[",
"ENTERPRISE_CUSTOMER_BRANDING_OVERRIDE_DETAILS",
"]",
"=",
"{",
"'provider_id'",
":",
"provider_id",
"}",
"elif",
"ec_uuid",
":",
"LOGGER",
".",
"info",
"(",
"\"Session key 'ENTERPRISE_CUSTOMER_BRANDING_OVERRIDE_DETAILS' has been set with ec_uuid '%s'\"",
",",
"ec_uuid",
")",
"request",
".",
"session",
"[",
"ENTERPRISE_CUSTOMER_BRANDING_OVERRIDE_DETAILS",
"]",
"=",
"{",
"'ec_uuid'",
":",
"ec_uuid",
"}"
] | setting enterprise_customer_branding_override_details in session . | train | false |
20,831 | def rereviewqueuetheme_checksum(rqt, **kw):
    dupe_personas = Persona.objects.filter(checksum=make_checksum((rqt.header_path or rqt.theme.header_path), (rqt.footer_path or rqt.theme.footer_path)))
    if dupe_personas.exists():
        rqt.dupe_persona = dupe_personas[0]
        rqt.save()
| [
"def",
"rereviewqueuetheme_checksum",
"(",
"rqt",
",",
"**",
"kw",
")",
":",
"dupe_personas",
"=",
"Persona",
".",
"objects",
".",
"filter",
"(",
"checksum",
"=",
"make_checksum",
"(",
"(",
"rqt",
".",
"header_path",
"or",
"rqt",
".",
"theme",
".",
"header_path",
")",
",",
"(",
"rqt",
".",
"footer_path",
"or",
"rqt",
".",
"theme",
".",
"footer_path",
")",
")",
")",
"if",
"dupe_personas",
".",
"exists",
"(",
")",
":",
"rqt",
".",
"dupe_persona",
"=",
"dupe_personas",
"[",
"0",
"]",
"rqt",
".",
"save",
"(",
")"
] | check for possible duplicate theme images . | train | false |
20,832 | def notify_about_volume_type_access_usage(context, volume_type_id, project_id, event_suffix, host=None):
    notifier_info = {'volume_type_id': volume_type_id, 'project_id': project_id}
    if (not host):
        host = CONF.host
    notifier = rpc.get_notifier('volume_type_project', host)
    notifier.info(context, ('volume_type_project.%s' % event_suffix), notifier_info)
| [
"def",
"notify_about_volume_type_access_usage",
"(",
"context",
",",
"volume_type_id",
",",
"project_id",
",",
"event_suffix",
",",
"host",
"=",
"None",
")",
":",
"notifier_info",
"=",
"{",
"'volume_type_id'",
":",
"volume_type_id",
",",
"'project_id'",
":",
"project_id",
"}",
"if",
"(",
"not",
"host",
")",
":",
"host",
"=",
"CONF",
".",
"host",
"notifier",
"=",
"rpc",
".",
"get_notifier",
"(",
"'volume_type_project'",
",",
"host",
")",
"notifier",
".",
"info",
"(",
"context",
",",
"(",
"'volume_type_project.%s'",
"%",
"event_suffix",
")",
",",
"notifier_info",
")"
] | notify about successful usage type-access- command . | train | false |
20,833 | def qicon_to_sni_image_list(qicon):
    import socket
    ans = dbus.Array(signature=u'(iiay)')
    if (not qicon.isNull()):
        sizes = (qicon.availableSizes() or (QSize(x, x) for x in (32, 64, 128, 256)))
        tc = ('L' if (array.array('I').itemsize < 4) else 'I')
        for size in sizes:
            i = qicon.pixmap(size).toImage().convertToFormat(QImage.Format_ARGB32)
            (w, h) = (i.width(), i.height())
            data = i.constBits().asstring(((4 * w) * h))
            if (socket.htonl(1) != 1):
                data = array.array(tc, i.constBits().asstring(((4 * i.width()) * i.height())))
                data.byteswap()
                data = data.tostring()
            ans.append((w, h, dbus.ByteArray(data)))
    return ans
| [
"def",
"qicon_to_sni_image_list",
"(",
"qicon",
")",
":",
"import",
"socket",
"ans",
"=",
"dbus",
".",
"Array",
"(",
"signature",
"=",
"u'(iiay)'",
")",
"if",
"(",
"not",
"qicon",
".",
"isNull",
"(",
")",
")",
":",
"sizes",
"=",
"(",
"qicon",
".",
"availableSizes",
"(",
")",
"or",
"(",
"QSize",
"(",
"x",
",",
"x",
")",
"for",
"x",
"in",
"(",
"32",
",",
"64",
",",
"128",
",",
"256",
")",
")",
")",
"tc",
"=",
"(",
"'L'",
"if",
"(",
"array",
".",
"array",
"(",
"'I'",
")",
".",
"itemsize",
"<",
"4",
")",
"else",
"'I'",
")",
"for",
"size",
"in",
"sizes",
":",
"i",
"=",
"qicon",
".",
"pixmap",
"(",
"size",
")",
".",
"toImage",
"(",
")",
".",
"convertToFormat",
"(",
"QImage",
".",
"Format_ARGB32",
")",
"(",
"w",
",",
"h",
")",
"=",
"(",
"i",
".",
"width",
"(",
")",
",",
"i",
".",
"height",
"(",
")",
")",
"data",
"=",
"i",
".",
"constBits",
"(",
")",
".",
"asstring",
"(",
"(",
"(",
"4",
"*",
"w",
")",
"*",
"h",
")",
")",
"if",
"(",
"socket",
".",
"htonl",
"(",
"1",
")",
"!=",
"1",
")",
":",
"data",
"=",
"array",
".",
"array",
"(",
"tc",
",",
"i",
".",
"constBits",
"(",
")",
".",
"asstring",
"(",
"(",
"(",
"4",
"*",
"i",
".",
"width",
"(",
")",
")",
"*",
"i",
".",
"height",
"(",
")",
")",
")",
")",
"data",
".",
"byteswap",
"(",
")",
"data",
"=",
"data",
".",
"tostring",
"(",
")",
"ans",
".",
"append",
"(",
"(",
"w",
",",
"h",
",",
"dbus",
".",
"ByteArray",
"(",
"data",
")",
")",
")",
"return",
"ans"
] | see URL . | train | false |
20,834 | def get_secondary_language(current_site=None):
    current_site = (current_site or Site.objects.get_current())
    return get_languages()[current_site.id][1]['code']
| [
"def",
"get_secondary_language",
"(",
"current_site",
"=",
"None",
")",
":",
"current_site",
"=",
"(",
"current_site",
"or",
"Site",
".",
"objects",
".",
"get_current",
"(",
")",
")",
"return",
"get_languages",
"(",
")",
"[",
"current_site",
".",
"id",
"]",
"[",
"1",
"]",
"[",
"'code'",
"]"
] | fetch the other language of the current site settings . | train | false |
20,835 | def primed():
try:
db_access = cassandra_interface.DatastoreProxy()
except cassandra.InvalidRequest:
return False
try:
return (db_access.get_metadata(cassandra_interface.PRIMED_KEY) == 'true')
finally:
db_access.close()
| [
"def",
"primed",
"(",
")",
":",
"try",
":",
"db_access",
"=",
"cassandra_interface",
".",
"DatastoreProxy",
"(",
")",
"except",
"cassandra",
".",
"InvalidRequest",
":",
"return",
"False",
"try",
":",
"return",
"(",
"db_access",
".",
"get_metadata",
"(",
"cassandra_interface",
".",
"PRIMED_KEY",
")",
"==",
"'true'",
")",
"finally",
":",
"db_access",
".",
"close",
"(",
")"
] | check if the required keyspace and tables are present . | train | false |
20,836 | def err_xml(message, type='Sender', code='ValidationError'):
return ('<ErrorResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">\n <Error>\n <Type>%s</Type>\n <Code>%s</Code>\n <Message>%s</Message>\n </Error>\n <RequestId>eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee</RequestId>\n</ErrorResponse>' % (type, code, message))
| [
"def",
"err_xml",
"(",
"message",
",",
"type",
"=",
"'Sender'",
",",
"code",
"=",
"'ValidationError'",
")",
":",
"return",
"(",
"'<ErrorResponse xmlns=\"http://elasticmapreduce.amazonaws.com/doc/2009-03-31\">\\n <Error>\\n <Type>%s</Type>\\n <Code>%s</Code>\\n <Message>%s</Message>\\n </Error>\\n <RequestId>eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee</RequestId>\\n</ErrorResponse>'",
"%",
"(",
"type",
",",
"code",
",",
"message",
")",
")"
] | use this to create the body of boto response errors . | train | false |
20,838 | @then(u'undefined-step snippets should not exist for')
def step_undefined_step_snippets_should_not_exist_for_table(context):
assert context.table, 'REQUIRES: table'
for row in context.table.rows:
step = row['Step']
step_undefined_step_snippet_should_not_exist_for(context, step)
| [
"@",
"then",
"(",
"u'undefined-step snippets should not exist for'",
")",
"def",
"step_undefined_step_snippets_should_not_exist_for_table",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"'REQUIRES: table'",
"for",
"row",
"in",
"context",
".",
"table",
".",
"rows",
":",
"step",
"=",
"row",
"[",
"'Step'",
"]",
"step_undefined_step_snippet_should_not_exist_for",
"(",
"context",
",",
"step",
")"
] | checks if undefined-step snippets are not provided . | train | false |
20,839 | def _action_to_coord(board, a):
if (a == _pass_action(board.size)):
return pachi_py.PASS_COORD
if (a == _resign_action(board.size)):
return pachi_py.RESIGN_COORD
return board.ij_to_coord((a // board.size), (a % board.size))
| [
"def",
"_action_to_coord",
"(",
"board",
",",
"a",
")",
":",
"if",
"(",
"a",
"==",
"_pass_action",
"(",
"board",
".",
"size",
")",
")",
":",
"return",
"pachi_py",
".",
"PASS_COORD",
"if",
"(",
"a",
"==",
"_resign_action",
"(",
"board",
".",
"size",
")",
")",
":",
"return",
"pachi_py",
".",
"RESIGN_COORD",
"return",
"board",
".",
"ij_to_coord",
"(",
"(",
"a",
"//",
"board",
".",
"size",
")",
",",
"(",
"a",
"%",
"board",
".",
"size",
")",
")"
] | converts actions to pachi coordinates . | train | false |
20,840 | def _get_seo_parent_title(slug_dict, document_locale):
if slug_dict['seo_root']:
try:
seo_root_doc = Document.objects.get(locale=document_locale, slug=slug_dict['seo_root'])
return (u' - %s' % seo_root_doc.title)
except Document.DoesNotExist:
pass
return ''
| [
"def",
"_get_seo_parent_title",
"(",
"slug_dict",
",",
"document_locale",
")",
":",
"if",
"slug_dict",
"[",
"'seo_root'",
"]",
":",
"try",
":",
"seo_root_doc",
"=",
"Document",
".",
"objects",
".",
"get",
"(",
"locale",
"=",
"document_locale",
",",
"slug",
"=",
"slug_dict",
"[",
"'seo_root'",
"]",
")",
"return",
"(",
"u' - %s'",
"%",
"seo_root_doc",
".",
"title",
")",
"except",
"Document",
".",
"DoesNotExist",
":",
"pass",
"return",
"''"
] | get parent-title information for seo purposes . | train | false |
20,841 | def get_unallocated_public_ips(module, cp_driver, lb_driver, network_domain, reuse_free, count=0):
free_ips = []
if (reuse_free is True):
blocks_with_unallocated = get_blocks_with_unallocated(module, cp_driver, lb_driver, network_domain)
free_ips = blocks_with_unallocated['unallocated_addresses']
if (len(free_ips) < count):
num_needed = (count - len(free_ips))
for i in range(num_needed):
block = cp_driver.ex_add_public_ip_block_to_network_domain(network_domain)
block_dict = get_block_allocation(module, cp_driver, lb_driver, network_domain, block)
for addr in block_dict['addresses']:
free_ips.append(addr['address'])
if (len(free_ips) >= count):
break
return {'changed': True, 'msg': 'Allocated public IP block(s)', 'addresses': free_ips[:count]}
else:
return {'changed': False, 'msg': ('Found enough unallocated IPs' + ' without provisioning.'), 'addresses': free_ips}
| [
"def",
"get_unallocated_public_ips",
"(",
"module",
",",
"cp_driver",
",",
"lb_driver",
",",
"network_domain",
",",
"reuse_free",
",",
"count",
"=",
"0",
")",
":",
"free_ips",
"=",
"[",
"]",
"if",
"(",
"reuse_free",
"is",
"True",
")",
":",
"blocks_with_unallocated",
"=",
"get_blocks_with_unallocated",
"(",
"module",
",",
"cp_driver",
",",
"lb_driver",
",",
"network_domain",
")",
"free_ips",
"=",
"blocks_with_unallocated",
"[",
"'unallocated_addresses'",
"]",
"if",
"(",
"len",
"(",
"free_ips",
")",
"<",
"count",
")",
":",
"num_needed",
"=",
"(",
"count",
"-",
"len",
"(",
"free_ips",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_needed",
")",
":",
"block",
"=",
"cp_driver",
".",
"ex_add_public_ip_block_to_network_domain",
"(",
"network_domain",
")",
"block_dict",
"=",
"get_block_allocation",
"(",
"module",
",",
"cp_driver",
",",
"lb_driver",
",",
"network_domain",
",",
"block",
")",
"for",
"addr",
"in",
"block_dict",
"[",
"'addresses'",
"]",
":",
"free_ips",
".",
"append",
"(",
"addr",
"[",
"'address'",
"]",
")",
"if",
"(",
"len",
"(",
"free_ips",
")",
">=",
"count",
")",
":",
"break",
"return",
"{",
"'changed'",
":",
"True",
",",
"'msg'",
":",
"'Allocated public IP block(s)'",
",",
"'addresses'",
":",
"free_ips",
"[",
":",
"count",
"]",
"}",
"else",
":",
"return",
"{",
"'changed'",
":",
"False",
",",
"'msg'",
":",
"(",
"'Found enough unallocated IPs'",
"+",
"' without provisioning.'",
")",
",",
"'addresses'",
":",
"free_ips",
"}"
] | get and/or provision unallocated public ips . | train | false |
20,842 | def simple_first(kv):
return (isinstance(kv[1], (list, dict, tuple)), kv[0])
| [
"def",
"simple_first",
"(",
"kv",
")",
":",
"return",
"(",
"isinstance",
"(",
"kv",
"[",
"1",
"]",
",",
"(",
"list",
",",
"dict",
",",
"tuple",
")",
")",
",",
"kv",
"[",
"0",
"]",
")"
] | helper function to pass to item_sort_key to sort simple elements to the top . | train | false |
20,844 | def _validate_permission_types(resource_db, permission_types):
resource_db = _validate_resource_type(resource_db=resource_db)
resource_type = resource_db.get_resource_type()
valid_permission_types = PermissionType.get_valid_permissions_for_resource_type(resource_type)
for permission_type in permission_types:
if (permission_type not in valid_permission_types):
raise ValueError(('Invalid permission type: %s' % permission_type))
return permission_types
| [
"def",
"_validate_permission_types",
"(",
"resource_db",
",",
"permission_types",
")",
":",
"resource_db",
"=",
"_validate_resource_type",
"(",
"resource_db",
"=",
"resource_db",
")",
"resource_type",
"=",
"resource_db",
".",
"get_resource_type",
"(",
")",
"valid_permission_types",
"=",
"PermissionType",
".",
"get_valid_permissions_for_resource_type",
"(",
"resource_type",
")",
"for",
"permission_type",
"in",
"permission_types",
":",
"if",
"(",
"permission_type",
"not",
"in",
"valid_permission_types",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Invalid permission type: %s'",
"%",
"permission_type",
")",
")",
"return",
"permission_types"
] | validate that the permission_types list only contains valid values for the provided resource . | train | false |
20,845 | def yml_or_yaml_path(basedir, basename):
basepath = os.path.join(basedir, basename)
yml_path = (basepath + '.yml')
yaml_path = (basepath + '.yaml')
if os.path.exists(yaml_path):
if os.path.exists(yml_path):
raise ValueError('Both {0} and {1} exist.'.format(yml_path, yaml_path))
return yaml_path
return yml_path
| [
"def",
"yml_or_yaml_path",
"(",
"basedir",
",",
"basename",
")",
":",
"basepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"basename",
")",
"yml_path",
"=",
"(",
"basepath",
"+",
"'.yml'",
")",
"yaml_path",
"=",
"(",
"basepath",
"+",
"'.yaml'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"yaml_path",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"yml_path",
")",
":",
"raise",
"ValueError",
"(",
"'Both {0} and {1} exist.'",
".",
"format",
"(",
"yml_path",
",",
"yaml_path",
")",
")",
"return",
"yaml_path",
"return",
"yml_path"
] | return a path to the requested yaml file . | train | false |
20,847 | def show_message(message, msg_type=None, text_color=None, back_color=None, scroll_speed=None):
text_color = (text_color or [255, 255, 255])
back_color = (back_color or [0, 0, 0])
scroll_speed = (scroll_speed or 0.1)
color_by_type = {'error': [255, 0, 0], 'warning': [255, 100, 0], 'success': [0, 255, 0], 'info': [0, 0, 255]}
if (msg_type in color_by_type):
text_color = color_by_type[msg_type]
_sensehat.show_message(message, scroll_speed, text_color, back_color)
return {'message': message}
| [
"def",
"show_message",
"(",
"message",
",",
"msg_type",
"=",
"None",
",",
"text_color",
"=",
"None",
",",
"back_color",
"=",
"None",
",",
"scroll_speed",
"=",
"None",
")",
":",
"text_color",
"=",
"(",
"text_color",
"or",
"[",
"255",
",",
"255",
",",
"255",
"]",
")",
"back_color",
"=",
"(",
"back_color",
"or",
"[",
"0",
",",
"0",
",",
"0",
"]",
")",
"scroll_speed",
"=",
"(",
"scroll_speed",
"or",
"0.1",
")",
"color_by_type",
"=",
"{",
"'error'",
":",
"[",
"255",
",",
"0",
",",
"0",
"]",
",",
"'warning'",
":",
"[",
"255",
",",
"100",
",",
"0",
"]",
",",
"'success'",
":",
"[",
"0",
",",
"255",
",",
"0",
"]",
",",
"'info'",
":",
"[",
"0",
",",
"0",
",",
"255",
"]",
"}",
"if",
"(",
"msg_type",
"in",
"color_by_type",
")",
":",
"text_color",
"=",
"color_by_type",
"[",
"msg_type",
"]",
"_sensehat",
".",
"show_message",
"(",
"message",
",",
"scroll_speed",
",",
"text_color",
",",
"back_color",
")",
"return",
"{",
"'message'",
":",
"message",
"}"
] | displays a message on the led matrix . | train | true |
20,848 | def vnorm(a, ord=None, axis=None, dtype=None, keepdims=False, split_every=None):
if ((ord is None) or (ord == 'fro')):
ord = 2
if (ord == np.inf):
return max(abs(a), axis=axis, keepdims=keepdims, split_every=split_every)
elif (ord == (- np.inf)):
return min(abs(a), axis=axis, keepdims=keepdims, split_every=split_every)
elif (ord == 1):
return sum(abs(a), axis=axis, dtype=dtype, keepdims=keepdims, split_every=split_every)
elif ((ord % 2) == 0):
return (sum((a ** ord), axis=axis, dtype=dtype, keepdims=keepdims, split_every=split_every) ** (1.0 / ord))
else:
return (sum((abs(a) ** ord), axis=axis, dtype=dtype, keepdims=keepdims, split_every=split_every) ** (1.0 / ord))
| [
"def",
"vnorm",
"(",
"a",
",",
"ord",
"=",
"None",
",",
"axis",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"keepdims",
"=",
"False",
",",
"split_every",
"=",
"None",
")",
":",
"if",
"(",
"(",
"ord",
"is",
"None",
")",
"or",
"(",
"ord",
"==",
"'fro'",
")",
")",
":",
"ord",
"=",
"2",
"if",
"(",
"ord",
"==",
"np",
".",
"inf",
")",
":",
"return",
"max",
"(",
"abs",
"(",
"a",
")",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"keepdims",
",",
"split_every",
"=",
"split_every",
")",
"elif",
"(",
"ord",
"==",
"(",
"-",
"np",
".",
"inf",
")",
")",
":",
"return",
"min",
"(",
"abs",
"(",
"a",
")",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"keepdims",
",",
"split_every",
"=",
"split_every",
")",
"elif",
"(",
"ord",
"==",
"1",
")",
":",
"return",
"sum",
"(",
"abs",
"(",
"a",
")",
",",
"axis",
"=",
"axis",
",",
"dtype",
"=",
"dtype",
",",
"keepdims",
"=",
"keepdims",
",",
"split_every",
"=",
"split_every",
")",
"elif",
"(",
"(",
"ord",
"%",
"2",
")",
"==",
"0",
")",
":",
"return",
"(",
"sum",
"(",
"(",
"a",
"**",
"ord",
")",
",",
"axis",
"=",
"axis",
",",
"dtype",
"=",
"dtype",
",",
"keepdims",
"=",
"keepdims",
",",
"split_every",
"=",
"split_every",
")",
"**",
"(",
"1.0",
"/",
"ord",
")",
")",
"else",
":",
"return",
"(",
"sum",
"(",
"(",
"abs",
"(",
"a",
")",
"**",
"ord",
")",
",",
"axis",
"=",
"axis",
",",
"dtype",
"=",
"dtype",
",",
"keepdims",
"=",
"keepdims",
",",
"split_every",
"=",
"split_every",
")",
"**",
"(",
"1.0",
"/",
"ord",
")",
")"
] | vector norm see np . | train | false |
20,849 | def extension_element_to_element(extension_element, translation_functions, namespace=None):
try:
element_namespace = extension_element.namespace
except AttributeError:
element_namespace = extension_element.c_namespace
if (element_namespace == namespace):
try:
try:
ets = translation_functions[extension_element.tag]
except AttributeError:
ets = translation_functions[extension_element.c_tag]
return ets(extension_element.to_string())
except KeyError:
pass
return None
| [
"def",
"extension_element_to_element",
"(",
"extension_element",
",",
"translation_functions",
",",
"namespace",
"=",
"None",
")",
":",
"try",
":",
"element_namespace",
"=",
"extension_element",
".",
"namespace",
"except",
"AttributeError",
":",
"element_namespace",
"=",
"extension_element",
".",
"c_namespace",
"if",
"(",
"element_namespace",
"==",
"namespace",
")",
":",
"try",
":",
"try",
":",
"ets",
"=",
"translation_functions",
"[",
"extension_element",
".",
"tag",
"]",
"except",
"AttributeError",
":",
"ets",
"=",
"translation_functions",
"[",
"extension_element",
".",
"c_tag",
"]",
"return",
"ets",
"(",
"extension_element",
".",
"to_string",
"(",
")",
")",
"except",
"KeyError",
":",
"pass",
"return",
"None"
] | convert an extension element to a normal element . | train | true |
20,850 | def _get_cython_type_upcast(dtype):
if is_integer_dtype(dtype):
return 'int64_t'
elif is_float_dtype(dtype):
return 'double'
else:
return 'object'
| [
"def",
"_get_cython_type_upcast",
"(",
"dtype",
")",
":",
"if",
"is_integer_dtype",
"(",
"dtype",
")",
":",
"return",
"'int64_t'",
"elif",
"is_float_dtype",
"(",
"dtype",
")",
":",
"return",
"'double'",
"else",
":",
"return",
"'object'"
] | upcast a dtype to int64_t . | train | false |
20,851 | def disable_logging():
flexmock(logging).should_receive('error').and_return()
flexmock(logging).should_receive('warning').and_return()
flexmock(logging).should_receive('info').and_return()
| [
"def",
"disable_logging",
"(",
")",
":",
"flexmock",
"(",
"logging",
")",
".",
"should_receive",
"(",
"'error'",
")",
".",
"and_return",
"(",
")",
"flexmock",
"(",
"logging",
")",
".",
"should_receive",
"(",
"'warning'",
")",
".",
"and_return",
"(",
")",
"flexmock",
"(",
"logging",
")",
".",
"should_receive",
"(",
"'info'",
")",
".",
"and_return",
"(",
")"
] | mocks out logging for testing output . | train | false |
20,852 | def parse_pi_data(pi_data):
results = {}
for elt in pi_data.split():
if RE_DOUBLE_QUOTE.match(elt):
(kwd, val) = RE_DOUBLE_QUOTE.match(elt).groups()
elif RE_SIMPLE_QUOTE.match(elt):
(kwd, val) = RE_SIMPLE_QUOTE.match(elt).groups()
else:
(kwd, val) = (elt, None)
results[kwd] = val
return results
| [
"def",
"parse_pi_data",
"(",
"pi_data",
")",
":",
"results",
"=",
"{",
"}",
"for",
"elt",
"in",
"pi_data",
".",
"split",
"(",
")",
":",
"if",
"RE_DOUBLE_QUOTE",
".",
"match",
"(",
"elt",
")",
":",
"(",
"kwd",
",",
"val",
")",
"=",
"RE_DOUBLE_QUOTE",
".",
"match",
"(",
"elt",
")",
".",
"groups",
"(",
")",
"elif",
"RE_SIMPLE_QUOTE",
".",
"match",
"(",
"elt",
")",
":",
"(",
"kwd",
",",
"val",
")",
"=",
"RE_SIMPLE_QUOTE",
".",
"match",
"(",
"elt",
")",
".",
"groups",
"(",
")",
"else",
":",
"(",
"kwd",
",",
"val",
")",
"=",
"(",
"elt",
",",
"None",
")",
"results",
"[",
"kwd",
"]",
"=",
"val",
"return",
"results"
] | utility function that parses the data contained in an xml processing instruction and returns a dictionary of keywords and their associated values . | train | false |
20,853 | def noid(d):
d.pop('id', None)
d.pop('action_id', None)
return d
| [
"def",
"noid",
"(",
"d",
")",
":",
"d",
".",
"pop",
"(",
"'id'",
",",
"None",
")",
"d",
".",
"pop",
"(",
"'action_id'",
",",
"None",
")",
"return",
"d"
] | removes values that are not relevant for the test comparisons . | train | false |
20,854 | def parse_octal_escape(source, info, digits, in_set):
saved_pos = source.pos
ch = source.get()
while ((len(digits) < 3) and (ch in OCT_DIGITS)):
digits.append(ch)
saved_pos = source.pos
ch = source.get()
source.pos = saved_pos
try:
value = int(''.join(digits), 8)
return make_character(info, value, in_set)
except ValueError:
if (digits[0] in OCT_DIGITS):
raise error(('incomplete escape \\%s' % ''.join(digits)), source.string, source.pos)
else:
raise error(('bad escape \\%s' % digits[0]), source.string, source.pos)
| [
"def",
"parse_octal_escape",
"(",
"source",
",",
"info",
",",
"digits",
",",
"in_set",
")",
":",
"saved_pos",
"=",
"source",
".",
"pos",
"ch",
"=",
"source",
".",
"get",
"(",
")",
"while",
"(",
"(",
"len",
"(",
"digits",
")",
"<",
"3",
")",
"and",
"(",
"ch",
"in",
"OCT_DIGITS",
")",
")",
":",
"digits",
".",
"append",
"(",
"ch",
")",
"saved_pos",
"=",
"source",
".",
"pos",
"ch",
"=",
"source",
".",
"get",
"(",
")",
"source",
".",
"pos",
"=",
"saved_pos",
"try",
":",
"value",
"=",
"int",
"(",
"''",
".",
"join",
"(",
"digits",
")",
",",
"8",
")",
"return",
"make_character",
"(",
"info",
",",
"value",
",",
"in_set",
")",
"except",
"ValueError",
":",
"if",
"(",
"digits",
"[",
"0",
"]",
"in",
"OCT_DIGITS",
")",
":",
"raise",
"error",
"(",
"(",
"'incomplete escape \\\\%s'",
"%",
"''",
".",
"join",
"(",
"digits",
")",
")",
",",
"source",
".",
"string",
",",
"source",
".",
"pos",
")",
"else",
":",
"raise",
"error",
"(",
"(",
"'bad escape \\\\%s'",
"%",
"digits",
"[",
"0",
"]",
")",
",",
"source",
".",
"string",
",",
"source",
".",
"pos",
")"
] | parses an octal escape sequence . | train | false |
20,856 | def list_avail():
cmd = 'locale -a'
out = __salt__['cmd.run'](cmd).split('\n')
return out
| [
"def",
"list_avail",
"(",
")",
":",
"cmd",
"=",
"'locale -a'",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
")",
".",
"split",
"(",
"'\\n'",
")",
"return",
"out"
] | lists available locales cli example: . | train | false |
20,858 | def update_copy(d, _new=None, **kw):
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
| [
"def",
"update_copy",
"(",
"d",
",",
"_new",
"=",
"None",
",",
"**",
"kw",
")",
":",
"d",
"=",
"d",
".",
"copy",
"(",
")",
"if",
"_new",
":",
"d",
".",
"update",
"(",
"_new",
")",
"d",
".",
"update",
"(",
"**",
"kw",
")",
"return",
"d"
] | copy the given dict and update with the given values . | train | false |
20,860 | def _get_file_info(ctx):
retval = []
request = ctx.transport.req
headers = request.getAllHeaders()
content_type = headers.get('content-type', None)
if (content_type is None):
return retval
img = cgi.FieldStorage(fp=request.content, headers=ctx.in_header_doc, environ={'REQUEST_METHOD': request.method, 'CONTENT_TYPE': content_type})
try:
keys = img.keys()
except TypeError:
return retval
for k in keys:
field = img[k]
file_type = field.type
file_name = field.disposition_options.get('filename', None)
if (file_name is not None):
retval.append(_FileInfo(k, file_name, file_type, [mmap(field.file.fileno(), 0)]))
return retval
| [
"def",
"_get_file_info",
"(",
"ctx",
")",
":",
"retval",
"=",
"[",
"]",
"request",
"=",
"ctx",
".",
"transport",
".",
"req",
"headers",
"=",
"request",
".",
"getAllHeaders",
"(",
")",
"content_type",
"=",
"headers",
".",
"get",
"(",
"'content-type'",
",",
"None",
")",
"if",
"(",
"content_type",
"is",
"None",
")",
":",
"return",
"retval",
"img",
"=",
"cgi",
".",
"FieldStorage",
"(",
"fp",
"=",
"request",
".",
"content",
",",
"headers",
"=",
"ctx",
".",
"in_header_doc",
",",
"environ",
"=",
"{",
"'REQUEST_METHOD'",
":",
"request",
".",
"method",
",",
"'CONTENT_TYPE'",
":",
"content_type",
"}",
")",
"try",
":",
"keys",
"=",
"img",
".",
"keys",
"(",
")",
"except",
"TypeError",
":",
"return",
"retval",
"for",
"k",
"in",
"keys",
":",
"field",
"=",
"img",
"[",
"k",
"]",
"file_type",
"=",
"field",
".",
"type",
"file_name",
"=",
"field",
".",
"disposition_options",
".",
"get",
"(",
"'filename'",
",",
"None",
")",
"if",
"(",
"file_name",
"is",
"not",
"None",
")",
":",
"retval",
".",
"append",
"(",
"_FileInfo",
"(",
"k",
",",
"file_name",
",",
"file_type",
",",
"[",
"mmap",
"(",
"field",
".",
"file",
".",
"fileno",
"(",
")",
",",
"0",
")",
"]",
")",
")",
"return",
"retval"
] | we need this hack because twisted doesn't offer a way to get file name from content-disposition header . | train | false |
20,861 | def _resolveDotSegments(path):
segs = []
for seg in path:
if (seg == u'.'):
pass
elif (seg == u'..'):
if segs:
segs.pop()
else:
segs.append(seg)
if (list(path[(-1):]) in ([u'.'], [u'..'])):
segs.append(u'')
return segs
| [
"def",
"_resolveDotSegments",
"(",
"path",
")",
":",
"segs",
"=",
"[",
"]",
"for",
"seg",
"in",
"path",
":",
"if",
"(",
"seg",
"==",
"u'.'",
")",
":",
"pass",
"elif",
"(",
"seg",
"==",
"u'..'",
")",
":",
"if",
"segs",
":",
"segs",
".",
"pop",
"(",
")",
"else",
":",
"segs",
".",
"append",
"(",
"seg",
")",
"if",
"(",
"list",
"(",
"path",
"[",
"(",
"-",
"1",
")",
":",
"]",
")",
"in",
"(",
"[",
"u'.'",
"]",
",",
"[",
"u'..'",
"]",
")",
")",
":",
"segs",
".",
"append",
"(",
"u''",
")",
"return",
"segs"
] | normalise the url path by resolving segments of '.' and '..' . | train | false |
20,864 | def get_diff_opcode_generator(*args, **kwargs):
return _generator(*args, **kwargs)
| [
"def",
"get_diff_opcode_generator",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"_generator",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] | returns a diffopcodegenerator instance used for generating opcodes . | train | false |
20,865 | def PartitionString(value, separator):
index = value.find(separator)
if (index == (-1)):
return (value, '', value[0:0])
else:
return (value[0:index], separator, value[(index + len(separator)):len(value)])
| [
"def",
"PartitionString",
"(",
"value",
",",
"separator",
")",
":",
"index",
"=",
"value",
".",
"find",
"(",
"separator",
")",
"if",
"(",
"index",
"==",
"(",
"-",
"1",
")",
")",
":",
"return",
"(",
"value",
",",
"''",
",",
"value",
"[",
"0",
":",
"0",
"]",
")",
"else",
":",
"return",
"(",
"value",
"[",
"0",
":",
"index",
"]",
",",
"separator",
",",
"value",
"[",
"(",
"index",
"+",
"len",
"(",
"separator",
")",
")",
":",
"len",
"(",
"value",
")",
"]",
")"
] | equivalent to python2 . | train | false |
20,866 | def run_in_subprocess(func, *args, **kwargs):
from multiprocessing import Process
thread = Process(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
| [
"def",
"run_in_subprocess",
"(",
"func",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"from",
"multiprocessing",
"import",
"Process",
"thread",
"=",
"Process",
"(",
"target",
"=",
"func",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
"thread",
".",
"daemon",
"=",
"True",
"thread",
".",
"start",
"(",
")",
"return",
"thread"
] | run function in subprocess . | train | true |
20,868 | def init(mpstate):
return SerialModule(mpstate)
| [
"def",
"init",
"(",
"mpstate",
")",
":",
"return",
"SerialModule",
"(",
"mpstate",
")"
] | initialise module . | train | false |
20,871 | def libvlc_video_get_adjust_int(p_mi, option):
f = (_Cfunctions.get('libvlc_video_get_adjust_int', None) or _Cfunction('libvlc_video_get_adjust_int', ((1,), (1,)), None, ctypes.c_int, MediaPlayer, ctypes.c_uint))
return f(p_mi, option)
| [
"def",
"libvlc_video_get_adjust_int",
"(",
"p_mi",
",",
"option",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_video_get_adjust_int'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_video_get_adjust_int'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
")",
",",
"None",
",",
"ctypes",
".",
"c_int",
",",
"MediaPlayer",
",",
"ctypes",
".",
"c_uint",
")",
")",
"return",
"f",
"(",
"p_mi",
",",
"option",
")"
] | get integer adjust option . | train | true |
20,872 | def bellman_ford_path_length(G, source, target, weight='weight'):
if (source == target):
return 0
weight = _weight_function(G, weight)
length = _bellman_ford(G, [source], weight, target=target)
try:
return length[target]
except KeyError:
raise nx.NetworkXNoPath(('node %s not reachable from %s' % (source, target)))
| [
"def",
"bellman_ford_path_length",
"(",
"G",
",",
"source",
",",
"target",
",",
"weight",
"=",
"'weight'",
")",
":",
"if",
"(",
"source",
"==",
"target",
")",
":",
"return",
"0",
"weight",
"=",
"_weight_function",
"(",
"G",
",",
"weight",
")",
"length",
"=",
"_bellman_ford",
"(",
"G",
",",
"[",
"source",
"]",
",",
"weight",
",",
"target",
"=",
"target",
")",
"try",
":",
"return",
"length",
"[",
"target",
"]",
"except",
"KeyError",
":",
"raise",
"nx",
".",
"NetworkXNoPath",
"(",
"(",
"'node %s not reachable from %s'",
"%",
"(",
"source",
",",
"target",
")",
")",
")"
] | returns the shortest path length from source to target in a weighted graph . | train | false |
20,873 | def sub_list(l):
r = []
for i in l:
if (type(i) in prims):
r.append(i)
elif (type(i) is list):
r.append(sub_list(i))
elif (type(i) is dict):
r.append(sub_dict(i))
else:
print 'Unknown Type: {}'.format(type(i))
r = sorted(r)
return r
| [
"def",
"sub_list",
"(",
"l",
")",
":",
"r",
"=",
"[",
"]",
"for",
"i",
"in",
"l",
":",
"if",
"(",
"type",
"(",
"i",
")",
"in",
"prims",
")",
":",
"r",
".",
"append",
"(",
"i",
")",
"elif",
"(",
"type",
"(",
"i",
")",
"is",
"list",
")",
":",
"r",
".",
"append",
"(",
"sub_list",
"(",
"i",
")",
")",
"elif",
"(",
"type",
"(",
"i",
")",
"is",
"dict",
")",
":",
"r",
".",
"append",
"(",
"sub_dict",
"(",
"i",
")",
")",
"else",
":",
"print",
"'Unknown Type: {}'",
".",
"format",
"(",
"type",
"(",
"i",
")",
")",
"r",
"=",
"sorted",
"(",
"r",
")",
"return",
"r"
] | recursively walk a data structure sorting any lists along the way . | train | false |
20,874 | def arg_stringname(func_arg):
if hasattr(func_arg, 'arg'):
return func_arg.arg
else:
return str(func_arg)
| [
"def",
"arg_stringname",
"(",
"func_arg",
")",
":",
"if",
"hasattr",
"(",
"func_arg",
",",
"'arg'",
")",
":",
"return",
"func_arg",
".",
"arg",
"else",
":",
"return",
"str",
"(",
"func_arg",
")"
] | gets the string name of a kwarg or vararg in python3 . | train | false |
20,876 | def inject_into_urllib3():
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
util.IS_PYOPENSSL = True
| [
"def",
"inject_into_urllib3",
"(",
")",
":",
"connection",
".",
"ssl_wrap_socket",
"=",
"ssl_wrap_socket",
"util",
".",
"HAS_SNI",
"=",
"HAS_SNI",
"util",
".",
"IS_PYOPENSSL",
"=",
"True"
] | monkey-patch urllib3 with pyopenssl-backed ssl-support . | train | false |
20,878 | def delete_jail(name):
if is_jail(name):
cmd = 'poudriere jail -d -j {0}'.format(name)
__salt__['cmd.run'](cmd)
if is_jail(name):
return 'Looks like there was an issue deleteing jail {0}'.format(name)
else:
return 'Looks like jail {0} has not been created'.format(name)
make_file = os.path.join(_config_dir(), '{0}-make.conf'.format(name))
if os.path.isfile(make_file):
try:
os.remove(make_file)
except (IOError, OSError):
return 'Deleted jail "{0}" but was unable to remove jail make file'.format(name)
__salt__['file.remove'](make_file)
return 'Deleted jail {0}'.format(name)
| [
"def",
"delete_jail",
"(",
"name",
")",
":",
"if",
"is_jail",
"(",
"name",
")",
":",
"cmd",
"=",
"'poudriere jail -d -j {0}'",
".",
"format",
"(",
"name",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
")",
"if",
"is_jail",
"(",
"name",
")",
":",
"return",
"'Looks like there was an issue deleteing jail {0}'",
".",
"format",
"(",
"name",
")",
"else",
":",
"return",
"'Looks like jail {0} has not been created'",
".",
"format",
"(",
"name",
")",
"make_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_config_dir",
"(",
")",
",",
"'{0}-make.conf'",
".",
"format",
"(",
"name",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"make_file",
")",
":",
"try",
":",
"os",
".",
"remove",
"(",
"make_file",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"return",
"'Deleted jail \"{0}\" but was unable to remove jail make file'",
".",
"format",
"(",
"name",
")",
"__salt__",
"[",
"'file.remove'",
"]",
"(",
"make_file",
")",
"return",
"'Deleted jail {0}'",
".",
"format",
"(",
"name",
")"
] | deletes poudriere jail with name cli example: . | train | true |
20,879 | def is_required_form_field(field):
for validator in field.validators:
if isinstance(validator, (DataRequired, InputRequired)):
return True
return False
| [
"def",
"is_required_form_field",
"(",
"field",
")",
":",
"for",
"validator",
"in",
"field",
".",
"validators",
":",
"if",
"isinstance",
"(",
"validator",
",",
"(",
"DataRequired",
",",
"InputRequired",
")",
")",
":",
"return",
"True",
"return",
"False"
] | check if form field has datarequired or inputrequired validators . | train | false |
20,880 | def waitKeys(maxWait=float('inf'), keyList=None, modifiers=False, timeStamped=False):
key = None
clearEvents('keyboard')
timer = psychopy.core.Clock()
while ((key is None) and (timer.getTime() < maxWait)):
if havePyglet:
defDisplay = pyglet.window.get_platform().get_default_display()
for win in defDisplay.get_windows():
win.dispatch_events()
keys = getKeys(keyList=keyList, modifiers=modifiers, timeStamped=timeStamped)
if len(keys):
return keys
logging.data('No keypress (maxWait exceeded)')
return None
| [
"def",
"waitKeys",
"(",
"maxWait",
"=",
"float",
"(",
"'inf'",
")",
",",
"keyList",
"=",
"None",
",",
"modifiers",
"=",
"False",
",",
"timeStamped",
"=",
"False",
")",
":",
"key",
"=",
"None",
"clearEvents",
"(",
"'keyboard'",
")",
"timer",
"=",
"psychopy",
".",
"core",
".",
"Clock",
"(",
")",
"while",
"(",
"(",
"key",
"is",
"None",
")",
"and",
"(",
"timer",
".",
"getTime",
"(",
")",
"<",
"maxWait",
")",
")",
":",
"if",
"havePyglet",
":",
"defDisplay",
"=",
"pyglet",
".",
"window",
".",
"get_platform",
"(",
")",
".",
"get_default_display",
"(",
")",
"for",
"win",
"in",
"defDisplay",
".",
"get_windows",
"(",
")",
":",
"win",
".",
"dispatch_events",
"(",
")",
"keys",
"=",
"getKeys",
"(",
"keyList",
"=",
"keyList",
",",
"modifiers",
"=",
"modifiers",
",",
"timeStamped",
"=",
"timeStamped",
")",
"if",
"len",
"(",
"keys",
")",
":",
"return",
"keys",
"logging",
".",
"data",
"(",
"'No keypress (maxWait exceeded)'",
")",
"return",
"None"
] | same as ~psychopy . | train | false |
20,881 | def getNewRepository():
return ExportRepository()
| [
"def",
"getNewRepository",
"(",
")",
":",
"return",
"ExportRepository",
"(",
")"
] | get the repository constructor . | train | false |
20,883 | def get_test_records(doctype):
from frappe.modules import get_doctype_module, get_module_path
path = os.path.join(get_module_path(get_doctype_module(doctype)), u'doctype', scrub(doctype), u'test_records.json')
if os.path.exists(path):
with open(path, u'r') as f:
return json.loads(f.read())
else:
return []
| [
"def",
"get_test_records",
"(",
"doctype",
")",
":",
"from",
"frappe",
".",
"modules",
"import",
"get_doctype_module",
",",
"get_module_path",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_module_path",
"(",
"get_doctype_module",
"(",
"doctype",
")",
")",
",",
"u'doctype'",
",",
"scrub",
"(",
"doctype",
")",
",",
"u'test_records.json'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"u'r'",
")",
"as",
"f",
":",
"return",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"else",
":",
"return",
"[",
"]"
] | returns list of objects from test_records . | train | false |
20,884 | def getRenderingView(w3af, parentView):
if RENDERING_ENGINES['webkit']:
return WebKitRenderingView(w3af, parentView)
elif RENDERING_ENGINES['moz']:
return MozRenderingView(w3af, parentView)
elif RENDERING_ENGINES['gtkhtml2']:
return GtkHtmlRenderingView(w3af, parentView)
raise BaseFrameworkException(NO_RENDER_MSG)
| [
"def",
"getRenderingView",
"(",
"w3af",
",",
"parentView",
")",
":",
"if",
"RENDERING_ENGINES",
"[",
"'webkit'",
"]",
":",
"return",
"WebKitRenderingView",
"(",
"w3af",
",",
"parentView",
")",
"elif",
"RENDERING_ENGINES",
"[",
"'moz'",
"]",
":",
"return",
"MozRenderingView",
"(",
"w3af",
",",
"parentView",
")",
"elif",
"RENDERING_ENGINES",
"[",
"'gtkhtml2'",
"]",
":",
"return",
"GtkHtmlRenderingView",
"(",
"w3af",
",",
"parentView",
")",
"raise",
"BaseFrameworkException",
"(",
"NO_RENDER_MSG",
")"
] | return renderingview with best web engine or raise exception . | train | false |
20,892 | def upsize_quota_delta(context, new_flavor, old_flavor):
return resize_quota_delta(context, new_flavor, old_flavor, 1, 1)
| [
"def",
"upsize_quota_delta",
"(",
"context",
",",
"new_flavor",
",",
"old_flavor",
")",
":",
"return",
"resize_quota_delta",
"(",
"context",
",",
"new_flavor",
",",
"old_flavor",
",",
"1",
",",
"1",
")"
] | calculate deltas required to adjust quota for an instance upsize . | train | false |
20,893 | def get_port_by_quantum_tag(cluster, lswitch_uuid, quantum_port_id):
uri = _build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=lswitch_uuid, fields='uuid', filters={'tag': quantum_port_id, 'tag_scope': 'q_port_id'})
LOG.debug((_("Looking for port with q_port_id tag '%(quantum_port_id)s' on: '%(lswitch_uuid)s'") % {'quantum_port_id': quantum_port_id, 'lswitch_uuid': lswitch_uuid}))
try:
res_obj = do_single_request(HTTP_GET, uri, cluster=cluster)
except Exception:
LOG.exception(_('An exception occurred while querying NVP ports'))
raise
res = json.loads(res_obj)
num_results = len(res['results'])
if (num_results >= 1):
if (num_results > 1):
LOG.warn((_("Found '%(num_ports)d' ports with q_port_id tag: '%(quantum_port_id)s'. Only 1 was expected.") % {'num_ports': num_results, 'quantum_port_id': quantum_port_id}))
return res['results'][0]
| [
"def",
"get_port_by_quantum_tag",
"(",
"cluster",
",",
"lswitch_uuid",
",",
"quantum_port_id",
")",
":",
"uri",
"=",
"_build_uri_path",
"(",
"LSWITCHPORT_RESOURCE",
",",
"parent_resource_id",
"=",
"lswitch_uuid",
",",
"fields",
"=",
"'uuid'",
",",
"filters",
"=",
"{",
"'tag'",
":",
"quantum_port_id",
",",
"'tag_scope'",
":",
"'q_port_id'",
"}",
")",
"LOG",
".",
"debug",
"(",
"(",
"_",
"(",
"\"Looking for port with q_port_id tag '%(quantum_port_id)s' on: '%(lswitch_uuid)s'\"",
")",
"%",
"{",
"'quantum_port_id'",
":",
"quantum_port_id",
",",
"'lswitch_uuid'",
":",
"lswitch_uuid",
"}",
")",
")",
"try",
":",
"res_obj",
"=",
"do_single_request",
"(",
"HTTP_GET",
",",
"uri",
",",
"cluster",
"=",
"cluster",
")",
"except",
"Exception",
":",
"LOG",
".",
"exception",
"(",
"_",
"(",
"'An exception occurred while querying NVP ports'",
")",
")",
"raise",
"res",
"=",
"json",
".",
"loads",
"(",
"res_obj",
")",
"num_results",
"=",
"len",
"(",
"res",
"[",
"'results'",
"]",
")",
"if",
"(",
"num_results",
">=",
"1",
")",
":",
"if",
"(",
"num_results",
">",
"1",
")",
":",
"LOG",
".",
"warn",
"(",
"(",
"_",
"(",
"\"Found '%(num_ports)d' ports with q_port_id tag: '%(quantum_port_id)s'. Only 1 was expected.\"",
")",
"%",
"{",
"'num_ports'",
":",
"num_results",
",",
"'quantum_port_id'",
":",
"quantum_port_id",
"}",
")",
")",
"return",
"res",
"[",
"'results'",
"]",
"[",
"0",
"]"
] | return the nvp uuid of the logical port with tag q_port_id equal to quantum_port_id or none if the port is not found . | train | false |
20,894 | def keypair_field_data(request, include_empty_option=False):
keypair_list = []
try:
keypairs = api.nova.keypair_list(request)
keypair_list = [(kp.name, kp.name) for kp in keypairs]
except Exception:
exceptions.handle(request, _('Unable to retrieve key pairs.'))
if (not keypair_list):
if include_empty_option:
return [('', _('No key pairs available'))]
return []
if include_empty_option:
return ([('', _('Select a key pair'))] + keypair_list)
return keypair_list
| [
"def",
"keypair_field_data",
"(",
"request",
",",
"include_empty_option",
"=",
"False",
")",
":",
"keypair_list",
"=",
"[",
"]",
"try",
":",
"keypairs",
"=",
"api",
".",
"nova",
".",
"keypair_list",
"(",
"request",
")",
"keypair_list",
"=",
"[",
"(",
"kp",
".",
"name",
",",
"kp",
".",
"name",
")",
"for",
"kp",
"in",
"keypairs",
"]",
"except",
"Exception",
":",
"exceptions",
".",
"handle",
"(",
"request",
",",
"_",
"(",
"'Unable to retrieve key pairs.'",
")",
")",
"if",
"(",
"not",
"keypair_list",
")",
":",
"if",
"include_empty_option",
":",
"return",
"[",
"(",
"''",
",",
"_",
"(",
"'No key pairs available'",
")",
")",
"]",
"return",
"[",
"]",
"if",
"include_empty_option",
":",
"return",
"(",
"[",
"(",
"''",
",",
"_",
"(",
"'Select a key pair'",
")",
")",
"]",
"+",
"keypair_list",
")",
"return",
"keypair_list"
] | returns a list of tuples of all keypairs . | train | true |
20,896 | def get_pkg_srv_specs(package):
types = list_srv_types(package, False)
specs = []
failures = []
for t in types:
try:
spec = load_from_file(srv_file(package, t), package)
specs.append(spec)
except Exception as e:
failures.append(t)
sys.stderr.write(('ERROR: unable to load %s\n' % t))
return (specs, failures)
| [
"def",
"get_pkg_srv_specs",
"(",
"package",
")",
":",
"types",
"=",
"list_srv_types",
"(",
"package",
",",
"False",
")",
"specs",
"=",
"[",
"]",
"failures",
"=",
"[",
"]",
"for",
"t",
"in",
"types",
":",
"try",
":",
"spec",
"=",
"load_from_file",
"(",
"srv_file",
"(",
"package",
",",
"t",
")",
",",
"package",
")",
"specs",
".",
"append",
"(",
"spec",
")",
"except",
"Exception",
"as",
"e",
":",
"failures",
".",
"append",
"(",
"t",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"(",
"'ERROR: unable to load %s\\n'",
"%",
"t",
")",
")",
"return",
"(",
"specs",
",",
"failures",
")"
] | list all service specs that a package contains . | train | false |
20,898 | def list_ipsecpolicies(profile=None):
conn = _auth(profile)
return conn.list_ipsecpolicies()
| [
"def",
"list_ipsecpolicies",
"(",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_auth",
"(",
"profile",
")",
"return",
"conn",
".",
"list_ipsecpolicies",
"(",
")"
] | fetches a list of all configured ipsecpolicies for a tenant cli example: . | train | false |
20,899 | def _get_cmd():
if (__grains__['os_family'] == 'RedHat'):
return 'alternatives'
return 'update-alternatives'
| [
"def",
"_get_cmd",
"(",
")",
":",
"if",
"(",
"__grains__",
"[",
"'os_family'",
"]",
"==",
"'RedHat'",
")",
":",
"return",
"'alternatives'",
"return",
"'update-alternatives'"
] | alternatives commands differ across distributions . | train | false |
20,901 | def _filter_keys(item, keys):
return {k: v for (k, v) in item.items() if (k in keys)}
| [
"def",
"_filter_keys",
"(",
"item",
",",
"keys",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"item",
".",
"items",
"(",
")",
"if",
"(",
"k",
"in",
"keys",
")",
"}"
] | filters all model attributes except for keys . item is a dict . | train | false |