id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
21,860 | def add_constant(builder, val, const):
return builder.add(val, Constant.int(TIMEDELTA64, const))
| [
"def",
"add_constant",
"(",
"builder",
",",
"val",
",",
"const",
")",
":",
"return",
"builder",
".",
"add",
"(",
"val",
",",
"Constant",
".",
"int",
"(",
"TIMEDELTA64",
",",
"const",
")",
")"
] | adds a column of ones to an array parameters data : array-like data is the column-ordered design matrix prepend : bool if true . | train | false |
21,863 | def github_pull_request_merge(registry, xml_parent, data):
osb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.ghprb.GhprbPullRequestMerge')
mapping = [('only-admins-merge', 'onlyAdminsMerge', 'false'), ('disallow-own-code', 'disallowOwnCode', 'false'), ('merge-comment', 'mergeComment', ''), ('fail-on-non-merge', 'failOnNonMerge', 'false'), ('delete-on-merge', 'deleteOnMerge', 'false')]
helpers.convert_mapping_to_xml(osb, data, mapping, fail_required=True)
| [
"def",
"github_pull_request_merge",
"(",
"registry",
",",
"xml_parent",
",",
"data",
")",
":",
"osb",
"=",
"XML",
".",
"SubElement",
"(",
"xml_parent",
",",
"'org.jenkinsci.plugins.ghprb.GhprbPullRequestMerge'",
")",
"mapping",
"=",
"[",
"(",
"'only-admins-merge'",
",",
"'onlyAdminsMerge'",
",",
"'false'",
")",
",",
"(",
"'disallow-own-code'",
",",
"'disallowOwnCode'",
",",
"'false'",
")",
",",
"(",
"'merge-comment'",
",",
"'mergeComment'",
",",
"''",
")",
",",
"(",
"'fail-on-non-merge'",
",",
"'failOnNonMerge'",
",",
"'false'",
")",
",",
"(",
"'delete-on-merge'",
",",
"'deleteOnMerge'",
",",
"'false'",
")",
"]",
"helpers",
".",
"convert_mapping_to_xml",
"(",
"osb",
",",
"data",
",",
"mapping",
",",
"fail_required",
"=",
"True",
")"
] | yaml: github-pull-request-merge this action merges the pull request that triggered the build requires the jenkins :jenkins-wiki:github pull request builder plugin <github+pull+request+builder+plugin> . | train | false |
21,864 | def fix_sys_path(extra_extra_paths=()):
sys.path[1:1] = EXTRA_PATHS
| [
"def",
"fix_sys_path",
"(",
"extra_extra_paths",
"=",
"(",
")",
")",
":",
"sys",
".",
"path",
"[",
"1",
":",
"1",
"]",
"=",
"EXTRA_PATHS"
] | fix the sys . | train | false |
21,865 | def func3():
print 'Whatever'
| [
"def",
"func3",
"(",
")",
":",
"print",
"'Whatever'"
] | simple test function . | train | false |
21,866 | def get_presence(jid, from_jid=None, get_show=False):
if (not jid):
raise InvalidJidError()
request = xmpp_service_pb.PresenceRequest()
response = xmpp_service_pb.PresenceResponse()
request.set_jid(_to_str(jid))
if from_jid:
request.set_from_jid(_to_str(from_jid))
try:
apiproxy_stub_map.MakeSyncCall('xmpp', 'GetPresence', request, response)
except apiproxy_errors.ApplicationError as e:
if (e.application_error == xmpp_service_pb.XmppServiceError.INVALID_JID):
raise InvalidJidError()
else:
raise Error()
if get_show:
show = None
if response.has_presence():
presence = response.presence()
if (presence == xmpp_service_pb.PresenceResponse.NORMAL):
show = PRESENCE_SHOW_NONE
elif (presence == xmpp_service_pb.PresenceResponse.AWAY):
show = PRESENCE_SHOW_AWAY
elif (presence == xmpp_service_pb.PresenceResponse.DO_NOT_DISTURB):
show = PRESENCE_SHOW_DND
elif (presence == xmpp_service_pb.PresenceResponse.CHAT):
show = PRESENCE_SHOW_CHAT
elif (presence == xmpp_service_pb.PresenceResponse.EXTENDED_AWAY):
show = PRESENCE_SHOW_XA
return (bool(response.is_available()), show)
else:
return bool(response.is_available())
| [
"def",
"get_presence",
"(",
"jid",
",",
"from_jid",
"=",
"None",
",",
"get_show",
"=",
"False",
")",
":",
"if",
"(",
"not",
"jid",
")",
":",
"raise",
"InvalidJidError",
"(",
")",
"request",
"=",
"xmpp_service_pb",
".",
"PresenceRequest",
"(",
")",
"response",
"=",
"xmpp_service_pb",
".",
"PresenceResponse",
"(",
")",
"request",
".",
"set_jid",
"(",
"_to_str",
"(",
"jid",
")",
")",
"if",
"from_jid",
":",
"request",
".",
"set_from_jid",
"(",
"_to_str",
"(",
"from_jid",
")",
")",
"try",
":",
"apiproxy_stub_map",
".",
"MakeSyncCall",
"(",
"'xmpp'",
",",
"'GetPresence'",
",",
"request",
",",
"response",
")",
"except",
"apiproxy_errors",
".",
"ApplicationError",
"as",
"e",
":",
"if",
"(",
"e",
".",
"application_error",
"==",
"xmpp_service_pb",
".",
"XmppServiceError",
".",
"INVALID_JID",
")",
":",
"raise",
"InvalidJidError",
"(",
")",
"else",
":",
"raise",
"Error",
"(",
")",
"if",
"get_show",
":",
"show",
"=",
"None",
"if",
"response",
".",
"has_presence",
"(",
")",
":",
"presence",
"=",
"response",
".",
"presence",
"(",
")",
"if",
"(",
"presence",
"==",
"xmpp_service_pb",
".",
"PresenceResponse",
".",
"NORMAL",
")",
":",
"show",
"=",
"PRESENCE_SHOW_NONE",
"elif",
"(",
"presence",
"==",
"xmpp_service_pb",
".",
"PresenceResponse",
".",
"AWAY",
")",
":",
"show",
"=",
"PRESENCE_SHOW_AWAY",
"elif",
"(",
"presence",
"==",
"xmpp_service_pb",
".",
"PresenceResponse",
".",
"DO_NOT_DISTURB",
")",
":",
"show",
"=",
"PRESENCE_SHOW_DND",
"elif",
"(",
"presence",
"==",
"xmpp_service_pb",
".",
"PresenceResponse",
".",
"CHAT",
")",
":",
"show",
"=",
"PRESENCE_SHOW_CHAT",
"elif",
"(",
"presence",
"==",
"xmpp_service_pb",
".",
"PresenceResponse",
".",
"EXTENDED_AWAY",
")",
":",
"show",
"=",
"PRESENCE_SHOW_XA",
"return",
"(",
"bool",
"(",
"response",
".",
"is_available",
"(",
")",
")",
",",
"show",
")",
"else",
":",
"return",
"bool",
"(",
"response",
".",
"is_available",
"(",
")",
")"
] | gets the presence for a jid . | train | false |
21,867 | def _get_safelint_counts(filename):
report_contents = _get_report_contents(filename)
rule_count_regex = re.compile('^(?P<rule_id>[a-z-]+):\\s+(?P<count>\\d+) violations', re.MULTILINE)
total_count_regex = re.compile('^(?P<count>\\d+) violations total', re.MULTILINE)
violations = {'rules': {}}
for violation_match in rule_count_regex.finditer(report_contents):
try:
violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count'))
except ValueError:
violations['rules'][violation_match.group('rule_id')] = None
try:
violations['total'] = int(total_count_regex.search(report_contents).group('count'))
except (AttributeError, ValueError):
violations['total'] = None
return violations
| [
"def",
"_get_safelint_counts",
"(",
"filename",
")",
":",
"report_contents",
"=",
"_get_report_contents",
"(",
"filename",
")",
"rule_count_regex",
"=",
"re",
".",
"compile",
"(",
"'^(?P<rule_id>[a-z-]+):\\\\s+(?P<count>\\\\d+) violations'",
",",
"re",
".",
"MULTILINE",
")",
"total_count_regex",
"=",
"re",
".",
"compile",
"(",
"'^(?P<count>\\\\d+) violations total'",
",",
"re",
".",
"MULTILINE",
")",
"violations",
"=",
"{",
"'rules'",
":",
"{",
"}",
"}",
"for",
"violation_match",
"in",
"rule_count_regex",
".",
"finditer",
"(",
"report_contents",
")",
":",
"try",
":",
"violations",
"[",
"'rules'",
"]",
"[",
"violation_match",
".",
"group",
"(",
"'rule_id'",
")",
"]",
"=",
"int",
"(",
"violation_match",
".",
"group",
"(",
"'count'",
")",
")",
"except",
"ValueError",
":",
"violations",
"[",
"'rules'",
"]",
"[",
"violation_match",
".",
"group",
"(",
"'rule_id'",
")",
"]",
"=",
"None",
"try",
":",
"violations",
"[",
"'total'",
"]",
"=",
"int",
"(",
"total_count_regex",
".",
"search",
"(",
"report_contents",
")",
".",
"group",
"(",
"'count'",
")",
")",
"except",
"(",
"AttributeError",
",",
"ValueError",
")",
":",
"violations",
"[",
"'total'",
"]",
"=",
"None",
"return",
"violations"
] | this returns a dict of violations from the safelint report . | train | false |
21,869 | def hist_bins(bins, vals):
hist = zeros(len(bins))
j = 0
for i in vals:
while (bins[j] < i):
j += 1
hist[j] += 1
return (asarray(bins), hist)
| [
"def",
"hist_bins",
"(",
"bins",
",",
"vals",
")",
":",
"hist",
"=",
"zeros",
"(",
"len",
"(",
"bins",
")",
")",
"j",
"=",
"0",
"for",
"i",
"in",
"vals",
":",
"while",
"(",
"bins",
"[",
"j",
"]",
"<",
"i",
")",
":",
"j",
"+=",
"1",
"hist",
"[",
"j",
"]",
"+=",
"1",
"return",
"(",
"asarray",
"(",
"bins",
")",
",",
"hist",
")"
] | creates a histogram given the bins and the vals . | train | false |
21,870 | def CL_scaling_vector(x, g, lb, ub):
v = np.ones_like(x)
dv = np.zeros_like(x)
mask = ((g < 0) & np.isfinite(ub))
v[mask] = (ub[mask] - x[mask])
dv[mask] = (-1)
mask = ((g > 0) & np.isfinite(lb))
v[mask] = (x[mask] - lb[mask])
dv[mask] = 1
return (v, dv)
| [
"def",
"CL_scaling_vector",
"(",
"x",
",",
"g",
",",
"lb",
",",
"ub",
")",
":",
"v",
"=",
"np",
".",
"ones_like",
"(",
"x",
")",
"dv",
"=",
"np",
".",
"zeros_like",
"(",
"x",
")",
"mask",
"=",
"(",
"(",
"g",
"<",
"0",
")",
"&",
"np",
".",
"isfinite",
"(",
"ub",
")",
")",
"v",
"[",
"mask",
"]",
"=",
"(",
"ub",
"[",
"mask",
"]",
"-",
"x",
"[",
"mask",
"]",
")",
"dv",
"[",
"mask",
"]",
"=",
"(",
"-",
"1",
")",
"mask",
"=",
"(",
"(",
"g",
">",
"0",
")",
"&",
"np",
".",
"isfinite",
"(",
"lb",
")",
")",
"v",
"[",
"mask",
"]",
"=",
"(",
"x",
"[",
"mask",
"]",
"-",
"lb",
"[",
"mask",
"]",
")",
"dv",
"[",
"mask",
"]",
"=",
"1",
"return",
"(",
"v",
",",
"dv",
")"
] | compute coleman-li scaling vector and its derivatives . | train | false |
21,871 | def match_ts(file):
match = TS_RE.search(file)
if (not match):
return (False, '', 0)
num = int(match.group(1))
try:
set = file[:match.start()]
set += '.ts'
except:
set = ''
return (match, set, num)
| [
"def",
"match_ts",
"(",
"file",
")",
":",
"match",
"=",
"TS_RE",
".",
"search",
"(",
"file",
")",
"if",
"(",
"not",
"match",
")",
":",
"return",
"(",
"False",
",",
"''",
",",
"0",
")",
"num",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"try",
":",
"set",
"=",
"file",
"[",
":",
"match",
".",
"start",
"(",
")",
"]",
"set",
"+=",
"'.ts'",
"except",
":",
"set",
"=",
"''",
"return",
"(",
"match",
",",
"set",
",",
"num",
")"
] | return true if file is a joinable ts file . | train | false |
21,872 | def SetCampaignTargetingCriteria(client, campaign):
campaign_criterion_service = client.GetService('CampaignCriterionService')
criteria = [{'xsi_type': 'Location', 'id': 21137}, {'xsi_type': 'Location', 'id': 2484}, {'xsi_type': 'Language', 'id': 1000}, {'xsi_type': 'Language', 'id': 1003}]
operations = [{'operator': 'ADD', 'operand': {'campaignId': campaign['id'], 'criterion': criterion}} for criterion in criteria]
response = campaign_criterion_service.mutate(operations)
if (response and ('value' in response)):
for criterion in response['value']:
print ('Campaign criteria of type "%s" and id "%s" was added.' % (criterion['criterion']['type'], criterion['criterion']['id']))
| [
"def",
"SetCampaignTargetingCriteria",
"(",
"client",
",",
"campaign",
")",
":",
"campaign_criterion_service",
"=",
"client",
".",
"GetService",
"(",
"'CampaignCriterionService'",
")",
"criteria",
"=",
"[",
"{",
"'xsi_type'",
":",
"'Location'",
",",
"'id'",
":",
"21137",
"}",
",",
"{",
"'xsi_type'",
":",
"'Location'",
",",
"'id'",
":",
"2484",
"}",
",",
"{",
"'xsi_type'",
":",
"'Language'",
",",
"'id'",
":",
"1000",
"}",
",",
"{",
"'xsi_type'",
":",
"'Language'",
",",
"'id'",
":",
"1003",
"}",
"]",
"operations",
"=",
"[",
"{",
"'operator'",
":",
"'ADD'",
",",
"'operand'",
":",
"{",
"'campaignId'",
":",
"campaign",
"[",
"'id'",
"]",
",",
"'criterion'",
":",
"criterion",
"}",
"}",
"for",
"criterion",
"in",
"criteria",
"]",
"response",
"=",
"campaign_criterion_service",
".",
"mutate",
"(",
"operations",
")",
"if",
"(",
"response",
"and",
"(",
"'value'",
"in",
"response",
")",
")",
":",
"for",
"criterion",
"in",
"response",
"[",
"'value'",
"]",
":",
"print",
"(",
"'Campaign criteria of type \"%s\" and id \"%s\" was added.'",
"%",
"(",
"criterion",
"[",
"'criterion'",
"]",
"[",
"'type'",
"]",
",",
"criterion",
"[",
"'criterion'",
"]",
"[",
"'id'",
"]",
")",
")"
] | sets targeting criteria for the given campaign . | train | true |
21,873 | def site_location():
return s3_rest_controller()
| [
"def",
"site_location",
"(",
")",
":",
"return",
"s3_rest_controller",
"(",
")"
] | restful crud controller . | train | false |
21,874 | def ManifestFromXML(xmlstr):
manifest = Manifest()
manifest.parse_string(xmlstr)
return manifest
| [
"def",
"ManifestFromXML",
"(",
"xmlstr",
")",
":",
"manifest",
"=",
"Manifest",
"(",
")",
"manifest",
".",
"parse_string",
"(",
"xmlstr",
")",
"return",
"manifest"
] | create and return manifest instance from xml . | train | false |
21,876 | def get_timeout(gross_time, start, end, precision, split_range):
if split_range:
top_num = (float(end) / precision)
bottom_num = (float(start) / precision)
if ((gross_time % 2) == 0):
timeout = (top_num - (float(gross_time) / precision))
else:
timeout = (bottom_num + (float(gross_time) / precision))
else:
timeout = (float(gross_time) / precision)
return timeout
| [
"def",
"get_timeout",
"(",
"gross_time",
",",
"start",
",",
"end",
",",
"precision",
",",
"split_range",
")",
":",
"if",
"split_range",
":",
"top_num",
"=",
"(",
"float",
"(",
"end",
")",
"/",
"precision",
")",
"bottom_num",
"=",
"(",
"float",
"(",
"start",
")",
"/",
"precision",
")",
"if",
"(",
"(",
"gross_time",
"%",
"2",
")",
"==",
"0",
")",
":",
"timeout",
"=",
"(",
"top_num",
"-",
"(",
"float",
"(",
"gross_time",
")",
"/",
"precision",
")",
")",
"else",
":",
"timeout",
"=",
"(",
"bottom_num",
"+",
"(",
"float",
"(",
"gross_time",
")",
"/",
"precision",
")",
")",
"else",
":",
"timeout",
"=",
"(",
"float",
"(",
"gross_time",
")",
"/",
"precision",
")",
"return",
"timeout"
] | a way to generate varying timeouts based on ranges . | train | false |
21,877 | def input_dir(self, dirname):
dirname = dirname.rstrip('/')
if self.excluded(dirname):
return 0
counters = self.options.report.counters
verbose = self.options.verbose
filepatterns = self.options.filename
runner = self.runner
for (root, dirs, files) in os.walk(dirname):
if verbose:
print ('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
if self.excluded(os.path.join(root, subdir)):
dirs.remove(subdir)
for filename in sorted(files):
if (pep8.filename_match(filename, filepatterns) and (not self.excluded(filename))):
runner(os.path.join(root, filename))
| [
"def",
"input_dir",
"(",
"self",
",",
"dirname",
")",
":",
"dirname",
"=",
"dirname",
".",
"rstrip",
"(",
"'/'",
")",
"if",
"self",
".",
"excluded",
"(",
"dirname",
")",
":",
"return",
"0",
"counters",
"=",
"self",
".",
"options",
".",
"report",
".",
"counters",
"verbose",
"=",
"self",
".",
"options",
".",
"verbose",
"filepatterns",
"=",
"self",
".",
"options",
".",
"filename",
"runner",
"=",
"self",
".",
"runner",
"for",
"(",
"root",
",",
"dirs",
",",
"files",
")",
"in",
"os",
".",
"walk",
"(",
"dirname",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'directory '",
"+",
"root",
")",
"counters",
"[",
"'directories'",
"]",
"+=",
"1",
"for",
"subdir",
"in",
"sorted",
"(",
"dirs",
")",
":",
"if",
"self",
".",
"excluded",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"subdir",
")",
")",
":",
"dirs",
".",
"remove",
"(",
"subdir",
")",
"for",
"filename",
"in",
"sorted",
"(",
"files",
")",
":",
"if",
"(",
"pep8",
".",
"filename_match",
"(",
"filename",
",",
"filepatterns",
")",
"and",
"(",
"not",
"self",
".",
"excluded",
"(",
"filename",
")",
")",
")",
":",
"runner",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
")"
] | check all python source files in this directory and all subdirectories . | train | true |
21,878 | def pathdirs():
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath((dir or '.'))
normdir = os.path.normcase(dir)
if ((normdir not in normdirs) and os.path.isdir(dir)):
dirs.append(dir)
normdirs.append(normdir)
return dirs
| [
"def",
"pathdirs",
"(",
")",
":",
"dirs",
"=",
"[",
"]",
"normdirs",
"=",
"[",
"]",
"for",
"dir",
"in",
"sys",
".",
"path",
":",
"dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"(",
"dir",
"or",
"'.'",
")",
")",
"normdir",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"dir",
")",
"if",
"(",
"(",
"normdir",
"not",
"in",
"normdirs",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"dir",
")",
")",
":",
"dirs",
".",
"append",
"(",
"dir",
")",
"normdirs",
".",
"append",
"(",
"normdir",
")",
"return",
"dirs"
] | convert sys . | train | false |
21,879 | def get_capture_stream():
return getattr(local_context, u'output', None)
| [
"def",
"get_capture_stream",
"(",
")",
":",
"return",
"getattr",
"(",
"local_context",
",",
"u'output'",
",",
"None",
")"
] | if output is currently being redirected to a stream . | train | false |
21,881 | def category_detail(request, slug, template_name='blog/category_detail.html', **kwargs):
category = get_object_or_404(Category, slug__iexact=slug)
return list_detail.object_list(request, queryset=category.post_set.published(), extra_context={'category': category}, template_name=template_name, **kwargs)
| [
"def",
"category_detail",
"(",
"request",
",",
"slug",
",",
"template_name",
"=",
"'blog/category_detail.html'",
",",
"**",
"kwargs",
")",
":",
"category",
"=",
"get_object_or_404",
"(",
"Category",
",",
"slug__iexact",
"=",
"slug",
")",
"return",
"list_detail",
".",
"object_list",
"(",
"request",
",",
"queryset",
"=",
"category",
".",
"post_set",
".",
"published",
"(",
")",
",",
"extra_context",
"=",
"{",
"'category'",
":",
"category",
"}",
",",
"template_name",
"=",
"template_name",
",",
"**",
"kwargs",
")"
] | category detail template: blog/category_detail . | train | false |
21,882 | def is_pointer_tag(xml_obj):
if (xml_obj.tag != 'course'):
expected_attr = set(['url_name'])
else:
expected_attr = set(['url_name', 'course', 'org'])
actual_attr = set(xml_obj.attrib.keys())
has_text = ((xml_obj.text is not None) and (len(xml_obj.text.strip()) > 0))
return ((len(xml_obj) == 0) and (actual_attr == expected_attr) and (not has_text))
| [
"def",
"is_pointer_tag",
"(",
"xml_obj",
")",
":",
"if",
"(",
"xml_obj",
".",
"tag",
"!=",
"'course'",
")",
":",
"expected_attr",
"=",
"set",
"(",
"[",
"'url_name'",
"]",
")",
"else",
":",
"expected_attr",
"=",
"set",
"(",
"[",
"'url_name'",
",",
"'course'",
",",
"'org'",
"]",
")",
"actual_attr",
"=",
"set",
"(",
"xml_obj",
".",
"attrib",
".",
"keys",
"(",
")",
")",
"has_text",
"=",
"(",
"(",
"xml_obj",
".",
"text",
"is",
"not",
"None",
")",
"and",
"(",
"len",
"(",
"xml_obj",
".",
"text",
".",
"strip",
"(",
")",
")",
">",
"0",
")",
")",
"return",
"(",
"(",
"len",
"(",
"xml_obj",
")",
"==",
"0",
")",
"and",
"(",
"actual_attr",
"==",
"expected_attr",
")",
"and",
"(",
"not",
"has_text",
")",
")"
] | check if xml_obj is a pointer tag: <blah url_name="something" /> . | train | false |
21,884 | def getBooleanFromDictionaryDefault(defaultBoolean, dictionary, key):
if (key not in dictionary):
return defaultBoolean
return getBooleanFromValue(dictionary[key])
| [
"def",
"getBooleanFromDictionaryDefault",
"(",
"defaultBoolean",
",",
"dictionary",
",",
"key",
")",
":",
"if",
"(",
"key",
"not",
"in",
"dictionary",
")",
":",
"return",
"defaultBoolean",
"return",
"getBooleanFromValue",
"(",
"dictionary",
"[",
"key",
"]",
")"
] | get boolean from the dictionary and key . | train | false |
21,885 | def test_missing_keyword_argument_value():
try:
can_compile(u'((fn [x] x) :x)')
except HyTypeError as e:
assert (e.message == u'Keyword argument :x needs a value.')
else:
assert False
| [
"def",
"test_missing_keyword_argument_value",
"(",
")",
":",
"try",
":",
"can_compile",
"(",
"u'((fn [x] x) :x)'",
")",
"except",
"HyTypeError",
"as",
"e",
":",
"assert",
"(",
"e",
".",
"message",
"==",
"u'Keyword argument :x needs a value.'",
")",
"else",
":",
"assert",
"False"
] | ensure the compiler chokes on missing keyword argument values . | train | false |
21,886 | def cleanup_tmpdir(tmpdir=None, keep_so=False):
tmpdir = (tmpdir or _caller_dir_pycache())
try:
filelist = os.listdir(tmpdir)
except OSError:
return
if keep_so:
suffix = '.c'
else:
suffix = _get_so_suffixes()[0].lower()
for fn in filelist:
if (fn.lower().startswith('_cffi_') and (fn.lower().endswith(suffix) or fn.lower().endswith('.c'))):
try:
os.unlink(os.path.join(tmpdir, fn))
except OSError:
pass
clean_dir = [os.path.join(tmpdir, 'build')]
for dir in clean_dir:
try:
for fn in os.listdir(dir):
fn = os.path.join(dir, fn)
if os.path.isdir(fn):
clean_dir.append(fn)
else:
os.unlink(fn)
except OSError:
pass
| [
"def",
"cleanup_tmpdir",
"(",
"tmpdir",
"=",
"None",
",",
"keep_so",
"=",
"False",
")",
":",
"tmpdir",
"=",
"(",
"tmpdir",
"or",
"_caller_dir_pycache",
"(",
")",
")",
"try",
":",
"filelist",
"=",
"os",
".",
"listdir",
"(",
"tmpdir",
")",
"except",
"OSError",
":",
"return",
"if",
"keep_so",
":",
"suffix",
"=",
"'.c'",
"else",
":",
"suffix",
"=",
"_get_so_suffixes",
"(",
")",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"for",
"fn",
"in",
"filelist",
":",
"if",
"(",
"fn",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'_cffi_'",
")",
"and",
"(",
"fn",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"suffix",
")",
"or",
"fn",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.c'",
")",
")",
")",
":",
"try",
":",
"os",
".",
"unlink",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmpdir",
",",
"fn",
")",
")",
"except",
"OSError",
":",
"pass",
"clean_dir",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"tmpdir",
",",
"'build'",
")",
"]",
"for",
"dir",
"in",
"clean_dir",
":",
"try",
":",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"dir",
")",
":",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"fn",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"fn",
")",
":",
"clean_dir",
".",
"append",
"(",
"fn",
")",
"else",
":",
"os",
".",
"unlink",
"(",
"fn",
")",
"except",
"OSError",
":",
"pass"
] | clean up the temporary directory by removing all files in it called _cffi_* . | train | false |
21,887 | def unscrub(txt):
return txt.replace(u'_', u' ').replace(u'-', u' ').title()
| [
"def",
"unscrub",
"(",
"txt",
")",
":",
"return",
"txt",
".",
"replace",
"(",
"u'_'",
",",
"u' '",
")",
".",
"replace",
"(",
"u'-'",
",",
"u' '",
")",
".",
"title",
"(",
")"
] | returns titlified string . | train | false |
21,888 | def test_unique_form_order(session):
species_by_form_order = {}
query = session.query(tables.PokemonForm)
query = query.options(joinedload('pokemon.species'))
for form in query:
try:
previous_species = species_by_form_order[form.order]
except KeyError:
species_by_form_order[form.order] = form.species
else:
assert (previous_species == form.species), ('PokemonForm.order == %s is used for %s and %s' % (form.order, species_by_form_order[form.order].name, form.species.name))
| [
"def",
"test_unique_form_order",
"(",
"session",
")",
":",
"species_by_form_order",
"=",
"{",
"}",
"query",
"=",
"session",
".",
"query",
"(",
"tables",
".",
"PokemonForm",
")",
"query",
"=",
"query",
".",
"options",
"(",
"joinedload",
"(",
"'pokemon.species'",
")",
")",
"for",
"form",
"in",
"query",
":",
"try",
":",
"previous_species",
"=",
"species_by_form_order",
"[",
"form",
".",
"order",
"]",
"except",
"KeyError",
":",
"species_by_form_order",
"[",
"form",
".",
"order",
"]",
"=",
"form",
".",
"species",
"else",
":",
"assert",
"(",
"previous_species",
"==",
"form",
".",
"species",
")",
",",
"(",
"'PokemonForm.order == %s is used for %s and %s'",
"%",
"(",
"form",
".",
"order",
",",
"species_by_form_order",
"[",
"form",
".",
"order",
"]",
".",
"name",
",",
"form",
".",
"species",
".",
"name",
")",
")"
] | check that tone pokemonform . | train | false |
21,890 | def prettytuple(t):
return (('(' + ', '.join((str(f) for f in t))) + ')')
| [
"def",
"prettytuple",
"(",
"t",
")",
":",
"return",
"(",
"(",
"'('",
"+",
"', '",
".",
"join",
"(",
"(",
"str",
"(",
"f",
")",
"for",
"f",
"in",
"t",
")",
")",
")",
"+",
"')'",
")"
] | use the string representation of objects in a tuple . | train | false |
21,891 | def get_instances_to_sync(context, updated_since=None, project_id=None, deleted=True, shuffle=False, uuids_only=False):
filters = {}
if (updated_since is not None):
filters['changes-since'] = updated_since
if (project_id is not None):
filters['project_id'] = project_id
if (not deleted):
filters['deleted'] = False
instances = db.instance_get_all_by_filters(context, filters, 'deleted', 'asc')
if shuffle:
random.shuffle(instances)
for instance in instances:
if uuids_only:
(yield instance['uuid'])
else:
(yield instance)
| [
"def",
"get_instances_to_sync",
"(",
"context",
",",
"updated_since",
"=",
"None",
",",
"project_id",
"=",
"None",
",",
"deleted",
"=",
"True",
",",
"shuffle",
"=",
"False",
",",
"uuids_only",
"=",
"False",
")",
":",
"filters",
"=",
"{",
"}",
"if",
"(",
"updated_since",
"is",
"not",
"None",
")",
":",
"filters",
"[",
"'changes-since'",
"]",
"=",
"updated_since",
"if",
"(",
"project_id",
"is",
"not",
"None",
")",
":",
"filters",
"[",
"'project_id'",
"]",
"=",
"project_id",
"if",
"(",
"not",
"deleted",
")",
":",
"filters",
"[",
"'deleted'",
"]",
"=",
"False",
"instances",
"=",
"db",
".",
"instance_get_all_by_filters",
"(",
"context",
",",
"filters",
",",
"'deleted'",
",",
"'asc'",
")",
"if",
"shuffle",
":",
"random",
".",
"shuffle",
"(",
"instances",
")",
"for",
"instance",
"in",
"instances",
":",
"if",
"uuids_only",
":",
"(",
"yield",
"instance",
"[",
"'uuid'",
"]",
")",
"else",
":",
"(",
"yield",
"instance",
")"
] | return a generator that will return a list of active and deleted instances to sync with parent cells . | train | false |
21,892 | def _iter_lexerclasses():
for (module_name, name, _, _, _) in LEXERS.itervalues():
if (name not in _lexer_cache):
_load_lexers(module_name)
(yield _lexer_cache[name])
for lexer in find_plugin_lexers():
(yield lexer)
| [
"def",
"_iter_lexerclasses",
"(",
")",
":",
"for",
"(",
"module_name",
",",
"name",
",",
"_",
",",
"_",
",",
"_",
")",
"in",
"LEXERS",
".",
"itervalues",
"(",
")",
":",
"if",
"(",
"name",
"not",
"in",
"_lexer_cache",
")",
":",
"_load_lexers",
"(",
"module_name",
")",
"(",
"yield",
"_lexer_cache",
"[",
"name",
"]",
")",
"for",
"lexer",
"in",
"find_plugin_lexers",
"(",
")",
":",
"(",
"yield",
"lexer",
")"
] | return an iterator over all lexer classes . | train | false |
21,893 | def unlink_quietly(*paths):
def looper(*paths):
for i in paths:
if (type(i) == list):
for path in i:
(yield path)
else:
(yield i)
for path in looper(*paths):
try:
_os.unlink(path)
except:
pass
| [
"def",
"unlink_quietly",
"(",
"*",
"paths",
")",
":",
"def",
"looper",
"(",
"*",
"paths",
")",
":",
"for",
"i",
"in",
"paths",
":",
"if",
"(",
"type",
"(",
"i",
")",
"==",
"list",
")",
":",
"for",
"path",
"in",
"i",
":",
"(",
"yield",
"path",
")",
"else",
":",
"(",
"yield",
"i",
")",
"for",
"path",
"in",
"looper",
"(",
"*",
"paths",
")",
":",
"try",
":",
"_os",
".",
"unlink",
"(",
"path",
")",
"except",
":",
"pass"
] | like the standard os . | train | false |
21,895 | def p4_version_string():
global _p4_version_string
if (not _p4_version_string):
a = p4_read_pipe_lines(['-V'])
_p4_version_string = a[(-1)].rstrip()
return _p4_version_string
| [
"def",
"p4_version_string",
"(",
")",
":",
"global",
"_p4_version_string",
"if",
"(",
"not",
"_p4_version_string",
")",
":",
"a",
"=",
"p4_read_pipe_lines",
"(",
"[",
"'-V'",
"]",
")",
"_p4_version_string",
"=",
"a",
"[",
"(",
"-",
"1",
")",
"]",
".",
"rstrip",
"(",
")",
"return",
"_p4_version_string"
] | read the version string . | train | false |
21,897 | def libvlc_vlm_get_media_instance_position(p_instance, psz_name, i_instance):
f = (_Cfunctions.get('libvlc_vlm_get_media_instance_position', None) or _Cfunction('libvlc_vlm_get_media_instance_position', ((1,), (1,), (1,)), None, ctypes.c_float, Instance, ctypes.c_char_p, ctypes.c_int))
return f(p_instance, psz_name, i_instance)
| [
"def",
"libvlc_vlm_get_media_instance_position",
"(",
"p_instance",
",",
"psz_name",
",",
"i_instance",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_vlm_get_media_instance_position'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_vlm_get_media_instance_position'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
")",
",",
"None",
",",
"ctypes",
".",
"c_float",
",",
"Instance",
",",
"ctypes",
".",
"c_char_p",
",",
"ctypes",
".",
"c_int",
")",
")",
"return",
"f",
"(",
"p_instance",
",",
"psz_name",
",",
"i_instance",
")"
] | get vlm_media instance position by name or instance id . | train | true |
21,900 | def auth_wanted(view_func):
def wrapper(request, *args, **kwargs):
is_secure = settings.TWITTER_COOKIE_SECURE
if (request.COOKIES.get(REDIRECT_NAME) and (is_secure and (not request.is_secure()))):
ssl_url = url(request, {'scheme': ('https' if is_secure else 'http')})
return http.HttpResponseRedirect(ssl_url)
return view_func(request, *args, **kwargs)
return wrapper
| [
"def",
"auth_wanted",
"(",
"view_func",
")",
":",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"is_secure",
"=",
"settings",
".",
"TWITTER_COOKIE_SECURE",
"if",
"(",
"request",
".",
"COOKIES",
".",
"get",
"(",
"REDIRECT_NAME",
")",
"and",
"(",
"is_secure",
"and",
"(",
"not",
"request",
".",
"is_secure",
"(",
")",
")",
")",
")",
":",
"ssl_url",
"=",
"url",
"(",
"request",
",",
"{",
"'scheme'",
":",
"(",
"'https'",
"if",
"is_secure",
"else",
"'http'",
")",
"}",
")",
"return",
"http",
".",
"HttpResponseRedirect",
"(",
"ssl_url",
")",
"return",
"view_func",
"(",
"request",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"wrapper"
] | twitter sessions are ssl only . | train | false |
21,901 | def sdm_monomial_lcm(A, B):
return ((A[0],) + monomial_lcm(A[1:], B[1:]))
| [
"def",
"sdm_monomial_lcm",
"(",
"A",
",",
"B",
")",
":",
"return",
"(",
"(",
"A",
"[",
"0",
"]",
",",
")",
"+",
"monomial_lcm",
"(",
"A",
"[",
"1",
":",
"]",
",",
"B",
"[",
"1",
":",
"]",
")",
")"
] | return the "least common multiple" of a and b . | train | false |
21,902 | def _sgf_init_gamestate(sgf_root):
props = sgf_root.properties
s_size = props.get('SZ', ['19'])[0]
s_player = props.get('PL', ['B'])[0]
gs = go.GameState(int(s_size))
if ('AB' in props):
for stone in props['AB']:
gs.do_move(_parse_sgf_move(stone), go.BLACK)
if ('AW' in props):
for stone in props['AW']:
gs.do_move(_parse_sgf_move(stone), go.WHITE)
gs.current_player = (go.BLACK if (s_player == 'B') else go.WHITE)
return gs
| [
"def",
"_sgf_init_gamestate",
"(",
"sgf_root",
")",
":",
"props",
"=",
"sgf_root",
".",
"properties",
"s_size",
"=",
"props",
".",
"get",
"(",
"'SZ'",
",",
"[",
"'19'",
"]",
")",
"[",
"0",
"]",
"s_player",
"=",
"props",
".",
"get",
"(",
"'PL'",
",",
"[",
"'B'",
"]",
")",
"[",
"0",
"]",
"gs",
"=",
"go",
".",
"GameState",
"(",
"int",
"(",
"s_size",
")",
")",
"if",
"(",
"'AB'",
"in",
"props",
")",
":",
"for",
"stone",
"in",
"props",
"[",
"'AB'",
"]",
":",
"gs",
".",
"do_move",
"(",
"_parse_sgf_move",
"(",
"stone",
")",
",",
"go",
".",
"BLACK",
")",
"if",
"(",
"'AW'",
"in",
"props",
")",
":",
"for",
"stone",
"in",
"props",
"[",
"'AW'",
"]",
":",
"gs",
".",
"do_move",
"(",
"_parse_sgf_move",
"(",
"stone",
")",
",",
"go",
".",
"WHITE",
")",
"gs",
".",
"current_player",
"=",
"(",
"go",
".",
"BLACK",
"if",
"(",
"s_player",
"==",
"'B'",
")",
"else",
"go",
".",
"WHITE",
")",
"return",
"gs"
] | helper function to set up a gamestate object from the root node of an sgf file . | train | false |
21,903 | def RouteProtocolCheck(proto):
proto = str(proto).strip().lower()
if (proto not in ('smtp', 'smtptls', 'smtpssl', 'local')):
raise ValueError((_('Invalid message delivery protocol: %s') % proto))
return proto
| [
"def",
"RouteProtocolCheck",
"(",
"proto",
")",
":",
"proto",
"=",
"str",
"(",
"proto",
")",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"(",
"proto",
"not",
"in",
"(",
"'smtp'",
",",
"'smtptls'",
",",
"'smtpssl'",
",",
"'local'",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"_",
"(",
"'Invalid message delivery protocol: %s'",
")",
"%",
"proto",
")",
")",
"return",
"proto"
] | verify that the protocol is actually a protocol . | train | false |
21,904 | def _get_authorized_user(requesting_user, username=None, allow_staff=False):
if (username is None):
username = requesting_user.username
try:
existing_user = User.objects.get(username=username)
except ObjectDoesNotExist:
raise UserNotFound()
_check_authorized(requesting_user, username, allow_staff)
return existing_user
| [
"def",
"_get_authorized_user",
"(",
"requesting_user",
",",
"username",
"=",
"None",
",",
"allow_staff",
"=",
"False",
")",
":",
"if",
"(",
"username",
"is",
"None",
")",
":",
"username",
"=",
"requesting_user",
".",
"username",
"try",
":",
"existing_user",
"=",
"User",
".",
"objects",
".",
"get",
"(",
"username",
"=",
"username",
")",
"except",
"ObjectDoesNotExist",
":",
"raise",
"UserNotFound",
"(",
")",
"_check_authorized",
"(",
"requesting_user",
",",
"username",
",",
"allow_staff",
")",
"return",
"existing_user"
] | helper method to return the authorized user for a given username . | train | false |
21,906 | def dpid_to_str(dpid, alwaysLong=False):
if ((type(dpid) is long) or (type(dpid) is int)):
dpid = struct.pack('!Q', dpid)
assert (len(dpid) == 8)
r = '-'.join([('%02x' % (ord(x),)) for x in dpid[2:]])
if (alwaysLong or (dpid[0:2] != ('\x00' * 2))):
r += ('|' + str(struct.unpack('!H', dpid[0:2])[0]))
return r
| [
"def",
"dpid_to_str",
"(",
"dpid",
",",
"alwaysLong",
"=",
"False",
")",
":",
"if",
"(",
"(",
"type",
"(",
"dpid",
")",
"is",
"long",
")",
"or",
"(",
"type",
"(",
"dpid",
")",
"is",
"int",
")",
")",
":",
"dpid",
"=",
"struct",
".",
"pack",
"(",
"'!Q'",
",",
"dpid",
")",
"assert",
"(",
"len",
"(",
"dpid",
")",
"==",
"8",
")",
"r",
"=",
"'-'",
".",
"join",
"(",
"[",
"(",
"'%02x'",
"%",
"(",
"ord",
"(",
"x",
")",
",",
")",
")",
"for",
"x",
"in",
"dpid",
"[",
"2",
":",
"]",
"]",
")",
"if",
"(",
"alwaysLong",
"or",
"(",
"dpid",
"[",
"0",
":",
"2",
"]",
"!=",
"(",
"'\\x00'",
"*",
"2",
")",
")",
")",
":",
"r",
"+=",
"(",
"'|'",
"+",
"str",
"(",
"struct",
".",
"unpack",
"(",
"'!H'",
",",
"dpid",
"[",
"0",
":",
"2",
"]",
")",
"[",
"0",
"]",
")",
")",
"return",
"r"
] | convert a dpid from a long into into the canonical string form . | train | false |
21,907 | def _delete_ntp_peers(peers):
return __salt__['ntp.delete_peers'](commit=False, *peers)
| [
"def",
"_delete_ntp_peers",
"(",
"peers",
")",
":",
"return",
"__salt__",
"[",
"'ntp.delete_peers'",
"]",
"(",
"commit",
"=",
"False",
",",
"*",
"peers",
")"
] | calls ntp . | train | false |
21,908 | def getBooleanFromValue(value):
firstCharacter = str(value).lower().lstrip()[:1]
return ((firstCharacter == 't') or (firstCharacter == '1'))
| [
"def",
"getBooleanFromValue",
"(",
"value",
")",
":",
"firstCharacter",
"=",
"str",
"(",
"value",
")",
".",
"lower",
"(",
")",
".",
"lstrip",
"(",
")",
"[",
":",
"1",
"]",
"return",
"(",
"(",
"firstCharacter",
"==",
"'t'",
")",
"or",
"(",
"firstCharacter",
"==",
"'1'",
")",
")"
] | get boolean from the word . | train | false |
21,909 | def _dict_with_extra_specs_if_authorized(context, inst_type_query):
inst_type_dict = dict(inst_type_query)
if (not is_admin_context(context)):
del inst_type_dict['extra_specs']
else:
extra_specs = {x['key']: x['value'] for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
| [
"def",
"_dict_with_extra_specs_if_authorized",
"(",
"context",
",",
"inst_type_query",
")",
":",
"inst_type_dict",
"=",
"dict",
"(",
"inst_type_query",
")",
"if",
"(",
"not",
"is_admin_context",
"(",
"context",
")",
")",
":",
"del",
"inst_type_dict",
"[",
"'extra_specs'",
"]",
"else",
":",
"extra_specs",
"=",
"{",
"x",
"[",
"'key'",
"]",
":",
"x",
"[",
"'value'",
"]",
"for",
"x",
"in",
"inst_type_query",
"[",
"'extra_specs'",
"]",
"}",
"inst_type_dict",
"[",
"'extra_specs'",
"]",
"=",
"extra_specs",
"return",
"inst_type_dict"
] | convert type query result to dict with extra_spec and rate_limit . | train | false |
21,911 | def getVisibleObjects(archivableObjects):
visibleObjects = []
for archivableObject in archivableObjects:
if archivableObject.getVisible():
visibleObjects.append(archivableObject)
return visibleObjects
| [
"def",
"getVisibleObjects",
"(",
"archivableObjects",
")",
":",
"visibleObjects",
"=",
"[",
"]",
"for",
"archivableObject",
"in",
"archivableObjects",
":",
"if",
"archivableObject",
".",
"getVisible",
"(",
")",
":",
"visibleObjects",
".",
"append",
"(",
"archivableObject",
")",
"return",
"visibleObjects"
] | get the visible objects . | train | false |
21,913 | def strip_user_meta_prefix(server_type, key):
return key[len(get_user_meta_prefix(server_type)):]
| [
"def",
"strip_user_meta_prefix",
"(",
"server_type",
",",
"key",
")",
":",
"return",
"key",
"[",
"len",
"(",
"get_user_meta_prefix",
"(",
"server_type",
")",
")",
":",
"]"
] | removes the user metadata prefix for a given server type from the start of a header key . | train | false |
21,914 | def _get_tests_stanza(raw_control_files, is_server, prepend, append, client_control_file=''):
if client_control_file:
raw_control_files.append((client_control_file + '\nreturn locals()'))
raw_steps = ((prepend + [add_boilerplate_to_nested_steps(step) for step in raw_control_files]) + append)
steps = [format_step(index, step) for (index, step) in enumerate(raw_steps)]
if is_server:
step_template = SERVER_STEP_TEMPLATE
footer = '\n\nstep_init()\n'
else:
step_template = CLIENT_STEP_TEMPLATE
footer = ''
header = ''.join(((step_template % i) for i in xrange(len(steps))))
return (((header + '\n') + '\n\n'.join(steps)) + footer)
| [
"def",
"_get_tests_stanza",
"(",
"raw_control_files",
",",
"is_server",
",",
"prepend",
",",
"append",
",",
"client_control_file",
"=",
"''",
")",
":",
"if",
"client_control_file",
":",
"raw_control_files",
".",
"append",
"(",
"(",
"client_control_file",
"+",
"'\\nreturn locals()'",
")",
")",
"raw_steps",
"=",
"(",
"(",
"prepend",
"+",
"[",
"add_boilerplate_to_nested_steps",
"(",
"step",
")",
"for",
"step",
"in",
"raw_control_files",
"]",
")",
"+",
"append",
")",
"steps",
"=",
"[",
"format_step",
"(",
"index",
",",
"step",
")",
"for",
"(",
"index",
",",
"step",
")",
"in",
"enumerate",
"(",
"raw_steps",
")",
"]",
"if",
"is_server",
":",
"step_template",
"=",
"SERVER_STEP_TEMPLATE",
"footer",
"=",
"'\\n\\nstep_init()\\n'",
"else",
":",
"step_template",
"=",
"CLIENT_STEP_TEMPLATE",
"footer",
"=",
"''",
"header",
"=",
"''",
".",
"join",
"(",
"(",
"(",
"step_template",
"%",
"i",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"steps",
")",
")",
")",
")",
"return",
"(",
"(",
"(",
"header",
"+",
"'\\n'",
")",
"+",
"'\\n\\n'",
".",
"join",
"(",
"steps",
")",
")",
"+",
"footer",
")"
] | implements the common parts of get_test_stanza . | train | false |
21,915 | @task
def distutils_check():
with cd('/home/vagrant/repos/sympy'):
run('python setup.py check')
run('python3 setup.py check')
| [
"@",
"task",
"def",
"distutils_check",
"(",
")",
":",
"with",
"cd",
"(",
"'/home/vagrant/repos/sympy'",
")",
":",
"run",
"(",
"'python setup.py check'",
")",
"run",
"(",
"'python3 setup.py check'",
")"
] | runs setup . | train | false |
21,916 | def _merge_a_into_b(a, b):
if (type(a) is not edict):
return
for (k, v) in a.iteritems():
if (not b.has_key(k)):
raise KeyError('{} is not a valid config key'.format(k))
if (type(b[k]) is not type(v)):
raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k))
if (type(v) is edict):
try:
_merge_a_into_b(a[k], b[k])
except:
print 'Error under config key: {}'.format(k)
raise
else:
b[k] = v
| [
"def",
"_merge_a_into_b",
"(",
"a",
",",
"b",
")",
":",
"if",
"(",
"type",
"(",
"a",
")",
"is",
"not",
"edict",
")",
":",
"return",
"for",
"(",
"k",
",",
"v",
")",
"in",
"a",
".",
"iteritems",
"(",
")",
":",
"if",
"(",
"not",
"b",
".",
"has_key",
"(",
"k",
")",
")",
":",
"raise",
"KeyError",
"(",
"'{} is not a valid config key'",
".",
"format",
"(",
"k",
")",
")",
"if",
"(",
"type",
"(",
"b",
"[",
"k",
"]",
")",
"is",
"not",
"type",
"(",
"v",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Type mismatch ({} vs. {}) for config key: {}'",
".",
"format",
"(",
"type",
"(",
"b",
"[",
"k",
"]",
")",
",",
"type",
"(",
"v",
")",
",",
"k",
")",
")",
"if",
"(",
"type",
"(",
"v",
")",
"is",
"edict",
")",
":",
"try",
":",
"_merge_a_into_b",
"(",
"a",
"[",
"k",
"]",
",",
"b",
"[",
"k",
"]",
")",
"except",
":",
"print",
"'Error under config key: {}'",
".",
"format",
"(",
"k",
")",
"raise",
"else",
":",
"b",
"[",
"k",
"]",
"=",
"v"
] | merge config dictionary a into config dictionary b . | train | false |
21,917 | def trim_tables():
ignore_fields = (default_fields + optional_fields)
for doctype in frappe.db.get_all(u'DocType', filters={u'issingle': 0}):
doctype = doctype.name
columns = frappe.db.get_table_columns(doctype)
fields = frappe.get_meta(doctype).get_fieldnames_with_value()
columns_to_remove = [f for f in list((set(columns) - set(fields))) if ((f not in ignore_fields) and (not f.startswith(u'_')))]
if columns_to_remove:
print doctype, u'columns removed:', columns_to_remove
columns_to_remove = u', '.join([u'drop `{0}`'.format(c) for c in columns_to_remove])
query = u'alter table `tab{doctype}` {columns}'.format(doctype=doctype, columns=columns_to_remove)
frappe.db.sql_ddl(query)
| [
"def",
"trim_tables",
"(",
")",
":",
"ignore_fields",
"=",
"(",
"default_fields",
"+",
"optional_fields",
")",
"for",
"doctype",
"in",
"frappe",
".",
"db",
".",
"get_all",
"(",
"u'DocType'",
",",
"filters",
"=",
"{",
"u'issingle'",
":",
"0",
"}",
")",
":",
"doctype",
"=",
"doctype",
".",
"name",
"columns",
"=",
"frappe",
".",
"db",
".",
"get_table_columns",
"(",
"doctype",
")",
"fields",
"=",
"frappe",
".",
"get_meta",
"(",
"doctype",
")",
".",
"get_fieldnames_with_value",
"(",
")",
"columns_to_remove",
"=",
"[",
"f",
"for",
"f",
"in",
"list",
"(",
"(",
"set",
"(",
"columns",
")",
"-",
"set",
"(",
"fields",
")",
")",
")",
"if",
"(",
"(",
"f",
"not",
"in",
"ignore_fields",
")",
"and",
"(",
"not",
"f",
".",
"startswith",
"(",
"u'_'",
")",
")",
")",
"]",
"if",
"columns_to_remove",
":",
"print",
"doctype",
",",
"u'columns removed:'",
",",
"columns_to_remove",
"columns_to_remove",
"=",
"u', '",
".",
"join",
"(",
"[",
"u'drop `{0}`'",
".",
"format",
"(",
"c",
")",
"for",
"c",
"in",
"columns_to_remove",
"]",
")",
"query",
"=",
"u'alter table `tab{doctype}` {columns}'",
".",
"format",
"(",
"doctype",
"=",
"doctype",
",",
"columns",
"=",
"columns_to_remove",
")",
"frappe",
".",
"db",
".",
"sql_ddl",
"(",
"query",
")"
] | use this to remove columns that dont exist in meta . | train | false |
21,918 | def javascript_prompt(url, js_msg, default, abort_on):
log.js.debug('prompt: {}'.format(js_msg))
if config.get('ui', 'modal-js-dialog'):
raise CallSuper
if config.get('content', 'ignore-javascript-prompt'):
return (False, '')
msg = '<b>{}</b> asks:<br/>{}'.format(html.escape(url.toDisplayString()), html.escape(js_msg))
answer = message.ask('Javascript prompt', msg, mode=usertypes.PromptMode.text, default=default, abort_on=abort_on)
if (answer is None):
return (False, '')
else:
return (True, answer)
| [
"def",
"javascript_prompt",
"(",
"url",
",",
"js_msg",
",",
"default",
",",
"abort_on",
")",
":",
"log",
".",
"js",
".",
"debug",
"(",
"'prompt: {}'",
".",
"format",
"(",
"js_msg",
")",
")",
"if",
"config",
".",
"get",
"(",
"'ui'",
",",
"'modal-js-dialog'",
")",
":",
"raise",
"CallSuper",
"if",
"config",
".",
"get",
"(",
"'content'",
",",
"'ignore-javascript-prompt'",
")",
":",
"return",
"(",
"False",
",",
"''",
")",
"msg",
"=",
"'<b>{}</b> asks:<br/>{}'",
".",
"format",
"(",
"html",
".",
"escape",
"(",
"url",
".",
"toDisplayString",
"(",
")",
")",
",",
"html",
".",
"escape",
"(",
"js_msg",
")",
")",
"answer",
"=",
"message",
".",
"ask",
"(",
"'Javascript prompt'",
",",
"msg",
",",
"mode",
"=",
"usertypes",
".",
"PromptMode",
".",
"text",
",",
"default",
"=",
"default",
",",
"abort_on",
"=",
"abort_on",
")",
"if",
"(",
"answer",
"is",
"None",
")",
":",
"return",
"(",
"False",
",",
"''",
")",
"else",
":",
"return",
"(",
"True",
",",
"answer",
")"
] | display a javascript prompt . | train | false |
21,919 | @public
def symmetric_poly(n, *gens, **args):
gens = _analyze_gens(gens)
if ((n < 0) or (n > len(gens)) or (not gens)):
raise ValueError(("can't generate symmetric polynomial of order %s for %s" % (n, gens)))
elif (not n):
poly = S.One
else:
poly = Add(*[Mul(*s) for s in subsets(gens, int(n))])
if (not args.get('polys', False)):
return poly
else:
return Poly(poly, *gens)
| [
"@",
"public",
"def",
"symmetric_poly",
"(",
"n",
",",
"*",
"gens",
",",
"**",
"args",
")",
":",
"gens",
"=",
"_analyze_gens",
"(",
"gens",
")",
"if",
"(",
"(",
"n",
"<",
"0",
")",
"or",
"(",
"n",
">",
"len",
"(",
"gens",
")",
")",
"or",
"(",
"not",
"gens",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"\"can't generate symmetric polynomial of order %s for %s\"",
"%",
"(",
"n",
",",
"gens",
")",
")",
")",
"elif",
"(",
"not",
"n",
")",
":",
"poly",
"=",
"S",
".",
"One",
"else",
":",
"poly",
"=",
"Add",
"(",
"*",
"[",
"Mul",
"(",
"*",
"s",
")",
"for",
"s",
"in",
"subsets",
"(",
"gens",
",",
"int",
"(",
"n",
")",
")",
"]",
")",
"if",
"(",
"not",
"args",
".",
"get",
"(",
"'polys'",
",",
"False",
")",
")",
":",
"return",
"poly",
"else",
":",
"return",
"Poly",
"(",
"poly",
",",
"*",
"gens",
")"
] | generates symmetric polynomial of order n . | train | false |
21,920 | def uninstall_ruby(ruby, runas=None):
ruby = re.sub('^ruby-', '', ruby)
_rbenv_exec(['uninstall', '--force', ruby], runas=runas)
return True
| [
"def",
"uninstall_ruby",
"(",
"ruby",
",",
"runas",
"=",
"None",
")",
":",
"ruby",
"=",
"re",
".",
"sub",
"(",
"'^ruby-'",
",",
"''",
",",
"ruby",
")",
"_rbenv_exec",
"(",
"[",
"'uninstall'",
",",
"'--force'",
",",
"ruby",
"]",
",",
"runas",
"=",
"runas",
")",
"return",
"True"
] | uninstall a ruby implementation . | train | true |
21,924 | def ExceptionAppend(e, msg):
if (not e.args):
e.args = (msg,)
elif (len(e.args) == 1):
e.args = (((str(e.args[0]) + ' ') + msg),)
else:
e.args = ((((str(e.args[0]) + ' ') + msg),) + e.args[1:])
| [
"def",
"ExceptionAppend",
"(",
"e",
",",
"msg",
")",
":",
"if",
"(",
"not",
"e",
".",
"args",
")",
":",
"e",
".",
"args",
"=",
"(",
"msg",
",",
")",
"elif",
"(",
"len",
"(",
"e",
".",
"args",
")",
"==",
"1",
")",
":",
"e",
".",
"args",
"=",
"(",
"(",
"(",
"str",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
"+",
"' '",
")",
"+",
"msg",
")",
",",
")",
"else",
":",
"e",
".",
"args",
"=",
"(",
"(",
"(",
"(",
"str",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
"+",
"' '",
")",
"+",
"msg",
")",
",",
")",
"+",
"e",
".",
"args",
"[",
"1",
":",
"]",
")"
] | append a message to the given exceptions message . | train | false |
21,925 | def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code, have_gil=False, first_assignment=True):
assert rhs.type.is_memoryviewslice
pretty_rhs = (rhs.result_in_temp() or rhs.is_simple())
if pretty_rhs:
rhstmp = rhs.result()
else:
rhstmp = code.funcstate.allocate_temp(lhs_type, manage_ref=False)
code.putln(('%s = %s;' % (rhstmp, rhs.result_as(lhs_type))))
put_assign_to_memviewslice(lhs_cname, rhs, rhstmp, lhs_type, code, have_gil=have_gil, first_assignment=first_assignment)
if (not pretty_rhs):
code.funcstate.release_temp(rhstmp)
| [
"def",
"put_acquire_memoryviewslice",
"(",
"lhs_cname",
",",
"lhs_type",
",",
"lhs_pos",
",",
"rhs",
",",
"code",
",",
"have_gil",
"=",
"False",
",",
"first_assignment",
"=",
"True",
")",
":",
"assert",
"rhs",
".",
"type",
".",
"is_memoryviewslice",
"pretty_rhs",
"=",
"(",
"rhs",
".",
"result_in_temp",
"(",
")",
"or",
"rhs",
".",
"is_simple",
"(",
")",
")",
"if",
"pretty_rhs",
":",
"rhstmp",
"=",
"rhs",
".",
"result",
"(",
")",
"else",
":",
"rhstmp",
"=",
"code",
".",
"funcstate",
".",
"allocate_temp",
"(",
"lhs_type",
",",
"manage_ref",
"=",
"False",
")",
"code",
".",
"putln",
"(",
"(",
"'%s = %s;'",
"%",
"(",
"rhstmp",
",",
"rhs",
".",
"result_as",
"(",
"lhs_type",
")",
")",
")",
")",
"put_assign_to_memviewslice",
"(",
"lhs_cname",
",",
"rhs",
",",
"rhstmp",
",",
"lhs_type",
",",
"code",
",",
"have_gil",
"=",
"have_gil",
",",
"first_assignment",
"=",
"first_assignment",
")",
"if",
"(",
"not",
"pretty_rhs",
")",
":",
"code",
".",
"funcstate",
".",
"release_temp",
"(",
"rhstmp",
")"
] | we can avoid decreffing the lhs if we know it is the first assignment . | train | false |
21,927 | def iplot(figure_or_data, show_link=True, link_text='Export to plot.ly', validate=True, image=None, filename='plot_image', image_width=800, image_height=600):
if (not __PLOTLY_OFFLINE_INITIALIZED):
raise PlotlyError('\n'.join(['Plotly Offline mode has not been initialized in this notebook. Run: ', '', 'import plotly', 'plotly.offline.init_notebook_mode() # run at the start of every ipython notebook']))
if (not ipython):
raise ImportError('`iplot` can only run inside an IPython Notebook.')
config = {}
config['showLink'] = show_link
config['linkText'] = link_text
(plot_html, plotdivid, width, height) = _plot_html(figure_or_data, config, validate, '100%', 525, True)
ipython_display.display(ipython_display.HTML(plot_html))
if image:
if (image not in __IMAGE_FORMATS):
raise ValueError('The image parameter must be one of the following: {}'.format(__IMAGE_FORMATS))
script = get_image_download_script('iplot').format(format=image, width=image_width, height=image_height, filename=filename, plot_id=plotdivid)
time.sleep(1)
ipython_display.display(ipython_display.HTML(script))
| [
"def",
"iplot",
"(",
"figure_or_data",
",",
"show_link",
"=",
"True",
",",
"link_text",
"=",
"'Export to plot.ly'",
",",
"validate",
"=",
"True",
",",
"image",
"=",
"None",
",",
"filename",
"=",
"'plot_image'",
",",
"image_width",
"=",
"800",
",",
"image_height",
"=",
"600",
")",
":",
"if",
"(",
"not",
"__PLOTLY_OFFLINE_INITIALIZED",
")",
":",
"raise",
"PlotlyError",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"'Plotly Offline mode has not been initialized in this notebook. Run: '",
",",
"''",
",",
"'import plotly'",
",",
"'plotly.offline.init_notebook_mode() # run at the start of every ipython notebook'",
"]",
")",
")",
"if",
"(",
"not",
"ipython",
")",
":",
"raise",
"ImportError",
"(",
"'`iplot` can only run inside an IPython Notebook.'",
")",
"config",
"=",
"{",
"}",
"config",
"[",
"'showLink'",
"]",
"=",
"show_link",
"config",
"[",
"'linkText'",
"]",
"=",
"link_text",
"(",
"plot_html",
",",
"plotdivid",
",",
"width",
",",
"height",
")",
"=",
"_plot_html",
"(",
"figure_or_data",
",",
"config",
",",
"validate",
",",
"'100%'",
",",
"525",
",",
"True",
")",
"ipython_display",
".",
"display",
"(",
"ipython_display",
".",
"HTML",
"(",
"plot_html",
")",
")",
"if",
"image",
":",
"if",
"(",
"image",
"not",
"in",
"__IMAGE_FORMATS",
")",
":",
"raise",
"ValueError",
"(",
"'The image parameter must be one of the following: {}'",
".",
"format",
"(",
"__IMAGE_FORMATS",
")",
")",
"script",
"=",
"get_image_download_script",
"(",
"'iplot'",
")",
".",
"format",
"(",
"format",
"=",
"image",
",",
"width",
"=",
"image_width",
",",
"height",
"=",
"image_height",
",",
"filename",
"=",
"filename",
",",
"plot_id",
"=",
"plotdivid",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"ipython_display",
".",
"display",
"(",
"ipython_display",
".",
"HTML",
"(",
"script",
")",
")"
] | create a unique url for this plot in plotly and open in ipython . | train | false |
21,929 | def default_hash():
return ('*' if (__grains__['os'].lower() == 'freebsd') else '*************')
| [
"def",
"default_hash",
"(",
")",
":",
"return",
"(",
"'*'",
"if",
"(",
"__grains__",
"[",
"'os'",
"]",
".",
"lower",
"(",
")",
"==",
"'freebsd'",
")",
"else",
"'*************'",
")"
] | returns the default hash used for unset passwords cli example: . | train | false |
21,930 | def base_search(index, query, params, search, schema):
with index.searcher() as searcher:
queries = []
for param in params:
if search[param]:
parser = qparser.QueryParser(param, schema)
queries.append(parser.parse(query))
terms = functools.reduce((lambda x, y: (x | y)), queries)
return [result['pk'] for result in searcher.search(terms)]
| [
"def",
"base_search",
"(",
"index",
",",
"query",
",",
"params",
",",
"search",
",",
"schema",
")",
":",
"with",
"index",
".",
"searcher",
"(",
")",
"as",
"searcher",
":",
"queries",
"=",
"[",
"]",
"for",
"param",
"in",
"params",
":",
"if",
"search",
"[",
"param",
"]",
":",
"parser",
"=",
"qparser",
".",
"QueryParser",
"(",
"param",
",",
"schema",
")",
"queries",
".",
"append",
"(",
"parser",
".",
"parse",
"(",
"query",
")",
")",
"terms",
"=",
"functools",
".",
"reduce",
"(",
"(",
"lambda",
"x",
",",
"y",
":",
"(",
"x",
"|",
"y",
")",
")",
",",
"queries",
")",
"return",
"[",
"result",
"[",
"'pk'",
"]",
"for",
"result",
"in",
"searcher",
".",
"search",
"(",
"terms",
")",
"]"
] | wrapper for fulltext search . | train | false |
21,935 | @contextmanager
def respect_language(language):
if language:
prev = translation.get_language()
translation.activate(language)
try:
(yield)
finally:
translation.activate(prev)
else:
(yield)
| [
"@",
"contextmanager",
"def",
"respect_language",
"(",
"language",
")",
":",
"if",
"language",
":",
"prev",
"=",
"translation",
".",
"get_language",
"(",
")",
"translation",
".",
"activate",
"(",
"language",
")",
"try",
":",
"(",
"yield",
")",
"finally",
":",
"translation",
".",
"activate",
"(",
"prev",
")",
"else",
":",
"(",
"yield",
")"
] | context manager that changes the current translation language for all code inside the following block . | train | true |
@register_specialize
@register_canonicalize
@register_useless
@gof.local_optimizer([T.Join])
def local_join_1(node):
    """Join(axis, x) -> x: drop a Join that concatenates a single tensor.

    Returns None (no rewrite) for non-Join nodes or joins of 2+ tensors.
    """
    if isinstance(node.op, T.Join):
        # inputs[0] is the axis; the remainder are the joined tensors.
        joined = node.inputs[1:]
        if len(joined) == 1:
            return [joined[0]]
| [
"@",
"register_specialize",
"@",
"register_canonicalize",
"@",
"register_useless",
"@",
"gof",
".",
"local_optimizer",
"(",
"[",
"T",
".",
"Join",
"]",
")",
"def",
"local_join_1",
"(",
"node",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"node",
".",
"op",
",",
"T",
".",
"Join",
")",
")",
":",
"return",
"tensors",
"=",
"node",
".",
"inputs",
"[",
"1",
":",
"]",
"if",
"(",
"len",
"(",
"tensors",
")",
"==",
"1",
")",
":",
"return",
"[",
"tensors",
"[",
"0",
"]",
"]"
] | join => x remove join() when only one element is joined . | train | false |
def get_full_file_list(file_path_glob):
    """Return absolute paths for every file matching *file_path_glob*.

    An empty list is returned when nothing matches.
    """
    return [os.path.abspath(match) for match in glob.glob(file_path_glob)]
| [
"def",
"get_full_file_list",
"(",
"file_path_glob",
")",
":",
"file_list",
"=",
"[",
"]",
"for",
"file_name",
"in",
"glob",
".",
"glob",
"(",
"file_path_glob",
")",
":",
"full_file_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"file_name",
")",
"file_list",
".",
"append",
"(",
"full_file_path",
")",
"return",
"file_list"
] | return a list of absolute file paths to all the files in the provided file path glob . | train | false |
def notification_handler(fn):
    """Decorator: register *fn* in the module-level handler set and return it
    unchanged so it can still be called directly."""
    notification_handlers.add(fn)
    return fn
| [
"def",
"notification_handler",
"(",
"fn",
")",
":",
"notification_handlers",
".",
"add",
"(",
"fn",
")",
"return",
"fn"
] | register a function to be called via celery for every notification . | train | false |
21,940 | def _cartesian_to_sphere(x, y, z):
hypotxy = np.hypot(x, y)
r = np.hypot(hypotxy, z)
elev = np.arctan2(z, hypotxy)
az = np.arctan2(y, x)
return (az, elev, r)
| [
"def",
"_cartesian_to_sphere",
"(",
"x",
",",
"y",
",",
"z",
")",
":",
"hypotxy",
"=",
"np",
".",
"hypot",
"(",
"x",
",",
"y",
")",
"r",
"=",
"np",
".",
"hypot",
"(",
"hypotxy",
",",
"z",
")",
"elev",
"=",
"np",
".",
"arctan2",
"(",
"z",
",",
"hypotxy",
")",
"az",
"=",
"np",
".",
"arctan2",
"(",
"y",
",",
"x",
")",
"return",
"(",
"az",
",",
"elev",
",",
"r",
")"
] | convert using old function . | train | false |
def get_global_aliases():
    """Return the mapping of canonical field names to their known aliases.

    Keys are the canonical importer field names; values list the spellings
    seen in external data sources (e.g. Magento exports).
    """
    return {
        u'shop': [u'store', u'store_id', u'_store'],
        u'default_price_value': [
            u'price', u'original_price', u'originalprice', u'default_price',
        ],
        u'first_name': [u'firstname'],
        u'last_name': [u'lastname'],
        u'street': [
            u'street_address', u'address_street', u'addr_street',
            u'address[street]',
        ],
        u'country': [u'country_id'],
        u'slug': [u'url_key', u'url'],
        u'phone': [u'telephone'],
        u'postal_code': [
            u'postcode', u'postalcode', u'address_postcode',
            u'address_postalcode', u'address_postal_code',
            u'address[postcode]', u'address[postalcode]',
            u'address[postal_code]',
        ],
    }
| [
"def",
"get_global_aliases",
"(",
")",
":",
"return",
"{",
"u'shop'",
":",
"[",
"u'store'",
",",
"u'store_id'",
",",
"u'_store'",
"]",
",",
"u'default_price_value'",
":",
"[",
"u'price'",
",",
"u'original_price'",
",",
"u'originalprice'",
",",
"u'default_price'",
"]",
",",
"u'first_name'",
":",
"[",
"u'firstname'",
"]",
",",
"u'last_name'",
":",
"[",
"u'lastname'",
"]",
",",
"u'street'",
":",
"[",
"u'street_address'",
",",
"u'address_street'",
",",
"u'addr_street'",
",",
"u'address[street]'",
"]",
",",
"u'country'",
":",
"[",
"u'country_id'",
"]",
",",
"u'slug'",
":",
"[",
"u'url_key'",
",",
"u'url'",
"]",
",",
"u'phone'",
":",
"[",
"u'telephone'",
"]",
",",
"u'postal_code'",
":",
"[",
"u'postcode'",
",",
"u'postalcode'",
",",
"u'address_postcode'",
",",
"u'address_postalcode'",
",",
"u'address_postal_code'",
",",
"u'address[postcode]'",
",",
"u'address[postalcode]'",
",",
"u'address[postal_code]'",
"]",
"}"
] | get list of global aliases for fields :return: . | train | false |
def gametime_to_realtime(secs=0, mins=0, hrs=0, days=0, weeks=0, months=0, yrs=0, format=False):
    """Convert an in-game duration to the real-world seconds it represents.

    The components are summed into game-time seconds and divided by the
    game/real speed factor ``TIMEFACTOR``.  With ``format=True`` the result
    is broken down into (years, months, weeks, days, hours, minutes) via
    ``_format`` instead of being returned as a raw number.
    """
    game_secs = (secs + (mins * MIN) + (hrs * HOUR) + (days * DAY)
                 + (weeks * WEEK) + (months * MONTH) + (yrs * YEAR))
    realtime = game_secs / TIMEFACTOR
    if format:
        return _format(realtime, 31536000, 2628000, 604800, 86400, 3600, 60)
    return realtime
| [
"def",
"gametime_to_realtime",
"(",
"secs",
"=",
"0",
",",
"mins",
"=",
"0",
",",
"hrs",
"=",
"0",
",",
"days",
"=",
"0",
",",
"weeks",
"=",
"0",
",",
"months",
"=",
"0",
",",
"yrs",
"=",
"0",
",",
"format",
"=",
"False",
")",
":",
"realtime",
"=",
"(",
"(",
"(",
"(",
"(",
"(",
"(",
"secs",
"+",
"(",
"mins",
"*",
"MIN",
")",
")",
"+",
"(",
"hrs",
"*",
"HOUR",
")",
")",
"+",
"(",
"days",
"*",
"DAY",
")",
")",
"+",
"(",
"weeks",
"*",
"WEEK",
")",
")",
"+",
"(",
"months",
"*",
"MONTH",
")",
")",
"+",
"(",
"yrs",
"*",
"YEAR",
")",
")",
"/",
"TIMEFACTOR",
")",
"if",
"format",
":",
"return",
"_format",
"(",
"realtime",
",",
"31536000",
",",
"2628000",
",",
"604800",
",",
"86400",
",",
"3600",
",",
"60",
")",
"return",
"realtime"
] | this method helps to figure out the real-world time it will take until an in-game time has passed . | train | false |
def binary_is_text(data):
    """Return True when *data* (bytes) contains no unprintable control bytes.

    Raises TypeError for non-bytes input.
    """
    if not isinstance(data, bytes):
        raise TypeError("data must be bytes, got '%s'" % type(data).__name__)
    return not any(byte in _BINARYCHARS for byte in data)
| [
"def",
"binary_is_text",
"(",
"data",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"data",
",",
"bytes",
")",
")",
":",
"raise",
"TypeError",
"(",
"(",
"\"data must be bytes, got '%s'\"",
"%",
"type",
"(",
"data",
")",
".",
"__name__",
")",
")",
"return",
"all",
"(",
"(",
"(",
"c",
"not",
"in",
"_BINARYCHARS",
")",
"for",
"c",
"in",
"data",
")",
")"
] | returns true if the given data argument does not contain unprintable control characters . | train | false |
21,944 | def present(name, DomainName, ElasticsearchClusterConfig=None, EBSOptions=None, AccessPolicies=None, SnapshotOptions=None, AdvancedOptions=None, Tags=None, region=None, key=None, keyid=None, profile=None, ElasticsearchVersion='1.5'):
    """Salt state: ensure the AWS Elasticsearch domain ``DomainName`` exists.

    Creates the domain via ``boto_elasticsearch_domain.create`` when absent;
    otherwise diffs the live option groups against the requested ones and
    calls ``boto_elasticsearch_domain.update`` for any drift.  Returns the
    conventional Salt state dict (``name``/``result``/``comment``/``changes``).

    NOTE(review): ``Tags`` is normalised to ``{}`` but never used afterwards
    in this body -- confirm whether tagging support was intended.
    """
    ret = {'name': DomainName, 'result': True, 'comment': '', 'changes': {}}
    # Fill in AWS defaults for any option group the SLS did not supply.
    if (ElasticsearchClusterConfig is None):
        ElasticsearchClusterConfig = {'DedicatedMasterEnabled': False, 'InstanceCount': 1, 'InstanceType': 'm3.medium.elasticsearch', 'ZoneAwarenessEnabled': False}
    if (EBSOptions is None):
        EBSOptions = {'EBSEnabled': False}
    if (SnapshotOptions is None):
        SnapshotOptions = {'AutomatedSnapshotStartHour': 0}
    if (AdvancedOptions is None):
        AdvancedOptions = {'rest.action.multi.allow_explicit_index': 'true'}
    if (Tags is None):
        Tags = {}
    # Access policies may arrive as a JSON string; parse up front so a
    # malformed document fails the state early with a clear comment.
    if ((AccessPolicies is not None) and isinstance(AccessPolicies, six.string_types)):
        try:
            AccessPolicies = json.loads(AccessPolicies)
        except ValueError as e:
            ret['result'] = False
            ret['comment'] = 'Failed to create domain: {0}.'.format(e.message)
            return ret
    r = __salt__['boto_elasticsearch_domain.exists'](DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile)
    if ('error' in r):
        ret['result'] = False
        ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
        return ret
    if (not r.get('exists')):
        # Domain is absent: create it (or just announce the plan in test mode).
        if __opts__['test']:
            ret['comment'] = 'Domain {0} is set to be created.'.format(DomainName)
            ret['result'] = None
            return ret
        r = __salt__['boto_elasticsearch_domain.create'](DomainName=DomainName, ElasticsearchClusterConfig=ElasticsearchClusterConfig, EBSOptions=EBSOptions, AccessPolicies=AccessPolicies, SnapshotOptions=SnapshotOptions, AdvancedOptions=AdvancedOptions, ElasticsearchVersion=str(ElasticsearchVersion), region=region, key=key, keyid=keyid, profile=profile)
        if (not r.get('created')):
            ret['result'] = False
            ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
            return ret
        _describe = __salt__['boto_elasticsearch_domain.describe'](DomainName, region=region, key=key, keyid=keyid, profile=profile)
        ret['changes']['old'] = {'domain': None}
        ret['changes']['new'] = _describe
        ret['comment'] = 'Domain {0} created.'.format(DomainName)
        return ret
    # Domain already exists: compare live configuration with the request.
    ret['comment'] = os.linesep.join([ret['comment'], 'Domain {0} is present.'.format(DomainName)])
    ret['changes'] = {}
    _status = __salt__['boto_elasticsearch_domain.status'](DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile)['domain']
    # The Elasticsearch engine version cannot be changed in place.
    if (_status.get('ElasticsearchVersion') != str(ElasticsearchVersion)):
        ret['result'] = False
        ret['comment'] = 'Failed to update domain: version cannot be modified from {0} to {1}.'.format(_status.get('ElasticsearchVersion'), str(ElasticsearchVersion))
        return ret
    _describe = __salt__['boto_elasticsearch_domain.describe'](DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile)['domain']
    # Live policies come back as a JSON string; parse for structural compare.
    _describe['AccessPolicies'] = json.loads(_describe['AccessPolicies'])
    # When EBS is disabled AWS still reports volume fields; drop them so the
    # comparison below does not flag a spurious diff.
    if (not _describe.get('EBSOptions', {}).get('EBSEnabled')):
        opts = _describe.get('EBSOptions', {})
        opts.pop('VolumeSize', None)
        opts.pop('VolumeType', None)
    comm_args = {}
    need_update = False
    es_opts = {'ElasticsearchClusterConfig': ElasticsearchClusterConfig, 'EBSOptions': EBSOptions, 'AccessPolicies': AccessPolicies, 'SnapshotOptions': SnapshotOptions, 'AdvancedOptions': AdvancedOptions}
    # Collect every option group that drifted; record both sides in changes.
    for (k, v) in six.iteritems(es_opts):
        if (not _compare_json(v, _describe[k])):
            need_update = True
            comm_args[k] = v
            ret['changes'].setdefault('new', {})[k] = v
            ret['changes'].setdefault('old', {})[k] = _describe[k]
    if need_update:
        if __opts__['test']:
            msg = 'Domain {0} set to be modified.'.format(DomainName)
            ret['comment'] = msg
            ret['result'] = None
            return ret
        ret['comment'] = os.linesep.join([ret['comment'], 'Domain to be modified'])
        r = __salt__['boto_elasticsearch_domain.update'](DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile, **comm_args)
        if (not r.get('updated')):
            ret['result'] = False
            ret['comment'] = 'Failed to update domain: {0}.'.format(r['error'])
            ret['changes'] = {}
            return ret
    return ret
| [
"def",
"present",
"(",
"name",
",",
"DomainName",
",",
"ElasticsearchClusterConfig",
"=",
"None",
",",
"EBSOptions",
"=",
"None",
",",
"AccessPolicies",
"=",
"None",
",",
"SnapshotOptions",
"=",
"None",
",",
"AdvancedOptions",
"=",
"None",
",",
"Tags",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"ElasticsearchVersion",
"=",
"'1.5'",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"DomainName",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"if",
"(",
"ElasticsearchClusterConfig",
"is",
"None",
")",
":",
"ElasticsearchClusterConfig",
"=",
"{",
"'DedicatedMasterEnabled'",
":",
"False",
",",
"'InstanceCount'",
":",
"1",
",",
"'InstanceType'",
":",
"'m3.medium.elasticsearch'",
",",
"'ZoneAwarenessEnabled'",
":",
"False",
"}",
"if",
"(",
"EBSOptions",
"is",
"None",
")",
":",
"EBSOptions",
"=",
"{",
"'EBSEnabled'",
":",
"False",
"}",
"if",
"(",
"SnapshotOptions",
"is",
"None",
")",
":",
"SnapshotOptions",
"=",
"{",
"'AutomatedSnapshotStartHour'",
":",
"0",
"}",
"if",
"(",
"AdvancedOptions",
"is",
"None",
")",
":",
"AdvancedOptions",
"=",
"{",
"'rest.action.multi.allow_explicit_index'",
":",
"'true'",
"}",
"if",
"(",
"Tags",
"is",
"None",
")",
":",
"Tags",
"=",
"{",
"}",
"if",
"(",
"(",
"AccessPolicies",
"is",
"not",
"None",
")",
"and",
"isinstance",
"(",
"AccessPolicies",
",",
"six",
".",
"string_types",
")",
")",
":",
"try",
":",
"AccessPolicies",
"=",
"json",
".",
"loads",
"(",
"AccessPolicies",
")",
"except",
"ValueError",
"as",
"e",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to create domain: {0}.'",
".",
"format",
"(",
"e",
".",
"message",
")",
"return",
"ret",
"r",
"=",
"__salt__",
"[",
"'boto_elasticsearch_domain.exists'",
"]",
"(",
"DomainName",
"=",
"DomainName",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"(",
"'error'",
"in",
"r",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to create domain: {0}.'",
".",
"format",
"(",
"r",
"[",
"'error'",
"]",
"[",
"'message'",
"]",
")",
"return",
"ret",
"if",
"(",
"not",
"r",
".",
"get",
"(",
"'exists'",
")",
")",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Domain {0} is set to be created.'",
".",
"format",
"(",
"DomainName",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"r",
"=",
"__salt__",
"[",
"'boto_elasticsearch_domain.create'",
"]",
"(",
"DomainName",
"=",
"DomainName",
",",
"ElasticsearchClusterConfig",
"=",
"ElasticsearchClusterConfig",
",",
"EBSOptions",
"=",
"EBSOptions",
",",
"AccessPolicies",
"=",
"AccessPolicies",
",",
"SnapshotOptions",
"=",
"SnapshotOptions",
",",
"AdvancedOptions",
"=",
"AdvancedOptions",
",",
"ElasticsearchVersion",
"=",
"str",
"(",
"ElasticsearchVersion",
")",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"(",
"not",
"r",
".",
"get",
"(",
"'created'",
")",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to create domain: {0}.'",
".",
"format",
"(",
"r",
"[",
"'error'",
"]",
"[",
"'message'",
"]",
")",
"return",
"ret",
"_describe",
"=",
"__salt__",
"[",
"'boto_elasticsearch_domain.describe'",
"]",
"(",
"DomainName",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"=",
"{",
"'domain'",
":",
"None",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"_describe",
"ret",
"[",
"'comment'",
"]",
"=",
"'Domain {0} created.'",
".",
"format",
"(",
"DomainName",
")",
"return",
"ret",
"ret",
"[",
"'comment'",
"]",
"=",
"os",
".",
"linesep",
".",
"join",
"(",
"[",
"ret",
"[",
"'comment'",
"]",
",",
"'Domain {0} is present.'",
".",
"format",
"(",
"DomainName",
")",
"]",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"}",
"_status",
"=",
"__salt__",
"[",
"'boto_elasticsearch_domain.status'",
"]",
"(",
"DomainName",
"=",
"DomainName",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"[",
"'domain'",
"]",
"if",
"(",
"_status",
".",
"get",
"(",
"'ElasticsearchVersion'",
")",
"!=",
"str",
"(",
"ElasticsearchVersion",
")",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to update domain: version cannot be modified from {0} to {1}.'",
".",
"format",
"(",
"_status",
".",
"get",
"(",
"'ElasticsearchVersion'",
")",
",",
"str",
"(",
"ElasticsearchVersion",
")",
")",
"return",
"ret",
"_describe",
"=",
"__salt__",
"[",
"'boto_elasticsearch_domain.describe'",
"]",
"(",
"DomainName",
"=",
"DomainName",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"[",
"'domain'",
"]",
"_describe",
"[",
"'AccessPolicies'",
"]",
"=",
"json",
".",
"loads",
"(",
"_describe",
"[",
"'AccessPolicies'",
"]",
")",
"if",
"(",
"not",
"_describe",
".",
"get",
"(",
"'EBSOptions'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'EBSEnabled'",
")",
")",
":",
"opts",
"=",
"_describe",
".",
"get",
"(",
"'EBSOptions'",
",",
"{",
"}",
")",
"opts",
".",
"pop",
"(",
"'VolumeSize'",
",",
"None",
")",
"opts",
".",
"pop",
"(",
"'VolumeType'",
",",
"None",
")",
"comm_args",
"=",
"{",
"}",
"need_update",
"=",
"False",
"es_opts",
"=",
"{",
"'ElasticsearchClusterConfig'",
":",
"ElasticsearchClusterConfig",
",",
"'EBSOptions'",
":",
"EBSOptions",
",",
"'AccessPolicies'",
":",
"AccessPolicies",
",",
"'SnapshotOptions'",
":",
"SnapshotOptions",
",",
"'AdvancedOptions'",
":",
"AdvancedOptions",
"}",
"for",
"(",
"k",
",",
"v",
")",
"in",
"six",
".",
"iteritems",
"(",
"es_opts",
")",
":",
"if",
"(",
"not",
"_compare_json",
"(",
"v",
",",
"_describe",
"[",
"k",
"]",
")",
")",
":",
"need_update",
"=",
"True",
"comm_args",
"[",
"k",
"]",
"=",
"v",
"ret",
"[",
"'changes'",
"]",
".",
"setdefault",
"(",
"'new'",
",",
"{",
"}",
")",
"[",
"k",
"]",
"=",
"v",
"ret",
"[",
"'changes'",
"]",
".",
"setdefault",
"(",
"'old'",
",",
"{",
"}",
")",
"[",
"k",
"]",
"=",
"_describe",
"[",
"k",
"]",
"if",
"need_update",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"msg",
"=",
"'Domain {0} set to be modified.'",
".",
"format",
"(",
"DomainName",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"ret",
"[",
"'comment'",
"]",
"=",
"os",
".",
"linesep",
".",
"join",
"(",
"[",
"ret",
"[",
"'comment'",
"]",
",",
"'Domain to be modified'",
"]",
")",
"r",
"=",
"__salt__",
"[",
"'boto_elasticsearch_domain.update'",
"]",
"(",
"DomainName",
"=",
"DomainName",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
"**",
"comm_args",
")",
"if",
"(",
"not",
"r",
".",
"get",
"(",
"'updated'",
")",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to update domain: {0}.'",
".",
"format",
"(",
"r",
"[",
"'error'",
"]",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"}",
"return",
"ret",
"return",
"ret"
] | ensure an aws elasticsearch domain exists with the given configuration . | train | true |
def split_dataset(dataset, split_at, order=None):
    """Split *dataset* into two SubDatasets at index *split_at*.

    The first subset covers ``[0, split_at)`` and the second
    ``[split_at, len(dataset))``; *order* is forwarded to both.
    Raises ValueError for a negative or out-of-range split point.
    """
    size = len(dataset)
    if split_at < 0:
        raise ValueError('split_at must be non-negative')
    if split_at >= size:
        raise ValueError('split_at exceeds the dataset size')
    first = SubDataset(dataset, 0, split_at, order)
    second = SubDataset(dataset, split_at, size, order)
    return (first, second)
| [
"def",
"split_dataset",
"(",
"dataset",
",",
"split_at",
",",
"order",
"=",
"None",
")",
":",
"n_examples",
"=",
"len",
"(",
"dataset",
")",
"if",
"(",
"split_at",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'split_at must be non-negative'",
")",
"if",
"(",
"split_at",
">=",
"n_examples",
")",
":",
"raise",
"ValueError",
"(",
"'split_at exceeds the dataset size'",
")",
"subset1",
"=",
"SubDataset",
"(",
"dataset",
",",
"0",
",",
"split_at",
",",
"order",
")",
"subset2",
"=",
"SubDataset",
"(",
"dataset",
",",
"split_at",
",",
"n_examples",
",",
"order",
")",
"return",
"(",
"subset1",
",",
"subset2",
")"
] | splits a dataset into two subsets . | train | false |
@pytest.mark.cmd
def test_test_checks_srctgt_pass(capfd):
    """`test_checks --source --target` with a passing pair reports no errors."""
    call_command('test_checks', '--source="Files"', '--target="Leers"')
    stdout, _ = capfd.readouterr()
    assert 'No errors found' in stdout
| [
"@",
"pytest",
".",
"mark",
".",
"cmd",
"def",
"test_test_checks_srctgt_pass",
"(",
"capfd",
")",
":",
"call_command",
"(",
"'test_checks'",
",",
"'--source=\"Files\"'",
",",
"'--target=\"Leers\"'",
")",
"(",
"out",
",",
"err",
")",
"=",
"capfd",
".",
"readouterr",
"(",
")",
"assert",
"(",
"'No errors found'",
"in",
"out",
")"
] | passing --source --target check . | train | false |
def test_darken():
    """darken() dims hex and rgb(a) colors by a percentage, keeping alpha."""
    cases = [
        (('#800', 20), '#200'),
        (('#800e', 20), '#200e'),
        (('#800', 0), '#800'),
        (('#ffffff', 10), '#e6e6e6'),
        (('#000000', 10), '#000000'),
        (('#f3148a', 25), '#810747'),
        (('#f3148aab', 25), '#810747ab'),
        (('#121212', 1), '#0f0f0f'),
        (('#999999', 100), '#000000'),
        (('#99999999', 100), '#00000099'),
        (('#1479ac', 8), '#105f87'),
        (('rgb(136, 0, 0)', 20), 'rgb(34, 0, 0)'),
        (('rgba(20, 121, 172, .13)', 8), 'rgba(16, 95, 135, 0.13)'),
    ]
    for (color, percent), expected in cases:
        assert darken(color, percent) == expected
| [
"def",
"test_darken",
"(",
")",
":",
"assert",
"(",
"darken",
"(",
"'#800'",
",",
"20",
")",
"==",
"'#200'",
")",
"assert",
"(",
"darken",
"(",
"'#800e'",
",",
"20",
")",
"==",
"'#200e'",
")",
"assert",
"(",
"darken",
"(",
"'#800'",
",",
"0",
")",
"==",
"'#800'",
")",
"assert",
"(",
"darken",
"(",
"'#ffffff'",
",",
"10",
")",
"==",
"'#e6e6e6'",
")",
"assert",
"(",
"darken",
"(",
"'#000000'",
",",
"10",
")",
"==",
"'#000000'",
")",
"assert",
"(",
"darken",
"(",
"'#f3148a'",
",",
"25",
")",
"==",
"'#810747'",
")",
"assert",
"(",
"darken",
"(",
"'#f3148aab'",
",",
"25",
")",
"==",
"'#810747ab'",
")",
"assert",
"(",
"darken",
"(",
"'#121212'",
",",
"1",
")",
"==",
"'#0f0f0f'",
")",
"assert",
"(",
"darken",
"(",
"'#999999'",
",",
"100",
")",
"==",
"'#000000'",
")",
"assert",
"(",
"darken",
"(",
"'#99999999'",
",",
"100",
")",
"==",
"'#00000099'",
")",
"assert",
"(",
"darken",
"(",
"'#1479ac'",
",",
"8",
")",
"==",
"'#105f87'",
")",
"assert",
"(",
"darken",
"(",
"'rgb(136, 0, 0)'",
",",
"20",
")",
"==",
"'rgb(34, 0, 0)'",
")",
"assert",
"(",
"darken",
"(",
"'rgba(20, 121, 172, .13)'",
",",
"8",
")",
"==",
"'rgba(16, 95, 135, 0.13)'",
")"
] | test darken color function . | train | false |
def _fix_list_encoding(var):
    """Decode every item of a list/tuple with ``_to_unicode`` and drop the
    items that decoded to None; any other value is returned untouched.

    Kept as filter(map(...)) rather than a comprehension to preserve the
    original return type on both Python 2 (list) and Python 3 (iterator).
    """
    if not isinstance(var, (list, tuple)):
        return var
    decoded = map(_to_unicode, var)
    return filter(lambda item: item is not None, decoded)
| [
"def",
"_fix_list_encoding",
"(",
"var",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"filter",
"(",
"(",
"lambda",
"x",
":",
"(",
"x",
"is",
"not",
"None",
")",
")",
",",
"map",
"(",
"_to_unicode",
",",
"var",
")",
")",
"return",
"var"
] | converts each item in a list to unicode . | train | false |
def test_if_rlimits_can_be_used(dcos_api_session):
    """Verify Mesos honours task rlimits: RLMT_CORE makes `ulimit -c` unlimited."""
    task_name = 'test-rlimits-{}'.format(uuid.uuid4().hex)
    cmd = [
        '/opt/mesosphere/bin/mesos-execute',
        '--rlimits={"rlimits": [{"type":"RLMT_CORE"}]}',
        '--master=leader.mesos:5050',
        '--name={}'.format(task_name),
        '--command=ulimit -c | grep -q unlimited',
        '--shell=true',
        '--env={"LC_ALL":"C"}',
    ]
    output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    # The task only finishes successfully when the rlimit was applied.
    expected = "Received status update TASK_FINISHED for task '{name}'".format(name=task_name)
    assert expected in output
| [
"def",
"test_if_rlimits_can_be_used",
"(",
"dcos_api_session",
")",
":",
"name",
"=",
"'test-rlimits-{}'",
".",
"format",
"(",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
"argv",
"=",
"[",
"'/opt/mesosphere/bin/mesos-execute'",
",",
"'--rlimits={\"rlimits\": [{\"type\":\"RLMT_CORE\"}]}'",
",",
"'--master=leader.mesos:5050'",
",",
"'--name={}'",
".",
"format",
"(",
"name",
")",
",",
"'--command=ulimit -c | grep -q unlimited'",
",",
"'--shell=true'",
",",
"'--env={\"LC_ALL\":\"C\"}'",
"]",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"argv",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"universal_newlines",
"=",
"True",
")",
"expected_output",
"=",
"\"Received status update TASK_FINISHED for task '{name}'\"",
".",
"format",
"(",
"name",
"=",
"name",
")",
"assert",
"(",
"expected_output",
"in",
"output",
")"
] | this test verifies that rlimits can be used . | train | false |
def setup_instance_group(context, request_spec, filter_properties):
    """Add server-group scheduling hints to *filter_properties* in place.

    When the instance belongs to a group, ``group_updated``,
    ``group_hosts``, ``group_policies`` and ``group_members`` are set;
    otherwise the dict is left untouched.
    """
    hosts = filter_properties.get('group_hosts')
    instance_uuid = request_spec.get('instance_properties', {}).get('uuid')
    group_info = _get_group_details(context, instance_uuid, hosts)
    if group_info is None:
        return
    filter_properties.update({
        'group_updated': True,
        'group_hosts': group_info.hosts,
        'group_policies': group_info.policies,
        'group_members': group_info.members,
    })
| [
"def",
"setup_instance_group",
"(",
"context",
",",
"request_spec",
",",
"filter_properties",
")",
":",
"group_hosts",
"=",
"filter_properties",
".",
"get",
"(",
"'group_hosts'",
")",
"instance_uuid",
"=",
"request_spec",
".",
"get",
"(",
"'instance_properties'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'uuid'",
")",
"group_info",
"=",
"_get_group_details",
"(",
"context",
",",
"instance_uuid",
",",
"group_hosts",
")",
"if",
"(",
"group_info",
"is",
"not",
"None",
")",
":",
"filter_properties",
"[",
"'group_updated'",
"]",
"=",
"True",
"filter_properties",
"[",
"'group_hosts'",
"]",
"=",
"group_info",
".",
"hosts",
"filter_properties",
"[",
"'group_policies'",
"]",
"=",
"group_info",
".",
"policies",
"filter_properties",
"[",
"'group_members'",
"]",
"=",
"group_info",
".",
"members"
] | add group_hosts and group_policies fields to filter_properties dict based on instance uuids provided in request_spec . | train | false |
def append_data_to_webob_response(response, data):
    """Merge *data* into the JSON body of a webob response.

    Non-JSON responses (or objects without a content_type) pass through
    unchanged.  The (mutated) response is returned either way.
    """
    if getattr(response, 'content_type', None) != 'application/json':
        return response
    payload = json.loads(response.body)
    payload.update(data)
    response.body = json.dumps(payload)
    return response
| [
"def",
"append_data_to_webob_response",
"(",
"response",
",",
"data",
")",
":",
"if",
"(",
"getattr",
"(",
"response",
",",
"'content_type'",
",",
"None",
")",
"==",
"'application/json'",
")",
":",
"response_data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"body",
")",
"response_data",
".",
"update",
"(",
"data",
")",
"response",
".",
"body",
"=",
"json",
".",
"dumps",
"(",
"response_data",
")",
"return",
"response"
] | appends data to a json webob response . | train | false |
def register_yaml():
    """Register an encoder/decoder pair for YAML serialization.

    When PyYAML is not importable, a stub decoder is registered instead
    that raises SerializerNotInstalled on use.
    """
    try:
        import yaml
    except ImportError:
        def not_available(*args, **kwargs):
            """Stub decoder: YAML message received but PyYAML is missing."""
            raise SerializerNotInstalled('No decoder installed for YAML. Install the PyYAML library')
        registry.register('yaml', None, not_available, 'application/x-yaml')
    else:
        registry.register('yaml', yaml.safe_dump, yaml.safe_load, content_type='application/x-yaml', content_encoding='utf-8')
| [
"def",
"register_yaml",
"(",
")",
":",
"try",
":",
"import",
"yaml",
"registry",
".",
"register",
"(",
"'yaml'",
",",
"yaml",
".",
"safe_dump",
",",
"yaml",
".",
"safe_load",
",",
"content_type",
"=",
"'application/x-yaml'",
",",
"content_encoding",
"=",
"'utf-8'",
")",
"except",
"ImportError",
":",
"def",
"not_available",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"raise",
"SerializerNotInstalled",
"(",
"'No decoder installed for YAML. Install the PyYAML library'",
")",
"registry",
".",
"register",
"(",
"'yaml'",
",",
"None",
",",
"not_available",
",",
"'application/x-yaml'",
")"
] | register a encoder/decoder for yaml serialization . | train | true |
21,957 | def trim_leading_lines(lines):
lines = list(lines)
while (lines and (not lines[0])):
lines.pop(0)
return lines
| [
"def",
"trim_leading_lines",
"(",
"lines",
")",
":",
"lines",
"=",
"list",
"(",
"lines",
")",
"while",
"(",
"lines",
"and",
"(",
"not",
"lines",
"[",
"0",
"]",
")",
")",
":",
"lines",
".",
"pop",
"(",
"0",
")",
"return",
"lines"
] | trim leading blank lines . | train | false |
21,958 | def get_func_list(stats, sel_list):
if stats.fcn_list:
list = stats.fcn_list[:]
order_message = ('Ordered by: ' + stats.sort_type)
else:
list = stats.stats.keys()
order_message = 'Random listing order was used'
select_message = ''
for selection in sel_list:
(list, select_message) = stats.eval_print_amount(selection, list, select_message)
return (list, order_message, select_message)
| [
"def",
"get_func_list",
"(",
"stats",
",",
"sel_list",
")",
":",
"if",
"stats",
".",
"fcn_list",
":",
"list",
"=",
"stats",
".",
"fcn_list",
"[",
":",
"]",
"order_message",
"=",
"(",
"'Ordered by: '",
"+",
"stats",
".",
"sort_type",
")",
"else",
":",
"list",
"=",
"stats",
".",
"stats",
".",
"keys",
"(",
")",
"order_message",
"=",
"'Random listing order was used'",
"select_message",
"=",
"''",
"for",
"selection",
"in",
"sel_list",
":",
"(",
"list",
",",
"select_message",
")",
"=",
"stats",
".",
"eval_print_amount",
"(",
"selection",
",",
"list",
",",
"select_message",
")",
"return",
"(",
"list",
",",
"order_message",
",",
"select_message",
")"
] | use sel_list to select a list of functions to display . | train | false |
21,959 | def make_cascade(loader, global_conf, catch='404', **local_conf):
catch = map(int, converters.aslist(catch))
apps = []
for (name, value) in local_conf.items():
if (not name.startswith('app')):
raise ValueError(("Bad configuration key %r (=%r); all configuration keys must start with 'app'" % (name, value)))
app = loader.get_app(value, global_conf=global_conf)
apps.append((name, app))
apps.sort()
apps = [app for (name, app) in apps]
return Cascade(apps, catch=catch)
| [
"def",
"make_cascade",
"(",
"loader",
",",
"global_conf",
",",
"catch",
"=",
"'404'",
",",
"**",
"local_conf",
")",
":",
"catch",
"=",
"map",
"(",
"int",
",",
"converters",
".",
"aslist",
"(",
"catch",
")",
")",
"apps",
"=",
"[",
"]",
"for",
"(",
"name",
",",
"value",
")",
"in",
"local_conf",
".",
"items",
"(",
")",
":",
"if",
"(",
"not",
"name",
".",
"startswith",
"(",
"'app'",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"\"Bad configuration key %r (=%r); all configuration keys must start with 'app'\"",
"%",
"(",
"name",
",",
"value",
")",
")",
")",
"app",
"=",
"loader",
".",
"get_app",
"(",
"value",
",",
"global_conf",
"=",
"global_conf",
")",
"apps",
".",
"append",
"(",
"(",
"name",
",",
"app",
")",
")",
"apps",
".",
"sort",
"(",
")",
"apps",
"=",
"[",
"app",
"for",
"(",
"name",
",",
"app",
")",
"in",
"apps",
"]",
"return",
"Cascade",
"(",
"apps",
",",
"catch",
"=",
"catch",
")"
] | entry point for paste deploy configuration expects configuration like:: [composit:cascade] use = egg:paste#cascade # all start with app and are sorted alphabetically app1 = foo app2 = bar catch = 404 500 . | train | false |
21,962 | def home_slug():
prefix = get_script_prefix()
slug = reverse(u'home')
if slug.startswith(prefix):
slug = (u'/' + slug[len(prefix):])
try:
return resolve(slug).kwargs[u'slug']
except KeyError:
return slug
| [
"def",
"home_slug",
"(",
")",
":",
"prefix",
"=",
"get_script_prefix",
"(",
")",
"slug",
"=",
"reverse",
"(",
"u'home'",
")",
"if",
"slug",
".",
"startswith",
"(",
"prefix",
")",
":",
"slug",
"=",
"(",
"u'/'",
"+",
"slug",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
")",
"try",
":",
"return",
"resolve",
"(",
"slug",
")",
".",
"kwargs",
"[",
"u'slug'",
"]",
"except",
"KeyError",
":",
"return",
"slug"
] | returns the slug arg defined for the home urlpattern . | train | false |
21,963 | def _api_queue_resume(output, value, kwargs):
if value:
items = value.split(',')
handled = NzbQueue.do.resume_multiple_nzo(items)
return report(output, keyword='', data={'status': bool(handled), 'nzo_ids': handled})
| [
"def",
"_api_queue_resume",
"(",
"output",
",",
"value",
",",
"kwargs",
")",
":",
"if",
"value",
":",
"items",
"=",
"value",
".",
"split",
"(",
"','",
")",
"handled",
"=",
"NzbQueue",
".",
"do",
".",
"resume_multiple_nzo",
"(",
"items",
")",
"return",
"report",
"(",
"output",
",",
"keyword",
"=",
"''",
",",
"data",
"=",
"{",
"'status'",
":",
"bool",
"(",
"handled",
")",
",",
"'nzo_ids'",
":",
"handled",
"}",
")"
] | api: accepts output . | train | false |
21,965 | def remove_tags(TagKeys, DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if (ARN is None):
if (DomainName is None):
raise SaltInvocationError('One (but not both) of ARN or domain must be specified.')
domaindata = status(DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile)
if ((not domaindata) or ('domain' not in domaindata)):
log.warning('Domain tags not updated')
return {'tagged': False}
ARN = domaindata.get('domain', {}).get('ARN')
elif (DomainName is not None):
raise SaltInvocationError('One (but not both) of ARN or domain must be specified.')
conn.remove_tags(ARN=domaindata.get('domain', {}).get('ARN'), TagKeys=TagKeys)
return {'tagged': True}
except ClientError as e:
return {'tagged': False, 'error': salt.utils.boto3.get_error(e)}
| [
"def",
"remove_tags",
"(",
"TagKeys",
",",
"DomainName",
"=",
"None",
",",
"ARN",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"(",
"ARN",
"is",
"None",
")",
":",
"if",
"(",
"DomainName",
"is",
"None",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'One (but not both) of ARN or domain must be specified.'",
")",
"domaindata",
"=",
"status",
"(",
"DomainName",
"=",
"DomainName",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"(",
"(",
"not",
"domaindata",
")",
"or",
"(",
"'domain'",
"not",
"in",
"domaindata",
")",
")",
":",
"log",
".",
"warning",
"(",
"'Domain tags not updated'",
")",
"return",
"{",
"'tagged'",
":",
"False",
"}",
"ARN",
"=",
"domaindata",
".",
"get",
"(",
"'domain'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'ARN'",
")",
"elif",
"(",
"DomainName",
"is",
"not",
"None",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'One (but not both) of ARN or domain must be specified.'",
")",
"conn",
".",
"remove_tags",
"(",
"ARN",
"=",
"domaindata",
".",
"get",
"(",
"'domain'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'ARN'",
")",
",",
"TagKeys",
"=",
"TagKeys",
")",
"return",
"{",
"'tagged'",
":",
"True",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'tagged'",
":",
"False",
",",
"'error'",
":",
"salt",
".",
"utils",
".",
"boto3",
".",
"get_error",
"(",
"e",
")",
"}"
] | returns the given html with given tags removed . | train | true |
21,966 | def get_lib_dir():
dirname = ('DLLs' if (sys.platform == 'win32') else 'lib')
libdir = os.path.join(sys.prefix, dirname)
return libdir
| [
"def",
"get_lib_dir",
"(",
")",
":",
"dirname",
"=",
"(",
"'DLLs'",
"if",
"(",
"sys",
".",
"platform",
"==",
"'win32'",
")",
"else",
"'lib'",
")",
"libdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sys",
".",
"prefix",
",",
"dirname",
")",
"return",
"libdir"
] | anaconda specific . | train | false |
21,967 | def ar_periodogram(x, window='hanning', window_len=7):
(x_current, x_lagged) = (x[1:], x[:(-1)])
(x_current, x_lagged) = (Series(x_current), Series(x_lagged))
results = ols(y=x_current, x=x_lagged, intercept=True, nw_lags=1)
e_hat = results.resid.values
phi = results.beta['x']
(w, I_w) = periodogram(e_hat, window=window, window_len=window_len)
I_w = (I_w / (np.abs((1 - (phi * np.exp((1j * w))))) ** 2))
return (w, I_w)
| [
"def",
"ar_periodogram",
"(",
"x",
",",
"window",
"=",
"'hanning'",
",",
"window_len",
"=",
"7",
")",
":",
"(",
"x_current",
",",
"x_lagged",
")",
"=",
"(",
"x",
"[",
"1",
":",
"]",
",",
"x",
"[",
":",
"(",
"-",
"1",
")",
"]",
")",
"(",
"x_current",
",",
"x_lagged",
")",
"=",
"(",
"Series",
"(",
"x_current",
")",
",",
"Series",
"(",
"x_lagged",
")",
")",
"results",
"=",
"ols",
"(",
"y",
"=",
"x_current",
",",
"x",
"=",
"x_lagged",
",",
"intercept",
"=",
"True",
",",
"nw_lags",
"=",
"1",
")",
"e_hat",
"=",
"results",
".",
"resid",
".",
"values",
"phi",
"=",
"results",
".",
"beta",
"[",
"'x'",
"]",
"(",
"w",
",",
"I_w",
")",
"=",
"periodogram",
"(",
"e_hat",
",",
"window",
"=",
"window",
",",
"window_len",
"=",
"window_len",
")",
"I_w",
"=",
"(",
"I_w",
"/",
"(",
"np",
".",
"abs",
"(",
"(",
"1",
"-",
"(",
"phi",
"*",
"np",
".",
"exp",
"(",
"(",
"1j",
"*",
"w",
")",
")",
")",
")",
")",
"**",
"2",
")",
")",
"return",
"(",
"w",
",",
"I_w",
")"
] | compute periodogram from data x . | train | false |
21,968 | def __execute_kadmin(cmd):
ret = {}
auth_keytab = __opts__.get('auth_keytab', None)
auth_principal = __opts__.get('auth_principal', None)
if (__salt__['file.file_exists'](auth_keytab) and auth_principal):
return __salt__['cmd.run_all']('kadmin -k -t {0} -p {1} -q "{2}"'.format(auth_keytab, auth_principal, cmd))
else:
log.error('Unable to find kerberos keytab/principal')
ret['retcode'] = 1
ret['comment'] = 'Missing authentication keytab/principal'
return ret
| [
"def",
"__execute_kadmin",
"(",
"cmd",
")",
":",
"ret",
"=",
"{",
"}",
"auth_keytab",
"=",
"__opts__",
".",
"get",
"(",
"'auth_keytab'",
",",
"None",
")",
"auth_principal",
"=",
"__opts__",
".",
"get",
"(",
"'auth_principal'",
",",
"None",
")",
"if",
"(",
"__salt__",
"[",
"'file.file_exists'",
"]",
"(",
"auth_keytab",
")",
"and",
"auth_principal",
")",
":",
"return",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"'kadmin -k -t {0} -p {1} -q \"{2}\"'",
".",
"format",
"(",
"auth_keytab",
",",
"auth_principal",
",",
"cmd",
")",
")",
"else",
":",
"log",
".",
"error",
"(",
"'Unable to find kerberos keytab/principal'",
")",
"ret",
"[",
"'retcode'",
"]",
"=",
"1",
"ret",
"[",
"'comment'",
"]",
"=",
"'Missing authentication keytab/principal'",
"return",
"ret"
] | execute kadmin commands . | train | true |
21,969 | def write_test_files(test_dir, names=None):
names = (names or range(10))
for i in names:
with open(os.path.join(test_dir, str(i)), u'wb') as out:
out.write(u''.encode(u'UTF-8'))
| [
"def",
"write_test_files",
"(",
"test_dir",
",",
"names",
"=",
"None",
")",
":",
"names",
"=",
"(",
"names",
"or",
"range",
"(",
"10",
")",
")",
"for",
"i",
"in",
"names",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"test_dir",
",",
"str",
"(",
"i",
")",
")",
",",
"u'wb'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"u''",
".",
"encode",
"(",
"u'UTF-8'",
")",
")"
] | write test files in test_dir using the names list . | train | false |
21,972 | def load_object(path):
try:
dot = path.rindex('.')
except ValueError:
raise ValueError(("Error loading object '%s': not a full path" % path))
(module, name) = (path[:dot], path[(dot + 1):])
mod = import_module(module)
try:
obj = getattr(mod, name)
except AttributeError:
raise NameError(("Module '%s' doesn't define any object named '%s'" % (module, name)))
return obj
| [
"def",
"load_object",
"(",
"path",
")",
":",
"try",
":",
"dot",
"=",
"path",
".",
"rindex",
"(",
"'.'",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"(",
"\"Error loading object '%s': not a full path\"",
"%",
"path",
")",
")",
"(",
"module",
",",
"name",
")",
"=",
"(",
"path",
"[",
":",
"dot",
"]",
",",
"path",
"[",
"(",
"dot",
"+",
"1",
")",
":",
"]",
")",
"mod",
"=",
"import_module",
"(",
"module",
")",
"try",
":",
"obj",
"=",
"getattr",
"(",
"mod",
",",
"name",
")",
"except",
"AttributeError",
":",
"raise",
"NameError",
"(",
"(",
"\"Module '%s' doesn't define any object named '%s'\"",
"%",
"(",
"module",
",",
"name",
")",
")",
")",
"return",
"obj"
] | load an object given its absolute object path . | train | false |
21,974 | @hug.sink('/all')
def sink(path):
return path
| [
"@",
"hug",
".",
"sink",
"(",
"'/all'",
")",
"def",
"sink",
"(",
"path",
")",
":",
"return",
"path"
] | for testing . | train | false |
21,975 | def list_home(t):
(owner, slug) = get_slug()
res = t.lists.statuses(slug=slug, owner_screen_name=owner, count=c['LIST_MAX'], include_entities=False)
for tweet in reversed(res):
draw(t=tweet)
printNicely('')
| [
"def",
"list_home",
"(",
"t",
")",
":",
"(",
"owner",
",",
"slug",
")",
"=",
"get_slug",
"(",
")",
"res",
"=",
"t",
".",
"lists",
".",
"statuses",
"(",
"slug",
"=",
"slug",
",",
"owner_screen_name",
"=",
"owner",
",",
"count",
"=",
"c",
"[",
"'LIST_MAX'",
"]",
",",
"include_entities",
"=",
"False",
")",
"for",
"tweet",
"in",
"reversed",
"(",
"res",
")",
":",
"draw",
"(",
"t",
"=",
"tweet",
")",
"printNicely",
"(",
"''",
")"
] | list home . | train | false |
21,976 | @ajax_required
def get_units(request, **kwargs_):
search_form = UnitSearchForm(request.GET, user=request.user)
if (not search_form.is_valid()):
errors = search_form.errors.as_data()
if ('path' in errors):
for error in errors['path']:
if (error.code == 'max_length'):
raise Http400(_('Path too long.'))
elif (error.code == 'required'):
raise Http400(_('Arguments missing.'))
raise Http404(forms.ValidationError(search_form.errors).messages)
(total, start, end, units_qs) = search_backend.get(Unit)(request.user, **search_form.cleaned_data).search()
return JsonResponse({'start': start, 'end': end, 'total': total, 'unitGroups': GroupedResults(units_qs).data})
| [
"@",
"ajax_required",
"def",
"get_units",
"(",
"request",
",",
"**",
"kwargs_",
")",
":",
"search_form",
"=",
"UnitSearchForm",
"(",
"request",
".",
"GET",
",",
"user",
"=",
"request",
".",
"user",
")",
"if",
"(",
"not",
"search_form",
".",
"is_valid",
"(",
")",
")",
":",
"errors",
"=",
"search_form",
".",
"errors",
".",
"as_data",
"(",
")",
"if",
"(",
"'path'",
"in",
"errors",
")",
":",
"for",
"error",
"in",
"errors",
"[",
"'path'",
"]",
":",
"if",
"(",
"error",
".",
"code",
"==",
"'max_length'",
")",
":",
"raise",
"Http400",
"(",
"_",
"(",
"'Path too long.'",
")",
")",
"elif",
"(",
"error",
".",
"code",
"==",
"'required'",
")",
":",
"raise",
"Http400",
"(",
"_",
"(",
"'Arguments missing.'",
")",
")",
"raise",
"Http404",
"(",
"forms",
".",
"ValidationError",
"(",
"search_form",
".",
"errors",
")",
".",
"messages",
")",
"(",
"total",
",",
"start",
",",
"end",
",",
"units_qs",
")",
"=",
"search_backend",
".",
"get",
"(",
"Unit",
")",
"(",
"request",
".",
"user",
",",
"**",
"search_form",
".",
"cleaned_data",
")",
".",
"search",
"(",
")",
"return",
"JsonResponse",
"(",
"{",
"'start'",
":",
"start",
",",
"'end'",
":",
"end",
",",
"'total'",
":",
"total",
",",
"'unitGroups'",
":",
"GroupedResults",
"(",
"units_qs",
")",
".",
"data",
"}",
")"
] | gets source and target texts and its metadata . | train | false |
21,980 | def file_load(file_name):
retval = None
if os.path.exists(file_name):
f_obj = open(file_name, 'r')
retval = f_obj.read()
f_obj.close()
else:
stderr.write((' ** Warning: The requested file (%s) does not exist.\n' % file_name))
return retval
| [
"def",
"file_load",
"(",
"file_name",
")",
":",
"retval",
"=",
"None",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_name",
")",
":",
"f_obj",
"=",
"open",
"(",
"file_name",
",",
"'r'",
")",
"retval",
"=",
"f_obj",
".",
"read",
"(",
")",
"f_obj",
".",
"close",
"(",
")",
"else",
":",
"stderr",
".",
"write",
"(",
"(",
"' ** Warning: The requested file (%s) does not exist.\\n'",
"%",
"file_name",
")",
")",
"return",
"retval"
] | load the indicated file into a string and return the string . | train | false |
21,981 | def htmlquote(text):
text = text.replace(u'&', u'&')
text = text.replace(u'<', u'<')
text = text.replace(u'>', u'>')
text = text.replace(u"'", u''')
text = text.replace(u'"', u'"')
return text
| [
"def",
"htmlquote",
"(",
"text",
")",
":",
"text",
"=",
"text",
".",
"replace",
"(",
"u'&'",
",",
"u'&'",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"u'<'",
",",
"u'<'",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"u'>'",
",",
"u'>'",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"u\"'\"",
",",
"u'''",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"u'\"'",
",",
"u'"'",
")",
"return",
"text"
] | encodes s for raw use in html . | train | false |
21,983 | def get_lan_ip():
try:
return [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if (not ip.startswith(u'127.'))][:1], [[(s.connect((u'8.8.8.8', 80)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
except:
return socket.gethostname()
| [
"def",
"get_lan_ip",
"(",
")",
":",
"try",
":",
"return",
"[",
"l",
"for",
"l",
"in",
"(",
"[",
"ip",
"for",
"ip",
"in",
"socket",
".",
"gethostbyname_ex",
"(",
"socket",
".",
"gethostname",
"(",
")",
")",
"[",
"2",
"]",
"if",
"(",
"not",
"ip",
".",
"startswith",
"(",
"u'127.'",
")",
")",
"]",
"[",
":",
"1",
"]",
",",
"[",
"[",
"(",
"s",
".",
"connect",
"(",
"(",
"u'8.8.8.8'",
",",
"80",
")",
")",
",",
"s",
".",
"getsockname",
"(",
")",
"[",
"0",
"]",
",",
"s",
".",
"close",
"(",
")",
")",
"for",
"s",
"in",
"[",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"]",
"]",
"[",
"0",
"]",
"[",
"1",
"]",
"]",
")",
"if",
"l",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"except",
":",
"return",
"socket",
".",
"gethostname",
"(",
")"
] | returns ip of system . | train | false |
21,984 | def compact_text(text, elidelength=None):
lines = []
for line in text.splitlines():
lines.append(line.strip())
out = ''.join(lines)
if (elidelength is not None):
out = elide(out, elidelength)
return out
| [
"def",
"compact_text",
"(",
"text",
",",
"elidelength",
"=",
"None",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"text",
".",
"splitlines",
"(",
")",
":",
"lines",
".",
"append",
"(",
"line",
".",
"strip",
"(",
")",
")",
"out",
"=",
"''",
".",
"join",
"(",
"lines",
")",
"if",
"(",
"elidelength",
"is",
"not",
"None",
")",
":",
"out",
"=",
"elide",
"(",
"out",
",",
"elidelength",
")",
"return",
"out"
] | remove leading whitespace and newlines from a text and maybe elide it . | train | false |
21,985 | def _normalize_step_parameters(steps, param_map, legacy=False, already_normalized=False):
normalized_param_map = {}
for step in steps:
if already_normalized:
param_dict = param_map.get(str(step.order_index), {})
else:
param_dict = _step_parameters(step, param_map, legacy=legacy)
if param_dict:
normalized_param_map[step.id] = param_dict
return normalized_param_map
| [
"def",
"_normalize_step_parameters",
"(",
"steps",
",",
"param_map",
",",
"legacy",
"=",
"False",
",",
"already_normalized",
"=",
"False",
")",
":",
"normalized_param_map",
"=",
"{",
"}",
"for",
"step",
"in",
"steps",
":",
"if",
"already_normalized",
":",
"param_dict",
"=",
"param_map",
".",
"get",
"(",
"str",
"(",
"step",
".",
"order_index",
")",
",",
"{",
"}",
")",
"else",
":",
"param_dict",
"=",
"_step_parameters",
"(",
"step",
",",
"param_map",
",",
"legacy",
"=",
"legacy",
")",
"if",
"param_dict",
":",
"normalized_param_map",
"[",
"step",
".",
"id",
"]",
"=",
"param_dict",
"return",
"normalized_param_map"
] | take a complex param_map that can reference parameters by step_id in the new flexible way or in the old one-parameter per tep fashion or by tool id and normalize the parameters so everything is referenced by a numeric step id . | train | false |
21,986 | def showglobal(**connection_args):
mod = sys._getframe().f_code.co_name
log.debug('{0}<--'.format(mod))
conn = _connect(**connection_args)
if (conn is None):
return []
rtnv = __do_query_into_hash(conn, 'SHOW GLOBAL VARIABLES')
conn.close()
if (len(rtnv) == 0):
rtnv.append([])
log.debug('{0}-->{1}'.format(mod, len(rtnv[0])))
return rtnv
| [
"def",
"showglobal",
"(",
"**",
"connection_args",
")",
":",
"mod",
"=",
"sys",
".",
"_getframe",
"(",
")",
".",
"f_code",
".",
"co_name",
"log",
".",
"debug",
"(",
"'{0}<--'",
".",
"format",
"(",
"mod",
")",
")",
"conn",
"=",
"_connect",
"(",
"**",
"connection_args",
")",
"if",
"(",
"conn",
"is",
"None",
")",
":",
"return",
"[",
"]",
"rtnv",
"=",
"__do_query_into_hash",
"(",
"conn",
",",
"'SHOW GLOBAL VARIABLES'",
")",
"conn",
".",
"close",
"(",
")",
"if",
"(",
"len",
"(",
"rtnv",
")",
"==",
"0",
")",
":",
"rtnv",
".",
"append",
"(",
"[",
"]",
")",
"log",
".",
"debug",
"(",
"'{0}-->{1}'",
".",
"format",
"(",
"mod",
",",
"len",
"(",
"rtnv",
"[",
"0",
"]",
")",
")",
")",
"return",
"rtnv"
] | retrieves the show global variables from the minion . | train | true |
21,991 | def generate_fontconfig_pattern(d):
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, ('get_' + key))()
if ((val is not None) and (val != [])):
if (type(val) == list):
val = [value_escape('\\\\\\1', str(x)) for x in val if (x is not None)]
if (val != []):
val = ','.join(val)
props.append((':%s=%s' % (key, val)))
return ''.join(props)
| [
"def",
"generate_fontconfig_pattern",
"(",
"d",
")",
":",
"props",
"=",
"[",
"]",
"families",
"=",
"''",
"size",
"=",
"''",
"for",
"key",
"in",
"'family style variant weight stretch file size'",
".",
"split",
"(",
")",
":",
"val",
"=",
"getattr",
"(",
"d",
",",
"(",
"'get_'",
"+",
"key",
")",
")",
"(",
")",
"if",
"(",
"(",
"val",
"is",
"not",
"None",
")",
"and",
"(",
"val",
"!=",
"[",
"]",
")",
")",
":",
"if",
"(",
"type",
"(",
"val",
")",
"==",
"list",
")",
":",
"val",
"=",
"[",
"value_escape",
"(",
"'\\\\\\\\\\\\1'",
",",
"str",
"(",
"x",
")",
")",
"for",
"x",
"in",
"val",
"if",
"(",
"x",
"is",
"not",
"None",
")",
"]",
"if",
"(",
"val",
"!=",
"[",
"]",
")",
":",
"val",
"=",
"','",
".",
"join",
"(",
"val",
")",
"props",
".",
"append",
"(",
"(",
"':%s=%s'",
"%",
"(",
"key",
",",
"val",
")",
")",
")",
"return",
"''",
".",
"join",
"(",
"props",
")"
] | given a dictionary of key/value pairs . | train | false |
21,992 | def generate_doc(handler_cls):
if (not ((type(handler_cls) is ObjectHandlerMetaClass) or (type(handler_cls) is handler.HandlerMetaClass))):
raise ValueError(('Give me handler, not %s' % type(handler_cls)))
return HandlerDocumentation(handler_cls)
| [
"def",
"generate_doc",
"(",
"handler_cls",
")",
":",
"if",
"(",
"not",
"(",
"(",
"type",
"(",
"handler_cls",
")",
"is",
"ObjectHandlerMetaClass",
")",
"or",
"(",
"type",
"(",
"handler_cls",
")",
"is",
"handler",
".",
"HandlerMetaClass",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Give me handler, not %s'",
"%",
"type",
"(",
"handler_cls",
")",
")",
")",
"return",
"HandlerDocumentation",
"(",
"handler_cls",
")"
] | returns a handlerdocumentation object for the given handler . | train | false |
21,993 | def ResetNolintSuppressions():
_error_suppressions.clear()
_global_error_suppressions.clear()
| [
"def",
"ResetNolintSuppressions",
"(",
")",
":",
"_error_suppressions",
".",
"clear",
"(",
")",
"_global_error_suppressions",
".",
"clear",
"(",
")"
] | resets the set of nolint suppressions to empty . | train | false |
21,995 | def initialize_glance_store():
glance_store.register_opts(CONF)
glance_store.create_stores(CONF)
glance_store.verify_default_store()
| [
"def",
"initialize_glance_store",
"(",
")",
":",
"glance_store",
".",
"register_opts",
"(",
"CONF",
")",
"glance_store",
".",
"create_stores",
"(",
"CONF",
")",
"glance_store",
".",
"verify_default_store",
"(",
")"
] | initialize glance store . | train | false |
21,996 | def obscure_string(input_string):
if ((input_string is None) or (len(input_string) < 4)):
return input_string
last_four = input_string[(-4):]
obscured = ('*' * (len(input_string) - 4))
return (obscured + last_four)
| [
"def",
"obscure_string",
"(",
"input_string",
")",
":",
"if",
"(",
"(",
"input_string",
"is",
"None",
")",
"or",
"(",
"len",
"(",
"input_string",
")",
"<",
"4",
")",
")",
":",
"return",
"input_string",
"last_four",
"=",
"input_string",
"[",
"(",
"-",
"4",
")",
":",
"]",
"obscured",
"=",
"(",
"'*'",
"*",
"(",
"len",
"(",
"input_string",
")",
"-",
"4",
")",
")",
"return",
"(",
"obscured",
"+",
"last_four",
")"
] | obscures the input string by replacing all but the last 4 characters in the string with the character * . | train | false |
21,997 | def escape_path(path):
if isinstance(path, types.UnicodeType):
path = path.encode('utf-8')
path = urllib.quote(path, HTTP_PATH_SAFE)
path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
return path
| [
"def",
"escape_path",
"(",
"path",
")",
":",
"if",
"isinstance",
"(",
"path",
",",
"types",
".",
"UnicodeType",
")",
":",
"path",
"=",
"path",
".",
"encode",
"(",
"'utf-8'",
")",
"path",
"=",
"urllib",
".",
"quote",
"(",
"path",
",",
"HTTP_PATH_SAFE",
")",
"path",
"=",
"ESCAPED_CHAR_RE",
".",
"sub",
"(",
"uppercase_escaped_char",
",",
"path",
")",
"return",
"path"
] | escape any invalid characters in http url . | train | false |
21,999 | def deluser(name, username):
grp_info = __salt__['group.info'](name)
if (username not in grp_info['members']):
return True
retcode = __salt__['cmd.retcode']('pw groupmod {0} -d {1}'.format(name, username), python_shell=False)
return (not retcode)
| [
"def",
"deluser",
"(",
"name",
",",
"username",
")",
":",
"grp_info",
"=",
"__salt__",
"[",
"'group.info'",
"]",
"(",
"name",
")",
"if",
"(",
"username",
"not",
"in",
"grp_info",
"[",
"'members'",
"]",
")",
":",
"return",
"True",
"retcode",
"=",
"__salt__",
"[",
"'cmd.retcode'",
"]",
"(",
"'pw groupmod {0} -d {1}'",
".",
"format",
"(",
"name",
",",
"username",
")",
",",
"python_shell",
"=",
"False",
")",
"return",
"(",
"not",
"retcode",
")"
] | remove a user from the group . | train | true |
22,002 | def advance_time_delta(timedelta):
assert (not (utcnow.override_time is None))
try:
for dt in utcnow.override_time:
dt += timedelta
except TypeError:
utcnow.override_time += timedelta
| [
"def",
"advance_time_delta",
"(",
"timedelta",
")",
":",
"assert",
"(",
"not",
"(",
"utcnow",
".",
"override_time",
"is",
"None",
")",
")",
"try",
":",
"for",
"dt",
"in",
"utcnow",
".",
"override_time",
":",
"dt",
"+=",
"timedelta",
"except",
"TypeError",
":",
"utcnow",
".",
"override_time",
"+=",
"timedelta"
] | advance overridden time using a datetime . | train | true |
22,004 | @requires_badges_enabled
def completion_check(user):
from certificates.models import CertificateStatuses
config = CourseEventBadgesConfiguration.current().completed_settings
certificates = user.generatedcertificate_set.filter(status__in=CertificateStatuses.PASSED_STATUSES).count()
award_badge(config, certificates, user)
| [
"@",
"requires_badges_enabled",
"def",
"completion_check",
"(",
"user",
")",
":",
"from",
"certificates",
".",
"models",
"import",
"CertificateStatuses",
"config",
"=",
"CourseEventBadgesConfiguration",
".",
"current",
"(",
")",
".",
"completed_settings",
"certificates",
"=",
"user",
".",
"generatedcertificate_set",
".",
"filter",
"(",
"status__in",
"=",
"CertificateStatuses",
".",
"PASSED_STATUSES",
")",
".",
"count",
"(",
")",
"award_badge",
"(",
"config",
",",
"certificates",
",",
"user",
")"
] | awards badges based upon the number of courses a user has completed . | train | false |
22,005 | def _compressed_sparse_stack(blocks, axis):
other_axis = (1 if (axis == 0) else 0)
data = np.concatenate([b.data for b in blocks])
indices = np.concatenate([b.indices for b in blocks])
indptr = []
last_indptr = 0
constant_dim = blocks[0].shape[other_axis]
sum_dim = 0
for b in blocks:
if (b.shape[other_axis] != constant_dim):
raise ValueError(('incompatible dimensions for axis %d' % other_axis))
sum_dim += b.shape[axis]
indptr.append((b.indptr[:(-1)] + last_indptr))
last_indptr += b.indptr[(-1)]
indptr.append([last_indptr])
indptr = np.concatenate(indptr)
if (axis == 0):
return csr_matrix((data, indices, indptr), shape=(sum_dim, constant_dim))
else:
return csc_matrix((data, indices, indptr), shape=(constant_dim, sum_dim))
| [
"def",
"_compressed_sparse_stack",
"(",
"blocks",
",",
"axis",
")",
":",
"other_axis",
"=",
"(",
"1",
"if",
"(",
"axis",
"==",
"0",
")",
"else",
"0",
")",
"data",
"=",
"np",
".",
"concatenate",
"(",
"[",
"b",
".",
"data",
"for",
"b",
"in",
"blocks",
"]",
")",
"indices",
"=",
"np",
".",
"concatenate",
"(",
"[",
"b",
".",
"indices",
"for",
"b",
"in",
"blocks",
"]",
")",
"indptr",
"=",
"[",
"]",
"last_indptr",
"=",
"0",
"constant_dim",
"=",
"blocks",
"[",
"0",
"]",
".",
"shape",
"[",
"other_axis",
"]",
"sum_dim",
"=",
"0",
"for",
"b",
"in",
"blocks",
":",
"if",
"(",
"b",
".",
"shape",
"[",
"other_axis",
"]",
"!=",
"constant_dim",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'incompatible dimensions for axis %d'",
"%",
"other_axis",
")",
")",
"sum_dim",
"+=",
"b",
".",
"shape",
"[",
"axis",
"]",
"indptr",
".",
"append",
"(",
"(",
"b",
".",
"indptr",
"[",
":",
"(",
"-",
"1",
")",
"]",
"+",
"last_indptr",
")",
")",
"last_indptr",
"+=",
"b",
".",
"indptr",
"[",
"(",
"-",
"1",
")",
"]",
"indptr",
".",
"append",
"(",
"[",
"last_indptr",
"]",
")",
"indptr",
"=",
"np",
".",
"concatenate",
"(",
"indptr",
")",
"if",
"(",
"axis",
"==",
"0",
")",
":",
"return",
"csr_matrix",
"(",
"(",
"data",
",",
"indices",
",",
"indptr",
")",
",",
"shape",
"=",
"(",
"sum_dim",
",",
"constant_dim",
")",
")",
"else",
":",
"return",
"csc_matrix",
"(",
"(",
"data",
",",
"indices",
",",
"indptr",
")",
",",
"shape",
"=",
"(",
"constant_dim",
",",
"sum_dim",
")",
")"
] | stacking fast path for csr/csc matrices (i) vstack for csr . | train | false |
22,006 | def _objectify(items, container_name):
objects = []
for item in items:
if (item.get('subdir', None) is not None):
object_cls = PseudoFolder
else:
object_cls = StorageObject
objects.append(object_cls(item, container_name))
return objects
| [
"def",
"_objectify",
"(",
"items",
",",
"container_name",
")",
":",
"objects",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"if",
"(",
"item",
".",
"get",
"(",
"'subdir'",
",",
"None",
")",
"is",
"not",
"None",
")",
":",
"object_cls",
"=",
"PseudoFolder",
"else",
":",
"object_cls",
"=",
"StorageObject",
"objects",
".",
"append",
"(",
"object_cls",
"(",
"item",
",",
"container_name",
")",
")",
"return",
"objects"
] | splits a listing of objects into their appropriate wrapper classes . | train | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.