id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
25,834 | def norm_corr(x, y, mode='valid'):
return (np.correlate(x, y, mode) / ((np.std(x) * np.std(y)) * x.shape[(-1)]))
| [
"def",
"norm_corr",
"(",
"x",
",",
"y",
",",
"mode",
"=",
"'valid'",
")",
":",
"return",
"(",
"np",
".",
"correlate",
"(",
"x",
",",
"y",
",",
"mode",
")",
"/",
"(",
"(",
"np",
".",
"std",
"(",
"x",
")",
"*",
"np",
".",
"std",
"(",
"y",
"... | returns the correlation between two ndarrays . | train | false |
25,835 | def get_review_request_fieldsets(include_main=False, include_change_entries_only=False):
if (include_main and include_change_entries_only):
return list(fieldset_registry)
else:
excluded_ids = []
if (not include_main):
excluded_ids.append(u'main')
if (not include_change_entries_only):
excluded_ids.append(u'_change_entries_only')
return [fieldset for fieldset in fieldset_registry if (fieldset.fieldset_id not in excluded_ids)]
| [
"def",
"get_review_request_fieldsets",
"(",
"include_main",
"=",
"False",
",",
"include_change_entries_only",
"=",
"False",
")",
":",
"if",
"(",
"include_main",
"and",
"include_change_entries_only",
")",
":",
"return",
"list",
"(",
"fieldset_registry",
")",
"else",
... | returns a list of all registered fieldset classes . | train | false |
25,836 | def _make_tag(base_dt, val, mdtype, sde=False):
base_dt = np.dtype(base_dt)
bo = boc.to_numpy_code(base_dt.byteorder)
byte_count = base_dt.itemsize
if (not sde):
udt = (bo + 'u4')
padding = (8 - (byte_count % 8))
all_dt = [('mdtype', udt), ('byte_count', udt), ('val', base_dt)]
if padding:
all_dt.append(('padding', 'u1', padding))
else:
udt = (bo + 'u2')
padding = (4 - byte_count)
if (bo == '<'):
all_dt = [('mdtype', udt), ('byte_count', udt), ('val', base_dt)]
else:
all_dt = [('byte_count', udt), ('mdtype', udt), ('val', base_dt)]
if padding:
all_dt.append(('padding', 'u1', padding))
tag = np.zeros((1,), dtype=all_dt)
tag['mdtype'] = mdtype
tag['byte_count'] = byte_count
tag['val'] = val
return tag
| [
"def",
"_make_tag",
"(",
"base_dt",
",",
"val",
",",
"mdtype",
",",
"sde",
"=",
"False",
")",
":",
"base_dt",
"=",
"np",
".",
"dtype",
"(",
"base_dt",
")",
"bo",
"=",
"boc",
".",
"to_numpy_code",
"(",
"base_dt",
".",
"byteorder",
")",
"byte_count",
"... | makes a simple matlab tag . | train | false |
25,837 | @utils.arg('label', metavar='<network_label>', help=_('Network label (ex. my_new_network)'))
@utils.arg('cidr', metavar='<cidr>', help=_('IP block to allocate from (ex. 172.16.0.0/24 or 2001:DB8::/64)'))
@shell.deprecated_network
def do_tenant_network_create(cs, args):
network = cs.tenant_networks.create(args.label, args.cidr)
utils.print_dict(network._info)
| [
"@",
"utils",
".",
"arg",
"(",
"'label'",
",",
"metavar",
"=",
"'<network_label>'",
",",
"help",
"=",
"_",
"(",
"'Network label (ex. my_new_network)'",
")",
")",
"@",
"utils",
".",
"arg",
"(",
"'cidr'",
",",
"metavar",
"=",
"'<cidr>'",
",",
"help",
"=",
... | create a tenant network . | train | false |
25,838 | def rescan_iscsi_hba(session, cluster=None):
host_mor = vm_util.get_host_ref(session, cluster)
storage_system_mor = session._call_method(vim_util, 'get_dynamic_property', host_mor, 'HostSystem', 'configManager.storageSystem')
hbas_ret = session._call_method(vim_util, 'get_dynamic_property', storage_system_mor, 'HostStorageSystem', 'storageDeviceInfo.hostBusAdapter')
if (hbas_ret is None):
return
host_hbas = hbas_ret.HostHostBusAdapter
if (not host_hbas):
return
for hba in host_hbas:
if (hba.__class__.__name__ == 'HostInternetScsiHba'):
hba_device = hba.device
break
else:
return
LOG.debug((_('Rescanning HBA %s') % hba_device))
session._call_method(session._get_vim(), 'RescanHba', storage_system_mor, hbaDevice=hba_device)
LOG.debug((_('Rescanned HBA %s ') % hba_device))
| [
"def",
"rescan_iscsi_hba",
"(",
"session",
",",
"cluster",
"=",
"None",
")",
":",
"host_mor",
"=",
"vm_util",
".",
"get_host_ref",
"(",
"session",
",",
"cluster",
")",
"storage_system_mor",
"=",
"session",
".",
"_call_method",
"(",
"vim_util",
",",
"'get_dynam... | rescan the iscsi hba to discover iscsi targets . | train | false |
25,839 | def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True):
(c, lower) = c_and_lower
if check_finite:
b1 = asarray_chkfinite(b)
c = asarray_chkfinite(c)
else:
b1 = asarray(b)
c = asarray(c)
if ((c.ndim != 2) or (c.shape[0] != c.shape[1])):
raise ValueError('The factored matrix c is not square.')
if (c.shape[1] != b1.shape[0]):
raise ValueError('incompatible dimensions.')
overwrite_b = (overwrite_b or _datacopied(b1, b))
(potrs,) = get_lapack_funcs(('potrs',), (c, b1))
(x, info) = potrs(c, b1, lower=lower, overwrite_b=overwrite_b)
if (info != 0):
raise ValueError(('illegal value in %d-th argument of internal potrs' % (- info)))
return x
| [
"def",
"cho_solve",
"(",
"c_and_lower",
",",
"b",
",",
"overwrite_b",
"=",
"False",
",",
"check_finite",
"=",
"True",
")",
":",
"(",
"c",
",",
"lower",
")",
"=",
"c_and_lower",
"if",
"check_finite",
":",
"b1",
"=",
"asarray_chkfinite",
"(",
"b",
")",
"... | solve the linear equations a x = b . | train | false |
25,841 | def assign_default_role(course_id, user):
assign_role(course_id, user, FORUM_ROLE_STUDENT)
| [
"def",
"assign_default_role",
"(",
"course_id",
",",
"user",
")",
":",
"assign_role",
"(",
"course_id",
",",
"user",
",",
"FORUM_ROLE_STUDENT",
")"
] | assign forum default role student to user . | train | false |
25,842 | def encode_for_xml(ustr, encoding='ascii'):
if isinstance(ustr, unicode):
pass
elif isinstance(ustr, str):
ustr = ustr.decode(codepage, 'replace')
else:
ustr = unicode(str(ustr))
return ustr.encode(encoding, 'xmlcharrefreplace')
| [
"def",
"encode_for_xml",
"(",
"ustr",
",",
"encoding",
"=",
"'ascii'",
")",
":",
"if",
"isinstance",
"(",
"ustr",
",",
"unicode",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"ustr",
",",
"str",
")",
":",
"ustr",
"=",
"ustr",
".",
"decode",
"(",
"cod... | encode unicode_data for use as xml or html . | train | false |
25,843 | def set_filename(filename, tree):
worklist = [tree]
while worklist:
node = worklist.pop(0)
node.filename = filename
worklist.extend(node.getChildNodes())
| [
"def",
"set_filename",
"(",
"filename",
",",
"tree",
")",
":",
"worklist",
"=",
"[",
"tree",
"]",
"while",
"worklist",
":",
"node",
"=",
"worklist",
".",
"pop",
"(",
"0",
")",
"node",
".",
"filename",
"=",
"filename",
"worklist",
".",
"extend",
"(",
... | set the filename attribute to filename on every node in tree . | train | false |
25,844 | def _get_config(**kwargs):
config = {'api_host': 'localhost', 'api_port': 623, 'api_user': 'admin', 'api_pass': '', 'api_kg': None, 'api_login_timeout': 2}
if ('__salt__' in globals()):
config_key = '{0}.config'.format(__virtualname__)
config.update(__salt__['config.get'](config_key, {}))
for k in (set(config.keys()) & set(kwargs.keys())):
config[k] = kwargs[k]
return config
| [
"def",
"_get_config",
"(",
"**",
"kwargs",
")",
":",
"config",
"=",
"{",
"'api_host'",
":",
"'localhost'",
",",
"'api_port'",
":",
"623",
",",
"'api_user'",
":",
"'admin'",
",",
"'api_pass'",
":",
"''",
",",
"'api_kg'",
":",
"None",
",",
"'api_login_timeou... | helper function for get_config . | train | true |
25,845 | def normalize_name(name):
new = re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', name)
return new.lower().strip('_')
| [
"def",
"normalize_name",
"(",
"name",
")",
":",
"new",
"=",
"re",
".",
"sub",
"(",
"'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))'",
",",
"'_\\\\1'",
",",
"name",
")",
"return",
"new",
".",
"lower",
"(",
")",
".",
"strip",
"(",
"'_'",
")"
] | normalize a python package name a la pep 503 . | train | false |
25,846 | def is_homebrew_env():
env_prefix = get_homebrew_prefix()
if (env_prefix and base_prefix.startswith(env_prefix)):
return True
return False
| [
"def",
"is_homebrew_env",
"(",
")",
":",
"env_prefix",
"=",
"get_homebrew_prefix",
"(",
")",
"if",
"(",
"env_prefix",
"and",
"base_prefix",
".",
"startswith",
"(",
"env_prefix",
")",
")",
":",
"return",
"True",
"return",
"False"
] | check if python interpreter was installed via homebrew command brew . | train | false |
25,847 | def set_config(key, value):
if value.isdigit():
value = int(value)
elif (value.lower() == 'true'):
value = True
elif (value.lower() == 'false'):
value = False
c[key] = value
path = ((os.path.expanduser('~') + os.sep) + '.rainbow_config.json')
data = {}
try:
data = load_config(path)
except:
return
if (key in data):
fixup(data, key, value)
else:
data[key] = value
with open(path, 'w') as out:
json.dump(data, out, indent=4)
os.system(('chmod 777 ' + path))
| [
"def",
"set_config",
"(",
"key",
",",
"value",
")",
":",
"if",
"value",
".",
"isdigit",
"(",
")",
":",
"value",
"=",
"int",
"(",
"value",
")",
"elif",
"(",
"value",
".",
"lower",
"(",
")",
"==",
"'true'",
")",
":",
"value",
"=",
"True",
"elif",
... | set the value of a traffic server configuration variable . | train | false |
25,848 | def crop_title(title, length=None, suffix='...'):
length = (flaskbb_config['TITLE_LENGTH'] if (length is None) else length)
if (len(title) <= length):
return title
return (title[:length].rsplit(' ', 1)[0] + suffix)
| [
"def",
"crop_title",
"(",
"title",
",",
"length",
"=",
"None",
",",
"suffix",
"=",
"'...'",
")",
":",
"length",
"=",
"(",
"flaskbb_config",
"[",
"'TITLE_LENGTH'",
"]",
"if",
"(",
"length",
"is",
"None",
")",
"else",
"length",
")",
"if",
"(",
"len",
"... | crops the title to a specified length . | train | false |
25,849 | def _create_fake_xml(items):
xml = '<?xml version="1.0" encoding="UTF-8" ?><rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:newznab="http://www.newznab.com/DTD/2010/feeds/attributes/" encoding="utf-8"><channel>'
for item in items:
xml += (('<item><title>' + item) + '</title>\n')
xml += (('<link>http://fantasy.com/' + item) + '</link></item>')
xml += '</channel></rss>'
return xml
| [
"def",
"_create_fake_xml",
"(",
"items",
")",
":",
"xml",
"=",
"'<?xml version=\"1.0\" encoding=\"UTF-8\" ?><rss version=\"2.0\" xmlns:atom=\"http://www.w3.org/2005/Atom\" xmlns:newznab=\"http://www.newznab.com/DTD/2010/feeds/attributes/\" encoding=\"utf-8\"><channel>'",
"for",
"item",
"in",
... | create fake xml . | train | false |
25,850 | def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output=None):
from frappe.website.render import build_page
from frappe.utils.pdf import get_pdf
local.form_dict.doctype = doctype
local.form_dict.name = name
local.form_dict.format = print_format
local.form_dict.style = style
local.form_dict.doc = doc
if (not html):
html = build_page(u'print')
if as_pdf:
return get_pdf(html, output=output)
else:
return html
| [
"def",
"get_print",
"(",
"doctype",
"=",
"None",
",",
"name",
"=",
"None",
",",
"print_format",
"=",
"None",
",",
"style",
"=",
"None",
",",
"html",
"=",
"None",
",",
"as_pdf",
"=",
"False",
",",
"doc",
"=",
"None",
",",
"output",
"=",
"None",
")",... | get print format for given document . | train | false |
25,852 | def inner_problem_check(problems, done, verbosity):
result = []
for (last, migration) in problems:
checked = set([])
to_check = list(last.dependencies)
while to_check:
checking = to_check.pop()
if (checking in checked):
continue
checked.add(checking)
if (checking not in done):
if verbosity:
print((' ! Migration %s should not have been applied before %s but was.' % (last, checking)))
result.append((last, checking))
else:
to_check.extend(checking.dependencies)
return result
| [
"def",
"inner_problem_check",
"(",
"problems",
",",
"done",
",",
"verbosity",
")",
":",
"result",
"=",
"[",
"]",
"for",
"(",
"last",
",",
"migration",
")",
"in",
"problems",
":",
"checked",
"=",
"set",
"(",
"[",
"]",
")",
"to_check",
"=",
"list",
"("... | takes a set of possible problems and gets the actual issues out of it . | train | false |
25,853 | def aggregate(loss, weights=None, mode='mean'):
if (weights is not None):
loss = (loss * weights)
if (mode == 'mean'):
return loss.mean()
elif (mode == 'sum'):
return loss.sum()
elif (mode == 'normalized_sum'):
if (weights is None):
raise ValueError("require weights for mode='normalized_sum'")
return (loss.sum() / weights.sum())
else:
raise ValueError(("mode must be 'mean', 'sum' or 'normalized_sum', got %r" % mode))
| [
"def",
"aggregate",
"(",
"loss",
",",
"weights",
"=",
"None",
",",
"mode",
"=",
"'mean'",
")",
":",
"if",
"(",
"weights",
"is",
"not",
"None",
")",
":",
"loss",
"=",
"(",
"loss",
"*",
"weights",
")",
"if",
"(",
"mode",
"==",
"'mean'",
")",
":",
... | aggregate record-style value for a method decorated with @one . | train | false |
25,855 | def _resp_body_property():
def getter(self):
if (not self._body):
if (not self._app_iter):
return ''
self._body = ''.join(self._app_iter)
self._app_iter = None
return self._body
def setter(self, value):
if isinstance(value, unicode):
value = value.encode('utf-8')
if isinstance(value, str):
self.content_length = len(value)
self._app_iter = None
self._body = value
return property(getter, setter, doc='Retrieve and set the Response body str')
| [
"def",
"_resp_body_property",
"(",
")",
":",
"def",
"getter",
"(",
"self",
")",
":",
"if",
"(",
"not",
"self",
".",
"_body",
")",
":",
"if",
"(",
"not",
"self",
".",
"_app_iter",
")",
":",
"return",
"''",
"self",
".",
"_body",
"=",
"''",
".",
"jo... | set and retrieve the value of response . | train | false |
25,856 | def build_file_from_blob(blob, mode, target_path, honor_filemode=True):
try:
oldstat = os.lstat(target_path)
except OSError as e:
if (e.errno == errno.ENOENT):
oldstat = None
else:
raise
contents = blob.as_raw_string()
if stat.S_ISLNK(mode):
if oldstat:
os.unlink(target_path)
os.symlink(contents, target_path)
else:
if ((oldstat is not None) and (oldstat.st_size == len(contents))):
with open(target_path, 'rb') as f:
if (f.read() == contents):
return oldstat
with open(target_path, 'wb') as f:
f.write(contents)
if honor_filemode:
os.chmod(target_path, mode)
return os.lstat(target_path)
| [
"def",
"build_file_from_blob",
"(",
"blob",
",",
"mode",
",",
"target_path",
",",
"honor_filemode",
"=",
"True",
")",
":",
"try",
":",
"oldstat",
"=",
"os",
".",
"lstat",
"(",
"target_path",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"(",
"e",
"."... | build a file or symlink on disk based on a git object . | train | false |
25,857 | def metadef_property_get_all(context, namespace_name, session=None):
session = (session or get_session())
return metadef_property_api.get_all(context, namespace_name, session)
| [
"def",
"metadef_property_get_all",
"(",
"context",
",",
"namespace_name",
",",
"session",
"=",
"None",
")",
":",
"session",
"=",
"(",
"session",
"or",
"get_session",
"(",
")",
")",
"return",
"metadef_property_api",
".",
"get_all",
"(",
"context",
",",
"namespa... | get a metadef property or raise if it does not exist . | train | false |
25,859 | @register.filter
def showextrawhitespace(value):
value = extraWhitespace.sub(u'<span class="ew">\\1</span>', value)
return value.replace(u' DCTB ', u'<span class="tb"> DCTB </span>')
| [
"@",
"register",
".",
"filter",
"def",
"showextrawhitespace",
"(",
"value",
")",
":",
"value",
"=",
"extraWhitespace",
".",
"sub",
"(",
"u'<span class=\"ew\">\\\\1</span>'",
",",
"value",
")",
"return",
"value",
".",
"replace",
"(",
"u' DCTB '",
",",
"u'<span cl... | marks up any extra whitespace in the specified text . | train | false |
25,860 | def GetStartTime(cron_cls):
if (not cron_cls.start_time_randomization):
return rdfvalue.RDFDatetime.Now()
now = rdfvalue.RDFDatetime.Now()
window_ms = cron_cls.frequency.microseconds
start_time_ms = random.randint(now.AsMicroSecondsFromEpoch(), (now.AsMicroSecondsFromEpoch() + window_ms))
return rdfvalue.RDFDatetime(start_time_ms)
| [
"def",
"GetStartTime",
"(",
"cron_cls",
")",
":",
"if",
"(",
"not",
"cron_cls",
".",
"start_time_randomization",
")",
":",
"return",
"rdfvalue",
".",
"RDFDatetime",
".",
"Now",
"(",
")",
"now",
"=",
"rdfvalue",
".",
"RDFDatetime",
".",
"Now",
"(",
")",
"... | get start time for a systemcronflow class . | train | false |
25,861 | def get_defaults_for(parent=u'__default'):
defaults = frappe.cache().hget(u'defaults', parent)
if (defaults == None):
res = frappe.db.sql(u'select defkey, defvalue from `tabDefaultValue`\n DCTB DCTB DCTB where parent = %s order by creation', (parent,), as_dict=1)
defaults = frappe._dict({})
for d in res:
if (d.defkey in defaults):
if ((not isinstance(defaults[d.defkey], list)) and (defaults[d.defkey] != d.defvalue)):
defaults[d.defkey] = [defaults[d.defkey]]
if (d.defvalue not in defaults[d.defkey]):
defaults[d.defkey].append(d.defvalue)
elif (d.defvalue is not None):
defaults[d.defkey] = d.defvalue
frappe.cache().hset(u'defaults', parent, defaults)
return defaults
| [
"def",
"get_defaults_for",
"(",
"parent",
"=",
"u'__default'",
")",
":",
"defaults",
"=",
"frappe",
".",
"cache",
"(",
")",
".",
"hget",
"(",
"u'defaults'",
",",
"parent",
")",
"if",
"(",
"defaults",
"==",
"None",
")",
":",
"res",
"=",
"frappe",
".",
... | get all defaults . | train | false |
25,862 | def vs_to_tup(vs):
return re.findall('\\d+', vs)
| [
"def",
"vs_to_tup",
"(",
"vs",
")",
":",
"return",
"re",
".",
"findall",
"(",
"'\\\\d+'",
",",
"vs",
")"
] | version string to tuple . | train | false |
25,863 | def expand_indent(line):
if (' DCTB ' not in line):
return (len(line) - len(line.lstrip()))
result = 0
for char in line:
if (char == ' DCTB '):
result = (((result // 8) * 8) + 8)
elif (char == ' '):
result += 1
else:
break
return result
| [
"def",
"expand_indent",
"(",
"line",
")",
":",
"if",
"(",
"' DCTB '",
"not",
"in",
"line",
")",
":",
"return",
"(",
"len",
"(",
"line",
")",
"-",
"len",
"(",
"line",
".",
"lstrip",
"(",
")",
")",
")",
"result",
"=",
"0",
"for",
"char",
"in",
"l... | return the amount of indentation . | train | true |
25,864 | def FormatISOTime(t):
return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime((t / 1000000.0)))
| [
"def",
"FormatISOTime",
"(",
"t",
")",
":",
"return",
"time",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
",",
"time",
".",
"gmtime",
"(",
"(",
"t",
"/",
"1000000.0",
")",
")",
")"
] | format a time in epoch notation to iso utc . | train | false |
25,865 | @memoize_id
def TTableModel(metadata=None):
from sqlalchemy import MetaData
@add_metaclass(ComplexModelMeta)
class TableModel(TTableModelBase(), ):
class Attributes(ComplexModelBase.Attributes, ):
sqla_metadata = (metadata or MetaData())
return TableModel
| [
"@",
"memoize_id",
"def",
"TTableModel",
"(",
"metadata",
"=",
"None",
")",
":",
"from",
"sqlalchemy",
"import",
"MetaData",
"@",
"add_metaclass",
"(",
"ComplexModelMeta",
")",
"class",
"TableModel",
"(",
"TTableModelBase",
"(",
")",
",",
")",
":",
"class",
... | a tablemodel template that generates a new tablemodel class for each call . | train | false |
25,867 | def subprocess_call(cmd, verbose=True, errorprint=True):
verbose_print(verbose, ('\n[MoviePy] Running:\n>>> ' + ' '.join(cmd)))
popen_params = {'stdout': DEVNULL, 'stderr': sp.PIPE, 'stdin': DEVNULL}
if (os.name == 'nt'):
popen_params['creationflags'] = 134217728
proc = sp.Popen(cmd, **popen_params)
(out, err) = proc.communicate()
proc.stderr.close()
if proc.returncode:
verbose_print(errorprint, '\n[MoviePy] This command returned an error !')
raise IOError(err.decode('utf8'))
else:
verbose_print(verbose, '\n... command successful.\n')
del proc
| [
"def",
"subprocess_call",
"(",
"cmd",
",",
"verbose",
"=",
"True",
",",
"errorprint",
"=",
"True",
")",
":",
"verbose_print",
"(",
"verbose",
",",
"(",
"'\\n[MoviePy] Running:\\n>>> '",
"+",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
")",
"popen_params",
"=... | executes the given subprocess command . | train | false |
25,868 | def used_memory():
return (psutil.virtual_memory().used / (2 ** 20))
| [
"def",
"used_memory",
"(",
")",
":",
"return",
"(",
"psutil",
".",
"virtual_memory",
"(",
")",
".",
"used",
"/",
"(",
"2",
"**",
"20",
")",
")"
] | return the total mb of system memory in use . | train | false |
25,870 | def init(mpstate):
return SerialModule(mpstate)
| [
"def",
"init",
"(",
"mpstate",
")",
":",
"return",
"SerialModule",
"(",
"mpstate",
")"
] | initialise module . | train | false |
25,871 | def check_compatibility():
messages = []
for check_module in COMPAT_CHECKS:
check = getattr(check_module, u'run_checks', None)
if (check is None):
warnings.warn(((u"The '%s' module lacks a " % check_module.__name__) + u"'run_checks' method, which is needed to verify compatibility."))
continue
messages.extend(check())
return messages
| [
"def",
"check_compatibility",
"(",
")",
":",
"messages",
"=",
"[",
"]",
"for",
"check_module",
"in",
"COMPAT_CHECKS",
":",
"check",
"=",
"getattr",
"(",
"check_module",
",",
"u'run_checks'",
",",
"None",
")",
"if",
"(",
"check",
"is",
"None",
")",
":",
"... | raises errors or warns if called with an incompatible wheel-version . | train | false |
25,872 | def _setHTTPCookies():
if conf.cookie:
debugMsg = 'setting the HTTP Cookie header'
logger.debug(debugMsg)
conf.httpHeaders[HTTP_HEADER.COOKIE] = conf.cookie
| [
"def",
"_setHTTPCookies",
"(",
")",
":",
"if",
"conf",
".",
"cookie",
":",
"debugMsg",
"=",
"'setting the HTTP Cookie header'",
"logger",
".",
"debug",
"(",
"debugMsg",
")",
"conf",
".",
"httpHeaders",
"[",
"HTTP_HEADER",
".",
"COOKIE",
"]",
"=",
"conf",
"."... | set the http cookie header . | train | false |
25,873 | @interruptable
def xwrite(fh, content, encoding=None):
return fh.write(encode(content, encoding=encoding))
| [
"@",
"interruptable",
"def",
"xwrite",
"(",
"fh",
",",
"content",
",",
"encoding",
"=",
"None",
")",
":",
"return",
"fh",
".",
"write",
"(",
"encode",
"(",
"content",
",",
"encoding",
"=",
"encoding",
")",
")"
] | write to a filehandle and retry when interrupted . | train | false |
25,874 | def is_data_index_sample_file(file_path):
if is_column_based(file_path):
return True
if checkers.check_html(file_path):
return False
if checkers.check_image(file_path):
return False
if checkers.check_binary(name=file_path):
return False
if checkers.is_bz2(file_path):
return False
if checkers.is_gzip(file_path):
return False
if checkers.check_zip(file_path):
return False
return True
| [
"def",
"is_data_index_sample_file",
"(",
"file_path",
")",
":",
"if",
"is_column_based",
"(",
"file_path",
")",
":",
"return",
"True",
"if",
"checkers",
".",
"check_html",
"(",
"file_path",
")",
":",
"return",
"False",
"if",
"checkers",
".",
"check_image",
"("... | attempt to determine if a . | train | false |
25,875 | def getNewRepository():
return ExportRepository()
| [
"def",
"getNewRepository",
"(",
")",
":",
"return",
"ExportRepository",
"(",
")"
] | get the repository constructor . | train | false |
25,876 | def analyze_imdbid(href):
if (not href):
return None
match = re_imdbid.search(href)
if (not match):
return None
return str(match.group(2))
| [
"def",
"analyze_imdbid",
"(",
"href",
")",
":",
"if",
"(",
"not",
"href",
")",
":",
"return",
"None",
"match",
"=",
"re_imdbid",
".",
"search",
"(",
"href",
")",
"if",
"(",
"not",
"match",
")",
":",
"return",
"None",
"return",
"str",
"(",
"match",
... | return an imdbid from an url . | train | false |
25,877 | def drop_non_windows_name_records(font):
names = font['name'].names
records_to_drop = set()
for (record_number, record) in enumerate(names):
name_ids = (record.platformID, record.platEncID, record.langID)
if (name_ids != (3, 1, 1033)):
records_to_drop.add(record_number)
if records_to_drop:
font['name'].names = [record for (record_number, record) in enumerate(names) if (record_number not in records_to_drop)]
| [
"def",
"drop_non_windows_name_records",
"(",
"font",
")",
":",
"names",
"=",
"font",
"[",
"'name'",
"]",
".",
"names",
"records_to_drop",
"=",
"set",
"(",
")",
"for",
"(",
"record_number",
",",
"record",
")",
"in",
"enumerate",
"(",
"names",
")",
":",
"n... | drop name records whose != . | train | false |
25,878 | def _align_nums(nums):
try:
nums = asarray(nums)
if (not np.issubdtype(nums.dtype, np.number)):
raise ValueError('dtype of numerator is non-numeric')
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max((num.size for num in nums))
aligned_nums = np.zeros((len(nums), max_width))
for (index, num) in enumerate(nums):
aligned_nums[index, (- num.size):] = num
return aligned_nums
| [
"def",
"_align_nums",
"(",
"nums",
")",
":",
"try",
":",
"nums",
"=",
"asarray",
"(",
"nums",
")",
"if",
"(",
"not",
"np",
".",
"issubdtype",
"(",
"nums",
".",
"dtype",
",",
"np",
".",
"number",
")",
")",
":",
"raise",
"ValueError",
"(",
"'dtype of... | aligns the shapes of multiple numerators . | train | false |
25,879 | def printAttributesKey(attributesKey, elementNode):
if (attributesKey.lower() == '_localdictionary'):
localDictionary = getLocalDictionary(attributesKey, elementNode)
if (localDictionary != None):
localDictionaryKeys = localDictionary.keys()
attributeValue = elementNode.attributes[attributesKey]
if (attributeValue != ''):
attributeValue = (' - ' + attributeValue)
print ('Local Dictionary Variables' + attributeValue)
localDictionaryKeys.sort()
for localDictionaryKey in localDictionaryKeys:
print ('%s: %s' % (localDictionaryKey, localDictionary[localDictionaryKey]))
return
value = elementNode.attributes[attributesKey]
evaluatedValue = None
if (value == ''):
evaluatedValue = evaluate.getEvaluatedExpressionValue(elementNode, attributesKey)
else:
evaluatedValue = evaluate.getEvaluatedExpressionValue(elementNode, value)
print ('%s: %s' % (attributesKey, evaluatedValue))
| [
"def",
"printAttributesKey",
"(",
"attributesKey",
",",
"elementNode",
")",
":",
"if",
"(",
"attributesKey",
".",
"lower",
"(",
")",
"==",
"'_localdictionary'",
")",
":",
"localDictionary",
"=",
"getLocalDictionary",
"(",
"attributesKey",
",",
"elementNode",
")",
... | print the attributeskey . | train | false |
25,880 | def test_round_to_float():
assert (round_to_float(12.01934, 0.01) == 12.02)
assert (round_to_float(12.01134, 0.01) == 12.01)
assert (round_to_float(12.1934, 0.1) == 12.2)
assert (round_to_float(12.1134, 0.1) == 12.1)
assert (round_to_float(12.1134, 0.001) == 12.113)
assert (round_to_float(12.1134, 1e-05) == 12.1134)
assert (round_to_float(12.1934, 0.5) == 12.0)
assert (round_to_float(12.2934, 0.5) == 12.5)
| [
"def",
"test_round_to_float",
"(",
")",
":",
"assert",
"(",
"round_to_float",
"(",
"12.01934",
",",
"0.01",
")",
"==",
"12.02",
")",
"assert",
"(",
"round_to_float",
"(",
"12.01134",
",",
"0.01",
")",
"==",
"12.01",
")",
"assert",
"(",
"round_to_float",
"(... | test round to float function . | train | false |
25,883 | @nottest
def get_run_tests():
test_suite = None
msg_fmt = 'Reading %s run tests from: "%s"'
for fname in os.listdir(ARTIFACT_DIR):
if (fname.startswith(NOSE_OUTPUT_PREFIX) and fname.endswith(NOSE_XUNIT_EXT)):
path_fname = os.path.join(ARTIFACT_DIR, fname)
try:
(curr_test_suite, test_result) = parse_xunit(path_fname)
except ElementTree.ParseError:
logging.warning(('"%s" is an invalid XML file.' % fname))
continue
logging.debug((msg_fmt % (test_result.testsRun, fname)))
if (test_suite is None):
test_suite = curr_test_suite
else:
for test in curr_test_suite:
test_suite.addTest(test)
normalize_test_names(test_suite)
run_str = '\n'.join(sorted([t.id() for t in test_suite._tests]))
logging.debug(('Run %s tests.' % len(test_suite._tests)))
logging.debug(('The following tests were run:\n%s' % run_str))
return test_suite
| [
"@",
"nottest",
"def",
"get_run_tests",
"(",
")",
":",
"test_suite",
"=",
"None",
"msg_fmt",
"=",
"'Reading %s run tests from: \"%s\"'",
"for",
"fname",
"in",
"os",
".",
"listdir",
"(",
"ARTIFACT_DIR",
")",
":",
"if",
"(",
"fname",
".",
"startswith",
"(",
"N... | merge all the information from the command outputs into one consolidated test suite which contains all tests which were run . | train | false |
25,884 | def tempallow(ip=None, ttl=None, port=None, direction=None, comment=''):
return _tmp_access_rule('tempallow', ip, ttl, port, direction, comment)
| [
"def",
"tempallow",
"(",
"ip",
"=",
"None",
",",
"ttl",
"=",
"None",
",",
"port",
"=",
"None",
",",
"direction",
"=",
"None",
",",
"comment",
"=",
"''",
")",
":",
"return",
"_tmp_access_rule",
"(",
"'tempallow'",
",",
"ip",
",",
"ttl",
",",
"port",
... | add an rule to the temporary ip allow list . | train | true |
25,885 | @hug.directive(apply_globally=False)
def my_directive(default=None, **kwargs):
return default
| [
"@",
"hug",
".",
"directive",
"(",
"apply_globally",
"=",
"False",
")",
"def",
"my_directive",
"(",
"default",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"return",
"default"
] | for testing . | train | false |
25,886 | def moments_central(image, cr, cc, order=3):
return _moments_cy.moments_central(image, cr, cc, order)
| [
"def",
"moments_central",
"(",
"image",
",",
"cr",
",",
"cc",
",",
"order",
"=",
"3",
")",
":",
"return",
"_moments_cy",
".",
"moments_central",
"(",
"image",
",",
"cr",
",",
"cc",
",",
"order",
")"
] | calculate all central image moments up to a certain order . | train | false |
25,887 | def equivalence_classes(iterable, relation):
blocks = []
for y in iterable:
for block in blocks:
x = arbitrary_element(block)
if relation(x, y):
block.append(y)
break
else:
blocks.append([y])
return {frozenset(block) for block in blocks}
| [
"def",
"equivalence_classes",
"(",
"iterable",
",",
"relation",
")",
":",
"blocks",
"=",
"[",
"]",
"for",
"y",
"in",
"iterable",
":",
"for",
"block",
"in",
"blocks",
":",
"x",
"=",
"arbitrary_element",
"(",
"block",
")",
"if",
"relation",
"(",
"x",
","... | returns the set of equivalence classes of the given iterable under the specified equivalence relation . | train | false |
25,888 | @permission_pre_checks(action='add_page')
@cached_func
def user_can_add_subpage(user, target, site=None):
has_perm = has_generic_permission(page=target, user=user, action='add_page', site=site)
return has_perm
| [
"@",
"permission_pre_checks",
"(",
"action",
"=",
"'add_page'",
")",
"@",
"cached_func",
"def",
"user_can_add_subpage",
"(",
"user",
",",
"target",
",",
"site",
"=",
"None",
")",
":",
"has_perm",
"=",
"has_generic_permission",
"(",
"page",
"=",
"target",
",",
... | return true if the current user has permission to add a new page under target . | train | false |
25,889 | def logical_volume_size(path):
(out, _err) = execute('lvs', '-o', 'lv_size', '--noheadings', '--units', 'b', '--nosuffix', path, run_as_root=True)
return int(out)
| [
"def",
"logical_volume_size",
"(",
"path",
")",
":",
"(",
"out",
",",
"_err",
")",
"=",
"execute",
"(",
"'lvs'",
",",
"'-o'",
",",
"'lv_size'",
",",
"'--noheadings'",
",",
"'--units'",
",",
"'b'",
",",
"'--nosuffix'",
",",
"path",
",",
"run_as_root",
"="... | get logical volume size in bytes . | train | false |
25,890 | def RecordSubscription(client, obj_store, user_id, device_id, request, callback):
def _OnRecord(verify_response, op):
callback({'subscription': Subscription.CreateFromITunes(user_id, verify_response).MakeMetadataDict()})
def _OnVerify(environment, verify_response):
if ((environment == 'prod') and (verify_response.GetStatus() == VerifyResponse.SANDBOX_ON_PROD_ERROR)):
ITunesStoreClient.Instance('dev').VerifyReceipt(receipt_data, partial(_OnVerify, 'dev'))
return
if (not verify_response.IsValid()):
logging.warning('record_subscription: invalid signature; request: %r', request)
raise web.HTTPError(400, 'invalid receipt signature')
if (environment == 'prod'):
op_request = {'headers': request['headers'], 'user_id': user_id, 'verify_response_str': verify_response.ToString()}
Operation.CreateAndExecute(client, user_id, device_id, 'Subscription.RecordITunesTransactionOperation', op_request, partial(_OnRecord, verify_response))
else:
callback({'subscription': Subscription.CreateFromITunes(user_id, verify_response).MakeMetadataDict()})
receipt_data = base64.b64decode(request['receipt_data'])
ITunesStoreClient.Instance('prod').VerifyReceipt(receipt_data, partial(_OnVerify, 'prod'))
| [
"def",
"RecordSubscription",
"(",
"client",
",",
"obj_store",
",",
"user_id",
",",
"device_id",
",",
"request",
",",
"callback",
")",
":",
"def",
"_OnRecord",
"(",
"verify_response",
",",
"op",
")",
":",
"callback",
"(",
"{",
"'subscription'",
":",
"Subscrip... | records an external subscription . | train | false |
25,891 | def test_ipy_dash_c():
ipi = IronPythonInstance(executable, exec_prefix, '-c True;False')
res = ipi.StartAndRunToCompletion()
AreEqual(res[0], True)
AreEqual(res[1], '')
AreEqual(res[2], '')
AreEqual(res[3], 0)
| [
"def",
"test_ipy_dash_c",
"(",
")",
":",
"ipi",
"=",
"IronPythonInstance",
"(",
"executable",
",",
"exec_prefix",
",",
"'-c True;False'",
")",
"res",
"=",
"ipi",
".",
"StartAndRunToCompletion",
"(",
")",
"AreEqual",
"(",
"res",
"[",
"0",
"]",
",",
"True",
... | verify ipy -c cmd doesnt print expression statements . | train | false |
25,892 | def display_data_size(size):
prefixes = ['B', 'kB', 'MB', 'GB', 'TB']
i = 0
while (size > 1000.0):
size /= 1000.0
i += 1
return ('%.2f %s' % (size, prefixes[i]))
| [
"def",
"display_data_size",
"(",
"size",
")",
":",
"prefixes",
"=",
"[",
"'B'",
",",
"'kB'",
",",
"'MB'",
",",
"'GB'",
",",
"'TB'",
"]",
"i",
"=",
"0",
"while",
"(",
"size",
">",
"1000.0",
")",
":",
"size",
"/=",
"1000.0",
"i",
"+=",
"1",
"return... | display data size in human readable units . | train | false |
25,893 | @contextmanager
def alt_file(current_file):
_alt_file = (current_file + '-alt')
(yield _alt_file)
try:
shutil.move(_alt_file, current_file)
except IOError:
pass
| [
"@",
"contextmanager",
"def",
"alt_file",
"(",
"current_file",
")",
":",
"_alt_file",
"=",
"(",
"current_file",
"+",
"'-alt'",
")",
"(",
"yield",
"_alt_file",
")",
"try",
":",
"shutil",
".",
"move",
"(",
"_alt_file",
",",
"current_file",
")",
"except",
"IO... | create an alternate file next to an existing file . | train | false |
25,894 | def _all_releases(items):
relcounts = defaultdict(int)
for item in items:
if (item.path not in _matches):
continue
(_, release_ids) = _matches[item.path]
for release_id in release_ids:
relcounts[release_id] += 1
for (release_id, count) in relcounts.items():
if ((float(count) / len(items)) > COMMON_REL_THRESH):
(yield release_id)
| [
"def",
"_all_releases",
"(",
"items",
")",
":",
"relcounts",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"item",
"in",
"items",
":",
"if",
"(",
"item",
".",
"path",
"not",
"in",
"_matches",
")",
":",
"continue",
"(",
"_",
",",
"release_ids",
")",
"="... | given an iterable of items . | train | false |
25,896 | def unpack_cli_arg(cli_argument, value):
return _unpack_cli_arg(cli_argument.argument_model, value, cli_argument.cli_name)
| [
"def",
"unpack_cli_arg",
"(",
"cli_argument",
",",
"value",
")",
":",
"return",
"_unpack_cli_arg",
"(",
"cli_argument",
".",
"argument_model",
",",
"value",
",",
"cli_argument",
".",
"cli_name",
")"
] | parses and unpacks the encoded string command line parameter and returns native python data structures that can be passed to the operation . | train | false |
25,898 | def undo_patch():
Request.send = _original_request_send
| [
"def",
"undo_patch",
"(",
")",
":",
"Request",
".",
"send",
"=",
"_original_request_send"
] | undo requests monkey patch . | train | false |
25,899 | def get_conv_gradinputs_shape_1axis(kernel_shape, top_shape, border_mode, subsample, dilation):
if (None in [kernel_shape, top_shape, border_mode, subsample, dilation]):
return None
if (subsample != 1):
return None
dil_kernel_shape = (((kernel_shape - 1) * dilation) + 1)
if (border_mode == 'half'):
pad = (dil_kernel_shape // 2)
elif (border_mode == 'full'):
pad = (dil_kernel_shape - 1)
elif (border_mode == 'valid'):
pad = 0
else:
pad = border_mode
if (pad < 0):
raise ValueError('border_mode must be >= 0')
if (pad == 0):
image_shape = ((top_shape + dil_kernel_shape) - 1)
else:
image_shape = (((top_shape - (2 * pad)) + dil_kernel_shape) - 1)
return image_shape
| [
"def",
"get_conv_gradinputs_shape_1axis",
"(",
"kernel_shape",
",",
"top_shape",
",",
"border_mode",
",",
"subsample",
",",
"dilation",
")",
":",
"if",
"(",
"None",
"in",
"[",
"kernel_shape",
",",
"top_shape",
",",
"border_mode",
",",
"subsample",
",",
"dilation... | this function tries to compute the image shape of convolution gradinputs . | train | false |
25,900 | def SetupCore(searchPaths):
import sys
for path in searchPaths:
sys.path.append(path)
import string, os
import regutil, win32api, win32con
(installPath, corePaths) = LocatePythonCore(searchPaths)
print corePaths
regutil.RegisterNamedPath(None, string.join(corePaths, ';'))
hKey = win32api.RegCreateKey(regutil.GetRootKey(), regutil.BuildDefaultPythonKey())
try:
win32api.RegSetValue(hKey, 'InstallPath', win32con.REG_SZ, installPath)
finally:
win32api.RegCloseKey(hKey)
win32paths = ((os.path.abspath(os.path.split(win32api.__file__)[0]) + ';') + os.path.abspath(os.path.split(LocateFileName('win32con.py;win32con.pyc', sys.path))[0]))
check = os.path.join(sys.prefix, 'PCBuild')
if os.path.isdir(check):
regutil.RegisterNamedPath('PCBuild', check)
| [
"def",
"SetupCore",
"(",
"searchPaths",
")",
":",
"import",
"sys",
"for",
"path",
"in",
"searchPaths",
":",
"sys",
".",
"path",
".",
"append",
"(",
"path",
")",
"import",
"string",
",",
"os",
"import",
"regutil",
",",
"win32api",
",",
"win32con",
"(",
... | setup the core python information in the registry . | train | false |
25,901 | def _verify_plugin_type(plugin_type):
if (hasattr(plugin_type, '__module__') and issubclass(plugin_type, CMSPluginBase)):
plugin_pool.set_plugin_meta()
plugin_model = plugin_type.model
assert (plugin_type in plugin_pool.plugins.values())
plugin_type = plugin_type.__name__
elif isinstance(plugin_type, six.string_types):
try:
plugin_model = plugin_pool.get_plugin(plugin_type).model
except KeyError:
raise TypeError('plugin_type must be CMSPluginBase subclass or string')
else:
raise TypeError('plugin_type must be CMSPluginBase subclass or string')
return (plugin_model, plugin_type)
| [
"def",
"_verify_plugin_type",
"(",
"plugin_type",
")",
":",
"if",
"(",
"hasattr",
"(",
"plugin_type",
",",
"'__module__'",
")",
"and",
"issubclass",
"(",
"plugin_type",
",",
"CMSPluginBase",
")",
")",
":",
"plugin_pool",
".",
"set_plugin_meta",
"(",
")",
"plug... | verifies the given plugin_type is valid and returns a tuple of . | train | false |
25,902 | def getSelectedPluginModule(plugins):
for plugin in plugins:
if plugin.value:
return archive.getModuleWithDirectoryPath(plugin.directoryPath, plugin.name)
return None
| [
"def",
"getSelectedPluginModule",
"(",
"plugins",
")",
":",
"for",
"plugin",
"in",
"plugins",
":",
"if",
"plugin",
".",
"value",
":",
"return",
"archive",
".",
"getModuleWithDirectoryPath",
"(",
"plugin",
".",
"directoryPath",
",",
"plugin",
".",
"name",
")",
... | get the selected plugin module . | train | false |
25,905 | def CHECK_FETCH_CMD(state, fetch_dists):
if (not fetch_dists):
return
prefix = state['prefix']
index = state['index']
assert isdir(prefix)
size = reduce(add, (index[dist].get('size', 0) for dist in fetch_dists), 0)
check_size(prefix, size)
| [
"def",
"CHECK_FETCH_CMD",
"(",
"state",
",",
"fetch_dists",
")",
":",
"if",
"(",
"not",
"fetch_dists",
")",
":",
"return",
"prefix",
"=",
"state",
"[",
"'prefix'",
"]",
"index",
"=",
"state",
"[",
"'index'",
"]",
"assert",
"isdir",
"(",
"prefix",
")",
... | check whether there is enough space for download packages . | train | false |
25,906 | def gen_auth_resp(chall_list):
return [('%s%s' % (chall.__class__.__name__, chall.domain)) for chall in chall_list]
| [
"def",
"gen_auth_resp",
"(",
"chall_list",
")",
":",
"return",
"[",
"(",
"'%s%s'",
"%",
"(",
"chall",
".",
"__class__",
".",
"__name__",
",",
"chall",
".",
"domain",
")",
")",
"for",
"chall",
"in",
"chall_list",
"]"
] | generate a dummy authorization response . | train | false |
25,907 | def fake_open(filename, flags, mode=511, _os_open=os.open):
if (flags & ((os.O_RDWR | os.O_CREAT) | os.O_WRONLY)):
raise OSError(errno.EROFS, 'Read-only file system', filename)
elif (not FakeFile.is_file_accessible(filename)):
raise OSError(errno.ENOENT, 'No such file or directory', filename)
return _os_open(filename, flags, mode)
| [
"def",
"fake_open",
"(",
"filename",
",",
"flags",
",",
"mode",
"=",
"511",
",",
"_os_open",
"=",
"os",
".",
"open",
")",
":",
"if",
"(",
"flags",
"&",
"(",
"(",
"os",
".",
"O_RDWR",
"|",
"os",
".",
"O_CREAT",
")",
"|",
"os",
".",
"O_WRONLY",
"... | fake version of os . | train | false |
25,908 | @map_project_slug
@map_subproject_slug
def serve_docs(request, project, subproject, lang_slug=None, version_slug=None, filename=''):
if (not version_slug):
version_slug = project.get_default_version()
try:
version = project.versions.public(request.user).get(slug=version_slug)
except Version.DoesNotExist:
if project.versions.filter(slug=version_slug).exists():
return _serve_401(request, project)
raise Http404('Version does not exist.')
filename = resolve_path((subproject or project), version_slug=version_slug, language=lang_slug, filename=filename, subdomain=True)
if ((version.privacy_level == constants.PRIVATE) and (not AdminPermission.is_member(user=request.user, obj=project))):
return _serve_401(request, project)
return _serve_symlink_docs(request, filename=filename, project=project, privacy_level=version.privacy_level)
| [
"@",
"map_project_slug",
"@",
"map_subproject_slug",
"def",
"serve_docs",
"(",
"request",
",",
"project",
",",
"subproject",
",",
"lang_slug",
"=",
"None",
",",
"version_slug",
"=",
"None",
",",
"filename",
"=",
"''",
")",
":",
"if",
"(",
"not",
"version_slu... | exists to map existing proj . | train | false |
25,909 | def _dmidecoder(args=None):
if (args is None):
return salt.modules.cmdmod._run_quiet(DMIDECODER)
else:
return salt.modules.cmdmod._run_quiet('{0} {1}'.format(DMIDECODER, args))
| [
"def",
"_dmidecoder",
"(",
"args",
"=",
"None",
")",
":",
"if",
"(",
"args",
"is",
"None",
")",
":",
"return",
"salt",
".",
"modules",
".",
"cmdmod",
".",
"_run_quiet",
"(",
"DMIDECODER",
")",
"else",
":",
"return",
"salt",
".",
"modules",
".",
"cmdm... | call dmidecode . | train | false |
25,910 | def curl(vect, coord_sys):
return coord_sys.delop.cross(vect).doit()
| [
"def",
"curl",
"(",
"vect",
",",
"coord_sys",
")",
":",
"return",
"coord_sys",
".",
"delop",
".",
"cross",
"(",
"vect",
")",
".",
"doit",
"(",
")"
] | returns the curl of a vector field computed wrt the coordinate symbols of the given frame . | train | false |
25,911 | def _get_flat_core_sizes(cores):
core_sizes_lists = []
for core in cores:
flat_output_size = nest.flatten(core.output_size)
core_sizes_lists.append([tensor_shape.as_shape(size).as_list() for size in flat_output_size])
return core_sizes_lists
| [
"def",
"_get_flat_core_sizes",
"(",
"cores",
")",
":",
"core_sizes_lists",
"=",
"[",
"]",
"for",
"core",
"in",
"cores",
":",
"flat_output_size",
"=",
"nest",
".",
"flatten",
"(",
"core",
".",
"output_size",
")",
"core_sizes_lists",
".",
"append",
"(",
"[",
... | obtains the list flattened output sizes of a list of cores . | train | false |
25,912 | def root(desktop=None):
if _is_x11():
return Window(None)
else:
raise OSError(("Desktop '%s' not supported" % use_desktop(desktop)))
| [
"def",
"root",
"(",
"desktop",
"=",
"None",
")",
":",
"if",
"_is_x11",
"(",
")",
":",
"return",
"Window",
"(",
"None",
")",
"else",
":",
"raise",
"OSError",
"(",
"(",
"\"Desktop '%s' not supported\"",
"%",
"use_desktop",
"(",
"desktop",
")",
")",
")"
] | return the root window for the current desktop . | train | false |
25,913 | def ensure_float01(arr, dtype_preference='float32'):
if (arr.dtype == 'uint8'):
return (np.array(arr, dtype=dtype_preference) / 255)
elif (arr.dtype in ('float32', 'float64')):
return arr
else:
raise Exception(('ensure_float01 expects uint8 or float input but got %s with range [%g,%g,].' % (arr.dtype, arr.min(), arr.max())))
| [
"def",
"ensure_float01",
"(",
"arr",
",",
"dtype_preference",
"=",
"'float32'",
")",
":",
"if",
"(",
"arr",
".",
"dtype",
"==",
"'uint8'",
")",
":",
"return",
"(",
"np",
".",
"array",
"(",
"arr",
",",
"dtype",
"=",
"dtype_preference",
")",
"/",
"255",
... | if data is uint . | train | false |
25,914 | def _footer_static_url(request, name):
return request.build_absolute_uri(staticfiles_storage.url(name))
| [
"def",
"_footer_static_url",
"(",
"request",
",",
"name",
")",
":",
"return",
"request",
".",
"build_absolute_uri",
"(",
"staticfiles_storage",
".",
"url",
"(",
"name",
")",
")"
] | construct an absolute url to a static asset . | train | false |
25,916 | def _ExtractProxyConfig(product_yaml_key, proxy_config_data):
cafile = proxy_config_data.get('cafile', None)
disable_certificate_validation = proxy_config_data.get('disable_certificate_validation', False)
http_proxy = _ExtractProxy(_HTTP_PROXY_YAML_KEY, proxy_config_data)
https_proxy = _ExtractProxy(_HTTPS_PROXY_YAML_KEY, proxy_config_data)
proxy_config = ProxyConfig(http_proxy=http_proxy, https_proxy=https_proxy, cafile=cafile, disable_certificate_validation=disable_certificate_validation)
return proxy_config
| [
"def",
"_ExtractProxyConfig",
"(",
"product_yaml_key",
",",
"proxy_config_data",
")",
":",
"cafile",
"=",
"proxy_config_data",
".",
"get",
"(",
"'cafile'",
",",
"None",
")",
"disable_certificate_validation",
"=",
"proxy_config_data",
".",
"get",
"(",
"'disable_certifi... | returns an initialized proxyconfig using the given proxy_config_data . | train | true |
25,918 | def _use_appnope():
return ((sys.platform == 'darwin') and (V(platform.mac_ver()[0]) >= V('10.9')))
| [
"def",
"_use_appnope",
"(",
")",
":",
"return",
"(",
"(",
"sys",
".",
"platform",
"==",
"'darwin'",
")",
"and",
"(",
"V",
"(",
"platform",
".",
"mac_ver",
"(",
")",
"[",
"0",
"]",
")",
">=",
"V",
"(",
"'10.9'",
")",
")",
")"
] | should we use appnope for dealing with os x app nap? checks if we are on os x 10 . | train | false |
25,920 | def set_ring(devname, **kwargs):
try:
ring = ethtool.get_ringparam(devname)
except IOError:
log.error('Ring parameters not supported on {0}'.format(devname))
return 'Not supported'
changed = False
for (param, value) in kwargs.items():
if (param in ethtool_ring_map):
param = ethtool_ring_map[param]
if (param in ring):
if (ring[param] != value):
ring[param] = value
changed = True
try:
if changed:
ethtool.set_ringparam(devname, ring)
return show_ring(devname)
except IOError:
log.error('Invalid ring arguments on {0}: {1}'.format(devname, ring))
return 'Invalid arguments'
| [
"def",
"set_ring",
"(",
"devname",
",",
"**",
"kwargs",
")",
":",
"try",
":",
"ring",
"=",
"ethtool",
".",
"get_ringparam",
"(",
"devname",
")",
"except",
"IOError",
":",
"log",
".",
"error",
"(",
"'Ring parameters not supported on {0}'",
".",
"format",
"(",... | changes the rx/tx ring parameters of the specified network device cli example: . | train | true |
25,921 | def get_headnode_dict(fixer_list):
head_nodes = defaultdict(list)
for fixer in fixer_list:
if (not fixer.pattern):
head_nodes[None].append(fixer)
continue
for t in get_head_types(fixer.pattern):
head_nodes[t].append(fixer)
return head_nodes
| [
"def",
"get_headnode_dict",
"(",
"fixer_list",
")",
":",
"head_nodes",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"fixer",
"in",
"fixer_list",
":",
"if",
"(",
"not",
"fixer",
".",
"pattern",
")",
":",
"head_nodes",
"[",
"None",
"]",
".",
"append",
"(",... | accepts a list of fixers and returns a dictionary of head node type --> fixer list . | train | false |
25,922 | def _is_load_excluded(d):
if d.startswith('.'):
return True
for build_path in ('build32_debug', 'build32_release', 'build64_debug', 'build64_release'):
if d.startswith(build_path):
return True
return False
| [
"def",
"_is_load_excluded",
"(",
"d",
")",
":",
"if",
"d",
".",
"startswith",
"(",
"'.'",
")",
":",
"return",
"True",
"for",
"build_path",
"in",
"(",
"'build32_debug'",
",",
"'build32_release'",
",",
"'build64_debug'",
",",
"'build64_release'",
")",
":",
"if... | whether exclude the directory when loading build . | train | false |
25,924 | def _warn_if_string(iterable):
if isinstance(iterable, basestring):
from warnings import warn
warn(Warning('response iterable was set to a string. This appears to work but means that the server will send the data to the client char, by char. This is almost never intended behavior, use response.data to assign strings to the response object.'), stacklevel=2)
| [
"def",
"_warn_if_string",
"(",
"iterable",
")",
":",
"if",
"isinstance",
"(",
"iterable",
",",
"basestring",
")",
":",
"from",
"warnings",
"import",
"warn",
"warn",
"(",
"Warning",
"(",
"'response iterable was set to a string. This appears to work but means that the serv... | helper for the response objects to check if the iterable returned to the wsgi server is not a string . | train | false |
25,925 | def rs_trunc(p1, x, prec):
R = p1.ring
p = R.zero
i = R.gens.index(x)
for exp1 in p1:
if (exp1[i] >= prec):
continue
p[exp1] = p1[exp1]
return p
| [
"def",
"rs_trunc",
"(",
"p1",
",",
"x",
",",
"prec",
")",
":",
"R",
"=",
"p1",
".",
"ring",
"p",
"=",
"R",
".",
"zero",
"i",
"=",
"R",
".",
"gens",
".",
"index",
"(",
"x",
")",
"for",
"exp1",
"in",
"p1",
":",
"if",
"(",
"exp1",
"[",
"i",
... | truncate the series in the x variable with precision prec . | train | false |
25,926 | def get_daily_10yr_treasury_data():
url = 'http://www.federalreserve.gov/datadownload/Output.aspx?rel=H15&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=&filetype=csv&label=include&layout=seriescolumn'
return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'], parse_dates=True, converters={1: dataconverter}, squeeze=True)
| [
"def",
"get_daily_10yr_treasury_data",
"(",
")",
":",
"url",
"=",
"'http://www.federalreserve.gov/datadownload/Output.aspx?rel=H15&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=&filetype=csv&label=include&layout=seriescolumn'",
"return",
"pd",
".",
"read_csv",
"(",
"url",
","... | download daily 10 year treasury rates from the federal reserve and return a pandas . | train | true |
25,928 | def has_instance(name, provider=None):
data = get_instance(name, provider)
if (data is None):
return False
return True
| [
"def",
"has_instance",
"(",
"name",
",",
"provider",
"=",
"None",
")",
":",
"data",
"=",
"get_instance",
"(",
"name",
",",
"provider",
")",
"if",
"(",
"data",
"is",
"None",
")",
":",
"return",
"False",
"return",
"True"
] | return true if the instance is found on a provider cli example: . | train | true |
25,931 | def set_virt_ram(self, num):
if (num == '<<inherit>>'):
self.virt_ram = '<<inherit>>'
return
try:
inum = int(num)
if (inum != float(num)):
raise CX(_(('invalid virt ram size (%s)' % num)))
if (inum >= 0):
self.virt_ram = inum
return
raise CX(_(('invalid virt ram size (%s)' % num)))
except:
raise CX(_(('invalid virt ram size (%s)' % num)))
| [
"def",
"set_virt_ram",
"(",
"self",
",",
"num",
")",
":",
"if",
"(",
"num",
"==",
"'<<inherit>>'",
")",
":",
"self",
".",
"virt_ram",
"=",
"'<<inherit>>'",
"return",
"try",
":",
"inum",
"=",
"int",
"(",
"num",
")",
"if",
"(",
"inum",
"!=",
"float",
... | for virt only . | train | false |
25,932 | def fake_participant_identity(participant, verification=None):
country_id = random_country_id(participant.db)
participant.store_identity_info(country_id, 'nothing-enforced', {})
if verification:
participant.set_identity_verification(country_id, verification)
elif (random.randrange(2) == 0):
participant.set_identity_verification(country_id, True)
return country_id
| [
"def",
"fake_participant_identity",
"(",
"participant",
",",
"verification",
"=",
"None",
")",
":",
"country_id",
"=",
"random_country_id",
"(",
"participant",
".",
"db",
")",
"participant",
".",
"store_identity_info",
"(",
"country_id",
",",
"'nothing-enforced'",
"... | pick a country and make an identity for the participant there . | train | false |
25,933 | def guess_net_inet_tcp_delayed_ack():
return 0
| [
"def",
"guess_net_inet_tcp_delayed_ack",
"(",
")",
":",
"return",
"0"
] | set the tcp stack to not use delayed acks . | train | false |
25,934 | def test_ugettext_calls():
runtime = TestRuntime()
block = XBlockWithServices(runtime, scope_ids=Mock(spec=[]))
assert_equals(block.ugettext('test'), u'test')
assert_true(isinstance(block.ugettext('test'), unicode))
runtime = TestRuntime(services={'i18n': None})
block = XBlockWithServices(runtime, scope_ids=Mock(spec=[]))
with assert_raises(NoSuchServiceError):
block.ugettext('test')
| [
"def",
"test_ugettext_calls",
"(",
")",
":",
"runtime",
"=",
"TestRuntime",
"(",
")",
"block",
"=",
"XBlockWithServices",
"(",
"runtime",
",",
"scope_ids",
"=",
"Mock",
"(",
"spec",
"=",
"[",
"]",
")",
")",
"assert_equals",
"(",
"block",
".",
"ugettext",
... | test ugettext calls in xblock . | train | false |
25,935 | def obtain_uptime(show_ver):
match = re.search('.* uptime is .*', show_ver)
if match:
uptime_str = match.group().strip()
uptime_obj = Uptime(uptime_str)
return uptime_obj.uptime_seconds()
else:
return None
| [
"def",
"obtain_uptime",
"(",
"show_ver",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"'.* uptime is .*'",
",",
"show_ver",
")",
"if",
"match",
":",
"uptime_str",
"=",
"match",
".",
"group",
"(",
")",
".",
"strip",
"(",
")",
"uptime_obj",
"=",
"Up... | obtain uptime from show version data . | train | false |
25,936 | def DistEntry():
flags.StartMain(main)
| [
"def",
"DistEntry",
"(",
")",
":",
"flags",
".",
"StartMain",
"(",
"main",
")"
] | this is called from the package entry point . | train | false |
25,937 | def convertXMLElementRename(geometryOutput, xmlElement):
xmlElement.className = 'path'
convertXMLElement(geometryOutput, xmlElement)
| [
"def",
"convertXMLElementRename",
"(",
"geometryOutput",
",",
"xmlElement",
")",
":",
"xmlElement",
".",
"className",
"=",
"'path'",
"convertXMLElement",
"(",
"geometryOutput",
",",
"xmlElement",
")"
] | convert the xml element to a path xml element . | train | false |
25,940 | @not_implemented_for('undirected')
def in_degree_centrality(G):
centrality = {}
s = (1.0 / (len(G) - 1.0))
centrality = {n: (d * s) for (n, d) in G.in_degree()}
return centrality
| [
"@",
"not_implemented_for",
"(",
"'undirected'",
")",
"def",
"in_degree_centrality",
"(",
"G",
")",
":",
"centrality",
"=",
"{",
"}",
"s",
"=",
"(",
"1.0",
"/",
"(",
"len",
"(",
"G",
")",
"-",
"1.0",
")",
")",
"centrality",
"=",
"{",
"n",
":",
"(",... | compute the in-degree centrality for nodes . | train | false |
25,941 | def flaky(jira_keys, max_runs=3, min_passes=1):
if isinstance(jira_keys, unicode):
jira_keys = [jira_keys]
annotation = _FlakyAnnotation(jira_keys=pset(jira_keys), max_runs=max_runs, min_passes=min_passes)
def wrapper(test_method):
existing_flaky = getattr(test_method, _FLAKY_ATTRIBUTE, None)
if (existing_flaky is None):
note = annotation
else:
note = _combine_flaky_annotation(annotation, existing_flaky)
setattr(test_method, _FLAKY_ATTRIBUTE, note)
return test_method
return wrapper
| [
"def",
"flaky",
"(",
"jira_keys",
",",
"max_runs",
"=",
"3",
",",
"min_passes",
"=",
"1",
")",
":",
"if",
"isinstance",
"(",
"jira_keys",
",",
"unicode",
")",
":",
"jira_keys",
"=",
"[",
"jira_keys",
"]",
"annotation",
"=",
"_FlakyAnnotation",
"(",
"jira... | mark a test as flaky . | train | false |
25,947 | @open_file(0, mode='r')
def read_yaml(path):
try:
import yaml
except ImportError:
raise ImportError('read_yaml() requires PyYAML: http://pyyaml.org/')
G = yaml.load(path)
return G
| [
"@",
"open_file",
"(",
"0",
",",
"mode",
"=",
"'r'",
")",
"def",
"read_yaml",
"(",
"path",
")",
":",
"try",
":",
"import",
"yaml",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'read_yaml() requires PyYAML: http://pyyaml.org/'",
")",
"G",
"=",
... | read graph in yaml format from path . | train | false |
25,948 | def setup_local_browser(context):
profile = webdriver.FirefoxProfile()
if ('download_csv' in context.tags):
context.download_dir = tempfile.mkdtemp()
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', context.download_dir)
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'text/csv')
context.browser = webdriver.Firefox(firefox_profile=profile)
| [
"def",
"setup_local_browser",
"(",
"context",
")",
":",
"profile",
"=",
"webdriver",
".",
"FirefoxProfile",
"(",
")",
"if",
"(",
"'download_csv'",
"in",
"context",
".",
"tags",
")",
":",
"context",
".",
"download_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
... | use local webdriver . | train | false |
25,950 | def unpickle_cifar_dic(file):
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return (dict['data'], dict['labels'])
| [
"def",
"unpickle_cifar_dic",
"(",
"file",
")",
":",
"fo",
"=",
"open",
"(",
"file",
",",
"'rb'",
")",
"dict",
"=",
"cPickle",
".",
"load",
"(",
"fo",
")",
"fo",
".",
"close",
"(",
")",
"return",
"(",
"dict",
"[",
"'data'",
"]",
",",
"dict",
"[",
... | helper function: unpickles a dictionary . | train | false |
25,951 | def riccati_yn(n, x):
if (not (isscalar(n) and isscalar(x))):
raise ValueError('arguments must be scalars.')
if ((n != floor(n)) or (n < 0)):
raise ValueError('n must be a non-negative integer.')
if (n == 0):
n1 = 1
else:
n1 = n
(nm, jn, jnp) = specfun.rcty(n1, x)
return (jn[:(n + 1)], jnp[:(n + 1)])
| [
"def",
"riccati_yn",
"(",
"n",
",",
"x",
")",
":",
"if",
"(",
"not",
"(",
"isscalar",
"(",
"n",
")",
"and",
"isscalar",
"(",
"x",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"'arguments must be scalars.'",
")",
"if",
"(",
"(",
"n",
"!=",
"floor... | compute ricatti-bessel function of the second kind and its derivative . | train | false |
25,952 | def _create_scheduled_actions(conn, as_name, scheduled_actions):
if scheduled_actions:
for (name, action) in six.iteritems(scheduled_actions):
if (('start_time' in action) and isinstance(action['start_time'], six.string_types)):
action['start_time'] = datetime.datetime.strptime(action['start_time'], DATE_FORMAT)
if (('end_time' in action) and isinstance(action['end_time'], six.string_types)):
action['end_time'] = datetime.datetime.strptime(action['end_time'], DATE_FORMAT)
conn.create_scheduled_group_action(as_name, name, desired_capacity=action.get('desired_capacity'), min_size=action.get('min_size'), max_size=action.get('max_size'), start_time=action.get('start_time'), end_time=action.get('end_time'), recurrence=action.get('recurrence'))
| [
"def",
"_create_scheduled_actions",
"(",
"conn",
",",
"as_name",
",",
"scheduled_actions",
")",
":",
"if",
"scheduled_actions",
":",
"for",
"(",
"name",
",",
"action",
")",
"in",
"six",
".",
"iteritems",
"(",
"scheduled_actions",
")",
":",
"if",
"(",
"(",
... | helper function to create scheduled actions . | train | true |
25,953 | def make_data(chart, datas):
for (i, data) in enumerate(datas):
chart.add(data[0], adapt(chart, data[1]), secondary=bool((i % 2)))
return chart
| [
"def",
"make_data",
"(",
"chart",
",",
"datas",
")",
":",
"for",
"(",
"i",
",",
"data",
")",
"in",
"enumerate",
"(",
"datas",
")",
":",
"chart",
".",
"add",
"(",
"data",
"[",
"0",
"]",
",",
"adapt",
"(",
"chart",
",",
"data",
"[",
"1",
"]",
"... | add sample data to the test chart . | train | false |
25,954 | def _lint_py_files(config_pylint, files_to_lint, result):
start_time = time.time()
are_there_errors = False
num_py_files = len(files_to_lint)
if (not files_to_lint):
result.put('')
print 'There are no Python files to lint.'
return
print ('Linting %s Python files' % num_py_files)
_BATCH_SIZE = 50
current_batch_start_index = 0
while (current_batch_start_index < len(files_to_lint)):
current_batch_end_index = min((current_batch_start_index + _BATCH_SIZE), len(files_to_lint))
current_files_to_lint = files_to_lint[current_batch_start_index:current_batch_end_index]
print ('Linting Python files %s to %s...' % ((current_batch_start_index + 1), current_batch_end_index))
try:
lint.Run((current_files_to_lint + [config_pylint]))
except SystemExit as e:
if (str(e) != '0'):
are_there_errors = True
current_batch_start_index = current_batch_end_index
if are_there_errors:
result.put(('%s Python linting failed' % _MESSAGE_TYPE_FAILED))
else:
result.put(('%s %s Python files linted (%.1f secs)' % (_MESSAGE_TYPE_SUCCESS, num_py_files, (time.time() - start_time))))
print 'Python linting finished.'
| [
"def",
"_lint_py_files",
"(",
"config_pylint",
",",
"files_to_lint",
",",
"result",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"are_there_errors",
"=",
"False",
"num_py_files",
"=",
"len",
"(",
"files_to_lint",
")",
"if",
"(",
"not",
"files_... | prints a list of lint errors in the given list of python files . | train | false |
25,955 | def get_nvml():
if (platform.system() == 'Windows'):
return get_library('nvml.dll')
else:
for name in ('libnvidia-ml.so.1', 'libnvidia-ml.so', 'nvml.so'):
nvml = get_library(name)
if (nvml is not None):
return nvml
return None
| [
"def",
"get_nvml",
"(",
")",
":",
"if",
"(",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
")",
":",
"return",
"get_library",
"(",
"'nvml.dll'",
")",
"else",
":",
"for",
"name",
"in",
"(",
"'libnvidia-ml.so.1'",
",",
"'libnvidia-ml.so'",
",",
"... | return the ctypes . | train | false |
25,956 | def _find_channel_idx(ch_name, params):
indices = list()
offset = 0
labels = [l._text for l in params['fig_selection'].radio.labels]
for label in labels:
if (label == 'Custom'):
continue
selection = params['selections'][label]
hits = np.where((np.array(params['raw'].ch_names)[selection] == ch_name))
for idx in hits[0]:
indices.append((offset + idx))
offset += len(selection)
return indices
| [
"def",
"_find_channel_idx",
"(",
"ch_name",
",",
"params",
")",
":",
"indices",
"=",
"list",
"(",
")",
"offset",
"=",
"0",
"labels",
"=",
"[",
"l",
".",
"_text",
"for",
"l",
"in",
"params",
"[",
"'fig_selection'",
"]",
".",
"radio",
".",
"labels",
"]... | helper for finding all indices when using selections . | train | false |
25,957 | def _wait_for_consistency(checker):
for _ in xrange(EVENTUAL_CONSISTENCY_MAX_SLEEPS):
if checker():
return
time.sleep(EVENTUAL_CONSISTENCY_SLEEP_INTERVAL)
logger.warning('Exceeded wait for eventual GCS consistency - this may be abug in the library or something is terribly wrong.')
| [
"def",
"_wait_for_consistency",
"(",
"checker",
")",
":",
"for",
"_",
"in",
"xrange",
"(",
"EVENTUAL_CONSISTENCY_MAX_SLEEPS",
")",
":",
"if",
"checker",
"(",
")",
":",
"return",
"time",
".",
"sleep",
"(",
"EVENTUAL_CONSISTENCY_SLEEP_INTERVAL",
")",
"logger",
"."... | eventual consistency: wait until gcs reports something is true . | train | true |
25,958 | def changes_on_update(model):
return any(((column.onupdate is not None) for column in sqlalchemy_inspect(model).columns))
| [
"def",
"changes_on_update",
"(",
"model",
")",
":",
"return",
"any",
"(",
"(",
"(",
"column",
".",
"onupdate",
"is",
"not",
"None",
")",
"for",
"column",
"in",
"sqlalchemy_inspect",
"(",
"model",
")",
".",
"columns",
")",
")"
] | returns a best guess at whether the specified sqlalchemy model class is modified on updates . | train | false |
25,959 | def s3_location_constraint_for_region(region):
region = _fix_region(region)
if ((not region) or (region == _S3_REGION_WITH_NO_LOCATION_CONSTRAINT)):
return ''
else:
return region
| [
"def",
"s3_location_constraint_for_region",
"(",
"region",
")",
":",
"region",
"=",
"_fix_region",
"(",
"region",
")",
"if",
"(",
"(",
"not",
"region",
")",
"or",
"(",
"region",
"==",
"_S3_REGION_WITH_NO_LOCATION_CONSTRAINT",
")",
")",
":",
"return",
"''",
"el... | get the location constraint an s3 bucket needs so that other aws services can connect to it in the given region . | train | false |
25,960 | def get_oauth_token_from_body(body):
if six.PY3:
body = body.decode('utf-8')
credentials = urlparse.parse_qs(body)
key = credentials['oauth_token'][0]
secret = credentials['oauth_token_secret'][0]
token = {'key': key, 'id': key, 'secret': secret}
expires_at = credentials.get('oauth_expires_at')
if expires_at:
token['expires'] = expires_at[0]
return token
| [
"def",
"get_oauth_token_from_body",
"(",
"body",
")",
":",
"if",
"six",
".",
"PY3",
":",
"body",
"=",
"body",
".",
"decode",
"(",
"'utf-8'",
")",
"credentials",
"=",
"urlparse",
".",
"parse_qs",
"(",
"body",
")",
"key",
"=",
"credentials",
"[",
"'oauth_t... | parse the url response body to retrieve the oauth token key and secret . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.