| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q8100
|
Import.open
|
train
|
def open(self, options, loaded_schemata):
"""
Open and import the referenced schema.
@param options: An options dictionary.
@type options: L{options.Options}
@param loaded_schemata: Already loaded schemata cache (URL --> Schema).
@type loaded_schemata: dict
@return: The referenced schema.
@rtype: L{Schema}
"""
if self.opened:
return
self.opened = True
log.debug("%s, importing ns='%s', location='%s'", self.id, self.ns[1],
self.location)
result = self.__locate()
if result is None:
if self.location is None:
log.debug("imported schema (%s) not-found", self.ns[1])
else:
url = self.location
if "://" not in url:
url = urljoin(self.schema.baseurl, url)
result = (loaded_schemata.get(url) or
self.__download(url, loaded_schemata, options))
log.debug("imported:\n%s", result)
return result
|
python
|
{
"resource": ""
}
|
q8101
|
Import.__locate
|
train
|
def __locate(self):
"""Find the schema locally."""
if self.ns[1] != self.schema.tns[1]:
return self.schema.locate(self.ns)
|
python
|
{
"resource": ""
}
|
q8102
|
Import.__download
|
train
|
def __download(self, url, loaded_schemata, options):
"""Download the schema."""
try:
reader = DocumentReader(options)
d = reader.open(url)
root = d.root()
root.set("url", url)
return self.schema.instance(root, url, loaded_schemata, options)
except TransportError:
msg = "import schema (%s) at (%s), failed" % (self.ns[1], url)
log.error("%s, %s", self.id, msg, exc_info=True)
raise Exception(msg)
|
python
|
{
"resource": ""
}
|
q8103
|
Include.__applytns
|
train
|
def __applytns(self, root):
"""Make sure included schema has the same target namespace."""
TNS = "targetNamespace"
tns = root.get(TNS)
if tns is None:
tns = self.schema.tns[1]
root.set(TNS, tns)
else:
if self.schema.tns[1] != tns:
raise Exception, "%s mismatch" % TNS
|
python
|
{
"resource": ""
}
|
q8104
|
Definitions.add_methods
|
train
|
def add_methods(self, service):
"""Build method view for service."""
bindings = {
"document/literal": Document(self),
"rpc/literal": RPC(self),
"rpc/encoded": Encoded(self)}
for p in service.ports:
binding = p.binding
ptype = p.binding.type
        operations = ptype.operations.values()
for name in (op.name for op in operations):
m = Facade("Method")
m.name = name
m.location = p.location
m.binding = Facade("binding")
op = binding.operation(name)
m.soap = op.soap
key = "/".join((op.soap.style, op.soap.input.body.use))
m.binding.input = bindings.get(key)
key = "/".join((op.soap.style, op.soap.output.body.use))
m.binding.output = bindings.get(key)
p.methods[name] = m
|
python
|
{
"resource": ""
}
|
q8105
|
PortType.operation
|
train
|
def operation(self, name):
"""
Shortcut used to get a contained operation by name.
@param name: An operation name.
@type name: str
@return: The named operation.
@rtype: Operation
@raise L{MethodNotFound}: When not found.
"""
try:
return self.operations[name]
    except KeyError:
raise MethodNotFound(name)
|
python
|
{
"resource": ""
}
|
q8106
|
Service.port
|
train
|
def port(self, name):
"""
Locate a port by name.
@param name: A port name.
@type name: str
    @return: The port object or None when not found.
@rtype: L{Port}
"""
for p in self.ports:
if p.name == name:
return p
|
python
|
{
"resource": ""
}
|
q8107
|
Service.do_resolve
|
train
|
def do_resolve(self, definitions):
"""
Resolve named references to other WSDL objects. Ports without SOAP
bindings are discarded.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
filtered = []
for p in self.ports:
ref = qualify(p.binding, self.root, definitions.tns)
binding = definitions.bindings.get(ref)
if binding is None:
raise Exception("binding '%s', not-found" % (p.binding,))
if binding.soap is None:
log.debug("binding '%s' - not a SOAP binding, discarded",
binding.name)
continue
# After we have been resolved, our caller will expect that the
# binding we are referencing has been fully constructed, i.e.
# resolved, as well. The only scenario where the operations binding
# might possibly not have already resolved its references, and
# where this explicit resolve() call is required, is if we are
# dealing with a recursive WSDL import chain.
binding.resolve(definitions)
p.binding = binding
filtered.append(p)
self.ports = filtered
|
python
|
{
"resource": ""
}
|
q8108
|
FileCache._getf
|
train
|
def _getf(self, id):
"""Open a cached file with the given id for reading."""
try:
filename = self.__filename(id)
self.__remove_if_expired(filename)
return self.__open(filename, "rb")
except Exception:
pass
|
python
|
{
"resource": ""
}
|
q8109
|
FileCache.__filename
|
train
|
def __filename(self, id):
"""Return the cache file name for an entry with a given id."""
suffix = self.fnsuffix()
filename = "%s-%s.%s" % (self.fnprefix, id, suffix)
return os.path.join(self.location, filename)
|
python
|
{
"resource": ""
}
|
q8110
|
FileCache.__get_default_location
|
train
|
def __get_default_location():
"""
Returns the current process's default cache location folder.
The folder is determined lazily on first call.
"""
if not FileCache.__default_location:
tmp = tempfile.mkdtemp("suds-default-cache")
FileCache.__default_location = tmp
import atexit
atexit.register(FileCache.__remove_default_location)
return FileCache.__default_location
|
python
|
{
"resource": ""
}
|
q8111
|
FileCache.__remove_if_expired
|
train
|
def __remove_if_expired(self, filename):
"""
Remove a cached file entry if it expired.
@param filename: The file name.
@type filename: str
"""
if not self.duration:
return
created = datetime.datetime.fromtimestamp(os.path.getctime(filename))
expired = created + self.duration
if expired < datetime.datetime.now():
os.remove(filename)
log.debug("%s expired, deleted", filename)
|
python
|
{
"resource": ""
}
|
q8112
|
any_contains_any
|
train
|
def any_contains_any(strings, candidates):
"""Whether any of the strings contains any of the candidates."""
for string in strings:
for c in candidates:
if c in string:
return True
|
python
|
{
"resource": ""
}
|
q8113
|
path_to_URL
|
train
|
def path_to_URL(path, escape=True):
"""Convert a local file path to a absolute path file protocol URL."""
# We do not use urllib's builtin pathname2url() function since:
# - it has been commented with 'not recommended for general use'
# - it does not seem to work the same on Windows and non-Windows platforms
# (result starts with /// on Windows but does not on others)
# - urllib implementation prior to Python 2.5 used to quote ':' characters
# as '|' which would confuse pip on Windows.
url = os.path.abspath(path)
for sep in (os.sep, os.altsep):
if sep and sep != "/":
url = url.replace(sep, "/")
if escape:
# Must not escape ':' or '/' or Python will not recognize those URLs
# correctly. Detected on Windows 7 SP1 x64 with Python 3.4.0, but doing
# this always does not hurt since both are valid ASCII characters.
no_protocol_URL = url_quote(url, safe=":/")
else:
no_protocol_URL = url
return "file:///%s" % (no_protocol_URL,)
|
python
|
{
"resource": ""
}
|
q8114
|
requirement_spec
|
train
|
def requirement_spec(package_name, *args):
"""Identifier used when specifying a requirement to pip or setuptools."""
if not args or args == (None,):
return package_name
version_specs = []
for version_spec in args:
if isinstance(version_spec, (list, tuple)):
operator, version = version_spec
else:
assert isinstance(version_spec, str)
operator = "=="
version = version_spec
version_specs.append("%s%s" % (operator, version))
return "%s%s" % (package_name, ",".join(version_specs))
|
python
|
{
"resource": ""
}
|
q8115
|
path_iter
|
train
|
def path_iter(path):
"""Returns an iterator over all the file & folder names in a path."""
parts = []
while path:
path, item = os.path.split(path)
if item:
parts.append(item)
return reversed(parts)
|
python
|
{
"resource": ""
}
|
q8116
|
Document.mkparam
|
train
|
def mkparam(self, method, pdef, object):
"""
    Expand list parameters into individual parameters, each with the type
    information. This is needed because, in document/literal style, arrays
    are represented as multi-occurrence elements.
"""
if isinstance(object, (list, tuple)):
return [self.mkparam(method, pdef, item) for item in object]
return super(Document, self).mkparam(method, pdef, object)
|
python
|
{
"resource": ""
}
|
q8117
|
Document.param_defs
|
train
|
def param_defs(self, method):
"""Get parameter definitions for document literal."""
pts = self.bodypart_types(method)
if not method.soap.input.body.wrapped:
return pts
pt = pts[0][1].resolve()
return [(c.name, c, a) for c, a in pt if not c.isattr()]
|
python
|
{
"resource": ""
}
|
q8118
|
byte_str
|
train
|
def byte_str(s="", encoding="utf-8", input_encoding="utf-8", errors="strict"):
"""
Returns a byte string version of 's', encoded as specified in 'encoding'.
Accepts str & unicode objects, interpreting non-unicode strings as byte
strings encoded using the given input encoding.
"""
assert isinstance(s, basestring)
if isinstance(s, unicode):
return s.encode(encoding, errors)
if s and encoding != input_encoding:
return s.decode(input_encoding, errors).encode(encoding, errors)
return s
|
python
|
{
"resource": ""
}
|
q8119
|
_date_from_match
|
train
|
def _date_from_match(match_object):
"""
Create a date object from a regular expression match.
The regular expression match is expected to be from _RE_DATE or
_RE_DATETIME.
@param match_object: The regular expression match.
@type match_object: B{re}.I{MatchObject}
@return: A date object.
@rtype: B{datetime}.I{date}
"""
year = int(match_object.group("year"))
month = int(match_object.group("month"))
day = int(match_object.group("day"))
return datetime.date(year, month, day)
|
python
|
{
"resource": ""
}
|
q8120
|
_parse
|
train
|
def _parse(string):
"""
Parses given XML document content.
Returns the resulting root XML element node or None if the given XML
content is empty.
@param string: XML document content to parse.
@type string: I{bytes}
@return: Resulting root XML element node or None.
@rtype: L{Element}|I{None}
"""
if string:
return suds.sax.parser.Parser().parse(string=string)
|
python
|
{
"resource": ""
}
|
q8121
|
Factory.create
|
train
|
def create(self, name):
"""
Create a WSDL type by name.
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = sudsobject.Factory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug("%s created: %s", name, timer)
return result
|
python
|
{
"resource": ""
}
|
q8122
|
RequestContext.process_reply
|
train
|
def process_reply(self, reply, status=None, description=None):
"""
Re-entry for processing a successful reply.
Depending on how the ``retxml`` option is set, may return the SOAP
reply XML or process it and return the Python object representing the
returned value.
@param reply: The SOAP reply envelope.
@type reply: I{bytes}
@param status: The HTTP status code.
@type status: int
@param description: Additional status description.
@type description: I{bytes}
@return: The invoked web service operation return value.
@rtype: I{builtin}|I{subclass of} L{Object}|I{bytes}|I{None}
"""
return self.__process_reply(reply, status, description)
|
python
|
{
"resource": ""
}
|
q8123
|
_SoapClient.send
|
train
|
def send(self, soapenv):
"""
Send SOAP message.
Depending on how the ``nosend`` & ``retxml`` options are set, may do
one of the following:
* Return a constructed web service operation request without sending
it to the web service.
* Invoke the web service operation and return its SOAP reply XML.
* Invoke the web service operation, process its results and return
the Python object representing the returned value.
@param soapenv: A SOAP envelope to send.
@type soapenv: L{Document}
@return: SOAP request, SOAP reply or a web service return value.
@rtype: L{RequestContext}|I{builtin}|I{subclass of} L{Object}|I{bytes}|
I{None}
"""
location = self.__location()
log.debug("sending to (%s)\nmessage:\n%s", location, soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if self.options.prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode("utf-8")
ctx = plugins.message.sending(envelope=soapenv)
soapenv = ctx.envelope
if self.options.nosend:
return RequestContext(self.process_reply, soapenv)
request = suds.transport.Request(location, soapenv)
request.headers = self.__headers()
try:
timer = metrics.Timer()
timer.start()
reply = self.options.transport.send(request)
timer.stop()
metrics.log.debug("waited %s on server reply", timer)
except suds.transport.TransportError, e:
content = e.fp and e.fp.read() or ""
return self.process_reply(content, e.httpcode, tostr(e))
return self.process_reply(reply.message, None, None)
|
python
|
{
"resource": ""
}
|
q8124
|
_SoapClient.process_reply
|
train
|
def process_reply(self, reply, status, description):
"""
Process a web service operation SOAP reply.
Depending on how the ``retxml`` option is set, may return the SOAP
reply XML or process it and return the Python object representing the
returned value.
@param reply: The SOAP reply envelope.
@type reply: I{bytes}
@param status: The HTTP status code (None indicates httplib.OK).
@type status: int|I{None}
@param description: Additional status description.
@type description: str
@return: The invoked web service operation return value.
@rtype: I{builtin}|I{subclass of} L{Object}|I{bytes}|I{None}
"""
if status is None:
status = httplib.OK
debug_message = "Reply HTTP status - %d" % (status,)
if status in (httplib.ACCEPTED, httplib.NO_CONTENT):
log.debug(debug_message)
return
#TODO: Consider whether and how to allow plugins to handle error,
# httplib.ACCEPTED & httplib.NO_CONTENT replies as well as successful
# ones.
if status == httplib.OK:
log.debug("%s\n%s", debug_message, reply)
else:
log.debug("%s - %s\n%s", debug_message, description, reply)
plugins = PluginContainer(self.options.plugins)
ctx = plugins.message.received(reply=reply)
reply = ctx.reply
# SOAP standard states that SOAP errors must be accompanied by HTTP
# status code 500 - internal server error:
#
# From SOAP 1.1 specification:
# In case of a SOAP error while processing the request, the SOAP HTTP
# server MUST issue an HTTP 500 "Internal Server Error" response and
# include a SOAP message in the response containing a SOAP Fault
# element (see section 4.4) indicating the SOAP processing error.
#
# From WS-I Basic profile:
# An INSTANCE MUST use a "500 Internal Server Error" HTTP status code
# if the response message is a SOAP Fault.
replyroot = None
if status in (httplib.OK, httplib.INTERNAL_SERVER_ERROR):
replyroot = _parse(reply)
plugins.message.parsed(reply=replyroot)
fault = self.__get_fault(replyroot)
if fault:
if status != httplib.INTERNAL_SERVER_ERROR:
log.warn("Web service reported a SOAP processing fault "
"using an unexpected HTTP status code %d. Reporting "
"as an internal server error.", status)
if self.options.faults:
raise WebFault(fault, replyroot)
return httplib.INTERNAL_SERVER_ERROR, fault
if status != httplib.OK:
if self.options.faults:
#TODO: Use a more specific exception class here.
raise Exception((status, description))
return status, description
if self.options.retxml:
return reply
result = replyroot and self.method.binding.output.get_reply(
self.method, replyroot)
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
return httplib.OK, result
|
python
|
{
"resource": ""
}
|
q8125
|
_SoapClient.__get_fault
|
train
|
def __get_fault(self, replyroot):
"""
Extract fault information from a SOAP reply.
Returns an I{unmarshalled} fault L{Object} or None in case the given
XML document does not contain a SOAP <Fault> element.
@param replyroot: A SOAP reply message root XML element or None.
@type replyroot: L{Element}|I{None}
@return: A fault object.
@rtype: L{Object}
"""
envns = suds.bindings.binding.envns
soapenv = replyroot and replyroot.getChild("Envelope", envns)
soapbody = soapenv and soapenv.getChild("Body", envns)
fault = soapbody and soapbody.getChild("Fault", envns)
return fault is not None and UmxBasic().process(fault)
|
python
|
{
"resource": ""
}
|
q8126
|
_SimClient.invoke
|
train
|
def invoke(self, args, kwargs):
"""
Invoke a specified web service method.
Uses an injected SOAP request/response instead of a regularly
constructed/received one.
Depending on how the ``nosend`` & ``retxml`` options are set, may do
one of the following:
* Return a constructed web service operation request without sending
it to the web service.
* Invoke the web service operation and return its SOAP reply XML.
* Invoke the web service operation, process its results and return
the Python object representing the returned value.
@param args: Positional arguments for the method invoked.
@type args: list|tuple
@param kwargs: Keyword arguments for the method invoked.
@type kwargs: dict
@return: SOAP request, SOAP reply or a web service return value.
@rtype: L{RequestContext}|I{builtin}|I{subclass of} L{Object}|I{bytes}|
I{None}
"""
simulation = kwargs.pop(self.__injkey)
msg = simulation.get("msg")
if msg is not None:
assert msg.__class__ is suds.byte_str_class
return self.send(_parse(msg))
msg = self.method.binding.input.get_message(self.method, args, kwargs)
log.debug("inject (simulated) send message:\n%s", msg)
reply = simulation.get("reply")
if reply is not None:
assert reply.__class__ is suds.byte_str_class
status = simulation.get("status")
description = simulation.get("description")
if description is None:
description = "injected reply"
return self.process_reply(reply, status, description)
raise Exception("reply or msg injection parameter expected")
|
python
|
{
"resource": ""
}
|
q8127
|
Request.__set_URL
|
train
|
def __set_URL(self, url):
"""
    URL is stored as a str internally and must contain only ASCII characters.
Raised exception in case of detected non-ASCII URL characters may be
either UnicodeEncodeError or UnicodeDecodeError, depending on the used
Python version's str type and the exact value passed as URL input data.
"""
if isinstance(url, str):
url.encode("ascii") # Check for non-ASCII characters.
self.url = url
elif sys.version_info < (3, 0):
self.url = url.encode("ascii")
else:
self.url = url.decode("ascii")
|
python
|
{
"resource": ""
}
|
q8128
|
PathResolver.root
|
train
|
def root(self, parts):
"""
Find the path root.
@param parts: A list of path parts.
@type parts: [str,..]
@return: The root.
@rtype: L{xsd.sxbase.SchemaObject}
"""
    name = parts[0]
log.debug('searching schema for (%s)', name)
qref = self.qualify(parts[0])
query = BlindQuery(qref)
result = query.execute(self.schema)
if result is None:
log.error('(%s) not-found', name)
raise PathResolver.BadPath(name)
log.debug('found (%s) as (%s)', name, Repr(result))
return result
|
python
|
{
"resource": ""
}
|
q8129
|
PathResolver.branch
|
train
|
def branch(self, root, parts):
"""
Traverse the path until a leaf is reached.
@param parts: A list of path parts.
@type parts: [str,..]
@param root: The root.
@type root: L{xsd.sxbase.SchemaObject}
@return: The end of the branch.
@rtype: L{xsd.sxbase.SchemaObject}
"""
result = root
for part in parts[1:-1]:
name = splitPrefix(part)[1]
log.debug('searching parent (%s) for (%s)', Repr(result), name)
result, ancestry = result.get_child(name)
if result is None:
log.error('(%s) not-found', name)
raise PathResolver.BadPath(name)
result = result.resolve(nobuiltin=True)
log.debug('found (%s) as (%s)', name, Repr(result))
return result
|
python
|
{
"resource": ""
}
|
q8130
|
TreeResolver.getchild
|
train
|
def getchild(self, name, parent):
"""Get a child by name."""
log.debug('searching parent (%s) for (%s)', Repr(parent), name)
if name.startswith('@'):
return parent.get_attribute(name[1:])
return parent.get_child(name)
|
python
|
{
"resource": ""
}
|
q8131
|
_Archiver.__path_prefix
|
train
|
def __path_prefix(self, folder):
"""
Path prefix to be used when archiving any items from the given folder.
Expects the folder to be located under the base folder path and the
returned path prefix does not include the base folder information. This
makes sure we include just the base folder's content in the archive,
and not the base folder itself.
"""
path_parts = path_iter(folder)
_skip_expected(path_parts, self.__base_folder_parts)
result = "/".join(path_parts)
if result:
result += "/"
return result
|
python
|
{
"resource": ""
}
|
q8132
|
XDecimal._decimal_to_xsd_format
|
train
|
def _decimal_to_xsd_format(value):
"""
Converts a decimal.Decimal value to its XSD decimal type value.
Result is a string containing the XSD decimal type's lexical value
representation. The conversion is done without any precision loss.
Note that Python's native decimal.Decimal string representation will
not do here as the lexical representation desired here does not allow
representing decimal values using float-like `<mantissa>E<exponent>'
format, e.g. 12E+30 or 0.10006E-12.
"""
value = XDecimal._decimal_canonical(value)
negative, digits, exponent = value.as_tuple()
# The following implementation assumes the following tuple decimal
# encoding (part of the canonical decimal value encoding):
# - digits must contain at least one element
# - no leading integral 0 digits except a single one in 0 (if a non-0
# decimal value has leading integral 0 digits they must be encoded
# in its 'exponent' value and not included explicitly in its
# 'digits' tuple)
assert digits
assert digits[0] != 0 or len(digits) == 1
result = []
if negative:
result.append("-")
# No fractional digits.
if exponent >= 0:
result.extend(str(x) for x in digits)
result.extend("0" * exponent)
return "".join(result)
digit_count = len(digits)
# Decimal point offset from the given digit start.
point_offset = digit_count + exponent
# Trim trailing fractional 0 digits.
fractional_digit_count = min(digit_count, -exponent)
while fractional_digit_count and digits[digit_count - 1] == 0:
digit_count -= 1
fractional_digit_count -= 1
# No trailing fractional 0 digits and a decimal point coming not after
# the given digits, meaning there is no need to add additional trailing
# integral 0 digits.
if point_offset <= 0:
# No integral digits.
result.append("0")
if digit_count > 0:
result.append(".")
result.append("0" * -point_offset)
result.extend(str(x) for x in digits[:digit_count])
else:
# Have integral and possibly some fractional digits.
result.extend(str(x) for x in digits[:point_offset])
if point_offset < digit_count:
result.append(".")
result.extend(str(x) for x in digits[point_offset:digit_count])
return "".join(result)
|
python
|
{
"resource": ""
}
|
q8133
|
_reuse_pre_installed_setuptools
|
train
|
def _reuse_pre_installed_setuptools(env, installer):
"""
Return whether a pre-installed setuptools distribution should be reused.
"""
if not env.setuptools_version:
return # no prior setuptools ==> no reuse
reuse_old = config.reuse_old_setuptools
reuse_best = config.reuse_best_setuptools
reuse_future = config.reuse_future_setuptools
reuse_comment = None
if reuse_old or reuse_best or reuse_future:
pv_old = parse_version(env.setuptools_version)
pv_new = parse_version(installer.setuptools_version())
if pv_old < pv_new:
if reuse_old:
reuse_comment = "%s+ recommended" % (
installer.setuptools_version(),)
elif pv_old > pv_new:
if reuse_future:
reuse_comment = "%s+ required" % (
installer.setuptools_version(),)
elif reuse_best:
reuse_comment = ""
if reuse_comment is None:
return # reuse not allowed by configuration
if reuse_comment:
reuse_comment = " (%s)" % (reuse_comment,)
print("Reusing pre-installed setuptools %s distribution%s." % (
env.setuptools_version, reuse_comment))
return True
|
python
|
{
"resource": ""
}
|
q8134
|
download_pip
|
train
|
def download_pip(env, requirements):
"""Download pip and its requirements using setuptools."""
if config.installation_cache_folder() is None:
raise EnvironmentSetupError("Local installation cache folder not "
"defined but required for downloading a pip installation.")
# Installation cache folder needs to be explicitly created for setuptools
# to be able to copy its downloaded installation files into it. Seen using
# Python 2.4.4 & setuptools 1.4.
_create_installation_cache_folder_if_needed()
try:
env.execute(["-m", "easy_install", "--zip-ok", "--multi-version",
"--always-copy", "--exclude-scripts", "--install-dir",
config.installation_cache_folder()] + requirements)
zip_eggs_in_folder(config.installation_cache_folder())
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
raise EnvironmentSetupError("pip download failed.")
|
python
|
{
"resource": ""
}
|
q8135
|
setuptools_install_options
|
train
|
def setuptools_install_options(local_storage_folder):
"""
Return options to make setuptools use installations from the given folder.
No other installation source is allowed.
"""
if local_storage_folder is None:
return []
# setuptools expects its find-links parameter to contain a list of link
# sources (either local paths, file: URLs pointing to folders or URLs
# pointing to a file containing HTML links) separated by spaces. That means
# that, when specifying such items, whether local paths or URLs, they must
# not contain spaces. The problem can be worked around by using a local
# file URL, since URLs can contain space characters encoded as '%20' (for
# more detailed information see below).
#
# Any URL referencing a folder needs to be specified with a trailing '/'
# character in order for setuptools to correctly recognize it as a folder.
#
# All this has been tested using Python 2.4.3/2.4.4 & setuptools 1.4/1.4.2
# as well as Python 3.4 & setuptools 3.3.
#
# Supporting paths with spaces - method 1:
# ----------------------------------------
# One way would be to prepare a link file and pass an URL referring to that
# link file. The link file needs to contain a list of HTML link tags
# (<a href="..."/>), one for every item stored inside the local storage
# folder. If a link file references a folder whose name matches the desired
# requirement name, it will be searched recursively (as described in method
# 2 below).
#
# Note that in order for setuptools to recognize a local link file URL
# correctly, the file needs to be named with the '.html' extension. That
# will cause the underlying urllib2.open() operation to return the link
# file's content type as 'text/html' which is required for setuptools to
# recognize a valid link file.
#
# Supporting paths with spaces - method 2:
# ----------------------------------------
# Another possible way is to use an URL referring to the local storage
# folder directly. This will cause setuptools to prepare and use a link
    # file internally - with its content read from an 'index.html' file located
# in the given local storage folder, if it exists, or constructed so it
# contains HTML links to all top-level local storage folder items, as
# described for method 1 above.
if " " in local_storage_folder:
find_links_param = utility.path_to_URL(local_storage_folder)
if find_links_param[-1] != "/":
find_links_param += "/"
else:
find_links_param = local_storage_folder
return ["-f", find_links_param, "--allow-hosts=None"]
|
python
|
{
"resource": ""
}
|
q8136
|
install_pip
|
train
|
def install_pip(env, requirements):
"""Install pip and its requirements using setuptools."""
try:
installation_source_folder = config.installation_cache_folder()
options = setuptools_install_options(installation_source_folder)
if installation_source_folder is not None:
zip_eggs_in_folder(installation_source_folder)
env.execute(["-m", "easy_install"] + options + requirements)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
raise EnvironmentSetupError("pip installation failed.")
|
python
|
{
"resource": ""
}
|
q8137
|
download_pip_based_installations
|
train
|
def download_pip_based_installations(env, pip_invocation, requirements,
download_cache_folder):
"""Download requirements for pip based installation."""
if config.installation_cache_folder() is None:
raise EnvironmentSetupError("Local installation cache folder not "
"defined but required for downloading pip based installations.")
# Installation cache folder needs to be explicitly created for pip to be
# able to copy its downloaded installation files into it. The same does not
# hold for pip's download cache folder which gets created by pip on-demand.
# Seen using Python 3.4.0 & pip 1.5.4.
_create_installation_cache_folder_if_needed()
try:
pip_options = ["install", "-d", config.installation_cache_folder(),
"--exists-action=i"]
pip_options.extend(pip_download_cache_options(download_cache_folder))
# Running pip based installations on Python 2.5.
# * Python 2.5 does not come with SSL support enabled by default and
# so pip can not use SSL certified downloads from PyPI.
# * To work around this either install the
# https://pypi.python.org/pypi/ssl package or run pip using the
        #   '--insecure' command-line option.
# * Installing the ssl package seems ridden with problems on
# Python 2.5 so this workaround has not been tested.
if (2, 5) <= env.sys_version_info < (2, 6):
# There are some potential cases where we do not need to use
# "--insecure", e.g. if the target Python environment already has
# the 'ssl' module installed. However, detecting whether this is so
# does not seem to be worth the effort. The only way to detect
# whether secure download is supported would be to scan the target
# environment for this information, e.g. setuptools has this
# information in its pip.backwardcompat.ssl variable - if it is
# None, the necessary SSL support is not available. But then we
# would have to be careful:
# - not to run the scan if we already know this information from
# some previous scan
# - to track all actions that could have invalidated our previous
# scan results, etc.
# It just does not seem to be worth the hassle so for now - YAGNI.
pip_options.append("--insecure")
env.execute(pip_invocation + pip_options + requirements)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
raise EnvironmentSetupError("pip based download failed.")
|
python
|
{
"resource": ""
}
|
q8138
|
enabled_actions_for_env
|
train
|
def enabled_actions_for_env(env):
"""Returns actions to perform when processing the given environment."""
def enabled(config_value, required):
if config_value is Config.TriBool.No:
return False
if config_value is Config.TriBool.Yes:
return True
assert config_value is Config.TriBool.IfNeeded
return bool(required)
# Some old Python versions do not support HTTPS downloads and therefore can
# not download installation packages from PyPI. To run setuptools or pip
# based installations on such Python versions, all the required
# installation packages need to be downloaded locally first using a
# compatible Python version (e.g. Python 2.4.4 for Python 2.4.3) and then
# installed locally.
download_supported = not ((2, 4, 3) <= env.sys_version_info < (2, 4, 4))
local_install = config.installation_cache_folder() is not None
actions = set()
pip_required = False
run_pip_based_installations = enabled(config.install_environments, True)
if run_pip_based_installations:
actions.add("run pip based installations")
pip_required = True
if download_supported and enabled(config.download_installations,
local_install and run_pip_based_installations):
actions.add("download pip based installations")
pip_required = True
setuptools_required = False
run_pip_installation = enabled(config.install_environments, pip_required)
if run_pip_installation:
actions.add("run pip installation")
setuptools_required = True
if download_supported and enabled(config.download_installations,
local_install and run_pip_installation):
actions.add("download pip installation")
setuptools_required = True
if enabled(config.setup_setuptools, setuptools_required):
actions.add("setup setuptools")
return actions
|
python
|
{
"resource": ""
}
|
q8139
|
_ez_setup_script.__setuptools_version
|
train
|
def __setuptools_version(self):
"""Read setuptools version from the underlying ez_setup script."""
# Read the script directly as a file instead of importing it as a
# Python module and reading the value from the loaded module's global
# DEFAULT_VERSION variable. Not all ez_setup scripts are compatible
# with all Python environments and so importing them would require
# doing so using a separate process run in the target Python
# environment instead of the current one.
f = open(self.script_path(), "r")
try:
matcher = re.compile(r'\s*DEFAULT_VERSION\s*=\s*"([^"]*)"\s*$')
for i, line in enumerate(f):
if i > 50:
break
match = matcher.match(line)
if match:
return match.group(1)
finally:
f.close()
self.__error("error parsing setuptools installation script '%s'" % (
self.script_path(),))
|
python
|
{
"resource": ""
}
|
q8140
|
Element.rename
|
train
|
def rename(self, name):
"""
Rename the element.
@param name: A new name for the element.
@type name: basestring
"""
if name is None:
raise Exception("name (%s) not-valid" % (name,))
self.prefix, self.name = splitPrefix(name)
|
python
|
{
"resource": ""
}
|
q8141
|
Element.clone
|
train
|
def clone(self, parent=None):
"""
Deep clone of this element and children.
@param parent: An optional parent for the copied fragment.
@type parent: I{Element}
@return: A deep copy parented by I{parent}
@rtype: I{Element}
"""
root = Element(self.qname(), parent, self.namespace())
for a in self.attributes:
root.append(a.clone(self))
for c in self.children:
root.append(c.clone(self))
for ns in self.nsprefixes.items():
root.addPrefix(ns[0], ns[1])
return root
|
python
|
{
"resource": ""
}
|
q8142
|
Element.get
|
train
|
def get(self, name, ns=None, default=None):
"""
Get the value of an attribute by name.
@param name: The name of the attribute.
@type name: basestring
@param ns: The optional attribute's namespace.
@type ns: (I{prefix}, I{name})
@param default: An optional value to be returned when either the
attribute does not exist or has no value.
@type default: basestring
@return: The attribute's value or I{default}.
@rtype: basestring
@see: __getitem__()
"""
attr = self.getAttribute(name, ns)
if attr is None or attr.value is None:
return default
return attr.getValue()
|
python
|
{
"resource": ""
}
|
q8143
|
Element.namespace
|
train
|
def namespace(self):
"""
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
"""
if self.prefix is None:
return self.defaultNamespace()
return self.resolvePrefix(self.prefix)
|
python
|
{
"resource": ""
}
|
q8144
|
Element.append
|
train
|
def append(self, objects):
"""
Append the specified child based on whether it is an element or an
attribute.
@param objects: A (single|collection) of attribute(s) or element(s) to
be added as children.
@type objects: (L{Element}|L{Attribute})
@return: self
@rtype: L{Element}
"""
if not isinstance(objects, (list, tuple)):
objects = (objects,)
for child in objects:
if isinstance(child, Element):
self.children.append(child)
child.parent = self
continue
if isinstance(child, Attribute):
self.attributes.append(child)
child.parent = self
continue
raise Exception("append %s not-valid" %
(child.__class__.__name__,))
return self
|
python
|
{
"resource": ""
}
|
q8145
|
Element.promotePrefixes
|
train
|
def promotePrefixes(self):
"""
Push prefix declarations up the tree as far as possible.
    A prefix mapping is pushed to the element's parent unless the parent
    already has the prefix mapped to another URI or uses the prefix itself.
    This is propagated up the tree until the top is reached.
@return: self
@rtype: L{Element}
"""
for c in self.children:
c.promotePrefixes()
if self.parent is None:
return
for p, u in self.nsprefixes.items():
if p in self.parent.nsprefixes:
pu = self.parent.nsprefixes[p]
if pu == u:
del self.nsprefixes[p]
continue
if p != self.parent.prefix:
self.parent.nsprefixes[p] = u
del self.nsprefixes[p]
return self
|
python
|
{
"resource": ""
}
|
q8146
|
Element.isempty
|
train
|
def isempty(self, content=True):
"""
Get whether the element has no children.
@param content: Test content (children & text) only.
@type content: boolean
    @return: True when the element has no children.
@rtype: boolean
"""
nochildren = not self.children
notext = self.text is None
nocontent = nochildren and notext
if content:
return nocontent
noattrs = not len(self.attributes)
return nocontent and noattrs
|
python
|
{
"resource": ""
}
|
q8147
|
Element.applyns
|
train
|
def applyns(self, ns):
"""
Apply the namespace to this node.
If the prefix is I{None} then this element's explicit namespace
I{expns} is set to the URI defined by I{ns}. Otherwise, the I{ns} is
simply mapped.
@param ns: A namespace.
@type ns: (I{prefix}, I{URI})
"""
if ns is None:
return
if not isinstance(ns, (list, tuple)):
raise Exception("namespace must be a list or a tuple")
if ns[0] is None:
self.expns = ns[1]
else:
self.prefix = ns[0]
self.nsprefixes[ns[0]] = ns[1]
|
python
|
{
"resource": ""
}
|
q8148
|
DocumentReader.__fetch
|
train
|
def __fetch(self, url):
"""
Fetch document content from an external source.
The document content will first be looked up in the registered document
store, and if not found there, downloaded using the registered
transport system.
Before being returned, the fetched document content first gets
processed by all the registered 'loaded' plugins.
@param url: A document URL.
@type url: str.
@return: A file pointer to the fetched document content.
@rtype: file-like
"""
content = None
store = self.options.documentStore
if store is not None:
content = store.open(url)
if content is None:
request = suds.transport.Request(url)
fp = self.options.transport.open(request)
try:
content = fp.read()
finally:
fp.close()
ctx = self.plugins.document.loaded(url=url, document=content)
content = ctx.document
sax = suds.sax.parser.Parser()
return sax.parse(string=content)
|
python
|
{
"resource": ""
}
|
q8149
|
Environment.__construct_python_version
|
train
|
def __construct_python_version(self):
"""
Construct a setuptools compatible Python version string.
Constructed based on the environment's reported sys.version_info.
"""
major, minor, micro, release_level, serial = self.sys_version_info
assert release_level in ("alfa", "beta", "candidate", "final")
assert release_level != "final" or serial == 0
parts = [str(major), ".", str(minor), ".", str(micro)]
if release_level != "final":
parts.append(release_level[0])
parts.append(str(serial))
self.python_version = "".join(parts)
|
python
|
{
"resource": ""
}
|
q8150
|
Environment.__parse_scanned_version_info
|
train
|
def __parse_scanned_version_info(self):
"""Parses the environment's formatted version info string."""
string = self.sys_version_info_formatted
try:
major, minor, micro, release_level, serial = string.split(",")
if (release_level in ("alfa", "beta", "candidate", "final") and
(release_level != "final" or serial == "0") and
major.isdigit() and # --- --- --- --- --- --- --- --- ---
minor.isdigit() and # Explicit isdigit() checks to detect
micro.isdigit() and # leading/trailing whitespace.
serial.isdigit()): # --- --- --- --- --- --- --- --- ---
self.sys_version_info = (int(major), int(minor), int(micro),
release_level, int(serial))
self.__construct_python_version()
return
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
raise BadEnvironment("Unsupported Python version (%s)" % (string,))
|
python
|
{
"resource": ""
}
|
q8151
|
Typed.node
|
train
|
def node(self, content):
"""
Create an XML node.
The XML node is namespace qualified as defined by the corresponding
schema element.
"""
ns = content.type.namespace()
if content.type.form_qualified:
node = Element(content.tag, ns=ns)
if ns[0]:
node.addPrefix(ns[0], ns[1])
else:
node = Element(content.tag)
self.encode(node, content)
log.debug("created - node:\n%s", node)
return node
|
python
|
{
"resource": ""
}
|
q8152
|
_detect_eggs_in_folder
|
train
|
def _detect_eggs_in_folder(folder):
"""
Detect egg distributions located in the given folder.
Only direct folder content is considered and subfolders are not searched
recursively.
"""
eggs = {}
for x in os.listdir(folder):
zip = x.endswith(_zip_ext)
if zip:
root = x[:-len(_zip_ext)]
egg = _Egg.NONE
elif x.endswith(_egg_ext):
root = x[:-len(_egg_ext)]
if os.path.isdir(os.path.join(folder, x)):
egg = _Egg.FOLDER
else:
egg = _Egg.FILE
else:
continue
try:
info = eggs[root]
except KeyError:
eggs[root] = _Egg(os.path.join(folder, root), egg, zip)
else:
if egg is not _Egg.NONE:
info.set_egg(egg)
if zip:
info.set_zip()
return eggs.values()
|
python
|
{
"resource": ""
}
|
q8153
|
_Egg.normalize
|
train
|
def normalize(self):
"""
Makes sure this egg distribution is stored only as an egg file.
The egg file will be created from another existing distribution format
if needed.
"""
if self.has_egg_file():
if self.has_zip():
self.__remove_zip()
else:
if self.has_egg_folder():
if not self.has_zip():
self.__zip_egg_folder()
self.__remove_egg_folder()
self.__rename_zip_to_egg()
|
python
|
{
"resource": ""
}
|
q8154
|
parse_args
|
train
|
def parse_args(method_name, param_defs, args, kwargs, external_param_processor,
extra_parameter_errors):
"""
Parse arguments for suds web service operation invocation functions.
Suds prepares Python function objects for invoking web service operations.
This function implements generic binding agnostic part of processing the
arguments passed when calling those function objects.
Argument parsing rules:
* Each input parameter element should be represented by single regular
Python function argument.
* At most one input parameter belonging to a single choice parameter
structure may have its value specified as something other than None.
* Positional arguments are mapped to choice group input parameters the
same as is done for a simple all/sequence group - each in turn.
Expects to be passed the web service operation's parameter definitions
(parameter name, type & optional ancestry information) in order and, based
    on that, extracts the values for those parameters from the arguments
provided in the web service operation invocation call.
Ancestry information describes parameters constructed based on suds
library's automatic input parameter structure unwrapping. It is expected to
include the parameter's XSD schema 'ancestry' context, i.e. a list of all
the parent XSD schema tags containing the parameter's <element> tag. Such
ancestry context provides detailed information about how the parameter's
value is expected to be used, especially in relation to other input
parameters, e.g. at most one parameter value may be specified for
parameters directly belonging to the same choice input group.
Rules on acceptable ancestry items:
* Ancestry item's choice() method must return whether the item
represents a <choice> XSD schema tag.
* Passed ancestry items are used 'by address' internally and the same XSD
schema tag is expected to be identified by the exact same ancestry item
object during the whole argument processing.
During processing, each parameter's definition and value, together with any
additional pertinent information collected from the encountered parameter
definition structure, is passed on to the provided external parameter
processor function. There that information is expected to be used to
construct the actual binding specific web service operation invocation
request.
Raises a TypeError exception in case any argument related errors are
detected. The exceptions raised have been constructed to make them as
similar as possible to their respective exceptions raised during regular
Python function argument checking.
Does not support multiple same-named input parameters.
"""
arg_parser = _ArgParser(method_name, param_defs, external_param_processor)
return arg_parser(args, kwargs, extra_parameter_errors)
|
python
|
{
"resource": ""
}
|
q8155
|
_ArgParser.__all_parameters_processed
|
train
|
def __all_parameters_processed(self):
"""
Finish the argument processing.
Should be called after all the web service operation's parameters have
been successfully processed and, afterwards, no further parameter
processing is allowed.
Returns a 2-tuple containing the number of required & allowed
arguments.
See the _ArgParser class description for more detailed information.
"""
assert self.active()
sentinel_frame = self.__stack[0]
self.__pop_frames_above(sentinel_frame)
assert len(self.__stack) == 1
self.__pop_top_frame()
assert not self.active()
args_required = sentinel_frame.args_required()
args_allowed = sentinel_frame.args_allowed()
self.__check_for_extra_arguments(args_required, args_allowed)
return args_required, args_allowed
|
python
|
{
"resource": ""
}
|
q8156
|
_ArgParser.__check_for_extra_arguments
|
train
|
def __check_for_extra_arguments(self, args_required, args_allowed):
"""
Report an error in case any extra arguments are detected.
Does nothing if reporting extra arguments as exceptions has not been
enabled.
May only be called after the argument processing has been completed.
"""
assert not self.active()
if not self.__extra_parameter_errors:
return
if self.__kwargs:
param_name = self.__kwargs.keys()[0]
if param_name in self.__params_with_arguments:
msg = "got multiple values for parameter '%s'"
else:
msg = "got an unexpected keyword argument '%s'"
self.__error(msg % (param_name,))
if self.__args:
def plural_suffix(count):
if count == 1:
return ""
return "s"
def plural_was_were(count):
if count == 1:
return "was"
return "were"
expected = args_required
if args_required != args_allowed:
expected = "%d to %d" % (args_required, args_allowed)
given = self.__args_count
msg_parts = ["takes %s positional argument" % (expected,),
plural_suffix(expected), " but %d " % (given,),
plural_was_were(given), " given"]
self.__error("".join(msg_parts))
|
python
|
{
"resource": ""
}
|
q8157
|
_ArgParser.__frame_factory
|
train
|
def __frame_factory(self, ancestry_item):
"""Construct a new frame representing the given ancestry item."""
frame_class = Frame
if ancestry_item is not None and ancestry_item.choice():
frame_class = ChoiceFrame
return frame_class(ancestry_item, self.__error,
self.__extra_parameter_errors)
|
python
|
{
"resource": ""
}
|
q8158
|
_ArgParser.__get_param_value
|
train
|
def __get_param_value(self, name):
"""
Extract a parameter value from the remaining given arguments.
Returns a 2-tuple consisting of the following:
* Boolean indicating whether an argument has been specified for the
requested input parameter.
* Parameter value.
"""
if self.__args:
return True, self.__args.pop(0)
try:
value = self.__kwargs.pop(name)
except KeyError:
return False, None
return True, value
|
python
|
{
"resource": ""
}
|
q8159
|
_ArgParser.__in_choice_context
|
train
|
def __in_choice_context(self):
"""
Whether we are currently processing a choice parameter group.
This includes processing a parameter defined directly or indirectly
within such a group.
May only be called during parameter processing or the result will be
calculated based on the context left behind by the previous parameter
processing if any.
"""
for x in self.__stack:
if x.__class__ is ChoiceFrame:
return True
return False
|
python
|
{
"resource": ""
}
|
q8160
|
_ArgParser.__init_run
|
train
|
def __init_run(self, args, kwargs, extra_parameter_errors):
"""Initializes data for a new argument parsing run."""
assert not self.active()
self.__args = list(args)
self.__kwargs = dict(kwargs)
self.__extra_parameter_errors = extra_parameter_errors
self.__args_count = len(args) + len(kwargs)
self.__params_with_arguments = set()
self.__stack = []
self.__push_frame(None)
|
python
|
{
"resource": ""
}
|
q8161
|
_ArgParser.__match_ancestry
|
train
|
def __match_ancestry(self, ancestry):
"""
Find frames matching the given ancestry.
Returns a tuple containing the following:
* Topmost frame matching the given ancestry or the bottom-most sentry
frame if no frame matches.
* Unmatched ancestry part.
"""
stack = self.__stack
if len(stack) == 1:
return stack[0], ancestry
previous = stack[0]
for frame, n in zip(stack[1:], xrange(len(ancestry))):
if frame.id() is not ancestry[n]:
return previous, ancestry[n:]
previous = frame
return frame, ancestry[n + 1:]
|
python
|
{
"resource": ""
}
|
q8162
|
_ArgParser.__pop_frames_above
|
train
|
def __pop_frames_above(self, frame):
"""Pops all the frames above, but not including the given frame."""
while self.__stack[-1] is not frame:
self.__pop_top_frame()
assert self.__stack
|
python
|
{
"resource": ""
}
|
q8163
|
_ArgParser.__pop_top_frame
|
train
|
def __pop_top_frame(self):
"""Pops the top frame off the frame stack."""
popped = self.__stack.pop()
if self.__stack:
self.__stack[-1].process_subframe(popped)
|
python
|
{
"resource": ""
}
|
q8164
|
_ArgParser.__process_parameter
|
train
|
def __process_parameter(self, param_name, param_type, ancestry=None):
"""Collect values for a given web service operation input parameter."""
assert self.active()
param_optional = param_type.optional()
has_argument, value = self.__get_param_value(param_name)
if has_argument:
self.__params_with_arguments.add(param_name)
self.__update_context(ancestry)
self.__stack[-1].process_parameter(param_optional, value is not None)
self.__external_param_processor(param_name, param_type,
self.__in_choice_context(), value)
|
python
|
{
"resource": ""
}
|
q8165
|
_ArgParser.__push_frame
|
train
|
def __push_frame(self, ancestry_item):
"""Push a new frame on top of the frame stack."""
frame = self.__frame_factory(ancestry_item)
self.__stack.append(frame)
|
python
|
{
"resource": ""
}
|
q8166
|
DocumentStore.open
|
train
|
def open(self, url):
"""
Open a document at the specified URL.
    The document URL need not contain a protocol identifier; if it does,
    that protocol identifier is ignored when looking up the store content.
Missing documents referenced using the internal 'suds' protocol are
reported by raising an exception. For other protocols, None is returned
instead.
@param url: A document URL.
@type url: str
@return: Document content or None if not found.
@rtype: bytes
"""
protocol, location = self.__split(url)
content = self.__find(location)
if protocol == 'suds' and content is None:
        raise Exception('location "%s" not in document store' % location)
return content
|
python
|
{
"resource": ""
}
|
q8167
|
read_python_code
|
train
|
def read_python_code(filename):
"Returns the given Python source file's compiled content."
file = open(filename, "rt")
try:
source = file.read()
finally:
file.close()
# Python 2.6 and below did not support passing strings to exec() &
# compile() functions containing line separators other than '\n'. To
# support them we need to manually make sure such line endings get
# converted even on platforms where this is not handled by native text file
# read operations.
source = source.replace("\r\n", "\n").replace("\r", "\n")
return compile(source, filename, "exec")
|
python
|
{
"resource": ""
}
|
q8168
|
recursive_package_list
|
train
|
def recursive_package_list(*packages):
"""
Returns a list of all the given packages and all their subpackages.
Given packages are expected to be found relative to this script's location.
Subpackages are detected by scanning the given packages' subfolder
hierarchy for any folders containing the '__init__.py' module. As a
consequence, namespace packages are not supported.
This is our own specialized setuptools.find_packages() replacement so we
can avoid the setuptools dependency.
"""
result = set()
todo = []
for package in packages:
folder = os.path.join(script_folder, *package.split("."))
if not os.path.isdir(folder):
raise Exception("Folder not found for package '%s'." % (package,))
todo.append((package, folder))
while todo:
package, folder = todo.pop()
if package in result:
continue
result.add(package)
for subitem in os.listdir(folder):
subpackage = ".".join((package, subitem))
subfolder = os.path.join(folder, subitem)
if not os.path.isfile(os.path.join(subfolder, "__init__.py")):
continue
todo.append((subpackage, subfolder))
return list(result)
|
python
|
{
"resource": ""
}
|
q8169
|
SchemaObject.find
|
train
|
def find(self, qref, classes=[], ignore=None):
"""
Find a referenced type in self or children. Return None if not found.
Qualified references for all schema objects checked in this search will
be added to the set of ignored qualified references to avoid the find
operation going into an infinite loop in case of recursively defined
structures.
@param qref: A qualified reference.
@type qref: qref
@param classes: A collection of classes used to qualify the match.
@type classes: Collection(I{class},...), e.g. [I(class),...]
@param ignore: A set of qualified references to ignore in this search.
@type ignore: {qref,...}
@return: The referenced type.
@rtype: L{SchemaObject}
@see: L{qualify()}
"""
if not len(classes):
classes = (self.__class__,)
if ignore is None:
ignore = set()
if self.qname in ignore:
return
ignore.add(self.qname)
if self.qname == qref and self.__class__ in classes:
return self
for c in self.rawchildren:
p = c.find(qref, classes, ignore=ignore)
if p is not None:
return p
|
python
|
{
"resource": ""
}
|
q8170
|
Iter.next
|
train
|
def next(self):
"""
Get the next item.
@return: A tuple: the next (child, ancestry).
@rtype: (L{SchemaObject}, [L{SchemaObject},..])
    @raise StopIteration: At the end.
"""
frame = self.top()
while True:
result = frame.next()
if result is None:
self.pop()
return self.next()
if isinstance(result, Content):
ancestry = [f.sx for f in self.stack]
return result, ancestry
self.push(result)
return self.next()
|
python
|
{
"resource": ""
}
|
q8171
|
ServiceDefinition.getprefixes
|
train
|
def getprefixes(self):
"""Add prefixes for each namespace referenced by parameter types."""
namespaces = []
for l in (self.params, self.types):
for t,r in l:
ns = r.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
if Namespace.xs(ns) or Namespace.xsd(ns):
continue
namespaces.append(ns[1])
if t == r: continue
ns = t.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
namespaces.append(ns[1])
    namespaces.sort()
for u in namespaces:
p = self.nextprefix()
ns = (p, u)
self.prefixes.append(ns)
|
python
|
{
"resource": ""
}
|
q8172
|
ServiceDefinition.description
|
train
|
def description(self):
"""
    Get a textual description of the service this object represents.
@return: A textual description.
@rtype: str
"""
s = []
indent = (lambda n : '\n%*s'%(n*3,' '))
s.append('Service ( %s ) tns="%s"' % (self.service.name, self.wsdl.tns[1]))
s.append(indent(1))
s.append('Prefixes (%d)' % len(self.prefixes))
for p in self.prefixes:
s.append(indent(2))
s.append('%s = "%s"' % p)
s.append(indent(1))
s.append('Ports (%d):' % len(self.ports))
for p in self.ports:
s.append(indent(2))
s.append('(%s)' % p[0].name)
s.append(indent(3))
s.append('Methods (%d):' % len(p[1]))
for m in p[1]:
sig = []
s.append(indent(4))
sig.append(m[0])
sig.append('(')
sig.append(', '.join("%s %s" % (self.xlate(p[1]), p[0]) for p
in m[1]))
sig.append(')')
try:
s.append(''.join(sig))
except Exception:
pass
s.append(indent(3))
s.append('Types (%d):' % len(self.types))
for t in self.types:
s.append(indent(4))
s.append(self.xlate(t[0]))
s.append('\n\n')
return ''.join(s)
|
python
|
{
"resource": ""
}
|
q8173
|
Document.plain
|
train
|
def plain(self):
"""
Get a string representation of this XML document.
@return: A I{plain} string.
@rtype: basestring
"""
s = []
s.append(self.DECL)
root = self.root()
if root is not None:
s.append(root.plain())
return ''.join(s)
|
python
|
{
"resource": ""
}
|
q8174
|
SchemaCollection.merge
|
train
|
def merge(self):
"""
Merge contained schemas into one.
@return: The merged schema.
@rtype: L{Schema}
"""
if self.children:
schema = self.children[0]
for s in self.children[1:]:
schema.merge(s)
return schema
|
python
|
{
"resource": ""
}
|
q8175
|
Topics.create_sqs_policy
|
train
|
def create_sqs_policy(self, topic_name, topic_arn, topic_subs):
"""
This method creates the SQS policy needed for an SNS subscription. It
also takes the ARN of the SQS queue and converts it to the URL needed
for the subscription, as that takes a URL rather than the ARN.
"""
t = self.template
arn_endpoints = []
url_endpoints = []
for sub in topic_subs:
arn_endpoints.append(sub["Endpoint"])
split_endpoint = sub["Endpoint"].split(":")
queue_url = "https://%s.%s.amazonaws.com/%s/%s" % (
split_endpoint[2], # literally "sqs"
split_endpoint[3], # AWS region
split_endpoint[4], # AWS ID
split_endpoint[5], # Queue name
)
url_endpoints.append(queue_url)
policy_doc = queue_policy(topic_arn, arn_endpoints)
t.add_resource(
sqs.QueuePolicy(
topic_name + "SubPolicy",
PolicyDocument=policy_doc,
Queues=url_endpoints,
)
)
|
python
|
{
"resource": ""
}
|
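A minimal standalone sketch of the ARN-to-URL conversion above; the helper name and the sample ARN are invented for illustration.

def sqs_arn_to_url(arn):
    # An SQS ARN has the form arn:aws:sqs:<region>:<account-id>:<queue-name>.
    parts = arn.split(":")
    return "https://%s.%s.amazonaws.com/%s/%s" % (
        parts[2],  # the literal service name, "sqs"
        parts[3],  # AWS region
        parts[4],  # AWS account id
        parts[5],  # queue name
    )

print(sqs_arn_to_url("arn:aws:sqs:us-east-1:123456789012:my-queue"))
# https://sqs.us-east-1.amazonaws.com/123456789012/my-queue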
q8176
|
Topics.create_topic
|
train
|
def create_topic(self, topic_name, topic_config):
"""
Creates the SNS topic, along with any subscriptions requested.
"""
topic_subs = []
t = self.template
if "Subscription" in topic_config:
topic_subs = topic_config["Subscription"]
t.add_resource(
sns.Topic.from_dict(
topic_name,
topic_config
)
)
topic_arn = Ref(topic_name)
t.add_output(
Output(topic_name + "Name", Value=GetAtt(topic_name, "TopicName"))
)
t.add_output(Output(topic_name + "Arn", Value=topic_arn))
sqs_subs = [sub for sub in topic_subs if sub["Protocol"] == "sqs"]
if sqs_subs:
self.create_sqs_policy(topic_name, topic_arn, sqs_subs)
|
python
|
{
"resource": ""
}
|
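For illustration, a hypothetical topic_config showing the shape create_topic consumes; the queue ARN and email address are invented.

topic_config = {
    "Subscription": [
        {"Protocol": "sqs",
         "Endpoint": "arn:aws:sqs:us-east-1:123456789012:my-queue"},
        {"Protocol": "email",
         "Endpoint": "ops@example.com"},
    ],
}
# create_topic("Alerts", topic_config) would add the topic, both outputs,
# and, because one subscription uses the sqs protocol, an SQS queue policy.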
q8177
|
get_stream_action_type
|
train
|
def get_stream_action_type(stream_arn):
"""Returns the awacs Action for a stream type given an arn
Args:
stream_arn (str): The Arn of the stream.
Returns:
:class:`awacs.aws.Action`: The appropriate stream type awacs Action
class
Raises:
ValueError: If the stream type doesn't match kinesis or dynamodb.
"""
stream_type_map = {
"kinesis": awacs.kinesis.Action,
"dynamodb": awacs.dynamodb.Action,
}
stream_type = stream_arn.split(":")[2]
try:
return stream_type_map[stream_type]
except KeyError:
raise ValueError(
"Invalid stream type '%s' in arn '%s'" % (stream_type, stream_arn)
)
|
python
|
{
"resource": ""
}
|
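A usage sketch, assuming the awacs package is installed; both ARNs are invented.

import awacs.kinesis

arn = "arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"
assert get_stream_action_type(arn) is awacs.kinesis.Action

get_stream_action_type("arn:aws:sns:us-east-1:123456789012:my-topic")
# raises ValueError: Invalid stream type 'sns' in arn '...'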
q8178
|
stream_reader_statements
|
train
|
def stream_reader_statements(stream_arn):
"""Returns statements to allow Lambda to read from a stream.
Handles both DynamoDB & Kinesis streams. Automatically figures out the
type of stream, and provides the correct actions from the supplied Arn.
Args:
stream_arn (str): A kinesis or dynamodb stream arn.
Returns:
list: A list of statements.
"""
action_type = get_stream_action_type(stream_arn)
arn_parts = stream_arn.split("/")
# Cut off the last bit and replace it with a wildcard
wildcard_arn_parts = arn_parts[:-1]
wildcard_arn_parts.append("*")
wildcard_arn = "/".join(wildcard_arn_parts)
return [
Statement(
Effect=Allow,
Resource=[stream_arn],
Action=[
action_type("DescribeStream"),
action_type("GetRecords"),
action_type("GetShardIterator"),
]
),
Statement(
Effect=Allow,
Resource=[wildcard_arn],
Action=[action_type("ListStreams")]
)
]
|
python
|
{
"resource": ""
}
|
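The wildcard derivation above, shown on a made-up Kinesis ARN:

arn = "arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"
wildcard = "/".join(arn.split("/")[:-1] + ["*"])
print(wildcard)  # arn:aws:kinesis:us-east-1:123456789012:stream/*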
q8179
|
Function.add_policy_statements
|
train
|
def add_policy_statements(self, statements):
"""Adds statements to the policy.
Args:
statements (:class:`awacs.aws.Statement` or list): Either a single
Statement, or a list of statements.
"""
if isinstance(statements, Statement):
statements = [statements]
self._policy_statements.extend(statements)
|
python
|
{
"resource": ""
}
|
q8180
|
Function.generate_policy_statements
|
train
|
def generate_policy_statements(self):
"""Generates the policy statements for the role used by the function.
To add additional statements you can either override the
`extended_policy_statements` method to return a list of Statements
to be added to the policy, or override this method itself if you
need more control.
Returns:
list: A list of :class:`awacs.aws.Statement` objects.
"""
statements = self._policy_statements
statements.extend(
lambda_basic_execution_statements(
self.function.Ref()
)
)
extended_statements = self.extended_policy_statements()
if extended_statements:
statements.extend(extended_statements)
return statements
|
python
|
{
"resource": ""
}
|
q8181
|
GenericResourceCreator.setup_resource
|
train
|
def setup_resource(self):
""" Setting Up Resource """
template = self.template
variables = self.get_variables()
tclass = variables['Class']
tprops = variables['Properties']
output = variables['Output']
klass = load_object_from_string('troposphere.' + tclass)
instance = klass.from_dict('ResourceRefName', tprops)
template.add_resource(instance)
template.add_output(Output(
output,
Description="A reference to the object created in this blueprint",
Value=Ref(instance)
))
|
python
|
{
"resource": ""
}
|
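A hypothetical variables dict illustrating what setup_resource expects; the class path, property, and output name are examples only.

variables = {
    "Class": "s3.Bucket",  # resolved to troposphere.s3.Bucket
    "Properties": {"BucketName": "my-example-bucket"},
    "Output": "BucketId",  # output name holding a Ref to the resource
}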
q8182
|
kms_key_policy
|
train
|
def kms_key_policy():
""" Creates a key policy for use of a KMS Key. """
statements = []
statements.extend(kms_key_root_statements())
return Policy(
Version="2012-10-17",
Id="root-account-access",
Statement=statements
)
|
python
|
{
"resource": ""
}
|
q8183
|
logstream_policy
|
train
|
def logstream_policy():
"""Policy needed for logspout -> kinesis log streaming."""
p = Policy(
Statement=[
Statement(
Effect=Allow,
Resource=["*"],
Action=[
kinesis.CreateStream, kinesis.DescribeStream,
Action(kinesis.prefix, "AddTagsToStream"),
Action(kinesis.prefix, "PutRecords")
])])
return p
|
python
|
{
"resource": ""
}
|
q8184
|
runlogs_policy
|
train
|
def runlogs_policy(log_group_ref):
"""Policy needed for Empire -> Cloudwatch logs to record run output."""
p = Policy(
Statement=[
Statement(
Effect=Allow,
Resource=[
Join('', [
'arn:aws:logs:*:*:log-group:',
log_group_ref,
':log-stream:*'])],
Action=[
logs.CreateLogStream,
logs.PutLogEvents,
])])
return p
|
python
|
{
"resource": ""
}
|
q8185
|
check_properties
|
train
|
def check_properties(properties, allowed_properties, resource):
"""Checks the list of properties in the properties variable against the
property list provided by the allowed_properties variable. If any property
does not match the properties in allowed_properties, a ValueError is
raised to prevent unexpected behavior when creating resources.
properties: The config (as dict) provided by the configuration file
allowed_properties: A list of strings representing the available params
for a resource.
resource: A string naming the resource in question for the error
message.
"""
for key in properties.keys():
if key not in allowed_properties:
raise ValueError(
"%s is not a valid property of %s" % (key, resource)
)
|
python
|
{
"resource": ""
}
|
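A minimal illustration of the check; the resource and property names are invented.

allowed = ["Name", "Type", "Ttl"]
check_properties({"Name": "www", "Type": "A"}, allowed, "RecordSet")  # passes
check_properties({"Nmae": "www"}, allowed, "RecordSet")
# raises ValueError: Nmae is not a valid property of RecordSet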
q8186
|
merge_tags
|
train
|
def merge_tags(left, right, factory=Tags):
"""
Merge two sets of tags into a new troposphere object
Args:
left (Union[dict, troposphere.Tags]): dictionary or Tags object to be
merged with lower priority
right (Union[dict, troposphere.Tags]): dictionary or Tags object to be
merged with higher priority
factory (type): Type of object to create. Defaults to the troposphere
Tags class.
"""
if isinstance(left, Mapping):
tags = dict(left)
elif hasattr(left, 'tags'):
tags = _tags_to_dict(left.tags)
else:
tags = _tags_to_dict(left)
if isinstance(right, Mapping):
tags.update(right)
elif hasattr(right, 'tags'):
tags.update(_tags_to_dict(right.tags))
else:
tags.update(_tags_to_dict(right))
return factory(**tags)
|
python
|
{
"resource": ""
}
|
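A sketch of the merge semantics, assuming troposphere is installed: keys from `right` win.

merged = merge_tags({"env": "dev", "team": "infra"}, {"env": "prod"})
# merged is a troposphere Tags object equivalent to
# Tags(env="prod", team="infra"); the lower-priority dev value was overridden.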
q8187
|
get_record_set_md5
|
train
|
def get_record_set_md5(rs_name, rs_type):
"""Accept record_set Name and Type. Return MD5 sum of these values."""
rs_name = rs_name.lower()
rs_type = rs_type.upper()
# Make A and CNAME records hash to same sum to support updates.
rs_type = "ACNAME" if rs_type in ["A", "CNAME"] else rs_type
return md5(rs_name + rs_type).hexdigest()
|
python
|
{
"resource": ""
}
|
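Because names are lower-cased and A/CNAME collapse to one type, these hash identically, letting an update switch record type or case in place (note the function targets Python 2, where md5 accepts str):

a = get_record_set_md5("www.Example.com", "A")
b = get_record_set_md5("www.example.com", "CNAME")
assert a == b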
q8188
|
DNSRecords.add_hosted_zone_id_for_alias_target_if_missing
|
train
|
def add_hosted_zone_id_for_alias_target_if_missing(self, rs):
"""Add proper hosted zone id to record set alias target if missing."""
alias_target = getattr(rs, "AliasTarget", None)
if alias_target:
hosted_zone_id = getattr(alias_target, "HostedZoneId", None)
if not hosted_zone_id:
dns_name = alias_target.DNSName
if dns_name.endswith(CF_DOMAIN):
alias_target.HostedZoneId = CLOUDFRONT_ZONE_ID
elif dns_name.endswith(ELB_DOMAIN):
region = dns_name.split('.')[-5]
alias_target.HostedZoneId = ELB_ZONE_IDS[region]
elif dns_name in S3_WEBSITE_ZONE_IDS:
alias_target.HostedZoneId = S3_WEBSITE_ZONE_IDS[dns_name]
else:
alias_target.HostedZoneId = self.hosted_zone_id
return rs
|
python
|
{
"resource": ""
}
|
q8189
|
DNSRecords.create_record_sets
|
train
|
def create_record_sets(self, record_set_dicts):
"""Accept list of record_set dicts.
Return list of record_set objects."""
record_set_objects = []
for record_set_dict in record_set_dicts:
# pop removes the 'Enabled' key and tests if True.
if record_set_dict.pop('Enabled', True):
record_set_objects.append(
self.create_record_set(record_set_dict)
)
return record_set_objects
|
python
|
{
"resource": ""
}
|
q8190
|
DNSRecords.create_record_set_groups
|
train
|
def create_record_set_groups(self, record_set_group_dicts):
"""Accept list of record_set_group dicts.
Return list of record_set_group objects."""
record_set_groups = []
for name, group in record_set_group_dicts.iteritems():
# pop removes the 'Enabled' key and tests if True.
if group.pop('Enabled', True):
record_set_groups.append(
self.create_record_set_group(name, group)
)
return record_set_groups
|
python
|
{
"resource": ""
}
|
q8191
|
read_only_s3_bucket_policy_statements
|
train
|
def read_only_s3_bucket_policy_statements(buckets, folder="*"):
""" Read only policy an s3 bucket. """
list_buckets = [s3_arn(b) for b in buckets]
object_buckets = [s3_objects_arn(b, folder) for b in buckets]
bucket_resources = list_buckets + object_buckets
return [
Statement(
Effect=Allow,
Resource=[s3_arn("*")],
Action=[s3.ListAllMyBuckets]
),
Statement(
Effect=Allow,
Resource=bucket_resources,
Action=[Action('s3', 'Get*'), Action('s3', 'List*')]
)
]
|
python
|
{
"resource": ""
}
|
q8192
|
lambda_vpc_execution_statements
|
train
|
def lambda_vpc_execution_statements():
"""Allow Lambda to manipuate EC2 ENIs for VPC support."""
return [
Statement(
Effect=Allow,
Resource=['*'],
Action=[
ec2.CreateNetworkInterface,
ec2.DescribeNetworkInterfaces,
ec2.DeleteNetworkInterface,
]
)
]
|
python
|
{
"resource": ""
}
|
q8193
|
dynamodb_autoscaling_policy
|
train
|
def dynamodb_autoscaling_policy(tables):
"""Policy to allow AutoScaling a list of DynamoDB tables."""
return Policy(
Statement=[
Statement(
Effect=Allow,
Resource=dynamodb_arns(tables),
Action=[
dynamodb.DescribeTable,
dynamodb.UpdateTable,
]
),
Statement(
Effect=Allow,
Resource=['*'],
Action=[
cloudwatch.PutMetricAlarm,
cloudwatch.DescribeAlarms,
cloudwatch.GetMetricStatistics,
cloudwatch.SetAlarmState,
cloudwatch.DeleteAlarms,
]
),
]
)
|
python
|
{
"resource": ""
}
|
q8194
|
HEALPix.interpolate_bilinear_skycoord
|
train
|
def interpolate_bilinear_skycoord(self, skycoord, values):
"""
Interpolate values at specific celestial coordinates using bilinear interpolation.
If a position does not have four neighbours, this currently returns NaN.
Note that this method requires that a celestial frame was specified when
initializing HEALPix. If you don't know or need the celestial frame, you
can instead use :meth:`~astropy_healpix.HEALPix.interpolate_bilinear_lonlat`.
Parameters
----------
skycoord : :class:`~astropy.coordinates.SkyCoord`
The celestial coordinates at which to interpolate
values : `~numpy.ndarray`
1-D array with the values in each HEALPix pixel. This must have a
length of the form 12 * nside ** 2 (and nside is determined
automatically from this).
Returns
-------
result : `~numpy.ndarray`
1-D array of interpolated values
"""
if self.frame is None:
raise NoFrameError("interpolate_bilinear_skycoord")
skycoord = skycoord.transform_to(self.frame)
representation = skycoord.represent_as(UnitSphericalRepresentation)
lon, lat = representation.lon, representation.lat
return self.interpolate_bilinear_lonlat(lon, lat, values)
|
python
|
{
"resource": ""
}
|
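A usage sketch, assuming astropy and astropy-healpix are installed; nside and the sample coordinates are arbitrary.

import numpy as np
from astropy.coordinates import SkyCoord, Galactic
from astropy_healpix import HEALPix

hp = HEALPix(nside=16, order='nested', frame=Galactic())
values = np.random.random(hp.npix)  # one value per pixel, 12 * nside ** 2
coords = SkyCoord(l=[30, 120], b=[10, -45], unit='deg', frame='galactic')
print(hp.interpolate_bilinear_skycoord(coords, values))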
q8195
|
HEALPix.cone_search_skycoord
|
train
|
def cone_search_skycoord(self, skycoord, radius):
"""
Find all the HEALPix pixels within a given radius of a celestial position.
Note that this returns all pixels that overlap, including partially,
with the search cone. This function can only be used for a single
celestial position at a time, since different calls to the function may
result in a different number of matches.
This method requires that a celestial frame was specified when
initializing HEALPix. If you don't know or need the celestial frame,
you can instead use :meth:`~astropy_healpix.HEALPix.cone_search_lonlat`.
Parameters
----------
skycoord : :class:`~astropy.coordinates.SkyCoord`
The celestial coordinates to use for the cone search
radius : :class:`~astropy.units.Quantity`
The search radius
Returns
-------
healpix_index : `~numpy.ndarray`
1-D array with all the matching HEALPix pixel indices.
"""
if self.frame is None:
raise NoFrameError("cone_search_skycoord")
skycoord = skycoord.transform_to(self.frame)
representation = skycoord.represent_as(UnitSphericalRepresentation)
lon, lat = representation.lon, representation.lat
return self.cone_search_lonlat(lon, lat, radius)
|
python
|
{
"resource": ""
}
|
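A companion sketch for the cone search, under the same assumptions:

from astropy import units as u
from astropy.coordinates import SkyCoord, ICRS
from astropy_healpix import HEALPix

hp = HEALPix(nside=16, order='nested', frame=ICRS())
center = SkyCoord(ra=10 * u.deg, dec=20 * u.deg, frame='icrs')
print(hp.cone_search_skycoord(center, radius=1 * u.deg))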
q8196
|
HEALPix.boundaries_skycoord
|
train
|
def boundaries_skycoord(self, healpix_index, step):
"""
Return the celestial coordinates of the edges of HEALPix pixels
This returns the celestial coordinates of points along the edge of each
HEALPix pixel. The number of points returned for each pixel is ``4 * step``,
so setting ``step`` to 1 returns just the corners.
This method requires that a celestial frame was specified when
initializing HEALPix. If you don't know or need the celestial frame,
you can instead use :meth:`~astropy_healpix.HEALPix.boundaries_lonlat`.
Parameters
----------
healpix_index : `~numpy.ndarray`
1-D array of HEALPix pixels
step : int
The number of steps to take along each edge.
Returns
-------
skycoord : :class:`~astropy.coordinates.SkyCoord`
The celestial coordinates of the HEALPix pixel boundaries
"""
if self.frame is None:
raise NoFrameError("boundaries_skycoord")
lon, lat = self.boundaries_lonlat(healpix_index, step)
representation = UnitSphericalRepresentation(lon, lat, copy=False)
return SkyCoord(self.frame.realize_frame(representation))
|
python
|
{
"resource": ""
}
|
q8197
|
level_to_nside
|
train
|
def level_to_nside(level):
"""
Find the pixel dimensions of the top-level HEALPix tiles.
This is given by ``nside = 2**level``.
Parameters
----------
level : int
The resolution level
Returns
-------
nside : int
The number of pixels on the side of one of the 12 'top-level' HEALPix tiles.
"""
level = np.asarray(level, dtype=np.int64)
_validate_level(level)
return 2 ** level
|
python
|
{
"resource": ""
}
|
q8198
|
nside_to_level
|
train
|
def nside_to_level(nside):
"""
Find the HEALPix level for a given nside.
This is given by ``level = log2(nside)``.
This function is the inverse of `level_to_nside`.
Parameters
----------
nside : int
The number of pixels on the side of one of the 12 'top-level' HEALPix tiles.
Must be a power of two.
Returns
-------
level : int
The level of the HEALPix cells
"""
nside = np.asarray(nside, dtype=np.int64)
_validate_nside(nside)
return np.log2(nside).astype(np.int64)
|
python
|
{
"resource": ""
}
|
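level_to_nside and nside_to_level are inverses; a quick roundtrip check:

import numpy as np

level = np.arange(4)
nside = level_to_nside(level)            # [1, 2, 4, 8]
assert np.all(nside_to_level(nside) == level)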
q8199
|
level_ipix_to_uniq
|
train
|
def level_ipix_to_uniq(level, ipix):
"""
Convert a level and HEALPix index into a uniq number representing the cell.
This function is the inverse of `uniq_to_level_ipix`.
Parameters
----------
level : int
The level of the HEALPix cell
ipix : int
The index of the HEALPix cell
Returns
-------
uniq : int
The uniq number representing the HEALPix cell.
"""
level = np.asarray(level, dtype=np.int64)
ipix = np.asarray(ipix, dtype=np.int64)
_validate_level(level)
_validate_npix(level, ipix)
return ipix + (1 << 2*(level + 1))
|
python
|
{
"resource": ""
}
|
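Worked values for the uniq encoding, uniq = ipix + 4 ** (level + 1):

print(level_ipix_to_uniq(0, 0))    # 4   (0 + 4**1)
print(level_ipix_to_uniq(1, 3))    # 19  (3 + 4**2)
print(level_ipix_to_uniq(2, 100))  # 164 (100 + 4**3)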