code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
|---|---|---|---|---|---|
appender = self.default
for matcher, candidate_appender in self.appenders:
if matcher == content.value:
appender = candidate_appender
break
appender.append(parent, content)
|
def append(self, parent, content)
|
Select an appender and append the content to parent.
@param parent: A parent node.
@type parent: L{Element}
@param content: The content to append.
@type content: L{Content}
| 6.239983
| 6.250497
| 0.998318
|
result = None
name = parts[0]
log.debug('searching schema for (%s)', name)
qref = self.qualify(parts[0])
query = BlindQuery(qref)
result = query.execute(self.schema)
if result is None:
log.error('(%s) not-found', name)
raise PathResolver.BadPath(name)
log.debug('found (%s) as (%s)', name, Repr(result))
return result
|
def root(self, parts)
|
Find the path root.
@param parts: A list of path parts.
@type parts: [str,..]
@return: The root.
@rtype: L{xsd.sxbase.SchemaObject}
| 6.23646
| 5.938082
| 1.050248
|
result = root
for part in parts[1:-1]:
name = splitPrefix(part)[1]
log.debug('searching parent (%s) for (%s)', Repr(result), name)
result, ancestry = result.get_child(name)
if result is None:
log.error('(%s) not-found', name)
raise PathResolver.BadPath(name)
result = result.resolve(nobuiltin=True)
log.debug('found (%s) as (%s)', name, Repr(result))
return result
|
def branch(self, root, parts)
|
Traverse the path until a leaf is reached.
@param parts: A list of path parts.
@type parts: [str,..]
@param root: The root.
@type root: L{xsd.sxbase.SchemaObject}
@return: The end of the branch.
@rtype: L{xsd.sxbase.SchemaObject}
| 5.862153
| 5.759201
| 1.017876
|
log.debug('searching parent (%s) for (%s)', Repr(parent), name)
if name.startswith('@'):
return parent.get_attribute(name[1:])
return parent.get_child(name)
|
def getchild(self, name, parent)
|
Get a child by name.
| 6.188291
| 6.160465
| 1.004517
|
def __path_prefix(self, folder):
    """Return the archive path prefix for items from the given folder.

    The folder is expected to be located under the base folder, and the
    returned prefix excludes the base folder itself so only the base
    folder's content gets included in the archive.
    """
    folder_parts = path_iter(folder)
    _skip_expected(folder_parts, self.__base_folder_parts)
    prefix = "/".join(folder_parts)
    if not prefix:
        return prefix
    return prefix + "/"
|
Path prefix to be used when archiving any items from the given folder.
Expects the folder to be located under the base folder path and the
returned path prefix does not include the base folder information. This
makes sure we include just the base folder's content in the archive,
and not the base folder itself.
| null | null | null |
|
value = XDecimal._decimal_canonical(value)
negative, digits, exponent = value.as_tuple()
# The following implementation assumes the following tuple decimal
# encoding (part of the canonical decimal value encoding):
# - digits must contain at least one element
# - no leading integral 0 digits except a single one in 0 (if a non-0
# decimal value has leading integral 0 digits they must be encoded
# in its 'exponent' value and not included explicitly in its
# 'digits' tuple)
assert digits
assert digits[0] != 0 or len(digits) == 1
result = []
if negative:
result.append("-")
# No fractional digits.
if exponent >= 0:
result.extend(str(x) for x in digits)
result.extend("0" * exponent)
return "".join(result)
digit_count = len(digits)
# Decimal point offset from the given digit start.
point_offset = digit_count + exponent
# Trim trailing fractional 0 digits.
fractional_digit_count = min(digit_count, -exponent)
while fractional_digit_count and digits[digit_count - 1] == 0:
digit_count -= 1
fractional_digit_count -= 1
# No trailing fractional 0 digits and a decimal point coming not after
# the given digits, meaning there is no need to add additional trailing
# integral 0 digits.
if point_offset <= 0:
# No integral digits.
result.append("0")
if digit_count > 0:
result.append(".")
result.append("0" * -point_offset)
result.extend(str(x) for x in digits[:digit_count])
else:
# Have integral and possibly some fractional digits.
result.extend(str(x) for x in digits[:point_offset])
if point_offset < digit_count:
result.append(".")
result.extend(str(x) for x in digits[point_offset:digit_count])
return "".join(result)
|
def _decimal_to_xsd_format(value)
|
Converts a decimal.Decimal value to its XSD decimal type value.
Result is a string containing the XSD decimal type's lexical value
representation. The conversion is done without any precision loss.
Note that Python's native decimal.Decimal string representation will
not do here as the lexical representation desired here does not allow
representing decimal values using float-like `<mantissa>E<exponent>'
format, e.g. 12E+30 or 0.10006E-12.
| 4.230563
| 4.266495
| 0.991578
|
fn = cls.tags.get(name, XBuiltin)
return fn(schema, name)
|
def create(cls, schema, name)
|
Create an object based on the root tag name.
@param schema: A schema object.
@type schema: L{schema.Schema}
@param name: The name.
@type name: str
@return: The created object.
@rtype: L{XBuiltin}
| 16.37369
| 8.889342
| 1.841946
|
def _avoid_setuptools_zipped_egg_upgrade_issue(env, ez_setup):
    """Avoid the setuptools zipped egg self-upgrade issue.

    setuptools versions prior to 3.5.2 can fail when installing a new zipped
    egg distribution over an existing zipped egg setuptools distribution
    with the same archive name. Only Python 2.5.x environments are affected,
    so when the issue would trigger, the previously installed zipped egg is
    removed up front.
    """
    if env.sys_version_info[:2] != (2, 5):
        return  # only Python 2.5.x affected by this
    if not env.setuptools_zipped_egg:
        return  # setuptools not pre-installed as a zipped egg
    pv_new = parse_version(ez_setup.setuptools_version())
    if pv_new != parse_version(env.setuptools_version):
        return  # issue avoided since zipped egg archive names will not match
    fixed_version = utility.lowest_version_string_with_prefix("3.5.2")
    if pv_new >= parse_version(fixed_version):
        return  # issue fixed in setuptools
    # We could check for pip and use it for a cleaner setuptools uninstall if
    # available, but YAGNI since only Python 2.5.x environments are affected by
    # the zipped egg upgrade issue.
    os.remove(env.setuptools_zipped_egg)
|
Avoid the setuptools self-upgrade issue.
setuptools versions prior to version 3.5.2 have a bug that can cause their
upgrade installations to fail when installing a new zipped egg distribution
over an existing zipped egg setuptools distribution with the same name.
The following Python versions are not affected by this issue:
Python 2.4 - use setuptools 1.4.2 - installs itself as a non-zipped egg
Python 2.6+ - use setuptools versions not affected by this issue
That just leaves Python versions 2.5.x to worry about.
This problem occurs because of an internal stale cache issue causing the
upgrade to read data from the new zip archive at a location calculated
based on the original zip archive's content, effectively causing such read
operations to either succeed (if read content had not changed its
location), fail with a 'bad local header' exception or even fail silently
and return incorrect data.
To avoid the issue, we explicitly uninstall the previously installed
setuptools distribution before installing its new version.
| null | null | null |
|
def _reuse_pre_installed_setuptools(env, installer):
    """Return whether a pre-installed setuptools distribution may be reused.

    Prints a notification and returns True when the configuration allows
    reusing the environment's pre-installed setuptools; returns None
    otherwise (including when no setuptools is pre-installed).
    """
    if not env.setuptools_version:
        return  # no prior setuptools ==> no reuse
    allow_old = config.reuse_old_setuptools
    allow_best = config.reuse_best_setuptools
    allow_future = config.reuse_future_setuptools
    comment = None
    if allow_old or allow_best or allow_future:
        pv_have = parse_version(env.setuptools_version)
        pv_want = parse_version(installer.setuptools_version())
        if pv_have < pv_want:
            if allow_old:
                comment = "%s+ recommended" % (installer.setuptools_version(),)
        elif pv_have > pv_want:
            if allow_future:
                comment = "%s+ required" % (installer.setuptools_version(),)
        elif allow_best:
            comment = ""
    if comment is None:
        return  # reuse not allowed by configuration
    suffix = ""
    if comment:
        suffix = " (%s)" % (comment,)
    print("Reusing pre-installed setuptools %s distribution%s." % (
        env.setuptools_version, suffix))
    return True
|
Return whether a pre-installed setuptools distribution should be reused.
| null | null | null |
|
def download_pip(env, requirements):
    """Download pip and its requirements using setuptools.

    Fetches the given requirements into the local installation cache folder
    (which must be configured) without installing them into the environment.
    """
    if config.installation_cache_folder() is None:
        raise EnvironmentSetupError("Local installation cache folder not "
            "defined but required for downloading a pip installation.")
    # The installation cache folder must be created up front or setuptools
    # will be unable to copy its downloaded installation files into it. Seen
    # using Python 2.4.4 & setuptools 1.4.
    _create_installation_cache_folder_if_needed()
    try:
        easy_install_command = ["-m", "easy_install", "--zip-ok",
            "--multi-version", "--always-copy", "--exclude-scripts",
            "--install-dir", config.installation_cache_folder()]
        env.execute(easy_install_command + requirements)
        zip_eggs_in_folder(config.installation_cache_folder())
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        raise EnvironmentSetupError("pip download failed.")
|
Download pip and its requirements using setuptools.
| null | null | null |
|
def setuptools_install_options(local_storage_folder):
    """Return options making setuptools install only from the given folder.

    Returns an empty option list when no local storage folder is given.
    Otherwise the returned options point setuptools' find-links at the
    folder and disallow every other installation source.
    """
    if local_storage_folder is None:
        return []
    # setuptools expects its find-links parameter to contain a list of link
    # sources (either local paths, file: URLs pointing to folders or URLs
    # pointing to a file containing HTML links) separated by spaces. That means
    # that, when specifying such items, whether local paths or URLs, they must
    # not contain spaces. The problem can be worked around by using a local
    # file URL, since URLs can contain space characters encoded as '%20' (for
    # more detailed information see below).
    #
    # Any URL referencing a folder needs to be specified with a trailing '/'
    # character in order for setuptools to correctly recognize it as a folder.
    #
    # All this has been tested using Python 2.4.3/2.4.4 & setuptools 1.4/1.4.2
    # as well as Python 3.4 & setuptools 3.3.
    #
    # Supporting paths with spaces - method 1:
    # ----------------------------------------
    # One way would be to prepare a link file and pass an URL referring to that
    # link file. The link file needs to contain a list of HTML link tags
    # (<a href="..."/>), one for every item stored inside the local storage
    # folder. If a link file references a folder whose name matches the desired
    # requirement name, it will be searched recursively (as described in method
    # 2 below).
    #
    # Note that in order for setuptools to recognize a local link file URL
    # correctly, the file needs to be named with the '.html' extension. That
    # will cause the underlying urllib2.open() operation to return the link
    # file's content type as 'text/html' which is required for setuptools to
    # recognize a valid link file.
    #
    # Supporting paths with spaces - method 2:
    # ----------------------------------------
    # Another possible way is to use an URL referring to the local storage
    # folder directly. This will cause setuptools to prepare and use a link
    # file internally - with its content read from a 'index.html' file located
    # in the given local storage folder, if it exists, or constructed so it
    # contains HTML links to all top-level local storage folder items, as
    # described for method 1 above.
    if " " in local_storage_folder:
        find_links_param = utility.path_to_URL(local_storage_folder)
        if find_links_param[-1] != "/":
            find_links_param += "/"
    else:
        find_links_param = local_storage_folder
    return ["-f", find_links_param, "--allow-hosts=None"]
|
Return options to make setuptools use installations from the given folder.
No other installation source is allowed.
| null | null | null |
|
def install_pip(env, requirements):
    """Install pip and its requirements using setuptools.

    When a local installation cache folder is configured, installations are
    taken exclusively from it; no other installation source is allowed.
    """
    try:
        source_folder = config.installation_cache_folder()
        extra_options = setuptools_install_options(source_folder)
        if source_folder is not None:
            zip_eggs_in_folder(source_folder)
        command = ["-m", "easy_install"] + extra_options + requirements
        env.execute(command)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        raise EnvironmentSetupError("pip installation failed.")
|
Install pip and its requirements using setuptools.
| null | null | null |
|
def prepare_pip_requirements_file_if_needed(requirements):
    """Make requirements be passed to pip via a requirements file if needed.

    Requirement specifications containing shell operator characters (e.g.
    '<', '>', '|', '(', ')', '&' or '^') could get mangled when the pip
    command is run through an intermediate shell interpreter, so such
    requirements are rewritten in place to reference a generated
    requirements file. Returns the janitor guarding that file, or None when
    no file was needed.
    """
    if not utility.any_contains_any(requirements, "<>|()&^"):
        return None
    file_path, janitor = pip_requirements_file(requirements)
    requirements[:] = ["-r", file_path]
    return janitor
|
Make requirements be passed to pip via a requirements file if needed.
We must be careful about how we pass shell operator characters (e.g. '<',
'>', '|' or '^') included in our command-line arguments or they might cause
problems if run through an intermediate shell interpreter. If our pip
requirement specifications contain such characters, we pass them using a
separate requirements file.
This problem has been encountered on Windows 7 SP1 x64 using Python 2.4.3,
2.4.4 & 2.5.4.
| null | null | null |
|
def download_pip_based_installations(env, pip_invocation, requirements,
        download_cache_folder):
    """Download requirements for pip based installation.

    The downloaded installations get stored into the local installation
    cache folder, which must be configured or an EnvironmentSetupError is
    raised.
    """
    if config.installation_cache_folder() is None:
        raise EnvironmentSetupError("Local installation cache folder not "
            "defined but required for downloading pip based installations.")
    # Installation cache folder needs to be explicitly created for pip to be
    # able to copy its downloaded installation files into it. The same does not
    # hold for pip's download cache folder which gets created by pip on-demand.
    # Seen using Python 3.4.0 & pip 1.5.4.
    _create_installation_cache_folder_if_needed()
    try:
        pip_options = ["install", "-d", config.installation_cache_folder(),
            "--exists-action=i"]
        pip_options.extend(pip_download_cache_options(download_cache_folder))
        # Running pip based installations on Python 2.5.
        #  * Python 2.5 does not come with SSL support enabled by default and
        #    so pip can not use SSL certified downloads from PyPI.
        #  * To work around this either install the
        #    https://pypi.python.org/pypi/ssl package or run pip using the
        #    '--insecure' command-line options.
        #  * Installing the ssl package seems ridden with problems on
        #    Python 2.5 so this workaround has not been tested.
        if (2, 5) <= env.sys_version_info < (2, 6):
            # There are some potential cases where we do not need to use
            # "--insecure", e.g. if the target Python environment already has
            # the 'ssl' module installed. However, detecting whether this is so
            # does not seem to be worth the effort. The only way to detect
            # whether secure download is supported would be to scan the target
            # environment for this information, e.g. setuptools has this
            # information in its pip.backwardcompat.ssl variable - if it is
            # None, the necessary SSL support is not available. But then we
            # would have to be careful:
            #  - not to run the scan if we already know this information from
            #    some previous scan
            #  - to track all actions that could have invalidated our previous
            #    scan results, etc.
            # It just does not seem to be worth the hassle so for now - YAGNI.
            pip_options.append("--insecure")
        env.execute(pip_invocation + pip_options + requirements)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        raise EnvironmentSetupError("pip based download failed.")
|
Download requirements for pip based installation.
| null | null | null |
|
def enabled_actions_for_env(env):
    """Return the set of action names to perform for the given environment.

    Combines the global configuration's tri-state action settings with what
    the environment actually requires, e.g. setting up setuptools is needed
    on demand when a pip installation is to be run.
    """
    def enabled(config_value, required):
        # Resolve a tri-state configuration value: Yes/No are explicit,
        # while IfNeeded enables the action only when actually required.
        if config_value is Config.TriBool.No:
            return False
        if config_value is Config.TriBool.Yes:
            return True
        assert config_value is Config.TriBool.IfNeeded
        return bool(required)
    # Some old Python versions do not support HTTPS downloads and therefore can
    # not download installation packages from PyPI. To run setuptools or pip
    # based installations on such Python versions, all the required
    # installation packages need to be downloaded locally first using a
    # compatible Python version (e.g. Python 2.4.4 for Python 2.4.3) and then
    # installed locally.
    download_supported = not ((2, 4, 3) <= env.sys_version_info < (2, 4, 4))
    local_install = config.installation_cache_folder() is not None
    actions = set()
    pip_required = False
    run_pip_based_installations = enabled(config.install_environments, True)
    if run_pip_based_installations:
        actions.add("run pip based installations")
        pip_required = True
    if download_supported and enabled(config.download_installations,
            local_install and run_pip_based_installations):
        actions.add("download pip based installations")
        pip_required = True
    setuptools_required = False
    # NOTE(review): config.install_environments is consulted again here to
    # decide whether pip itself gets installed - presumably intentional (pip
    # is installed as part of setting up environments), but confirm there is
    # no separate 'install pip' configuration option this should read.
    run_pip_installation = enabled(config.install_environments, pip_required)
    if run_pip_installation:
        actions.add("run pip installation")
        setuptools_required = True
    if download_supported and enabled(config.download_installations,
            local_install and run_pip_installation):
        actions.add("download pip installation")
        setuptools_required = True
    if enabled(config.setup_setuptools, setuptools_required):
        actions.add("setup setuptools")
    return actions
|
Returns actions to perform when processing the given environment.
| null | null | null |
|
def __setuptools_version(self):
    """Read the setuptools version from the underlying ez_setup script.

    The script is scanned as plain text instead of being imported as a
    Python module and having its DEFAULT_VERSION global read, since not all
    ez_setup scripts are compatible with all Python environments, and
    importing one would therefore require running a separate process using
    the target Python environment instead of the current one.
    """
    version_pattern = re.compile(r'\s*DEFAULT_VERSION\s*=\s*"([^"]*)"\s*$')
    script = open(self.script_path(), "r")
    try:
        # DEFAULT_VERSION is expected near the top of the script, so only
        # its leading lines get scanned.
        for line_index, line in enumerate(script):
            if line_index > 50:
                break
            found = version_pattern.match(line)
            if found:
                return found.group(1)
    finally:
        script.close()
    self.__error("error parsing setuptools installation script '%s'" % (
        self.script_path(),))
|
Read setuptools version from the underlying ez_setup script.
| null | null | null |
|
if name is None:
raise Exception("name (%s) not-valid" % (name,))
self.prefix, self.name = splitPrefix(name)
|
def rename(self, name)
|
Rename the element.
@param name: A new name for the element.
@type name: basestring
| 9.15627
| 11.454983
| 0.799326
|
root = Element(self.qname(), parent, self.namespace())
for a in self.attributes:
root.append(a.clone(self))
for c in self.children:
root.append(c.clone(self))
for ns in self.nsprefixes.items():
root.addPrefix(ns[0], ns[1])
return root
|
def clone(self, parent=None)
|
Deep clone of this element and children.
@param parent: An optional parent for the copied fragment.
@type parent: I{Element}
@return: A deep copy parented by I{parent}
@rtype: I{Element}
| 3.084257
| 3.427585
| 0.899834
|
try:
attr = self.getAttribute(name)
self.attributes.remove(attr)
except Exception:
pass
return self
|
def unset(self, name)
|
Unset (remove) an attribute.
@param name: The attribute name.
@type name: str
@return: self
@rtype: L{Element}
| 4.704398
| 4.688157
| 1.003464
|
attr = self.getAttribute(name, ns)
if attr is None or attr.value is None:
return default
return attr.getValue()
|
def get(self, name, ns=None, default=None)
|
Get the value of an attribute by name.
@param name: The name of the attribute.
@type name: basestring
@param ns: The optional attribute's namespace.
@type ns: (I{prefix}, I{name})
@param default: An optional value to be returned when either the
attribute does not exist or has no value.
@type default: basestring
@return: The attribute's value or I{default}.
@rtype: basestring
@see: __getitem__()
| 4.344787
| 4.475583
| 0.970776
|
if self.prefix is None:
return self.defaultNamespace()
return self.resolvePrefix(self.prefix)
|
def namespace(self)
|
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
| 6.402407
| 5.231462
| 1.223828
|
p = self
while p is not None:
if p.expns is not None:
return None, p.expns
p = p.parent
return Namespace.default
|
def defaultNamespace(self)
|
Get the default (unqualified) namespace.
This is the expns of the first node (looking up the tree) that has it
set.
@return: The namespace of a node when not qualified.
@rtype: (I{prefix}, I{name})
| 5.842378
| 5.374129
| 1.08713
|
if not isinstance(objects, (list, tuple)):
objects = (objects,)
for child in objects:
if isinstance(child, Element):
self.children.append(child)
child.parent = self
continue
if isinstance(child, Attribute):
self.attributes.append(child)
child.parent = self
continue
raise Exception("append %s not-valid" %
(child.__class__.__name__,))
return self
|
def append(self, objects)
|
Append the specified child based on whether it is an element or an
attribute.
@param objects: A (single|collection) of attribute(s) or element(s) to
be added as children.
@type objects: (L{Element}|L{Attribute})
@return: self
@rtype: L{Element}
| 2.948928
| 2.809406
| 1.049662
|
objects = (objects,)
for child in objects:
if not isinstance(child, Element):
raise Exception("append %s not-valid" %
(child.__class__.__name__,))
self.children.insert(index, child)
child.parent = self
return self
|
def insert(self, objects, index=0)
|
Insert an L{Element} content at the specified index.
@param objects: A (single|collection) of attribute(s) or element(s) to
be added as children.
@type objects: (L{Element}|L{Attribute})
@param index: The position in the list of children to insert.
@type index: int
@return: self
@rtype: L{Element}
| 5.151193
| 6.471422
| 0.795991
|
if ns is None:
prefix, name = splitPrefix(name)
if prefix is not None:
ns = self.resolvePrefix(prefix)
for c in self.children:
if c.match(name, ns):
return c
return default
|
def getChild(self, name, ns=None, default=None)
|
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
| 3.391386
| 3.4041
| 0.996265
|
result = None
node = self
for name in path.split("/"):
if not name:
continue
ns = None
prefix, name = splitPrefix(name)
if prefix is not None:
ns = node.resolvePrefix(prefix)
result = node.getChild(name, ns)
if result is None:
return
node = result
return result
|
def childAtPath(self, path)
|
Get a child at I{path} where I{path} is a (/) separated list of element
names that are expected to be children.
@param path: A (/) separated list of element names.
@type path: basestring
@return: The leaf node at the end of I{path}.
@rtype: L{Element}
| 3.155971
| 3.091419
| 1.020881
|
parts = [p for p in path.split("/") if p]
if len(parts) == 1:
return self.getChildren(path)
return self.__childrenAtPath(parts)
|
def childrenAtPath(self, path)
|
Get a list of children at I{path} where I{path} is a (/) separated list
of element names expected to be children.
@param path: A (/) separated list of element names.
@type path: basestring
@return: The collection leaf nodes at the end of I{path}.
@rtype: [L{Element},...]
| 3.740963
| 4.528319
| 0.826126
|
if ns is None:
if name is None:
return self.children
prefix, name = splitPrefix(name)
if prefix is not None:
ns = self.resolvePrefix(prefix)
return [c for c in self.children if c.match(name, ns)]
|
def getChildren(self, name=None, ns=None)
|
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain a prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
| 3.161997
| 3.410241
| 0.927206
|
for c in self.children:
c.promotePrefixes()
if self.parent is None:
return
for p, u in self.nsprefixes.items():
if p in self.parent.nsprefixes:
pu = self.parent.nsprefixes[p]
if pu == u:
del self.nsprefixes[p]
continue
if p != self.parent.prefix:
self.parent.nsprefixes[p] = u
del self.nsprefixes[p]
return self
|
def promotePrefixes(self)
|
Push prefix declarations up the tree as far as possible.
Prefix mapping are pushed to its parent unless the parent has the
prefix mapped to another URI or the parent has the prefix. This is
propagated up the tree until the top is reached.
@return: self
@rtype: L{Element}
| 2.625695
| 2.495232
| 1.052285
|
nochildren = not self.children
notext = self.text is None
nocontent = nochildren and notext
if content:
return nocontent
noattrs = not len(self.attributes)
return nocontent and noattrs
|
def isempty(self, content=True)
|
Get whether the element has no children.
@param content: Test content (children & text) only.
@type content: boolean
@return: True when the element has no children.
@rtype: boolean
| 4.668394
| 5.551741
| 0.840888
|
nilattr = self.getAttribute("nil", ns=Namespace.xsins)
return nilattr is not None and (nilattr.getValue().lower() == "true")
|
def isnil(self)
|
Get whether the element is I{nil} as defined by having an
I{xsi:nil="true"} attribute.
@return: True if I{nil}, else False
@rtype: boolean
| 11.632788
| 11.515047
| 1.010225
|
p, u = Namespace.xsins
name = ":".join((p, "nil"))
self.set(name, str(flag).lower())
self.addPrefix(p, u)
if flag:
self.text = None
return self
|
def setnil(self, flag=True)
|
Set this node to I{nil} as defined by having an I{xsi:nil}=I{flag}
attribute.
@param flag: A flag indicating how I{xsi:nil} will be set.
@type flag: boolean
@return: self
@rtype: L{Element}
| 12.554523
| 9.340789
| 1.344054
|
if ns is None:
return
if not isinstance(ns, (list, tuple)):
raise Exception("namespace must be a list or a tuple")
if ns[0] is None:
self.expns = ns[1]
else:
self.prefix = ns[0]
self.nsprefixes[ns[0]] = ns[1]
|
def applyns(self, ns)
|
Apply the namespace to this node.
If the prefix is I{None} then this element's explicit namespace
I{expns} is set to the URI defined by I{ns}. Otherwise, the I{ns} is
simply mapped.
@param ns: A namespace.
@type ns: (I{prefix}, I{URI})
| 3.336041
| 2.790084
| 1.195678
|
tab = "%*s" % (indent * 3, "")
result = []
result.append("%s<%s" % (tab, self.qname()))
result.append(self.nsdeclarations())
for a in self.attributes:
result.append(" %s" % (unicode(a),))
if self.isempty():
result.append("/>")
return "".join(result)
result.append(">")
if self.hasText():
result.append(self.text.escape())
for c in self.children:
result.append("\n")
result.append(c.str(indent + 1))
if len(self.children):
result.append("\n%s" % (tab,))
result.append("</%s>" % (self.qname(),))
return "".join(result)
|
def str(self, indent=0)
|
Get a string representation of this XML fragment.
@param indent: The indent to be used in formatting the output.
@type indent: int
@return: A I{pretty} string.
@rtype: basestring
| 2.590039
| 2.520806
| 1.027465
|
result = ["<%s" % (self.qname(),), self.nsdeclarations()]
for a in self.attributes:
result.append(" %s" % (unicode(a),))
if self.isempty():
result.append("/>")
return "".join(result)
result.append(">")
if self.hasText():
result.append(self.text.escape())
for c in self.children:
result.append(c.plain())
result.append("</%s>" % (self.qname(),))
return "".join(result)
|
def plain(self)
|
Get a string representation of this XML fragment.
@return: A I{plain} string.
@rtype: basestring
| 2.758215
| 2.643489
| 1.0434
|
s = []
myns = None, self.expns
if self.parent is None:
pns = Namespace.default
else:
pns = None, self.parent.expns
if myns[1] != pns[1]:
if self.expns is not None:
s.append(' xmlns="%s"' % (self.expns,))
for item in self.nsprefixes.items():
p, u = item
if self.parent is not None:
ns = self.parent.resolvePrefix(p)
if ns[1] == u:
continue
s.append(' xmlns:%s="%s"' % (p, u))
return "".join(s)
|
def nsdeclarations(self)
|
Get a string representation for all namespace declarations as xmlns=""
and xmlns:p="".
@return: A separated list of declarations.
@rtype: basestring
| 3.808881
| 3.691981
| 1.031663
|
cache = self.__cache()
id = self.mangle(url, "wsdl")
wsdl = cache.get(id)
if wsdl is None:
wsdl = self.fn(url, self.options)
cache.put(id, wsdl)
else:
# Cached WSDL Definitions objects may have been created with
# different options so we update them here with our current ones.
wsdl.options = self.options
for imp in wsdl.imports:
imp.imported.options = self.options
return wsdl
|
def open(self, url)
|
Open a WSDL schema at the specified I{URL}.
First, the WSDL schema is looked up in the I{object cache}. If not
found, a new one constructed using the I{fn} factory function and the
result is cached for the next open().
@param url: A WSDL URL.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
| 6.010472
| 4.712884
| 1.275328
|
if self.options.cachingpolicy == 1:
return self.options.cache
return suds.cache.NoCache()
|
def __cache(self)
|
Get the I{object cache}.
@return: The I{cache} when I{cachingpolicy} = B{1}.
@rtype: L{Cache}
| 13.925153
| 6.659617
| 2.090984
|
cache = self.__cache()
id = self.mangle(url, "document")
xml = cache.get(id)
if xml is None:
xml = self.__fetch(url)
cache.put(id, xml)
self.plugins.document.parsed(url=url, document=xml.root())
return xml
|
def open(self, url)
|
Open an XML document at the specified I{URL}.
First, a preparsed document is looked up in the I{object cache}. If not
found, its content is fetched from an external source and parsed using
the SAX parser. The result is cached for the next open().
@param url: A document URL.
@type url: str.
@return: The specified XML document.
@rtype: I{Document}
| 6.260173
| 6.027931
| 1.038528
|
content = None
store = self.options.documentStore
if store is not None:
content = store.open(url)
if content is None:
request = suds.transport.Request(url)
fp = self.options.transport.open(request)
try:
content = fp.read()
finally:
fp.close()
ctx = self.plugins.document.loaded(url=url, document=content)
content = ctx.document
sax = suds.sax.parser.Parser()
return sax.parse(string=content)
|
def __fetch(self, url)
|
Fetch document content from an external source.
The document content will first be looked up in the registered document
store, and if not found there, downloaded using the registered
transport system.
Before being returned, the fetched document content first gets
processed by all the registered 'loaded' plugins.
@param url: A document URL.
@type url: str.
@return: A file pointer to the fetched document content.
@rtype: file-like
| 4.619967
| 4.235302
| 1.090823
|
if isinstance(s, basestring) and self.__needs_encoding(s):
for x in self.encodings:
s = re.sub(x[0], x[1], s)
return s
|
def encode(self, s)
|
Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str
| 3.7664
| 4.304301
| 0.875032
|
if isinstance(s, basestring):
for c in self.special:
if c in s:
return True
|
def __needs_encoding(self, s)
|
Get whether string I{s} contains special characters.
@param s: A string to check.
@type s: str
@return: True if needs encoding.
@rtype: boolean
| 4.330868
| 4.852478
| 0.892507
|
def virtualenv_requirements(version_info=sys.version_info):
    """Generate Python version specific virtualenv package requirements.

    Yields setuptools/pip compatible requirement specification strings
    matching the given Python version.
    """
    if version_info >= (2, 6):
        yield requirement_spec("virtualenv")
    elif version_info >= (2, 5):
        # 'virtualenv' release 1.10 dropped Python 2.5.x compatibility.
        yield requirement_spec("virtualenv",
            ("<", lowest_version_string_with_prefix("1.10")))
    else:
        # 'virtualenv' release 1.8 dropped Python 2.4.x compatibility.
        yield requirement_spec("virtualenv",
            ("<", lowest_version_string_with_prefix("1.8")))
|
Generate Python version specific virtualenv package requirements.
The requirements are returned as setuptools/pip compatible requirement
specification strings.
| null | null | null |
|
def __construct_python_version(self):
    """Construct a setuptools compatible Python version string.

    Constructed based on the environment's reported sys.version_info and
    stored as self.python_version.
    """
    major, minor, micro, release_level, serial = self.sys_version_info
    # sys.version_info reports its release level as "alpha" (never "alfa"),
    # so accept the correct spelling; the legacy "alfa" spelling is kept for
    # backward compatibility with previously stored version data.
    assert release_level in ("alfa", "alpha", "beta", "candidate", "final")
    assert release_level != "final" or serial == 0
    parts = [str(major), ".", str(minor), ".", str(micro)]
    if release_level != "final":
        parts.append(release_level[0])
        parts.append(str(serial))
    self.python_version = "".join(parts)
|
Construct a setuptools compatible Python version string.
Constructed based on the environment's reported sys.version_info.
| null | null | null |
|
def __parse_scanned_version_info(self):
    """Parse the environment's formatted version info string.

    On success stores the parsed tuple on self.sys_version_info and
    constructs self.python_version; raises BadEnvironment otherwise.

    """
    string = self.sys_version_info_formatted
    try:
        major, minor, micro, release_level, serial = string.split(",")
        if (release_level in ("alfa", "beta", "candidate", "final") and
            (release_level != "final" or serial == "0") and
            major.isdigit() and  # --- --- --- --- --- --- --- --- ---
            minor.isdigit() and  # Explicit isdigit() checks to detect
            micro.isdigit() and  # leading/trailing whitespace.
            serial.isdigit()):   # --- --- --- --- --- --- --- --- ---
            self.sys_version_info = (int(major), int(minor), int(micro),
                release_level, int(serial))
            self.__construct_python_version()
            return
    except (KeyboardInterrupt, SystemExit):
        # Never swallow process-control exceptions.
        raise
    except Exception:
        # Any parse failure falls through to the BadEnvironment below.
        pass
    raise BadEnvironment("Unsupported Python version (%s)" % (string,))
|
Parses the environment's formatted version info string.
| null | null | null |
|
# Pretty-print dictionary d at indent level n; h is the history stack of
# containers currently being printed, used to cut recursion cycles.
if d in h:
    # Already being printed further up the recursion - elide.
    return "{}..."
h.append(d)
s = []
if nl:
    s.append("\n")
    s.append(self.indent(n))
s.append("{")
for item in d.items():
    s.append("\n")
    s.append(self.indent(n+1))
    if isinstance(item[1], (list,tuple)):
        # Collection values are rendered as 'key[] = ...'.
        s.append(tostr(item[0]))
        s.append("[]")
    else:
        s.append(tostr(item[0]))
    s.append(" = ")
    s.append(self.process(item[1], h, n, True))
s.append("\n")
s.append(self.indent(n))
s.append("}")
h.pop()
return "".join(s)
|
def print_dictionary(self, d, h, n, nl=False)
|
Print complex using the specified indent (n) and newline (nl).
| 2.327533
| 2.276451
| 1.02244
|
# Pretty-print collection c at indent level n; h is the history stack
# used to cut recursion cycles.
if c in h:
    return "[]..."
h.append(c)
s = []
for item in c:
    s.append("\n")
    s.append(self.indent(n))
    s.append(self.process(item, h, n - 2))
    s.append(",")
h.pop()
return "".join(s)
|
def print_collection(self, c, h, n)
|
Print collection using the specified indent (n) and newline (nl).
| 3.755042
| 3.703609
| 1.013887
|
# Create an XML node for content, namespace-qualified when the schema
# element declares qualified form.
ns = content.type.namespace()
if content.type.form_qualified:
    node = Element(content.tag, ns=ns)
    if ns[0]:
        # Declare the prefix mapping on the node itself.
        node.addPrefix(ns[0], ns[1])
else:
    node = Element(content.tag)
self.encode(node, content)
log.debug("created - node:\n%s", node)
return node
|
def node(self, content)
|
Create an XML node.
The XML node is namespace qualified as defined by the corresponding
schema element.
| 5.517968
| 4.880477
| 1.130621
|
# Add xsi:type (SOAP encoding) information to node, but only when the
# real type is derived by extension and differs from the declared type.
if content.type.any():
    return
if not content.real.extension():
    return
if content.type.resolve() == content.real:
    return
ns = None
name = content.real.name
if self.xstq:
    # Qualify the xsi:type value by namespace only when enabled.
    ns = content.real.namespace("ns1")
Typer.manual(node, name, ns)
|
def encode(self, node, content)
|
Add (SOAP) encoding information if needed.
The encoding information is added only if the resolved type is derived
by extension. Furthermore, the xsi:type value is qualified by namespace
only if the content (tag) and referenced type are in different
namespaces.
| 12.047823
| 9.538209
| 1.263112
|
# Skip optional content whose value is None or an empty list/tuple.
if self.optional(content):
    v = content.value
    if v is None:
        return True
    if isinstance(v, (list, tuple)) and not v:
        return True
return False
|
def skip(self, content)
|
Get whether to skip this I{content}.
Should be skipped when the content is optional and value is either None
or an empty list.
@param content: Content to skip.
@type content: L{Object}
@return: True if content is to be skipped.
@rtype: bool
| 4.427678
| 3.599656
| 1.230028
|
def _detect_eggs_in_folder(folder):
    """Detect egg distributions located in the given folder.

    Only direct folder content is considered and subfolders are not
    searched recursively.

    """
    detected = {}
    for entry in os.listdir(folder):
        # Classify each directory entry by its extension: a zip archive,
        # an egg file, an egg folder, or something irrelevant.
        is_zip = entry.endswith(_zip_ext)
        if is_zip:
            base = entry[:-len(_zip_ext)]
            egg_format = _Egg.NONE
        elif entry.endswith(_egg_ext):
            base = entry[:-len(_egg_ext)]
            if os.path.isdir(os.path.join(folder, entry)):
                egg_format = _Egg.FOLDER
            else:
                egg_format = _Egg.FILE
        else:
            continue
        existing = detected.get(base)
        if existing is None:
            detected[base] = _Egg(os.path.join(folder, base), egg_format,
                is_zip)
            continue
        # Same distribution seen in another format - merge the info.
        if egg_format is not _Egg.NONE:
            existing.set_egg(egg_format)
        if is_zip:
            existing.set_zip()
    return detected.values()
|
Detect egg distributions located in the given folder.
Only direct folder content is considered and subfolders are not searched
recursively.
| null | null | null |
|
def normalize(self):
    """Make sure this egg distribution is stored only as an egg file.

    The egg file will be created from another existing distribution
    format if needed.

    """
    if self.has_egg_file():
        # Egg file already present - a leftover zip archive is redundant.
        if self.has_zip():
            self.__remove_zip()
        return
    if self.has_egg_folder():
        # Produce the zip archive from the egg folder (unless one already
        # exists) and then drop the folder.
        if not self.has_zip():
            self.__zip_egg_folder()
        self.__remove_egg_folder()
    self.__rename_zip_to_egg()
|
Makes sure this egg distribution is stored only as an egg file.
The egg file will be created from another existing distribution format
if needed.
| null | null | null |
|
# Thin entry point: build a parser for this operation's parameter
# definitions and run it over the provided call arguments.
arg_parser = _ArgParser(method_name, param_defs, external_param_processor)
return arg_parser(args, kwargs, extra_parameter_errors)
|
def parse_args(method_name, param_defs, args, kwargs, external_param_processor,
extra_parameter_errors)
|
Parse arguments for suds web service operation invocation functions.
Suds prepares Python function objects for invoking web service operations.
This function implements generic binding agnostic part of processing the
arguments passed when calling those function objects.
Argument parsing rules:
* Each input parameter element should be represented by single regular
Python function argument.
* At most one input parameter belonging to a single choice parameter
structure may have its value specified as something other than None.
* Positional arguments are mapped to choice group input parameters the
same as is done for a simple all/sequence group - each in turn.
Expects to be passed the web service operation's parameter definitions
(parameter name, type & optional ancestry information) in order and, based
on that, extracts the values for those parameter from the arguments
provided in the web service operation invocation call.
Ancestry information describes parameters constructed based on suds
library's automatic input parameter structure unwrapping. It is expected to
include the parameter's XSD schema 'ancestry' context, i.e. a list of all
the parent XSD schema tags containing the parameter's <element> tag. Such
ancestry context provides detailed information about how the parameter's
value is expected to be used, especially in relation to other input
parameters, e.g. at most one parameter value may be specified for
parameters directly belonging to the same choice input group.
Rules on acceptable ancestry items:
* Ancestry item's choice() method must return whether the item
represents a <choice> XSD schema tag.
* Passed ancestry items are used 'by address' internally and the same XSD
schema tag is expected to be identified by the exact same ancestry item
object during the whole argument processing.
During processing, each parameter's definition and value, together with any
additional pertinent information collected from the encountered parameter
definition structure, is passed on to the provided external parameter
processor function. There that information is expected to be used to
construct the actual binding specific web service operation invocation
request.
Raises a TypeError exception in case any argument related errors are
detected. The exceptions raised have been constructed to make them as
similar as possible to their respective exceptions raised during regular
Python function argument checking.
Does not support multiple same-named input parameters.
| 2.722906
| 3.902778
| 0.697684
|
# Finish argument processing: unwind the frame stack down to the
# sentinel, then report any extra arguments. Returns a 2-tuple with the
# number of required & allowed arguments.
assert self.active()
sentinel_frame = self.__stack[0]
self.__pop_frames_above(sentinel_frame)
assert len(self.__stack) == 1
self.__pop_top_frame()
assert not self.active()
args_required = sentinel_frame.args_required()
args_allowed = sentinel_frame.args_allowed()
self.__check_for_extra_arguments(args_required, args_allowed)
return args_required, args_allowed
|
def __all_parameters_processed(self)
|
Finish the argument processing.
Should be called after all the web service operation's parameters have
been successfully processed and, afterwards, no further parameter
processing is allowed.
Returns a 2-tuple containing the number of required & allowed
arguments.
See the _ArgParser class description for more detailed information.
| 4.505151
| 4.226332
| 1.065972
|
# Report leftover keyword/positional arguments as TypeErrors, mimicking
# regular Python function argument checking. No-op unless extra
# parameter errors are enabled. Only valid after processing completes.
assert not self.active()
if not self.__extra_parameter_errors:
    return
if self.__kwargs:
    # Py2: dict.keys() returns a list, so [0] picks an arbitrary
    # remaining keyword argument to report.
    param_name = self.__kwargs.keys()[0]
    if param_name in self.__params_with_arguments:
        msg = "got multiple values for parameter '%s'"
    else:
        msg = "got an unexpected keyword argument '%s'"
    self.__error(msg % (param_name,))
if self.__args:
    def plural_suffix(count):
        if count == 1:
            return ""
        return "s"
    def plural_was_were(count):
        if count == 1:
            return "was"
        return "were"
    expected = args_required
    if args_required != args_allowed:
        # Report a range when required != allowed, e.g. '2 to 4'.
        expected = "%d to %d" % (args_required, args_allowed)
    given = self.__args_count
    msg_parts = ["takes %s positional argument" % (expected,),
        plural_suffix(expected), " but %d " % (given,),
        plural_was_were(given), " given"]
    self.__error("".join(msg_parts))
|
def __check_for_extra_arguments(self, args_required, args_allowed)
|
Report an error in case any extra arguments are detected.
Does nothing if reporting extra arguments as exceptions has not been
enabled.
May only be called after the argument processing has been completed.
| 3.57165
| 3.440961
| 1.03798
|
# Construct a frame for the ancestry item; <choice> items get the
# specialized ChoiceFrame, everything else a plain Frame.
frame_class = Frame
if ancestry_item is not None and ancestry_item.choice():
    frame_class = ChoiceFrame
return frame_class(ancestry_item, self.__error,
    self.__extra_parameter_errors)
|
def __frame_factory(self, ancestry_item)
|
Construct a new frame representing the given ancestry item.
| 6.818293
| 6.203443
| 1.099114
|
# Pull the next value for parameter 'name': positional arguments are
# consumed first, then keyword arguments. Returns (found, value).
if self.__args:
    return True, self.__args.pop(0)
try:
    value = self.__kwargs.pop(name)
except KeyError:
    return False, None
return True, value
|
def __get_param_value(self, name)
|
Extract a parameter value from the remaining given arguments.
Returns a 2-tuple consisting of the following:
* Boolean indicating whether an argument has been specified for the
requested input parameter.
* Parameter value.
| 3.59428
| 3.474986
| 1.03433
|
# True when any frame on the stack is a ChoiceFrame, i.e. the current
# parameter lies (directly or indirectly) inside a choice group.
for x in self.__stack:
    if x.__class__ is ChoiceFrame:
        return True
return False
|
def __in_choice_context(self)
|
Whether we are currently processing a choice parameter group.
This includes processing a parameter defined directly or indirectly
within such a group.
May only be called during parameter processing or the result will be
calculated based on the context left behind by the previous parameter
processing if any.
| 6.867261
| 9.99331
| 0.687186
|
# Initialize mutable state for a fresh argument parsing run and push the
# bottom sentinel frame.
assert not self.active()
self.__args = list(args)
self.__kwargs = dict(kwargs)
self.__extra_parameter_errors = extra_parameter_errors
self.__args_count = len(args) + len(kwargs)
self.__params_with_arguments = set()
self.__stack = []
self.__push_frame(None)
|
def __init_run(self, args, kwargs, extra_parameter_errors)
|
Initializes data for a new argument parsing run.
| 3.832085
| 3.743091
| 1.023776
|
# Walk the frame stack in parallel with the given ancestry, returning
# the topmost matching frame plus the still-unmatched ancestry suffix.
# Frames are matched by identity ('is') against ancestry items.
stack = self.__stack
if len(stack) == 1:
    # Only the sentinel frame - nothing matched.
    return stack[0], ancestry
previous = stack[0]
for frame, n in zip(stack[1:], xrange(len(ancestry))):
    if frame.id() is not ancestry[n]:
        return previous, ancestry[n:]
    previous = frame
return frame, ancestry[n + 1:]
|
def __match_ancestry(self, ancestry)
|
Find frames matching the given ancestry.
Returns a tuple containing the following:
* Topmost frame matching the given ancestry or the bottom-most sentry
frame if no frame matches.
* Unmatched ancestry part.
| 3.807093
| 3.443585
| 1.105561
|
# Pop all frames above (but not including) the given frame.
while self.__stack[-1] is not frame:
    self.__pop_top_frame()
assert self.__stack
|
def __pop_frames_above(self, frame)
|
Pops all the frames above, but not including the given frame.
| 5.645362
| 5.77217
| 0.978031
|
# Pop the top frame and let its parent account for the popped subframe.
popped = self.__stack.pop()
if self.__stack:
    self.__stack[-1].process_subframe(popped)
|
def __pop_top_frame(self)
|
Pops the top frame off the frame stack.
| 4.864452
| 4.891743
| 0.994421
|
# Collect the argument for a single input parameter, update the frame
# stack to match the parameter's ancestry, and hand the collected value
# to the external (binding-specific) processor.
assert self.active()
param_optional = param_type.optional()
has_argument, value = self.__get_param_value(param_name)
if has_argument:
    self.__params_with_arguments.add(param_name)
self.__update_context(ancestry)
self.__stack[-1].process_parameter(param_optional, value is not None)
self.__external_param_processor(param_name, param_type,
    self.__in_choice_context(), value)
|
def __process_parameter(self, param_name, param_type, ancestry=None)
|
Collect values for a given web service operation input parameter.
| 6.535528
| 6.850824
| 0.953977
|
# Push a new frame, built for the ancestry item, onto the frame stack.
frame = self.__frame_factory(ancestry_item)
self.__stack.append(frame)
|
def __push_frame(self, ancestry_item)
|
Push a new frame on top of the frame stack.
| 4.415473
| 3.495229
| 1.263286
|
# Look up a stored document by URL. Missing documents referenced via the
# internal 'suds' protocol raise; other protocols return None.
protocol, location = self.__split(url)
content = self.__find(location)
if protocol == 'suds' and content is None:
    # Py2 raise-with-expression syntax; intentional in this code base.
    raise Exception, 'location "%s" not in document store' % location
return content
|
def open(self, url)
|
Open a document at the specified URL.
The document URL's needs not contain a protocol identifier, and if it
does, that protocol identifier is ignored when looking up the store
content.
Missing documents referenced using the internal 'suds' protocol are
reported by raising an exception. For other protocols, None is returned
instead.
@param url: A document URL.
@type url: str
@return: Document content or None if not found.
@rtype: bytes
| 8.984586
| 6.624815
| 1.356202
|
"Returns the given Python source file's compiled content."
file = open(filename, "rt")
try:
source = file.read()
finally:
file.close()
# Python 2.6 and below did not support passing strings to exec() &
# compile() functions containing line separators other than '\n'. To
# support them we need to manually make sure such line endings get
# converted even on platforms where this is not handled by native text file
# read operations.
source = source.replace("\r\n", "\n").replace("\r", "\n")
return compile(source, filename, "exec")
|
def read_python_code(filename)
|
Returns the given Python source file's compiled content.
| 6.936046
| 5.979147
| 1.160039
|
# Collect the given packages and all their subpackages via an iterative
# depth-first scan for folders containing '__init__.py' (namespace
# packages are therefore not detected).
result = set()
todo = []
for package in packages:
    folder = os.path.join(script_folder, *package.split("."))
    if not os.path.isdir(folder):
        raise Exception("Folder not found for package '%s'." % (package,))
    todo.append((package, folder))
while todo:
    package, folder = todo.pop()
    if package in result:
        continue
    result.add(package)
    for subitem in os.listdir(folder):
        subpackage = ".".join((package, subitem))
        subfolder = os.path.join(folder, subitem)
        # Only folders with an __init__.py module count as subpackages.
        if not os.path.isfile(os.path.join(subfolder, "__init__.py")):
            continue
        todo.append((subpackage, subfolder))
return list(result)
|
def recursive_package_list(*packages)
|
Returns a list of all the given packages and all their subpackages.
Given packages are expected to be found relative to this script's location.
Subpackages are detected by scanning the given packages' subfolder
hierarchy for any folders containing the '__init__.py' module. As a
consequence, namespace packages are not supported.
This is our own specialized setuptools.find_packages() replacement so we
can avoid the setuptools dependency.
| 1.971351
| 2.055315
| 0.959148
|
# True when the schema node's maxOccurs allows more than one occurrence
# (a numeric value > 1 or the literal 'unbounded').
max = self.max
if max is None:
    return False
if max.isdigit():
    return int(max) > 1
return max == "unbounded"
|
def multi_occurrence(self)
|
Get whether the node has multiple occurrences, i.e. is a I{collection}.
@return: True if it has, False if it has at most 1 occurrence.
@rtype: boolean
| 5.812397
| 5.33371
| 1.089748
|
# Find a referenced type in self or children; return None if not found.
# Visited qnames are added to 'ignore' so recursively defined
# structures do not cause an infinite loop.
if not len(classes):
    classes = (self.__class__,)
if ignore is None:
    ignore = set()
if self.qname in ignore:
    return
ignore.add(self.qname)
if self.qname == qref and self.__class__ in classes:
    return self
for c in self.rawchildren:
    p = c.find(qref, classes, ignore=ignore)
    if p is not None:
        return p
|
def find(self, qref, classes=[], ignore=None)
|
Find a referenced type in self or children. Return None if not found.
Qualified references for all schema objects checked in this search will
be added to the set of ignored qualified references to avoid the find
operation going into an infinite loop in case of recursively defined
structures.
@param qref: A qualified reference.
@type qref: qref
@param classes: A collection of classes used to qualify the match.
@type classes: Collection(I{class},...), e.g. [I(class),...]
@param ignore: A set of qualified references to ignore in this search.
@type ignore: {qref,...}
@return: The referenced type.
@rtype: L{SchemaObject}
@see: L{qualify()}
| 2.534165
| 2.695995
| 0.939974
|
# Advance the schema-object iterator: exhausted frames are popped,
# Content results are returned with their ancestry, and non-Content
# results are pushed as new frames before recursing.
frame = self.top()
while True:
    result = frame.next()
    if result is None:
        self.pop()
        return self.next()
    if isinstance(result, Content):
        # Ancestry is the chain of schema objects on the stack.
        ancestry = [f.sx for f in self.stack]
        return result, ancestry
    self.push(result)
    return self.next()
|
def next(self)
|
Get the next item.
@return: A tuple: the next (child, ancestry).
@rtype: (L{SchemaObject}, [L{SchemaObject},..])
@raise StopIteration: A the end.
| 5.541173
| 4.777518
| 1.159843
|
# Marshal the content into an XML document and return its root element.
log.debug('processing:\n%s', content)
self.reset()
if content.tag is None:
    # Default the tag to the value's class name when not provided.
    content.tag = content.value.__class__.__name__
document = Document()
self.append(document, content)
return document.root()
|
def process(self, content)
|
Process (marshal) the tag with the specified value using the
optional type information.
@param content: The content to process.
@type content: L{Object}
| 6.454906
| 5.915739
| 1.091141
|
# Collect every namespace referenced by parameter and return types
# (skipping XSD builtins and duplicates) and assign each a prefix.
namespaces = []
for l in (self.params, self.types):
    for t,r in l:
        ns = r.namespace()
        if ns[1] is None: continue
        if ns[1] in namespaces: continue
        if Namespace.xs(ns) or Namespace.xsd(ns):
            continue
        namespaces.append(ns[1])
        if t == r: continue
        # Also record the declared (unresolved) type's namespace.
        ns = t.namespace()
        if ns[1] is None: continue
        if ns[1] in namespaces: continue
        namespaces.append(ns[1])
i = 0
namespaces.sort()
for u in namespaces:
    p = self.nextprefix()
    ns = (p, u)
    self.prefixes.append(ns)
|
def getprefixes(self)
|
Add prefixes for each namespace referenced by parameter types.
| 4.093092
| 3.641652
| 1.123966
|
# Translate a schema type into a prefixed qualified name; types in the
# WSDL's target namespace stay unprefixed, collections get a '[]'
# suffix.
resolved = type.resolve()
name = resolved.name
if type.multi_occurrence():
    name += '[]'
ns = resolved.namespace()
if ns[1] == self.wsdl.tns[1]:
    return name
prefix = self.getprefix(ns[1])
return ':'.join((prefix, name))
|
def xlate(self, type)
|
Get a (namespace) translated I{qualified} name for specified type.
@param type: A schema type.
@type type: I{suds.xsd.sxbasic.SchemaObject}
@return: A translated I{qualified} name.
@rtype: str
| 6.989951
| 6.257883
| 1.116983
|
# Build a multi-line textual description of the service: prefixes,
# ports, each port's method signatures, and referenced types.
s = []
# indent(n) yields a newline followed by 3*n spaces.
indent = (lambda n : '\n%*s'%(n*3,' '))
s.append('Service ( %s ) tns="%s"' % (self.service.name, self.wsdl.tns[1]))
s.append(indent(1))
s.append('Prefixes (%d)' % len(self.prefixes))
for p in self.prefixes:
    s.append(indent(2))
    s.append('%s = "%s"' % p)
s.append(indent(1))
s.append('Ports (%d):' % len(self.ports))
for p in self.ports:
    s.append(indent(2))
    s.append('(%s)' % p[0].name)
    s.append(indent(3))
    s.append('Methods (%d):' % len(p[1]))
    for m in p[1]:
        sig = []
        s.append(indent(4))
        sig.append(m[0])
        sig.append('(')
        sig.append(', '.join("%s %s" % (self.xlate(p[1]), p[0]) for p
            in m[1]))
        sig.append(')')
        try:
            s.append(''.join(sig))
        except Exception:
            # Best-effort: skip signatures that fail to render.
            pass
    s.append(indent(3))
    s.append('Types (%d):' % len(self.types))
    for t in self.types:
        s.append(indent(4))
        s.append(self.xlate(t[0]))
s.append('\n\n')
return ''.join(s)
|
def description(self)
|
Get a textual description of the service for which this object represents.
@return: A textual description.
@rtype: str
| 2.610476
| 2.607003
| 1.001332
|
# Set the document root from either an element name (a new Element is
# built) or an existing Element; anything else is silently ignored.
if isinstance(node, basestring):
    self.__root = Element(node)
    return
if isinstance(node, Element):
    self.__root = node
    return
|
def append(self, node)
|
Append (set) the document root.
@param node: A root L{Element} or name used to build
the document root element.
@type node: (L{Element}|str|None)
| 4.05923
| 3.552867
| 1.142522
|
# Return the root element when it matches the requested (name, ns),
# otherwise the supplied default. When ns is not given, a prefix on the
# name is resolved against the root's namespace declarations.
if self.__root is None:
    return default
if ns is None:
    prefix, name = splitPrefix(name)
    if prefix is None:
        ns = None
    else:
        ns = self.__root.resolvePrefix(prefix)
if self.__root.match(name, ns):
    return self.__root
else:
    return default
|
def getChild(self, name, ns=None, default=None)
|
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
| 3.28496
| 3.277955
| 1.002137
|
# Resolve a '/'-separated path: the first segment must match the root,
# the remainder is delegated to the root element.
if self.__root is None:
    return None
if path[0] == '/':
    path = path[1:]
path = path.split('/',1)
if self.getChild(path[0]) is None:
    return None
if len(path) > 1:
    return self.__root.childAtPath(path[1])
else:
    return self.__root
|
def childAtPath(self, path)
|
Get a child at I{path} where I{path} is a (/) separated
list of element names that are expected to be children.
@param path: A (/) separated list of element names.
@type path: basestring
@return: The leaf node at the end of I{path}
@rtype: L{Element}
| 2.271328
| 2.534216
| 0.896265
|
# List variant of childAtPath: resolve the first path segment against
# the root, delegate any remainder, and return matches as a list.
if self.__root is None:
    return []
if path[0] == '/':
    path = path[1:]
path = path.split('/',1)
if self.getChild(path[0]) is None:
    return []
if len(path) > 1:
    return self.__root.childrenAtPath(path[1])
else:
    return [self.__root,]
|
def childrenAtPath(self, path)
|
Get a list of children at I{path} where I{path} is a (/) separated
list of element names that are expected to be children.
@param path: A (/) separated list of element names.
@type path: basestring
@return: The collection leaf nodes at the end of I{path}
@rtype: [L{Element},...]
| 2.34971
| 2.532725
| 0.92774
|
# Return the matching children as a list: the root itself when no name
# is given, otherwise whatever getChild() matches; [] when nothing does.
if name is None:
    matched = self.__root
else:
    matched = self.getChild(name, ns)
if matched is None:
    return []
else:
    return [matched,]
|
def getChildren(self, name=None, ns=None)
|
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
| 4.055558
| 4.181072
| 0.96998
|
# Serialize the document: the XML declaration followed by the root
# element's plain-string form (declaration only, when there is no root).
s = []
s.append(self.DECL)
root = self.root()
if root is not None:
    s.append(root.plain())
return ''.join(s)
|
def plain(self)
|
Get a string representation of this XML document.
@return: A I{plain} string.
@rtype: basestring
| 4.696093
| 3.807497
| 1.233381
|
# Merge all contained schemas into the first one and return it; returns
# None (implicitly) when there are no children.
if self.children:
    schema = self.children[0]
    for s in self.children[1:]:
        schema.merge(s)
    return schema
|
def merge(self)
|
Merge contained schemas into one.
@return: The merged schema.
@rtype: L{Schema}
| 3.826636
| 3.264796
| 1.17209
|
tns = self.root.get("targetNamespace")
tns_prefix = None
if tns is not None:
tns_prefix = self.root.findPrefix(tns)
return tns_prefix, tns
|
def mktns(self)
|
Make the schema's target namespace.
@return: namespace representation of the schema's targetNamespace
value.
@rtype: (prefix, URI)
| 4.282924
| 3.717052
| 1.152237
|
# Ask each Import child to fetch its referenced schema (using the
# already-loaded cache), recursively open that schema's own imports,
# and merge the result into this schema.
for imp in self.imports:
    imported = imp.open(options, loaded_schemata)
    if imported is None:
        continue
    imported.open_imports(options, loaded_schemata)
    log.debug("imported:\n%s", imported)
    self.merge(imported)
|
def open_imports(self, options, loaded_schemata)
|
Instruct all contained L{sxbasic.Import} children to import all of
their referenced schemas. The imported schema contents are I{merged}
in.
@param options: An options dictionary.
@type options: L{options.Options}
@param loaded_schemata: Already loaded schemata cache (URL --> Schema).
@type loaded_schemata: dict
| 3.415594
| 3.726761
| 0.916505
|
# Dereference all schema content: collect every object, qualify it,
# record its merge-dependency index, then merge in dependency order.
all = []
indexes = {}
for child in self.children:
    child.content(all)
dependencies = {}
for x in all:
    x.qualify()
    # midx selects which dependency (if any) the object merges with.
    midx, deps = x.dependencies()
    dependencies[x] = deps
    indexes[x] = midx
for x, deps in dependency_sort(dependencies):
    midx = indexes.get(x)
    if midx is None:
        continue
    d = deps[midx]
    log.debug("(%s) merging %s <== %s", self.tns[1], Repr(x), Repr(d))
    x.merge(d)
|
def dereference(self)
|
Instruct all children to perform dereferencing.
| 6.24699
| 6.102829
| 1.023622
|
# Factory used by Import children: build a new Schema from the root
# node, base URL and the shared loaded-schemata cache.
return Schema(root, baseurl, options, loaded_schemata)
|
def instance(self, root, baseurl, loaded_schemata, options)
|
Create and return an new schema object using the specified I{root} and
I{URL}.
@param root: A schema root node.
@type root: L{sax.element.Element}
@param baseurl: A base URL.
@type baseurl: str
@param loaded_schemata: Already loaded schemata cache (URL --> Schema).
@type loaded_schemata: dict
@param options: An options dictionary.
@type options: L{options.Options}
@return: The newly created schema object.
@rtype: L{Schema}
@note: This is only used by Import children.
| 4.86912
| 7.185267
| 0.677653
|
# Create the SQS queue policy required for the SNS subscriptions. Each
# subscription endpoint is an SQS ARN; the policy targets the ARNs while
# the QueuePolicy's Queues property needs the equivalent queue URLs.
t = self.template
arn_endpoints = []
url_endpoints = []
for sub in topic_subs:
    arn_endpoints.append(sub["Endpoint"])
    # Rebuild the queue URL from the ARN parts:
    # arn:aws:sqs:<region>:<account-id>:<queue-name>
    split_endpoint = sub["Endpoint"].split(":")
    queue_url = "https://%s.%s.amazonaws.com/%s/%s" % (
        split_endpoint[2],  # literally "sqs"
        split_endpoint[3],  # AWS region
        split_endpoint[4],  # AWS ID
        split_endpoint[5],  # Queue name
    )
    url_endpoints.append(queue_url)
policy_doc = queue_policy(topic_arn, arn_endpoints)
t.add_resource(
    sqs.QueuePolicy(
        topic_name + "SubPolicy",
        PolicyDocument=policy_doc,
        Queues=url_endpoints,
    )
)
|
def create_sqs_policy(self, topic_name, topic_arn, topic_subs)
|
This method creates the SQS policy needed for an SNS subscription. It
also takes the ARN of the SQS queue and converts it to the URL needed
for the subscription, as that takes a URL rather than the ARN.
| 3.152456
| 2.873426
| 1.097107
|
# Create the SNS topic, export its name/ARN as outputs, and attach an
# SQS queue policy for any SQS-protocol subscriptions.
topic_subs = []
t = self.template
if "Subscription" in topic_config:
    topic_subs = topic_config["Subscription"]
t.add_resource(
    sns.Topic.from_dict(
        topic_name,
        topic_config
    )
)
topic_arn = Ref(topic_name)
t.add_output(
    Output(topic_name + "Name", Value=GetAtt(topic_name, "TopicName"))
)
t.add_output(Output(topic_name + "Arn", Value=topic_arn))
# Only SQS subscriptions need an accompanying queue policy.
sqs_subs = [sub for sub in topic_subs if sub["Protocol"] == "sqs"]
if sqs_subs:
    self.create_sqs_policy(topic_name, topic_arn, sqs_subs)
|
def create_topic(self, topic_name, topic_config)
|
Creates the SNS topic, along with any subscriptions requested.
| 2.892854
| 2.664317
| 1.085777
|
# Map the stream ARN's service component (arn:aws:<service>:...) to the
# corresponding awacs Action class; unknown services raise ValueError.
stream_type_map = {
    "kinesis": awacs.kinesis.Action,
    "dynamodb": awacs.dynamodb.Action,
}
stream_type = stream_arn.split(":")[2]
try:
    return stream_type_map[stream_type]
except KeyError:
    raise ValueError(
        "Invalid stream type '%s' in arn '%s'" % (stream_type, stream_arn)
    )
|
def get_stream_action_type(stream_arn)
|
Returns the awacs Action for a stream type given an arn
Args:
stream_arn (str): The Arn of the stream.
Returns:
:class:`awacs.aws.Action`: The appropriate stream type awacs Action
class
Raises:
ValueError: If the stream type doesn't match kinesis or dynamodb.
| 2.628062
| 2.114285
| 1.243003
|
# Build IAM statements letting Lambda read from a Kinesis or DynamoDB
# stream; the action namespace is derived from the ARN itself.
action_type = get_stream_action_type(stream_arn)
arn_parts = stream_arn.split("/")
# Cut off the last bit and replace it with a wildcard
# (ListStreams must be granted on the wildcard resource).
wildcard_arn_parts = arn_parts[:-1]
wildcard_arn_parts.append("*")
wildcard_arn = "/".join(wildcard_arn_parts)
return [
    Statement(
        Effect=Allow,
        Resource=[stream_arn],
        Action=[
            action_type("DescribeStream"),
            action_type("GetRecords"),
            action_type("GetShardIterator"),
        ]
    ),
    Statement(
        Effect=Allow,
        Resource=[wildcard_arn],
        Action=[action_type("ListStreams")]
    )
]
|
def stream_reader_statements(stream_arn)
|
Returns statements to allow Lambda to read from a stream.
Handles both DynamoDB & Kinesis streams. Automatically figures out the
type of stream, and provides the correct actions from the supplied Arn.
Arg:
stream_arn (str): A kinesis or dynamodb stream arn.
Returns:
list: A list of statements.
| 2.694816
| 2.803663
| 0.961177
|
# Accept either a single Statement or a list of them and append to the
# accumulated policy statements.
if isinstance(statements, Statement):
    statements = [statements]
self._policy_statements.extend(statements)
|
def add_policy_statements(self, statements)
|
Adds statements to the policy.
Args:
statements (:class:`awacs.aws.Statement` or list): Either a single
Statment, or a list of statements.
| 3.509316
| 4.799423
| 0.731196
|
# Combine the accumulated statements, the basic Lambda execution
# statements, and any subclass-provided extended statements.
# NOTE(review): extends self._policy_statements in place rather than a
# copy - repeated calls will keep appending; confirm this is intended.
statements = self._policy_statements
statements.extend(
    lambda_basic_execution_statements(
        self.function.Ref()
    )
)
extended_statements = self.extended_policy_statements()
if extended_statements:
    statements.extend(extended_statements)
return statements
|
def generate_policy_statements(self)
|
Generates the policy statements for the role used by the function.
To add additional statements you can either override the
`extended_policy_statements` method to return a list of Statements
to be added to the policy, or override this method itself if you
need more control.
Returns:
list: A list of :class:`awacs.aws.Statement` objects.
| 5.622425
| 4.286688
| 1.311601
|
name = name.replace("-", "_")
return "".join(word.capitalize() for word in name.split("_"))
|
def snake_to_camel_case(name)
|
Accept a snake_case string and return a CamelCase string.
For example::
>>> snake_to_camel_case('cidr_block')
'CidrBlock'
| 2.891585
| 4.578638
| 0.631538
|
# Instantiate the troposphere class named by the 'Class' variable with
# the given 'Properties', add it to the template, and export a Ref to it
# under the 'Output' name.
template = self.template
variables = self.get_variables()
tclass = variables['Class']
tprops = variables['Properties']
output = variables['Output']
# Resolve e.g. 'sns.Topic' -> troposphere.sns.Topic at runtime.
klass = load_object_from_string('troposphere.' + tclass)
instance = klass.from_dict('ResourceRefName', tprops)
template.add_resource(instance)
template.add_output(Output(
    output,
    Description="A reference to the object created in this blueprint",
    Value=Ref(instance)
))
|
def setup_resource(self)
|
Setting Up Resource
| 6.334089
| 6.017488
| 1.052614
|
# Build the KMS key policy granting root-account access.
statements = []
statements.extend(kms_key_root_statements())
return Policy(
    Version="2012-10-17",
    Id="root-account-access",
    Statement=statements
)
|
def kms_key_policy()
|
Creates a key policy for use of a KMS Key.
| 5.126044
| 4.542436
| 1.128479
|
# IAM policy for logspout -> kinesis log streaming; some actions are not
# exposed as awacs constants and are built from the kinesis prefix.
p = Policy(
    Statement=[
        Statement(
            Effect=Allow,
            Resource=["*"],
            Action=[
                kinesis.CreateStream, kinesis.DescribeStream,
                Action(kinesis.prefix, "AddTagsToStream"),
                Action(kinesis.prefix, "PutRecords")
            ])])
return p
|
def logstream_policy()
|
Policy needed for logspout -> kinesis log streaming.
| 5.037327
| 4.338799
| 1.160996
|
# IAM policy for Empire -> CloudWatch Logs run-output recording, scoped
# to log streams under the given log group.
p = Policy(
    Statement=[
        Statement(
            Effect=Allow,
            Resource=[
                Join('', [
                    'arn:aws:logs:*:*:log-group:',
                    log_group_ref,
                    ':log-stream:*'])],
            Action=[
                logs.CreateLogStream,
                logs.PutLogEvents,
            ])])
return p
|
def runlogs_policy(log_group_ref)
|
Policy needed for Empire -> Cloudwatch logs to record run output.
| 2.784121
| 2.694484
| 1.033267
|
# Validate that every supplied property is in the allowed list; raise
# ValueError on the first unknown key to fail fast on config typos.
for key in properties.keys():
    if key not in allowed_properties:
        raise ValueError(
            "%s is not a valid property of %s" % (key, resource)
        )
|
def check_properties(properties, allowed_properties, resource)
|
Checks the list of properties in the properties variable against the
property list provided by the allowed_properties variable. If any property
does not match the properties in allowed_properties, a ValueError is
raised to prevent unexpected behavior when creating resources.
properties: The config (as dict) provided by the configuration file
allowed_properties: A list of strings representing the available params
for a resource.
resource: A string naming the resource in question for the error
message.
| 2.60949
| 2.927872
| 0.891258
|
if isinstance(left, Mapping):
tags = dict(left)
elif hasattr(left, 'tags'):
tags = _tags_to_dict(left.tags)
else:
tags = _tags_to_dict(left)
if isinstance(right, Mapping):
tags.update(right)
elif hasattr(left, 'tags'):
tags.update(_tags_to_dict(right.tags))
else:
tags.update(_tags_to_dict(right))
return factory(**tags)
|
def merge_tags(left, right, factory=Tags)
|
Merge two sets of tags into a new troposphere object
Args:
left (Union[dict, troposphere.Tags]): dictionary or Tags object to be
merged with lower priority
right (Union[dict, troposphere.Tags]): dictionary or Tags object to be
merged with higher priority
factory (type): Type of object to create. Defaults to the troposphere
Tags class.
| 1.820035
| 1.903674
| 0.956064
|
# Hash a record set's (name, type) into a stable MD5 identifier; name
# and type are normalized so case differences don't change the hash.
rs_name = rs_name.lower()
rs_type = rs_type.upper()
# Make A and CNAME records hash to same sum to support updates.
rs_type = "ACNAME" if rs_type in ["A", "CNAME"] else rs_type
return md5(rs_name + rs_type).hexdigest()
|
def get_record_set_md5(rs_name, rs_type)
|
Accept record_set Name and Type. Return MD5 sum of these values.
| 5.583146
| 4.983713
| 1.120278
|
alias_target = getattr(rs, "AliasTarget", None)
if alias_target:
hosted_zone_id = getattr(alias_target, "HostedZoneId", None)
if not hosted_zone_id:
dns_name = alias_target.DNSName
if dns_name.endswith(CF_DOMAIN):
alias_target.HostedZoneId = CLOUDFRONT_ZONE_ID
elif dns_name.endswith(ELB_DOMAIN):
region = dns_name.split('.')[-5]
alias_target.HostedZoneId = ELB_ZONE_IDS[region]
elif dns_name in S3_WEBSITE_ZONE_IDS:
alias_target.HostedZoneId = S3_WEBSITE_ZONE_IDS[dns_name]
else:
alias_target.HostedZoneId = self.hosted_zone_id
return rs
|
def add_hosted_zone_id_for_alias_target_if_missing(self, rs)
|
Add proper hosted zone id to record set alias target if missing.
| 2.152969
| 2.131573
| 1.010038
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.