code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if not isinstance(urlspec, URLSpec):
urlspec = URLSpec(*urlspec)
for operation in self._operations_from_methods(urlspec.handler_class):
operations.update(operation)
if not operations:
raise APISpecError(
'Could not find endpoint for urlspec {0}'.format(urlspec),
)
params_method = getattr(urlspec.handler_class, list(operations.keys())[0])
operations.update(self._extensions_from_handler(urlspec.handler_class))
return self.tornadopath2openapi(urlspec, params_method) | def path_helper(self, operations, urlspec, **kwargs) | Path helper that allows passing a Tornado URLSpec or tuple. | 4.367032 | 4.011933 | 1.088511 |
self.clean_error()
try:
url = self.get_url(Constants.GENERATE_MFA_TOKEN_URL, user_id)
data = {
'expires_in': expires_in,
'reusable': reusable
}
response = self.execute_call('post', url, json=data)
if response.status_code == 201:
json_data = response.json()
if json_data:
return MFAToken(json_data)
else:
self.error = self.extract_status_code_from_response(response)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0] | def generate_mfa_token(self, user_id, expires_in=259200, reusable=False) | Use to generate a temporary MFA token that can be used in place of other MFA tokens for a set time period.
For example, use this token for account recovery.
:param user_id: Id of the user
:type user_id: int
:param expires_in: Set the duration of the token in seconds.
(default: 259200 seconds = 72h) 72 hours is the max value.
:type expires_in: int
:param reusable: Defines if the token reusable. (default: false) If set to true, token can be used for multiple apps, until it expires.
:type reusable: bool
Returns a mfa token
:return: return the object if success
:rtype: MFAToken
See https://developers.onelogin.com/api-docs/1/multi-factor-authentication/generate-mfa-token Generate MFA Token documentation | 2.527575 | 2.555038 | 0.989252 |
ret = []
last_lower = False
for char in camel:
current_upper = char.upper() == char
if current_upper and last_lower:
ret.append("_")
ret.append(char.lower())
else:
ret.append(char.lower())
last_lower = not current_upper
return "".join(ret) | def camel_to_snake(camel) | Convert camelCase to snake_case. | 2.381106 | 2.331907 | 1.021098 |
try:
prefix, count = id_string.rsplit("_", 1)
count = int(count)
except ValueError:
# We don't need to worry about ids that don't match our pattern
pass
else:
if prefix == self.prefix:
self.counter = max(count, self.counter) | def register_id(self, id_string) | Register a manually assigned id as used, to avoid collisions. | 3.683788 | 3.525207 | 1.044985 |
if root.tag != utils.lxmlns("mets") + "amdSec":
raise exceptions.ParseError(
"AMDSec can only parse amdSec elements with METS namespace."
)
section_id = root.get("ID")
subsections = []
for child in root:
subsection = SubSection.parse(child)
subsections.append(subsection)
return cls(section_id, subsections) | def parse(cls, root) | Create a new AMDSec by parsing root.
:param root: Element or ElementTree to be parsed into an object. | 5.206327 | 5.405022 | 0.963239 |
if self._tree is not None:
return self._tree
el = etree.Element(utils.lxmlns("mets") + self.tag, ID=self.id_string)
self.subsections.sort()
for child in self.subsections:
el.append(child.serialize(now))
return el | def serialize(self, now=None) | Serialize this amdSec and all children to lxml Element and return it.
:param str now: Default value for CREATED in children if none set
:return: amdSec Element with all children | 7.112493 | 6.681974 | 1.06443 |
if element.tag != cls.ALT_RECORD_ID_TAG:
raise exceptions.ParseError(
u"AltRecordID got unexpected tag {}; expected {}".format(
element.tag, cls.ALT_RECORD_ID_TAG
)
)
return cls(element.text, id=element.get(u"ID"), type=element.get(u"TYPE")) | def parse(cls, element) | Create a new AltRecordID by parsing root.
:param element: Element to be parsed into an AltRecordID.
:raises exceptions.ParseError: If element is not a valid altRecordID. | 4.181494 | 3.061563 | 1.365804 |
if element.tag != cls.AGENT_TAG:
raise exceptions.ParseError(
u"Agent got unexpected tag {}; expected {}".format(
element.tag, cls.AGENT_TAG
)
)
role = element.get(u"ROLE")
if not role:
raise exceptions.ParseError(u"Agent must have a ROLE attribute.")
if role == u"OTHER":
role = element.get(u"OTHERROLE") or role
agent_type = element.get(u"TYPE")
if agent_type == u"OTHER":
agent_type = element.get(u"OTHERTYPE") or agent_type
agent_id = element.get(u"ID")
try:
name = element.find(cls.NAME_TAG).text
except AttributeError:
name = None
notes = [note.text for note in element.findall(cls.NOTE_TAG)]
return cls(role, id=agent_id, type=agent_type, name=name, notes=notes) | def parse(cls, element) | Create a new Agent by parsing root.
:param element: Element to be parsed into an Agent.
:raises exceptions.ParseError: If element is not a valid agent. | 2.447968 | 2.331015 | 1.050172 |
if self.status is not None:
return self.status
if self.subsection == "dmdSec":
if self.older is None:
return "original"
else:
return "updated"
if self.subsection in ("techMD", "rightsMD"):
# TODO how to handle ones where newer has been deleted?
if self.newer is None:
return "current"
else:
return "superseded"
return None | def get_status(self) | Returns the STATUS when serializing.
Calculates based on the subsection type and if it's replacing anything.
:returns: None or the STATUS string. | 5.844003 | 5.474694 | 1.067457 |
if self.subsection != new_subsection.subsection:
raise exceptions.MetsError(
"Must replace a SubSection with one of the same type."
)
# TODO convert this to a DB so have bidirectonal foreign keys??
self.newer = new_subsection
new_subsection.older = self
self.status = None | def replace_with(self, new_subsection) | Replace this SubSection with new_subsection.
Replacing SubSection must be the same time. That is, you can only
replace a dmdSec with another dmdSec, or a rightsMD with a rightsMD etc.
:param new_subsection: Updated version of this SubSection
:type new_subsection: :class:`SubSection` | 14.360343 | 13.772163 | 1.042708 |
subsection = root.tag.replace(utils.lxmlns("mets"), "", 1)
if subsection not in cls.ALLOWED_SUBSECTIONS:
raise exceptions.ParseError(
"SubSection can only parse elements with tag in %s with METS namespace"
% (cls.ALLOWED_SUBSECTIONS,)
)
section_id = root.get("ID")
created = root.get("CREATED", "")
status = root.get("STATUS", "")
child = root[0]
if child.tag == utils.lxmlns("mets") + "mdWrap":
mdwrap = MDWrap.parse(child)
obj = cls(subsection, mdwrap, section_id)
elif child.tag == utils.lxmlns("mets") + "mdRef":
mdref = MDRef.parse(child)
obj = cls(subsection, mdref, section_id)
else:
raise exceptions.ParseError(
"Child of %s must be mdWrap or mdRef" % subsection
)
obj.created = created
obj.status = status
return obj | def parse(cls, root) | Create a new SubSection by parsing root.
:param root: Element or ElementTree to be parsed into an object.
:raises exceptions.ParseError: If root's tag is not in :const:`SubSection.ALLOWED_SUBSECTIONS`.
:raises exceptions.ParseError: If the first child of root is not mdRef or mdWrap. | 3.264428 | 2.683547 | 1.21646 |
created = self.created if self.created is not None else now
el = etree.Element(utils.lxmlns("mets") + self.subsection, ID=self.id_string)
if created: # Don't add CREATED if none was parsed
el.set("CREATED", created)
status = self.get_status()
if status:
el.set("STATUS", status)
if self.contents:
el.append(self.contents.serialize())
return el | def serialize(self, now=None) | Serialize this SubSection and all children to lxml Element and return it.
:param str now: Default value for CREATED if none set
:return: dmdSec/techMD/rightsMD/sourceMD/digiprovMD Element with all children | 6.572125 | 5.761012 | 1.140794 |
if root.tag != utils.lxmlns("mets") + "mdRef":
raise exceptions.ParseError(
"MDRef can only parse mdRef elements with METS namespace."
)
# Required attributes
mdtype = root.get("MDTYPE")
if not mdtype:
raise exceptions.ParseError("mdRef must have a MDTYPE")
target = root.get(utils.lxmlns("xlink") + "href")
if not target:
raise exceptions.ParseError("mdRef must have an xlink:href.")
try:
target = utils.urldecode(target)
except ValueError:
raise exceptions.ParseError(
'Value "{}" (of attribute xlink:href) is not a valid'
" URL.".format(target)
)
loctype = root.get("LOCTYPE")
if not loctype:
raise exceptions.ParseError("mdRef must have a LOCTYPE")
# Optional attributes
label = root.get("LABEL")
otherloctype = root.get("OTHERLOCTYPE")
return cls(target, mdtype, loctype, label, otherloctype) | def parse(cls, root) | Create a new MDWrap by parsing root.
:param root: Element or ElementTree to be parsed into a MDWrap. | 3.081376 | 3.232032 | 0.953387 |
if root.tag != utils.lxmlns("mets") + "mdWrap":
raise exceptions.ParseError(
"MDWrap can only parse mdWrap elements with METS namespace."
)
mdtype = root.get("MDTYPE")
if not mdtype:
raise exceptions.ParseError("mdWrap must have a MDTYPE")
othermdtype = root.get("OTHERMDTYPE")
document = root.xpath("mets:xmlData/*", namespaces=utils.NAMESPACES)
if len(document) == 0:
raise exceptions.ParseError(
"All mdWrap/xmlData elements must have at least one child; this"
" one has none"
)
elif len(document) == 1:
document = document[0]
# Create a copy, so that the element is not moved by duplicate references.
document = copy.deepcopy(document)
return cls(document, mdtype, othermdtype) | def parse(cls, root) | Create a new MDWrap by parsing root.
:param root: Element or ElementTree to be parsed into a MDWrap.
:raises exceptions.ParseError: If mdWrap does not contain MDTYPE
:raises exceptions.ParseError: If xmlData contains no children | 4.804386 | 4.420609 | 1.086815 |
if not element_maker:
element_maker = ElementMaker(namespace=nsmap[ns], nsmap=nsmap)
tag = data[0]
if snake:
camel_tag = utils.snake_to_camel(tag)
func = getattr(element_maker, camel_tag)
args = []
attributes = {}
for element in data[1:]:
if isinstance(element, dict):
for key, val in element.items():
attributes[key] = val
elif isinstance(element, (tuple, list)):
args.append(
_data_to_lxml_el(
element, ns, nsmap, element_maker=element_maker, snake=snake
)
)
elif isinstance(element, six.text_type):
args.append(element)
elif isinstance(element, etree._Element):
args.append(element)
else:
args.append(six.binary_type(element))
ret = func(*args)
for attr, val in attributes.items():
try:
ns, attr = attr.split(":")
except ValueError:
ns = None
if snake:
attr = utils.snake_to_camel(attr)
if ns:
attr = "{" + nsmap[ns] + "}" + attr
ret.attrib[attr] = val
else:
ret.attrib[attr] = val
return ret | def _data_to_lxml_el(data, ns, nsmap, element_maker=None, snake=True) | Convert tuple/list ``data`` to an ``lxml.etree._Element`` instance.
:param tuple/list data: iterable whose first element is the snake-case
string which is the name of the root XML element. Subsequent elements
may be dicts (which encode XML attributes), tuples/lists (which encode
sub-elements), or scalars (strings, ints or floats, which encode text
under the element).
:param str ns: the implicit namespace of all elements in the XML.
:param dict nsmap: a dict of XML namespaces to define in the root element.
:param ElementMaker element_maker: instance for creating XML elements.
:returns: an ``lxml.etree._Element`` instance | 1.914315 | 2.02526 | 0.945219 |
parts = [x.strip("{") for x in bracket_ns.split("}")]
if len(parts) != 2:
return bracket_ns
ns, var = parts
if default_ns and nsmap:
try:
ns = [k for k, v in nsmap.items() if v == ns][0]
if ns == default_ns:
if snake:
return utils.camel_to_snake(var)
return var
except IndexError:
pass
if snake:
return ":".join([ns, utils.camel_to_snake(var)])
return ":".join([ns, var]) | def _to_colon_ns(bracket_ns, default_ns=None, nsmap=None, snake=True) | Convert a namespaced tag/attribute name from explicit XML "bracket"
notation to a more succinct Pythonic colon-separated notation using
snake_case, e.g.,::
>>> _to_colon_ns(
'{info:lc/xmlns/premis-v2}objectIdentifier',
'premis', utils.NAMESPACES)
'object_identifier'
>>> _to_colon_ns('{info:lc/xmlns/premis-v2}objectIdentifier')
'premis:object_identifier'
>>> _to_colon_ns(
'http://www.w3.org/2001/XMLSchema-instance}schemaLocation')
'xsi:schema_location' | 2.569292 | 2.633191 | 0.975733 |
attrs = {}
for attr, val in lxml_el.items():
attr = _to_colon_ns(attr, default_ns=ns, nsmap=nsmap)
attrs[attr] = val
return attrs | def _get_el_attributes(lxml_el, ns=None, nsmap=None) | Return the XML attributes of lxml ``Element`` instance lxml_el as a dict
where namespaced attributes are represented via colon-delimiting and using
snake case. | 3.469252 | 3.447774 | 1.00623 |
tag_name = _to_colon_ns(lxml_el.tag, default_ns=ns, nsmap=nsmap)
ret = [tag_name]
attributes = _get_el_attributes(lxml_el, ns=ns, nsmap=nsmap)
if attributes:
ret.append(attributes)
for sub_el in lxml_el:
ret.append(_lxml_el_to_data(sub_el, ns, nsmap, snake=snake))
text = lxml_el.text
if text:
ret.append(text)
return tuple(ret) | def _lxml_el_to_data(lxml_el, ns, nsmap, snake=True) | Convert an ``lxml._Element`` instance to a Python tuple. | 2.224193 | 2.189 | 1.016077 |
nsmap = utils.PREMIS_VERSIONS_MAP[premis_version]["namespaces"]
return _data_to_lxml_el(data, "premis", nsmap) | def data_to_premis(data, premis_version=utils.PREMIS_VERSION) | Given tuple ``data`` representing a PREMIS entity (object, event or
agent), return an ``lxml.etree._Element`` instance. E.g.,::
>>> p = data_to_premis((
'event',
utils.PREMIS_META,
(
'event_identifier',
('event_identifier_type', 'UUID'),
('event_identifier_value', str(uuid4()))
)
))
>>> etree.tostring(p, pretty_print=True).decode('utf8')
'''<premis:event
xmlns:premis="info:lc/xmlns/premis-v2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
version="2.2"
xsi:schemaLocation="info:lc/xmlns/premis-v2 http://www.loc.gov/standards/premis/v2/premis-v2-2.xsd">
<premis:eventIdentifier>
<premis:eventIdentifierType>UUID</premis:eventIdentifierType>
<premis:eventIdentifierValue>f4b7758f-e7b2-4155-9b56-d76965849fc1</premis:eventIdentifierValue>
</premis:eventIdentifier>
</premis:event>''' | 6.736167 | 5.175763 | 1.301483 |
premis_version = premis_lxml_el.get("version", utils.PREMIS_VERSION)
nsmap = utils.PREMIS_VERSIONS_MAP[premis_version]["namespaces"]
return _lxml_el_to_data(premis_lxml_el, "premis", nsmap) | def premis_to_data(premis_lxml_el) | Transform a PREMIS ``lxml._Element`` instance to a Python tuple. | 3.510261 | 3.435401 | 1.021791 |
path_parts = path.split("/")
try:
sub_elm = [
el
for el in data
if isinstance(el, (tuple, list)) and el[0] == path_parts[0]
][0]
except IndexError:
return None
else:
if len(path_parts) > 1:
return data_find(sub_elm, "/".join(path_parts[1:]))
return sub_elm | def data_find(data, path) | Find and return the first element-as-tuple in tuple ``data`` using simplified
XPath ``path``. | 2.445451 | 2.397153 | 1.020148 |
schema = []
for element in tuple_:
if isinstance(element, (tuple, list)):
try:
if isinstance(element[1], six.string_types):
schema.append((element[0],))
else:
schema.append(tuple_to_schema(element))
except IndexError:
schema.append((element[0],))
else:
schema.append(element)
return tuple(schema) | def tuple_to_schema(tuple_) | Convert a tuple representing an XML data structure into a schema tuple
that can be used in the ``.schema`` property of a sub-class of
PREMISElement. | 2.05028 | 2.081753 | 0.984881 |
schema = tuple_to_schema(tuple_instance)
def defaults(self):
return {}
def schema_getter(self):
return schema
new_class_name = "PREMIS{}Element".format(schema[0].capitalize())
return type(
new_class_name,
(PREMISElement,),
{"defaults": property(defaults), "schema": property(schema_getter)},
) | def generate_element_class(tuple_instance) | Dynamically create a sub-class of PREMISElement given
``tuple_instance``, which is a tuple representing an XML data structure. | 5.305346 | 4.138632 | 1.281908 |
path_parts = path.split("/")
try:
sub_elms = tuple(
el
for el in data
if isinstance(el, (tuple, list)) and el[0] == path_parts[0]
)
except IndexError:
return None
if len(path_parts) > 1:
ret = []
for sub_elm in sub_elms:
for x in data_find_all(sub_elm, "/".join(path_parts[1:])):
ret.append(x)
ret = tuple(ret)
else:
ret = sub_elms
if ret and dyn_cls:
cls = generate_element_class(ret[0])
return tuple(cls(data=tuple_) for tuple_ in ret)
return ret | def data_find_all(data, path, dyn_cls=False) | Find and return all element-as-tuples in tuple ``data`` using simplified
XPath ``path``. | 2.640294 | 2.51096 | 1.051508 |
el = data_find(data, path)
if not isinstance(el, (list, tuple)):
return None
texts = [child for child in el[1:] if not isinstance(child, (tuple, list, dict))]
if not texts:
return None
return " ".join(
[
# How should we deal with decoding errors when `x` is binary?
# For now, we're using the ``strict`` mode. Other options here:
# https://docs.python.org/3/library/functions.html#open.
six.ensure_text(x, encoding="utf-8", errors="strict")
for x in texts
]
) | def data_find_text(data, path) | Return the text value of the element-as-tuple in tuple ``data`` using
simplified XPath ``path``. | 4.670497 | 4.735646 | 0.986243 |
path = path or []
attributes = attributes or {}
tag_name = schema[0]
data = [tag_name]
if attributes:
data.append(attributes)
new_path = path[:]
new_path.append(tag_name)
root = new_path[0]
possible_paths = ["__".join(new_path), tag_name]
if root != tag_name and tag_name.startswith(root):
possible_paths.append(tag_name.lstrip(root)[1:])
for possible_path in possible_paths:
val = elements.get(possible_path)
if val:
if isinstance(val, (tuple, list)):
data = tuple(val)
else:
if attributes:
data = (tag_name, attributes, val)
else:
data = (tag_name, val)
return tuple(data)
for subschema in schema[1:]:
subel = _generate_data(subschema, elements, path=new_path)
if (not subel) or (subel == subschema):
continue
if all(map(lambda x: isinstance(x, tuple), subel)):
for subsubel in subel:
data.append(subsubel)
elif not el_is_empty(subel):
data.append(subel)
return tuple(data) | def _generate_data(schema, elements, attributes=None, path=None) | Using tree-as-tuple ``schema`` as guide, return a tree-as-tuple ``data``
representing a PREMIS XML element, where the values in dict ``elements`` and
the values in dict ``attributes`` are located in the appropriate locations
in the ``data`` tree structure. | 2.566666 | 2.528232 | 1.015202 |
if len(el) == 1 and not isinstance(el[0], (list, tuple)):
return True
subels_are_empty = []
for subel in el:
if isinstance(subel, (list, tuple)):
subels_are_empty.append(el_is_empty(subel))
else:
subels_are_empty.append(not bool(subel))
return all(subels_are_empty) | def el_is_empty(el) | Return ``True`` if tuple ``el`` represents an empty XML element. | 1.924498 | 1.877667 | 1.024941 |
attrs_to_paths = attrs_to_paths or {}
tag = schema[0]
if len(schema) == 1:
_insert_attr_path(attrs_to_paths, tag, "/".join(path + [tag]))
else:
for elem in schema[1:]:
if isinstance(elem, dict):
continue
new_path = [] if path is None else path + [tag]
if isinstance(elem, (list, tuple)):
attrs_to_paths.update(
get_attrs_to_paths(
elem, attrs_to_paths=attrs_to_paths, path=new_path
)
)
else:
_insert_attr_path(attrs_to_paths, tag, "/".join(new_path))
return attrs_to_paths | def get_attrs_to_paths(schema, attrs_to_paths=None, path=None) | Analyze PREMIS-element-as-tuple ``schema`` and return a dict that maps
attribute names to the simplified XPaths needed to retrieve them, e.g.,::
>>> {'object_identifier_type':
'object_identifier/object_identifier_type',
'object_identifier_value':
'object_identifier/object_identifier_value'} | 2.140843 | 2.214301 | 0.966826 |
for child in data:
if isinstance(child, dict):
version = child.get("version")
if version:
return version
return utils.PREMIS_VERSION | def _premis_version_from_data(data) | Given tuple ``data`` encoding a PREMIS element, attempt to return the
PREMIS version it is using. If none can be found, return the default PREMIS
version. | 4.522815 | 4.676313 | 0.967175 |
attr = (
"event_detail_information__event_detail"
if self.premis_version == utils.PREMIS_3_0_VERSION
else "event_detail"
)
return dict(
[
tuple([x.strip(' "') for x in kv.strip().split("=", 1)])
for kv in getattr(self, attr).split(";")
]
) | def parsed_event_detail(self) | Parse and return our PREMIS eventDetail string value like::
'program="7z"; version="9.20"; algorithm="bzip2"'
and return a dict like::
{'algorithm': 'bzip2', 'version': '9.20', 'program': '7z'} | 5.980639 | 5.244065 | 1.140459 |
event_type = self.findtext("event_type")
if event_type != "compression":
raise AttributeError(
'PREMIS events of type "{}" have no compression'
" details".format(event_type)
)
parsed_compression_event_detail = self.parsed_event_detail
compression_program = _get_event_detail_attr(
"program", parsed_compression_event_detail
)
compression_algorithm = _get_event_detail_attr(
"algorithm", parsed_compression_event_detail
)
compression_program_version = _get_event_detail_attr(
"version", parsed_compression_event_detail
)
archive_tool = {"7z": "7-Zip"}.get(compression_program, compression_program)
return compression_algorithm, compression_program_version, archive_tool | def compression_details(self) | Return as a 3-tuple, this PREMIS compression event's program,
version, and algorithm used to perform the compression. | 3.510679 | 3.018503 | 1.163053 |
compression_algorithm, _, _ = self.compression_details
return [
{
"algorithm": algorithm,
"order": str(index + offset + 1),
"type": "decompression",
}
for index, algorithm in enumerate(compression_algorithm.split(","))
] | def get_decompression_transform_files(self, offset=0) | Returns a list of dicts representing ``<mets:transformFile>``
elements with ``TRANSFORMTYPE="decompression"`` given
``compression_algorithm`` which is a comma-separated string of
algorithms that must be used in the order provided to decompress
the package, e.g., 'bzip2,tar' or 'lzma'. | 5.57963 | 3.820242 | 1.460544 |
event_type = self.findtext("event_type")
if event_type != "encryption":
raise AttributeError(
'PREMIS events of type "{}" have no encryption'
" details".format(event_type)
)
parsed_encryption_event_detail = self.parsed_event_detail
encryption_program = _get_event_detail_attr(
"program", parsed_encryption_event_detail
)
encryption_program_version = _get_event_detail_attr(
"version", parsed_encryption_event_detail
)
encryption_key = _get_event_detail_attr("key", parsed_encryption_event_detail)
return encryption_program, encryption_program_version, encryption_key | def encryption_details(self) | Return as a 3-tuple, this PREMIS encryption event's program,
version, and key used to perform the encryption. | 3.19213 | 2.637253 | 1.2104 |
def test(cls):
if not isinstance(cls, type):
cls = type(cls)
for class_method_name in class_method_names:
try:
class_method = getattr(cls, class_method_name)
if class_method.__self__ is not cls:
return False
except AttributeError:
return False
return True
return test | def has_class_methods(*class_method_names) | Return a test function that, when given a class, returns ``True`` if that
class has all of the class methods in ``class_method_names``. If an object
is passed to the test function, check for the class methods on its
class. | 2.346662 | 2.03938 | 1.150674 |
def test(obj):
for method_name in method_names:
try:
method = getattr(obj, method_name)
except AttributeError:
return False
else:
if not callable(method):
return False
if not isinstance(obj, type):
try:
# An instance method is a method type with a __self__
# attribute that references the instance.
if method.__self__ is not obj:
return False
except AttributeError:
return False
return True
return test | def has_methods(*method_names) | Return a test function that, when given an object (class or an
instance), returns ``True`` if that object has all of the (regular) methods
in ``method_names``. Note: this is testing for regular methods only and the
test function will correctly return ``False`` if an instance has one of the
specified methods as a classmethod or a staticmethod. However, it will
incorrectly return ``True`` (false positives) for classmethods and
staticmethods on a *class*. | 2.856888 | 2.738234 | 1.043332 |
if not self.allow_replace:
assert feature_name not in self.providers, "Duplicate feature: {!r}".format(
feature_name
)
if callable(provider) and not isinstance(provider, type):
self.providers[feature_name] = lambda: provider(*args, **kwargs)
else:
self.providers[feature_name] = lambda: provider | def provide(self, feature_name, provider, *args, **kwargs) | Provide a feature named ``feature_name`` using the provider object
``provider`` and any arguments (``args``, ``kwargs``) needed by the
provider if it is callable. | 3.096968 | 3.064709 | 1.010526 |
parsed = urlparse(url)
for attr in URL_ENCODABLE_PARTS:
parsed = parsed._replace(**{attr: func(getattr(parsed, attr))})
return urlunparse(parsed) | def _urlendecode(url, func) | Encode or decode ``url`` by applying ``func`` to all of its
URL-encodable parts. | 3.245135 | 3.069987 | 1.057052 |
def autodiscover():
    """Auto-discover INSTALLED_APPS backbone_api.py modules."""
    # This code is based off django.contrib.admin.__init__
    from django.conf import settings
    try:
        # Django versions >= 1.9
        from django.utils.module_loading import import_module
    except ImportError:
        # Django versions < 1.9
        from django.utils.importlib import import_module
    from django.utils.module_loading import module_has_submodule
    from backbone.views import BackboneAPIView  # This is to prevent a circular import issue
    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        # Attempt to import the app's backbone module.
        try:
            import_module('%s.backbone_api' % app)
        # Catch Exception (not a bare except) so SystemExit/KeyboardInterrupt
        # still propagate.
        except Exception:
            # Decide whether to bubble up this error. If the app just
            # doesn't have an backbone module, we can ignore the error
            # attempting to import it, otherwise we want it to bubble up.
            if module_has_submodule(mod, 'backbone_api'):
                raise
def register(self, backbone_view_class):
    """Register the given backbone view class (repeat registrations are no-ops)."""
    if backbone_view_class in self._registry:
        return
    self._registry.append(backbone_view_class)
def queryset(self, request, **kwargs):
    """Return the queryset (with ``self.ordering`` applied, if set)."""
    queryset = self.model._default_manager.all()
    return queryset.order_by(*self.ordering) if self.ordering else queryset
def get(self, request, id=None, **kwargs):
    """Handle GET for either the collection or an object detail."""
    if not self.has_get_permission(request):
        return HttpResponseForbidden(_('You do not have permission to perform this action.'))
    if not id:
        return self.get_collection(request, **kwargs)
    obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
    return self.get_object_detail(request, obj)
def get_object_detail(self, request, obj):
    """Handle GET for the details of the given object."""
    # Detail views prefer their own field list, falling back to the shared one.
    fields = ['id'] + list(self.display_detail_fields or self.display_fields)
    payload = self.serialize(obj, fields)
    return HttpResponse(self.json_dumps(payload), content_type='application/json')
def get_collection(self, request, **kwargs):
    """Handle GET for the list of objects, with optional pagination."""
    qs = self.queryset(request, **kwargs)
    display_fields = self.display_collection_fields or self.display_fields
    if self.paginate_by is not None:
        paginator = Paginator(qs, self.paginate_by)
        try:
            qs = paginator.page(request.GET.get('page', 1)).object_list
        except PageNotAnInteger:
            return HttpResponseBadRequest(_('Invalid `page` parameter: Not a valid integer.'))
        except EmptyPage:
            return HttpResponseBadRequest(_('Invalid `page` parameter: Out of range.'))
    fields = ['id'] + list(display_fields)
    data = [self.serialize(obj, fields) for obj in qs]
    return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
    """Handle POST: create an object on the collection URL only."""
    if id:
        # No posting to an object detail page.
        return HttpResponseForbidden()
    if not self.has_add_permission(request):
        return HttpResponseForbidden(_('You do not have permission to perform this action.'))
    return self.add_object(request)
def add_object(self, request):
    """Validate the JSON request body and create a new object.

    Returns 201 with the serialized object plus a Location header on
    success, 400 with form errors on validation failure, 403 when the
    per-data permission check fails.
    """
    try:
        # backbone sends data in the body in json format
        # Conditional statement is for backwards compatibility with Django <= 1.3
        data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
    except ValueError:
        return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
    form = self.get_form_instance(request, data=data)
    if not form.is_valid():
        return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
    if not self.has_add_permission_for_data(request, form.cleaned_data):
        return HttpResponseForbidden(_('You do not have permission to perform this action.'))
    obj = form.save()
    # We return the newly created object's details and a Location header with it's url
    response = self.get_object_detail(request, obj)
    response.status_code = 201
    opts = self.model._meta
    url_slug = self.url_slug or (
        opts.model_name if hasattr(opts, 'model_name') else opts.module_name
    )
    url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
    response['Location'] = reverse(url_name, args=[obj.id])
    return response
def put(self, request, id=None, **kwargs):
    """Handle PUT: update an object on a detail URL only."""
    if not id:
        # No putting on a collection.
        return HttpResponseForbidden()
    obj = get_object_or_404(self.queryset(request), id=id)
    if not self.has_update_permission(request, obj):
        return HttpResponseForbidden(_('You do not have permission to perform this action.'))
    return self.update_object(request, obj)
def update_object(self, request, obj):
    """Validate the JSON request body and update ``obj`` in place."""
    try:
        # backbone sends data in the body in json format
        # Conditional statement is for backwards compatibility with Django <= 1.3
        data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
    except ValueError:
        return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
    form = self.get_form_instance(request, data=data, instance=obj)
    if not form.is_valid():
        return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
    if not self.has_update_permission_for_data(request, form.cleaned_data):
        return HttpResponseForbidden(_('You do not have permission to perform this action.'))
    form.save()
    # We return the updated object details
    return self.get_object_detail(request, obj)
def get_form_instance(self, request, data=None, instance=None):
    """Return an instantiated ModelForm for adding or editing an object.

    ``instance`` is the model instance when editing an existing object.
    """
    factory_kwargs = {}
    if self.form:
        factory_kwargs['form'] = self.form
    if self.fields:
        factory_kwargs['fields'] = self.fields
    form_class = modelform_factory(self.model, **factory_kwargs)
    return form_class(data=data, instance=instance)
def delete(self, request, id=None):
    """Handle DELETE: remove an object on a detail URL only."""
    if not id:
        # No delete requests allowed on collection view.
        return HttpResponseForbidden()
    obj = get_object_or_404(self.queryset(request), id=id)
    if not self.has_delete_permission(request, obj):
        return HttpResponseForbidden(_('You do not have permission to perform this action.'))
    return self.delete_object(request, obj)
def has_add_permission(self, request):
    """Return True if the requesting user may add an object."""
    opts = self.model._meta
    return request.user.has_perm('%s.add_%s' % (opts.app_label, opts.object_name.lower()))
def has_update_permission(self, request, obj):
    """Return True if the requesting user may change the given object."""
    opts = self.model._meta
    return request.user.has_perm('%s.change_%s' % (opts.app_label, opts.object_name.lower()))
def serialize(self, obj, fields):
    """Serialize one model instance to a dict for the given field names.

    Resolution order per field: callable -> method on this view ->
    attribute/callable/related object on the model -> db field (via the
    Django serializer) -> model property.
    """
    data = {}
    remaining_fields = []
    for field in fields:
        if callable(field):  # Callable
            data[field.__name__] = field(obj)
        elif hasattr(self, field) and callable(getattr(self, field)):  # Method on the view
            data[field] = getattr(self, field)(obj)
        elif hasattr(obj, field):  # Callable/property/field on the model
            attr = getattr(obj, field)
            if isinstance(attr, Model):
                # Related objects are represented by their primary key.
                data[field] = attr.pk
            elif isinstance(attr, Manager):
                data[field] = [item['pk'] for item in attr.values('pk')]
            elif callable(attr):  # Callable on the model
                data[field] = attr()
            else:
                remaining_fields.append(field)
        else:
            raise AttributeError('Invalid field: %s' % field)
    # Add on db fields
    serializer = Serializer()
    serializer.serialize([obj], fields=list(remaining_fields))
    data.update(serializer.getvalue()[0]['fields'])
    # Any remaining fields should be properties on the model
    for field in set(remaining_fields) - set(data.keys()):
        data[field] = getattr(obj, field)
    return data
def json_dumps(self, data, **options):
    """Wrapper around ``json.dumps`` that uses a special JSON encoder."""
    params = {'sort_keys': True, 'indent': 2}
    params.update(options)
    # This code is based off django's built in JSON serializer.
    # Compare version components numerically: the old string-list comparison
    # (['2','10','0'] >= ['2','1','3']) misorders double-digit components.
    try:
        version = tuple(int(part) for part in json.__version__.split('.')[:3])
    except (AttributeError, ValueError):
        version = ()
    if version >= (2, 1, 3):
        # Use JS strings to represent Python Decimal instances (ticket #16850)
        params.update({'use_decimal': False})
    return json.dumps(data, cls=DjangoJSONEncoder, **params)
return FSEntry(label=label, children=children, type=u"Directory", use=None) | def dir(cls, label, children) | Return ``FSEntry`` directory object. | 16.016094 | 9.243486 | 1.73269 |
def from_fptr(cls, label, type_, fptr):
    """Return an ``FSEntry`` built from a parsed fptr record."""
    return FSEntry(
        label=label,
        type=type_,
        path=fptr.path,
        use=fptr.use,
        file_uuid=fptr.file_uuid,
        derived_from=fptr.derived_from,
        checksum=fptr.checksum,
        checksumtype=fptr.checksumtype,
    )
def file_id(self):
    """Return the fptr @FILEID, or None for directories.

    :raises exceptions.MetsError: if ``file_uuid`` is unset.
    """
    if self.type.lower() == "directory":
        return None
    if self.file_uuid is None:
        raise exceptions.MetsError(
            "No FILEID: File %s does not have file_uuid set" % self.path
        )
    if self.is_aip:
        # AIP pointer files use the package basename (sans extension) as ID.
        return os.path.splitext(os.path.basename(self.path))[0]
    return utils.FILE_ID_PREFIX + self.file_uuid
def group_id(self):
    """Return the @GROUPID; derivatives share their source file's group."""
    if self.derived_from is not None:
        return self.derived_from.group_id()
    if self.file_uuid is None:
        return None
    return utils.GROUP_ID_PREFIX + self.file_uuid
def _add_metadata_element(self, md, subsection, mdtype, mode="mdwrap", **kwargs):
    """Attach metadata ``md`` as a dmdSec or amdSec subsection.

    :param md: value passed to the MDWrap/MDRef
    :param str subsection: metadata tag to create
    :param str mdtype: value for mdWrap/mdRef @MDTYPE
    :param str mode: 'mdwrap' or 'mdref'; mdref also reads ``loctype``,
        ``label`` and ``otherloctype`` from kwargs, mdwrap reads
        ``othermdtype``.
    """
    # HELP how handle multiple amdSecs?
    # When adding *MD which amdSec to add to?
    if mode.lower() == "mdwrap":
        mdsec = MDWrap(md, mdtype, kwargs.get("othermdtype"))
    elif mode.lower() == "mdref":
        mdsec = MDRef(
            md,
            mdtype,
            kwargs.get("loctype"),
            kwargs.get("label"),
            kwargs.get("otherloctype"),
        )
    subsection = SubSection(subsection, mdsec)
    if subsection.subsection == "dmdSec":
        self.dmdsecs.append(subsection)
        return subsection
    try:
        amdsec = self.amdsecs[0]
    except IndexError:
        # Lazily create the first amdSec on demand.
        amdsec = AMDSec()
        self.amdsecs.append(amdsec)
    amdsec.subsections.append(subsection)
    return subsection
def serialize_md_inst(self, md_inst, md_class):
    """Serialize ``md_inst`` into something lxml can embed.

    Already-serialized values (element trees, elements, strings) pass
    through; otherwise ``md_inst`` must be an ``md_class`` instance and its
    ``serialize()`` result is returned.
    """
    passthrough_types = tuple(
        chain((etree._ElementTree, etree._Element), six.string_types)
    )
    if isinstance(md_inst, passthrough_types):
        return md_inst
    if isinstance(md_inst, md_class):
        return md_inst.serialize()
    raise TypeError(
        "Instance {!r} must be instance of {!r}".format(md_inst, md_class)
    )
def add_child(self, child):
    """Add ``child`` to this directory FSEntry and return it.

    :raises ValueError: if this entry is not a directory, or if ``child``
        is this entry itself.
    """
    if self.type.lower() != "directory":
        raise ValueError("Only directory objects can have children")
    if child is self:
        raise ValueError("Cannot be a child of itself!")
    if child not in self._children:
        self._children.append(child)
        child.parent = self
    return child
def remove_child(self, child):
    """Remove ``child`` from this entry; a no-op if it is not a child."""
    if child in self._children:
        self._children.remove(child)
        child.parent = None
def serialize_filesec(self):
    """Return the <mets:file> element for this entry, for use in a fileSec.

    Returns None if this entry is not an Item/AIP or has no use.
    """
    if (
        self.type.lower() not in ("item", "archival information package")
        or self.use is None
    ):
        return None
    el = etree.Element(utils.lxmlns("mets") + "file", ID=self.file_id())
    if self.group_id():
        el.attrib["GROUPID"] = self.group_id()
    if self.admids:
        el.set("ADMID", " ".join(self.admids))
    if self.checksum and self.checksumtype:
        el.attrib["CHECKSUM"] = self.checksum
        el.attrib["CHECKSUMTYPE"] = self.checksumtype
    if self.path:
        flocat = etree.SubElement(el, utils.lxmlns("mets") + "FLocat")
        # Setting manually so order is correct
        try:
            flocat.set(utils.lxmlns("xlink") + "href", utils.urlencode(self.path))
        except ValueError:
            raise exceptions.SerializeError(
                'Value "{}" (for attribute xlink:href) is not a valid'
                " URL.".format(self.path)
            )
        flocat.set("LOCTYPE", "OTHER")
        flocat.set("OTHERLOCTYPE", "SYSTEM")
    for transform_file in self.transform_files:
        transform_file_el = etree.SubElement(
            el, utils.lxmlns("mets") + "transformFile"
        )
        for key, val in transform_file.items():
            attribute = "transform{}".format(key).upper()
            transform_file_el.attrib[attribute] = str(val)
    return el
def is_empty_dir(self):
    """Return True if this is a directory whose subtree contains no files
    (i.e. it has no children, or only empty directories as children)."""
    if self.mets_div_type != "Directory":
        return False
    children = self._children
    if not children:
        return True
    return all(child.is_empty_dir for child in children)
def serialize_structmap(self, recurse=True, normative=False):
    """Return the structMap <mets:div> for this entry, or None.

    :param bool recurse: also serialize and append all children.
    :param bool normative: build the "Normative Directory Structure"
        logical structMap — include empty directories, omit fptr elements.
    """
    if not self.label:
        return None
    # Empty directories are not included in the physical structmap.
    if self.is_empty_dir and not normative:
        return None
    el = etree.Element(utils.lxmlns("mets") + "div", TYPE=self.mets_div_type)
    el.attrib["LABEL"] = self.label
    if (not normative) and self.file_id():
        etree.SubElement(el, utils.lxmlns("mets") + "fptr", FILEID=self.file_id())
    if self.dmdids:
        if (not normative) or (normative and self.is_empty_dir):
            el.set("DMDID", " ".join(self.dmdids))
    if recurse and self._children:
        for child in self._children:
            child_el = child.serialize_structmap(
                recurse=recurse, normative=normative
            )
            if child_el is not None:
                el.append(child_el)
    return el
def check_secret(self, secret):
    """Return True if ``secret`` matches the known secret.

    Uses a constant-time comparison to avoid timing side-channels, falling
    back to ``==`` when ``hmac.compare_digest`` is unavailable.
    """
    try:
        return hmac.compare_digest(secret, self.secret)
    except AttributeError:  # pragma: no cover
        return secret == self.secret
def secure(cls):
    """Return a SASLAuth using only mechanisms safe for unencrypted sessions."""
    builtin_mechs = cls._get_builtin_mechanisms()
    chosen = [
        mech for mech in builtin_mechs.values()
        if not mech.insecure and mech.priority is not None
    ]
    return SASLAuth(chosen)
def plaintext(cls):
    """Return a SASLAuth using only mechanisms that expose un-hashed credentials."""
    builtin_mechs = cls._get_builtin_mechanisms()
    chosen = [
        mech for mech in builtin_mechs.values()
        if mech.insecure and mech.priority is not None
    ]
    return SASLAuth(chosen)
def server_mechanisms(self):
    """Return the available :class:`ServerMechanism` objects."""
    return [
        mech for mech in self.mechs.values()
        if isinstance(mech, ServerMechanism)
    ]
def client_mechanisms(self):
    """Return the available :class:`ClientMechanism` objects."""
    return [
        mech for mech in self.mechs.values()
        if isinstance(mech, ClientMechanism)
    ]
def get_server(self, name):
    """Like ``get``, but only return :class:`ServerMechanism` instances."""
    mech = self.get(name)
    if isinstance(mech, ServerMechanism):
        return mech
    return None
def get_client(self, name):
    """Like ``get``, but only return :class:`ClientMechanism` instances."""
    mech = self.get(name)
    if isinstance(mech, ClientMechanism):
        return mech
    return None
def read(cls, source):
    """Build a METSDocument from a file-like object, a path, or an XML string."""
    # File-like objects and on-disk paths go through fromfile. hasattr is
    # checked first so file-like objects never reach os.path.exists.
    if hasattr(source, "read") or os.path.exists(source):
        return cls.fromfile(source)
    # Otherwise treat ``source`` as raw XML content.
    if isinstance(source, six.string_types):
        source = source.encode("utf8")
    return cls.fromstring(source)
if files is None:
files = self._root_elements
collected = set()
for entry in files:
collected.add(entry)
collected.update(self._collect_all_files(entry.children))
return collected | def _collect_all_files(self, files=None) | Collect all FSEntrys into a set, including all descendants.
:param list files: List of :class:`FSEntry` to traverse.
:returns: Set of FSEntry | 3.315096 | 3.451833 | 0.960387 |
def get_file(self, **kwargs):
    """Return the first FSEntry whose attributes match all kwargs, else None."""
    # TODO put this in a sqlite DB so it can be queried efficiently
    # TODO handle multiple matches (with DB?)
    # TODO check that kwargs are actual attrs
    criteria = kwargs.items()
    for entry in self.all_files():
        if all(getattr(entry, attr) == expected for attr, expected in criteria):
            return entry
    return None
def append_file(self, fs_entry):
    """Add ``fs_entry`` (and implicitly its children) to the document tree.

    Repeated additions of the same entry are ignored.
    """
    if fs_entry not in self._root_elements:
        self._root_elements.append(fs_entry)
        # Reset file lists so they get regenerated with the new files(s)
        self._all_files = None
def remove_entry(self, fs_entry):
    """Remove ``fs_entry`` from the document and detach it from its parent."""
    if fs_entry in self._root_elements:
        self._root_elements.remove(fs_entry)
    if fs_entry.parent:
        fs_entry.parent.remove_child(fs_entry)
    # Reset file lists so they get regenerated without the removed file(s)
    self._all_files = None
def _document_root(self, fully_qualified=True):
    """Return the <mets> root element with namespaces and schemaLocation."""
    nsmap = {"xsi": utils.NAMESPACES["xsi"], "xlink": utils.NAMESPACES["xlink"]}
    # Fully-qualified output uses the "mets:" prefix; otherwise mets is the
    # default (unprefixed) namespace.
    if fully_qualified:
        nsmap["mets"] = utils.NAMESPACES["mets"]
    else:
        nsmap[None] = utils.NAMESPACES["mets"]
    attrib = {
        "{}schemaLocation".format(utils.lxmlns("xsi")): utils.SCHEMA_LOCATIONS
    }
    if self.objid:
        attrib["OBJID"] = self.objid
    return etree.Element(utils.lxmlns("mets") + "mets", nsmap=nsmap, attrib=attrib)
def _mets_header(self, now):
    """Return the metsHdr element, stamping CREATEDATE/LASTMODDATE."""
    header_tag = etree.QName(utils.NAMESPACES[u"mets"], u"metsHdr")
    # Preserve an existing creation date; new documents are created "now".
    created = now if self.createdate is None else self.createdate
    header_element = etree.Element(
        header_tag, CREATEDATE=created, LASTMODDATE=now
    )
    for agent in self.agents:
        header_element.append(agent.serialize())
    for alternate_id in self.alternate_ids:
        header_element.append(alternate_id.serialize())
    return header_element
dmdsecs = []
amdsecs = []
for f in files:
for d in f.dmdsecs:
dmdsecs.append(d)
for a in f.amdsecs:
amdsecs.append(a)
dmdsecs.sort(key=lambda x: x.id_string)
amdsecs.sort(key=lambda x: x.id_string)
return dmdsecs + amdsecs | def _collect_mdsec_elements(files) | Return all dmdSec and amdSec classes associated with the files.
Returns all dmdSecs, then all amdSecs, so they only need to be
serialized before being appended to the METS document.
:param List files: List of :class:`FSEntry` to collect MDSecs for.
:returns: List of AMDSecs and SubSections | 2.410844 | 2.085496 | 1.156005 |
def _structmap(self):
    """Return the physical structMap element covering all root entries."""
    structmap = etree.Element(
        utils.lxmlns("mets") + "structMap",
        TYPE="physical",
        # TODO Add ability for multiple structMaps
        ID="structMap_1",
        # TODO don't hardcode this
        LABEL="Archivematica default",
    )
    for item in self._root_elements:
        child = item.serialize_structmap(recurse=True)
        if child is not None:
            structmap.append(child)
    return structmap
def _filesec(self, files=None):
    """Return a fileSec element with all files grouped by their use."""
    if files is None:
        files = self.all_files()
    filesec = etree.Element(utils.lxmlns("mets") + "fileSec")
    filegrps = {}
    for file_ in files:
        if file_.type.lower() not in ("item", AIP_ENTRY_TYPE):
            continue
        # Get fileGrp, or create if not exist
        filegrp = filegrps.get(file_.use)
        if filegrp is None:
            filegrp = etree.SubElement(
                filesec, utils.lxmlns("mets") + "fileGrp", USE=file_.use
            )
            filegrps[file_.use] = filegrp
        file_el = file_.serialize_filesec()
        if file_el is not None:
            filegrp.append(file_el)
    return filesec
def serialize(self, fully_qualified=True):
    """Serialize this document into a mets Element and return it."""
    now = datetime.utcnow().replace(microsecond=0).isoformat("T")
    files = self.all_files()
    root = self._document_root(fully_qualified=fully_qualified)
    root.append(self._mets_header(now=now))
    for section in self._collect_mdsec_elements(files):
        root.append(section.serialize(now=now))
    root.append(self._filesec(files))
    root.append(self._structmap())
    root.append(self._normative_structmap())
    return root
def tostring(self, fully_qualified=True, pretty_print=True, encoding="UTF-8"):
    """Serialize this METS document and return it as a string.

    Returns a unicode string when ``encoding == "unicode"``; use ``write``
    to serialize straight to a file.
    """
    root = self.serialize(fully_qualified=fully_qualified)
    serialize_kwargs = {"pretty_print": pretty_print, "encoding": encoding}
    if encoding != "unicode":
        # lxml forbids an XML declaration when returning a unicode string.
        serialize_kwargs["xml_declaration"] = True
    return etree.tostring(root, **serialize_kwargs)
def write(
    self, filepath, fully_qualified=True, pretty_print=False, encoding="UTF-8"
):
    """Serialize this METS document and write it to ``filepath``."""
    root = self.serialize(fully_qualified=fully_qualified)
    tree = root.getroottree()
    serialize_kwargs = {"pretty_print": pretty_print, "encoding": encoding}
    if encoding != "unicode":
        # lxml forbids an XML declaration when writing unicode text.
        serialize_kwargs["xml_declaration"] = True
    tree.write(filepath, **serialize_kwargs)
def _parse_tree_structmap(self, tree, parent_elem, normative_parent_elem=None):
    """Recursively parse the divs under ``parent_elem`` into FSEntrys.

    ``normative_parent_elem`` is the matching div from the "Normative
    Directory Structure" logical structMap, when present; amdSecs and
    dmdSecs referenced by each div are attached along the way.
    """
    siblings = []
    el_to_normative = self._get_el_to_normative(parent_elem, normative_parent_elem)
    for elem, normative_elem in el_to_normative.items():
        if elem.tag != utils.lxmlns("mets") + "div":
            continue  # Only handle divs, not fptrs
        entry_type = elem.get("TYPE")
        label = elem.get("LABEL")
        fptr_elems = elem.findall("mets:fptr", namespaces=utils.NAMESPACES)
        # Directories are walked recursively. Additionally, they may
        # contain direct fptrs.
        if entry_type.lower() == "directory":
            children = self._parse_tree_structmap(
                tree, elem, normative_parent_elem=normative_elem
            )
            fs_entry = fsentry.FSEntry.dir(label, children)
            self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
            siblings.append(fs_entry)
            for fptr_elem in fptr_elems:
                fptr = self._analyze_fptr(fptr_elem, tree, entry_type)
                fs_entry = fsentry.FSEntry.from_fptr(
                    label=None, type_=u"Item", fptr=fptr
                )
                self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
                siblings.append(fs_entry)
            continue
        # Other types, e.g.: items, aips...
        if not len(fptr_elems):
            continue
        fptr = self._analyze_fptr(fptr_elems[0], tree, entry_type)
        fs_entry = fsentry.FSEntry.from_fptr(label, entry_type, fptr)
        self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
        self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
        siblings.append(fs_entry)
    return siblings
el_to_normative = OrderedDict()
if normative_parent_elem is None:
for el in parent_elem:
el_to_normative[el] = None
else:
for norm_el in normative_parent_elem:
matches = [
el
for el in parent_elem
if el.get("TYPE") == norm_el.get("TYPE")
and el.get("LABEL") == norm_el.get("LABEL")
]
if matches:
el_to_normative[matches[0]] = norm_el
else:
el_to_normative[norm_el] = None
return el_to_normative | def _get_el_to_normative(parent_elem, normative_parent_elem) | Return ordered dict ``el_to_normative``, which maps children of
``parent_elem`` to their normative counterparts in the children of
``normative_parent_elem`` or to ``None`` if there is no normative
parent. If there is a normative div element with no non-normative
counterpart, that element is treated as a key with value ``None``.
This allows us to create ``FSEntry`` instances for empty directory div
elements, which are only documented in a normative logical structmap. | 1.852667 | 1.811027 | 1.022992 |
def fromfile(cls, path):
    """Create a METSDocument by parsing the METS file at ``path``."""
    xml_parser = etree.XMLParser(remove_blank_text=True)
    tree = etree.parse(path, parser=xml_parser)
    return cls.fromtree(tree)
def fromstring(cls, string):
    """Create a METSDocument by parsing a METS document held in ``string``."""
    xml_parser = etree.XMLParser(remove_blank_text=True)
    root = etree.fromstring(string, xml_parser)
    return cls.fromtree(root.getroottree())
def fromtree(cls, tree):
    """Create a METSDocument from an already-parsed ElementTree."""
    document = cls()
    document.tree = tree
    document._parse_tree(tree)
    return document
def get_schematron(sct_path):
    """Return an ``isoschematron.Schematron`` built from the file at ``sct_path``."""
    resolved = _get_file_path(sct_path)
    xml_parser = etree.XMLParser(remove_blank_text=True)
    sct_doc = etree.parse(resolved, parser=xml_parser)
    return isoschematron.Schematron(sct_doc, store_report=True)
def validate(mets_doc, xmlschema=METS_XSD_PATH, schematron=AM_SCT_PATH):
    """Validate a METS file against an XMLSchema and a schematron schema.

    Returns ``(valid, report_dict)`` where the report carries the per-schema
    verdicts, logs and a human-readable summary under ``"report"``.
    """
    is_xsd_valid, xsd_error_log = xsd_validate(mets_doc, xmlschema=xmlschema)
    is_sct_valid, sct_report = schematron_validate(mets_doc, schematron=schematron)
    report = {
        "is_xsd_valid": is_xsd_valid,
        "is_sct_valid": is_sct_valid,
        "xsd_error_log": xsd_error_log,
        "sct_report": sct_report,
    }
    report["report"] = report_string(report)
    return is_xsd_valid and is_sct_valid, report
def get_xmlschema(xmlschema, mets_doc):
    """Return an ``etree.XMLSchema`` for validating ``mets_doc``.

    Any extra schemata referenced by the document's xsi:schemaLocation
    attributes are imported into the schema (skipping the METS namespace
    itself), so externally-declared metadata types validate too.
    """
    xsd_path = _get_file_path(xmlschema)
    xmlschema = etree.parse(xsd_path)
    schema_locations = set(
        mets_doc.xpath("//*/@xsi:schemaLocation", namespaces=NAMESPACES)
    )
    for schema_location in schema_locations:
        namespaces_locations = schema_location.strip().split()
        # xsi:schemaLocation is a whitespace-separated list of
        # (namespace, location) pairs.
        for namespace, location in zip(*[iter(namespaces_locations)] * 2):
            if namespace == NAMESPACES["mets"]:
                continue
            xs_import = etree.Element("{http://www.w3.org/2001/XMLSchema}import")
            xs_import.attrib["namespace"] = namespace
            xs_import.attrib["schemaLocation"] = location
            xmlschema.getroot().insert(0, xs_import)
    return etree.XMLSchema(xmlschema)
def schematron_validate(mets_doc, schematron=AM_SCT_PATH):
    """Validate ``mets_doc`` with a schematron; return (is_valid, report tree).

    ``schematron`` may be a compiled Schematron or a path to one.
    """
    if isinstance(schematron, six.string_types):
        schematron = get_schematron(schematron)
    is_valid = schematron.validate(mets_doc)
    return is_valid, schematron.validation_report
def sct_report_string(report):
    """Render the schematron validator's SVRL report as readable text."""
    lines = []
    namespaces = {"svrl": "http://purl.oclc.org/dsdl/svrl"}
    failures = report.findall("svrl:failed-assert", namespaces=namespaces)
    for number, failed_assert_el in enumerate(failures, start=1):
        text_el = failed_assert_el.find("svrl:text", namespaces=namespaces)
        lines.append("{}. {}".format(number, text_el.text))
        lines.append("    test: {}".format(failed_assert_el.attrib["test"]))
        lines.append("    location: {}".format(failed_assert_el.attrib["location"]))
        lines.append("\n")
    return "\n".join(lines)
def xsd_error_log_string(xsd_error_log):
    """Return a human-readable string representation of the error log
    returned by lxml's XMLSchema validator.

    One line per entry, formatted as ``ERROR ON LINE <line>: <message>``.
    """
    lines = []
    for error in xsd_error_log:
        # BUGFIX: the original called ``error.message.encode("utf-8")`` inside
        # str.format, which on Python 3 renders the bytes repr (``b'...'``)
        # into the output. Format the text directly instead. (The encode was
        # presumably a Python 2 workaround — confirm if py2 support matters.)
        lines.append("ERROR ON LINE {}: {}".format(error.line, error.message))
    return "\n".join(lines)
def validate_arguments(self, view_class, kwargs):
    """view_class : View Class used to render content popup dialog
    view_class must be subclass of django.views.generic.View

    Pops ``popup_dialog_title`` and ``callback_data`` out of ``kwargs``,
    validates them, and stores them on the instance (``callback_data`` is
    stored URL-encoded).
    """
    # Reject anything that is not a Django View subclass.
    if not issubclass(view_class, View):
        raise PopupViewIsNotSubclassView()
    self.view_class_name = view_class.__name__
    self.popup_dialog_title = kwargs.pop(
        "popup_dialog_title", _("Popup Dialog: Select value")
    )
    self.callback_data = kwargs.pop("callback_data", {})
    if not isinstance(self.callback_data, dict):
        raise AttributeError("callback_data argument must be a dictionary")
    try:
        # Python 2: urlencode lives directly on the urllib module.
        self.callback_data = urllib.urlencode(self.callback_data)
    except AttributeError:
        # Python 3: urlencode moved to urllib.parse.
        self.callback_data = urllib.parse.urlencode(self.callback_data)
def get_view_url(self):
    """Return url for ajax to view for render dialog content.

    Appends the pre-encoded ``callback_data`` query string to the reversed
    popup-view endpoint.
    """
    base_url = reverse(
        "django_popup_view_field:get_popup_view",
        args=(self.view_class_name,),
    )
    return "%s?%s" % (base_url, self.callback_data)
def on_timer(self):
    """Executes flush(). Ignores any errors to make sure one exception
    doesn't halt the whole flushing process.

    The timer is always re-armed afterwards so periodic flushing continues
    even when a flush attempt fails.
    """
    try:
        self.flush()
    except Exception as exc:
        # Log and swallow: a single bad flush must not stop the loop.
        log.exception('Error while flushing: %s', exc)
    self._set_timer()
def timing_since(self, stat, start, sample_rate=1):
    """Log timing information as the number of microseconds since the provided time float
    >>> start = time.time()
    >>> # do stuff
    >>> statsd_client.timing_since('some.time', start)
    """
    # Elapsed wall-clock time since ``start``, converted to whole microseconds.
    elapsed_us = int((time.time() - start) * 1000000)
    self.timing(stat, elapsed_us, sample_rate)
def timing(self, stat, time, sample_rate=1):
    """Log timing information for a single stat
    >>> statsd_client.timing('some.time',500)
    """
    # Statsd timer wire format: "<value>|ms" keyed by stat name.
    self.send({stat: "%f|ms" % time}, sample_rate)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.