Search is not available for this dataset
text stringlengths 75 104k |
|---|
def handle_extracted_license(self, extr_lic):
    """
    Build and return an ExtractedLicense or None.
    Note that this function adds the license to the document.
    """
    extracted = self.parse_only_extr_license(extr_lic)
    if extracted is None:
        return None
    self.doc.add_extr_lic(extracted)
    return extracted
def _handle_license_list(self, lics_set, cls=None):
    """
    Return a license representing a `cls` object (LicenseConjunction
    or LicenseDisjunction) built from the set's member resources, or None.
    """
    collected = []
    members = self.graph.triples((lics_set, self.spdx_namespace['member'], None))
    for _, _, member in members:
        try:
            is_extracted = (member, RDF.type,
                            self.spdx_namespace['ExtractedLicensingInfo']) in self.graph
            if is_extracted:
                extracted = self.handle_extracted_license(member)
                if extracted is not None:
                    collected.append(extracted)
            else:
                collected.append(self.handle_lics(member))
        except CardinalityError:
            self.value_error('LICS_LIST_MEMBER', member)
            break
    # Fewer than two members cannot form a conjunction/disjunction.
    if len(collected) > 1:
        return reduce(lambda left, right: cls(left, right), collected)
    self.value_error('PKG_CONC_LIST', '')
    return None
def parse_package(self, p_term):
    """Parses package fields."""
    # A package without a name still gets a dummy entry so the remaining
    # fields can be parsed.
    if (p_term, self.spdx_namespace['name'], None) in self.graph:
        for _s, _p, pkg_name in self.graph.triples((p_term, self.spdx_namespace['name'], None)):
            try:
                self.builder.create_package(self.doc, six.text_type(pkg_name))
            except CardinalityError:
                self.more_than_one_error('Package name')
                break
    else:
        self.error = True
        self.logger.log('Package must have a name.')
        self.builder.create_package(self.doc, 'dummy_package')
    self.p_pkg_vinfo(p_term, self.spdx_namespace['versionInfo'])
    self.p_pkg_fname(p_term, self.spdx_namespace['packageFileName'])
    self.p_pkg_suppl(p_term, self.spdx_namespace['supplier'])
    self.p_pkg_originator(p_term, self.spdx_namespace['originator'])
    self.p_pkg_down_loc(p_term, self.spdx_namespace['downloadLocation'])
    self.p_pkg_homepg(p_term, self.doap_namespace['homepage'])
    self.p_pkg_chk_sum(p_term, self.spdx_namespace['checksum'])
    self.p_pkg_src_info(p_term, self.spdx_namespace['sourceInfo'])
    self.p_pkg_verif_code(p_term, self.spdx_namespace['packageVerificationCode'])
    self.p_pkg_lic_conc(p_term, self.spdx_namespace['licenseConcluded'])
    self.p_pkg_lic_decl(p_term, self.spdx_namespace['licenseDeclared'])
    self.p_pkg_lics_info_from_files(p_term, self.spdx_namespace['licenseInfoFromFiles'])
    self.p_pkg_comments_on_lics(p_term, self.spdx_namespace['licenseComments'])
    self.p_pkg_cr_text(p_term, self.spdx_namespace['copyrightText'])
    self.p_pkg_summary(p_term, self.spdx_namespace['summary'])
    self.p_pkg_descr(p_term, self.spdx_namespace['description'])
def handle_pkg_lic(self, p_term, predicate, builder_func):
    """Handles package lics concluded or declared."""
    conj_type = self.spdx_namespace['ConjunctiveLicenseSet']
    disj_type = self.spdx_namespace['DisjunctiveLicenseSet']
    try:
        for _, _, lic_node in self.graph.triples((p_term, predicate, None)):
            if (lic_node, RDF.type, conj_type) in self.graph:
                builder_func(self.doc, self.handle_conjunctive_list(lic_node))
            elif (lic_node, RDF.type, disj_type) in self.graph:
                builder_func(self.doc, self.handle_disjunctive_list(lic_node))
            else:
                # A plain (single) license resource.
                try:
                    builder_func(self.doc, self.handle_lics(lic_node))
                except SPDXValueError:
                    self.value_error('PKG_SINGLE_LICS', lic_node)
    except CardinalityError:
        self.more_than_one_error('package {0}'.format(predicate))
def get_file_name(self, f_term):
    """Returns first found fileName property or None if not found."""
    found = self.graph.triples((f_term, self.spdx_namespace['fileName'], None))
    return next((name for _, _, name in found), None)
def p_file_depends(self, f_term, predicate):
    """Sets file dependencies."""
    for _, _, other_file in self.graph.triples((f_term, predicate, None)):
        dep_name = self.get_file_name(other_file)
        if dep_name is None:
            self.error = True
            self.logger.log('File depends on file with no name')
        else:
            # NOTE(review): unlike the other p_file_* handlers this call does
            # not pass self.doc -- confirm add_file_dep's signature.
            self.builder.add_file_dep(six.text_type(dep_name))
def p_file_contributor(self, f_term, predicate):
    """
    Parse all file contributors and adds them to the model.
    """
    contributors = self.graph.triples((f_term, predicate, None))
    for _, _, who in contributors:
        self.builder.add_file_contribution(self.doc, six.text_type(who))
def p_file_notice(self, f_term, predicate):
    """Sets file notice text."""
    try:
        for _, _, notice_text in self.graph.triples((f_term, predicate, None)):
            self.builder.set_file_notice(self.doc, six.text_type(notice_text))
    except CardinalityError:
        # The builder enforces at most one notice per file.
        self.more_than_one_error('file notice')
def p_file_comment(self, f_term, predicate):
    """Sets file comment text."""
    try:
        for _, _, comment_text in self.graph.triples((f_term, predicate, None)):
            self.builder.set_file_comment(self.doc, six.text_type(comment_text))
    except CardinalityError:
        # The builder enforces at most one comment per file.
        self.more_than_one_error('file comment')
def p_file_artifact(self, f_term, predicate):
    """Handles file artifactOf.
    Note: does not handle artifact of project URI.
    """
    for _, _, project in self.graph.triples((f_term, predicate, None)):
        # Bug fix: the original tested the bare tuple, which is always
        # truthy, so the error branch was unreachable. The tuple must be
        # checked for membership in the graph (as the sibling handlers do).
        if (project, RDF.type, self.doap_namespace['Project']) in self.graph:
            self.p_file_project(project)
        else:
            self.error = True
            msg = 'File must be artifact of doap:Project'
            self.logger.log(msg)
def p_file_project(self, project):
    """Helper for parsing doap:project name and homepage and setting them
    via the file builder.
    """
    doap = self.doap_namespace
    for _, _, proj_name in self.graph.triples((project, doap['name'], None)):
        self.builder.set_file_atrificat_of_project(self.doc, 'name', six.text_type(proj_name))
    for _, _, proj_home in self.graph.triples((project, doap['homepage'], None)):
        self.builder.set_file_atrificat_of_project(self.doc, 'home', six.text_type(proj_home))
def p_file_cr_text(self, f_term, predicate):
    """Sets file copyright text."""
    try:
        for _, _, copyright_text in self.graph.triples((f_term, predicate, None)):
            self.builder.set_file_copyright(self.doc, six.text_type(copyright_text))
    except CardinalityError:
        self.more_than_one_error('file copyright text')
def p_file_comments_on_lics(self, f_term, predicate):
    """Sets file license comment."""
    try:
        for _, _, lic_comment in self.graph.triples((f_term, predicate, None)):
            self.builder.set_file_license_comment(self.doc, six.text_type(lic_comment))
    except CardinalityError:
        self.more_than_one_error('file comments on license')
def p_file_lic_info(self, f_term, predicate):
    """Sets file license information."""
    for _, _, info_node in self.graph.triples((f_term, predicate, None)):
        parsed = self.handle_lics(info_node)
        if parsed is not None:
            self.builder.set_file_license_in_file(self.doc, parsed)
def p_file_type(self, f_term, predicate):
    """Sets file type."""
    # Resource-URI suffix -> builder file-type token (checked in this order).
    SUFFIX_TO_TYPE = [
        ('binary', 'BINARY'),
        ('source', 'SOURCE'),
        ('other', 'OTHER'),
        ('archive', 'ARCHIVE'),
    ]
    try:
        for _, _, ftype in self.graph.triples((f_term, predicate, None)):
            try:
                for suffix, token in SUFFIX_TO_TYPE:
                    if ftype.endswith(suffix):
                        ftype = token
                        break
                self.builder.set_file_type(self.doc, ftype)
            except SPDXValueError:
                self.value_error('FILE_TYPE', ftype)
    except CardinalityError:
        self.more_than_one_error('file type')
def p_file_chk_sum(self, f_term, predicate):
    """Sets file checksum. Assumes SHA1 algorithm without checking."""
    try:
        value_pred = self.spdx_namespace['checksumValue']
        for _s, _p, chk_node in self.graph.triples((f_term, predicate, None)):
            for _, _, chk_value in self.graph.triples((chk_node, value_pred, None)):
                self.builder.set_file_chksum(self.doc, six.text_type(chk_value))
    except CardinalityError:
        self.more_than_one_error('File checksum')
def p_file_lic_conc(self, f_term, predicate):
    """Sets file licenses concluded."""
    conj_type = self.spdx_namespace['ConjunctiveLicenseSet']
    disj_type = self.spdx_namespace['DisjunctiveLicenseSet']
    try:
        for _, _, lic_node in self.graph.triples((f_term, predicate, None)):
            if (lic_node, RDF.type, conj_type) in self.graph:
                self.builder.set_concluded_license(
                    self.doc, self.handle_conjunctive_list(lic_node))
            elif (lic_node, RDF.type, disj_type) in self.graph:
                self.builder.set_concluded_license(
                    self.doc, self.handle_disjunctive_list(lic_node))
            else:
                # A plain (single) license resource.
                try:
                    self.builder.set_concluded_license(
                        self.doc, self.handle_lics(lic_node))
                except SPDXValueError:
                    self.value_error('FILE_SINGLE_LICS', lic_node)
    except CardinalityError:
        self.more_than_one_error('file {0}'.format(predicate))
def get_review_date(self, r_term):
    """Returns review date or None if not found.
    Reports error on failure.
    Note does not check value format.
    """
    reviewed_list = list(self.graph.triples(
        (r_term, self.spdx_namespace['reviewDate'], None)))
    if len(reviewed_list) != 1:
        self.error = True
        # Fixed message: was 'exactlyone' (missing space).
        msg = 'Review must have exactly one review date'
        self.logger.log(msg)
        return
    return six.text_type(reviewed_list[0][2])
def get_reviewer(self, r_term):
    """Returns reviewer as creator object or None if failed.
    Reports errors on failure.
    """
    found = list(self.graph.triples((r_term, self.spdx_namespace['reviewer'], None)))
    if len(found) != 1:
        self.error = True
        self.logger.log('Review must have exactly one reviewer')
        return
    try:
        return self.builder.create_entity(self.doc, six.text_type(found[0][2]))
    except SPDXValueError:
        self.value_error('REVIEWER_VALUE', found[0][2])
def get_annotation_type(self, r_term):
    """Returns annotation type or None if found none or more than one.
    Reports errors on failure."""
    typ_triples = list(self.graph.triples(
        (r_term, self.spdx_namespace['annotationType'], None)))
    # Fixed: the original only errored when a triple's object was None
    # (which triples() never yields) and silently returned None when no
    # annotationType was present, contradicting its own docstring. Enforce
    # exactly one, matching the other get_annotation_* helpers.
    if len(typ_triples) == 1:
        return typ_triples[0][2]
    self.error = True
    msg = 'Annotation must have exactly one annotation type.'
    self.logger.log(msg)
    return
def get_annotation_comment(self, r_term):
    """Returns annotation comment or None if found none or more than one.
    Reports errors.
    """
    comment_list = list(self.graph.triples((r_term, RDFS.comment, None)))
    if len(comment_list) > 1:
        self.error = True
        msg = 'Annotation can have at most one comment.'
        self.logger.log(msg)
        return
    if not comment_list:
        # Fixed: the original indexed comment_list[0] unconditionally and
        # raised IndexError when no comment was present; a comment is
        # optional, so just return None.
        return
    return six.text_type(comment_list[0][2])
def get_annotation_date(self, r_term):
    """Returns annotation date or None if not found.
    Reports error on failure.
    Note does not check value format.
    """
    date_triples = list(self.graph.triples(
        (r_term, self.spdx_namespace['annotationDate'], None)))
    if len(date_triples) == 1:
        return six.text_type(date_triples[0][2])
    self.error = True
    self.logger.log('Annotation must have exactly one annotation date.')
    return
def parse(self, fil):
    """Parses a file and returns a document object.
    File, a file like object.
    """
    self.error = False
    self.graph = Graph()
    self.graph.parse(file=fil, format='xml')
    self.doc = document.Document()
    # Dispatch nodes of each RDF type to the dedicated parser, in order.
    type_handlers = [
        (self.spdx_namespace['SpdxDocument'], self.parse_doc_fields),
        (self.spdx_namespace['ExternalDocumentRef'], self.parse_ext_doc_ref),
        (self.spdx_namespace['CreationInfo'], self.parse_creation_info),
        (self.spdx_namespace['Package'], self.parse_package),
    ]
    for rdf_type, handler in type_handlers:
        for subj, _p, _o in self.graph.triples((None, RDF.type, rdf_type)):
            handler(subj)
    # These entities are reached through a predicate rather than rdf:type.
    predicate_handlers = [
        (self.spdx_namespace['referencesFile'], self.parse_file),
        (self.spdx_namespace['reviewed'], self.parse_review),
        (self.spdx_namespace['annotation'], self.parse_annotation),
    ]
    for predicate, handler in predicate_handlers:
        for _s, _p, obj in self.graph.triples((None, predicate, None)):
            handler(obj)
    # Only surface validation messages when no parse error was already
    # reported, to avoid redundant output.
    validation_messages = self.doc.validate([])
    if not self.error and validation_messages:
        for msg in validation_messages:
            self.logger.log(msg)
        self.error = True
    return self.doc, self.error
def parse_creation_info(self, ci_term):
    """
    Parse creators, created and comment.
    """
    # Creators: any number allowed.
    for _s, _p, value in self.graph.triples((ci_term, self.spdx_namespace['creator'], None)):
        try:
            entity = self.builder.create_entity(self.doc, six.text_type(value))
            self.builder.add_creator(self.doc, entity)
        except SPDXValueError:
            self.value_error('CREATOR_VALUE', value)
    # Created date: at most one.
    for _s, _p, value in self.graph.triples((ci_term, self.spdx_namespace['created'], None)):
        try:
            self.builder.set_created_date(self.doc, six.text_type(value))
        except SPDXValueError:
            self.value_error('CREATED_VALUE', value)
        except CardinalityError:
            self.more_than_one_error('created')
            break
    # Comment: at most one.
    for _s, _p, value in self.graph.triples((ci_term, RDFS.comment, None)):
        try:
            self.builder.set_creation_comment(self.doc, six.text_type(value))
        except CardinalityError:
            self.more_than_one_error('CreationInfo comment')
            break
    # License list version: at most one, validated value.
    for _s, _p, value in self.graph.triples((ci_term, self.spdx_namespace['licenseListVersion'], None)):
        try:
            self.builder.set_lics_list_ver(self.doc, six.text_type(value))
        except CardinalityError:
            self.more_than_one_error('licenseListVersion')
            break
        except SPDXValueError:
            self.value_error('LL_VALUE', value)
def parse_doc_fields(self, doc_term):
    """Parses the version, data license, name, SPDX Identifier, namespace,
    and comment."""
    try:
        self.builder.set_doc_spdx_id(self.doc, doc_term)
    except SPDXValueError:
        self.value_error('DOC_SPDX_ID_VALUE', doc_term)
    # The namespace is everything before the (single) '#' in the URI.
    try:
        if doc_term.count('#', 0, len(doc_term)) <= 1:
            self.builder.set_doc_namespace(self.doc, doc_term.split('#')[0])
        else:
            self.value_error('DOC_NAMESPACE_VALUE', doc_term)
    except SPDXValueError:
        self.value_error('DOC_NAMESPACE_VALUE', doc_term)
    for _s, _p, value in self.graph.triples((doc_term, self.spdx_namespace['specVersion'], None)):
        try:
            self.builder.set_doc_version(self.doc, six.text_type(value))
        except SPDXValueError:
            self.value_error('DOC_VERS_VALUE', value)
        except CardinalityError:
            self.more_than_one_error('specVersion')
            break
    for _s, _p, value in self.graph.triples((doc_term, self.spdx_namespace['dataLicense'], None)):
        try:
            self.builder.set_doc_data_lic(self.doc, six.text_type(value))
        except SPDXValueError:
            self.value_error('DOC_D_LICS', value)
        except CardinalityError:
            self.more_than_one_error('dataLicense')
            break
    for _s, _p, value in self.graph.triples((doc_term, self.spdx_namespace['name'], None)):
        try:
            self.builder.set_doc_name(self.doc, six.text_type(value))
        except CardinalityError:
            self.more_than_one_error('name')
            break
    for _s, _p, value in self.graph.triples((doc_term, RDFS.comment, None)):
        try:
            self.builder.set_doc_comment(self.doc, six.text_type(value))
        except CardinalityError:
            self.more_than_one_error('Document comment')
            break
def parse_ext_doc_ref(self, ext_doc_ref_term):
    """
    Parses the External Document ID, SPDX Document URI and Checksum.
    """
    for _s, _p, value in self.graph.triples(
            (ext_doc_ref_term, self.spdx_namespace['externalDocumentId'], None)):
        try:
            self.builder.set_ext_doc_id(self.doc, six.text_type(value))
        except SPDXValueError:
            self.value_error('EXT_DOC_REF_VALUE', 'External Document ID')
            break
    for _s, _p, value in self.graph.triples(
            (ext_doc_ref_term, self.spdx_namespace['spdxDocument'], None)):
        try:
            self.builder.set_spdx_doc_uri(self.doc, six.text_type(value))
        except SPDXValueError:
            self.value_error('EXT_DOC_REF_VALUE', 'SPDX Document URI')
            break
    # The checksum value hangs off a nested checksum node.
    for _s, _p, chk_node in self.graph.triples(
            (ext_doc_ref_term, self.spdx_namespace['checksum'], None)):
        for _, _, chk_value in self.graph.triples(
                (chk_node, self.spdx_namespace['checksumValue'], None)):
            try:
                self.builder.set_chksum(self.doc, six.text_type(chk_value))
            except SPDXValueError:
                self.value_error('EXT_DOC_REF_VALUE', 'Checksum')
                break
def validate(self, messages):
    """
    Validate the package fields.
    Append user friendly error messages to the `messages` list.
    """
    # Thread the messages list through each validator in turn.
    validators = (
        self.validate_checksum,
        self.validate_optional_str_fields,
        self.validate_mandatory_str_fields,
        self.validate_files,
        self.validate_mandatory_fields,
        self.validate_optional_fields,
    )
    for check in validators:
        messages = check(messages)
    return messages
def validate_optional_str_fields(self, messages):
    """Fields marked as optional and of type string in class
    docstring must be of a type that provides __str__ method.
    """
    OPTIONAL_STR_FIELDS = [
        'file_name',
        'version',
        'homepage',
        'source_info',
        'summary',
        'description',
    ]
    return self.validate_str_fields(OPTIONAL_STR_FIELDS, True, messages)
def validate_mandatory_str_fields(self, messages):
    """Fields marked as Mandatory and of type string in class
    docstring must be of a type that provides __str__ method.
    """
    MANDATORY_STR_FIELDS = ['name', 'download_location', 'verif_code', 'cr_text']
    return self.validate_str_fields(MANDATORY_STR_FIELDS, False, messages)
def validate_str_fields(self, fields, optional, messages):
    """Helper for validate_mandatory_str_field and
    validate_optional_str_fields"""
    for field_name in fields:
        value = getattr(self, field_name)
        if value is None:
            # Missing values only matter for mandatory fields.
            if not optional:
                messages = messages + [
                    'Package {0} can not be None.'.format(field_name)
                ]
        else:
            # FIXME: this does not make sense???
            if not callable(getattr(value, '__str__', None)):
                messages = messages + [
                    '{0} must provide __str__ method.'.format(value)
                ]
    return messages
def set_doc_data_lic(self, doc, res):
    """
    Set the document data license.
    Raise exceptions:
    - SPDXValueError if malformed value,
    - CardinalityError if already defined.
    """
    if self.doc_data_lics_set:
        raise CardinalityError('Document::License')
    self.doc_data_lics_set = True
    # TODO: what is this split? str.split always yields at least one
    # element, so the SPDXValueError branch below appears unreachable.
    res_parts = res.split('/')
    if len(res_parts) != 0:
        identifier = res_parts[-1]
        doc.data_license = document.License.from_identifier(identifier)
    else:
        raise SPDXValueError('Document::License')
def set_doc_comment(self, doc, comment):
    """Sets document comment, Raises CardinalityError if
    comment already set.
    """
    if self.doc_comment_set:
        raise CardinalityError('Document::Comment')
    self.doc_comment_set = True
    doc.comment = comment
def set_chksum(self, doc, chk_sum):
    """
    Sets the external document reference's check sum, if not already set.
    chk_sum - The checksum value in the form of a string.
    """
    if not chk_sum:
        raise SPDXValueError('ExternalDocumentRef::Checksum')
    # The SHA1 algorithm is assumed, matching the rest of the builder.
    doc.ext_document_references[-1].check_sum = checksum.Algorithm(
        'SHA1', chk_sum)
def set_creation_comment(self, doc, comment):
    """Sets creation comment, Raises CardinalityError if
    comment already set.
    Raises SPDXValueError if not free form text.
    """
    if self.creation_comment_set:
        raise CardinalityError('CreationInfo::Comment')
    # NOTE(review): the docstring mentions SPDXValueError, but no free-form
    # text check is actually performed here -- confirm against callers.
    self.creation_comment_set = True
    doc.creation_info.comment = comment
    return True
def set_pkg_chk_sum(self, doc, chk_sum):
    """Sets the package check sum, if not already set.
    chk_sum - A string
    Raises CardinalityError if already defined.
    Raises OrderError if no package previously defined.
    """
    self.assert_package_exists()
    if self.package_chk_sum_set:
        raise CardinalityError('Package::CheckSum')
    self.package_chk_sum_set = True
    # SHA1 is assumed, consistent with the other checksum setters.
    doc.package.check_sum = checksum.Algorithm('SHA1', chk_sum)
def set_pkg_source_info(self, doc, text):
    """Sets the package's source information, if not already set.
    text - Free form text.
    Raises CardinalityError if already defined.
    Raises OrderError if no package previously defined.
    """
    self.assert_package_exists()
    if self.package_source_info_set:
        raise CardinalityError('Package::SourceInfo')
    self.package_source_info_set = True
    doc.package.source_info = text
    return True
def set_pkg_verif_code(self, doc, code):
    """Sets the package verification code, if not already set.
    code - A string.
    Raises CardinalityError if already defined.
    Raises OrderError if no package previously defined.
    """
    self.assert_package_exists()
    if self.package_verif_set:
        raise CardinalityError('Package::VerificationCode')
    self.package_verif_set = True
    doc.package.verif_code = code
def set_pkg_excl_file(self, doc, filename):
    """Sets the package's verification code excluded file.
    Raises OrderError if no package previously defined.
    """
    # Excluded files may be listed any number of times, so unlike the other
    # package setters there is no cardinality guard here.
    self.assert_package_exists()
    doc.package.add_exc_file(filename)
def set_pkg_license_comment(self, doc, text):
    """Sets the package's license comment.
    Raises OrderError if no package previously defined.
    Raises CardinalityError if already set.
    """
    self.assert_package_exists()
    if self.package_license_comment_set:
        raise CardinalityError('Package::LicenseComment')
    self.package_license_comment_set = True
    doc.package.license_comment = text
    return True
def set_pkg_cr_text(self, doc, text):
    """Sets the package's copyright text.
    Raises OrderError if no package previously defined.
    Raises CardinalityError if already set.
    """
    # Docstring fixed: it previously said "license comment", copied from
    # set_pkg_license_comment; this setter assigns the copyright text.
    self.assert_package_exists()
    if self.package_cr_text_set:
        raise CardinalityError('Package::CopyrightText')
    self.package_cr_text_set = True
    doc.package.cr_text = text
def set_pkg_summary(self, doc, text):
    """Sets the package summary.
    Raises CardinalityError if summary already set.
    Raises OrderError if no package previously defined.
    """
    self.assert_package_exists()
    if self.package_summary_set:
        raise CardinalityError('Package::Summary')
    self.package_summary_set = True
    doc.package.summary = text
def set_pkg_desc(self, doc, text):
    """Sets the package's description.
    Raises CardinalityError if description already set.
    Raises OrderError if no package previously defined.
    """
    self.assert_package_exists()
    if self.package_desc_set:
        raise CardinalityError('Package::Description')
    self.package_desc_set = True
    doc.package.description = text
def set_file_chksum(self, doc, chk_sum):
    """Sets the file check sum, if not already set.
    chk_sum - A string
    Raises CardinalityError if already defined.
    Raises OrderError if no package previously defined.
    """
    if not (self.has_package(doc) and self.has_file(doc)):
        raise OrderError('File::CheckSum')
    if self.file_chksum_set:
        raise CardinalityError('File::CheckSum')
    self.file_chksum_set = True
    # SHA1 is assumed without checking (see the RDF parser counterpart).
    self.file(doc).chk_sum = checksum.Algorithm('SHA1', chk_sum)
    return True
def set_file_license_comment(self, doc, text):
    """
    Raises OrderError if no package or file defined.
    Raises CardinalityError if more than one per file.
    """
    if not (self.has_package(doc) and self.has_file(doc)):
        raise OrderError('File::LicenseComment')
    if self.file_license_comment_set:
        raise CardinalityError('File::LicenseComment')
    self.file_license_comment_set = True
    self.file(doc).license_comment = text
    return True
def set_file_copyright(self, doc, text):
    """Raises OrderError if no package or file defined.
    Raises CardinalityError if more than one.
    """
    if not (self.has_package(doc) and self.has_file(doc)):
        raise OrderError('File::CopyRight')
    if self.file_copytext_set:
        raise CardinalityError('File::CopyRight')
    self.file_copytext_set = True
    self.file(doc).copyright = text
    return True
def set_file_comment(self, doc, text):
    """Raises OrderError if no package or no file defined.
    Raises CardinalityError if more than one comment set.
    """
    if not (self.has_package(doc) and self.has_file(doc)):
        raise OrderError('File::Comment')
    if self.file_comment_set:
        raise CardinalityError('File::Comment')
    self.file_comment_set = True
    self.file(doc).comment = text
    return True
def set_file_notice(self, doc, text):
    """Raises OrderError if no package or file defined.
    Raises CardinalityError if more than one.
    """
    if not (self.has_package(doc) and self.has_file(doc)):
        raise OrderError('File::Notice')
    if self.file_notice_set:
        raise CardinalityError('File::Notice')
    self.file_notice_set = True
    # Notice text arrives in tag/value <text> form and must be unwrapped.
    self.file(doc).notice = tagvaluebuilders.str_from_text(text)
    return True
def add_review_comment(self, doc, comment):
    """Sets the review comment. Raises CardinalityError if
    already set. OrderError if no reviewer defined before.
    """
    if not doc.reviews:
        raise OrderError('ReviewComment')
    if self.review_comment_set:
        raise CardinalityError('ReviewComment')
    self.review_comment_set = True
    # The comment belongs to the most recently added review.
    doc.reviews[-1].comment = comment
    return True
def add_annotation_comment(self, doc, comment):
    """Sets the annotation comment. Raises CardinalityError if
    already set. OrderError if no annotator defined before.
    """
    if not doc.annotations:
        raise OrderError('AnnotationComment')
    if self.annotation_comment_set:
        raise CardinalityError('AnnotationComment')
    self.annotation_comment_set = True
    # The comment belongs to the most recently added annotation.
    doc.annotations[-1].comment = comment
    return True
def add_annotation_type(self, doc, annotation_type):
    """Sets the annotation type. Raises CardinalityError if
    already set. OrderError if no annotator defined before.
    """
    if not doc.annotations:
        raise OrderError('Annotation::AnnotationType')
    if self.annotation_type_set:
        raise CardinalityError('Annotation::AnnotationType')
    # Map the RDF resource suffix onto the model's type token.
    if annotation_type.endswith('annotationType_other'):
        resolved = 'OTHER'
    elif annotation_type.endswith('annotationType_review'):
        resolved = 'REVIEW'
    else:
        raise SPDXValueError('Annotation::AnnotationType')
    self.annotation_type_set = True
    doc.annotations[-1].annotation_type = resolved
    return True
def validate(self, messages):
    """
    Validate all fields of the document and update the
    messages list with user friendly error messages for display.
    """
    # Thread the messages list through each field validator in turn.
    for check in (
        self.validate_version,
        self.validate_data_lics,
        self.validate_name,
        self.validate_spdx_id,
        self.validate_namespace,
        self.validate_ext_document_references,
        self.validate_creation_info,
        self.validate_package,
        self.validate_extracted_licenses,
        self.validate_reviews,
    ):
        messages = check(messages)
    return messages
def include(f):
    '''
    includes the contents of a file on disk.
    takes a filename
    '''
    # Use a context manager so the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(f, 'r') as fl:
        data = fl.read()
    return raw(data)
def system(cmd, data=None):
    '''
    pipes the output of a program
    '''
    import subprocess
    # NOTE: shell=True executes `cmd` through the shell; only call this with
    # trusted command strings.
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    out, _err = proc.communicate(data)
    return out.decode('utf8')
def escape(data, quote=True):  # stoled from std lib cgi
    '''
    Escapes special characters into their html entities
    Replace special characters "&", "<" and ">" to HTML-safe sequences.
    If the optional flag quote is true, the quotation mark character (")
    is also translated.
    This is used to escape content that appears in the body of an HTML document
    '''
    # '&' must be replaced first so later entities aren't double-escaped.
    replacements = [("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")]
    if quote:
        replacements.append(('"', "&quot;"))
    for plain, entity in replacements:
        data = data.replace(plain, entity)
    return data
def unescape(data):
    '''
    unescapes html entities. the opposite of escape.
    '''
    # Matches either a numeric entity (&#NN;) or a named one (&name;).
    entity_re = re.compile(r'&(?:(?:#(\d+))|([^;]+));')
    pieces = []
    match = entity_re.search(data)
    while match:
        pieces.append(data[:match.start()])
        numeric = match.group(1)
        if numeric:
            # py2-era unichr; named entities fall back to '?' when unknown.
            pieces.append(unichr(int(numeric)))
        else:
            pieces.append(unichr(_unescape.get(match.group(2), ord('?'))))
        data = data[match.end():]
        match = entity_re.search(data)
    pieces.append(data)
    return ''.join(pieces)
def attr(*args, **kwargs):
    '''
    Set attributes on the current active tag context
    '''
    ctx = dom_tag._with_contexts[_get_thread_context()]
    if not (ctx and ctx[-1]):
        raise ValueError('not in a tag context')
    # Positional dicts are applied first, then keyword arguments.
    for mapping in args + (kwargs,):
        for name, value in mapping.items():
            ctx[-1].tag.set_attribute(*dom_tag.clean_pair(name, value))
def set_attribute(self, key, value):
    '''
    Add or update the value of an attribute.
    '''
    if isinstance(key, int):
        # Integer keys index child tags rather than attributes.
        self.children[key] = value
        return
    if isinstance(key, basestring):
        self.attributes[key] = value
        return
    raise TypeError('Only integer and string types are valid for assigning '
                    'child tags and attributes, respectively.')
def setdocument(self, doc):
    '''
    Creates a reference to the parent document to allow for partial-tree
    validation.
    '''
    # assume that a document is correct in the subtree
    if self.document == doc:
        return
    self.document = doc
    for child in self.children:
        # NOTE(review): this bails out of the whole loop at the first
        # non-dom_tag child instead of skipping it -- confirm intended.
        if not isinstance(child, dom_tag):
            return
        child.setdocument(doc)
def add(self, *args):
    '''
    Add new child tags.
    '''
    for obj in args:
        if isinstance(obj, numbers.Number):
            # Numbers become strings so the string branch handles them.
            obj = str(obj)
        if isinstance(obj, basestring):
            self.children.append(escape(obj))
        elif isinstance(obj, dom_tag):
            ctx = dom_tag._with_contexts[_get_thread_context()]
            if ctx and ctx[-1]:
                ctx[-1].used.add(obj)
            self.children.append(obj)
            obj.parent = self
            obj.setdocument(self.document)
        elif isinstance(obj, dict):
            # Dicts set attributes on this tag instead of adding children.
            for key, value in obj.items():
                self.set_attribute(*dom_tag.clean_pair(key, value))
        elif hasattr(obj, '__iter__'):
            for item in obj:
                self.add(item)
        else:
            raise ValueError('%r not a tag or string.' % obj)
    # Mirror the input: a single argument is returned bare.
    return args[0] if len(args) == 1 else args
def get(self, tag=None, **kwargs):
    '''
    Recursively searches children for tags of a certain
    type with matching attributes.
    '''
    # Stupid workaround since we can not use dom_tag in the method declaration
    if tag is None:
        tag = dom_tag
    wanted = [(dom_tag.clean_attribute(name), value)
              for name, value in kwargs.items()]
    matches = []
    for child in self.children:
        # A string `tag` matches by class name; otherwise by isinstance.
        if isinstance(tag, basestring):
            type_ok = type(child).__name__ == tag
        else:
            type_ok = isinstance(child, tag)
        if type_ok and all(child.attributes.get(name) == value
                           for name, value in wanted):
            matches.append(child)
        if isinstance(child, dom_tag):
            # Extend the search down through the child's own subtree.
            matches.extend(child.get(tag, **kwargs))
    return matches
def clean_attribute(attribute):
    '''
    Normalize attribute names for shorthand and work arounds for limitations
    in Python's syntax
    '''
    # Shorthand aliases for common HTML attribute names.
    SHORTHAND = {
        'cls': 'class',
        'className': 'class',
        'class_name': 'class',
        'fr': 'for',
        'html_for': 'for',
        'htmlFor': 'for',
    }
    attribute = SHORTHAND.get(attribute, attribute)
    # Workaround for Python's reserved words: strip one leading underscore.
    if attribute[0] == '_':
        attribute = attribute[1:]
    # Workaround for dash (data-* and http-equiv style names).
    if attribute == 'http_equiv' or attribute.startswith('data_'):
        attribute = attribute.replace('_', '-').lower()
    # Workaround for colon (namespaced attributes).
    if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'):
        attribute = attribute.replace('_', ':', 1).lower()
    return attribute
def clean_pair(cls, attribute, value):
    '''
    This will call `clean_attribute` on the attribute and also allows for the
    creation of boolean attributes.
    Ex. input(selected=True) is equivalent to input(selected="selected")
    '''
    attribute = cls.clean_attribute(attribute)
    # Boolean attributes: True repeats the name, False becomes "false".
    if value is True:
        value = attribute
    elif value is False:
        value = "false"
    return (attribute, value)
def render(self, *args, **kwargs):
    '''
    Creates a <title> tag if not present and renders the DOCTYPE and tag tree.
    '''
    # Emit the doctype (when one was set) ahead of the rendered tree.
    parts = []
    if self.doctype:
        parts.append(self.doctype)
        parts.append('\n')
    parts.append(super(document, self).render(*args, **kwargs))
    return u''.join(parts)
def getElementById(self, id):
    '''
    DOM API: Returns single element with matching id value.
    '''
    results = self.get(id=id)
    if len(results) > 1:
        raise ValueError('Multiple tags with id "%s".' % id)
    return results[0] if results else None
def getElementsByTagName(self, name):
    '''
    DOM API: Returns all tags that match name.
    '''
    # Non-string input yields None rather than raising.
    if not isinstance(name, basestring):
        return None
    return self.get(name.lower())
def start(self):
    """Create the Interchange process and connect to it.
    """
    # ZMQ pipes: tasks flow out through outgoing_q, results return on
    # incoming_q; both bind on loopback within the configured port range.
    self.outgoing_q = zmq_pipes.TasksOutgoing(
        "127.0.0.1", self.interchange_port_range)
    self.incoming_q = zmq_pipes.ResultsIncoming(
        "127.0.0.1", self.interchange_port_range)
    self.is_alive = True
    self._queue_management_thread = None
    self._start_queue_management_thread()
    # Starts the interchange process; sets self.worker_task_url used below.
    self._start_local_queue_process()
    logger.debug("Created management thread: {}"
                 .format(self._queue_management_thread))
    if self.provider:
        # debug_opts = "--debug" if self.worker_debug else ""
        # Fill the worker launch-command template.
        l_cmd = self.launch_cmd.format( # debug=debug_opts,
            task_url=self.worker_task_url,
            workers_per_node=self.workers_per_node,
            logdir="{}/{}".format(self.run_dir, self.label))
        self.launch_cmd = l_cmd
        logger.debug("Launch command: {}".format(self.launch_cmd))
        self._scaling_enabled = self.provider.scaling_enabled
        logger.debug(
            "Starting LowLatencyExecutor with provider:\n%s", self.provider)
        if hasattr(self.provider, 'init_blocks'):
            try:
                # Provision the initial set of worker blocks.
                for i in range(self.provider.init_blocks):
                    block = self.provider.submit(
                        self.launch_cmd, 1, self.workers_per_node)
                    logger.debug("Launched block {}:{}".format(i, block))
                    if not block:
                        raise(ScalingFailed(self.provider.label,
                                            "Attempts to provision nodes via provider has failed"))
                    self.blocks.extend([block])
            except Exception as e:
                logger.error("Scaling out failed: {}".format(e))
                raise e
    else:
        # No provider configured: workers must be started externally.
        self._scaling_enabled = False
        logger.debug("Starting LowLatencyExecutor with no provider")
def _start_local_queue_process(self):
    """Start the interchange as a separate local process.

    Launches ``interchange.starter`` in a new Process and blocks (up to
    120s) waiting for the interchange to report the port workers should
    connect to, which is then recorded in ``self.worker_task_url``.

    Raises:
        Exception: if the interchange does not report its worker port
            within 120 seconds.
    """
    # Small bounded queue used only for the one-shot port handshake.
    comm_q = Queue(maxsize=10)
    self.queue_proc = Process(target=interchange.starter,
                              args=(comm_q,),
                              kwargs={"client_ports": (self.outgoing_q.port,
                                                       self.incoming_q.port),
                                      "worker_port": self.worker_port,
                                      "worker_port_range": self.worker_port_range
                                      # TODO: logdir and logging level
                                      })
    self.queue_proc.start()
    try:
        worker_port = comm_q.get(block=True, timeout=120)
        logger.debug(
            "Got worker port {} from interchange".format(worker_port))
    except queue.Empty:
        logger.error(
            "Interchange has not completed initialization in 120s. Aborting")
        raise Exception("Interchange failed to start")

    # Workers connect to this URL to pick up tasks.
    self.worker_task_url = "tcp://{}:{}".format(
        self.address, worker_port)
def _start_queue_management_thread(self):
    """Start the daemon thread that drains incoming results, unless one
    is already running."""
    if self._queue_management_thread is not None:
        logger.debug("Management thread already exists, returning")
        return

    logger.debug("Starting queue management thread")
    mgmt_thread = threading.Thread(target=self._queue_management_worker)
    mgmt_thread.daemon = True
    mgmt_thread.start()
    self._queue_management_thread = mgmt_thread
    logger.debug("Started queue management thread")
def _queue_management_worker(self):
    """Listen on the results queue and complete task futures.

    Runs in a daemon thread: pulls (task_id, buffer) pairs off
    ``incoming_q``, deserializes them, and resolves the corresponding
    future in ``self.tasks`` with either a result or an exception.

    Raises:
        BadMessage: if a deserialized message carries neither a
            "result" nor an "exception" key.
    """
    logger.debug("[MTHREAD] queue management worker starting")

    while True:
        task_id, buf = self.incoming_q.get()  # TODO: why does this hang?
        msg = deserialize_object(buf)[0]
        task_fut = self.tasks[task_id]
        logger.debug("Got response for task id {}".format(task_id))

        if "result" in msg:
            task_fut.set_result(msg["result"])

        elif "exception" in msg:
            # BUG FIX: a stub `elif "exception" in msg: pass` branch used
            # to shadow this one, so remote exceptions were silently
            # dropped and the task future never completed.
            logger.warning("Task: {} has returned with an exception".format(task_id))
            try:
                s, _ = deserialize_object(msg['exception'])
                exception = ValueError("Remote exception description: {}".format(s))
                task_fut.set_exception(exception)
            except Exception as e:
                # TODO could be a proper wrapped exception?
                task_fut.set_exception(
                    DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))

        else:
            raise BadMessage(
                "Message received is neither result nor exception")

        if not self.is_alive:
            break

    logger.info("[MTHREAD] queue management worker finished")
def scale_in(self, blocks):
    """Scale in the number of active blocks by specified amount.

    The scale in method here is very rude. It doesn't give the workers
    the opportunity to finish current tasks or cleanup. This is tracked
    in issue #530

    Args:
        blocks (int): number of blocks (taken from the head of
            ``self.blocks``) to cancel via the provider.

    Returns:
        The provider's ``cancel()`` result.

    NOTE(review): if ``self.provider`` is falsy, ``r`` is never bound and
    the final ``return r`` raises NameError — presumably this is only
    called when a provider exists; confirm.
    """
    to_kill = self.blocks[:blocks]
    if self.provider:
        r = self.provider.cancel(to_kill)
    return r
def create_reg_message(self):
    """Build the registration message identifying this worker to the
    interchange, as UTF-8 encoded JSON bytes."""
    reg_info = {
        'parsl_v': PARSL_VERSION,
        'python_v': "{}.{}.{}".format(sys.version_info.major,
                                      sys.version_info.minor,
                                      sys.version_info.micro),
        'os': platform.system(),
        'hname': platform.node(),
        'dir': os.getcwd(),
    }
    return json.dumps(reg_info).encode('utf-8')
def heartbeat(self):
    """Send a heartbeat to the incoming task queue.

    The payload is HEARTBEAT_CODE as 4 little-endian bytes, which
    distinguishes it from a task-request message.
    """
    payload = (HEARTBEAT_CODE).to_bytes(4, "little")
    status = self.task_incoming.send(payload)
    logger.debug("Return from heartbeat : {}".format(status))
def recv_result_from_workers(self):
    """Receive one result from the MPI worker pool.

    Returns:
        The task result object received from any worker rank.
    """
    status = MPI.Status()
    res = self.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=status)
    logger.debug("Received result from workers: {}".format(res))
    return res
def recv_task_request_from_workers(self):
    """Receive one task request from the MPI comm.

    Returns:
        worker_rank: rank id of the worker that requested a task.
    """
    info = MPI.Status()
    # Consistency fix: use self.comm (as recv_result_from_workers does)
    # instead of relying on a module-level `comm` global.
    self.comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
    worker_rank = info.Get_source()
    logger.info("Received task request from worker:{}".format(worker_rank))
    return worker_rank
def pull_tasks(self, kill_event):
    """ Pulls tasks from the incoming tasks 0mq pipe onto the internal
    pending task queue

    Parameters:
    -----------
    kill_event : threading.Event
          Event to let the thread know when it is time to die.
    """
    logger.info("[TASK PULL THREAD] starting")
    poller = zmq.Poller()
    poller.register(self.task_incoming, zmq.POLLIN)

    # Send a registration message
    msg = self.create_reg_message()
    logger.debug("Sending registration message: {}".format(msg))
    self.task_incoming.send(msg)
    last_beat = time.time()
    last_interchange_contact = time.time()
    task_recv_counter = 0

    # poll_timer (ms) doubles while the interchange is quiet, capped at
    # heartbeat_period below — simple exponential backoff.
    poll_timer = 1

    while not kill_event.is_set():
        time.sleep(LOOP_SLOWDOWN)
        ready_worker_count = self.ready_worker_queue.qsize()
        pending_task_count = self.pending_task_queue.qsize()

        logger.debug("[TASK_PULL_THREAD] ready workers:{}, pending tasks:{}".format(ready_worker_count,
                                                                                    pending_task_count))

        # Periodic heartbeat so the interchange knows we are alive.
        if time.time() > last_beat + self.heartbeat_period:
            self.heartbeat()
            last_beat = time.time()

        # Request as many tasks as we have idle workers, but only while
        # our internal queue is below max_queue_size.
        if pending_task_count < self.max_queue_size and ready_worker_count > 0:
            logger.debug("[TASK_PULL_THREAD] Requesting tasks: {}".format(ready_worker_count))
            msg = ((ready_worker_count).to_bytes(4, "little"))
            self.task_incoming.send(msg)

        socks = dict(poller.poll(timeout=poll_timer))

        if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN:
            _, pkl_msg = self.task_incoming.recv_multipart()
            tasks = pickle.loads(pkl_msg)
            last_interchange_contact = time.time()

            if tasks == 'STOP':
                logger.critical("[TASK_PULL_THREAD] Received stop request")
                kill_event.set()
                break

            elif tasks == HEARTBEAT_CODE:
                logger.debug("Got heartbeat from interchange")

            else:
                # Reset timer on receiving message
                poll_timer = 1
                task_recv_counter += len(tasks)
                logger.debug("[TASK_PULL_THREAD] Got tasks: {} of {}".format([t['task_id'] for t in tasks],
                                                                             task_recv_counter))
                for task in tasks:
                    self.pending_task_queue.put(task)
        else:
            logger.debug("[TASK_PULL_THREAD] No incoming tasks")
            # Limit poll duration to heartbeat_period
            # heartbeat_period is in s vs poll_timer in ms
            poll_timer = min(self.heartbeat_period * 1000, poll_timer * 2)

            # Only check if no messages were received.
            if time.time() > last_interchange_contact + self.heartbeat_threshold:
                logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold")
                kill_event.set()
                logger.critical("[TASK_PULL_THREAD] Exiting")
                break
def push_results(self, kill_event):
    """ Listens on the pending_result_queue and sends out results via 0mq

    Parameters:
    -----------
    kill_event : threading.Event
          Event to let the thread know when it is time to die.
    """
    # We set this timeout so that the thread checks the kill_event and does not
    # block forever on the internal result queue
    timeout = 0.1
    # timer = time.time()
    logger.debug("[RESULT_PUSH_THREAD] Starting thread")

    while not kill_event.is_set():
        time.sleep(LOOP_SLOWDOWN)
        try:
            # Drain everything currently queued and send it as a single
            # multipart message.
            items = []
            while not self.pending_result_queue.empty():
                r = self.pending_result_queue.get(block=True)
                items.append(r)
            if items:
                self.result_outgoing.send_multipart(items)
        # NOTE(review): queue.Empty looks unreachable here — get() is only
        # called after an empty() check and blocks; confirm whether
        # `timeout` above was meant to be passed to get().
        except queue.Empty:
            logger.debug("[RESULT_PUSH_THREAD] No results to send in past {}seconds".format(timeout))
        except Exception as e:
            logger.exception("[RESULT_PUSH_THREAD] Got an exception : {}".format(e))

    logger.critical("[RESULT_PUSH_THREAD] Exiting")
def start(self):
    """ Start the Manager process.

    Main loop: probes MPI for results and task requests from workers,
    forwards results onto the result-push thread's queue, and matches
    ready workers with pending tasks pulled by the task-pull thread.

    TODO: Move task receiving to a thread
    """
    # Wait until all MPI worker ranks reach this point before serving.
    self.comm.Barrier()
    logger.debug("Manager synced with workers")

    self._kill_event = threading.Event()
    self._task_puller_thread = threading.Thread(target=self.pull_tasks,
                                                args=(self._kill_event,))
    self._result_pusher_thread = threading.Thread(target=self.push_results,
                                                  args=(self._kill_event,))
    self._task_puller_thread.start()
    self._result_pusher_thread.start()

    start = None

    result_counter = 0
    task_recv_counter = 0
    task_sent_counter = 0

    logger.info("Loop start")
    while not self._kill_event.is_set():
        time.sleep(LOOP_SLOWDOWN)

        # In this block we attempt to probe MPI for a set amount of time,
        # and if we have exhausted all available MPI events, we move on
        # to the next block. The timer and counter trigger balance
        # fairness and responsiveness.
        timer = time.time() + 0.05
        counter = min(10, comm.size)
        while time.time() < timer:
            info = MPI.Status()

            if counter > 10:
                logger.debug("Hit max mpi events per round")
                break

            if not self.comm.Iprobe(status=info):
                logger.debug("Timer expired, processed {} mpi events".format(counter))
                break
            else:
                tag = info.Get_tag()
                logger.info("Message with tag {} received".format(tag))

                counter += 1
                if tag == RESULT_TAG:
                    result = self.recv_result_from_workers()
                    self.pending_result_queue.put(result)
                    result_counter += 1

                elif tag == TASK_REQUEST_TAG:
                    worker_rank = self.recv_task_request_from_workers()
                    self.ready_worker_queue.put(worker_rank)

                else:
                    logger.error("Unknown tag {} - ignoring this message and continuing".format(tag))

        available_worker_cnt = self.ready_worker_queue.qsize()
        available_task_cnt = self.pending_task_queue.qsize()
        logger.debug("[MAIN] Ready workers: {} Ready tasks: {}".format(available_worker_cnt,
                                                                       available_task_cnt))
        # Dispatch as many tasks as we can pair with ready workers.
        this_round = min(available_worker_cnt, available_task_cnt)
        for i in range(this_round):
            worker_rank = self.ready_worker_queue.get()
            task = self.pending_task_queue.get()
            # Each worker listens on a tag equal to its own rank.
            comm.send(task, dest=worker_rank, tag=worker_rank)
            task_sent_counter += 1
            logger.debug("Assigning worker:{} task:{}".format(worker_rank, task['task_id']))

        # `start` is first set on the first loop iteration; used for the
        # total-runtime report after the loop.
        if not start:
            start = time.time()

        logger.debug("Tasks recvd:{} Tasks dispatched:{} Results recvd:{}".format(
            task_recv_counter, task_sent_counter, result_counter))
        # print("[{}] Received: {}".format(self.identity, msg))
        # time.sleep(random.randint(4,10)/10)

    self._task_puller_thread.join()
    self._result_pusher_thread.join()

    self.task_incoming.close()
    self.result_outgoing.close()
    self.context.term()

    delta = time.time() - start
    logger.info("mpi_worker_pool ran for {} seconds".format(delta))
def async_process(fn):
    """Decorator that launches *fn* in a separate multiprocessing Process.

    The returned wrapper starts the process and hands the (already
    started) Process object back to the caller; the caller is
    responsible for joining it.
    """
    import functools  # local import keeps this fix self-contained

    @functools.wraps(fn)  # preserve fn's __name__/__doc__ on the wrapper
    def run(*args, **kwargs):
        proc = mp.Process(target=fn, args=args, kwargs=kwargs)
        proc.start()
        return proc
    return run
def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):
    """Send a UDP message to the usage tracker.

    This multiprocessing based messenger was written to overcome the limitations
    of signalling/terminating a thread that is blocked on a system call.

    Args:
        - domain_name (str) : Domain name string; when set, it is resolved
          and overrides UDP_IP.
        - UDP_IP (str) : IP address YYY.YYY.YYY.YYY
        - UDP_PORT (int) : UDP port to send out on
        - sock_timeout (int) : Socket timeout
        - message (str) : Message to send; encoded as UTF-8 on the wire.

    All failures are logged at debug level and swallowed — usage
    tracking must never break the caller.
    """
    try:
        if message is None:
            raise ValueError("message was none")

        encoded_message = bytes(message, "utf-8")

        if encoded_message is None:
            raise ValueError("utf-8 encoding of message failed")

        if domain_name:
            try:
                UDP_IP = socket.gethostbyname(domain_name)
            except Exception:
                # Lookup failed; fall back to the supplied UDP_IP.
                pass

        if UDP_IP is None:
            raise Exception("UDP_IP is None")

        if UDP_PORT is None:
            raise Exception("UDP_PORT is None")

        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP
        try:
            sock.settimeout(sock_timeout)
            # BUG FIX: reuse the already-encoded payload instead of
            # encoding the message a second time.
            sock.sendto(encoded_message, (UDP_IP, UDP_PORT))
        finally:
            # BUG FIX: always release the socket, even when settimeout or
            # sendto raises (previously leaked on that path).
            sock.close()

    except socket.timeout:
        logger.debug("Failed to send usage tracking data: socket timeout")
    except OSError as e:
        logger.debug("Failed to send usage tracking data: OSError: {}".format(e))
    except Exception as e:
        logger.debug("Failed to send usage tracking data: Exception: {}".format(e))
def check_tracking_enabled(self):
    """By default tracking is enabled.

    If Test mode is set via env variable PARSL_TESTING, a test flag is set

    Tracking is disabled if :
        1. config["globals"]["usageTracking"] is set to False (Bool)
        2. Environment variable PARSL_TRACKING is set to false (case insensitive)

    Returns:
        (test, track) pair of booleans.
    """
    test = str(os.environ.get("PARSL_TESTING", 'None')).lower() == 'true'

    track = bool(self.config.usage_tracking)
    if str(os.environ.get("PARSL_TRACKING", True)).lower() == "false":
        track = False

    return test, track
def construct_start_message(self):
    """Collect preliminary run info at the start of the DFK.

    Returns :
        - Message dict dumped as json string, ready for UDP
    """
    # User and host names are one-way hashed and truncated so no PII
    # leaves the machine.
    hashed_username = hashlib.sha256(getpass.getuser().encode('latin1')).hexdigest()[0:10]
    hashed_hostname = hashlib.sha256(socket.gethostname().encode('latin1')).hexdigest()[0:10]
    payload = {
        'uuid': self.uuid,
        'uname': hashed_username,
        'hname': hashed_hostname,
        'test': self.test_mode,
        'parsl_v': self.parsl_version,
        'python_v': self.python_version,
        'os': platform.system(),
        'os_v': platform.release(),
        'start': time.time(),
    }
    return json.dumps(payload)
def construct_end_message(self):
    """Collect the final run information at the time of DFK cleanup.

    Returns:
        - Message dict dumped as json string, ready for UDP
    """
    tasks = self.dfk.tasks
    failed = sum(1 for t in tasks
                 if tasks[t]['status'] in FINAL_FAILURE_STATES)
    managed_sites = sum(1 for x in self.dfk.config.executors if x.managed)

    payload = {
        'uuid': self.uuid,
        'end': time.time(),
        't_apps': self.dfk.task_count,
        'sites': managed_sites,
        'c_time': None,
        'failed': failed,
        'test': self.test_mode,
    }
    return json.dumps(payload)
def send_UDP_message(self, message):
    """Send UDP message.

    Returns 0 when tracking is enabled (send attempted, failures are
    logged), -1 when tracking is disabled.
    """
    if not self.tracking_enabled:
        return -1

    status = 0
    try:
        proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)
        self.procs.append(proc)
    except Exception as e:
        logger.debug("Usage tracking failed: {}".format(e))
    return status
def send_message(self):
    """Send message over UDP.

    The first call sends the start message and flips ``initialized``;
    subsequent calls send the end message.

    Returns:
        Time taken (seconds) to build and dispatch the message.
    """
    begin = time.time()
    if self.initialized:
        message = self.construct_end_message()
    else:
        message = self.construct_start_message()
        self.initialized = True
    self.send_UDP_message(message)
    return time.time() - begin
def set_file_logger(filename: str, name: str = 'parsl', level: int = logging.DEBUG, format_string: Optional[str] = None):
    """Add a file log handler.

    Args:
        - filename (string): Name of the file to write logs to
        - name (string): Logger name
        - level (logging.LEVEL): Set the logging level.
        - format_string (string): Set the format string

    Returns:
        - None
    """
    if format_string is None:
        format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"

    handler = logging.FileHandler(filename)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S'))

    # The named logger itself stays at DEBUG; the handler level filters.
    target = logging.getLogger(name)
    target.setLevel(logging.DEBUG)
    target.addHandler(handler)

    # see note in set_stream_logger for notes about logging
    # concurrent.futures
    logging.getLogger("concurrent.futures").addHandler(handler)
def start_file_logger(filename, name='database_manager', level=logging.DEBUG, format_string=None):
    """Add a file log handler and bind it to the module-level ``logger``.

    Parameters
    ---------
    filename: string
        Name of the file to write logs to. Required.
    name: string
        Logger name.
    level: logging.LEVEL
        Set the logging level. Default=logging.DEBUG
    format_string: string
        Format string to use.

    Returns
    -------
    The configured logger.
    """
    global logger
    if format_string is None:
        format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s"

    file_handler = logging.FileHandler(filename)
    file_handler.setLevel(level)
    file_handler.setFormatter(logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S'))

    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(file_handler)
    return logger
def dbm_starter(priority_msgs, resource_msgs, *args, **kwargs):
    """Start the database manager process

    The DFK should start this function. The args, kwargs match that of the monitoring config
    """
    manager = DatabaseManager(*args, **kwargs)
    manager.start(priority_msgs, resource_msgs)
def start(self, priority_queue, resource_queue):
    """Main loop of the database manager.

    Two puller threads migrate monitoring messages from the
    cross-process queues onto internal pending_* queues; this loop then
    batches them into database inserts/updates until a kill is
    requested AND every queue has drained.
    """
    self._kill_event = threading.Event()
    self._priority_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
                                                        args=(
                                                            priority_queue, 'priority', self._kill_event,)
                                                        )
    self._priority_queue_pull_thread.start()

    self._resource_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
                                                        args=(
                                                            resource_queue, 'resource', self._kill_event,)
                                                        )
    self._resource_queue_pull_thread.start()

    """
    maintain a set to track the tasks that are already INSERTED into database
    to prevent race condition that the first resource message (indicate 'running' state)
    arrives before the first task message.
    If race condition happens, add to left_messages and operate them later
    """
    inserted_tasks = set()
    left_messages = {}

    while (not self._kill_event.is_set() or
           self.pending_priority_queue.qsize() != 0 or self.pending_resource_queue.qsize() != 0 or
           priority_queue.qsize() != 0 or resource_queue.qsize() != 0):

        """
        WORKFLOW_INFO and TASK_INFO messages
        """
        self.logger.debug("""Checking STOP conditions: {}, {}, {}, {}, {}""".format(
                          self._kill_event.is_set(),
                          self.pending_priority_queue.qsize() != 0, self.pending_resource_queue.qsize() != 0,
                          priority_queue.qsize() != 0, resource_queue.qsize() != 0))

        # This is the list of first resource messages indicating that task starts running
        first_messages = []

        # Get a batch of priority messages
        messages = self._get_messages_in_batch(self.pending_priority_queue,
                                               interval=self.batching_interval,
                                               threshold=self.batching_threshold)
        if messages:
            self.logger.debug(
                "Got {} messages from priority queue".format(len(messages)))
            update_messages, insert_messages, all_messages = [], [], []
            for msg_type, msg in messages:
                if msg_type.value == MessageType.WORKFLOW_INFO.value:
                    if "python_version" in msg:  # workflow start message
                        self.logger.debug(
                            "Inserting workflow start info to WORKFLOW table")
                        self._insert(table=WORKFLOW, messages=[msg])
                    else:  # workflow end message
                        self.logger.debug(
                            "Updating workflow end info to WORKFLOW table")
                        self._update(table=WORKFLOW,
                                     columns=['run_id', 'tasks_failed_count',
                                              'tasks_completed_count', 'time_completed',
                                              'workflow_duration'],
                                     messages=[msg])
                else:  # TASK_INFO message
                    all_messages.append(msg)
                    # task_time_returned set => task finished: update row;
                    # otherwise this is the task's first record: insert.
                    if msg['task_time_returned'] is not None:
                        update_messages.append(msg)
                    else:
                        inserted_tasks.add(msg['task_id'])
                        insert_messages.append(msg)

                        # check if there is an left_message for this task
                        if msg['task_id'] in left_messages:
                            first_messages.append(
                                left_messages.pop(msg['task_id']))

            self.logger.debug(
                "Updating and inserting TASK_INFO to all tables")
            self._update(table=WORKFLOW,
                         columns=['run_id', 'tasks_failed_count',
                                  'tasks_completed_count'],
                         messages=update_messages)

            if insert_messages:
                self._insert(table=TASK, messages=insert_messages)
                self.logger.debug(
                    "There are {} inserted task records".format(len(inserted_tasks)))
            if update_messages:
                self._update(table=TASK,
                             columns=['task_time_returned',
                                      'task_elapsed_time', 'run_id', 'task_id'],
                             messages=update_messages)
            self._insert(table=STATUS, messages=all_messages)

        """
        RESOURCE_INFO messages
        """
        messages = self._get_messages_in_batch(self.pending_resource_queue,
                                               interval=self.batching_interval,
                                               threshold=self.batching_threshold)

        if messages or first_messages:
            self.logger.debug(
                "Got {} messages from resource queue".format(len(messages)))
            self._insert(table=RESOURCE, messages=messages)
            for msg in messages:
                if msg['first_msg']:
                    # First resource message marks the task as running.
                    msg['task_status_name'] = States.running.name
                    msg['task_time_running'] = msg['timestamp']
                    if msg['task_id'] in inserted_tasks:
                        first_messages.append(msg)
                    else:
                        # Task row not inserted yet (race described above);
                        # park the message until its TASK_INFO arrives.
                        left_messages[msg['task_id']] = msg

            if first_messages:
                self._insert(table=STATUS, messages=first_messages)
                self._update(table=TASK,
                             columns=['task_time_running',
                                      'run_id', 'task_id'],
                             messages=first_messages)
def _create_task_log_info(self, task_id, fail_mode=None):
"""
Create the dictionary that will be included in the log.
"""
info_to_monitor = ['func_name', 'fn_hash', 'memoize', 'checkpoint', 'fail_count',
'fail_history', 'status', 'id', 'time_submitted', 'time_returned', 'executor']
task_log_info = {"task_" + k: self.tasks[task_id][k] for k in info_to_monitor}
task_log_info['run_id'] = self.run_id
task_log_info['timestamp'] = datetime.datetime.now()
task_log_info['task_status_name'] = self.tasks[task_id]['status'].name
task_log_info['tasks_failed_count'] = self.tasks_failed_count
task_log_info['tasks_completed_count'] = self.tasks_completed_count
task_log_info['task_inputs'] = str(self.tasks[task_id]['kwargs'].get('inputs', None))
task_log_info['task_outputs'] = str(self.tasks[task_id]['kwargs'].get('outputs', None))
task_log_info['task_stdin'] = self.tasks[task_id]['kwargs'].get('stdin', None)
task_log_info['task_stdout'] = self.tasks[task_id]['kwargs'].get('stdout', None)
task_log_info['task_depends'] = None
if self.tasks[task_id]['depends'] is not None:
task_log_info['task_depends'] = ",".join([str(t._tid) for t in self.tasks[task_id]['depends']])
task_log_info['task_elapsed_time'] = None
if self.tasks[task_id]['time_returned'] is not None:
task_log_info['task_elapsed_time'] = (self.tasks[task_id]['time_returned'] -
self.tasks[task_id]['time_submitted']).total_seconds()
if fail_mode is not None:
task_log_info['task_fail_mode'] = fail_mode
return task_log_info |
def _count_deps(self, depends):
"""Internal.
Count the number of unresolved futures in the list depends.
"""
count = 0
for dep in depends:
if isinstance(dep, Future):
if not dep.done():
count += 1
return count |
def handle_exec_update(self, task_id, future):
    """This function is called only as a callback from an execution
    attempt reaching a final state (either successfully or failing).

    It will launch retries if necessary, and update the task
    structure.

    Args:
        task_id (string) : Task id which is a uuid string
        future (Future) : The future object corresponding to the task which
             makes this callback
    """
    try:
        res = future.result()
        if isinstance(res, RemoteExceptionWrapper):
            res.reraise()

    except Exception:
        logger.exception("Task {} failed".format(task_id))

        # We keep the history separately, since the future itself could be
        # tossed.
        self.tasks[task_id]['fail_history'].append(future._exception)
        self.tasks[task_id]['fail_count'] += 1

        if not self._config.lazy_errors:
            logger.debug("Eager fail, skipping retry logic")
            self.tasks[task_id]['status'] = States.failed
            if self.monitoring:
                task_log_info = self._create_task_log_info(task_id, 'eager')
                self.monitoring.send(MessageType.TASK_INFO, task_log_info)
            return

        if self.tasks[task_id]['fail_count'] <= self._config.retries:
            # Still within the retry budget: back to pending so the
            # relaunch check at the bottom can pick it up.
            self.tasks[task_id]['status'] = States.pending
            logger.debug("Task {} marked for retry".format(task_id))

        else:
            logger.info("Task {} failed after {} retry attempts".format(task_id,
                                                                        self._config.retries))
            self.tasks[task_id]['status'] = States.failed
            self.tasks_failed_count += 1
            self.tasks[task_id]['time_returned'] = datetime.datetime.now()

    else:
        # Execution succeeded.
        self.tasks[task_id]['status'] = States.done
        self.tasks_completed_count += 1

        logger.info("Task {} completed".format(task_id))
        self.tasks[task_id]['time_returned'] = datetime.datetime.now()

    if self.monitoring:
        task_log_info = self._create_task_log_info(task_id, 'lazy')
        self.monitoring.send(MessageType.TASK_INFO, task_log_info)

    # it might be that in the course of the update, we've gone back to being
    # pending - in which case, we should consider ourself for relaunch
    if self.tasks[task_id]['status'] == States.pending:
        self.launch_if_ready(task_id)

    return
def handle_app_update(self, task_id, future, memo_cbk=False):
    """This function is called as a callback when an AppFuture
    is in its final state.

    It will trigger post-app processing such as checkpointing
    and stageout.

    Args:
        task_id (string) : Task id
        future (Future) : The relevant app future (which should be
            consistent with the task structure 'app_fu' entry

    KWargs:
        memo_cbk(Bool) : Indicates that the call is coming from a memo update,
            that does not require additional memo updates.
    """
    # Sanity checks only — inconsistencies are logged, not raised.
    if not self.tasks[task_id]['app_fu'].done():
        logger.error("Internal consistency error: app_fu is not done for task {}".format(task_id))
    if not self.tasks[task_id]['app_fu'] == future:
        logger.error("Internal consistency error: callback future is not the app_fu in task structure, for task {}".format(task_id))

    if not memo_cbk:
        # Update the memoizer with the new result if this is not a
        # result from a memo lookup and the task has reached a terminal state.
        self.memoizer.update_memo(task_id, self.tasks[task_id], future)

        if self.checkpoint_mode == 'task_exit':
            self.checkpoint(tasks=[task_id])

    # Submit _*_stage_out tasks for output data futures that correspond with remote files
    # (only when the app succeeded, and never for data-manager tasks or
    # the stage-in helper apps themselves).
    if (self.tasks[task_id]['app_fu'] and
            self.tasks[task_id]['app_fu'].done() and
            self.tasks[task_id]['app_fu'].exception() is None and
            self.tasks[task_id]['executor'] != 'data_manager' and
            self.tasks[task_id]['func_name'] != '_ftp_stage_in' and
            self.tasks[task_id]['func_name'] != '_http_stage_in'):
        for dfu in self.tasks[task_id]['app_fu'].outputs:
            f = dfu.file_obj
            if isinstance(f, File) and f.is_remote():
                self.data_manager.stage_out(f, self.tasks[task_id]['executor'])

    return
def launch_if_ready(self, task_id):
    """
    launch_if_ready will launch the specified task, if it is ready
    to run (for example, without dependencies, and in pending state).

    This should be called by any piece of the DataFlowKernel that
    thinks a task may have become ready to run.

    It is not an error to call launch_if_ready on a task that is not
    ready to run - launch_if_ready will not incorrectly launch that
    task.

    launch_if_ready is thread safe, so may be called from any thread
    or callback.
    """
    if self._count_deps(self.tasks[task_id]['depends']) == 0:

        # We can now launch *task*
        new_args, kwargs, exceptions = self.sanitize_and_wrap(task_id,
                                                              self.tasks[task_id]['args'],
                                                              self.tasks[task_id]['kwargs'])
        self.tasks[task_id]['args'] = new_args
        self.tasks[task_id]['kwargs'] = kwargs
        if not exceptions:
            # There are no dependency errors
            exec_fu = None
            # Acquire a lock, retest the state, launch
            with self.tasks[task_id]['task_launch_lock']:
                if self.tasks[task_id]['status'] == States.pending:
                    exec_fu = self.launch_task(
                        task_id, self.tasks[task_id]['func'], *new_args, **kwargs)

            if exec_fu:
                try:
                    exec_fu.add_done_callback(partial(self.handle_exec_update, task_id))
                except Exception as e:
                    # NOTE(review): callback-registration failures are
                    # swallowed; the task future may then never be updated.
                    logger.error("add_done_callback got an exception {} which will be ignored".format(e))

                self.tasks[task_id]['exec_fu'] = exec_fu
                try:
                    self.tasks[task_id]['app_fu'].update_parent(exec_fu)
                    self.tasks[task_id]['exec_fu'] = exec_fu
                except AttributeError as e:
                    logger.error(
                        "Task {}: Caught AttributeError at update_parent".format(task_id))
                    raise e
        else:
            logger.info(
                "Task {} failed due to dependency failure".format(task_id))
            # Raise a dependency exception
            self.tasks[task_id]['status'] = States.dep_fail
            if self.monitoring is not None:
                task_log_info = self._create_task_log_info(task_id, 'lazy')
                self.monitoring.send(MessageType.TASK_INFO, task_log_info)

            try:
                # Complete the app future with a DependencyError instead
                # of launching.
                fu = Future()
                fu.retries_left = 0
                self.tasks[task_id]['exec_fu'] = fu
                self.tasks[task_id]['app_fu'].update_parent(fu)
                fu.set_exception(DependencyError(exceptions,
                                                 task_id,
                                                 None))

            except AttributeError as e:
                logger.error(
                    "Task {} AttributeError at update_parent".format(task_id))
                raise e
def launch_task(self, task_id, executable, *args, **kwargs):
    """Handle the actual submission of the task to the executor layer.

    If the app task has the executors attributes not set (default=='all')
    the task is launched on a randomly selected executor from the
    list of executors. This behavior could later be updated to support
    binding to executors based on user specified criteria.

    If the app task specifies a particular set of executors, it will be
    targeted at those specific executors.

    Args:
        task_id (uuid string) : A uuid string that uniquely identifies the task
        executable (callable) : A callable object
        args (list of positional args)
        kwargs (arbitrary keyword arguments)

    Returns:
        Future that tracks the execution of the submitted executable
    """
    self.tasks[task_id]['time_submitted'] = datetime.datetime.now()

    # Memoization: short-circuit with the cached future on a hit.
    hit, memo_fu = self.memoizer.check_memo(task_id, self.tasks[task_id])
    if hit:
        logger.info("Reusing cached result for task {}".format(task_id))
        return memo_fu

    executor_label = self.tasks[task_id]["executor"]
    try:
        executor = self.executors[executor_label]
    except Exception:
        # NOTE(review): this only logs — `executor` is left unbound, so
        # the submit below would raise NameError. Confirm intended.
        logger.exception("Task {} requested invalid executor {}: config is\n{}".format(task_id, executor_label, self._config))

    # Wrap the executable for resource monitoring when enabled.
    if self.monitoring is not None and self.monitoring.resource_monitoring_enabled:
        executable = self.monitoring.monitor_wrapper(executable, task_id,
                                                     self.monitoring.monitoring_hub_url,
                                                     self.run_id,
                                                     self.monitoring.resource_monitoring_interval)

    with self.submitter_lock:
        exec_fu = executor.submit(executable, *args, **kwargs)
    self.tasks[task_id]['status'] = States.launched
    if self.monitoring is not None:
        task_log_info = self._create_task_log_info(task_id, 'lazy')
        self.monitoring.send(MessageType.TASK_INFO, task_log_info)

    # Remaining retry budget travels with the future.
    exec_fu.retries_left = self._config.retries - \
        self.tasks[task_id]['fail_count']
    logger.info("Task {} launched on executor {}".format(task_id, executor.label))
    return exec_fu
def _add_input_deps(self, executor, args, kwargs):
"""Look for inputs of the app that are remote files. Submit stage_in
apps for such files and replace the file objects in the inputs list with
corresponding DataFuture objects.
Args:
- executor (str) : executor where the app is going to be launched
- args (List) : Positional args to app function
- kwargs (Dict) : Kwargs to app function
"""
# Return if the task is _*_stage_in
if executor == 'data_manager':
return args, kwargs
inputs = kwargs.get('inputs', [])
for idx, f in enumerate(inputs):
if isinstance(f, File) and f.is_remote():
inputs[idx] = self.data_manager.stage_in(f, executor)
for kwarg, f in kwargs.items():
if isinstance(f, File) and f.is_remote():
kwargs[kwarg] = self.data_manager.stage_in(f, executor)
newargs = list(args)
for idx, f in enumerate(newargs):
if isinstance(f, File) and f.is_remote():
newargs[idx] = self.data_manager.stage_in(f, executor)
return tuple(newargs), kwargs |
def _gather_all_deps(self, args, kwargs):
"""Count the number of unresolved futures on which a task depends.
Args:
- args (List[args]) : The list of args list to the fn
- kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn
Returns:
- count, [list of dependencies]
"""
# Check the positional args
depends = []
count = 0
for dep in args:
if isinstance(dep, Future):
if self.tasks[dep.tid]['status'] not in FINAL_STATES:
count += 1
depends.extend([dep])
# Check for explicit kwargs ex, fu_1=<fut>
for key in kwargs:
dep = kwargs[key]
if isinstance(dep, Future):
if self.tasks[dep.tid]['status'] not in FINAL_STATES:
count += 1
depends.extend([dep])
# Check for futures in inputs=[<fut>...]
for dep in kwargs.get('inputs', []):
if isinstance(dep, Future):
if self.tasks[dep.tid]['status'] not in FINAL_STATES:
count += 1
depends.extend([dep])
return count, depends |
def sanitize_and_wrap(self, task_id, args, kwargs):
    """Resolve all top-level futures in args, kwargs and kwargs['inputs'].

    This should be called **ONLY** when all the futures we track have been
    resolved. If the user hid futures a level below, we will not catch
    them, and that will (most likely) result in a type error later.

    Args:
        task_id (uuid str) : Task id
        args (List) : Positional args to app function
        kwargs (Dict) : Kwargs to app function

    Return:
        (new_args, kwargs, dep_failures) : args/kwargs with each top-level
        future replaced by its result, plus the list of exceptions from
        dependencies that ended in a final failure state.
    """
    dep_failures = []
    # Sentinel that distinguishes "the future raised" from any real result.
    _failed = object()

    def unwrap(fut):
        # Return fut's result; on error, record the exception (only when
        # the dependency's task reached a final failure state) and return
        # the sentinel instead.
        try:
            return fut.result()
        except Exception as e:
            if self.tasks[fut.tid]['status'] in FINAL_FAILURE_STATES:
                dep_failures.append(e)
            return _failed

    # Positional args: a future that raised is dropped from the list.
    new_args = []
    for arg in args:
        if not isinstance(arg, Future):
            new_args.append(arg)
        else:
            value = unwrap(arg)
            if value is not _failed:
                new_args.append(value)

    # Explicit kwargs, e.g. fu_1=<fut>: a future that raised stays in
    # place unresolved.
    for key, value in list(kwargs.items()):
        if isinstance(value, Future):
            resolved = unwrap(value)
            if resolved is not _failed:
                kwargs[key] = resolved

    # Futures inside inputs=[<fut>, ...]: a future that raised is dropped.
    if 'inputs' in kwargs:
        resolved_inputs = []
        for item in kwargs['inputs']:
            if not isinstance(item, Future):
                resolved_inputs.append(item)
            else:
                value = unwrap(item)
                if value is not _failed:
                    resolved_inputs.append(value)
        kwargs['inputs'] = resolved_inputs

    return new_args, kwargs, dep_failures
def submit(self, func, *args, executors='all', fn_hash=None, cache=False, **kwargs):
    """Add task to the dataflow system.

    If the app task has the executors attributes not set (default=='all')
    the task will be launched on a randomly selected executor from the
    list of executors. If the app task specifies a particular set of
    executors, it will be targeted at the specified executors.

    >>> IF all deps are met:
    >>>   send to the runnable queue and launch the task
    >>> ELSE:
    >>>   post the task in the pending queue

    Args:
        - func : A function object
        - *args : Args to the function

    KWargs :
        - executors (list or string) : List of executors this call could go to.
                Default='all'
        - fn_hash (Str) : Hash of the function and inputs
                Default=None
        - cache (Bool) : To enable memoization or not
        - kwargs (dict) : Rest of the kwargs to the fn passed as dict.

    Returns:
        (AppFuture) [DataFutures,]

    Raises:
        ValueError : if the DFK has already been cleaned up
        DuplicateTaskError : if the freshly allocated task id already exists
    """
    # Refuse new work after cleanup(): executors may already be shut down.
    if self.cleanup_called:
        raise ValueError("Cannot submit to a DFK that has been cleaned up")

    # Allocate the next sequential task id. NOTE(review): this read/increment
    # is not atomic; presumably submit() is only called under the GIL from
    # one thread at a time -- confirm before relying on concurrent submits.
    task_id = self.task_count
    self.task_count += 1

    # Pick the executor: either any non-data_manager executor ('all') or one
    # from the explicit list supplied by the app.
    if isinstance(executors, str) and executors.lower() == 'all':
        choices = list(e for e in self.executors if e != 'data_manager')
    elif isinstance(executors, list):
        choices = executors
    # NOTE(review): any other `executors` value leaves `choices` unbound and
    # random.choice below raises NameError -- confirm callers always pass
    # 'all' or a list.
    executor = random.choice(choices)

    # Transform remote input files to data futures
    args, kwargs = self._add_input_deps(executor, args, kwargs)

    # Full bookkeeping record for this task; stored in self.tasks and
    # mutated as the task moves through its lifecycle.
    task_def = {'depends': None,
                'executor': executor,
                'func': func,
                'func_name': func.__name__,
                'args': args,
                'kwargs': kwargs,
                'fn_hash': fn_hash,
                'memoize': cache,
                'callback': None,
                'exec_fu': None,
                'checkpoint': None,
                'fail_count': 0,
                'fail_history': [],
                'env': None,
                'status': States.unsched,
                'id': task_id,
                'time_submitted': None,
                'time_returned': None,
                'app_fu': None}

    if task_id in self.tasks:
        raise DuplicateTaskError(
            "internal consistency error: Task {0} already exists in task list".format(task_id))
    else:
        self.tasks[task_id] = task_def

    # Get the dep count and a list of dependencies for the task
    dep_cnt, depends = self._gather_all_deps(args, kwargs)
    self.tasks[task_id]['depends'] = depends

    # Extract stdout and stderr to pass to AppFuture:
    task_stdout = kwargs.get('stdout')
    task_stderr = kwargs.get('stderr')

    logger.info("Task {} submitted for App {}, waiting on tasks {}".format(task_id,
                                                                           task_def['func_name'],
                                                                           [fu.tid for fu in depends]))

    # Lock used by launch_if_ready to serialize launch attempts for this task.
    self.tasks[task_id]['task_launch_lock'] = threading.Lock()

    app_fu = AppFuture(tid=task_id,
                       stdout=task_stdout,
                       stderr=task_stderr)

    self.tasks[task_id]['app_fu'] = app_fu
    app_fu.add_done_callback(partial(self.handle_app_update, task_id))
    self.tasks[task_id]['status'] = States.pending
    logger.debug("Task {} set to pending state with AppFuture: {}".format(task_id, task_def['app_fu']))

    # at this point add callbacks to all dependencies to do a launch_if_ready
    # call whenever a dependency completes.

    # we need to be careful about the order of setting the state to pending,
    # adding the callbacks, and caling launch_if_ready explicitly once always below.

    # I think as long as we call launch_if_ready once after setting pending, then
    # we can add the callback dependencies at any point: if the callbacks all fire
    # before then, they won't cause a launch, but the one below will. if they fire
    # after we set it pending, then the last one will cause a launch, and the
    # explicit one won't.

    for d in depends:

        # Closure captures task_id (constant across this loop), not the
        # loop variable, so Python's late-binding-closure pitfall does not
        # apply here; dep_fut is intentionally ignored.
        def callback_adapter(dep_fut):
            self.launch_if_ready(task_id)

        try:
            d.add_done_callback(callback_adapter)
        except Exception as e:
            logger.error("add_done_callback got an exception {} which will be ignored".format(e))

    self.launch_if_ready(task_id)

    return task_def['app_fu']
def wait_for_current_tasks(self):
    """Block until every task currently in the task list has completed.

    Completion is observed through each task's AppFuture. Tasks added
    after cleanup has started (such as data stageout?) are not
    necessarily waited for.
    """
    logger.info("Waiting for all remaining tasks to complete")
    for task_id in self.tasks:
        app_fu = self.tasks[task_id]['app_fu']
        if app_fu.done():
            continue
        logger.debug("Waiting for task {} to complete".format(task_id))
        # .exception() blocks just like .result() but does not re-raise,
        # so a failed task does not abort the wait over the others.
        app_fu.exception()
    logger.info("All remaining tasks completed")
def cleanup(self):
    """DataFlowKernel cleanup.

    This involves killing resources explicitly and sending die messages to IPP workers.

    If the executors are managed (created by the DFK), then we call scale_in on each of
    the executors and call executor.shutdown. Otherwise, we do nothing, and executor
    cleanup is left to the user.

    Raises:
        Exception : if cleanup has already been called on this DFK.
    """
    logger.info("DFK cleanup initiated")

    # this check won't detect two DFK cleanups happening from
    # different threads extremely close in time because of
    # non-atomic read/modify of self.cleanup_called
    if self.cleanup_called:
        raise Exception("attempt to clean up DFK when it has already been cleaned-up")
    self.cleanup_called = True

    self.log_task_states()

    # Checkpointing takes priority over the rest of the tasks
    # checkpoint if any valid checkpoint method is specified
    if self.checkpoint_mode is not None:
        self.checkpoint()

        # Stop the periodic checkpointer, if one was started.
        if self._checkpoint_timer:
            logger.info("Stopping checkpoint timer")
            self._checkpoint_timer.close()

    # Send final stats
    self.usage_tracker.send_message()
    self.usage_tracker.close()

    logger.info("Terminating flow_control and strategy threads")
    self.flowcontrol.close()

    # Only tear down executors the DFK itself created (managed); executors
    # supplied by the user are the user's responsibility to clean up.
    for executor in self.executors.values():
        if executor.managed:
            if executor.scaling_enabled:
                # Scale in every outstanding provider resource before shutdown.
                job_ids = executor.provider.resources.keys()
                executor.scale_in(len(job_ids))
            executor.shutdown()

    self.time_completed = datetime.datetime.now()

    # Report final workflow stats to the monitoring backend, if enabled.
    if self.monitoring:
        self.monitoring.send(MessageType.WORKFLOW_INFO,
                             {'tasks_failed_count': self.tasks_failed_count,
                              'tasks_completed_count': self.tasks_completed_count,
                              "time_began": self.time_began,
                              'time_completed': self.time_completed,
                              'workflow_duration': (self.time_completed - self.time_began).total_seconds(),
                              'run_id': self.run_id, 'rundir': self.run_dir})
        self.monitoring.close()

    # NOTE(review): the triple-quoted block below is dead, commented-out
    # code retained from an earlier monitoring implementation; consider
    # deleting it in a follow-up.
    """
    if self.logging_server is not None:
        self.logging_server.terminate()
        self.logging_server.join()

    if self.web_app is not None:
        self.web_app.terminate()
        self.web_app.join()
    """
    logger.info("DFK cleanup complete")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.