sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def modified_created(instance):
    """Check that an object's `modified` timestamp is not earlier than its
    `created` timestamp.

    Returns a JSONError describing the violation, or None when the check
    passes (or when either property is absent).
    """
    has_both = 'modified' in instance and 'created' in instance
    if has_both and instance['modified'] < instance['created']:
        msg = "'modified' (%s) must be later or equal to 'created' (%s)"
        detail = msg % (instance['modified'], instance['created'])
        return JSONError(detail, instance['id'])
|
`modified` property must be later or equal to `created` property
|
entailment
|
def object_marking_circular_refs(instance):
    """Ensure that marking definitions do not contain circular references (ie.
    they do not reference themselves in the `object_marking_refs` property).
    """
    # Only marking-definition objects can self-reference this way.
    if instance['type'] != 'marking-definition':
        return
    for ref in instance.get('object_marking_refs', []):
        if ref != instance['id']:
            continue
        yield JSONError("`object_marking_refs` cannot contain any "
                        "references to this marking definition object"
                        " (no circular references).", instance['id'])
|
Ensure that marking definitions do not contain circular references (ie.
they do not reference themselves in the `object_marking_refs` property).
|
entailment
|
def granular_markings_circular_refs(instance):
    """Ensure that marking definitions do not contain circular references (ie.
    they do not reference themselves in the `granular_markings` property).
    """
    # Only marking-definition objects can self-reference this way.
    if instance['type'] != 'marking-definition':
        return
    for marking in instance.get('granular_markings', []):
        if 'marking_ref' in marking and marking['marking_ref'] == instance['id']:
            yield JSONError("`granular_markings` cannot contain any "
                            "references to this marking definition object"
                            " (no circular references).", instance['id'])
|
Ensure that marking definitions do not contain circular references (ie.
they do not reference themselves in the `granular_markings` property).
|
entailment
|
def marking_selector_syntax(instance):
    """Ensure selectors in granular markings refer to items which are actually
    present in the object.

    A selector is a dot-separated path (e.g. ``labels.[0]``); list-index
    segments are written ``[n]``. Each selector is resolved step by step
    against ``instance`` itself, yielding a JSONError for any segment that
    cannot be resolved.
    """
    if 'granular_markings' not in instance:
        return
    # Matches a list-index segment like "[3]" and captures the digits.
    list_index_re = re.compile(r"\[(\d+)\]")
    for marking in instance['granular_markings']:
        if 'selectors' not in marking:
            continue
        selectors = marking['selectors']
        for selector in selectors:
            segments = selector.split('.')
            # Walk the path from the top-level object; prev_segmt is kept
            # so error messages can name the segment that led here.
            obj = instance
            prev_segmt = None
            for segmt in segments:
                index_match = list_index_re.match(segmt)
                if index_match:
                    # List-index segment: index into the current value.
                    try:
                        idx = int(index_match.group(1))
                        obj = obj[idx]
                    except IndexError:
                        # Index out of range for the list.
                        yield JSONError("'%s' is not a valid selector because"
                                        " %s is not a valid index."
                                        % (selector, idx), instance['id'])
                    except KeyError:
                        # Current value is a dict, not a list, so an integer
                        # index is meaningless here.
                        yield JSONError("'%s' is not a valid selector because"
                                        " '%s' is not a list."
                                        % (selector, prev_segmt), instance['id'])
                else:
                    # Property-name segment: look it up on the current value.
                    try:
                        obj = obj[segmt]
                    except KeyError as e:
                        yield JSONError("'%s' is not a valid selector because"
                                        " %s is not a property."
                                        % (selector, e), instance['id'])
                    except TypeError:
                        # Current value is not a mapping (e.g. a string).
                        yield JSONError("'%s' is not a valid selector because"
                                        " '%s' is not a property."
                                        % (selector, segmt), instance['id'])
                # NOTE(review): after yielding an error the walk continues with
                # a stale `obj`, so one bad segment can produce follow-on
                # errors for the same selector — confirm this is intended.
                prev_segmt = segmt
|
Ensure selectors in granular markings refer to items which are actually
present in the object.
|
entailment
|
def observable_object_references(instance):
    """Ensure certain observable object properties reference the correct type
    of object.

    For each observable object whose type appears in
    ``enums.OBSERVABLE_PROP_REFS``, every reference-holding property listed
    there is checked via ``check_observable_refs``. The enum entry for a
    property is either a list of allowed types (direct reference property)
    or a dict describing properties of embedded objects.
    """
    for key, obj in instance['objects'].items():
        if 'type' not in obj:
            continue
        elif obj['type'] not in enums.OBSERVABLE_PROP_REFS:
            continue
        obj_type = obj['type']
        for obj_prop in enums.OBSERVABLE_PROP_REFS[obj_type]:
            if obj_prop not in obj:
                continue
            enum_prop = enums.OBSERVABLE_PROP_REFS[obj_type][obj_prop]
            if isinstance(enum_prop, list):
                # Direct reference property: the enum entry is the list of
                # allowed object types for these refs.
                refs = obj[obj_prop]
                enum_vals = enum_prop
                for x in check_observable_refs(refs, obj_prop, enum_prop, '',
                                               enum_vals, key, instance):
                    yield x
            elif isinstance(enum_prop, dict):
                # Reference(s) held on an embedded object; the enum entry maps
                # embedded property names to their allowed types.
                for embedded_prop in enum_prop:
                    if isinstance(obj[obj_prop], dict):
                        # Property holds a single embedded object.
                        if embedded_prop not in obj[obj_prop]:
                            continue
                        embedded_obj = obj[obj_prop][embedded_prop]
                        for embed_obj_prop in embedded_obj:
                            if embed_obj_prop not in enum_prop[embedded_prop]:
                                continue
                            refs = embedded_obj[embed_obj_prop]
                            enum_vals = enum_prop[embedded_prop][embed_obj_prop]
                            for x in check_observable_refs(refs, obj_prop, enum_prop,
                                                           embed_obj_prop, enum_vals,
                                                           key, instance):
                                yield x
                    elif isinstance(obj[obj_prop], list):
                        # Property holds a list of embedded objects; check the
                        # embedded property on each element.
                        for embedded_list_obj in obj[obj_prop]:
                            if embedded_prop not in embedded_list_obj:
                                continue
                            embedded_obj = embedded_list_obj[embedded_prop]
                            refs = embedded_obj
                            enum_vals = enum_prop[embedded_prop]
                            for x in check_observable_refs(refs, obj_prop, enum_prop,
                                                           embedded_prop, enum_vals,
                                                           key, instance):
                                yield x
|
Ensure certain observable object properties reference the correct type
of object.
|
entailment
|
def artifact_mime_type(instance):
    """Ensure the 'mime_type' property of artifact objects comes from the
    Template column in the IANA media type registry.

    When the IANA registry list is unavailable (e.g. no network), falls back
    to a purely syntactic regex check of the 'type/subtype' form.
    """
    # Compile once, outside the loop (was recompiled per artifact object).
    mime_re = re.compile(r'^(application|audio|font|image|message|model'
                         r'|multipart|text|video)/[a-zA-Z0-9.+_-]+')
    for key, obj in instance['objects'].items():
        if ('type' in obj and obj['type'] == 'artifact' and 'mime_type' in obj):
            if enums.media_types():
                # Strict check against the registry's Template column.
                if obj['mime_type'] not in enums.media_types():
                    yield JSONError("The 'mime_type' property of object '%s' "
                                    "('%s') must be an IANA registered MIME "
                                    "Type of the form 'type/subtype'."
                                    % (key, obj['mime_type']), instance['id'])
            else:
                info("Can't reach IANA website; using regex for mime types.")
                if not mime_re.match(obj['mime_type']):
                    yield JSONError("The 'mime_type' property of object '%s' "
                                    "('%s') should be an IANA MIME Type of the"
                                    " form 'type/subtype'."
                                    % (key, obj['mime_type']), instance['id'])
|
Ensure the 'mime_type' property of artifact objects comes from the
Template column in the IANA media type registry.
|
entailment
|
def character_set(instance):
    """Ensure certain properties of cyber observable objects come from the IANA
    Character Set list.

    Checked properties: 'path_enc' on directory objects and 'name_enc' on
    file objects. When the IANA list is unavailable (e.g. no network), falls
    back to a purely syntactic regex check.
    """
    # (observable type, property) pairs whose values must be IANA char sets.
    # The two former copy-pasted branches are folded into this table.
    charset_props = (('directory', 'path_enc'), ('file', 'name_enc'))
    char_re = re.compile(r'^[a-zA-Z0-9_\(\)-]+$')
    for key, obj in instance['objects'].items():
        for obj_type, prop in charset_props:
            if not ('type' in obj and obj['type'] == obj_type and prop in obj):
                continue
            if enums.char_sets():
                if obj[prop] not in enums.char_sets():
                    yield JSONError("The '%s' property of object '%s' "
                                    "('%s') must be an IANA registered "
                                    "character set."
                                    % (prop, key, obj[prop]), instance['id'])
            else:
                info("Can't reach IANA website; using regex for character_set.")
                if not char_re.match(obj[prop]):
                    yield JSONError("The '%s' property of object '%s' "
                                    "('%s') must be an IANA registered "
                                    "character set."
                                    % (prop, key, obj[prop]), instance['id'])
|
Ensure certain properties of cyber observable objects come from the IANA
Character Set list.
|
entailment
|
def software_language(instance):
    """Ensure every entry in the 'languages' property of software objects is a
    valid ISO 639-2 language code.
    """
    for key, obj in instance['objects'].items():
        if obj.get('type') != 'software' or 'languages' not in obj:
            continue
        for lang in obj['languages']:
            if lang in enums.SOFTWARE_LANG_CODES:
                continue
            yield JSONError("The 'languages' property of object '%s' "
                            "contains an invalid ISO 639-2 language "
                            " code ('%s')."
                            % (key, lang), instance['id'])
|
Ensure the 'language' property of software objects is a valid ISO 639-2
language code.
|
entailment
|
def types_strict(instance):
    """Ensure that no custom object types are used, but only the official ones
    from the specification.
    """
    obj_type = instance['type']
    if obj_type not in enums.TYPES:
        yield JSONError("Object type '%s' is not one of those defined in the"
                        " specification." % obj_type, instance['id'])
    if not has_cyber_observable_data(instance):
        return
    # Also vet the type of every contained cyber observable object.
    for key, obj in instance['objects'].items():
        if 'type' in obj and obj['type'] not in enums.OBSERVABLE_TYPES:
            yield JSONError("Observable object %s is type '%s' which is "
                            "not one of those defined in the "
                            "specification."
                            % (key, obj['type']), instance['id'])
|
Ensure that no custom object types are used, but only the official ones
from the specification.
|
entailment
|
def properties_strict(instance):
    """Ensure that no custom properties are used, but only the official ones
    from the specification.

    Checks, in order: top-level properties of the object itself, properties
    of each contained cyber observable object (including embedded
    sub-object properties), and properties of recognized observable
    extensions (including their embedded properties).
    """
    if instance['type'] not in enums.TYPES:
        return  # only check properties for official objects
    defined_props = enums.PROPERTIES.get(instance['type'], [])
    for prop in instance.keys():
        if prop not in defined_props:
            yield JSONError("Property '%s' is not one of those defined in the"
                            " specification." % prop, instance['id'])
    if has_cyber_observable_data(instance):
        for key, obj in instance['objects'].items():
            type_ = obj.get('type', '')
            if type_ not in enums.OBSERVABLE_PROPERTIES:
                continue  # custom observable types handled outside this function
            observable_props = enums.OBSERVABLE_PROPERTIES.get(type_, [])
            embedded_props = enums.OBSERVABLE_EMBEDDED_PROPERTIES.get(type_, {})
            extensions = enums.OBSERVABLE_EXTENSIONS.get(type_, [])
            for prop in obj.keys():
                if prop not in observable_props:
                    yield JSONError("Property '%s' is not one of those defined in the"
                                    " specification for %s objects."
                                    % (prop, type_), instance['id'])
                # Check properties of embedded cyber observable types
                elif prop in embedded_props:
                    embedded_prop_keys = embedded_props.get(prop, [])
                    # The embedded value may be a dict of sub-objects or a
                    # flat list of keys; handle both shapes.
                    for embedded_key in obj[prop]:
                        if isinstance(embedded_key, dict):
                            for embedded in embedded_key:
                                if embedded not in embedded_prop_keys:
                                    yield JSONError("Property '%s' is not one of those defined in the"
                                                    " specification for the %s property in %s objects."
                                                    % (embedded, prop, type_), instance['id'])
                        elif embedded_key not in embedded_prop_keys:
                            yield JSONError("Property '%s' is not one of those defined in the"
                                            " specification for the %s property in %s objects."
                                            % (embedded_key, prop, type_), instance['id'])
            # Check properties of cyber observable extensions
            for ext_key in obj.get('extensions', {}):
                if ext_key not in extensions:
                    continue  # don't check custom extensions
                extension_props = enums.OBSERVABLE_EXTENSION_PROPERTIES[ext_key]
                for ext_prop in obj['extensions'][ext_key]:
                    if ext_prop not in extension_props:
                        yield JSONError("Property '%s' is not one of those defined in the"
                                        " specification for the %s extension in %s objects."
                                        % (ext_prop, ext_key, type_), instance['id'])
                    # Also vet embedded properties of this extension property.
                    embedded_ext_props = enums.OBSERVABLE_EXTENSION_EMBEDDED_PROPERTIES.get(ext_key, {}).get(ext_prop, [])
                    if embedded_ext_props:
                        for embed_ext_prop in obj['extensions'][ext_key].get(ext_prop, []):
                            if embed_ext_prop not in embedded_ext_props:
                                yield JSONError("Property '%s' in the %s property of the %s extension "
                                                "is not one of those defined in the specification."
                                                % (embed_ext_prop, ext_prop, ext_key), instance['id'])
|
Ensure that no custom properties are used, but only the official ones
from the specification.
|
entailment
|
def patterns(instance, options):
    """Ensure that the syntax of the pattern of an indicator is valid, and that
    objects and properties referenced by the pattern are valid.

    Args:
        instance: The STIX object being validated.
        options: Validation options; `strict_types`, `strict_properties` and
            the `disabled` check list alter which errors are yielded.
    """
    if instance['type'] != 'indicator' or 'pattern' not in instance:
        return
    pattern = instance['pattern']
    if not isinstance(pattern, string_types):
        return  # This error already caught by schemas
    errors = pattern_validator(pattern)
    # Check pattern syntax
    if errors:
        for e in errors:
            yield PatternError(str(e), instance['id'])
        return
    # Shape requirements for custom type / property names (hyphenated
    # lower-case types; lower_snake_case properties of 3-250 chars).
    type_format_re = re.compile(r'^\-?[a-z0-9]+(-[a-z0-9]+)*\-?$')
    property_format_re = re.compile(r'^[a-z0-9_]{3,250}$')
    p = Pattern(pattern)
    inspection = p.inspect().comparisons
    for objtype in inspection:
        # Check observable object types
        if objtype in enums.OBSERVABLE_TYPES:
            pass
        elif options.strict_types:
            yield PatternError("'%s' is not a valid STIX observable type"
                               % objtype, instance['id'])
        elif (not type_format_re.match(objtype) or
              len(objtype) < 3 or len(objtype) > 250):
            yield PatternError("'%s' is not a valid observable type name"
                               % objtype, instance['id'])
        # The prefix checks below are best-practice warnings and can be
        # disabled via the options.disabled list.
        elif (all(x not in options.disabled for x in ['all', 'format-checks', 'custom-prefix']) and
              not CUSTOM_TYPE_PREFIX_RE.match(objtype)):
            yield PatternError("Custom Observable Object type '%s' should start "
                               "with 'x-' followed by a source unique identifier "
                               "(like a domain name with dots replaced by "
                               "hyphens), a hyphen and then the name"
                               % objtype, instance['id'])
        elif (all(x not in options.disabled for x in ['all', 'format-checks', 'custom-prefix-lax']) and
              not CUSTOM_TYPE_LAX_PREFIX_RE.match(objtype)):
            yield PatternError("Custom Observable Object type '%s' should start "
                               "with 'x-'" % objtype, instance['id'])
        # Check observable object properties
        expression_list = inspection[objtype]
        for exp in expression_list:
            path = exp[0]
            # Get the property name without list index, dictionary key, or referenced object property
            prop = path[0]
            if objtype in enums.OBSERVABLE_PROPERTIES and prop in enums.OBSERVABLE_PROPERTIES[objtype]:
                continue
            elif options.strict_properties:
                yield PatternError("'%s' is not a valid property for '%s' objects"
                                   % (prop, objtype), instance['id'])
            elif not property_format_re.match(prop):
                yield PatternError("'%s' is not a valid observable property name"
                                   % prop, instance['id'])
            elif (all(x not in options.disabled for x in ['all', 'format-checks', 'custom-prefix']) and
                  not CUSTOM_PROPERTY_PREFIX_RE.match(prop)):
                yield PatternError("Cyber Observable Object custom property '%s' "
                                   "should start with 'x_' followed by a source "
                                   "unique identifier (like a domain name with "
                                   "dots replaced by underscores), an "
                                   "underscore and then the name"
                                   % prop, instance['id'])
            elif (all(x not in options.disabled for x in ['all', 'format-checks', 'custom-prefix-lax']) and
                  not CUSTOM_PROPERTY_LAX_PREFIX_RE.match(prop)):
                yield PatternError("Cyber Observable Object custom property '%s' "
                                   "should start with 'x_'" % prop, instance['id'])
|
Ensure that the syntax of the pattern of an indicator is valid, and that
objects and properties referenced by the pattern are valid.
|
entailment
|
def list_musts(options):
    """Construct the list of 'MUST' validators to be run by the validator.
    """
    validators = [
        timestamp,
        modified_created,
        object_marking_circular_refs,
        granular_markings_circular_refs,
        marking_selector_syntax,
        observable_object_references,
        artifact_mime_type,
        character_set,
        software_language,
        patterns,
    ]
    # Optional checks controlled by command-line flags.
    if options.strict_types:        # --strict-types
        validators.append(types_strict)
    if options.strict_properties:   # --strict-properties
        validators.append(properties_strict)
    return validators
|
Construct the list of 'MUST' validators to be run by the validator.
|
entailment
|
def media_types():
    """Return a list of the IANA Media (MIME) Types, or an empty list if the
    IANA website is unreachable.
    Store it as a function attribute so that we only build the list once.
    """
    if hasattr(media_types, 'typelist'):
        return media_types.typelist
    typelist = []
    for category in ('application', 'audio', 'font', 'image', 'message',
                     'model', 'multipart', 'text', 'video'):
        try:
            data = requests.get('http://www.iana.org/assignments/'
                                'media-types/%s.csv' % category)
        except requests.exceptions.RequestException:
            # Unreachable: report empty without caching, so a later call
            # can retry the download.
            return []
        for raw_line in data.iter_lines():
            if not raw_line:
                continue
            line = raw_line.decode("utf-8")
            if line.count(',') == 0:
                continue
            # Prefer the Template column; fall back to category/name.
            reg_template = line.split(',')[1]
            if reg_template:
                typelist.append(reg_template)
            else:
                typelist.append(category + '/' + line.split(',')[0])
    media_types.typelist = typelist
    return media_types.typelist
|
Return a list of the IANA Media (MIME) Types, or an empty list if the
IANA website is unreachable.
Store it as a function attribute so that we only build the list once.
|
entailment
|
def char_sets():
    """Return a list of the IANA Character Sets, or an empty list if the
    IANA website is unreachable.
    Store it as a function attribute so that we only build the list once.
    """
    if hasattr(char_sets, 'setlist'):
        return char_sets.setlist
    setlist = []
    try:
        data = requests.get('http://www.iana.org/assignments/character-'
                            'sets/character-sets-1.csv')
    except requests.exceptions.RequestException:
        # Unreachable: report empty without caching so a later call retries.
        return []
    for raw_line in data.iter_lines():
        if not raw_line:
            continue
        line = raw_line.decode("utf-8")
        if line.count(',') == 0:
            continue
        vals = line.split(',')
        # Prefer the first column (preferred name), else the second.
        setlist.append(vals[0] if vals[0] else vals[1])
    char_sets.setlist = setlist
    return char_sets.setlist
|
Return a list of the IANA Character Sets, or an empty list if the
IANA website is unreachable.
Store it as a function attribute so that we only build the list once.
|
entailment
|
def protocols():
    """Return a list of values from the IANA Service Name and Transport
    Protocol Port Number Registry, or an empty list if the IANA website is
    unreachable.
    Store it as a function attribute so that we only build the list once.
    """
    if hasattr(protocols, 'protlist'):
        return protocols.protlist
    protlist = []
    try:
        data = requests.get('http://www.iana.org/assignments/service-names'
                            '-port-numbers/service-names-port-numbers.csv')
    except requests.exceptions.RequestException:
        # Unreachable: report empty without caching so a later call retries.
        return []
    for raw_line in data.iter_lines():
        if not raw_line:
            continue
        line = raw_line.decode("utf-8")
        if line.count(',') == 0:
            continue
        vals = line.split(',')
        if vals[0]:
            protlist.append(vals[0])
        # Column 2 holds the transport protocol; add it once.
        if len(vals) > 2 and vals[2] and vals[2] not in protlist:
            protlist.append(vals[2])
    # Common values that are not in the registry but appear in the wild.
    protlist.extend(['ipv4', 'ipv6', 'ssl', 'tls', 'dns'])
    protocols.protlist = protlist
    return protocols.protlist
|
Return a list of values from the IANA Service Name and Transport
Protocol Port Number Registry, or an empty list if the IANA website is
unreachable.
Store it as a function attribute so that we only build the list once.
|
entailment
|
def ipfix():
    """Return a list of values from the list of IANA IP Flow Information Export
    (IPFIX) Entities, or an empty list if the IANA website is unreachable.
    Store it as a function attribute so that we only build the list once.
    """
    if not hasattr(ipfix, 'ipflist'):
        ilist = []
        # Compile once, outside the per-line loop (was re.match with a string
        # pattern on every CSV row). Matches data rows of the form
        # "<elementID>,<name>,<dataType>,..." and skips header/section rows.
        entity_re = re.compile(r'^\d+(,[a-zA-Z0-9]+){2},')
        try:
            data = requests.get('http://www.iana.org/assignments/ipfix/ipfix-'
                                'information-elements.csv')
        except requests.exceptions.RequestException:
            # Unreachable: report empty without caching so a later call retries.
            return []
        for line in data.iter_lines():
            if line:
                line = line.decode("utf-8")
                if entity_re.match(line):
                    vals = line.split(',')
                    if vals[1]:
                        ilist.append(vals[1])
        ipfix.ipflist = ilist
    return ipfix.ipflist
|
Return a list of values from the list of IANA IP Flow Information Export
(IPFIX) Entities, or an empty list if the IANA website is unreachable.
Store it as a function attribute so that we only build the list once.
|
entailment
|
def print_level(log_function, fmt, level, *args):
    """Print a formatted message to stdout prepended by spaces. Useful for
    printing hierarchical information, like bullet lists.
    Note:
        If the application is running in "Silent Mode"
        (i.e., ``_SILENT == True``), this function will return
        immediately and no message will be printed.
    Args:
        log_function: The function that will be called to output the formatted
            message.
        fmt (str): A Python formatted string.
        level (int): Used to determine how many spaces to print. The formula
            is ``'    ' * level``.
        *args: Variable length list of arguments. Values are plugged into the
            format string.
    Examples:
        >>> print_level(print, "%s %d", 0, "TEST", 0)
        TEST 0
        >>> print_level(print, "%s %d", 1, "TEST", 1)
            TEST 1
        >>> print_level(print, "%s %d", 2, "TEST", 2)
                TEST 2
    """
    if _SILENT:
        return
    msg = fmt % args
    spaces = '    ' * level
    log_function("%s%s" % (spaces, msg))
|
Print a formatted message to stdout prepended by spaces. Useful for
printing hierarchical information, like bullet lists.
Note:
If the application is running in "Silent Mode"
(i.e., ``_SILENT == True``), this function will return
immediately and no message will be printed.
Args:
log_function: The function that will be called to output the formatted
message.
fmt (str): A Python formatted string.
level (int): Used to determing how many spaces to print. The formula
is ``' ' * level ``.
*args: Variable length list of arguments. Values are plugged into the
format string.
Examples:
>>> print_level("%s %d", 0, "TEST", 0)
TEST 0
>>> print_level("%s %d", 1, "TEST", 1)
TEST 1
>>> print_level("%s %d", 2, "TEST", 2)
TEST 2
|
entailment
|
def print_fatal_results(results, level=0):
    """Print fatal errors that occurred during validation runs.

    Args:
        results: An object whose ``error`` attribute describes the failure.
        level: Indentation level for the printed line.
    """
    fatal_fmt = _RED + "[X] Fatal Error: %s"
    print_level(logger.critical, fatal_fmt, level, results.error)
|
Print fatal errors that occurred during validation runs.
|
entailment
|
def print_schema_results(results, level=0):
    """Print JSON Schema validation errors to stdout.
    Args:
        results: An instance of ObjectValidationResults.
        level: The level at which to print the results.
    """
    error_fmt = _RED + "[X] %s"
    for err in results.errors:
        print_level(logger.error, error_fmt, level, err)
|
Print JSON Schema validation errors to stdout.
Args:
results: An instance of ObjectValidationResults.
level: The level at which to print the results.
|
entailment
|
def print_warning_results(results, level=0):
    """Print warning messages found during validation.

    Args:
        results: An object exposing a ``warnings`` iterable.
        level: Indentation level for the printed lines.
    """
    warning_fmt = _YELLOW + "[!] " + "Warning: %s"
    for warning in results.warnings:
        print_level(logger.warning, warning_fmt, level, warning)
|
Print warning messages found during validation.
|
entailment
|
def print_results_header(identifier, is_valid):
    """Print a header for the results of either a file or an object.

    Args:
        identifier: File path or object id being reported on.
        is_valid: Whether validation succeeded; controls marker and level.
    """
    print_horizontal_rule()
    print_level(logger.info, "[-] Results for: %s", 0, identifier)
    if is_valid:
        log_func, marker, verdict = logger.info, _GREEN + "[+]", "Valid"
    else:
        log_func, marker, verdict = logger.error, _RED + "[X]", "Invalid"
    print_level(log_func, "%s STIX JSON: %s", 0, marker, verdict)
|
Print a header for the results of either a file or an object.
|
entailment
|
def print_object_results(obj_result):
    """Print the results of validating an object.
    Args:
        obj_result: An ObjectValidationResults instance.
    """
    print_results_header(obj_result.object_id, obj_result.is_valid)
    # Warnings first, then schema errors, each indented one level.
    for items, printer in ((obj_result.warnings, print_warning_results),
                           (obj_result.errors, print_schema_results)):
        if items:
            printer(obj_result, 1)
|
Print the results of validating an object.
Args:
obj_result: An ObjectValidationResults instance.
|
entailment
|
def print_file_results(file_result):
    """Print the results of validating a file.
    Args:
        file_result: A FileValidationResults instance.
    """
    print_results_header(file_result.filepath, file_result.is_valid)
    # Per-object warnings and errors, indented one level.
    for obj_res in file_result.object_results:
        if obj_res.warnings:
            print_warning_results(obj_res, 1)
        if obj_res.errors:
            print_schema_results(obj_res, 1)
    # A fatal error for the file as a whole is reported last.
    if file_result.fatal:
        print_fatal_results(file_result.fatal, 1)
|
Print the results of validating a file.
Args:
file_result: A FileValidationResults instance.
|
entailment
|
def print_results(results):
    """Print `results` (the results of validation) to stdout.
    Args:
        results: A list of FileValidationResults or ObjectValidationResults
            instances.
    """
    # Accept a single result object as well as a list of them.
    result_list = results if isinstance(results, list) else [results]
    for result in result_list:
        try:
            result.log()
        except AttributeError:
            raise ValueError('Argument to print_results() must be a list of '
                             'FileValidationResults or ObjectValidationResults.')
|
Print `results` (the results of validation) to stdout.
Args:
results: A list of FileValidationResults or ObjectValidationResults
instances.
|
entailment
|
def vocab_encryption_algo(instance):
    """Ensure file objects' 'encryption_algorithm' property is from the
    encryption-algo-ov vocabulary.
    """
    for key, obj in instance['objects'].items():
        if obj.get('type') != 'file':
            continue
        if 'encryption_algorithm' not in obj:
            continue
        enc_algo = obj['encryption_algorithm']
        if enc_algo in enums.ENCRYPTION_ALGO_OV:
            continue
        yield JSONError("Object '%s' has an 'encryption_algorithm' of "
                        "'%s', which is not a value in the "
                        "encryption-algo-ov vocabulary."
                        % (key, enc_algo), instance['id'],
                        'encryption-algo')
|
Ensure file objects' 'encryption_algorithm' property is from the
encryption-algo-ov vocabulary.
|
entailment
|
def enforce_relationship_refs(instance):
    """Ensures that all SDOs being referenced by the SRO are contained
    within the same bundle"""
    if instance['type'] != 'bundle' or 'objects' not in instance:
        return
    # Collect the ids of every non-relationship object in the bundle.
    known_ids = {obj['id'] for obj in instance['objects']
                 if obj['type'] != 'relationship'}
    # Flag any relationship endpoint that points outside the bundle.
    for obj in instance['objects']:
        if obj['type'] != 'relationship':
            continue
        for ref_prop in ('source_ref', 'target_ref'):
            if obj[ref_prop] not in known_ids:
                yield JSONError("Relationship object %s makes reference to %s "
                                "Which is not found in current bundle "
                                % (obj['id'], obj[ref_prop]), 'enforce-relationship-refs')
|
Ensures that all SDOs being referenced by the SRO are contained
within the same bundle
|
entailment
|
def timestamp_compare(instance):
    """Ensure timestamp properties with a comparison requirement are valid.
    E.g. `modified` must be later or equal to `created`.
    """
    # Always enforce modified >= created, plus any type-specific rules.
    checks = [('modified', 'ge', 'created')]
    checks.extend(enums.TIMESTAMP_COMPARE.get(instance.get('type', ''), []))
    for first, op, second in checks:
        comp = getattr(operator, op)
        comp_str = get_comparison_string(op)
        if first not in instance or second not in instance:
            continue
        if comp(instance[first], instance[second]):
            continue
        yield JSONError("'%s' (%s) must be %s '%s' (%s)"
                        % (first, instance[first], comp_str, second,
                           instance[second]),
                        instance['id'])
|
Ensure timestamp properties with a comparison requirement are valid.
E.g. `modified` must be later or equal to `created`.
|
entailment
|
def observable_timestamp_compare(instance):
    """Ensure cyber observable timestamp properties with a comparison
    requirement are valid.

    The comparison rules for each observable type come from
    ``enums.TIMESTAMP_COMPARE_OBSERVABLE``.
    """
    for key, obj in instance['objects'].items():
        compares = enums.TIMESTAMP_COMPARE_OBSERVABLE.get(obj.get('type', ''), [])
        # (A stray debug print of `compares` was removed here.)
        for first, op, second in compares:
            comp = getattr(operator, op)
            comp_str = get_comparison_string(op)
            if first in obj and second in obj and \
                    not comp(obj[first], obj[second]):
                msg = "In object '%s', '%s' (%s) must be %s '%s' (%s)"
                yield JSONError(msg % (key, first, obj[first], comp_str, second, obj[second]),
                                instance['id'])
|
Ensure cyber observable timestamp properties with a comparison
requirement are valid.
|
entailment
|
def language_contents(instance):
    """Ensure keys in Language Content's 'contents' dictionary are valid
    language codes, and that the keys in the sub-dictionaries match the rules
    for object property names.
    """
    if instance['type'] != 'language-content' or 'contents' not in instance:
        return
    for lang_key, translations in instance['contents'].items():
        if lang_key not in enums.LANG_CODES:
            yield JSONError("Invalid key '%s' in 'contents' property must be"
                            " an RFC 5646 code" % lang_key, instance['id'])
        # Each sub-dictionary key must look like an object property name.
        for subkey in translations:
            if PROPERTY_FORMAT_RE.match(subkey):
                continue
            yield JSONError("'%s' in '%s' of the 'contents' property is "
                            "invalid and must match a valid property name"
                            % (subkey, lang_key), instance['id'], 'observable-dictionary-keys')
|
Ensure keys in Language Content's 'contents' dictionary are valid
language codes, and that the keys in the sub-dictionaries match the rules
for object property names.
|
entailment
|
def list_musts(options):
    """Construct the list of 'MUST' validators to be run by the validator.
    """
    validators = [
        timestamp,
        timestamp_compare,
        observable_timestamp_compare,
        object_marking_circular_refs,
        granular_markings_circular_refs,
        marking_selector_syntax,
        observable_object_references,
        artifact_mime_type,
        character_set,
        language,
        software_language,
        patterns,
        language_contents,
    ]
    # Optional checks controlled by command-line flags.
    if options.strict_types:        # --strict-types
        validators.append(types_strict)
    if options.strict_properties:   # --strict-properties
        validators.append(properties_strict)
    return validators
|
Construct the list of 'MUST' validators to be run by the validator.
|
entailment
|
def get_code(results):
    """Determines the exit status code to be returned from a script by
    inspecting the results returned from validating file(s).
    Status codes are binary OR'd together, so exit codes can communicate
    multiple error conditions.
    """
    status = EXIT_SUCCESS
    for file_result in results:
        # Any object-level schema error marks the run schema-invalid.
        if any(obj_res.errors for obj_res in file_result.object_results):
            status |= EXIT_SCHEMA_INVALID
        # A file-level fatal error marks the run as a validation error.
        if file_result.fatal:
            status |= EXIT_VALIDATION_ERROR
    return status
|
Determines the exit status code to be returned from a script by
inspecting the results returned from validating file(s).
Status codes are binary OR'd together, so exit codes can communicate
multiple error conditions.
|
entailment
|
def parse_args(cmd_args, is_script=False):
    """Parses a list of command line arguments into a ValidationOptions object.
    Args:
        cmd_args (list of str): The list of command line arguments to be parsed.
        is_script: Whether the arguments are intended for use in a stand-alone
            script or imported into another tool.
    Returns:
        Instance of ``ValidationOptions``
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=NewlinesHelpFormatter,
        epilog=CODES_TABLE
    )
    # Input options
    # The positional FILES argument only makes sense for the stand-alone
    # script; when imported as a library, input is provided differently.
    if is_script:
        parser.add_argument(
            "files",
            metavar="FILES",
            nargs="*",
            default=sys.stdin,
            help="A whitespace separated list of STIX files or directories of "
                 "STIX files to validate. If none given, stdin will be used."
        )
    parser.add_argument(
        "-r",
        "--recursive",
        dest="recursive",
        action="store_true",
        default=True,
        help="Recursively descend into input directories."
    )
    parser.add_argument(
        "-s",
        "--schemas",
        dest="schema_dir",
        help="Custom schema directory. If provided, input will be validated "
             "against these schemas in addition to the STIX schemas bundled "
             "with this script."
    )
    parser.add_argument(
        "--version",
        dest="version",
        default=DEFAULT_VER,
        help="The version of the STIX specification to validate against (e.g. "
             "\"2.0\")."
    )
    # Output options
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        default=False,
        help="Print informational notes and more verbose error messages."
    )
    parser.add_argument(
        "-q",
        "--silent",
        dest="silent",
        action="store_true",
        default=False,
        help="Silence all output to stdout."
    )
    # Check-selection options: disable/enable take comma-separated check ids.
    parser.add_argument(
        "-d",
        "--disable",
        "--ignore",
        dest="disabled",
        default="",
        help="A comma-separated list of recommended best practice checks to "
             "skip. By default, no checks are disabled. \n\n"
             "Example: --disable 202,210"
    )
    parser.add_argument(
        "-e",
        "--enable",
        "--select",
        dest="enabled",
        default="",
        help="A comma-separated list of recommended best practice checks to "
             "enable. If the --disable option is not used, no other checks "
             "will be run. By default, all checks are enabled.\n\n"
             "Example: --enable 218"
    )
    # Strictness options
    parser.add_argument(
        "--strict",
        dest="strict",
        action="store_true",
        default=False,
        help="Treat warnings as errors and fail validation if any are found."
    )
    parser.add_argument(
        "--strict-types",
        dest="strict_types",
        action="store_true",
        default=False,
        help="Ensure that no custom object types are used, only those defined"
             " in the STIX specification."
    )
    parser.add_argument(
        "--strict-properties",
        dest="strict_properties",
        action="store_true",
        default=False,
        help="Ensure that no custom properties are used, only those defined"
             " in the STIX specification."
    )
    # Cache options for downloaded external source values (IANA lists etc.)
    parser.add_argument(
        "--no-cache",
        dest="no_cache",
        action="store_true",
        default=False,
        help="Disable the caching of external source values."
    )
    parser.add_argument(
        "--refresh-cache",
        dest="refresh_cache",
        action="store_true",
        default=False,
        help="Clears the cache of external source values, then "
             "during validation downloads them again."
    )
    parser.add_argument(
        "--clear-cache",
        dest="clear_cache",
        action="store_true",
        default=False,
        help="Clear the cache of external source values after validation."
    )
    parser.add_argument(
        "--enforce-refs",
        dest="enforce_refs",
        action="store_true",
        default=False,
        help="Ensures that all SDOs being referenced by SROs are contained "
             "within the same bundle."
    )
    args = parser.parse_args(cmd_args)
    # Library use has no file arguments; normalize missing version to default.
    if not is_script:
        args.files = ""
    if not args.version:
        args.version = DEFAULT_VER
    return ValidationOptions(args)
|
Parses a list of command line arguments into a ValidationOptions object.
Args:
cmd_args (list of str): The list of command line arguments to be parsed.
is_script: Whether the arguments are intended for use in a stand-alone
script or imported into another tool.
Returns:
Instance of ``ValidationOptions``
|
entailment
|
def cyber_observable_check(original_function):
    """Decorator for validator checks that require cyber observable data.

    The wrapped check is skipped entirely (yields nothing) when the
    instance passed as the first positional argument has no cyber
    observable data.
    """
    def new_function(*args, **kwargs):
        # Skip the check when the instance carries no observable data.
        if not has_cyber_observable_data(args[0]):
            return
        # BUG FIX: call the wrapped check exactly once and iterate its
        # result. The original invoked original_function a second time
        # to iterate, duplicating any side effects of the first call.
        result = original_function(*args, **kwargs)
        if isinstance(result, Iterable):
            for x in result:
                yield x
    # preserve the wrapped check's name for reporting
    new_function.__name__ = original_function.__name__
    return new_function
|
Decorator for functions that require cyber observable data.
|
entailment
|
def init_requests_cache(refresh_cache=False):
    """
    Initializes a cache which the ``requests`` library will consult for
    responses, before making network requests.

    :param refresh_cache: Whether the cache should be cleared out
    """
    # External source values consulted by some checks are cached on disk
    # under the platform-appropriate user cache directory.
    app_dirs = AppDirs("stix2-validator", "OASIS")
    cache_dir = app_dirs.user_cache_dir
    # Ensure the cache directory exists; only "already exists" is ignored.
    try:
        os.makedirs(cache_dir)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    # Separate cache files per major Python version, expiring weekly.
    cache_name = os.path.join(
        cache_dir, 'py{}cache'.format(sys.version_info[0]))
    requests_cache.install_cache(
        cache_name=cache_name,
        expire_after=datetime.timedelta(weeks=1))
    if refresh_cache:
        clear_requests_cache()
|
Initializes a cache which the ``requests`` library will consult for
responses, before making network requests.
:param refresh_cache: Whether the cache should be cleared out
|
entailment
|
def render_tag(self, context, kwargs, nodelist):
    '''render content with "active" urls logic'''
    # merge the options passed to the tag with the defaults
    self.load_configuration(**kwargs)
    # the current request's full path decides which urls are "active"
    self.full_path = context['request'].get_full_path()
    # render the wrapped template nodes inside a pushed context layer
    context.push()
    rendered = nodelist.render(context)
    context.pop()
    # post-process the rendered html, marking "active" urls
    return render_content(
        rendered,
        full_path=self.full_path,
        parent_tag=self.parent_tag,
        css_class=self.css_class,
        menu=self.menu,
        ignore_params=self.ignore_params,
    )
|
render content with "active" urls logic
|
entailment
|
def parse(self, parser):
    '''parse content of extension

    Consumes the ``{% activeurl %} ... {% endactiveurl %}`` block from
    the Jinja2 token stream and returns a CallBlock node rendered via
    ``render_tag``.
    '''
    # line number of token that started the tag (for error reporting)
    lineno = next(parser.stream).lineno
    # reference to the runtime template context
    context = nodes.ContextReference()
    # parse keyword arguments (name=value pairs, optionally comma-separated)
    kwargs = []
    while parser.stream.look().type == lexer.TOKEN_ASSIGN:
        key = parser.stream.expect(lexer.TOKEN_NAME)
        # consume the '=' token itself
        next(parser.stream)
        kwargs.append(
            nodes.Keyword(key.value, parser.parse_expression()),
        )
        parser.stream.skip_if('comma')
    # parse content of the activeurl block up to endactiveurl
    body = parser.parse_statements(['name:endactiveurl'], drop_needle=True)
    args = [context]
    # render_tag(context, **kwargs) will receive the rendered body
    call_method = self.call_method(
        'render_tag',
        args=args,
        kwargs=kwargs,
    )
    return nodes.CallBlock(call_method, [], [], body).set_lineno(lineno)
|
parse content of extension
|
entailment
|
def render_tag(self, context, caller, **kwargs):
    '''render content with "active" urls logic'''
    # merge the options passed to the extension with the defaults
    self.load_configuration(**kwargs)
    # the current request's full path decides which urls are "active"
    self.full_path = context['request'].get_full_path()
    # render the wrapped block
    rendered = caller()
    # post-process the rendered html, marking "active" urls
    return render_content(
        rendered,
        full_path=self.full_path,
        parent_tag=self.parent_tag,
        css_class=self.css_class,
        menu=self.menu,
        ignore_params=self.ignore_params,
    )
|
render content with "active" urls logic
|
entailment
|
def get_cache_key(content, **kwargs):
    '''generate cache key'''
    # fold the sorted keyword options into a ".key:value" suffix
    options = ''.join(
        '.{key}:{value}'.format(key=key, value=kwargs[key])
        for key in sorted(kwargs)
    )
    raw_key = '{content}{cache_key}'.format(
        content=content,
        cache_key=options,
    )
    # fix for non ascii symbols, ensure encoding, python3 hashlib fix
    digest = md5(raw_key.encode('utf-8', 'ignore')).hexdigest()
    # namespace the key by package version and active language
    return '{prefix}.{version}.{language}.{cache_key}'.format(
        prefix=settings.ACTIVE_URL_CACHE_PREFIX,
        version=__version__,
        language=get_language(),
        cache_key=digest,
    )
|
generate cache key
|
entailment
|
def yesno_to_bool(value, varname):
    """Return True/False from "yes"/"no".

    :param value: template keyword argument value
    :type value: string
    :param varname: name of the variable, for use on exception raising
    :type varname: string
    :raises: :exc:`ImproperlyConfigured`

    Django > 1.5 template boolean/None variables feature.
    """
    # normalize real booleans / None to their string spellings first
    if isinstance(value, bool):
        value = 'yes' if value else 'no'
    elif value is None:
        value = 'no'
    # map the (case-insensitive) string onto a boolean
    normalized = value.lower()
    if normalized in ('yes', 'true'):
        return True
    if normalized in ('no', 'false'):
        return False
    raise ImproperlyConfigured(
        'activeurl: malformed param value for %s' % varname
    )
|
Return True/False from "yes"/"no".
:param value: template keyword argument value
:type value: string
:param varname: name of the variable, for use on exception raising
:type varname: string
:raises: :exc:`ImproperlyConfigured`
Django > 1.5 template boolean/None variables feature.
|
entailment
|
def check_active(url, element, **kwargs):
    '''check "active" url, apply css_class

    Decides whether the ``<a>`` element `url` matches the request's
    full path and, if so, adds the configured css class to `element`
    (which is either the ``<a>`` itself or its parent tag).
    Returns True when the element was marked "active".
    '''
    # template options arrive as "yes"/"no" strings; coerce to booleans
    menu = yesno_to_bool(kwargs['menu'], 'menu')
    ignore_params = yesno_to_bool(kwargs['ignore_params'], 'ignore_params')
    # check missing href parameter
    if not url.attrib.get('href', None) is None:
        # get href attribute
        href = url.attrib['href'].strip()
        # href="#" is often used when links shouldn't be handled by browsers.
        # For example, Bootstrap uses this for expandable menus on
        # small screens, see
        # https://getbootstrap.com/docs/4.0/components/navs/#using-dropdowns
        if href == '#':
            return False
        # split into urlparse object
        href = urlparse.urlsplit(href)
        # cut off hashtag (anchor)
        href = href._replace(fragment='')
        # cut off get params (?key=var&etc=var2)
        if ignore_params:
            href = href._replace(query='')
            # strip the query from the request path too, so the
            # comparison below is like-for-like
            kwargs['full_path'] = urlparse.urlunsplit(
                urlparse.urlsplit(
                    kwargs['full_path']
                )._replace(query='')
            )
        # build urlparse object back into string
        href = urlparse.urlunsplit(href)
        # check empty href
        if href == '':
            # replace href with current location
            href = kwargs['full_path']
        # compare full_path with href according to menu configuration
        if menu:
            # try mark "root" (/) url as "active", in equals way
            if href == '/' == kwargs['full_path']:
                logic = True
            # skip "root" (/) url, otherwise it will be always "active"
            elif href != '/':
                # prefix match: any sub-path of href counts as active
                logic = (
                    kwargs['full_path'].startswith(href)
                    or
                    # maybe an urlquoted href was supplied
                    urlquote(kwargs['full_path']).startswith(href)
                    or
                    kwargs['full_path'].startswith(urlquote(href))
                )
            else:
                logic = False
        else:
            # equals logic
            logic = (
                kwargs['full_path'] == href
                or
                # maybe an urlquoted href was supplied
                urlquote(kwargs['full_path']) == href
                or
                kwargs['full_path'] == urlquote(href)
            )
        # "active" url found
        if logic:
            # check parent tag has "class" attribute or it is empty
            if element.attrib.get('class'):
                # prevent multiple "class" attribute adding
                if kwargs['css_class'] not in element.attrib['class']:
                    # append "active" class
                    element.attrib['class'] += ' {css_class}'.format(
                        css_class=kwargs['css_class'],
                    )
            else:
                # create or set (if empty) "class" attribute
                element.attrib['class'] = kwargs['css_class']
            return True
    # no "active" urls found
    return False
|
check "active" url, apply css_class
|
entailment
|
def check_content(content, **kwargs):
    '''check content for "active" urls

    Parses the rendered html, marks "active" ``<a>`` elements (or
    their parent tags) via ``check_active``, and returns the
    re-serialized html; returns `content` unchanged when nothing
    was marked.
    '''
    # valid html root tag
    try:
        # render elements tree from content
        tree = fragment_fromstring(content)
        # flag for prevent content rerendering, when no "active" urls found
        processed = False
        # django > 1.5 template boolean\None variables feature
        if isinstance(kwargs['parent_tag'], bool):
            if not kwargs['parent_tag']:
                kwargs['parent_tag'] = 'self'
            else:
                raise ImproperlyConfigured('''
                    parent_tag=True is not allowed
                ''')
        elif kwargs['parent_tag'] is None:
            kwargs['parent_tag'] = 'self'
        # if parent_tag is False\None\''\a\self
        # "active" status will be applied directly to "<a>"
        if kwargs['parent_tag'].lower() in ('a', 'self', ''):
            # xpath query to get all "<a>"
            urls = tree.xpath('.//a')
            # check "active" status for all urls
            for url in urls:
                if check_active(url, url, **kwargs):
                    # mark flag for rerendering content
                    processed = True
        # otherwise css_class must be applied to parent_tag
        else:
            # xpath query to get all parent tags
            elements = tree.xpath('.//{parent_tag}'.format(
                parent_tag=kwargs['parent_tag'],
            ))
            # check all elements for "active" "<a>"
            for element in elements:
                # xpath query to get all "<a>"
                urls = element.xpath('.//a')
                # check "active" status for all urls
                for url in urls:
                    if check_active(url, element, **kwargs):
                        # flag for rerendering content tree
                        processed = True
                        # stop checking other "<a>"
                        break
        # do not rerender content if no "active" urls found
        if processed:
            # render content from tree
            return tostring(tree, encoding='unicode')
    # not valid html root tag
    except ParserError:
        # raise an exception with configuration example
        raise ImproperlyConfigured('''
            content of {% activeurl %} must have valid html root tag
            for example
                {% activeurl %}
                    <ul>
                        <li>
                            <a href="/page/">page</a>
                        </li>
                        <li>
                            <a href="/other_page/">other_page</a>
                        </li>
                    </ul>
                {% endactiveurl %}
            in this case <ul> is valid content root tag
        ''')
    return content
|
check content for "active" urls
|
entailment
|
def render_content(content, **kwargs):
    '''check content for "active" urls, store results to django cache'''
    cache_key = None
    # try to serve pre-rendered content from cache, if caching is enabled
    if settings.ACTIVE_URL_CACHE:
        cache_key = get_cache_key(content, **kwargs)
        cached = cache.get(cache_key)
        if cached is not None:
            return cached
    # render content with "active" logic
    content = check_content(content, **kwargs)
    # store the freshly rendered result for next time
    if cache_key is not None:
        cache.set(cache_key, content, settings.ACTIVE_URL_CACHE_TIMEOUT)
    return content
|
check content for "active" urls, store results to django cache
|
entailment
|
def load_configuration(self, **kwargs):
    '''load configuration, merge with default settings'''
    # fill in any option the caller did not supply with its default
    for key, default in settings.ACTIVE_URL_KWARGS.items():
        kwargs.setdefault(key, default)
    # "active" html tag css class
    self.css_class = kwargs['css_class']
    # "active" html tag
    self.parent_tag = kwargs['parent_tag']
    # flipper for menu support
    self.menu = kwargs['menu']
    # whether to ignore / chomp get_params
    self.ignore_params = kwargs['ignore_params']
|
load configuration, merge with default settings
|
entailment
|
def _parse_response(response, clazz, is_list=False, resource_name=None):
    """Parse a Marathon response into an object or list of objects.

    :param response: HTTP response whose ``.json()`` holds the payload
    :param clazz: model class providing a ``from_json`` constructor
    :param bool is_list: whether the payload is a list of resources
    :param str resource_name: optional key selecting the payload within
        the response body
    :returns: a `clazz` instance, or a list of them when `is_list`
    """
    payload = response.json()
    if resource_name:
        payload = payload[resource_name]
    if is_list:
        return [clazz.from_json(item) for item in payload]
    return clazz.from_json(payload)
|
Parse a Marathon response into an object or list of objects.
|
entailment
|
def _do_request(self, method, path, params=None, data=None):
    """Query Marathon server.

    Tries each configured server in order until one answers, then maps
    HTTP error status codes onto typed exceptions.

    :param str method: HTTP method, e.g. 'GET' or 'POST'
    :param str path: API path, e.g. '/v2/apps'
    :param dict params: optional query-string parameters
    :param data: optional request body (JSON string)
    :returns: the HTTP response (2xx or 3xx)
    :raises: NoResponseError when every server failed at transport level
    :raises: InternalServerError on 5xx; NotFoundError on 404;
        ConflictError on 409; MarathonHttpError on other 4xx
    """
    headers = {
        'Content-Type': 'application/json', 'Accept': 'application/json'}
    if self.auth_token:
        headers['Authorization'] = "token={}".format(self.auth_token)
    response = None
    # iterate over a copy so self.servers itself is never mutated
    servers = list(self.servers)
    while servers and response is None:
        server = servers.pop(0)
        url = ''.join([server.rstrip('/'), path])
        try:
            response = self.session.request(
                method, url, params=params, data=data, headers=headers,
                auth=self.auth, timeout=self.timeout, verify=self.verify)
            marathon.log.info('Got response from %s', server)
        except requests.exceptions.RequestException as e:
            # transport-level failure: log it and try the next server
            marathon.log.error(
                'Error while calling %s: %s', url, str(e))
    if response is None:
        raise NoResponseError('No remaining Marathon servers to try')
    if response.status_code >= 500:
        marathon.log.error('Got HTTP {code}: {body}'.format(
            code=response.status_code, body=response.text.encode('utf-8')))
        raise InternalServerError(response)
    elif response.status_code >= 400:
        marathon.log.error('Got HTTP {code}: {body}'.format(
            code=response.status_code, body=response.text.encode('utf-8')))
        if response.status_code == 404:
            raise NotFoundError(response)
        elif response.status_code == 409:
            raise ConflictError(response)
        else:
            raise MarathonHttpError(response)
    elif response.status_code >= 300:
        # redirects and other 3xx are passed through with a warning
        marathon.log.warn('Got HTTP {code}: {body}'.format(
            code=response.status_code, body=response.text.encode('utf-8')))
    else:
        marathon.log.debug('Got HTTP {code}: {body}'.format(
            code=response.status_code, body=response.text.encode('utf-8')))
    return response
|
Query Marathon server.
|
entailment
|
def _do_sse_request(self, path, params=None):
    """Query Marathon server for events.

    Tries each configured server (following redirects manually) until
    one yields an event stream.

    :param str path: API path, e.g. '/v2/events'
    :param dict params: optional query-string parameters
    :returns: an iterator over the raw SSE response lines
    :raises: MarathonError when no server produced a stream
    """
    urls = [''.join([server.rstrip('/'), path]) for server in self.servers]
    while urls:
        url = urls.pop()
        try:
            # Requests does not set the original Authorization header on cross origin
            # redirects. If set allow_redirects=True we may get a 401 response.
            response = self.sse_session.get(
                url,
                params=params,
                stream=True,
                headers={'Accept': 'text/event-stream'},
                auth=self.auth,
                verify=self.verify,
                allow_redirects=False
            )
        except Exception as e:
            # BUG FIX: exceptions have no `.message` attribute on Python 3
            # (the old `e.message` raised AttributeError inside this
            # handler); let %s formatting stringify the exception.
            marathon.log.error(
                'Error while calling %s: %s', url, e)
        else:
            if response.is_redirect and response.next:
                # follow the redirect manually so auth is re-applied
                urls.append(response.next.url)
                marathon.log.debug("Got redirect to {}".format(response.next.url))
            elif response.ok:
                return response.iter_lines()
    raise MarathonError('No remaining Marathon servers to try')
|
Query Marathon server for events.
|
entailment
|
def create_app(self, app_id, app, minimal=True):
    """Create and start an app.

    :param str app_id: application ID
    :param :class:`marathon.models.app.MarathonApp` app: the application to create
    :param bool minimal: ignore nulls and empty collections
    :returns: the created app (on success)
    :rtype: :class:`marathon.models.app.MarathonApp` or False
    """
    app.id = app_id
    response = self._do_request(
        'POST', '/v2/apps', data=app.to_json(minimal=minimal))
    # 201 Created -> parse and return the app; anything else -> False
    if response.status_code != 201:
        return False
    return self._parse_response(response, MarathonApp)
|
Create and start an app.
:param str app_id: application ID
:param :class:`marathon.models.app.MarathonApp` app: the application to create
:param bool minimal: ignore nulls and empty collections
:returns: the created app (on success)
:rtype: :class:`marathon.models.app.MarathonApp` or False
|
entailment
|
def list_apps(self, cmd=None, embed_tasks=False, embed_counts=False,
              embed_deployments=False, embed_readiness=False,
              embed_last_task_failure=False, embed_failures=False,
              embed_task_stats=False, app_id=None, label=None, **kwargs):
    """List all apps.

    :param str cmd: if passed, only show apps with a matching `cmd`
    :param bool embed_tasks: embed tasks in result
    :param bool embed_counts: embed all task counts
    :param bool embed_deployments: embed all deployment identifier
    :param bool embed_readiness: embed all readiness check results
    :param bool embed_last_task_failure: embeds the last task failure
    :param bool embed_failures: shorthand for embed_last_task_failure
    :param bool embed_task_stats: embed task stats in result
    :param str app_id: if passed, only show apps with an 'id' that matches or contains this value
    :param str label: if passed, only show apps with the selected labels
    :param kwargs: arbitrary search filters applied client-side
    :returns: list of applications
    :rtype: list[:class:`marathon.models.app.MarathonApp`]
    """
    # server-side filters
    query = {}
    for name, value in (('cmd', cmd), ('id', app_id), ('label', label)):
        if value:
            query[name] = value
    # requested embed sections
    embeds = [
        key for key, wanted in (
            ('app.tasks', embed_tasks),
            ('app.counts', embed_counts),
            ('app.deployments', embed_deployments),
            ('app.readiness', embed_readiness),
            ('app.lastTaskFailure', embed_last_task_failure),
            ('app.failures', embed_failures),
            ('app.taskStats', embed_task_stats),
        ) if wanted
    ]
    if embeds:
        query['embed'] = embeds
    response = self._do_request('GET', '/v2/apps', params=query)
    apps = self._parse_response(
        response, MarathonApp, is_list=True, resource_name='apps')
    # arbitrary attribute filters, applied after parsing
    for attr, wanted in kwargs.items():
        apps = [a for a in apps if getattr(a, attr) == wanted]
    return apps
|
List all apps.
:param str cmd: if passed, only show apps with a matching `cmd`
:param bool embed_tasks: embed tasks in result
:param bool embed_counts: embed all task counts
:param bool embed_deployments: embed all deployment identifier
:param bool embed_readiness: embed all readiness check results
:param bool embed_last_task_failure: embeds the last task failure
:param bool embed_failures: shorthand for embed_last_task_failure
:param bool embed_task_stats: embed task stats in result
:param str app_id: if passed, only show apps with an 'id' that matches or contains this value
:param str label: if passed, only show apps with the selected labels
:param kwargs: arbitrary search filters
:returns: list of applications
:rtype: list[:class:`marathon.models.app.MarathonApp`]
|
entailment
|
def get_app(self, app_id, embed_tasks=False, embed_counts=False,
            embed_deployments=False, embed_readiness=False,
            embed_last_task_failure=False, embed_failures=False,
            embed_task_stats=False):
    """Get a single app.

    :param str app_id: application ID
    :param bool embed_tasks: embed tasks in result
    :param bool embed_counts: embed all task counts
    :param bool embed_deployments: embed all deployment identifier
    :param bool embed_readiness: embed all readiness check results
    :param bool embed_last_task_failure: embeds the last task failure
    :param bool embed_failures: shorthand for embed_last_task_failure
    :param bool embed_task_stats: embed task stats in result
    :returns: application
    :rtype: :class:`marathon.models.app.MarathonApp`
    """
    # requested embed sections
    embeds = [
        key for key, wanted in (
            ('app.tasks', embed_tasks),
            ('app.counts', embed_counts),
            ('app.deployments', embed_deployments),
            ('app.readiness', embed_readiness),
            ('app.lastTaskFailure', embed_last_task_failure),
            ('app.failures', embed_failures),
            ('app.taskStats', embed_task_stats),
        ) if wanted
    ]
    query = {'embed': embeds} if embeds else {}
    response = self._do_request(
        'GET', '/v2/apps/{app_id}'.format(app_id=app_id), params=query)
    return self._parse_response(response, MarathonApp, resource_name='app')
|
Get a single app.
:param str app_id: application ID
:param bool embed_tasks: embed tasks in result
:param bool embed_counts: embed all task counts
:param bool embed_deployments: embed all deployment identifier
:param bool embed_readiness: embed all readiness check results
:param bool embed_last_task_failure: embeds the last task failure
:param bool embed_failures: shorthand for embed_last_task_failure
:param bool embed_task_stats: embed task stats in result
:returns: application
:rtype: :class:`marathon.models.app.MarathonApp`
|
entailment
|
def update_app(self, app_id, app, force=False, minimal=True):
    """Update an app.

    Applies writable settings in `app` to `app_id`.
    Note: this method can not be used to rename apps.

    :param str app_id: target application ID
    :param app: application settings
    :type app: :class:`marathon.models.app.MarathonApp`
    :param bool force: apply even if a deployment is in progress
    :param bool minimal: ignore nulls and empty collections
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    # changes won't take if version is set - blank it for convenience
    app.version = None
    response = self._do_request(
        'PUT',
        '/v2/apps/{app_id}'.format(app_id=app_id),
        params={'force': force},
        data=app.to_json(minimal=minimal),
    )
    return response.json()
|
Update an app.
Applies writable settings in `app` to `app_id`
Note: this method can not be used to rename apps.
:param str app_id: target application ID
:param app: application settings
:type app: :class:`marathon.models.app.MarathonApp`
:param bool force: apply even if a deployment is in progress
:param bool minimal: ignore nulls and empty collections
:returns: a dict containing the deployment id and version
:rtype: dict
|
entailment
|
def update_apps(self, apps, force=False, minimal=True):
    """Update multiple apps.

    Applies writable settings in elements of apps either by upgrading
    existing ones or creating new ones.

    :param apps: sequence of application settings
    :param bool force: apply even if a deployment is in progress
    :param bool minimal: ignore nulls and empty collections
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    # changes won't take if version is set - blank it for convenience
    for app in apps:
        app.version = None
    body = [app.json_repr(minimal=minimal) for app in apps]
    encoder = MarathonMinimalJsonEncoder if minimal else MarathonJsonEncoder
    payload = json.dumps(body, cls=encoder, sort_keys=True)
    response = self._do_request(
        'PUT', '/v2/apps', params={'force': force}, data=payload)
    return response.json()
|
Update multiple apps.
Applies writable settings in elements of apps either by upgrading existing ones or creating new ones
:param apps: sequence of application settings
:param bool force: apply even if a deployment is in progress
:param bool minimal: ignore nulls and empty collections
:returns: a dict containing the deployment id and version
:rtype: dict
|
entailment
|
def rollback_app(self, app_id, version, force=False):
    """Roll an app back to a previous version.

    :param str app_id: application ID
    :param str version: application version
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    payload = json.dumps({'version': version})
    response = self._do_request(
        'PUT',
        '/v2/apps/{app_id}'.format(app_id=app_id),
        params={'force': force},
        data=payload,
    )
    return response.json()
|
Roll an app back to a previous version.
:param str app_id: application ID
:param str version: application version
:param bool force: apply even if a deployment is in progress
:returns: a dict containing the deployment id and version
:rtype: dict
|
entailment
|
def delete_app(self, app_id, force=False):
    """Stop and destroy an app.

    :param str app_id: application ID
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    response = self._do_request(
        'DELETE',
        '/v2/apps/{app_id}'.format(app_id=app_id),
        params={'force': force},
    )
    return response.json()
|
Stop and destroy an app.
:param str app_id: application ID
:param bool force: apply even if a deployment is in progress
:returns: a dict containing the deployment id and version
:rtype: dict
|
entailment
|
def scale_app(self, app_id, instances=None, delta=None, force=False):
    """Scale an app.

    Scale an app to a target number of instances (with `instances`), or scale the number of
    instances up or down by some delta (`delta`). If the resulting number of instances would be negative,
    desired instances will be set to zero.

    If both `instances` and `delta` are passed, use `instances`.

    :param str app_id: application ID
    :param int instances: [optional] the number of instances to scale to
    :param int delta: [optional] the number of instances to scale up or down by
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    if instances is None and delta is None:
        marathon.log.error('instances or delta must be passed')
        return
    try:
        app = self.get_app(app_id)
    except NotFoundError:
        marathon.log.error('App "{app}" not found'.format(app=app_id))
        return
    if instances is not None:
        desired = instances
    else:
        # BUG FIX: clamp at zero so a large negative delta cannot
        # request a negative instance count, as the docstring promises.
        desired = max(0, app.instances + delta)
    return self.update_app(app.id, MarathonApp(instances=desired), force=force)
|
Scale an app.
Scale an app to a target number of instances (with `instances`), or scale the number of
instances up or down by some delta (`delta`). If the resulting number of instances would be negative,
desired instances will be set to zero.
If both `instances` and `delta` are passed, use `instances`.
:param str app_id: application ID
:param int instances: [optional] the number of instances to scale to
:param int delta: [optional] the number of instances to scale up or down by
:param bool force: apply even if a deployment is in progress
:returns: a dict containing the deployment id and version
:rtype: dict
|
entailment
|
def create_group(self, group):
    """Create and start a group.

    :param :class:`marathon.models.group.MarathonGroup` group: the group to create
    :returns: success
    :rtype: dict containing the version ID
    """
    response = self._do_request('POST', '/v2/groups', data=group.to_json())
    return response.json()
|
Create and start a group.
:param :class:`marathon.models.group.MarathonGroup` group: the group to create
:returns: success
:rtype: dict containing the version ID
|
entailment
|
def list_groups(self, **kwargs):
    """List all groups.

    :param kwargs: arbitrary search filters applied client-side
    :returns: list of groups
    :rtype: list[:class:`marathon.models.group.MarathonGroup`]
    """
    response = self._do_request('GET', '/v2/groups')
    groups = self._parse_response(
        response, MarathonGroup, is_list=True, resource_name='groups')
    # arbitrary attribute filters, applied after parsing
    for attr, wanted in kwargs.items():
        groups = [g for g in groups if getattr(g, attr) == wanted]
    return groups
|
List all groups.
:param kwargs: arbitrary search filters
:returns: list of groups
:rtype: list[:class:`marathon.models.group.MarathonGroup`]
|
entailment
|
def get_group(self, group_id):
    """Get a single group.

    :param str group_id: group ID
    :returns: group
    :rtype: :class:`marathon.models.group.MarathonGroup`
    """
    path = '/v2/groups/{group_id}'.format(group_id=group_id)
    response = self._do_request('GET', path)
    return self._parse_response(response, MarathonGroup)
|
Get a single group.
:param str group_id: group ID
:returns: group
:rtype: :class:`marathon.models.group.MarathonGroup`
|
entailment
|
def update_group(self, group_id, group, force=False, minimal=True):
    """Update a group.

    Applies writable settings in `group` to `group_id`.
    Note: this method can not be used to rename groups.

    :param str group_id: target group ID
    :param group: group settings
    :type group: :class:`marathon.models.group.MarathonGroup`
    :param bool force: apply even if a deployment is in progress
    :param bool minimal: ignore nulls and empty collections
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    # changes won't take if version is set - blank it for convenience
    group.version = None
    response = self._do_request(
        'PUT',
        '/v2/groups/{group_id}'.format(group_id=group_id),
        data=group.to_json(minimal=minimal),
        params={'force': force},
    )
    return response.json()
|
Update a group.
Applies writable settings in `group` to `group_id`
Note: this method can not be used to rename groups.
:param str group_id: target group ID
:param group: group settings
:type group: :class:`marathon.models.group.MarathonGroup`
:param bool force: apply even if a deployment is in progress
:param bool minimal: ignore nulls and empty collections
:returns: a dict containing the deployment id and version
:rtype: dict
|
entailment
|
def rollback_group(self, group_id, version, force=False):
    """Roll a group back to a previous version.

    :param str group_id: group ID
    :param str version: group version
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    path = '/v2/groups/{group_id}/versions/{version}'.format(
        group_id=group_id, version=version)
    response = self._do_request('PUT', path, params={'force': force})
    return response.json()
|
Roll a group back to a previous version.
:param str group_id: group ID
:param str version: group version
:param bool force: apply even if a deployment is in progress
:returns: a dict containing the deployment id and version
:rtype: dict
|
entailment
|
def delete_group(self, group_id, force=False):
    """Stop and destroy a group.

    :param str group_id: group ID
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deleted version
    :rtype: dict
    """
    response = self._do_request(
        'DELETE',
        '/v2/groups/{group_id}'.format(group_id=group_id),
        params={'force': force},
    )
    return response.json()
|
Stop and destroy a group.
:param str group_id: group ID
:param bool force: apply even if a deployment is in progress
:returns: a dict containing the deleted version
:rtype: dict
|
entailment
|
def scale_group(self, group_id, scale_by):
    """Scale a group by a factor.

    :param str group_id: group ID
    :param int scale_by: factor to scale by
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    payload = json.dumps({'scaleBy': scale_by})
    response = self._do_request(
        'PUT',
        '/v2/groups/{group_id}'.format(group_id=group_id),
        data=payload,
    )
    return response.json()
|
Scale a group by a factor.
:param str group_id: group ID
:param int scale_by: factor to scale by
:returns: a dict containing the deployment id and version
:rtype: dict
|
entailment
|
def list_tasks(self, app_id=None, **kwargs):
    """List running tasks, optionally filtered by app_id.

    :param str app_id: if passed, only show tasks for this application
    :param kwargs: arbitrary search filters applied client-side
    :returns: list of tasks
    :rtype: list[:class:`marathon.models.task.MarathonTask`]
    """
    response = self._do_request(
        'GET', '/v2/apps/%s/tasks' % app_id if app_id else '/v2/tasks')
    tasks = self._parse_response(
        response, MarathonTask, is_list=True, resource_name='tasks')
    # Fill in the app_id when the API response omitted it. (Idiom fix:
    # a plain loop instead of a list comprehension run for side effects.)
    if app_id:
        for t in tasks:
            if t.app_id is None:
                t.app_id = app_id
    # arbitrary attribute filters, applied after parsing
    for k, v in kwargs.items():
        tasks = [o for o in tasks if getattr(o, k) == v]
    return tasks
|
List running tasks, optionally filtered by app_id.
:param str app_id: if passed, only show tasks for this application
:param kwargs: arbitrary search filters
:returns: list of tasks
:rtype: list[:class:`marathon.models.task.MarathonTask`]
|
entailment
|
def kill_given_tasks(self, task_ids, scale=False, force=None):
    """Kill a list of given tasks.

    :param list[str] task_ids: tasks to kill
    :param bool scale: if true, scale down the app by the number of tasks killed
    :param bool force: if true, ignore any current running deployments
    :return: True on success
    :rtype: bool
    """
    params = {'scale': scale}
    if force is not None:
        params['force'] = force
    data = json.dumps({"ids": task_ids})
    response = self._do_request(
        'POST', '/v2/tasks/delete', params=params, data=data)
    # BUG FIX: compare the HTTP status code to 200, not the Response
    # object itself (`response == 200` was always False).
    return response.status_code == 200
|
Kill a list of given tasks.
:param list[str] task_ids: tasks to kill
:param bool scale: if true, scale down the app by the number of tasks killed
:param bool force: if true, ignore any current running deployments
:return: True on success
:rtype: bool
|
entailment
|
def kill_tasks(self, app_id, scale=False, wipe=False,
               host=None, batch_size=0, batch_delay=0):
    """Kill all tasks belonging to app.

    :param str app_id: application ID
    :param bool scale: if true, scale down the app by the number of tasks killed
    :param bool wipe: if true, also expunge the task state
    :param str host: if provided, only terminate tasks on this Mesos slave
    :param int batch_size: if non-zero, terminate tasks in groups of this size
    :param int batch_delay: time (in seconds) to wait in between batched kills. If zero, automatically determine
    :returns: list of killed tasks
    :rtype: list[:class:`marathon.models.task.MarathonTask`]
    """
    def batch(iterable, size):
        # yield chained iterators over successive chunks of `iterable`
        sourceiter = iter(iterable)
        while True:
            batchiter = itertools.islice(sourceiter, size)
            try:
                first = next(batchiter)
            except StopIteration:
                # BUG FIX: a StopIteration escaping a generator becomes a
                # RuntimeError under PEP 479 (Python 3.7+); return instead.
                return
            yield itertools.chain([first], batchiter)

    if batch_size == 0:
        # Terminate all at once
        params = {'scale': scale, 'wipe': wipe}
        if host:
            params['host'] = host
        response = self._do_request(
            'DELETE', '/v2/apps/{app_id}/tasks'.format(app_id=app_id), params)
        # Marathon is inconsistent about what type of object it returns on the multi
        # task deletion endpoint, depending on the version of Marathon. See:
        # https://github.com/mesosphere/marathon/blob/06a6f763a75fb6d652b4f1660685ae234bd15387/src/main/scala/mesosphere/marathon/api/v2/AppTasksResource.scala#L88-L95
        if "tasks" in response.json():
            return self._parse_response(response, MarathonTask, is_list=True, resource_name='tasks')
        else:
            return response.json()
    else:
        # Terminate in batches
        tasks = self.list_tasks(
            app_id, host=host) if host else self.list_tasks(app_id)
        for tbatch in batch(tasks, batch_size):
            killed_tasks = [self.kill_task(app_id, t.id, scale=scale, wipe=wipe)
                            for t in tbatch]
            # Pause until the tasks have been killed to avoid race
            # conditions
            killed_task_ids = set(t.id for t in killed_tasks)
            running_task_ids = killed_task_ids
            while killed_task_ids.intersection(running_task_ids):
                time.sleep(1)
                running_task_ids = set(
                    t.id for t in self.get_app(app_id).tasks)
            if batch_delay == 0:
                # Pause until the replacement tasks are healthy
                desired_instances = self.get_app(app_id).instances
                running_instances = 0
                while running_instances < desired_instances:
                    time.sleep(1)
                    # NOTE(review): this counts tasks whose started_at is
                    # None; presumably a *running* task has started_at set,
                    # so `is not None` may have been intended — verify
                    # against MarathonTask semantics before changing.
                    running_instances = sum(
                        t.started_at is None for t in self.get_app(app_id).tasks)
            else:
                time.sleep(batch_delay)
        return tasks
|
Kill all tasks belonging to app.
:param str app_id: application ID
:param bool scale: if true, scale down the app by the number of tasks killed
:param str host: if provided, only terminate tasks on this Mesos slave
:param int batch_size: if non-zero, terminate tasks in groups of this size
:param int batch_delay: time (in seconds) to wait in between batched kills. If zero, automatically determine
:returns: list of killed tasks
:rtype: list[:class:`marathon.models.task.MarathonTask`]
|
entailment
|
def kill_task(self, app_id, task_id, scale=False, wipe=False):
    """Kill a task.

    :param str app_id: application ID
    :param str task_id: the task to kill
    :param bool scale: if true, scale down the app by one if the task exists

    :returns: the killed task
    :rtype: :class:`marathon.models.task.MarathonTask`
    """
    path = '/v2/apps/{app_id}/tasks/{task_id}'.format(
        app_id=app_id, task_id=task_id)
    response = self._do_request('DELETE', path, {'scale': scale, 'wipe': wipe})
    # Marathon is inconsistent about what type of object it returns on the multi
    # task deletion endpoint, depending on the version of Marathon. See:
    # https://github.com/mesosphere/marathon/blob/06a6f763a75fb6d652b4f1660685ae234bd15387/src/main/scala/mesosphere/marathon/api/v2/AppTasksResource.scala#L88-L95
    body = response.json()
    if "task" not in body:
        return body
    return self._parse_response(
        response, MarathonTask, is_list=False, resource_name='task')
|
Kill a task.
:param str app_id: application ID
:param str task_id: the task to kill
:param bool scale: if true, scale down the app by one if the task exists
:returns: the killed task
:rtype: :class:`marathon.models.task.MarathonTask`
|
entailment
|
def list_versions(self, app_id):
    """List the versions of an app.

    :param str app_id: application ID

    :returns: list of versions
    :rtype: list[str]
    """
    response = self._do_request(
        'GET', '/v2/apps/{app_id}/versions'.format(app_id=app_id))
    # list() copies the parsed JSON array directly; the previous identity
    # comprehension ([v for v in ...]) did the same thing less clearly.
    return list(response.json()['versions'])
|
List the versions of an app.
:param str app_id: application ID
:returns: list of versions
:rtype: list[str]
|
entailment
|
def get_version(self, app_id, version):
    """Get the configuration of an app at a specific version.

    :param str app_id: application ID
    :param str version: application version

    :return: application configuration
    :rtype: :class:`marathon.models.app.MarathonApp`
    """
    path = '/v2/apps/{app_id}/versions/{version}'.format(
        app_id=app_id, version=version)
    return MarathonApp.from_json(self._do_request('GET', path).json())
|
Get the configuration of an app at a specific version.
:param str app_id: application ID
:param str version: application version
:return: application configuration
:rtype: :class:`marathon.models.app.MarathonApp`
|
entailment
|
def create_event_subscription(self, url):
    """Register a callback URL as an event subscriber.

    :param str url: callback URL

    :returns: the created event subscription
    :rtype: dict
    """
    response = self._do_request(
        'POST', '/v2/eventSubscriptions', {'callbackUrl': url})
    return response.json()
|
Register a callback URL as an event subscriber.
:param str url: callback URL
:returns: the created event subscription
:rtype: dict
|
entailment
|
def delete_event_subscription(self, url):
    """Deregister a callback URL as an event subscriber.

    :param str url: callback URL

    :returns: the deleted event subscription
    :rtype: dict
    """
    response = self._do_request(
        'DELETE', '/v2/eventSubscriptions', {'callbackUrl': url})
    return response.json()
|
Deregister a callback URL as an event subscriber.
:param str url: callback URL
:returns: the deleted event subscription
:rtype: dict
|
entailment
|
def list_deployments(self):
    """List all running deployments.

    :returns: list of deployments
    :rtype: list[:class:`marathon.models.deployment.MarathonDeployment`]
    """
    return self._parse_response(
        self._do_request('GET', '/v2/deployments'),
        MarathonDeployment, is_list=True)
|
List all running deployments.
:returns: list of deployments
:rtype: list[:class:`marathon.models.deployment.MarathonDeployment`]
|
entailment
|
def list_queue(self, embed_last_unused_offers=False):
    """List all the tasks queued up or waiting to be scheduled.

    :param bool embed_last_unused_offers: if true, ask Marathon to embed
        the 'lastUnusedOffers' details in the response

    :returns: list of queue items
    :rtype: list[:class:`marathon.models.queue.MarathonQueueItem`]
    """
    params = {'embed': 'lastUnusedOffers'} if embed_last_unused_offers else {}
    response = self._do_request('GET', '/v2/queue', params=params)
    return self._parse_response(
        response, MarathonQueueItem, is_list=True, resource_name='queue')
|
List all the tasks queued up or waiting to be scheduled.
:returns: list of queue items
:rtype: list[:class:`marathon.models.queue.MarathonQueueItem`]
|
entailment
|
def delete_deployment(self, deployment_id, force=False):
    """Cancel a deployment.

    :param str deployment_id: deployment id
    :param bool force: if true, don't create a rollback deployment to restore the previous configuration

    :returns: a dict containing the deployment id and version (empty dict if force=True)
    :rtype: dict
    """
    path = '/v2/deployments/{deployment}'.format(deployment=deployment_id)
    if not force:
        return self._do_request('DELETE', path).json()
    self._do_request('DELETE', path, params={'force': True})
    # Successful DELETE with ?force=true returns empty text (and status
    # code 202). Client code should poll until deployment is removed.
    return {}
|
Cancel a deployment.
:param str deployment_id: deployment id
:param bool force: if true, don't create a rollback deployment to restore the previous configuration
:returns: a dict containing the deployment id and version (empty dict if force=True)
:rtype: dict
|
entailment
|
def event_stream(self, raw=False, event_types=None):
    """Polls event bus using /v2/events

    :param bool raw: if true, yield raw event text, else yield MarathonEvent object
    :param event_types: a list of event types to consume
    :type event_types: list[type] or list[str]

    :returns: iterator with events
    :rtype: iterator
    """
    factory = EventFactory()
    # MarathonEvent subclasses are translated to their wire-format names;
    # plain strings are passed through untouched.
    requested = [
        EventFactory.class_to_event[et]
        if isinstance(et, type) and issubclass(et, MarathonEvent) else et
        for et in event_types or []
    ]
    params = {'event_type': requested}
    for raw_message in self._do_sse_request('/v2/events', params=params):
        try:
            parts = raw_message.decode('utf8').split(':', 1)
            if parts[0] != 'data':
                continue
            if raw:
                yield parts[1]
            else:
                parsed = json.loads(parts[1].strip())
                if 'eventType' not in parsed:
                    raise MarathonError('Invalid event data received.')
                yield factory.process(parsed)
        except ValueError:
            raise MarathonError('Invalid event data received.')
|
Polls event bus using /v2/events
:param bool raw: if true, yield raw event text, else yield MarathonEvent object
:param event_types: a list of event types to consume
:type event_types: list[type] or list[str]
:returns: iterator with events
:rtype: iterator
|
entailment
|
def assert_valid_path(path):
    """Checks if a path is a correct format that Marathon expects. Raises ValueError if not valid.

    :param str path: The app id.

    :rtype: str
    """
    if path is None:
        return
    # As seen in:
    # https://github.com/mesosphere/marathon/blob/0c11661ca2f259f8a903d114ef79023649a6f04b/src/main/scala/mesosphere/marathon/state/PathId.scala#L71
    segments = (seg for seg in path.strip('/').split('/') if seg)
    for segment in segments:
        if ID_PATTERN.match(segment):
            continue
        raise ValueError(
            'invalid path (allowed: lowercase letters, digits, hyphen, "/", ".", ".."): %r' % path)
    return path
|
Checks if a path is a correct format that Marathon expects. Raises ValueError if not valid.
:param str path: The app id.
:rtype: str
|
entailment
|
def assert_valid_id(id):
    """Checks if an id is the correct format that Marathon expects. Raises ValueError if not valid.

    :param str id: App or group id.

    :rtype: str
    """
    if id is None:
        return
    if ID_PATTERN.match(id.strip('/')):
        return id
    raise ValueError(
        'invalid id (allowed: lowercase letters, digits, hyphen, ".", ".."): %r' % id)
|
Checks if an id is the correct format that Marathon expects. Raises ValueError if not valid.
:param str id: App or group id.
:rtype: str
|
entailment
|
def json_repr(self, minimal=False):
    """Construct a JSON-friendly representation of the object.

    :param bool minimal: Construct a minimal representation of the object (ignore nulls and empty collections)

    :rtype: dict
    """
    attributes = vars(self).items()
    if not minimal:
        return {to_camel_case(k): v for k, v in attributes}
    # Falsy-but-meaningful values (False, 0) are kept; None and empty
    # collections are dropped.
    return {to_camel_case(k): v
            for k, v in attributes
            if (v or v is False or v == 0)}
|
Construct a JSON-friendly representation of the object.
:param bool minimal: Construct a minimal representation of the object (ignore nulls and empty collections)
:rtype: dict
|
entailment
|
def from_json(cls, attributes):
    """Construct an object from a parsed response.

    :param dict attributes: object attributes from parsed response
    """
    snake_cased = {to_snake_case(k): v for k, v in attributes.items()}
    return cls(**snake_cased)
|
Construct an object from a parsed response.
:param dict attributes: object attributes from parsed response
|
entailment
|
def to_json(self, minimal=True):
    """Encode an object as a JSON string.

    :param bool minimal: Construct a minimal representation of the object (ignore nulls and empty collections)

    :rtype: str
    """
    if minimal:
        payload = self.json_repr(minimal=True)
        encoder = MarathonMinimalJsonEncoder
    else:
        payload = self.json_repr()
        encoder = MarathonJsonEncoder
    return json.dumps(payload, cls=encoder, sort_keys=True)
|
Encode an object as a JSON string.
:param bool minimal: Construct a minimal representation of the object (ignore nulls and empty collections)
:rtype: str
|
entailment
|
def json_repr(self, minimal=False):
    """Construct a JSON-friendly representation of the object.

    :param bool minimal: [ignored]

    :rtype: list
    """
    # Test against None rather than truthiness so legitimate falsy
    # constraint values (0, "") still serialize as a 3-element list;
    # the previous `if self.value:` silently dropped them.
    if self.value is not None:
        return [self.field, self.operator, self.value]
    return [self.field, self.operator]
|
Construct a JSON-friendly representation of the object.
:param bool minimal: [ignored]
:rtype: list
|
entailment
|
def from_json(cls, obj):
    """Construct a MarathonConstraint from a parsed response.

    :param dict attributes: object attributes from parsed response

    :rtype: :class:`MarathonConstraint`
    """
    # Two elements: field + operator; three or more: field + operator +
    # value. Anything shorter yields None implicitly.
    if len(obj) == 2:
        field, operator = obj
        return cls(field, operator)
    if len(obj) > 2:
        field, operator, value = obj
        return cls(field, operator, value)
|
Construct a MarathonConstraint from a parsed response.
:param dict attributes: object attributes from parsed response
:rtype: :class:`MarathonConstraint`
|
entailment
|
def from_string(cls, constraint):
    """
    :param str constraint: The string representation of a constraint

    :rtype: :class:`MarathonConstraint`
    """
    parsed = cls.from_json(constraint.split(':'))
    if parsed:
        return parsed
    raise ValueError("Invalid string format. "
                     "Expected `field:operator:value`")
|
:param str constraint: The string representation of a constraint
:rtype: :class:`MarathonConstraint`
|
entailment
|
def from_tasks(cls, tasks):
    """Construct a list of MarathonEndpoints from a list of tasks.

    :param list[:class:`marathon.models.MarathonTask`] tasks: list of tasks to parse

    :rtype: list[:class:`MarathonEndpoint`]
    """
    # One endpoint per (task, port) pair; service_ports and ports are
    # aligned by index.
    flattened = []
    for task in tasks:
        for port_index, port in enumerate(task.ports):
            flattened.append(
                MarathonEndpoint(task.app_id, task.service_ports[port_index],
                                 task.host, task.id, port))
    return flattened
|
Construct a list of MarathonEndpoints from a list of tasks.
:param list[:class:`marathon.models.MarathonTask`] tasks: list of tasks to parse
:rtype: list[:class:`MarathonEndpoint`]
|
entailment
|
def _format_newlines(prefix, formatted_node, options):
"""
Convert newlines into U+23EC characters, followed by an actual newline and
then a tree prefix so as to position the remaining text under the previous
line.
"""
replacement = u''.join([
options.NEWLINE,
u'\n',
prefix])
return formatted_node.replace(u'\n', replacement)
|
Convert newlines into U+23EC characters, followed by an actual newline and
then a tree prefix so as to position the remaining text under the previous
line.
|
entailment
|
def get_band(self, tag):
    """Gets a band.

    Gets a band with specified tag. If no tag is specified, the request will fail.
    If the tag is invalid, a brawlstars.InvalidTag will be raised.
    If the data is missing, a ValueError will be raised.
    If the connection times out, a brawlstars.Timeout will be raised.
    If the data was unable to be received, a brawlstars.HTTPError will be raised along with the
    HTTP status code.

    On success, will return a Band.
    """
    tag = tag.strip("#").upper()
    try:
        resp = requests.get(self._base_url + 'bands/' + tag,
                            headers=self.headers, timeout=self.timeout)
        if resp.status_code == 200:
            data = resp.json()
        elif 500 > resp.status_code > 400:
            raise HTTPError(resp.status_code)
        else:
            raise Error()
    except (HTTPError, Error):
        # Re-raise our own API errors: previously the blanket
        # `except Exception` below caught them and misreported every
        # HTTP failure as a Timeout.
        raise
    except ValueError:
        # resp.json() raises ValueError on an unparsable body.
        raise MissingData('data')
    except Exception:
        raise Timeout()
    data = Box(data)
    band = Band(data)
    return band
|
Gets a band.
Gets a band with specified tag. If no tag is specified, the request will fail.
If the tag is invalid, a brawlstars.InvalidTag will be raised.
If the data is missing, a ValueError will be raised.
If the connection times out, a brawlstars.Timeout will be raised.
If the data was unable to be received, a brawlstars.HTTPError will be raised along with the
HTTP status code.
On success, will return a Band.
|
entailment
|
async def get_player(self, tag):
    """Gets a player.

    Gets a player with specified tag. If no tag is specified, the request will fail.
    If the tag is invalid, a brawlstars.InvalidTag will be raised.
    If the data is missing, a ValueError will be raised.
    If the connection times out, a brawlstars.Timeout will be raised.
    If the data was unable to be received, a brawlstars.HTTPError will be raised along with the
    HTTP status code.

    On success, will return a Player.
    """
    tag = tag.strip("#").upper()
    try:
        async with self.session.get(self._base_url + 'players/' + tag,
                                    timeout=self.timeout,
                                    headers=self.headers) as resp:
            if resp.status == 200:
                data = await resp.json()
            elif 500 > resp.status > 400:
                raise HTTPError(resp.status)
            else:
                raise Error()
    except (HTTPError, Error):
        # Re-raise our own API errors: previously the blanket
        # `except Exception` below caught them and misreported every
        # HTTP failure as InvalidArg('tag').
        raise
    except asyncio.TimeoutError:
        raise Timeout()
    except ValueError:
        # resp.json() raises ValueError on an unparsable body.
        raise MissingData('data')
    except Exception:
        raise InvalidArg('tag')
    data = Box(data)
    player = Player(data)
    return player
|
Gets a player.
Gets a player with specified tag. If no tag is specified, the request will fail.
If the tag is invalid, a brawlstars.InvalidTag will be raised.
If the data is missing, a ValueError will be raised.
If the connection times out, a brawlstars.Timeout will be raised.
If the data was unable to be received, a brawlstars.HTTPError will be raised along with the
HTTP status code.
On success, will return a Player.
|
entailment
|
async def get_band(self, tag):
    """Gets a band.

    Gets a band with specified tag. If no tag is specified, the request will fail.
    If the tag is invalid, a brawlstars.InvalidTag will be raised.
    If the data is missing, a ValueError will be raised.
    If the connection times out, a brawlstars.Timeout will be raised.
    If the data was unable to be received, a brawlstars.HTTPError will be raised along with the
    HTTP status code.

    On success, will return a Band.
    """
    tag = tag.strip("#").upper()
    try:
        async with self.session.get(self._base_url + 'bands/' + tag,
                                    timeout=self.timeout,
                                    headers=self.headers) as resp:
            if resp.status == 200:
                data = await resp.json()
            elif 500 > resp.status > 400:
                raise HTTPError(resp.status)
            else:
                raise Error()
    except (HTTPError, Error):
        # Re-raise our own API errors: previously the blanket
        # `except Exception` below caught them and misreported every
        # HTTP failure as InvalidArg('tag').
        raise
    except asyncio.TimeoutError:
        raise Timeout()
    except ValueError:
        # resp.json() raises ValueError on an unparsable body.
        raise MissingData('data')
    except Exception:
        raise InvalidArg('tag')
    data = Box(data)
    band = Band(data)
    return band
|
Gets a band.
Gets a band with specified tag. If no tag is specified, the request will fail.
If the tag is invalid, a brawlstars.InvalidTag will be raised.
If the data is missing, a ValueError will be raised.
If the connection times out, a brawlstars.Timeout will be raised.
If the data was unable to be received, a brawlstars.HTTPError will be raised along with the
HTTP status code.
On success, will return a Band.
|
entailment
|
def lazy_module(modname, error_strings=None, lazy_mod_class=LazyModule,
                level='leaf'):
    """Lazily import a module, registering it in `sys.modules`.

    A hollow module object is created and registered in `sys.modules`;
    the actual loading — and any `ImportError` if the module is missing —
    is deferred until an attribute of the lazy module is first accessed.
    Because of the `sys.modules` registration, later plain
    `import modname` statements also return the lazy object, so the rest
    of your code can use regular import statements and retain laziness.

    Parameters
    ----------
    modname : str
        The module to import.
    error_strings : dict, optional
        Strings used to build the message shown when the deferred load
        fails. Key 'msg' overrides the template
        (default :attr:`lazy_import._MSG`); the remaining keys
        ('module', 'caller', 'install_name') fill it in. No key is
        mandatory; all get smart defaults.
    lazy_mod_class : type, optional
        Class instantiated for the lazy module. Must be a subclass of
        :class:`LazyModule` (the default).
    level : str, optional
        Which reference to return for a dotted *modname*: 'leaf' (the
        default) returns the deepest submodule (lazy equivalent of
        `from aaa.bbb import ccc`); 'base' returns the top-level package
        (lazy equivalent of `import aaa.bbb.ccc`). The choice does not
        affect what gets registered in `sys.modules`.

    Returns
    -------
    module
        An instance of *lazy_mod_class* standing in for *modname* (or
        its base, per *level*); the real module is loaded on first
        attribute access.

    Raises
    ------
    ValueError
        If *level* is neither 'base' nor 'leaf'.

    See Also
    --------
    :func:`lazy_callable`
    :class:`LazyModule`
    """
    error_strings = {} if error_strings is None else error_strings
    _set_default_errornames(modname, error_strings)
    mod = _lazy_module(modname, error_strings, lazy_mod_class)
    if level == 'leaf':
        return mod
    if level == 'base':
        return sys.modules[module_basename(modname)]
    raise ValueError("Parameter 'level' must be one of ('base', 'leaf')")
|
Function allowing lazy importing of a module into the namespace.
A lazy module object is created, registered in `sys.modules`, and
returned. This is a hollow module; actual loading, and `ImportErrors` if
not found, are delayed until an attempt is made to access attributes of the
lazy module.
A handy application is to use :func:`lazy_module` early in your own code
(say, in `__init__.py`) to register all modulenames you want to be lazy.
Because of registration in `sys.modules` later invocations of
`import modulename` will also return the lazy object. This means that after
initial registration the rest of your code can use regular python import
statements and retain the laziness of the modules.
Parameters
----------
modname : str
The module to import.
error_strings : dict, optional
A dictionary of strings to use when module-loading fails. Key 'msg'
sets the message to use (defaults to :attr:`lazy_import._MSG`). The
message is formatted using the remaining dictionary keys. The default
message informs the user of which module is missing (key 'module'),
what code loaded the module as lazy (key 'caller'), and which package
should be installed to solve the dependency (key 'install_name').
None of the keys is mandatory and all are given smart names by default.
lazy_mod_class: type, optional
Which class to use when instantiating the lazy module, to allow
deep customization. The default is :class:`LazyModule` and custom
alternatives **must** be a subclass thereof.
level : str, optional
Which submodule reference to return. Either a reference to the 'leaf'
module (the default) or to the 'base' module. This is useful if you'll
be using the module functionality in the same place you're calling
:func:`lazy_module` from, since then you don't need to run `import`
again. Setting *level* does not affect which names/modules get
registered in `sys.modules`.
For *level* set to 'base' and *modulename* 'aaa.bbb.ccc'::
aaa = lazy_import.lazy_module("aaa.bbb.ccc", level='base')
# 'aaa' becomes defined in the current namespace, with
# (sub)attributes 'aaa.bbb' and 'aaa.bbb.ccc'.
# It's the lazy equivalent to:
import aaa.bbb.ccc
For *level* set to 'leaf'::
ccc = lazy_import.lazy_module("aaa.bbb.ccc", level='leaf')
# Only 'ccc' becomes set in the current namespace.
# Lazy equivalent to:
from aaa.bbb import ccc
Returns
-------
module
The module specified by *modname*, or its base, depending on *level*.
The module isn't immediately imported. Instead, an instance of
*lazy_mod_class* is returned. Upon access to any of its attributes, the
module is finally loaded.
Examples
--------
>>> import lazy_import, sys
>>> np = lazy_import.lazy_module("numpy")
>>> np
Lazily-loaded module numpy
>>> np is sys.modules['numpy']
True
>>> np.pi # This causes the full loading of the module ...
3.141592653589793
>>> np # ... and the module is changed in place.
<module 'numpy' from '/usr/local/lib/python/site-packages/numpy/__init__.py'>
>>> import lazy_import, sys
>>> # The following succeeds even when asking for a module that's not available
>>> missing = lazy_import.lazy_module("missing_module")
>>> missing
Lazily-loaded module missing_module
>>> missing is sys.modules['missing_module']
True
>>> missing.some_attr # This causes the full loading of the module, which now fails.
ImportError: __main__ attempted to use a functionality that requires module missing_module, but it couldn't be loaded. Please install missing_module and retry.
See Also
--------
:func:`lazy_callable`
:class:`LazyModule`
|
entailment
|
def lazy_callable(modname, *names, **kwargs):
    """Lazily import one or more callables (functions or classes).

    Returns thin wrappers that forward all arguments straight to the
    target module's callables; the module itself is imported lazily via
    the same mechanism as :func:`lazy_module` and is only fully loaded
    when a wrapper is first called. If the target module was already
    fully imported beforehand, the real callables are returned and no
    lazy imports are made.

    :func:`lazy_function` is an alias of :func:`lazy_callable`.

    Parameters
    ----------
    modname : str
        Base module to import the callable(s) in *names* from, or — when
        *names* is empty — a full 'module_name.callable_name' string.
    names : str (optional)
        Callable name(s) to import from the module given by *modname*.
    error_strings : dict, optional
        As described under :func:`lazy_module`, with two differences:
        an extra key 'msg_callable' may supply the error used when the
        module loads but lacks the target callable (default
        :attr:`lazy_import._MSG_CALLABLE`), and a key 'callable' is
        always added with the name being loaded.
    lazy_mod_class : type, optional
        See definition under :func:`lazy_module`.
    lazy_call_class : type, optional
        Class used for the lazy-callable wrappers, other than the
        default :class:`LazyCallable`.

    Returns
    -------
    wrapper function or tuple of wrapper functions
        A tuple with one wrapper per element of *names*; or, when only
        *modname* ('module_name.callable_name') is passed, the single
        wrapper directly, not in a tuple.

    Notes
    -----
    Unlike :func:`lazy_module`'s modules, these wrappers never mutate
    into the real callables, even after the module loads. Calling them
    works; other uses (e.g. subclassing a lazily imported class) fail.

    See Also
    --------
    :func:`lazy_module`
    :class:`LazyCallable`
    :class:`LazyModule`
    """
    if not names:
        # A single 'module.callable' string: split off the callable name.
        modname, _, name = modname.rpartition(".")
    lazy_mod_class = _setdef(kwargs, 'lazy_mod_class', LazyModule)
    lazy_call_class = _setdef(kwargs, 'lazy_call_class', LazyCallable)
    error_strings = _setdef(kwargs, 'error_strings', {})
    _set_default_errornames(modname, error_strings, call=True)
    if not names:
        # Return the wrapper directly, not wrapped in a tuple.
        return _lazy_callable(modname, name, error_strings.copy(),
                              lazy_mod_class, lazy_call_class)
    return tuple(_lazy_callable(modname, cname, error_strings.copy(),
                                lazy_mod_class, lazy_call_class)
                 for cname in names)
|
Performs lazy importing of one or more callables.
:func:`lazy_callable` creates functions that are thin wrappers that pass
any and all arguments straight to the target module's callables. These can
be functions or classes. The full loading of that module is only actually
triggered when the returned lazy function itself is called. This lazy
import of the target module uses the same mechanism as
:func:`lazy_module`.
If, however, the target module has already been fully imported prior
to invocation of :func:`lazy_callable`, then the target callables
themselves are returned and no lazy imports are made.
:func:`lazy_function` is an alias of
:func:`lazy_callable`.
Parameters
----------
modname : str
The base module from where to import the callable(s) in *names*,
or a full 'module_name.callable_name' string.
names : str (optional)
The callable name(s) to import from the module specified by *modname*.
If left empty, *modname* is assumed to also include the callable name
to import.
error_strings : dict, optional
A dictionary of strings to use when reporting loading errors (either a
missing module, or a missing callable name in the loaded module).
*error_string* follows the same usage as described under
:func:`lazy_module`, with the exceptions that 1) a further key,
'msg_callable', can be supplied to be used as the error when a module
is successfully loaded but the target callable can't be found therein
(defaulting to :attr:`lazy_import._MSG_CALLABLE`); 2) a key 'callable'
is always added with the callable name being loaded.
lazy_mod_class : type, optional
See definition under :func:`lazy_module`.
lazy_call_class : type, optional
Analogously to *lazy_mod_class*, allows setting a custom class to
handle lazy callables, other than the default :class:`LazyCallable`.
Returns
-------
wrapper function or tuple of wrapper functions
If *names* is passed, returns a tuple of wrapper functions, one for
each element in *names*.
If only *modname* is passed it is assumed to be a full
'module_name.callable_name' string, in which case the wrapper for the
imported callable is returned directly, and not in a tuple.
Notes
-----
Unlike :func:`lazy_module`, which returns a lazy module that eventually
mutates into the fully-functional version, :func:`lazy_callable` only
returns thin wrappers that never change. This means that the returned
wrapper object never truly becomes the one under the module's namespace,
even after successful loading of the module in *modname*. This is fine for
most practical use cases, but may break code that relies on the usage of
the returned objects other than calling them. One such example is the lazy
import of a class: it's fine to use the returned wrapper to instantiate an
object, but it can't be used, for instance, to subclass from.
Examples
--------
>>> import lazy_import, sys
>>> fn = lazy_import.lazy_callable("numpy.arange")
>>> sys.modules['numpy']
Lazily-loaded module numpy
>>> fn(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> sys.modules['numpy']
<module 'numpy' from '/usr/local/lib/python3.5/site-packages/numpy/__init__.py'>
>>> import lazy_import, sys
>>> cl = lazy_import.lazy_callable("numpy.ndarray") # a class
>>> obj = cl([1, 2]) # This works OK (and also triggers the loading of numpy)
>>> class MySubclass(cl): # This fails because cls is just a wrapper,
>>> pass # not an actual class.
See Also
--------
:func:`lazy_module`
:class:`LazyCallable`
:class:`LazyModule`
|
entailment
|
def _load_module(module):
    """Ensures that a module, and its parents, are properly loaded

    Triggers the real import of a LazyModule instance: temporarily strips the
    lazy machinery from the module's class, asks Python to (re)load the
    module, and restores the lazy state if loading fails.

    Parameters
    ----------
    module : LazyModule
        The lazily-created module to load for real.

    Raises
    ------
    TypeError
        If *module* is not a LazyModule instance.
    ImportError
        If the real import fails; the message is built from the error
        strings cached on the module's class.
    """
    modclass = type(module)
    # We only take care of our own LazyModule instances
    if not issubclass(modclass, LazyModule):
        raise TypeError("Passed module is not a LazyModule instance.")
    # Hold the interpreter's import lock for the whole load/reset sequence.
    with _ImportLockContext():
        parent, _, modname = module.__name__.rpartition('.')
        logger.debug("loading module {}".format(modname))
        # We first identify whether this is a loadable LazyModule, then we
        # strip as much of lazy_import behavior as possible (keeping it cached,
        # in case loading fails and we need to reset the lazy state).
        if not hasattr(modclass, '_lazy_import_error_msgs'):
            # Already loaded (no _lazy_import_error_msgs attr). Not reloading.
            return
        # First, ensure the parent is loaded (using recursion; *very* unlikely
        # we'll ever hit a stack limit in this case).
        modclass._LOADING = True
        try:
            if parent:
                logger.debug("first loading parent module {}".format(parent))
                setattr(sys.modules[parent], modname, module)
            # Loading the parent may have loaded us as a side effect; the
            # _LOADING flag disappears when that happens (see below).
            if not hasattr(modclass, '_LOADING'):
                logger.debug("Module {} already loaded by the parent"
                             .format(modname))
                # We've been loaded by the parent. Let's bail.
                return
            # Stash the lazy class attributes so they can be restored on failure.
            cached_data = _clean_lazymodule(module)
            try:
                # Get Python to do the real import!
                reload_module(module)
            except:
                # Loading failed. We reset our lazy state.
                logger.debug("Failed to load module {}. Resetting..."
                             .format(modname))
                _reset_lazymodule(module, cached_data)
                raise
            else:
                # Successful load
                logger.debug("Successfully loaded module {}".format(modname))
                delattr(modclass, '_LOADING')
                _reset_lazy_submod_refs(module)
        except (AttributeError, ImportError) as err:
            logger.debug("Failed to load {}.\n{}: {}"
                         .format(modname, err.__class__.__name__, err))
            logger.lazy_trace()
            # Under Python 3 reloading our dummy LazyModule instances causes an
            # AttributeError if the module can't be found. Would be preferable
            # if we could always rely on an ImportError. As it is we vet the
            # AttributeError as thoroughly as possible.
            if ((six.PY3 and isinstance(err, AttributeError)) and not
                    err.args[0] == "'NoneType' object has no attribute 'name'"):
                # Not the AttributeError we were looking for.
                raise
            # Re-raise as a clean ImportError built from the cached error
            # strings, suppressing the original exception context.
            msg = modclass._lazy_import_error_msgs['msg']
            raise_from(ImportError(
                msg.format(**modclass._lazy_import_error_strings)), None)
|
Ensures that a module, and its parents, are properly loaded
|
entailment
|
def _setdef(argdict, name, defaultvalue):
"""Like dict.setdefault but sets the default value also if None is present.
"""
if not name in argdict or argdict[name] is None:
argdict[name] = defaultvalue
return argdict[name]
|
Like dict.setdefault but sets the default value also if None is present.
|
entailment
|
def _clean_lazymodule(module):
    """Strip a module's class of its lazy behavior so a real load can run.

    Restores the stock ``ModuleType`` attribute hooks on the module's class
    and removes (while caching) the class attributes that implement the lazy
    machinery.

    Parameters
    ----------
    module: LazyModule

    Returns
    -------
    dict
        The removed class attributes, keyed by name; pass this dict to
        :func:`_reset_lazymodule` to restore the lazy state.
    """
    modclass = type(module)
    _clean_lazy_submod_refs(module)
    # Reinstate the plain ModuleType hooks so attribute access is normal
    # while the real import machinery runs.
    modclass.__getattribute__ = ModuleType.__getattribute__
    modclass.__setattr__ = ModuleType.__setattr__
    stashed_attrs = {}
    for attr_name in _CLS_ATTRS:
        try:
            stashed_attrs[attr_name] = getattr(modclass, attr_name)
            delattr(modclass, attr_name)
        except AttributeError:
            # Attribute absent (or not deletable on this class); skip it.
            pass
    return stashed_attrs
|
Removes all lazy behavior from a module's class, for loading.
Also removes all module attributes listed under the module's class deletion
dictionaries. Deletion dictionaries are class attributes with names
specified in `_DELETION_DICT`.
Parameters
----------
module: LazyModule
Returns
-------
dict
A dictionary of deleted class attributes, that can be used to reset the
lazy state using :func:`_reset_lazymodule`.
|
entailment
|
def _reset_lazymodule(module, cls_attrs):
    """Restore the lazy machinery removed by :func:`_clean_lazymodule`.

    Parameters
    ----------
    module : LazyModule
        The module whose class is to be made lazy again.
    cls_attrs : dict
        Cached class attributes as returned by :func:`_clean_lazymodule`.
    """
    modclass = type(module)
    # Drop the plain ModuleType hooks installed during loading so the lazy
    # versions defined on the class hierarchy take effect again.
    del modclass.__getattribute__
    del modclass.__setattr__
    try:
        del modclass._LOADING
    except AttributeError:
        pass
    # Put back every cached lazy class attribute.
    for attr_name in _CLS_ATTRS:
        if attr_name in cls_attrs:
            setattr(modclass, attr_name, cls_attrs[attr_name])
    _reset_lazy_submod_refs(module)
|
Resets a module's lazy state from cached data.
|
entailment
|
def python_2_unicode_compatible(klass):
    """
    Class decorator that makes a text-returning __str__ work on Python 2.
    Define a __str__ method returning text and apply this decorator; under
    Python 2 that method is moved to __unicode__ and __str__ is replaced by
    a UTF-8-encoding wrapper. Under Python 3 the class is returned unchanged.
    """
    if not six.PY2:
        return klass
    klass.__unicode__ = klass.__str__

    def __str__(self):
        return self.__unicode__().encode('utf-8')

    klass.__str__ = __str__
    return klass
|
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
|
entailment
|
def timestamp_from_datetime(dt):
    """
    Compute timestamp from a datetime object that could be timezone aware
    or unaware.

    Aware datetimes are converted to UTC first; naive datetimes are assumed
    to already be in UTC.

    Parameters
    ----------
    dt : datetime.datetime
        Aware or naive datetime.

    Returns
    -------
    int
        Seconds since the Unix epoch.
    """
    # Check tzinfo explicitly instead of relying on astimezone() raising
    # ValueError for naive datetimes: since Python 3.6 astimezone() silently
    # assumes a naive datetime is in *local* time, which would skew the
    # result on non-UTC hosts instead of treating the input as UTC.
    if dt.tzinfo is None:
        utc_dt = dt.replace(tzinfo=pytz.utc)
    else:
        utc_dt = dt.astimezone(pytz.utc)
    return timegm(utc_dt.timetuple())
|
Compute timestamp from a datetime object that could be timezone aware
or unaware.
|
entailment
|
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.

    :param s: object to convert to bytes.
    :param encoding: target character encoding (default 'utf-8').
    :param strings_only: when True, return None and int values unconverted.
    :param errors: error-handling scheme passed to encode()/decode().
    :return: a bytes object (or *s* unchanged when strings_only applies).
    """
    # memoryview has no encode(); materialize it as bytes first.
    if isinstance(s, memoryview):
        s = bytes(s)
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            # Already bytes in the requested encoding; return as-is.
            return s
        else:
            # Transcode: existing byte strings are assumed to be UTF-8.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        # Caller asked to leave (some) non-string-like objects untouched.
        return s
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                                  errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        # Text string: encode with the requested encoding and error policy.
        return s.encode(encoding, errors)
|
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
|
entailment
|
def memoize(func, cache, num_args):
    """
    Decorate *func* so its results are remembered in *cache*.

    The cache key is built from the first *num_args* positional arguments,
    which must therefore be usable as dictionary keys. Repeated calls with
    the same leading arguments return the stored result without invoking
    *func* again.
    """
    @wraps(func)
    def memoizer(*call_args):
        key = call_args[:num_args]
        if key not in cache:
            cache[key] = func(*call_args)
        return cache[key]
    return memoizer
|
Wrap a function so that results for any argument tuple are stored in
'cache'. Note that the args to the function must be usable as dictionary
keys.
Only the first num_args are considered when creating the key.
|
entailment
|
def reraise_as(new_exception_or_type):
    """
    Obtained from https://github.com/dcramer/reraise/blob/master/src/reraise.py

    Re-raise the exception currently being handled as a different exception,
    keeping the original traceback and recording the original exception as
    ``__cause__``.

    >>> try:
    >>>     do_something_crazy()
    >>> except Exception:
    >>>     reraise_as(UnhandledException)

    :param new_exception_or_type: an exception instance to raise, or an
        exception class to instantiate (with no arguments) and raise.
    """
    # Marker recognized by some traceback-rendering tools to hide this frame.
    __traceback_hide__ = True  # NOQA
    e_type, e_value, e_traceback = sys.exc_info()
    if inspect.isclass(new_exception_or_type):
        # Given a class: instantiate it with no arguments.
        new_type = new_exception_or_type
        new_exception = new_exception_or_type()
    else:
        # Given an instance: raise it as-is.
        new_type = type(new_exception_or_type)
        new_exception = new_exception_or_type
    # Chain to the original exception (PEP 3134 style).
    new_exception.__cause__ = e_value
    try:
        six.reraise(new_type, new_exception, e_traceback)
    finally:
        # Drop the traceback reference so frames are not kept alive.
        del e_traceback
|
Obtained from https://github.com/dcramer/reraise/blob/master/src/reraise.py
>>> try:
>>> do_something_crazy()
>>> except Exception:
>>> reraise_as(UnhandledException)
|
entailment
|
def hide_auth(msg):
    """Return *msg* with sensitive authentication data masked out."""
    # Apply every (pattern, replacement) pair in turn.
    for regex, replacement in RE_HIDE_AUTH:
        msg = regex.sub(replacement, msg)
    return msg
|
Remove sensitive information from msg.
|
entailment
|
def init(self):
    """Prepare the HTTP handler, API URL and request headers used by every
    subsequent request."""
    self.debug('Initializing %r', self)
    proto = self.server.split('://')[0]
    if proto == 'https':
        if hasattr(ssl, 'create_default_context'):
            # Modern Python: build an explicit SSL context that honors the
            # configured ssl_verify flag.
            ctx = ssl.create_default_context()
            if self.ssl_verify:
                ctx.check_hostname = True
                ctx.verify_mode = ssl.CERT_REQUIRED
            else:
                # Hostname checking must be disabled before certificate
                # verification can be switched off.
                ctx.check_hostname = False
                ctx.verify_mode = ssl.CERT_NONE
            self._http_handler = urllib2.HTTPSHandler(debuglevel=0, context=ctx)
        else:
            # Old Python without create_default_context: default handler.
            self._http_handler = urllib2.HTTPSHandler(debuglevel=0)
    elif proto == 'http':
        self._http_handler = urllib2.HTTPHandler(debuglevel=0)
    else:
        raise ValueError('Invalid protocol %s' % proto)

    self._api_url = self.server + '/api_jsonrpc.php'
    self._http_headers = {
        'Content-Type': 'application/json-rpc',
        'User-Agent': 'python/zabbix_api',
    }

    if self.httpuser:
        self.debug('HTTP authentication enabled')
        credentials = self.httpuser + ':' + self.httppasswd
        encoded = b64encode(credentials.encode('utf-8')).decode('ascii')
        self._http_headers['Authorization'] = 'Basic ' + encoded
|
Prepare the HTTP handler, URL, and HTTP headers for all subsequent requests
|
entailment
|
def timestamp_to_datetime(cls, dt, dt_format=DATETIME_FORMAT):
    """Render a unix timestamp as a human readable date/time string."""
    parsed = cls.get_datetime(dt)
    return cls.convert_datetime(parsed, dt_format=dt_format)
|
Convert unix timestamp to human readable date/time string
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.