code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if not isinstance(item, tuple) or len(item) != 2:
raise ValueError('Linked items must be instance/prop-name tuple')
if not isinstance(item[0], tuple(LINK_OBSERVERS)):
raise ValueError('Only {} instances may be linked'.format(
', '.join([link_cls.__name__ for link_cls in LINK_OBSERVERS])
))
if not isinstance(item[1], string_types):
raise ValueError('Properties must be specified as string names')
if not hasattr(item[0], item[1]):
raise ValueError('Invalid property {} for {} instance'.format(
item[1], item[0].__class__.__name__
)) | def _validate(item) | Validate (instance, prop name) tuple | 3.313869 | 2.877012 | 1.151844 |
if not isinstance(value, (tuple, list, np.ndarray)):
self.error(instance, value)
if self.coerce:
value = self.wrapper(value)
valid_class = (
self.wrapper if isinstance(self.wrapper, type) else np.ndarray
)
if not isinstance(value, valid_class):
self.error(instance, value)
allowed_kinds = ''.join(TYPE_MAPPINGS[typ] for typ in self.dtype)
if value.dtype.kind not in allowed_kinds:
self.error(instance, value, extra='Invalid dtype.')
if self.shape is None:
return value
for shape in self.shape:
if len(shape) != value.ndim:
continue
for i, shp in enumerate(shape):
if shp not in ('*', value.shape[i]):
break
else:
return value
self.error(instance, value, extra='Invalid shape.') | def validate(self, instance, value) | Determine if array is valid based on shape and dtype | 2.945947 | 2.794884 | 1.05405 |
error_class = error_class or ValidationError
if not isinstance(value, (list, tuple, np.ndarray)):
super(Array, self).error(instance, value, error_class, extra)
if isinstance(value, (list, tuple)):
val_description = 'A {typ} of length {len}'.format(
typ=value.__class__.__name__,
len=len(value)
)
else:
val_description = 'An array of shape {shp} and dtype {typ}'.format(
shp=value.shape,
typ=value.dtype
)
if instance is None:
prefix = '{} property'.format(self.__class__.__name__)
else:
prefix = "The '{name}' property of a {cls} instance".format(
name=self.name,
cls=instance.__class__.__name__,
)
message = (
'{prefix} must be {info}. {desc} was specified. {extra}'.format(
prefix=prefix,
info=self.info,
desc=val_description,
extra=extra,
)
)
if issubclass(error_class, ValidationError):
raise error_class(message, 'invalid', self.name, instance)
raise error_class(message) | def error(self, instance, value, error_class=None, extra='') | Generates a ValueError on setting property to an invalid value | 2.591956 | 2.559103 | 1.012838 |
kwargs.update({'trusted': kwargs.get('trusted', False)})
if self.deserializer is not None:
return self.deserializer(value, **kwargs)
if value is None:
return None
return self.wrapper(value).astype(self.dtype[0]) | def deserialize(self, value, **kwargs) | De-serialize the property value from JSON
If no deserializer has been registered, this converts the value
to the wrapper class with given dtype. | 4.467691 | 3.833488 | 1.165438 |
def _recurse_list(val):
if val and isinstance(val[0], list):
return [_recurse_list(v) for v in val]
return [str(v) if np.isnan(v) or np.isinf(v) else v for v in val]
return _recurse_list(value.tolist()) | def to_json(value, **kwargs) | Convert array to JSON list
nan values are converted to string 'nan', inf values to 'inf'. | 3.219411 | 2.746641 | 1.172127 |
value = super(BaseVector, self).validate(instance, value)
if self.length is not None:
try:
value.length = self._length_array(value)
except ZeroDivisionError:
self.error(
instance, value,
error_class=ZeroDivValidationError,
extra='The vector must have a length specified.'
)
return value | def validate(self, instance, value) | Check shape and dtype of vector and scales it to given length | 6.248818 | 5.567027 | 1.122469 |
if isinstance(value, string_types):
if (
value.upper() not in VECTOR_DIRECTIONS or
value.upper() in ('Z', '-Z', 'UP', 'DOWN')
):
self.error(instance, value)
value = VECTOR_DIRECTIONS[value.upper()][:2]
return super(Vector2, self).validate(instance, value) | def validate(self, instance, value) | Check shape and dtype of vector
validate also coerces the vector from valid strings (these
include ZERO, X, Y, -X, -Y, EAST, WEST, NORTH, and SOUTH) and
scales it to the given length. | 4.124141 | 3.39082 | 1.216267 |
if not isinstance(value, (tuple, list, np.ndarray)):
self.error(instance, value)
if isinstance(value, (tuple, list)):
for i, val in enumerate(value):
if isinstance(val, string_types):
if val.upper() not in VECTOR_DIRECTIONS:
self.error(
instance=instance,
value=val,
extra='This is an invalid Vector3 representation.',
)
value[i] = VECTOR_DIRECTIONS[val.upper()]
return super(Vector3Array, self).validate(instance, value) | def validate(self, instance, value) | Check shape and dtype of vector
validate also coerces the vector from valid strings (these
include ZERO, X, Y, Z, -X, -Y, -Z, EAST, WEST, NORTH, SOUTH, UP,
and DOWN) and scales it to the given length. | 3.025988 | 2.694897 | 1.122859 |
if self.data is None:
return ""
if isinstance(self.data, dict):
firstpart = lng.split('-')[0]
similar = [l for l in self.data.keys() if (l.startswith(firstpart + "-") or firstpart == l) and l != lng]
if self.data.get(lng):
return self.data[lng]
elif self.data.get(firstpart):
return self.data[firstpart]
elif similar and any([self.data.get(s) for s in similar]):
for s in similar:
if self.data.get(s):
return self.data.get(s)
elif self.data.get(settings.LANGUAGE_CODE):
return self.data[settings.LANGUAGE_CODE]
elif len(self.data):
return list(self.data.items())[0][1]
else:
return ""
else:
return str(self.data) | def localize(self, lng: str) -> str | Evaluate the given string with respect to the locale defined by ``lng``.
If no string is available in the currently active language, this will give you
the string in the system's default language. If this is unavailable as well, it
will give you the string in the first language available.
:param lng: A locale code, e.g. ``de``. If you specify a code including a country
or region like ``de-AT``, exact matches will be used preferably, but if only
a ``de`` or ``de-AT`` translation exists, this might be returned as well. | 2.422544 | 2.379072 | 1.018273 |
if obs.names is everything:
names = list(instance._props)
else:
names = obs.names
for name in names:
if name not in instance._listeners:
instance._listeners[name] = {typ: [] for typ in LISTENER_TYPES}
instance._listeners[name][obs.mode] += [obs] | def _set_listener(instance, obs) | Add listeners to a HasProperties instance | 4.487775 | 4.030816 | 1.113366 |
if (
change['mode'] not in listeners_disabled._quarantine and #pylint: disable=protected-access
change['name'] in instance._listeners
):
return instance._listeners[change['name']][change['mode']]
return [] | def _get_listeners(instance, change) | Gets listeners of changed Property on a HasProperties instance | 8.155145 | 7.434561 | 1.096924 |
mode = 'observe_change' if change_only else 'observe_set'
if names is None and func is None:
return Observer(names_or_instance, mode)
obs = Observer(names, mode)(func)
_set_listener(names_or_instance, obs)
return obs | def observer(names_or_instance, names=None, func=None, change_only=False) | Specify a callback function that will fire on Property value change
Observer functions on a HasProperties class fire after the observed
Property or Properties have been changed (unlike validator functions
that fire on set before the value is changed).
You can use this method as a decorator inside a HasProperties class
.. code::
@properties.observer('variable_name')
def callback_function(self, change):
print(change)
or you can use it to register a function to a single HasProperties
instance
.. code::
properties.observer(my_has_props, 'variable_name', callback_function)
The variable name must refer to a Property name on the HasProperties
class. A list of Property names may also be used; the same
callback function will fire when any of these Properties change. Also,
:class:`properties.everything <properties.utils.Sentinel>` may be
specified instead of the variable name. In that case, the callback
function will fire when any Property changes.
The callback function must take two arguments. The first is the
HasProperties instance; the second is the change notification dictionary.
This dictionary contains:
* 'name' - the name of the changed Property
* 'previous' - the value of the Property prior to change (this will be
:code:`properties.undefined` if the value was not previously set)
* 'value' - the new value of the Property (this will be
:code:`properties.undefined` if the value is deleted)
* 'mode' - the mode of the change; for observers, this is either
'observe_set' or 'observe_change'
Finally, the keyword argument **change_only** may be specified as a
boolean. If False (the default), the callback function will fire any
time the Property is set. If True, the callback function will only fire
if the new value is different than the previous value, determined by
the :code:`Property.equal` method. | 4.681276 | 5.384733 | 0.869361 |
if names is None and func is None:
if callable(names_or_instance):
return ClassValidator(names_or_instance)
return Observer(names_or_instance, 'validate')
val = Observer(names, 'validate')(func)
_set_listener(names_or_instance, val)
return val | def validator(names_or_instance, names=None, func=None) | Specify a callback function to fire on class validation OR property set
This function has two modes of operation:
1. Registering callback functions that validate Property values when
they are set, before the change is saved to the HasProperties instance.
This mode is very similar to the :code:`observer` function.
2. Registering callback functions that fire only when the HasProperties
:code:`validate` method is called. This allows for cross-validation
of Properties that should only fire when all required Properties are
set.
**Mode 1:**
Validator functions on a HasProperties class fire on set but before the
observed Property or Properties have been changed (unlike observer
functions that fire after the value has been changed).
You can use this method as a decorator inside a HasProperties class
.. code::
@properties.validator('variable_name')
def callback_function(self, change):
print(change)
or you can use it to register a function to a single HasProperties
instance
.. code::
properties.validator(my_has_props, 'variable_name', callback_function)
The variable name must refer to a Property name on the HasProperties
class. A list of Property names may also be used; the same
callback function will fire when any of these Properties change. Also,
:class:`properties.everything <properties.utils.Sentinel>` may be
specified instead of the variable name. In that case, the callback
function will fire when any Property changes.
The callback function must take two arguments. The first is the
HasProperties instance; the second is the change notification dictionary.
This dictionary contains:
* 'name' - the name of the changed Property
* 'previous' - the value of the Property prior to change (this will be
:code:`properties.undefined` if the value was not previously set)
* 'value' - the new value of the Property (this will be
:code:`properties.undefined` if the value is deleted)
* 'mode' - the mode of the change; for validators, this is 'validate'
**Mode 2:**
When used as a decorator without arguments (i.e. called directly on a
HasProperties method), the decorated method is registered as a class
validator. These methods execute only when :code:`validate()` is called
on the HasProperties instance.
.. code::
@properties.validator
def validation_method(self):
print('validating instance of {}'.format(self.__class__))
The decorated function must only take one argument, the HasProperties
instance. | 4.506598 | 6.527617 | 0.69039 |
output = OrderedDict()
output_keys = set()
all_bases = []
# Go through the bases from furthest to nearest ancestor
for base in reversed(bases):
# Only keep the items that are still defined on the bases
if base is not object and isinstance(base, PropertyMetaclass):
output_keys = output_keys.union(getattr(base, attr))
# Collect all bases so we ensure overridden items are assigned
# in the correct order
for item in reversed(base.__mro__):
if item is object or not isinstance(item, PropertyMetaclass):
continue
if item not in all_bases:
all_bases.append(item)
# Update the items in reverse MRO order; only keep those that are
# defined on the bases
for base in all_bases:
for key, val in iteritems(getattr(base, attr)):
if key in base.__dict__ and key in output_keys:
output.update({key: val})
# Remove all items that were overridden by this class; this is
# potentially a superset of the items added back in the next step.
for key in classdict:
if key in output:
output.pop(key)
# Update the items with those defined on this class
output.update(attr_dict)
return output | def build_from_bases(bases, classdict, attr, attr_dict) | Helper function to build private HasProperties attributes | 4.21233 | 4.221751 | 0.997768 |
if (
not isinstance(value_a, HasProperties) or
not isinstance(value_b, HasProperties)
):
return value_a == value_b
if getattr(value_a, '_testing_equality', False):
return False
value_a._testing_equality = True #pylint: disable=protected-access
try:
if value_a is value_b:
return True
if value_a.__class__ is not value_b.__class__:
return False
for prop in itervalues(value_a._props):
prop_a = getattr(value_a, prop.name)
prop_b = getattr(value_b, prop.name)
if prop_a is None and prop_b is None:
continue
if (
prop_a is not None and
prop_b is not None and
prop.equal(prop_a, prop_b)
):
continue
return False
return True
finally:
value_a._testing_equality = False | def equal(value_a, value_b) | Determine if two **HasProperties** instances are equivalent
Equivalence is determined by checking if (1) the two instances are
the same class and (2) all Property values on two instances are
equal, using :code:`Property.equal`. If the two values are the same
HasProperties instance (eg. :code:`value_a is value_b`) this method
returns True. Finally, if either value is not a HasProperties
instance, equality is simply checked with ==.
.. note::
HasProperties objects with recursive self-references will not
evaluate to equal, even if their property values and structure
are equivalent. | 2.088528 | 1.939841 | 1.076649 |
if not isinstance(value, HasProperties):
raise ValueError('properties.copy may only be used to copy'
'HasProperties instances')
kwargs.update({'include_class': kwargs.get('include_class', True)})
kwargs.update({'trusted': kwargs.get('trusted', True)})
return value.__class__.deserialize(value.serialize(**kwargs), **kwargs) | def copy(value, **kwargs) | Return a copy of a **HasProperties** instance
A copy is produced by serializing the HasProperties instance then
deserializing it to a new instance. Therefore, if any properties
cannot be serialized/deserialized, :code:`copy` will fail. Any
keyword arguments will be passed through to both :code:`serialize`
and :code:`deserialize`. | 4.837585 | 3.845741 | 1.257907 |
if name is None:
for key in self._props:
if isinstance(self._props[key], basic.Property):
self._reset(key)
return
if name not in self._props:
raise AttributeError("Input name '{}' is not a known "
"property or attribute".format(name))
if not isinstance(self._props[name], basic.Property):
raise AttributeError("Cannot reset GettableProperty "
"'{}'".format(name))
if name in self._defaults:
val = self._defaults[name]
else:
val = self._props[name].default
if callable(val):
val = val()
setattr(self, name, val) | def _reset(self, name=None) | Revert specified property to default value
If no property is specified, all properties are returned to default. | 2.989002 | 2.830415 | 1.056029 |
if getattr(self, '_getting_validated', False):
return True
self._getting_validated = True
self._validation_error_tuples = []
self._non_validation_error = None
try:
for val in itervalues(self._class_validators):
try:
if isinstance(val.func, string_types):
valid = getattr(self, val.func)()
else:
valid = val.func(self)
if valid is False:
raise utils.ValidationError(
'Validation failed', None, None, self
)
except utils.ValidationError as val_err:
self._validation_error_tuples += val_err.error_tuples
except GENERIC_ERRORS as err:
if not self._non_validation_error:
self._non_validation_error = err
if self._validation_error_tuples:
self._error_hook(self._validation_error_tuples)
msgs = ['Validation failed:']
msgs += [val.message for val in self._validation_error_tuples]
raise utils.ValidationError(
message='\n- '.join(msgs),
_error_tuples=self._validation_error_tuples,
)
if self._non_validation_error:
raise self._non_validation_error #pylint: disable=raising-bad-type
return True
finally:
self._getting_validated = False
self._validation_error_tuples = None
self._non_validation_error = None | def validate(self) | Call all registered class validator methods
These are all methods decorated with :code:`@properties.validator`.
Validator methods are expected to raise a ValidationError if they
fail. | 2.776348 | 2.724829 | 1.018907 |
for key, prop in iteritems(self._props):
try:
value = self._get(key)
err_msg = 'Invalid value for property {}: {}'.format(key, value)
if value is not None:
change = dict(name=key, previous=value, value=value,
mode='validate')
self._notify(change)
if not prop.equal(value, change['value']):
raise utils.ValidationError(err_msg, 'invalid',
prop.name, self)
if not prop.assert_valid(self):
raise utils.ValidationError(err_msg, 'invalid',
prop.name, self)
except utils.ValidationError as val_err:
if getattr(self, '_validation_error_tuples', None) is not None:
self._validation_error_tuples += val_err.error_tuples
else:
raise
return True | def _validate_props(self) | Assert that all the properties are valid on validate() | 3.653887 | 3.510367 | 1.040885 |
if getattr(self, '_getting_serialized', False):
raise utils.SelfReferenceError('Object contains unserializable '
'self reference')
self._getting_serialized = True
try:
kwargs.update({
'include_class': include_class,
'save_dynamic': save_dynamic
})
if save_dynamic:
prop_source = self._props
else:
prop_source = self._backend
data = (
(key, self._props[key].serialize(getattr(self, key), **kwargs))
for key in prop_source
)
json_dict = {k: v for k, v in data if v is not None}
if include_class:
json_dict.update({'__class__': self.__class__.__name__})
return json_dict
finally:
self._getting_serialized = False | def serialize(self, include_class=True, save_dynamic=False, **kwargs) | Serializes a **HasProperties** instance to dictionary
This uses the Property serializers to serialize all Property values
to a JSON-compatible dictionary. Properties that are undefined are
not included. If the **HasProperties** instance contains a reference
to itself, a :code:`properties.SelfReferenceError` will be raised.
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* Any other keyword arguments will be passed through to the Property
serializers. | 3.12954 | 2.917761 | 1.072583 |
if not isinstance(value, dict):
raise ValueError(
'HasProperties class {} must deserialize from dictionary, '
'not input of type {}'.format(
cls.__name__, value.__class__.__name__
)
)
output_cls = cls._deserialize_class(
input_cls_name=value.get('__class__'),
trusted=trusted,
strict=strict,
)
instance = kwargs.pop('_instance', None)
if instance is not None and not isinstance(instance, output_cls):
raise ValueError(
'Input _instance must be of class {}, not {}'.format(
output_cls.__name__, instance.__class__.__name__
)
)
state, unused = utils.filter_props(output_cls, value, True)
unused.pop('__class__', None)
if unused and strict:
raise ValueError(
'Unused properties during deserialization: {}'.format(
', '.join(unused)
)
)
kwargs.update({'trusted': trusted, 'strict': strict})
newstate = {}
for key, val in iteritems(state):
newstate[key] = output_cls._props[key].deserialize(val, **kwargs)
mutable, immutable = utils.filter_props(output_cls, newstate, False)
with handlers.listeners_disabled():
if instance is None:
instance = output_cls(**mutable)
else:
for key, val in iteritems(mutable):
setattr(instance, key, val)
for key, val in iteritems(immutable):
valid_val = output_cls._props[key].validate(instance, val)
instance._backend[key] = valid_val
if assert_valid and not instance.validate():
raise utils.ValidationError('Deserialized instance is not valid')
return instance | def deserialize(cls, value, trusted=False, strict=False, #pylint: disable=too-many-locals
assert_valid=False, **kwargs) | Creates **HasProperties** instance from serialized dictionary
This uses the Property deserializers to deserialize all
JSON-compatible dictionary values into their corresponding Property
values on a new instance of a **HasProperties** class. Extra keys
in the dictionary that do not correspond to Properties will be
ignored.
**Parameters**:
* **value** - Dictionary to deserialize new instance from.
* **trusted** - If True (and if the input dictionary has
:code:`'__class__'` keyword and this class is in the registry), the
new **HasProperties** class will come from the dictionary.
If False (the default), only the **HasProperties** class this
method is called on will be constructed.
* **strict** - Requires :code:`'__class__'`, if present on the input
dictionary, to match the deserialized instance's class. Also
disallows unused properties in the input dictionary. Default
is False.
* **assert_valid** - Require deserialized instance to be valid.
Default is False.
* Any other keyword arguments will be passed through to the Property
deserializers. | 3.149019 | 3.072909 | 1.024768 |
if not input_cls_name or input_cls_name == cls.__name__:
return cls
if trusted and input_cls_name in cls._REGISTRY:
return cls._REGISTRY[input_cls_name]
if strict:
raise ValueError(
'Class name {} from deserialization input dictionary does '
'not match input class {}'.format(input_cls_name, cls.__name__)
)
return cls | def _deserialize_class(cls, input_cls_name, trusted, strict) | Returns the HasProperties class to use for deserialization | 3.136843 | 3.037012 | 1.032871 |
status = Instance('', TaskStatus).validate(None, status)
print(r'{taskname} | {percent:>3}% | {message}'.format(
taskname=self.__class__.__name__,
percent=int(round(100*status.progress)),
message=status.message if status.message else '',
)) | def report_status(self, status) | Hook for reporting the task status towards completion | 7.522707 | 6.256213 | 1.202438 |
registry = kwargs.pop('registry', None)
if registry is None:
registry = dict()
if not registry:
root = True
registry.update({'__root__': self.uid})
else:
root = False
key = self.uid
if key not in registry:
registry.update({key: None})
registry.update({key: super(HasUID, self).serialize(
registry=registry,
include_class=include_class,
save_dynamic=save_dynamic,
**kwargs
)})
if root:
return registry
return key | def serialize(self, include_class=True, save_dynamic=False, **kwargs) | Serialize nested HasUID instances to a flat dictionary
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* You may also specify a **registry** - This is the flat dictionary
where UID/HasUID pairs are stored. By default, no registry need
be provided; a new dictionary will be created.
* Any other keyword arguments will be passed through to the Property
serializers. | 3.440529 | 3.148706 | 1.09268 |
registry = kwargs.pop('registry', None)
if registry is None:
if not isinstance(value, dict):
raise ValueError('HasUID must deserialize from dictionary')
registry = value.copy()
uid = kwargs.get('root', registry.get('__root__'))
else:
uid = value
if uid in cls._INSTANCES and uid not in registry:
return cls._INSTANCES[uid]
if uid in cls._INSTANCES:
raise ValueError('UID already used: {}'.format(uid))
if uid not in registry:
raise ValueError('Invalid UID: {}'.format(uid))
value = registry[uid]
if not isinstance(value, HasUID):
try:
input_class = value.get('__class__')
except AttributeError:
input_class = None
new_cls = cls._deserialize_class(input_class, trusted, strict)
new_inst = new_cls()
registry.update({uid: new_inst})
super(HasUID, cls).deserialize(
value=value,
trusted=trusted,
strict=strict,
registry=registry,
_instance=new_inst,
**kwargs
)
cls._INSTANCES[uid] = registry[uid]
return registry[uid] | def deserialize(cls, value, trusted=False, strict=False,
assert_valid=False, **kwargs) | Deserialize nested HasUID instance from flat pointer dictionary
**Parameters**
* **value** - Flat pointer dictionary produced by :code:`serialize`
with UID/HasUID key/value pairs. It also includes a
:code:`__root__` key to specify the root HasUID instance.
* **trusted** - If True (and if the input dictionaries have
:code:`'__class__'` keyword and this class is in the registry), the
new **HasProperties** class will come from the dictionary.
If False (the default), only the **HasProperties** class this
method is called on will be constructed.
* **strict** - Requires :code:`'__class__'`, if present on the input
dictionary, to match the deserialized instance's class. Also
disallows unused properties in the input dictionary. Default
is False.
* **assert_valid** - Require deserialized instance to be valid.
Default is False.
* You may also specify an alternative **root** - This allows a different
HasUID root instance to be specified. It overrides :code:`__root__`
in the input dictionary.
* Any other keyword arguments will be passed through to the Property
deserializers.
.. note::
HasUID instances are constructed with no input arguments
(ie :code:`cls()` is called). This means deserialization will
fail if the init method has been overridden to require
input parameters. | 2.92926 | 2.868108 | 1.021321 |
kwargs.update({'trusted': kwargs.get('trusted', False)})
if self.deserializer is not None:
return self.deserializer(value, **kwargs)
if value is None:
return None
if isinstance(value, string_types):
return value
if issubclass(self.instance_class, base.HasProperties):
return self.instance_class.deserialize(value, **kwargs)
return self.from_json(value, **kwargs) | def deserialize(self, value, **kwargs) | Deserialize instance from JSON value
If a deserializer is registered, that is used. Otherwise, if the
instance_class is a HasProperties subclass, an instance can be
deserialized from a dictionary. | 3.032854 | 2.716305 | 1.116537 |
# Pass if already validated
if getattr(value, '__valid__', False):
return value
# Validate that value is PNG
if isinstance(value, png.Image):
pass
else:
value = super(ImagePNG, self).validate(instance, value)
try:
png.Reader(value).validate_signature()
except png.FormatError:
self.error(instance, value, extra='Open file is not PNG.')
value.seek(0)
# Write input to new bytestream
output = BytesIO()
output.name = self.filename
output.__valid__ = True
if isinstance(value, png.Image):
value.save(output)
else:
fid = value
fid.seek(0)
output.write(fid.read())
fid.close()
output.seek(0)
return output | def validate(self, instance, value) | Checks if value is an open PNG file, valid filename, or png.Image
Returns an open bytestream of the image | 4.347171 | 3.998459 | 1.087212 |
b64rep = base64.b64encode(value.read())
value.seek(0)
jsonrep = '{preamble}{b64}'.format(
preamble=PNG_PREAMBLE,
b64=b64rep.decode(),
)
return jsonrep | def to_json(value, **kwargs) | Convert a PNG Image to base64-encoded JSON
to_json assumes that value has passed validation. | 4.505131 | 3.4747 | 1.296552 |
if not value.startswith(PNG_PREAMBLE):
raise ValueError('Not a valid base64-encoded PNG image')
infile = BytesIO()
rep = base64.b64decode(value[len(PNG_PREAMBLE):].encode('utf-8'))
infile.write(rep)
infile.seek(0)
return infile | def from_json(value, **kwargs) | Convert a PNG Image from base64-encoded JSON | 3.679944 | 3.056383 | 1.204019 |
errors = schema.errors(value)
if errors:
error_details = ''
for error in errors:
if error.pointer:
error_details += ' - %s: %s\n' % (error.pointer, error.message)
else:
error_details += ' - %s\n' % error.message
raise ValidationError('Invalid %s:\n%s' % (noun, error_details)) | def validate(schema, value, noun='value') | Checks the value against the schema, and raises ValidationError if validation
fails. | 2.134239 | 2.134554 | 0.999853 |
def decorator(func):
@wraps(func)
def inner(*passed_args, **passed_kwargs):
# Enforce no positional args
# first argument of instance method and class method is always positonal so we need
# to make expception for them. Static methods are still validated according to standard rules
# this check happens before methods are bound, so instance method is still a regular function
max_allowed_passed_args_len = 0
if is_method and type(func) in (types.FunctionType, classmethod):
max_allowed_passed_args_len = 1
if len(passed_args) > max_allowed_passed_args_len:
raise PositionalError('You cannot call this with positional arguments.')
# Validate keyword arguments
validate(kwargs, passed_kwargs, 'keyword arguments')
# Call callable
return_value = func(*passed_args, **passed_kwargs)
# Validate return value
validate(returns, return_value, 'return value')
return return_value
inner.__wrapped__ = func
# caveat: checking for f.__validated__ will only work if @validate_call
# is not masked by other decorators except for @classmethod or @staticmethod
inner.__validated__ = True
return inner
return decorator | def validate_call(kwargs, returns, is_method=False) | Decorator which runs validation on a callable's arguments and its return
value. Pass a schema for the kwargs and for the return value. Positional
arguments are not supported. | 6.133654 | 5.989111 | 1.024134 |
if error.pointer:
error.pointer = '{}.{}'.format(pointer_or_prefix, error.pointer)
else:
error.pointer = '{}'.format(pointer_or_prefix)
return error | def _update_error_pointer(error, pointer_or_prefix) | Helper function to update an Error's pointer attribute with a (potentially
prefixed) dictionary key or list index. | 2.527685 | 2.22799 | 1.134513 |
optional_keys = set(optional_keys or [])
return Dictionary(
contents=type(self.contents)(
(k, v) for d in (self.contents, contents) for k, v in six.iteritems(d)
) if contents else self.contents,
optional_keys=optional_keys if replace_optional_keys else self.optional_keys | optional_keys,
allow_extra_keys=self.allow_extra_keys if allow_extra_keys is None else allow_extra_keys,
description=self.description if description is None else description,
) | def extend(
self,
contents=None,
optional_keys=None,
allow_extra_keys=None,
description=None,
replace_optional_keys=False,
) | This method allows you to create a new `Dictionary` that extends the current `Dictionary` with additional
contents and/or optional keys, and/or replaces the `allow_extra_keys` and/or `description` attributes.
:param contents: More contents, if any, to extend the current contents
:type contents: dict
:param optional_keys: More optional keys, if any, to extend the current optional keys
:type optional_keys: union[set, list, tuple]
:param allow_extra_keys: If non-`None`, this overrides the current `allow_extra_keys` attribute
:type allow_extra_keys: bool
:param description: If non-`None`, this overrides the current `description` attribute
:type description: union[str, unicode]
:param replace_optional_keys: If `True`, then the `optional_keys` argument will completely replace, instead of
extend, the current optional keys
:type replace_optional_keys: bool
:return: A new `Dictionary` extended from the current `Dictionary` based on the supplied arguments
:rtype: Dictionary | 2.202574 | 2.071899 | 1.06307 |
if value not in self.values:
return [Error(self._error_message, code=ERROR_CODE_UNKNOWN)]
return [] | def errors(self, value) | Returns a list of errors with the value. An empty/None return means
that it's valid. | 8.430996 | 6.943461 | 1.214235 |
# NO Dump file selected -> DO NOTHING
if self.running_config.output_file:
# Determinate file format
_, extension = op.splitext(self.running_config.output_file)
extension = extension.replace(".", "")
if extension not in self.ALLOWED_DUMP_FORMATS:
raise PCException(
f"Extension of dump file is not available. "
f"Allowed extensions are: "
f"{', '.join(self.ALLOWED_DUMP_FORMATS)}")
with open(self.running_config.output_file, "w") as f:
if extension == "csv":
csv_writer = csv.writer(f)
csv_writer.writerow(("# Name",
"CPE",
"CVE",
"Score",
"Summary"))
csv_writer.writerows(self._to_csv())
elif extension == "json":
json.dump(self.results,
f,
indent=4,
sort_keys=True)
elif extension == "raw":
f.write(self._to_table()) | def dump(self) | Dump to file | 3.761712 | 3.641267 | 1.033078 |
def _read_stdin() -> str:
# Read input with 5 sec timeout
while sys.stdin in select.select([sys.stdin], [], [], 2)[0]:
line = sys.stdin.readline()
if line:
yield line
else: # an empty line means stdin has been closed
return
# --------------------------------------------------------------------------
# Find data source
# --------------------------------------------------------------------------
dependencies = []
# Data from command line from user?
if patton_config.nargs_input:
if dependency_or_banner == "banner":
dependencies.append(["cli_input", patton_config.nargs_input])
else:
dependencies.extend(patton_config.nargs_input)
# Data form stdin input ?
if not sys.stdin.isatty():
input_read = "".join(list(_read_stdin()))
# YES => Data from stdin
if input_read:
if dependency_or_banner == "banner":
dependencies.append(["stdin", input_read])
else:
dependencies.extend(input_read.splitlines())
# Data from file?
if patton_config.data_from_file:
f = op.abspath(op.join(op.abspath(os.getcwd()),
patton_config.data_from_file))
# YES => dependencies from file
with open(f, "r") as f:
if dependency_or_banner == "banner":
dependencies.append(["file", f.read()])
else:
dependencies.extend(f.read().splitlines())
# NO data from any other source => Continuous check selected?
if not dependencies and not patton_config.follow_checking:
# NO data source found => Error! We need some data!
raise PCException("You need to specify andy dependency "
"from any kind of source: stdin, "
"file of cli")
return dependencies | def get_data_from_sources(patton_config: PattonRunningConfig,
dependency_or_banner: str = "dependency") \
-> List[str] | This function try to get data from different sources:
- command line arguments
- from external input file
- from stdin
Return a list with the content of all of collected data. A list element by
each input data found.
:param dependency_or_banner: allowed values are: ["dependency" | "banner"]
:type dependency_or_banner: str | 4.523847 | 4.44276 | 1.018252 |
# Aggregate parsed banner results from every (source_type, content) pair.
result = set()
for source_type, source_content in banners:
# Only file-based sources (nmap output) are handled at present.
if source_type == "file":
result.update(nmap_file(source_content))
return result | def parse_banners(banners: List[List[str]],
patton_config: PattonRunningConfig) -> Set | This function tries to find the most suitable parser for the input banners
and parses them | 6.957053 | 5.799061 | 1.199686 |
# Run each collected dependency source through the parser configured for
# patton_config.source_type and merge the per-source results.
result = {}
for source_type, source_content in dependencies:
# Select parser
parser = DEPENDENCIES_PARSERS[patton_config.source_type]
if not source_content:
continue
if source_type == "cli_input":
# CLI input arrives as a list of strings -> join into one text blob.
fixed_source = "\n".join(source_content)
else:
fixed_source = source_content
result.update(parser(fixed_source, patton_config))
return result | def parse_dependencies(dependencies: List[List[str]],
patton_config: PattonRunningConfig) -> Dict | This function tries to find the most suitable parser for the input dependencies
and parses them | 4.097093 | 3.906564 | 1.048772 |
def on_init(app): # pylint: disable=unused-argument
# Sphinx init hook: regenerate API stubs (sphinx-apidoc) and the REST API
# page (swg2rst) so the generated .rst files need not be checked in.
docs_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.abspath(os.path.join(docs_path, '..'))
apidoc_path = 'sphinx-apidoc'
swg2rst_path = 'swg2rst'
if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv
# If we are, assemble the path manually
bin_path = os.path.abspath(os.path.join(sys.prefix, 'bin'))
apidoc_path = os.path.join(bin_path, apidoc_path)
swg2rst_path = os.path.join(bin_path, swg2rst_path)
# Generate stubs for the user_tasks package, excluding migrations.
check_call([apidoc_path, '-o', docs_path, os.path.join(root_path, 'user_tasks'),
os.path.join(root_path, 'user_tasks/migrations')])
json_path = os.path.join(docs_path, 'swagger.json')
rst_path = os.path.join(docs_path, 'rest_api.rst')
check_call([swg2rst_path, json_path, '-f', 'rst', '-o', rst_path])
Read the Docs won't run tox or custom shell commands, so we need this to
avoid checking in the generated reStructuredText files. | null | null | null | |
# NOTE(review): the opening `try:` for the except below lives in the
# function header (outside this fragment). Resolve the task's dotted path;
# non-importable senders are silently ignored.
task_class = import_string(sender)
except ImportError:
return
# Celery registers task *instances*, so check the instance's class.
if issubclass(task_class.__class__, UserTaskMixin):
arguments_dict = task_class.arguments_as_dict(*body['args'], **body['kwargs'])
user_id = _get_user_id(arguments_dict)
task_id = body['id']
# Chain/chord members delegate to helpers that also create the container
# status records; plain (possibly grouped) tasks are handled below.
if body.get('callbacks', []):
return _create_chain_entry(user_id, task_id, task_class, body['args'], body['kwargs'], body['callbacks'])
if body.get('chord', None):
return _create_chord_entry(task_id, task_class, body, user_id)
parent = _get_or_create_group_parent(body, user_id)
name = task_class.generate_name(arguments_dict)
total_steps = task_class.calculate_total_steps(arguments_dict)
# get_or_create: the signal can fire more than once for the same task id.
UserTaskStatus.objects.get_or_create(
task_id=task_id, defaults={'user_id': user_id, 'parent': parent, 'name': name, 'task_class': sender,
'total_steps': total_steps})
if parent:
parent.increment_total_steps(total_steps)
try | Create a :py:class:`UserTaskStatus` record for each :py:class:`UserTaskMixin`.
Also creates a :py:class:`UserTaskStatus` for each chain, chord, or group containing
the new :py:class:`UserTaskMixin`. | 3.385156 | 3.156878 | 1.072311 |
# Create a status row for this chain link; the caller recurses over the
# callbacks (see fused tail line) so the whole chain shares one
# 'celery.chain' container record.
LOGGER.debug(task_class)
if issubclass(task_class.__class__, UserTaskMixin):
arguments_dict = task_class.arguments_as_dict(*args, **kwargs)
name = task_class.generate_name(arguments_dict)
total_steps = task_class.calculate_total_steps(arguments_dict)
parent_name = kwargs.get('user_task_name', '')
with transaction.atomic():
if parent is None:
# First task in the chain, create a status record for it
parent = UserTaskStatus.objects.create(
is_container=True, name=parent_name, task_class='celery.chain', task_id=str(uuid4()),
total_steps=0, user_id=user_id)
UserTaskStatus.objects.create(
name=name, parent=parent, task_class=task_class, task_id=task_id, total_steps=total_steps,
user_id=user_id)
parent.increment_total_steps(total_steps)
# Backfill the container's name if it was created without one.
if parent_name and not parent.name:
parent.set_name(parent_name)
for callback in callbacks:
links = callback.options.get('link', [])
callback_class = import_string(callback.task)
_create_chain_entry(user_id, callback.id, callback_class, callback.args, callback.kwargs, links, parent=parent) | def _create_chain_entry(user_id, task_id, task_class, args, kwargs, callbacks, parent=None) | Create and update status records for a new :py:class:`UserTaskMixin` in a Celery chain. | 3.110906 | 2.895058 | 1.074557 |
# Create status rows for one chord header task; the first header task to
# arrive also creates the group + chord containers and the chord body row.
args = message_body['args']
kwargs = message_body['kwargs']
arguments_dict = task_class.arguments_as_dict(*args, **kwargs)
name = task_class.generate_name(arguments_dict)
total_steps = task_class.calculate_total_steps(arguments_dict)
parent_name = kwargs.get('user_task_name', '')
chord_data = message_body['chord']
group_id = message_body['taskset']
with transaction.atomic():
group, created = UserTaskStatus.objects.get_or_create(
task_id=group_id, defaults={'is_container': True, 'name': parent_name, 'task_class': 'celery.group',
'total_steps': total_steps, 'user_id': user_id})
if created:
# Also create a status for the chord as a whole
chord = UserTaskStatus.objects.create(
is_container=True, name=parent_name, task_class='celery.chord', task_id=str(uuid4()),
total_steps=total_steps, user_id=user_id)
group.parent = chord
group.save(update_fields={'parent', 'modified'})
else:
# Group already existed: another header task created the containers.
chord = None
group.increment_total_steps(total_steps)
if parent_name and not group.name:
group.set_name(parent_name)
UserTaskStatus.objects.create(
name=name, parent=group, task_class=task_class, task_id=task_id, total_steps=total_steps, user_id=user_id)
# chord body task status
if not created:
# body being handled by another of the tasks in the header
return
task_id = chord_data['options']['task_id']
body_task = chord_data['task']
body_class = import_string(body_task).__class__
if not issubclass(body_class, UserTaskMixin):
return
args = chord_data['args']
kwargs = chord_data['kwargs']
arguments_dict = body_class.arguments_as_dict(*args, **kwargs)
name = body_class.generate_name(arguments_dict)
total_steps = body_class.calculate_total_steps(arguments_dict)
UserTaskStatus.objects.get_or_create(
task_id=task_id, defaults={'name': name, 'parent': chord, 'task_class': body_task,
'total_steps': total_steps, 'user_id': user_id})
chord.increment_total_steps(total_steps) | def _create_chord_entry(task_id, task_class, message_body, user_id) | Create and update status records for a new :py:class:`UserTaskMixin` in a Celery chord. | 2.676628 | 2.533861 | 1.056344 |
# A 'taskset' key in the publish message marks group membership.
parent_id = message_body.get('taskset', None)
if not parent_id:
# Not part of a group
return None
parent_class = 'celery.group'
parent_name = message_body['kwargs'].get('user_task_name', '')
# get_or_create so concurrent member tasks share a single group record.
parent, _ = UserTaskStatus.objects.get_or_create(
task_id=parent_id, defaults={'is_container': True, 'name': parent_name, 'task_class': parent_class,
'total_steps': 0, 'user_id': user_id})
# Backfill the name if an earlier member created the record without one.
if parent_name and not parent.name:
parent.name = parent_name
parent.save(update_fields={'name', 'modified'})
return parent | def _get_or_create_group_parent(message_body, user_id) | Determine if the given task belongs to a group or not, and if so, get or create a status record for the group.
Arguments:
message_body (dict): The body of the before_task_publish signal for the task in question
user_id (int): The primary key of the user model record for the user who triggered the task.
(If using a custom user model, this may not be an integer.)
Returns
-------
UserTaskStatus: The status record for the containing group, or `None` if there isn't one | 3.988291 | 3.960561 | 1.007001 |
# Require a user_id argument and confirm it maps to a real user record.
if 'user_id' not in arguments_dict:
raise TypeError('Each invocation of a UserTaskMixin subclass must include the user_id')
user_id = arguments_dict['user_id']
try:
get_user_model().objects.get(pk=user_id)
except (ValueError, get_user_model().DoesNotExist):
# ValueError covers malformed pk values (e.g. non-numeric for int pks).
raise TypeError('Invalid user_id: {}'.format(user_id))
return user_id | def _get_user_id(arguments_dict) | Get and validate the `user_id` argument to a task derived from `UserTaskMixin`.
Arguments:
arguments_dict (dict): The parsed positional and keyword arguments to the task
Returns
-------
int: The primary key of a user record (may not be an int if using a custom user model) | 2.905028 | 2.628735 | 1.105105 |
# Celery failure handler: mark the status record failed, unless the
# "failure" was actually a deliberate cancellation.
if isinstance(sender, UserTaskMixin):
exception = kwargs['exception']
if not isinstance(exception, TaskCanceledException):
# Don't include traceback, since this is intended for end users
sender.status.fail(str(exception))
user_task_stopped.send_robust(sender=UserTaskStatus, status=sender.status) | def task_failed(sender=None, **kwargs) | Update the status record accordingly when a :py:class:`UserTaskMixin` fails. | 8.113694 | 6.545874 | 1.239513 |
# The guarding isinstance(sender, UserTaskMixin) check is in the function
# header (outside this fragment).
status = sender.status
# Failed tasks with good exception handling did not succeed just because they ended cleanly
if status.state not in (UserTaskStatus.CANCELED, UserTaskStatus.FAILED, UserTaskStatus.RETRYING):
status.succeed()
user_task_stopped.send_robust(sender=UserTaskStatus, status=sender.status) | def task_succeeded(sender=None, **kwargs): # pylint: disable=unused-argument
if isinstance(sender, UserTaskMixin) | Update the status record accordingly when a :py:class:`UserTaskMixin` finishes successfully. | 11.533376 | 9.387501 | 1.228589 |
# Interpolate in HCL space so hue blends perceptually between stops.
hcolors = np.array([rgb2hcl(*i[:3]) for i in colors])
# unwrap colormap in hcl space
hcolors[:, 0] = np.rad2deg(np.unwrap(np.deg2rad(np.array(hcolors)[:, 0])))
channels = [np.interp(arr,
np.array(values),
np.array(hcolors)[:, i])
for i in range(3)]
channels = list(hcl2rgb(*channels))
# Any components beyond RGB (e.g. alpha) are interpolated linearly as-is.
rest = [np.interp(arr,
np.array(values),
np.array(colors)[:, i + 3])
for i in range(np.array(colors).shape[1] - 3)]
channels.extend(rest)
try:
# Carry the input mask through when arr is a masked array.
return [np.ma.array(channel, mask=arr.mask) for channel in channels]
except AttributeError:
return channels | def colorize(arr, colors, values) | Colorize a monochromatic array *arr*, based *colors* given for
*values*. Interpolation is used. *values* must be in ascending order. | 3.526369 | 3.550496 | 0.993204 |
# Bucket arr into palette indices; a sentinel above the data/values max is
# appended so the top bin stays closed.
new_arr = np.digitize(arr.ravel(),
np.concatenate((values,
[max(np.nanmax(arr),
values.max()) + 1])))
# digitize is 1-based; shift to 0-based indices and clamp to the palette.
new_arr -= 1
new_arr = new_arr.clip(min=0, max=len(values) - 1)
try:
new_arr = np.ma.array(new_arr.reshape(arr.shape), mask=arr.mask)
except AttributeError:
# Plain ndarray input: nothing masked to carry over.
new_arr = new_arr.reshape(arr.shape)
return new_arr, tuple(colors) | def palettize(arr, colors, values) | From start *values* apply *colors* to *data*. | 2.80454 | 2.801516 | 1.00108 |
# Build a (height x length) horizontal ramp spanning the colormap's range.
cbar = np.tile(np.arange(length) * 1.0 / (length - 1), (height, 1))
cbar = (cbar * (colormap.values.max() - colormap.values.min())
+ colormap.values.min())
return colormap.colorize(cbar) | def colorbar(height, length, colormap) | Return the channels of a colorbar. | 3.018306 | 2.667527 | 1.1315 |
# Like colorbar(), but the ramp spans max+1 so the last palette bin shows.
cbar = np.tile(np.arange(length) * 1.0 / (length - 1), (height, 1))
cbar = (cbar * (colormap.values.max() + 1 - colormap.values.min())
+ colormap.values.min())
return colormap.palettize(cbar) | def palettebar(height, length, colormap) | Return the channels of a palettebar. | 3.595811 | 3.185698 | 1.128736 |
# Accept the bounds in either order.
if min_val > max_val:
max_val, min_val = min_val, max_val
# Linearly rescale the control values onto [min_val, max_val].
self.values = (((self.values * 1.0 - self.values.min()) /
(self.values.max() - self.values.min()))
* (max_val - min_val) + min_val) | def set_range(self, min_val, max_val) | Set the range of the colormap to [*min_val*, *max_val*] | 2.496189 | 2.445219 | 1.020845 |
# Rescale colors to the 0-255 range rasterio expects (note: mutates
# self.colors in place).
self.colors = (((self.colors * 1.0 - self.colors.min()) /
(self.colors.max() - self.colors.min())) * 255)
return dict(zip(self.values, tuple(map(tuple, self.colors)))) | def to_rio(self) | Converts the colormap to a rasterio colormap. | 4.660524 | 3.811914 | 1.22262 |
def cancel(self, request, *args, **kwargs): # pylint: disable=unused-argument
# DRF detail action: cancel the underlying task, then return the
# serialized (updated) status record.
status = self.get_object()
status.cancel()
serializer = StatusSerializer(status, context={'request': request})
return Response(serializer.data) | Cancel the task associated with the specified status record.
Arguments:
request (Request): A POST including a task status record ID
Returns
-------
Response: A JSON response indicating whether the cancellation succeeded or not | null | null | null | |
def swagger(request): # pylint: disable=unused-argument
# Build the Open API schema for the REST API docs view.
generator = schemas.SchemaGenerator(title='django-user-tasks REST API')
return response.Response(generator.get_schema()) | Render Swagger UI and the underlying Open API schema JSON file. | null | null | null | |
# Serve a pre-generated schema file when SWAGGER_JSON_PATH is set;
# otherwise fall back to the parent renderer's normal output.
if 'SWAGGER_JSON_PATH' in os.environ:
with io.open(os.environ['SWAGGER_JSON_PATH'], 'rb') as f:
return f.read()
else:
return super(ConditionalOpenAPIRenderer, self).render(data, accepted_media_type, renderer_context) | def render(self, data, accepted_media_type=None, renderer_context=None) | Render the appropriate Open API JSON file. | 2.92219 | 2.525754 | 1.156957 |
# Status records: owner-or-superuser may view/cancel/delete; only
# superusers may modify directly.
rules.add_perm('user_tasks.view_usertaskstatus', STATUS_PERMISSION)
rules.add_perm('user_tasks.cancel_usertaskstatus', STATUS_PERMISSION)
rules.add_perm('user_tasks.change_usertaskstatus', rules.predicates.is_superuser)
rules.add_perm('user_tasks.delete_usertaskstatus', STATUS_PERMISSION)
# Artifacts: owner-or-superuser may view; only superusers change/delete.
rules.add_perm('user_tasks.view_usertaskartifact', ARTIFACT_PERMISSION)
rules.add_perm('user_tasks.change_usertaskartifact', rules.predicates.is_superuser)
rules.add_perm('user_tasks.delete_usertaskartifact', rules.predicates.is_superuser) | def add_rules() | Use the rules provided in this module to implement authorization checks for the ``django-user-tasks`` models.
These rules allow only superusers and the user who triggered a task to view its status or artifacts, cancel the
task, or delete the status information and all its related artifacts. Only superusers are allowed to directly
modify or delete an artifact (or to modify a task status record). | 2.223287 | 1.930198 | 1.151844 |
# Superusers see everything; others only artifacts of their own tasks.
if request.user.is_superuser:
return queryset
return queryset.filter(status__user=request.user) | def filter_queryset(self, request, queryset, view) | Filter out any artifacts which the requesting user does not have permission to view. | 3.3787 | 2.853849 | 1.18391 |
'''
_text_to_rgb takes as input a string composed by 3 values in the range [0,255]
and returns a tuple of integers. If the parameters cat and tot are given,
the function generates a transparency value for this color and returns a tuple
of length 4.
tot is the total number of colors in the colormap
cat is the index of the current colour in the colormap
if norm is set to True, the input values are normalized between 0 and 1.
'''
tokens = value.split()
if hex:
# Hex mode: convert each whitespace-separated hex token to an int.
for i in range(len(tokens)):
tokens[i] = _hex_to_rgb(tokens[i])
# Alpha ramps with the color's index in the map, clamped at fully opaque.
transparency = float(cat)/float(tot)+offset
if transparency > 1.0:
transparency = 1.0
if norm:
# Normalized output: floats in [0, 1] plus float alpha.
return (float(tokens[0])/255.0, float(tokens[1])/255.0, float(tokens[2])/255.0, transparency)
else:
return (int(tokens[0]), int(tokens[1]), int(tokens[2]), int(round(transparency * 255.0))) | def _text_to_rgb(value,norm=False,cat=1, tot=1,offset=0.5,hex=False) | _text_to_rgb takes as input a string composed by 3 values in the range [0,255]
and returns a tuple of integers. If the parameters cat and tot are given,
the function generates a transparency value for this color and returns a tuple
of length 4.
tot is the total number of colors in the colormap
cat is the index of the current colour in the colormap
if norm is set to True, the input values are normalized between 0 and 1. | 3.335015 | 1.737652 | 1.919265 |
'''
_make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). _make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
'''
bit_rgb = np.linspace(0,1,256)
if position == None:
position = np.linspace(0,1,len(colors))
else:
if len(position) != len(colors):
sys.exit("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
sys.exit("position must start with 0 and end with 1")
palette = [(i, (float(r), float(g), float(b), float(a))) for
i, (r, g, b, a) in enumerate(colors)]
cmap = Colormap(*palette)
return cmap | def _make_cmap(colors, position=None, bit=False) | _make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). _make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color. | 3.899962 | 1.85204 | 2.105765 |
'''
cmap_from_text takes as input a file that contains a colormap in text format
composed by lines with 3 values in the range [0,255] or [00,FF]
and returns a tuple of integers. If the parameters cat and tot are given,
the function generates a transparency value for this color and returns a tuple
of length 4.
tot is the total number of colors in the colormap
cat is the index of the current colour in the colormap
if norm is set to True, the input values are normalized between 0 and 1.
'''
# One color per input line, stripped of its trailing newline.
lines = [line.rstrip('\n') for line in open(filename)]
_colors=[]
_tot = len(lines)
_index = 1
# When transparency is requested, pass the 1-based line index so the
# alpha ramps across the colormap.
for i in lines:
if transparency:
_colors.append(_text_to_rgb(i,norm=norm,cat=_index,tot=_tot,hex=hex))
else:
_colors.append(_text_to_rgb(i,norm=norm,hex=hex))
_index = _index + 1
return _make_cmap(_colors) | def cmap_from_text(filename, norm=False, transparency=False, hex=False) | cmap_from_text takes as input a file that contains a colormap in text format
composed by lines with 3 values in the range [0,255] or [00,FF]
and returns a tuple of integers. If the parameters cat and tot are given,
the function generates a transparency value for this color and returns a tuple
of length 4.
tot is the total number of colors in the colormap
cat is the index of the current colour in the colormap
if norm is set to True, the input values are normalized between 0 and 1. | 4.511275 | 1.847441 | 2.441905 |
'''
Utility function that converts an image file in 3 np arrays
that can be fed into geo_image.GeoImage in order to generate
a PyTROLL GeoImage object.
'''
im = Pimage.open(filepath).convert('RGB')
(width, height) = im.size
# Normalize each 8-bit band to [0, 1] floats.
_r = np.array(list(im.getdata(0)))/255.0
_g = np.array(list(im.getdata(1)))/255.0
_b = np.array(list(im.getdata(2)))/255.0
# getdata() is flat; restore the 2-D (row, col) layout.
_r = _r.reshape((height, width))
_g = _g.reshape((height, width))
_b = _b.reshape((height, width))
return _r, _g, _b | def _image2array(filepath) | Utility function that converts an image file in 3 np arrays
that can be fed into geo_image.GeoImage in order to generate
a PyTROLL GeoImage object. | 3.003931 | 1.691264 | 1.776145 |
cases = {"jpg": "jpeg",
"jpeg": "jpeg",
"tif": "tiff",
"tiff": "tif",
"pgm": "ppm",
"pbm": "ppm",
"ppm": "ppm",
"bmp": "bmp",
"dib": "bmp",
"gif": "gif",
"im": "im",
"pcx": "pcx",
"png": "png",
"xbm": "xbm",
"xpm": "xpm",
'jp2': 'jp2',
}
fformat = fformat.lower()
try:
fformat = cases[fformat]
except KeyError:
raise UnknownImageFormat("Unknown image format '%s'." % fformat)
return fformat | def check_image_format(fformat) | Check that *fformat* is valid | 2.119572 | 2.117645 | 1.00091 |
return (isinstance(item, (list, tuple, set)) and
len(item) == 2 and
not isinstance(item[0], (list, tuple, set)) and
not isinstance(item[1], (list, tuple, set))) | def _is_pair(item) | Check if an item is a pair (tuple of size 2). | 2.04826 | 1.814816 | 1.128632 |
# Inverse of the RGB->YCbCr transform, using the same luma weights
# (kb=0.114, kr=0.299 -- the ITU-R BT.601 coefficients).
kb_ = 0.114
kr_ = 0.299
r__ = 2 * cr_ / (1 - kr_) + y__
b__ = 2 * cb_ / (1 - kb_) + y__
# Green is recovered from luma once R and B are known.
g__ = (y__ - kr_ * r__ - kb_ * b__) / (1 - kr_ - kb_)
return r__, g__, b__ | def ycbcr2rgb(y__, cb_, cr_) | Convert the three YCbCr channels to RGB channels. | 3.369561 | 3.293009 | 1.023247 |
# Split data/mask so masked and plain arrays take the same path.
if isinstance(chn, np.ma.core.MaskedArray):
chn_data = chn.data
chn_mask = chn.mask
else:
chn_data = np.array(chn)
# Plain arrays get an all-False (no-op) mask.
chn_mask = False
# Linearly rescale [color_min, color_max] onto [0, 1].
scaled = ((chn_data - color_min) *
1.0 / (color_max - color_min))
self.channels.append(np.ma.array(scaled, mask=chn_mask)) | def _add_channel(self, chn, color_min, color_max) | Adds a channel to the image object | 2.369238 | 2.255626 | 1.050368 |
channels = []
# Palette modes can't be written directly; expand to RGB(A) first.
if self.mode == "P":
self.convert("RGB")
if self.mode == "PA":
self.convert("RGBA")
for chn in self.channels:
# Clip to [0, 1] then scale to the full range of the target dtype.
if isinstance(chn, np.ma.core.MaskedArray):
final_data = chn.data.clip(0, 1) * np.iinfo(dtype).max
else:
final_data = chn.clip(0, 1) * np.iinfo(dtype).max
if np.issubdtype(dtype, np.integer):
final_data = np.round(final_data)
channels.append(np.ma.array(final_data,
dtype,
mask=np.ma.getmaskarray(chn)))
if self.fill_value is not None:
# Fill values are stored in [0, 1]; scale them the same way.
fill_value = [int(col * np.iinfo(dtype).max)
for col in self.fill_value]
else:
fill_value = None
return channels, fill_value | def _finalize(self, dtype=np.uint8) | Finalize the image, that is put it in RGB mode, and set the channels
in unsigned 8bit format ([0,255] range) (if the *dtype* doesn't say
otherwise). | 2.676986 | 2.67337 | 1.001352 |
# Sanity check: channels and shape must agree (both empty or both set).
if(((self.channels == []) and (not self.shape == (0, 0))) or
((not self.channels == []) and (self.shape == (0, 0)))):
raise RuntimeError("Channels-shape mismatch.")
return self.channels == [] and self.shape == (0, 0) | def is_empty(self) | Checks for an empty image. | 4.538404 | 4.063288 | 1.116929 |
# Thin wrapper: delegate the actual writing to the PIL-based saver.
self.pil_save(filename, compression, fformat,
thumbnail_name, thumbnail_size) | def save(self, filename, compression=6, fformat=None,
thumbnail_name=None, thumbnail_size=None) | Save the image to the given *filename*. For some formats like jpg
and png, the work is delegated to :meth:`pil_save`, which doesn't
support the *compression* option. | 4.114977 | 3.289411 | 1.250977 |
# PIL does not support compression option.
del compression
if self.is_empty():
raise IOError("Cannot save an empty image")
if isinstance(filename, (str, six.text_type)):
ensure_dir(filename)
# Default the format from the filename extension (first 3 chars).
fformat = fformat or os.path.splitext(filename)[1][1:4]
fformat = check_image_format(fformat)
params = {}
if fformat == 'png':
# Take care of GeoImage.tags (if any).
params['pnginfo'] = self._pngmeta()
# JPEG images does not support transparency
if fformat == 'jpeg' and not self.fill_value:
self.fill_value = [0, 0, 0, 0]
logger.debug("No fill_value provided, setting it to 0.")
img = self.pil_image()
img.save(filename, fformat, **params)
# Optionally emit a downscaled thumbnail alongside the main image.
if thumbnail_name is not None and thumbnail_size is not None:
img.thumbnail(thumbnail_size, Pil.ANTIALIAS)
img.save(thumbnail_name, fformat, **params) | def pil_save(self, filename, compression=6, fformat=None,
thumbnail_name=None, thumbnail_size=None) | Save the image to the given *filename* using PIL. For now, the
compression level [0-9] is ignored, due to PIL's lack of support. See
also :meth:`save`. | 3.763152 | 4.023273 | 0.935346 |
# PNG text-chunk names reserved by PIL; user tags must not collide.
reserved = ('interlace', 'gamma', 'dpi', 'transparency', 'aspect')
try:
tags = self.tags
except AttributeError:
tags = {}
# Undocumented class
from PIL import PngImagePlugin
meta = PngImagePlugin.PngInfo()
# Copy from tags to new dict
for k__, v__ in tags.items():
if k__ not in reserved:
meta.add_text(k__, v__, 0)
return meta | def _pngmeta(self) | It will return GeoImage.tags as a PNG metadata object.
Inspired by:
public domain, Nick Galbreath
http://blog.modp.com/2007/08/python-pil-and-png-metadata-take-2.html | 5.628085 | 5.173327 | 1.087904 |
alpha = np.ma.array(alpha)
# Shapes must match unless both image and alpha are empty.
if(not (alpha.shape[0] == 0 and
self.shape[0] == 0) and
alpha.shape != self.shape):
raise ValueError("Alpha channel shape should match image shape")
# Ensure the mode has an alpha channel, then overwrite it.
if not self.mode.endswith("A"):
self.convert(self.mode + "A")
if not self.is_empty():
self.channels[-1] = alpha | def putalpha(self, alpha) | Adds an *alpha* channel to the current image, or replaces it with
*alpha* if it already exists. | 5.218785 | 4.847572 | 1.076577 |
# Only RGB/RGBA images can be converted here.
self._check_modes(("RGB", "RGBA"))
(self.channels[0], self.channels[1], self.channels[2]) = \
rgb2ycbcr(self.channels[0],
self.channels[1],
self.channels[2])
# Keep the fill value consistent with the new color space.
if self.fill_value is not None:
self.fill_value[0:3] = rgb2ycbcr(self.fill_value[0],
self.fill_value[1],
self.fill_value[2])
self.mode = mode | def _rgb2ycbcr(self, mode) | Convert the image from RGB mode to YCbCr. | 2.442288 | 2.347636 | 1.040318 |
# Only YCbCr/YCbCrA images can be converted here.
self._check_modes(("YCbCr", "YCbCrA"))
(self.channels[0], self.channels[1], self.channels[2]) = \
ycbcr2rgb(self.channels[0],
self.channels[1],
self.channels[2])
# Keep the fill value consistent with the new color space.
if self.fill_value is not None:
self.fill_value[0:3] = ycbcr2rgb(self.fill_value[0],
self.fill_value[1],
self.fill_value[2])
self.mode = mode | def _ycbcr2rgb(self, mode) | Convert the image from YCbCr mode to RGB. | 2.406708 | 2.291499 | 1.050277 |
# Split off alpha (palettization applies to the color channels only) and
# remember the pre-palette mode for the reverse conversion.
if self.mode.endswith("A"):
chans = self.channels[:-1]
alpha = self.channels[-1]
self._secondary_mode = self.mode[:-1]
else:
chans = self.channels
alpha = None
self._secondary_mode = self.mode
palette = []
# Combined mask: a pixel is masked if any color channel masks it.
selfmask = chans[0].mask
for chn in chans[1:]:
selfmask = np.ma.mask_or(selfmask, chn.mask)
new_chn = np.ma.zeros(self.shape, dtype=int)
color_nb = 0
# Per-pixel palette lookup; unseen colors are appended on the fly.
# NOTE(review): this is an O(pixels * palette) scan -- slow on images
# with many distinct colors.
for i in range(self.height):
for j in range(self.width):
current_col = tuple([chn[i, j] for chn in chans])
try:
next(idx
for idx in range(len(palette))
if palette[idx] == current_col)
except StopIteration:
idx = color_nb
palette.append(current_col)
color_nb = color_nb + 1
new_chn[i, j] = idx
# Map the fill value through the palette as well (adding it if needed).
if self.fill_value is not None:
if self.mode.endswith("A"):
current_col = tuple(self.fill_value[:-1])
fill_alpha = [self.fill_value[-1]]
else:
current_col = tuple(self.fill_value)
fill_alpha = []
try:
next(idx
for idx in range(len(palette))
if palette[idx] == current_col)
except StopIteration:
idx = color_nb
palette.append(current_col)
color_nb = color_nb + 1
self.fill_value = [idx] + fill_alpha
new_chn.mask = selfmask
self.palette = palette
if alpha is None:
self.channels = [new_chn]
else:
self.channels = [new_chn, alpha]
self.mode = mode | def _to_p(self, mode) | Convert the image to P or PA mode. | 2.45583 | 2.345561 | 1.047012 |
self._check_modes(("P", "PA"))
if self.mode.endswith("A"):
alpha = self.channels[-1]
else:
alpha = None
chans = []
cdfs = []
color_chan = self.channels[0]
# Build one lookup table per color component and interpolate the index
# channel through it to recover the component values.
for i in range(len(self.palette[0])):
cdfs.append(np.zeros(len(self.palette)))
for j in range(len(self.palette)):
cdfs[i][j] = self.palette[j][i]
new_chn = np.ma.array(np.interp(color_chan,
np.arange(len(self.palette)),
cdfs[i]),
mask=color_chan.mask)
chans.append(new_chn)
# The fill value index is resolved back to its palette color.
if self.fill_value is not None:
if alpha is not None:
fill_alpha = self.fill_value[-1]
self.fill_value = list(self.palette[int(self.fill_value[0])])
self.fill_value += [fill_alpha]
else:
self.fill_value = list(self.palette[int(self.fill_value[0])])
# Restore the mode remembered by _to_p, re-attaching alpha if present.
self.mode = self._secondary_mode
self.channels = chans
if alpha is not None:
self.channels.append(alpha)
self.mode = self.mode + "A"
self.convert(mode) | def _from_p(self, mode) | Convert the image from P or PA mode. | 3.020822 | 2.775285 | 1.088473 |
self._check_modes(("L", "LA"))
# Duplicate the single luminance band into the G and B positions.
self.channels.append(self.channels[0].copy())
self.channels.append(self.channels[0].copy())
if self.fill_value is not None:
self.fill_value = self.fill_value[:1] * 3 + self.fill_value[1:]
if self.mode == "LA":
# Swap so alpha ends up last: [L, A, L, L] -> [L, L, L, A].
self.channels[1], self.channels[3] = \
self.channels[3], self.channels[1]
self.mode = mode | def _l2rgb(self, mode) | Convert from L (black and white) to RGB. | 3.035107 | 2.925617 | 1.037425 |
self._check_modes(("RGB", "RGBA"))
# Luma weights: kb for blue, kr for red (same constants as rgb2ycbcr).
kb_ = 0.114
kr_ = 0.299
r__ = self.channels[0]
g__ = self.channels[1]
b__ = self.channels[2]
y__ = kr_ * r__ + (1 - kr_ - kb_) * g__ + kb_ * b__
if self.fill_value is not None:
# Only the luma component of the fill value survives the conversion.
self.fill_value = ([rgb2ycbcr(self.fill_value[0],
self.fill_value[1],
self.fill_value[2])[0]] +
self.fill_value[3:])
self.channels = [y__] + self.channels[3:]
self.mode = mode | def _rgb2l(self, mode) | Convert from RGB to monochrome L. | 3.413412 | 3.277323 | 1.041524 |
self._check_modes(("YCbCr", "YCbCrA"))
# Keep luma (and alpha, if present); discard the two chroma channels.
self.channels = [self.channels[0]] + self.channels[3:]
if self.fill_value is not None:
self.fill_value = [self.fill_value[0]] + self.fill_value[3:]
self.mode = mode | def _ycbcr2l(self, mode) | Convert from YCbCr to L. | 3.38432 | 3.244986 | 1.042938 |
self._check_modes(("L", "LA"))
luma = self.channels[0]
# Zero chroma channels, sharing the luma mask.
zeros = np.ma.zeros(luma.shape)
zeros.mask = luma.mask
self.channels = [luma, zeros, zeros] + self.channels[1:]
if self.fill_value is not None:
self.fill_value = [self.fill_value[0], 0, 0] + self.fill_value[1:]
self.mode = mode | def _l2ycbcr(self, mode) | Convert from L to YCbCr. | 3.691129 | 3.569645 | 1.034033 |
# Already in the requested mode: nothing to do.
if mode == self.mode:
return
if mode not in ["L", "LA", "RGB", "RGBA",
"YCbCr", "YCbCrA", "P", "PA"]:
raise ValueError("Mode %s not recognized." % (mode))
if self.is_empty():
self.mode = mode
return
# Pure alpha add/remove is handled directly ...
if mode == self.mode + "A":
self.channels.append(np.ma.ones(self.channels[0].shape))
if self.fill_value is not None:
self.fill_value += [1]
self.mode = mode
elif mode + "A" == self.mode:
self.channels = self.channels[:-1]
if self.fill_value is not None:
self.fill_value = self.fill_value[:-1]
self.mode = mode
# ... mixed alpha/color-space changes recurse via an intermediate mode.
elif mode.endswith("A") and not self.mode.endswith("A"):
self.convert(self.mode + "A")
self.convert(mode)
elif self.mode.endswith("A") and not mode.endswith("A"):
self.convert(self.mode[:-1])
self.convert(mode)
else:
# Dispatch table of direct (mode, mode) conversion helpers.
cases = {
"RGB": {"YCbCr": self._rgb2ycbcr,
"L": self._rgb2l,
"P": self._to_p},
"RGBA": {"YCbCrA": self._rgb2ycbcr,
"LA": self._rgb2l,
"PA": self._to_p},
"YCbCr": {"RGB": self._ycbcr2rgb,
"L": self._ycbcr2l,
"P": self._to_p},
"YCbCrA": {"RGBA": self._ycbcr2rgb,
"LA": self._ycbcr2l,
"PA": self._to_p},
"L": {"RGB": self._l2rgb,
"YCbCr": self._l2ycbcr,
"P": self._to_p},
"LA": {"RGBA": self._l2rgb,
"YCbCrA": self._l2ycbcr,
"PA": self._to_p},
"P": {"RGB": self._from_p,
"YCbCr": self._from_p,
"L": self._from_p},
"PA": {"RGBA": self._from_p,
"YCbCrA": self._from_p,
"LA": self._from_p}}
try:
cases[self.mode][mode](mode)
except KeyError:
raise ValueError("Conversion from %s to %s not implemented !"
% (self.mode, mode)) | def convert(self, mode) | Convert the current image to the given *mode*. See :class:`Image`
for a list of available modes. | 1.821751 | 1.816904 | 1.002668 |
# Normalize a scalar `channels` flag into one boolean per channel.
if not isinstance(channels, (tuple, list)):
channels = [channels] * len(self.channels)
for i in range(len(self.channels)):
if channels[i]:
self.channels[i] = np.ma.clip(self.channels[i], 0.0, 1.0) | def clip(self, channels=True) | Limit the values of the array to the default [0,1] range. *channels*
says which channels should be clipped. | 2.348543 | 2.234295 | 1.051134 |
if self.is_empty():
raise ValueError("Cannot resize an empty image")
# Per-axis: zoom (repeat pixels) when growing, decimate when shrinking.
factor = [1, 1]
zoom = [True, True]
zoom[0] = shape[0] >= self.height
zoom[1] = shape[1] >= self.width
if zoom[0]:
factor[0] = shape[0] * 1.0 / self.height
else:
factor[0] = self.height * 1.0 / shape[0]
if zoom[1]:
factor[1] = shape[1] * 1.0 / self.width
else:
factor[1] = self.width * 1.0 / shape[1]
# Only integer scale factors are supported.
if(int(factor[0]) != factor[0] or
int(factor[1]) != factor[1]):
raise ValueError("Resize not of integer factor!")
factor[0] = int(factor[0])
factor[1] = int(factor[1])
i = 0
for chn in self.channels:
# Rows: repeat each row factor[0] times, or keep every factor[0]-th.
if zoom[0]:
chn = chn.repeat([factor[0]] * chn.shape[0], axis=0)
else:
chn = chn[[idx * factor[0]
for idx in range(int(self.height / factor[0]))],
:]
# Columns: same treatment along axis 1.
if zoom[1]:
self.channels[i] = chn.repeat([factor[1]] * chn.shape[1],
axis=1)
else:
self.channels[i] = chn[:,
[idx * factor[1]
for idx in range(int(self.width /
factor[1]))]]
i = i + 1
# Refresh the cached geometry from the resized data.
self.height = self.channels[0].shape[0]
self.width = self.channels[0].shape[1]
self.shape = self.channels[0].shape | def resize(self, shape) | Resize the image to the given *shape* tuple, in place. For zooming,
nearest neighbour method is used, while for shrinking, decimation is
used. Therefore, *shape* must be a multiple or a divisor of the image
shape. | 1.941227 | 1.928645 | 1.006524 |
if self.is_empty():
return
# Allow a larger luminance with the same aspect ratio by zooming the
# image up to match; anything else is rejected.
if luminance.shape != self.channels[0].shape:
if ((luminance.shape[0] * 1.0 / luminance.shape[1]) ==
(self.channels[0].shape[0] * 1.0 / self.channels[0].shape[1])):
if luminance.shape[0] > self.channels[0].shape[0]:
self.resize(luminance.shape)
else:
raise NameError("Luminance smaller than the image !")
else:
raise NameError("Not the good shape !")
# Round-trip through YCbCr(A) so channel 0 is the luma component.
mode = self.mode
if mode.endswith("A"):
self.convert("YCbCrA")
self.channels[0] = luminance
self.convert(mode)
else:
self.convert("YCbCr")
self.channels[0] = luminance
self.convert(mode) | def replace_luminance(self, luminance) | Replace the Y channel of the image by the array *luminance*. If the
image is not in YCbCr mode, it is converted automatically to and
from that mode. | 2.708531 | 2.617086 | 1.034941 |
def enhance(self, inverse=False, gamma=1.0, stretch="no",
            stretch_parameters=None, **kwargs):
    """Image enhancement function.

    Applies, **in this order**, inversion, stretching, and gamma
    correction to the current image, with parameters *inverse* (see
    :meth:`Image.invert`), *stretch* (see :meth:`Image.stretch`), and
    *gamma* (see :meth:`Image.gamma`).  Any extra keyword arguments are
    merged into *stretch_parameters* and forwarded to the stretch.
    """
    self.invert(inverse)
    # Merge into a copy: the previous code updated the caller-supplied
    # stretch_parameters dict in place, leaking kwargs back to the caller.
    params = {} if stretch_parameters is None else dict(stretch_parameters)
    params.update(kwargs)
    self.stretch(stretch, **params)
    self.gamma(gamma)
def gamma(self, gamma=1.0):
    """Apply gamma correction to the channels of the image.

    If *gamma* is a tuple it must hold one value per channel and the
    correction is applied elementwise.  A scalar is applied to every
    channel.  The behaviour of :func:`gamma` is undefined outside the
    normal [0, 1] range of the channels.
    """
    if (isinstance(gamma, (list, tuple, set)) and
            len(gamma) != len(self.channels)):
        raise ValueError("Number of channels and gamma components differ.")
    if isinstance(gamma, (tuple, list)):
        per_channel = list(gamma)
    else:
        per_channel = [gamma] * len(self.channels)
    for idx, gam in enumerate(per_channel):
        gam = float(gam)
        if gam < 0:
            raise ValueError("Gamma correction must be a positive number.")
        logger.debug("Applying gamma %f", gam)
        if gam == 1.0:
            # Identity correction: nothing to do for this channel.
            continue
        chn = self.channels[idx]
        if isinstance(chn, np.ma.core.MaskedArray):
            # Correct the raw data only; keep the mask unchanged.
            if ne:
                # numexpr path — presumably faster on large arrays.
                corrected = ne.evaluate("data ** (1.0 / gamma)",
                                        local_dict={"data": chn.data,
                                                    'gamma': gam})
            else:
                corrected = chn.data ** (1.0 / gam)
            self.channels[idx] = np.ma.array(corrected, mask=chn.mask,
                                             copy=False)
        else:
            # Plain arrays: leave negative values untouched to avoid NaNs.
            self.channels[idx] = np.where(chn >= 0,
                                          chn ** (1.0 / gam),
                                          chn)
def stretch(self, stretch="crude", **kwargs):
    """Apply stretching to the current image.

    The value of *stretch* selects the enhancement: "histogram",
    "linear", and "crude" (or "crude-stretch") perform respectively
    histogram equalization, contrast stretching (0.5% cutoff on both
    sides, the :meth:`stretch_linear` default), and contrast stretching
    without cutoff.  "logarithmic" or "log" does a logarithmic
    enhancement towards white.  A 2-tuple or 2-list of values in
    [0.0, 1.0] performs a contrast stretch with those cutoffs.
    """
    logger.debug("Applying stretch %s with parameters %s",
                 stretch, str(kwargs))
    ch_len = len(self.channels)
    if self.mode.endswith("A"):
        # Never stretch the alpha channel.
        ch_len -= 1
    if isinstance(stretch, (tuple, list)):
        if len(stretch) != 2:
            raise ValueError(
                "Stretch tuple must have exactly two elements")
        for chn_idx in range(ch_len):
            self.stretch_linear(chn_idx, cutoffs=stretch, **kwargs)
        return
    if not isinstance(stretch, str):
        raise TypeError("Stretch parameter must be a string or a tuple.")
    if stretch == "no":
        return
    dispatch = {"linear": self.stretch_linear,
                "histogram": self.stretch_hist_equalize,
                "crude": self.crude_stretch,
                "crude-stretch": self.crude_stretch,
                "log": self.stretch_logarithmic,
                "logarithmic": self.stretch_logarithmic}
    if stretch not in dispatch:
        raise ValueError("Stretching method %s not recognized." % stretch)
    method = dispatch[stretch]
    for chn_idx in range(ch_len):
        method(chn_idx, **kwargs)
def invert(self, invert=True):
    """Invert all the channels of the image according to *invert*.

    If *invert* is a tuple or a list, elementwise inversion is
    performed; otherwise all channels are inverted if *invert* is true
    (default).  Note: 'inverting' means that black becomes white, and
    vice-versa — the values are not negated!
    """
    if (isinstance(invert, (tuple, list)) and
            len(self.channels) != len(invert)):
        raise ValueError(
            "Number of channels and invert components differ.")
    logger.debug("Applying invert with parameters %s", str(invert))
    if isinstance(invert, (tuple, list)):
        flags = invert
    else:
        flags = [invert] * len(self.channels)
    for idx, (chn, flag) in enumerate(zip(self.channels, flags)):
        if flag:
            # Channels are assumed normalized to [0, 1], so 1 - x flips
            # black and white.
            self.channels[idx] = 1 - chn
def stretch_hist_equalize(self, ch_nb):
    """Stretch the colors of channel *ch_nb* by histogram equalization."""
    logger.info("Perform a histogram equalized contrast stretch.")
    channel = self.channels[ch_nb]
    if channel.size == np.ma.count_masked(channel):
        # Every value is masked: nothing to equalize.
        logger.warning("Nothing to stretch !")
        return
    valid = channel.compressed()
    # Target CDF sampled at 2048 evenly spaced levels in [0, 1).
    cdf = np.arange(0.0, 1.0, 1 / 2048.0)
    logger.debug("Make histogram bins having equal amount of data, " +
                 "using numpy percentile function:")
    # Data percentiles at those levels give equal-count bin edges.
    bins = np.percentile(valid, list(cdf * 100))
    equalized = np.ma.empty_like(channel)
    equalized.mask = np.ma.getmaskarray(channel)
    # Map each unmasked value to its (approximate) CDF position.
    equalized[~equalized.mask] = np.interp(valid, bins, cdf)
    self.channels[ch_nb] = equalized
def stretch_logarithmic(self, ch_nb, factor=100.):
    """Move data into range [1:factor] and do a normalized logarithmic
    enhancement of channel *ch_nb*.
    """
    logger.debug("Perform a logarithmic contrast stretch.")
    channel = self.channels[ch_nb]
    if (channel.size == np.ma.count_masked(channel) or
            channel.min() == channel.max()):
        # Fully masked or constant data: no dynamic range to stretch.
        logger.warning("Nothing to stretch !")
        return
    crange = (0., 1.0)
    # log scaling chosen so [1, factor] maps onto crange.
    b__ = float(crange[1] - crange[0]) / np.log(factor)
    c__ = float(crange[0])
    slope = (factor - 1.) / float(channel.max() - channel.min())
    # Linear map of the data into [1, factor], then take the log.
    scaled = 1. + (channel - channel.min()) * slope
    self.channels[ch_nb] = c__ + b__ * np.log(scaled)
def stretch_linear(self, ch_nb, cutoffs=(0.005, 0.005)):
    """Stretch linearly the contrast of the current image on channel
    *ch_nb*, using *cutoffs* for left and right trimming.
    """
    logger.debug("Perform a linear contrast stretch.")
    channel = self.channels[ch_nb]
    if (channel.size == np.ma.count_masked(channel) or
            channel.min() == channel.max()):
        # Fully masked or constant data: nothing to do.
        logger.warning("Nothing to stretch !")
        return
    valid = channel.compressed()
    logger.debug("Calculate the histogram percentiles: ")
    logger.debug("Left and right percentiles: " +
                 str(cutoffs[0] * 100) + " " + str(cutoffs[1] * 100))
    # Trim cutoffs[0] of the data on the left and cutoffs[1] on the right.
    left, right = np.percentile(
        valid, [cutoffs[0] * 100, 100. - cutoffs[1] * 100])
    delta_x = (right - left)
    logger.debug("Interval: left=%f, right=%f width=%f",
                 left, right, delta_x)
    if delta_x <= 0.0:
        logger.warning("Unable to make a contrast stretch!")
        return
    self.channels[ch_nb] = np.ma.array((channel - left) / delta_x,
                                       mask=channel.mask)
def crude_stretch(self, ch_nb, min_stretch=None, max_stretch=None):
    """Perform simple linear stretching (without any cutoff) on the
    channel *ch_nb* of the current image and normalize to the [0, 1]
    range.

    *min_stretch* and *max_stretch* default to the channel extrema;
    each may also be a list/tuple indexed by channel number.
    """
    if min_stretch is None:
        min_stretch = self.channels[ch_nb].min()
    if max_stretch is None:
        max_stretch = self.channels[ch_nb].max()
    if isinstance(min_stretch, (list, tuple)):
        min_stretch = min_stretch[ch_nb]
    if isinstance(max_stretch, (list, tuple)):
        max_stretch = max_stretch[ch_nb]
    if (not self.channels[ch_nb].mask.all() and
            abs(max_stretch - min_stretch) > 0):
        # np.float was removed in NumPy 1.24; the builtin float gives
        # the same dtype (float64).
        stretched = self.channels[ch_nb].data.astype(float)
        stretched -= min_stretch
        stretched /= max_stretch - min_stretch
        self.channels[ch_nb] = np.ma.array(stretched,
                                           mask=self.channels[ch_nb].mask,
                                           copy=False)
    else:
        logger.warning("Nothing to stretch !")
def colorize(self, colormap):
    """Colorize the current image using *colormap*.

    Works only on "L" or "LA" images; any alpha channel is preserved.
    """
    if self.mode not in ("L", "LA"):
        raise ValueError("Image should be grayscale to colorize")
    alpha = self.channels[1] if self.mode == "LA" else None
    # colormap.colorize replaces the single L channel with color bands.
    self.channels = colormap.colorize(self.channels[0])
    if alpha is None:
        self.mode = "RGB"
    else:
        self.channels.append(alpha)
        self.mode = "RGBA"
def palettize(self, colormap):
    """Palettize the current image using *colormap*.

    Works only on "L" or "LA" images.
    """
    if self.mode not in ("L", "LA"):
        raise ValueError("Image should be grayscale to colorize")
    # colormap.palettize returns the index channel and the palette.
    self.channels[0], self.palette = colormap.palettize(self.channels[0])
    self.mode = "PA" if self.mode == "LA" else "P"
def lab2xyz(l__, a__, b__):
    """Convert L*a*b* to XYZ, L*a*b* expressed within [0, 1]."""
    def finv(arr):
        # Inverse of the CIE f() companding function.
        return np.where(arr > 6.0 / 29.0,
                        arr ** 3,
                        3 * (6.0 / 29.0) * (6.0 / 29.0) * (arr - 4.0 / 29.0))
    fy_ = (l__ + 16.0) / 116.0
    # 95.047 / 100.0 / 108.883 is the D65 reference white in XYZ.
    x__ = 95.047 * finv(fy_ + a__ / 500.0)
    y__ = 100.0 * finv(fy_)
    z__ = 108.883 * finv(fy_ - b__ / 200.0)
    return x__, y__, z__
def xyz2lab(x__, y__, z__):
    """Convert XYZ to L*a*b*."""
    def compand(arr):
        # CIE f() companding: cube root above the linear cutoff.
        return np.where(arr > 216.0 / 24389.0,
                        arr ** (1.0 / 3.0),
                        (1.0 / 3.0) * (29.0 / 6.0) * (29.0 / 6.0) * arr
                        + 4.0 / 29.0)
    # Normalize by the D65 reference white (95.047, 100.0, 108.883).
    fy_ = compand(y__ / 100.0)
    l__ = 116 * fy_ - 16
    a__ = 500.0 * (compand(x__ / 95.047) - fy_)
    b__ = 200.0 * (fy_ - compand(z__ / 108.883))
    return l__, a__, b__
def hcl2lab(h__, c__, l__):
    """HCL to L*a*b* (custom lightness/radius scaling, presumably to keep
    HCL components within [0, 1] — see lab2hcl for the inverse).
    """
    hue_rad = np.deg2rad(h__)
    lightness = l__ * 61 + 9
    # Hue is measured clockwise from pi/3.
    angle = np.pi / 3.0 - hue_rad
    radius = (l__ * 311 + 125) * c__
    a__ = np.sin(angle) * radius
    b__ = np.cos(angle) * radius
    return lightness, a__, b__
def lab2hcl(l__, a__, b__):
    """L*a*b* to HCL (inverse of the scaling used in hcl2lab)."""
    lightness = (l__ - 9) / 61.0
    radius = np.sqrt(a__ * a__ + b__ * b__)
    saturation = radius / (lightness * 311 + 125)
    angle = np.arctan2(a__, b__)
    # Hue in degrees, measured clockwise from pi/3, wrapped to [0, 360).
    hue = np.rad2deg(np.pi / 3 - angle) % 360
    return hue, saturation, lightness
def rgb2xyz(r__, g__, b__):
    """RGB to XYZ, for 8-bit (0-255) RGB input."""
    def linearize(arr):
        # Inverse sRGB companding (0.04045 cutoff, 2.4 exponent).
        return np.where(arr > 0.04045,
                        ((arr + 0.055) / 1.055) ** 2.4,
                        arr / 12.92)
    red = linearize(r__ / 255.0) * 100
    green = linearize(g__ / 255.0) * 100
    blue = linearize(b__ / 255.0) * 100
    # Linear sRGB -> XYZ matrix (D65).
    x__ = red * 0.4124 + green * 0.3576 + blue * 0.1805
    y__ = red * 0.2126 + green * 0.7152 + blue * 0.0722
    z__ = red * 0.0193 + green * 0.1192 + blue * 0.9505
    return x__, y__, z__
def xyz2rgb(x__, y__, z__):
    """XYZ colorspace to RGB (0-255 output)."""
    xs_ = x__ / 100.0
    ys_ = y__ / 100.0
    zs_ = z__ / 100.0
    # XYZ -> linear sRGB matrix (D65).
    red = xs_ * 3.2406 + ys_ * -1.5372 + zs_ * -0.4986
    green = xs_ * -0.9689 + ys_ * 1.8758 + zs_ * 0.0415
    blue = xs_ * 0.0557 + ys_ * -0.2040 + zs_ * 1.0570
    def compand(arr):
        # Forward sRGB companding (0.0031308 cutoff).
        return np.where(arr > 0.0031308,
                        1.055 * (arr ** (1.0 / 2.4)) - 0.055,
                        12.92 * arr)
    return compand(red) * 255, compand(green) * 255, compand(blue) * 255
def purge_old_user_tasks():
    """Delete any UserTaskStatus and UserTaskArtifact records older than
    ``settings.USER_TASKS_MAX_AGE``.

    Intended to be run as a scheduled task.
    """
    cutoff = now() - settings.USER_TASKS_MAX_AGE
    # UserTaskArtifact rows are removed too, via deletion cascading.
    UserTaskStatus.objects.filter(created__lt=cutoff).delete()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.