sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def get_private_room_history(self, room_id, oldest=None, **kwargs):
"""
Get various history of specific private group in this case private
:param room_id:
:param kwargs:
:return:
"""
return GetPrivateRoomHistory(settings=self.settings, **kwargs).call(
room_id=room_id,
oldest=oldest,
**kwargs
) | Get various history of specific private group in this case private
:param room_id:
:param kwargs:
:return: | entailment |
def get_public_rooms(self, **kwargs):
"""
Get a listing of all public rooms with their names and IDs
"""
return GetPublicRooms(settings=self.settings, **kwargs).call(**kwargs) | Get a listing of all public rooms with their names and IDs | entailment |
def get_room_info(self, room_id, **kwargs):
"""
Get various information about a specific channel/room
:param room_id:
:param kwargs:
:return:
"""
return GetRoomInfo(settings=self.settings, **kwargs).call(
room_id=room_id,
**kwargs
) | Get various information about a specific channel/room
:param room_id:
:param kwargs:
:return: | entailment |
def upload_file(self, room_id, description, file, message, **kwargs):
"""
Upload file to room
:param room_id:
:param description:
:param file:
:param kwargs:
:return:
"""
return UploadFile(settings=self.settings, **kwargs).call(
room_id=room_id,
description=description,
file=file,
message=message,
**kwargs
) | Upload file to room
:param room_id:
:param description:
:param file:
:param kwargs:
:return: | entailment |
def get_private_room_info(self, room_id, **kwargs):
"""
Get various information about a specific private group
:param room_id:
:param kwargs:
:return:
"""
return GetPrivateRoomInfo(settings=self.settings, **kwargs).call(
room_id=room_id,
**kwargs
) | Get various information about a specific private group
:param room_id:
:param kwargs:
:return: | entailment |
def get_room_id(self, room_name, **kwargs):
"""
Get room ID
:param room_name:
:param kwargs:
:return:
"""
return GetRoomId(settings=self.settings, **kwargs).call(
room_name=room_name,
**kwargs
) | Get room ID
:param room_name:
:param kwargs:
:return: | entailment |
def get_room_history(
self,
room_id,
oldest=None,
latest=datetime.now(),
inclusive=False,
count=20,
unreads=False,
**kwargs
):
"""
Get various history of specific channel/room
:param room_id:
:param kwargs:
:return:
"""
return GetRoomHistory(settings=self.settings, **kwargs).call(
room_id=room_id,
oldest=oldest,
latest=latest,
inclusive=inclusive,
count=count,
unreads=unreads,
**kwargs
) | Get various history of specific channel/room
:param room_id:
:param kwargs:
:return: | entailment |
def create_public_room(self, name, **kwargs):
"""
Create room with given name
:param name: Room name
:param kwargs:
members: The users to add to the channel when it is created.
Optional; Ex.: ["rocket.cat"], Default: []
read_only: Set if the channel is read only or not.
Optional; Ex.: True, Default: False
:return:
"""
return CreatePublicRoom(settings=self.settings, **kwargs).call(name=name, **kwargs) | Create room with given name
:param name: Room name
:param kwargs:
members: The users to add to the channel when it is created.
Optional; Ex.: ["rocket.cat"], Default: []
read_only: Set if the channel is read only or not.
Optional; Ex.: True, Default: False
:return: | entailment |
def delete_public_room(self, room_id, **kwargs):
"""
Delete room with given ID
:param room_id: Room ID
:param kwargs:
:return:
"""
return DeletePublicRoom(settings=self.settings, **kwargs).call(room_id=room_id, **kwargs) | Delete room with given ID
:param room_id: Room ID
:param kwargs:
:return: | entailment |
def get_users(self, **kwargs):
"""
Gets all of the users in the system and their information
:param kwargs:
:return:
"""
return GetUsers(settings=self.settings, **kwargs).call(**kwargs) | Gets all of the users in the system and their information
:param kwargs:
:return: | entailment |
def get_user_info(self, user_id, **kwargs):
"""
Retrieves information about a user,
the result is only limited to what the callee has access to view.
:param user_id:
:param kwargs:
:return:
"""
return GetUserInfo(settings=self.settings, **kwargs).call(
user_id=user_id,
**kwargs
) | Retrieves information about a user,
the result is only limited to what the callee has access to view.
:param user_id:
:param kwargs:
:return: | entailment |
def create_user(self, email, name, password, username, **kwargs):
"""
Create user
:param email: E-mail
:param name: Full name
:param password: Password
:param username: Username
:param kwargs:
active:
roles:
join_default_channels:
require_password_change:
send_welcome_email:
verified:
custom_fields:
:return:
"""
return CreateUser(settings=self.settings, **kwargs).call(
email=email,
name=name,
password=password,
username=username,
**kwargs
) | Create user
:param email: E-mail
:param name: Full name
:param password: Password
:param username: Username
:param kwargs:
active:
roles:
join_default_channels:
require_password_change:
send_welcome_email:
verified:
custom_fields:
:return: | entailment |
def delete_user(self, user_id, **kwargs):
"""
Delete user
:param user_id: User ID
:param kwargs:
:return:
"""
return DeleteUser(settings=self.settings, **kwargs).call(user_id=user_id, **kwargs) | Delete user
:param user_id: User ID
:param kwargs:
:return: | entailment |
def get_index(cls, model, via_class=False):
'''
Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
except KeyError:
raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model)) | Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated. | entailment |
def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model)) | Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated. | entailment |
def get_models(cls, index, as_class=False):
'''
Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string.
'''
try:
return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index)) | Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string. | entailment |
def get_model_indices(cls, index):
'''
Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name.
'''
try:
return cls._idx_name_to_mdl_to_mdlidx[index].values()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index)) | Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name. | entailment |
def map_raw_results(cls, raw_results, instance=None):
'''
Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch.
'''
# Let's iterate over the results and determine the appropriate mapping.
model_results = defaultdict(list)
# Initializing the list to the number of returned results. This allows us to restore each item in its position.
if hasattr(raw_results, 'hits'):
results = [None] * len(raw_results.hits)
else:
results = [None] * len(raw_results)
found_results = {}
for pos, result in enumerate(raw_results):
model_name = result.meta.doc_type
if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
results[pos] = result
else:
meta = Bungiesearch.get_model_index(model_name).Meta
model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
# Now that we have model ids per model name, let's fetch everything at once.
for ref_name, ids in iteritems(model_results):
index_name, model_name = ref_name.split('.')
model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
model_obj = model_idx.get_model()
items = model_obj.objects.filter(pk__in=ids)
if instance:
if instance._only == '__model' or model_idx.optimize_queries:
desired_fields = model_idx.fields_to_fetch
elif instance._only == '__fields':
desired_fields = instance._fields
else:
desired_fields = instance._only
if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
items = items.only(
*[field.name
for field in model_obj._meta.get_fields()
# For complete backwards compatibility, you may want to exclude
# GenericForeignKey from the results.
if field.name in desired_fields and \
not (field.many_to_one and field.related_model is None)
]
)
# Let's reposition each item in the results and set the _searchmeta meta information.
for item in items:
pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
item._searchmeta = meta
results[pos] = item
return results | Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch. | entailment |
def _clone(self):
'''
Must clone additional fields to those cloned by elasticsearch-dsl-py.
'''
instance = super(Bungiesearch, self)._clone()
instance._raw_results_only = self._raw_results_only
return instance | Must clone additional fields to those cloned by elasticsearch-dsl-py. | entailment |
def execute(self, return_results=True):
'''
Executes the query and attempts to create model objects from results.
'''
if self.results:
return self.results if return_results else None
self.execute_raw()
if self._raw_results_only:
self.results = self.raw_results
else:
self.map_results()
if return_results:
return self.results | Executes the query and attempts to create model objects from results. | entailment |
def only(self, *fields):
'''
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
'''
s = self._clone()
if len(fields) == 1 and fields[0] == '__model':
s._only = '__model'
else:
s._only = fields
return s | Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex. | entailment |
def hook_alias(self, alias, model_obj=None):
'''
Returns the alias function, if it exists and if it can be applied to this model.
'''
try:
search_alias = self._alias_hooks[alias]
except KeyError:
raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
else:
if search_alias._applicable_models and \
((model_obj and model_obj not in search_alias._applicable_models) or \
not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
return search_alias.prepare(self, model_obj).alias_for | Returns the alias function, if it exists and if it can be applied to this model. | entailment |
def custom_search(self, index, doc_type):
'''
Performs a search on a custom elasticsearch index and mapping. Will not attempt to map result objects.
'''
from bungiesearch import Bungiesearch
return Bungiesearch(raw_results=True).index(index).doc_type(doc_type) | Performs a search on a custom elasticsearch index and mapping. Will not attempt to map result objects. | entailment |
def contribute_to_class(self, cls, name):
'''
Sets up the signal processor. Since self.model is not available
in the constructor, we perform this operation here.
'''
super(BungiesearchManager, self).contribute_to_class(cls, name)
from . import Bungiesearch
from .signals import get_signal_processor
settings = Bungiesearch.BUNGIE
if 'SIGNALS' in settings:
self.signal_processor = get_signal_processor()
self.signal_processor.setup(self.model) | Sets up the signal processor. Since self.model is not available
in the constructor, we perform this operation here. | entailment |
def django_field_to_index(field, **attr):
'''
Returns the index field type that would likely be associated with each Django type.
'''
dj_type = field.get_internal_type()
if dj_type in ('DateField', 'DateTimeField'):
return DateField(**attr)
elif dj_type in ('BooleanField', 'NullBooleanField'):
return BooleanField(**attr)
elif dj_type in ('DecimalField', 'FloatField'):
return NumberField(coretype='float', **attr)
elif dj_type in ('PositiveSmallIntegerField', 'SmallIntegerField'):
return NumberField(coretype='short', **attr)
elif dj_type in ('IntegerField', 'PositiveIntegerField', 'AutoField'):
return NumberField(coretype='integer', **attr)
elif dj_type in ('BigIntegerField'):
return NumberField(coretype='long', **attr)
return StringField(**attr) | Returns the index field type that would likely be associated with each Django type. | entailment |
def value(self, obj):
'''
Computes the value of this field to update the index.
:param obj: object instance, as a dictionary or as a model instance.
'''
if self.template_name:
t = loader.select_template([self.template_name])
return t.render(Context({'object': obj}))
if self.eval_func:
try:
return eval(self.eval_func)
except Exception as e:
raise type(e)('Could not compute value of {} field (eval_as=`{}`): {}.'.format(unicode(self), self.eval_func, unicode(e)))
elif self.model_attr:
if isinstance(obj, dict):
return obj[self.model_attr]
current_obj = getattr(obj, self.model_attr)
if callable(current_obj):
return current_obj()
else:
return current_obj
else:
raise KeyError('{0} gets its value via a model attribute, an eval function, a template, or is prepared in a method '
'call but none of `model_attr`, `eval_as,` `template,` `prepare_{0}` is provided.'.format(unicode(self))) | Computes the value of this field to update the index.
:param obj: object instance, as a dictionary or as a model instance. | entailment |
def split_command(cmd, posix=None):
'''
- cmd is string list -> nothing to do
- cmd is string -> split it using shlex
:param cmd: string ('ls -l') or list of strings (['ls','-l'])
:rtype: string list
'''
if not isinstance(cmd, string_types):
# cmd is string list
pass
else:
if not PY3:
# cmd is string
# The shlex module currently does not support Unicode input (in
# 2.x)!
if isinstance(cmd, unicode):
try:
cmd = unicodedata.normalize(
'NFKD', cmd).encode('ascii', 'strict')
except UnicodeEncodeError:
raise EasyProcessUnicodeError('unicode command "%s" can not be processed.' % cmd +
'Use string list instead of string')
log.debug('unicode is normalized')
if posix is None:
posix = 'win' not in sys.platform
cmd = shlex.split(cmd, posix=posix)
return cmd | - cmd is string list -> nothing to do
- cmd is string -> split it using shlex
:param cmd: string ('ls -l') or list of strings (['ls','-l'])
:rtype: string list | entailment |
def get_mapping(self, meta_fields=True):
'''
Returns the mapping for the index as a dictionary.
:param meta_fields: Also include elasticsearch meta fields in the dictionary.
:return: a dictionary which can be used to generate the elasticsearch index mapping for this doctype.
'''
return {'properties': dict((name, field.json()) for name, field in iteritems(self.fields) if meta_fields or name not in AbstractField.meta_fields)} | Returns the mapping for the index as a dictionary.
:param meta_fields: Also include elasticsearch meta fields in the dictionary.
:return: a dictionary which can be used to generate the elasticsearch index mapping for this doctype. | entailment |
def collect_analysis(self):
'''
:return: a dictionary which is used to get the serialized analyzer definition from the analyzer class.
'''
analysis = {}
for field in self.fields.values():
for analyzer_name in ('analyzer', 'index_analyzer', 'search_analyzer'):
if not hasattr(field, analyzer_name):
continue
analyzer = getattr(field, analyzer_name)
if not isinstance(analyzer, Analyzer):
continue
definition = analyzer.get_analysis_definition()
if definition is None:
continue
for key in definition:
analysis.setdefault(key, {}).update(definition[key])
return analysis | :return: a dictionary which is used to get the serialized analyzer definition from the analyzer class. | entailment |
def serialize_object(self, obj, obj_pk=None):
'''
Serializes an object for it to be added to the index.
:param obj: Object to be serialized. Optional if obj_pk is passed.
:param obj_pk: Object primary key. Superseded by `obj` if available.
:return: A dictionary representing the object as defined in the mapping.
'''
if not obj:
try:
# We're using `filter` followed by `values` in order to only fetch the required fields.
obj = self.model.objects.filter(pk=obj_pk).values(*self.fields_to_fetch)[0]
except Exception as e:
raise ValueError('Could not find object of primary key = {} in model {} (model index class {}). (Original exception: {}.)'.format(obj_pk, self.model, self.__class__.__name__, e))
serialized_object = {}
for name, field in iteritems(self.fields):
if hasattr(self, "prepare_%s" % name):
value = getattr(self, "prepare_%s" % name)(obj)
else:
value = field.value(obj)
serialized_object[name] = value
return serialized_object | Serializes an object for it to be added to the index.
:param obj: Object to be serialized. Optional if obj_pk is passed.
:param obj_pk: Object primary key. Superseded by `obj` if available.
:return: A dictionary representing the object as defined in the mapping. | entailment |
def _get_fields(self, fields, excludes, hotfixes):
'''
Given any explicit fields to include and fields to exclude, add
additional fields based on the associated model. If the field needs a hotfix, apply it.
'''
final_fields = {}
fields = fields or []
excludes = excludes or []
for f in self.model._meta.fields:
# If the field name is already present, skip
if f.name in self.fields:
continue
# If field is not present in explicit field listing, skip
if fields and f.name not in fields:
continue
# If field is in exclude list, skip
if excludes and f.name in excludes:
continue
# If field is a relation, skip.
if getattr(f, 'rel'):
continue
attr = {'model_attr': f.name}
if f.has_default():
attr['null_value'] = f.default
if f.name in hotfixes:
attr.update(hotfixes[f.name])
final_fields[f.name] = django_field_to_index(f, **attr)
return final_fields | Given any explicit fields to include and fields to exclude, add
additional fields based on the associated model. If the field needs a hotfix, apply it. | entailment |
def validate_items(self):
""" Validates the items in the backing array, including
performing type validation.
Sets the _typed property and clears the dirty flag as a side effect
Returns:
The typed array
"""
logger.debug(fmt("Validating {}", self))
from python_jsonschema_objects import classbuilder
if self.__itemtype__ is None:
return
type_checks = self.__itemtype__
if not isinstance(type_checks, (tuple, list)):
# we were given items = {'type': 'blah'} ; thus ensure the type for all data.
type_checks = [type_checks] * len(self.data)
elif len(type_checks) > len(self.data):
raise ValidationError(
"{1} does not have sufficient elements to validate against {0}"
.format(self.__itemtype__, self.data))
typed_elems = []
for elem, typ in zip(self.data, type_checks):
if isinstance(typ, dict):
for param, paramval in six.iteritems(typ):
validator = registry(param)
if validator is not None:
validator(paramval, elem, typ)
typed_elems.append(elem)
elif util.safe_issubclass(typ, classbuilder.LiteralValue):
val = typ(elem)
val.validate()
typed_elems.append(val)
elif util.safe_issubclass(typ, classbuilder.ProtocolBase):
if not isinstance(elem, typ):
try:
if isinstance(elem, (six.string_types, six.integer_types, float)):
val = typ(elem)
else:
val = typ(**util.coerce_for_expansion(elem))
except TypeError as e:
raise ValidationError("'{0}' is not a valid value for '{1}': {2}"
.format(elem, typ, e))
else:
val = elem
val.validate()
typed_elems.append(val)
elif util.safe_issubclass(typ, ArrayWrapper):
val = typ(elem)
val.validate()
typed_elems.append(val)
elif isinstance(typ, (classbuilder.TypeProxy, classbuilder.TypeRef)):
try:
if isinstance(elem, (six.string_types, six.integer_types, float)):
val = typ(elem)
else:
val = typ(**util.coerce_for_expansion(elem))
except TypeError as e:
raise ValidationError("'{0}' is not a valid value for '{1}': {2}"
.format(elem, typ, e))
else:
val.validate()
typed_elems.append(val)
self._dirty = False
self._typed = typed_elems
return typed_elems | Validates the items in the backing array, including
performing type validation.
Sets the _typed property and clears the dirty flag as a side effect
Returns:
The typed array | entailment |
def create(name, item_constraint=None, **addl_constraints):
""" Create an array validator based on the passed in constraints.
If item_constraint is a tuple, it is assumed that tuple validation
is being performed. If it is a class or dictionary, list validation
will be performed. Classes are assumed to be subclasses of ProtocolBase,
while dictionaries are expected to be basic types ('string', 'number', ...).
addl_constraints is expected to be key-value pairs of any of the other
constraints permitted by JSON Schema v4.
"""
logger.debug(fmt("Constructing ArrayValidator with {} and {}", item_constraint, addl_constraints))
from python_jsonschema_objects import classbuilder
klassbuilder = addl_constraints.pop("classbuilder", None)
props = {}
if item_constraint is not None:
if isinstance(item_constraint, (tuple, list)):
for i, elem in enumerate(item_constraint):
isdict = isinstance(elem, (dict,))
isklass = isinstance( elem, type) and util.safe_issubclass(
elem, (classbuilder.ProtocolBase, classbuilder.LiteralValue))
if not any([isdict, isklass]):
raise TypeError(
"Item constraint (position {0}) is not a schema".format(i))
elif isinstance(item_constraint, (classbuilder.TypeProxy, classbuilder.TypeRef)):
pass
elif util.safe_issubclass(item_constraint, ArrayWrapper):
pass
else:
isdict = isinstance(item_constraint, (dict,))
isklass = isinstance( item_constraint, type) and util.safe_issubclass(
item_constraint, (classbuilder.ProtocolBase, classbuilder.LiteralValue))
if not any([isdict, isklass]):
raise TypeError("Item constraint is not a schema")
if isdict and '$ref' in item_constraint:
if klassbuilder is None:
raise TypeError("Cannot resolve {0} without classbuilder"
.format(item_constraint['$ref']))
uri = item_constraint['$ref']
if uri in klassbuilder.resolved:
logger.debug(util.lazy_format(
"Using previously resolved object for {0}", uri))
else:
logger.debug(util.lazy_format("Resolving object for {0}", uri))
with klassbuilder.resolver.resolving(uri) as resolved:
# Set incase there is a circular reference in schema definition
klassbuilder.resolved[uri] = None
klassbuilder.resolved[uri] = klassbuilder.construct(
uri,
resolved,
(classbuilder.ProtocolBase,))
item_constraint = klassbuilder.resolved[uri]
elif isdict and item_constraint.get('type') == 'array':
# We need to create a sub-array validator.
item_constraint = ArrayWrapper.create(name + "#sub",
item_constraint=item_constraint[
'items'],
addl_constraints=item_constraint)
elif isdict and 'oneOf' in item_constraint:
# We need to create a TypeProxy validator
uri = "{0}_{1}".format(name, "<anonymous_list_type>")
type_array = []
for i, item_detail in enumerate(item_constraint['oneOf']):
if '$ref' in item_detail:
subtype = klassbuilder.construct(
util.resolve_ref_uri(
klassbuilder.resolver.resolution_scope,
item_detail['$ref']),
item_detail)
else:
subtype = klassbuilder.construct(
uri + "_%s" % i, item_detail)
type_array.append(subtype)
item_constraint = classbuilder.TypeProxy(type_array)
elif isdict and item_constraint.get('type') == 'object':
""" We need to create a ProtocolBase object for this anonymous definition"""
uri = "{0}_{1}".format(name, "<anonymous_list_type>")
item_constraint = klassbuilder.construct(
uri, item_constraint)
props['__itemtype__'] = item_constraint
strict = addl_constraints.pop('strict', False)
props['_strict_'] = strict
props.update(addl_constraints)
validator = type(str(name), (ArrayWrapper,), props)
return validator | Create an array validator based on the passed in constraints.
If item_constraint is a tuple, it is assumed that tuple validation
is being performed. If it is a class or dictionary, list validation
will be performed. Classes are assumed to be subclasses of ProtocolBase,
while dictionaries are expected to be basic types ('string', 'number', ...).
addl_constraints is expected to be key-value pairs of any of the other
constraints permitted by JSON Schema v4. | entailment |
def extendMarkdown(self, md, md_globals):
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.add('fenced_code_block',
SpecialFencePreprocessor(md),
">normalize_whitespace") | Add FencedBlockPreprocessor to the Markdown instance. | entailment |
def parse_requirements(path):
"""Rudimentary parser for the `requirements.txt` file
We just want to separate regular packages from links to pass them to the
`install_requires` and `dependency_links` params of the `setup()`
function properly.
"""
try:
print(os.path.join(os.path.dirname(__file__), *path.splitlines()))
requirements = map(str.strip, local_file(path).splitlines())
except IOError:
raise RuntimeError("Couldn't find the `requirements.txt' file :(")
links = []
pkgs = []
for req in requirements:
if not req:
continue
if 'http:' in req or 'https:' in req:
links.append(req)
name, version = re.findall("\#egg=([^\-]+)-(.+$)", req)[0]
pkgs.append('{0}=={1}'.format(name, version))
else:
pkgs.append(req)
return pkgs, links | Rudimentary parser for the `requirements.txt` file
We just want to separate regular packages from links to pass them to the
`install_requires` and `dependency_links` params of the `setup()`
function properly. | entailment |
def propmerge(into, data_from):
""" Merge JSON schema requirements into a dictionary """
newprops = copy.deepcopy(into)
for prop, propval in six.iteritems(data_from):
if prop not in newprops:
newprops[prop] = propval
continue
new_sp = newprops[prop]
for subprop, spval in six.iteritems(propval):
if subprop not in new_sp:
new_sp[subprop] = spval
elif subprop == 'enum':
new_sp[subprop] = set(spval) & set(new_sp[subprop])
elif subprop == 'type':
if spval != new_sp[subprop]:
raise TypeError("Type cannot conflict in allOf'")
elif subprop in ('minLength', 'minimum'):
new_sp[subprop] = (new_sp[subprop] if
new_sp[subprop] > spval else spval)
elif subprop in ('maxLength', 'maximum'):
new_sp[subprop] = (new_sp[subprop] if
new_sp[subprop] < spval else spval)
elif subprop == 'multipleOf':
if new_sp[subprop] % spval == 0:
new_sp[subprop] = spval
else:
raise AttributeError(
"Cannot set conflicting multipleOf values")
else:
new_sp[subprop] = spval
newprops[prop] = new_sp
return newprops | Merge JSON schema requirements into a dictionary | entailment |
def as_dict(self):
""" Return a dictionary containing the current values
of the object.
Returns:
(dict): The object represented as a dictionary
"""
out = {}
for prop in self:
propval = getattr(self, prop)
if hasattr(propval, 'for_json'):
out[prop] = propval.for_json()
elif isinstance(propval, list):
out[prop] = [getattr(x, 'for_json', lambda:x)() for x in propval]
elif isinstance(propval, (ProtocolBase, LiteralValue)):
out[prop] = propval.as_dict()
elif propval is not None:
out[prop] = propval
return out | Return a dictionary containing the current values
of the object.
Returns:
(dict): The object represented as a dictionary | entailment |
def from_json(cls, jsonmsg):
""" Create an object directly from a JSON string.
Applies general validation after creating the
object to check whether all required fields are
present.
Args:
jsonmsg (str): An object encoded as a JSON string
Returns:
An object of the generated type
Raises:
ValidationError: if `jsonmsg` does not match the schema
`cls` was generated from
"""
import json
msg = json.loads(jsonmsg)
obj = cls(**msg)
obj.validate()
return obj | Create an object directly from a JSON string.
Applies general validation after creating the
object to check whether all required fields are
present.
Args:
jsonmsg (str): An object encoded as a JSON string
Returns:
An object of the generated type
Raises:
ValidationError: if `jsonmsg` does not match the schema
`cls` was generated from | entailment |
def validate(self):
""" Applies all defined validation to the current
state of the object, and raises an error if
they are not all met.
Raises:
ValidationError: if validations do not pass
"""
missing = self.missing_property_names()
if len(missing) > 0:
raise validators.ValidationError(
"'{0}' are required attributes for {1}"
.format(missing, self.__class__.__name__))
for prop, val in six.iteritems(self._properties):
if val is None:
continue
if isinstance(val, ProtocolBase):
val.validate()
elif getattr(val, 'isLiteralClass', None) is True:
val.validate()
elif isinstance(val, list):
for subval in val:
subval.validate()
else:
# This object is of the wrong type, but just try setting it
# The property setter will enforce its correctness
# and handily coerce its type at the same time
setattr(self, prop, val)
return True | Applies all defined validation to the current
state of the object, and raises an error if
they are not all met.
Raises:
ValidationError: if validations do not pass | entailment |
def missing_property_names(self):
"""
Returns a list of properties which are required and missing.
Properties are excluded from this list if they are allowed to be null.
:return: list of missing properties.
"""
propname = lambda x: self.__prop_names__[x]
missing = []
for x in self.__required__:
# Allow the null type
propinfo = self.propinfo(propname(x))
null_type = False
if 'type' in propinfo:
type_info = propinfo['type']
null_type = (type_info == 'null'
or isinstance(type_info, (list, tuple))
and 'null' in type_info)
elif 'oneOf' in propinfo:
for o in propinfo['oneOf']:
type_info = o.get('type')
if type_info and type_info == 'null' \
or isinstance(type_info, (list, tuple)) \
and 'null' in type_info:
null_type = True
break
if (propname(x) not in self._properties and null_type) or \
(self._properties[propname(x)] is None and not null_type):
missing.append(x)
return missing | Returns a list of properties which are required and missing.
Properties are excluded from this list if they are allowed to be null.
:return: list of missing properties. | entailment |
def construct(self, uri, *args, **kw):
    """Debug-logging wrapper around ``_construct`` with a resolution cache.

    Returns the cached class for *uri* unless ``override=True`` is passed,
    in which case the class is rebuilt.
    """
    logger.debug(util.lazy_format("Constructing {0}", uri))
    # Only reuse the cache when 'override' is absent or explicitly False.
    if kw.get('override', False) is False and uri in self.resolved:
        logger.debug(util.lazy_format("Using existing {0}", uri))
        return self.resolved[uri]
    built = self._construct(uri, *args, **kw)
    logger.debug(util.lazy_format("Constructed {0}", built))
    return built
def _build_literal(self, nm, clsdata):
    """Create a ``LiteralValue`` subclass named *nm* for schema *clsdata*.

    :param nm: name for the generated class.
    :param clsdata: the literal's schema definition (title/default are read).
    :return: the newly created class.
    """
    propinfo = {
        '__literal__': clsdata,
        '__title__': clsdata.get('title'),
        '__default__': clsdata.get('default'),
    }
    return type(str(nm), (LiteralValue,), {'__propinfo__': propinfo})
def _build_object(self, nm, clsdata, parents, **kw):
    """Construct a ``ProtocolBase`` subclass for an object-typed schema.

    :param nm: URI/name of the class being built.
    :param clsdata: the (object-typed) schema definition.
    :param parents: base classes whose property info is merged in.
    :return: the newly created class.
    """
    logger.debug(util.lazy_format("Building object {0}", nm))
    # To support circular references, we tag objects that we're
    # currently building as "under construction"
    self.under_construction.add(nm)
    props = {}
    defaults = set()
    # Merge property definitions: parents first, then this schema's own.
    properties = {}
    for p in parents:
        properties = util.propmerge(properties, p.__propinfo__)
    if 'properties' in clsdata:
        properties = util.propmerge(properties, clsdata['properties'])
    name_translation = {}
    for prop, detail in properties.items():
        logger.debug(util.lazy_format("Handling property {0}.{1}", nm, prop))
        properties[prop]['raw_name'] = prop
        # '@'-prefixed schema names are not valid Python identifiers.
        name_translation[prop] = prop.replace('@', '')
        prop = name_translation[prop]
        if detail.get('default', None) is not None:
            defaults.add(prop)
        if detail.get('type', None) == 'object':
            # Inline (anonymous) object: build it under a synthetic URI.
            uri = "{0}/{1}_{2}".format(nm,
                                       prop, "<anonymous>")
            self.resolved[uri] = self.construct(
                uri,
                detail,
                (ProtocolBase,))
            props[prop] = make_property(prop,
                                        {'type': self.resolved[uri]},
                                        self.resolved[uri].__doc__)
            properties[prop]['type'] = self.resolved[uri]
        elif 'type' not in detail and '$ref' in detail:
            # Reference to another schema: resolve (or build) the target.
            ref = detail['$ref']
            uri = util.resolve_ref_uri(self.resolver.resolution_scope, ref)
            logger.debug(util.lazy_format(
                "Resolving reference {0} for {1}.{2}",
                ref, nm, prop
            ))
            if uri in self.resolved:
                typ = self.resolved[uri]
            else:
                typ = self.construct(uri, detail, (ProtocolBase,))
            props[prop] = make_property(prop,
                                        {'type': typ},
                                        typ.__doc__)
            properties[prop]['$ref'] = uri
            properties[prop]['type'] = typ
        elif 'oneOf' in detail:
            # Property may be any of several types.
            potential = self.resolve_classes(detail['oneOf'])
            logger.debug(util.lazy_format("Designating {0} as oneOf {1}", prop, potential))
            desc = detail[
                'description'] if 'description' in detail else ""
            props[prop] = make_property(prop,
                                        {'type': potential}, desc
                                        )
        elif 'type' in detail and detail['type'] == 'array':
            if 'items' in detail and isinstance(detail['items'], dict):
                if '$ref' in detail['items']:
                    # Array of referenced items.
                    uri = util.resolve_ref_uri(
                        self.resolver.resolution_scope,
                        detail['items']['$ref'])
                    typ = self.construct(uri, detail['items'])
                    constraints = copy.copy(detail)
                    constraints['strict'] = kw.get('strict')
                    propdata = {
                        'type': 'array',
                        'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create(
                            uri,
                            item_constraint=typ,
                            **constraints)}
                else:
                    # Array of inline-defined items (possibly a oneOf).
                    uri = "{0}/{1}_{2}".format(nm,
                                               prop, "<anonymous_field>")
                    try:
                        if 'oneOf' in detail['items']:
                            typ = TypeProxy([
                                self.construct(uri + "_%s" % i, item_detail)
                                if '$ref' not in item_detail else
                                self.construct(util.resolve_ref_uri(
                                    self.resolver.resolution_scope,
                                    item_detail['$ref']),
                                    item_detail)
                                for i, item_detail in enumerate(detail['items']['oneOf'])]
                            )
                        else:
                            typ = self.construct(uri, detail['items'])
                        constraints = copy.copy(detail)
                        constraints['strict'] = kw.get('strict')
                        propdata = {'type': 'array',
                                    'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create(
                                        uri,
                                        item_constraint=typ,
                                        **constraints)}
                    except NotImplementedError:
                        # Fall back to the raw item schema as the constraint.
                        typ = detail['items']
                        constraints = copy.copy(detail)
                        constraints['strict'] = kw.get('strict')
                        propdata = {'type': 'array',
                                    'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create(
                                        uri,
                                        item_constraint=typ,
                                        **constraints)}
            props[prop] = make_property(prop,
                                        propdata,
                                        typ.__doc__)
        elif 'items' in detail:
            # Tuple-style (positional) array: one type per position.
            typs = []
            for i, elem in enumerate(detail['items']):
                uri = "{0}/{1}/<anonymous_{2}>".format(nm, prop, i)
                typ = self.construct(uri, elem)
                typs.append(typ)
            props[prop] = make_property(prop,
                                        {'type': typs},
                                        )
        else:
            # Plain (literal-typed) property.
            desc = detail[
                'description'] if 'description' in detail else ""
            uri = "{0}/{1}".format(nm, prop)
            typ = self.construct(uri, detail)
            props[prop] = make_property(prop, {'type': typ}, desc)
    # If this object itself has a 'oneOf' designation, then
    # make the validation 'type' the list of potential objects.
    if 'oneOf' in clsdata:
        klasses = self.resolve_classes(clsdata['oneOf'])
        # Need a validation to check that it meets one of them
        props['__validation__'] = {'type': klasses}
    props['__extensible__'] = pattern_properties.ExtensibleValidator(
        nm,
        clsdata,
        self)
    props['__prop_names__'] = name_translation
    props['__propinfo__'] = properties
    required = set.union(*[p.__required__ for p in parents])
    if 'required' in clsdata:
        for prop in clsdata['required']:
            required.add(prop)
    # Fail fast on schemas that require properties they never define.
    invalid_requires = [req for req in required if req not in props['__propinfo__']]
    if len(invalid_requires) > 0:
        raise validators.ValidationError("Schema Definition Error: {0} schema requires "
                                         "'{1}', but properties are not defined"
                                         .format(nm, invalid_requires))
    props['__required__'] = required
    props['__has_default__'] = defaults
    if required and kw.get("strict"):
        props['__strict__'] = True
    props['__title__'] = clsdata.get('title')
    cls = type(str(nm.split('/')[-1]), tuple(parents), props)
    self.under_construction.remove(nm)
    return cls
def build_classes(self, strict=False, named_only=False, standardize_names=True):
    """
    Build all of the classes named in the JSONSchema.

    Class names will be transformed using inflection by default, so names
    with spaces in the schema will be camelcased, while names without
    spaces will have internal capitalization dropped. Thus "Home Address"
    becomes "HomeAddress", while "HomeAddress" becomes "Homeaddress" To
    disable this behavior, pass standardize_names=False, but be aware
    that accessing names with spaces from the namespace can be
    problematic.

    Args:
        strict: (bool) use this to validate required fields while creating the class
        named_only: (bool) If true, only properties with an actual title attribute will
            be included in the resulting namespace (although all will be generated).
        standardize_names: (bool) If true (the default), class names will be tranformed
            by camel casing

    Returns:
        A namespace containing all the generated classes
    """
    kw = {"strict": strict}
    builder = classbuilder.ClassBuilder(self.resolver)
    # Build every named definition first, so references resolve.
    for nm, defn in iteritems(self.schema.get('definitions', {})):
        uri = python_jsonschema_objects.util.resolve_ref_uri(
            self.resolver.resolution_scope,
            "#/definitions/" + nm)
        builder.construct(uri, defn, **kw)
    if standardize_names:
        name_transform = lambda t: inflection.camelize(inflection.parameterize(six.text_type(t), '_'))
    else:
        name_transform = lambda t: t
    # The root class is named after the schema title (or its id).
    nm = self.schema['title'] if 'title' in self.schema else self.schema['id']
    nm = inflection.parameterize(six.text_type(nm), '_')
    builder.construct(nm, self.schema, **kw)
    self._resolved = builder.resolved
    # Expose the generated classes by title, falling back to the last URI
    # segment unless named_only restricts the namespace to titled classes.
    classes = {}
    for uri, klass in six.iteritems(builder.resolved):
        title = getattr(klass, '__title__', None)
        if title is not None:
            classes[name_transform(title)] = klass
        elif not named_only:
            classes[name_transform(uri.split('/')[-1])] = klass
    return python_jsonschema_objects.util.Namespace.from_mapping(classes)
def _interp(self, data):
"""The interpolation method implemented here is a kind of a billinear
interpolation. The input *data* field is first interpolated along the
rows and subsequently along its columns.
The final size of the interpolated *data* field is determined by the
last indices in self.row_indices and self.col_indices.
"""
row_interpol_data = self._interp_axis(data, 0)
interpol_data = self._interp_axis(row_interpol_data, 1)
return interpol_data | The interpolation method implemented here is a kind of a billinear
interpolation. The input *data* field is first interpolated along the
rows and subsequently along its columns.
The final size of the interpolated *data* field is determined by the
last indices in self.row_indices and self.col_indices. | entailment |
def _interp_axis(self, data, axis):
"""The *data* field contains the data to be interpolated. It is
expected that values reach out to the *data* boundaries.
With *axis*=0 this method interpolates along rows and *axis*=1 it
interpolates along colums.
For column mode the *data* input is transposed before interpolation
and subsequently transposed back.
"""
if axis == 0:
return self._pandas_interp(data, self.row_indices)
if axis == 1:
data_transposed = data.as_matrix().T
data_interpol_transposed = self._pandas_interp(data_transposed,
self.col_indices)
data_interpol = data_interpol_transposed.as_matrix().T
return data_interpol | The *data* field contains the data to be interpolated. It is
expected that values reach out to the *data* boundaries.
With *axis*=0 this method interpolates along rows and *axis*=1 it
interpolates along colums.
For column mode the *data* input is transposed before interpolation
and subsequently transposed back. | entailment |
def _pandas_interp(self, data, indices):
"""The actual transformation based on the following stackoverflow
entry: http://stackoverflow.com/a/10465162
"""
new_index = np.arange(indices[-1] + 1)
data_frame = DataFrame(data, index=indices)
data_frame_reindexed = data_frame.reindex(new_index)
data_interpol = data_frame_reindexed.apply(Series.interpolate)
del new_index
del data_frame
del data_frame_reindexed
return data_interpol | The actual transformation based on the following stackoverflow
entry: http://stackoverflow.com/a/10465162 | entailment |
def interpolate(self):
    """Run the interpolation and return ``(latitude, longitude)``.

    The results are also stored on ``self.latitude`` / ``self.longitude``.
    """
    lat_fine = self._interp(self.lat_tiepoint)
    lon_fine = self._interp(self.lon_tiepoint)
    self.latitude, self.longitude = lat_fine, lon_fine
    return self.latitude, self.longitude
def _execute(self, query, model, adapter, raw=False):
    """
    We have to override this because in some situation
    (such as with Filebackend, or any dummy backend)
    we have to parse / adapt results *before* we can execute the query
    """
    loaded = self.load(model, adapter)
    memory_store = IterableStore(values=loaded)
    return memory_store._execute(query, model=model, adapter=None, raw=raw)
def get(self, key, default=None, reraise=False):
    """
    Get the given key from the cache, if present.

    A default value can be provided in case the requested key is not present,
    otherwise, None will be returned.

    :param key: the key to query
    :type key: str
    :param default: the value to return if the key does not exist in cache
    :param reraise: wether an exception should be thrown if now value is found, defaults to False.
    :type reraise: bool

    Example usage:

    .. code-block:: python

        cache.set('my_key', 'my_value')
        cache.get('my_key')
        >>> 'my_value'

        cache.get('not_present', 'default_value')
        >>> 'default_value'

        cache.get('not_present', reraise=True)
        >>> raise lifter.exceptions.NotInCache
    """
    if not self.enabled:
        if reraise:
            raise exceptions.DisabledCache()
        return default
    try:
        return self._get(key)
    except exceptions.NotInCache:
        if not reraise:
            return default
        raise
def set(self, key, value, timeout=NotSet):
    """
    Set the given key to the given value in the cache.

    A timeout may be provided, otherwise, the :py:attr:`Cache.default_timeout`
    will be used.

    :param key: the key to which the value will be bound
    :type key: str
    :param value: the value to store in the cache (a callable is invoked
        and its result stored instead)
    :param timeout: the expiration delay for the value. None means it will never expire.
    :type timeout: integer or None

    Example usage:

    .. code-block:: python

        # this cached value will expire after half an hour
        cache.set('my_key', 'value', 1800)
    """
    if not self.enabled:
        return
    # Lazily-computed values: call the callable to get the real value.
    if callable(value):
        value = value()
    effective_timeout = self.default_timeout if timeout == NotSet else timeout
    self._set(key, value, effective_timeout)
    return value
def resolve_attr(obj, name):
    """A custom attrgetter that operates both on dictionaries and objects"""
    # TODO: setup some hinting, so we can go directly to the correct lookup.
    # Fastest path: subscript access for mappings.
    try:
        return obj[name]
    except KeyError:
        raise exceptions.MissingField('Dict {0} has no attribute or key "{1}"'.format(obj, name))
    except TypeError:
        pass
    # Not a mapping: direct instance-dict lookup is faster than getattr.
    try:
        return obj.__dict__[name]
    except (KeyError, AttributeError):
        pass
    # Regular attribute access (covers class attributes and properties).
    try:
        return getattr(obj, name)
    except AttributeError:
        pass
    # Last possible choice: resolve across an iterable of objects.
    if isinstance(obj, collections.Iterable):
        return IterableAttr(obj, name)
    raise exceptions.MissingField('Object {0} has no attribute or key "{1}"'.format(obj, name))
def unique_everseen(seq):
    """Return the items of *seq* as a list, keeping only the first
    occurrence of each value (order preserved).

    Solution found here: http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
    """
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def hints(self, **kwargs):
    """Return a clone of this queryset with the given hints applied to the
    underlying query.

    example: ``queryset.hints(permissive=False)``
    """
    updated_query = self.query.clone()
    updated_query.hints.update(kwargs)
    return self._clone(query=updated_query)
def build_filter_from_kwargs(self, **kwargs):
    """Convert django-s like lookup to SQLAlchemy ones"""
    query = None
    for raw_path, value in kwargs.items():
        parts = raw_path.split('__')
        # Does the path end with a registered lookup suffix (__gte, __lte, ...)?
        lookup_class = None
        try:
            lookup_class = lookups.registry[parts[-1]]
            raw_path = '__'.join(parts[:-1])
        except KeyError:
            pass
        path = lookup_to_path(raw_path)
        if lookup_class:
            node = QueryNode(path, lookup=lookup_class(value))
        else:
            node = path == value
        # AND all the individual filters together.
        query = node if query is None else query & node
    return query
def locally(self):
    """Execute the current queryset and wrap the results in the python
    backend, so subsequent queries run against the local dataset instead
    of contacting the store.
    """
    from .backends import python
    from . import models
    memory_store = python.IterableStore(values=self)
    return memory_store.query(self.manager.model).all()
def get_scene_splits(nlines_swath, nlines_scan, n_cpus):
    """Calculate the line numbers where the swath will be split in smaller
    granules for parallel processing"""
    total_scans = nlines_swath // nlines_scan
    # With fewer scans than workers, fall back to one scan per granule.
    scans_per_granule = 1 if total_scans < n_cpus else total_scans // n_cpus
    lines_per_granule = scans_per_granule * nlines_scan
    return range(lines_per_granule, nlines_swath, lines_per_granule)
def metop20kmto1km(lons20km, lats20km):
    """Getting 1km geolocation for metop avhrr from 20km tiepoints.
    """
    nlines = lons20km.shape[0]
    # Tie points sit at column 0, every 20th column from 4, and column 2047.
    cols20km = np.array([0] + list(range(4, 2048, 20)) + [2047])
    cols1km = np.arange(2048)
    rows20km = np.arange(nlines)
    rows1km = np.arange(nlines)
    interpolator = SatelliteInterpolator((lons20km, lats20km),
                                         (rows20km, cols20km),
                                         (rows1km, cols1km),
                                         1,   # along-track order
                                         3)   # cross-track order
    return interpolator.interpolate()
def modis5kmto1km(lons5km, lats5km):
    """Getting 1km geolocation for modis from 5km tiepoints.

    http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation
    """
    nlines = lons5km.shape[0] * 5
    cols5km = np.arange(2, 1354, 5) / 5.0
    cols1km = np.arange(1354) / 5.0
    rows5km = np.arange(2, nlines, 5) / 5.0
    rows1km = np.arange(nlines) / 5.0
    interpolator = SatelliteInterpolator((lons5km, lats5km),
                                         (rows5km, cols5km),
                                         (rows1km, cols1km),
                                         1,   # along-track order
                                         3,   # cross-track order
                                         chunk_size=10)
    # Tie points do not reach the swath edges: extrapolate both borders.
    interpolator.fill_borders("y", "x")
    return interpolator.interpolate()
def _multi(fun, lons, lats, chunk_size, cores=1):
    """Run *fun* over the swath split across *cores* worker processes.

    The lon/lat arrays are split at scan boundaries, processed in
    parallel, and the per-chunk results stacked back together.
    """
    pool = Pool(processes=cores)
    split_lines = get_scene_splits(lons.shape[0], chunk_size, cores)
    lon_chunks = np.vsplit(lons, split_lines)
    lat_chunks = np.vsplit(lats, split_lines)
    pending = [pool.apply_async(fun, (lon_chunk, lat_chunk))
               for lon_chunk, lat_chunk in zip(lon_chunks, lat_chunks)]
    pool.close()
    pool.join()
    lon_parts, lat_parts = zip(*(task.get() for task in pending))
    return np.vstack(lon_parts), np.vstack(lat_parts)
def modis1kmto500m(lons1km, lats1km, cores=1):
    """Getting 500m geolocation for modis from 1km tiepoints.

    http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation
    """
    # Fan out over worker processes when more than one core is requested.
    if cores > 1:
        return _multi(modis1kmto500m, lons1km, lats1km, 10, cores)
    nlines = lons1km.shape[0]
    cols1km = np.arange(1354)
    cols500m = np.arange(1354 * 2) / 2.0
    rows1km = np.arange(nlines)
    rows500m = (np.arange(nlines * 2) - 0.5) / 2.
    interpolator = SatelliteInterpolator((lons1km, lats1km),
                                         (rows1km, cols1km),
                                         (rows500m, cols500m),
                                         1,   # along-track order
                                         3,   # cross-track order
                                         chunk_size=20)
    interpolator.fill_borders("y", "x")
    return interpolator.interpolate()
def modis1kmto250m(lons1km, lats1km, cores=1):
    """Getting 250m geolocation for modis from 1km tiepoints.

    http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation
    """
    # Fan out over worker processes when more than one core is requested.
    if cores > 1:
        return _multi(modis1kmto250m, lons1km, lats1km, 10, cores)
    nlines = lons1km.shape[0]
    cols1km = np.arange(1354)
    cols250m = np.arange(1354 * 4) / 4.0
    rows1km = np.arange(nlines)
    rows250m = (np.arange(nlines * 4) - 1.5) / 4.0
    interpolator = SatelliteInterpolator((lons1km, lats1km),
                                         (rows1km, cols1km),
                                         (rows250m, cols250m),
                                         1,   # along-track order
                                         3,   # cross-track order
                                         chunk_size=40)
    interpolator.fill_borders("y", "x")
    return interpolator.interpolate()
def generic_modis5kmto1km(*data5km):
    """Getting 1km data for modis from 5km tiepoints.
    """
    nlines = data5km[0].shape[0] * 5
    cols5km = np.arange(2, 1354, 5)
    cols1km = np.arange(1354)
    rows5km = np.arange(2, nlines, 5)
    rows1km = np.arange(nlines)
    interpolator = Interpolator(list(data5km),
                                (rows5km, cols5km),
                                (rows1km, cols1km),
                                1,   # along-track order
                                3,   # cross-track order
                                chunk_size=10)
    interpolator.fill_borders("y", "x")
    return interpolator.interpolate()
def fill_borders(self, *args):
    """Extrapolate tiepoint lons and lats to fill in the border of the
    chunks.

    Accepts any combination of the dimension names ``"y"`` and ``"x"``;
    all names are validated before any extrapolation runs.
    """
    handlers = {"y": self._fill_row_borders,
                "x": self._fill_col_borders}
    selected = []
    for dim in args:
        try:
            selected.append(handlers[dim])
        except KeyError:
            raise NameError("Unrecognized dimension: " + str(dim))
    for handler in selected:
        handler()
def _extrapolate_cols(self, data, first=True, last=True):
    """Extrapolate the column of data, to get the first and last together
    with the data.
    """
    pieces = []
    if first:
        first_col = _linear_extrapolate(self.col_indices[:2],
                                        (data[:, 0], data[:, 1]),
                                        self.hcol_indices[0])
        pieces.append(np.expand_dims(first_col, 1))
    pieces.append(data)
    if last:
        last_col = _linear_extrapolate(self.col_indices[-2:],
                                       (data[:, -2], data[:, -1]),
                                       self.hcol_indices[-1])
        pieces.append(np.expand_dims(last_col, 1))
    # Nothing to add: return the input unchanged.
    if len(pieces) == 1:
        return data
    return np.hstack(pieces)
def _fill_col_borders(self):
    """Add the first and last column to the data by extrapolation.
    """
    # Only extrapolate a side when the tie points don't already reach it.
    need_first = self.col_indices[0] != self.hcol_indices[0]
    need_last = self.col_indices[-1] != self.hcol_indices[-1]
    for num, data in enumerate(self.tie_data):
        self.tie_data[num] = self._extrapolate_cols(data, need_first, need_last)
    parts = []
    if need_first:
        parts.append(np.array([self.hcol_indices[0]]))
    parts.append(self.col_indices)
    if need_last:
        parts.append(np.array([self.hcol_indices[-1]]))
    if len(parts) > 1:
        self.col_indices = np.concatenate(parts)
def _extrapolate_rows(self, data, row_indices, first_index, last_index):
    """Extrapolate the rows of data, to get the first and last together
    with the data.
    """
    top_row = _linear_extrapolate(row_indices[:2],
                                  (data[0, :], data[1, :]),
                                  first_index)
    bottom_row = _linear_extrapolate(row_indices[-2:],
                                     (data[-2, :], data[-1, :]),
                                     last_index)
    return np.vstack((np.expand_dims(top_row, 0),
                      data,
                      np.expand_dims(bottom_row, 0)))
def _fill_row_borders(self):
    """Add the first and last rows to the data by extrapolation.

    Works chunk by chunk (``self.chunk_size`` output lines at a time),
    extrapolating the tie-point rows of each chunk out to the chunk's
    first and last output line, and rebuilding ``self.row_indices``
    to include the added border rows.
    """
    lines = len(self.hrow_indices)
    chunk_size = self.chunk_size or lines
    # Ratio between output and tie-point row resolution, used to map
    # output line numbers back to tie-point index space.
    factor = len(self.hrow_indices) / len(self.row_indices)
    tmp_data = []
    for num in range(len(self.tie_data)):
        tmp_data.append([])
    row_indices = []
    for index in range(0, lines, chunk_size):
        # Select the tie-point rows that fall inside this output chunk.
        indices = np.logical_and(self.row_indices >= index / factor,
                                 self.row_indices < (index
                                                     + chunk_size) / factor)
        ties = np.argwhere(indices).squeeze()
        tiepos = self.row_indices[indices].squeeze()
        for num, data in enumerate(self.tie_data):
            to_extrapolate = data[ties, :]
            if len(to_extrapolate) > 0:
                extrapolated = self._extrapolate_rows(to_extrapolate,
                                                      tiepos,
                                                      self.hrow_indices[
                                                          index],
                                                      self.hrow_indices[index + chunk_size - 1])
                tmp_data[num].append(extrapolated)
        # Record the chunk's first output line, the tie rows, and the
        # chunk's last output line as the new row index sequence.
        row_indices.append(np.array([self.hrow_indices[index]]))
        row_indices.append(tiepos)
        row_indices.append(np.array([self.hrow_indices[index
                                                       + chunk_size - 1]]))
    for num in range(len(self.tie_data)):
        self.tie_data[num] = np.vstack(tmp_data[num])
    self.row_indices = np.concatenate(row_indices)
def _interp(self):
    """Interpolate the cartesian coordinates onto the fine grid.

    Uses a 2-D rectangular spline; when the row grids are identical only
    a 1-D interpolation along columns is needed.
    """
    if np.all(self.hrow_indices == self.row_indices):
        return self._interp1d()
    xpoints, ypoints = np.meshgrid(self.hrow_indices,
                                   self.hcol_indices)
    xflat = xpoints.ravel()
    yflat = ypoints.ravel()
    for num, data in enumerate(self.tie_data):
        spline = RectBivariateSpline(self.row_indices,
                                     self.col_indices,
                                     data,
                                     s=0,
                                     kx=self.kx_,
                                     ky=self.ky_)
        values = spline.ev(xflat, yflat)
        self.new_data[num] = values.reshape(xpoints.shape).T.copy(order='C')
def _interp1d(self):
"""Interpolate in one dimension.
"""
lines = len(self.hrow_indices)
for num, data in enumerate(self.tie_data):
self.new_data[num] = np.empty((len(self.hrow_indices),
len(self.hcol_indices)),
data.dtype)
for cnt in range(lines):
tck = splrep(self.col_indices, data[cnt, :], k=self.ky_, s=0)
self.new_data[num][cnt, :] = splev(
self.hcol_indices, tck, der=0) | Interpolate in one dimension. | entailment |
def get_lons_from_cartesian(x__, y__):
    """Get longitudes from cartesian coordinates.
    """
    # Angle from the x axis in the equatorial plane, signed by hemisphere.
    azimuth = arccos(x__ / sqrt(x__ ** 2 + y__ ** 2))
    return rad2deg(azimuth) * sign(y__)
def get_lats_from_cartesian(x__, y__, z__, thr=0.8):
    """Get latitudes from cartesian coordinates.

    If we are at low latitudes - small z - the latitude is taken from z
    alone. At high latitudes (close to the poles, |z| above
    ``thr * EARTH_RADIUS``) it is derived from x and y instead.
    """
    from_z = 90 - rad2deg(arccos(z__ / EARTH_RADIUS))
    from_xy = sign(z__) * (90 - rad2deg(arcsin(sqrt(x__ ** 2 + y__ ** 2)
                                               / EARTH_RADIUS)))
    low_latitude = np.logical_and(np.less(z__, thr * EARTH_RADIUS),
                                  np.greater(z__, -1. * thr * EARTH_RADIUS))
    return np.where(low_latitude, from_z, from_xy)
def set_tiepoints(self, lon, lat):
    """Store the longitude/latitude tie-point arrays on the instance."""
    self.lon_tiepoint, self.lat_tiepoint = lon, lat
def compute_expansion_alignment(satz_a, satz_b, satz_c, satz_d):
    """All angles in radians.

    Note(review): satz_c and satz_d are accepted but unused by the
    computation — kept for interface compatibility.
    """
    phi_a = compute_phi(satz_a)
    phi_b = compute_phi(satz_b)
    theta_a = compute_theta(satz_a, phi_a)
    theta_b = compute_theta(satz_b, phi_b)
    # Mid-point angles between the two tie points.
    phi = (phi_a + phi_b) / 2
    zeta = compute_zeta(phi)
    theta = compute_theta(zeta, phi)
    denom = theta_a - theta_b
    c_expansion = 4 * (((theta_a + theta_b) / 2 - theta) / denom)
    sin_beta_2 = scan_width / (2 * H)
    d = ((R + H) / R * np.cos(phi) - np.cos(zeta)) * sin_beta_2
    e = np.cos(zeta) - np.sqrt(np.cos(zeta) ** 2 - d ** 2)
    c_alignment = 4 * e * np.sin(zeta) / denom
    return c_expansion, c_alignment
def lonlat2xyz(lons, lats):
    """Convert lons and lats to cartesian coordinates."""
    radius = 6370997.0
    lat_rad = da.deg2rad(lats)
    lon_rad = da.deg2rad(lons)
    x_coords = radius * da.cos(lat_rad) * da.cos(lon_rad)
    y_coords = radius * da.cos(lat_rad) * da.sin(lon_rad)
    z_coords = radius * da.sin(lat_rad)
    return x_coords, y_coords, z_coords
def xyz2lonlat(x__, y__, z__):
    """Get longitudes and latitudes from cartesian coordinates."""
    radius = 6370997.0
    hypot_xy = da.sqrt(x__ ** 2 + y__ ** 2)
    lons = da.rad2deg(da.arccos(x__ / hypot_xy)) * da.sign(y__)
    lats = da.sign(z__) * (90 - da.rad2deg(da.arcsin(hypot_xy / radius)))
    return lons, lats
def setup_fields(attrs):
    """Collect all fields declared on the class and remove them from attrs"""
    fields = {}
    # Iterate over a snapshot since we mutate attrs while scanning it.
    for key, value in list(attrs.items()):
        if isinstance(value, Field):
            fields[key] = value
            del attrs[key]
    return fields
def _parse_jing_line(line):
    """Parse a line of jing output to a list of line, column, type
    and message.
    """
    # Split at most 4 times: the message itself may contain colons.
    filename, lineno, column, type_, message = (
        part.strip() for part in line.split(':', 4))
    if type_ == 'fatal':
        # Normalize well-known fatal messages.
        message = KNOWN_FATAL_MESSAGES_MAPPING.get(message, message)
    return ErrorLine(filename, lineno, column, type_, message)
def _parse_jing_output(output):
    """Parse the jing output into a tuple of line, column, type and message.
    """
    lines = output.strip().split('\n')
    return tuple(_parse_jing_line(line) for line in lines if line)
def jing(rng_filepath, *xml_filepaths):
    """Run jing.jar using the RNG file against the given XML file."""
    cmd = ['java', '-jar', str(JING_JAR), str(rng_filepath)]
    cmd.extend(str(path) for path in xml_filepaths)
    proc = subprocess.Popen(cmd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            close_fds=True)
    out, _err = proc.communicate()
    return _parse_jing_output(out.decode('utf-8'))
def import_obj_from_str(s):
    """Returns an import object (either ImportImport or FromImport) from text.
    """
    node = ast.parse(s).body[0]
    wrapper_cls = ast_type_to_import_type[type(node)]
    return wrapper_cls(node)
def from_str(cls, s):
    """Construct an import object from a string."""
    node = ast.parse(s).body[0]
    # Guard against parsing a statement of the wrong import flavour.
    if not isinstance(node, cls._expected_ast_type):
        raise AssertionError(
            'Expected ast of type {!r} but got {!r}'.format(
                cls._expected_ast_type,
                node
            )
        )
    return cls(node)
def sort(imports, separate=True, import_before_from=True, **classify_kwargs):
    """Sort import objects into groups.

    :param list imports: FromImport / ImportImport objects
    :param bool separate: Whether to classify and return separate segments
        of imports based on classification.
    :param bool import_before_from: Whether to sort `import ...` imports before
        `from ...` imports.

    For example:

        from os import path
        from aspy import refactor_imports
        import sys
        import pyramid

    separate = True, import_before_from = True

        import sys
        from os import path

        import pyramid
        from aspy import refactor_imports

    separate = True, import_before_from = False

        from os import path
        import sys

        import pyramid
        from aspy import refactor_imports

    separate = False, import_before_from = True

        import pyramid
        import sys
        from aspy import refactor_imports
        from os import path

    separate = False, import_before_from = False

        from aspy import refactor_imports
        from os import path
        import pyramid
        import sys
    """
    if separate:
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            )
        types = ImportType.__all__
    else:
        # A little cheaty, this allows future imports to sort before others
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            ) == ImportType.FUTURE
        types = [True, False]
    if import_before_from:
        # Prefix the sort key with the statement kind's ordering index.
        def sort_within(obj):
            return (CLS_TO_INDEX[type(obj)],) + obj.sort_key
    else:
        def sort_within(obj):
            return tuple(obj.sort_key)
    # Partition the imports
    imports_partitioned = collections.defaultdict(list)
    for import_obj in imports:
        imports_partitioned[classify_func(import_obj)].append(import_obj)
    # sort each of the segments
    for segment_key, val in imports_partitioned.items():
        imports_partitioned[segment_key] = sorted(val, key=sort_within)
    # Emit segments in canonical classification order, skipping empties.
    return tuple(
        tuple(imports_partitioned[key])
        for key in types if key in imports_partitioned
    )
def classify_import(module_name, application_directories=('.',)):
"""Classifies an import by its package.
Returns a value in ImportType.__all__
:param text module_name: The dotted notation of a module
:param tuple application_directories: tuple of paths which are considered
application roots.
"""
# Only really care about the first part of the path
base, _, _ = module_name.partition('.')
found, module_path, is_builtin = _get_module_info(
base, application_directories,
)
if base == '__future__':
return ImportType.FUTURE
# Relative imports: `from .foo import bar`
elif base == '':
return ImportType.APPLICATION
# If imp tells us it is builtin, it is builtin
elif is_builtin:
return ImportType.BUILTIN
# If the module path exists in the project directories
elif _module_path_is_local_and_is_not_symlinked(
module_path, application_directories,
):
return ImportType.APPLICATION
# Otherwise we assume it is a system module or a third party module
elif (
found and
PACKAGES_PATH not in module_path and
not _due_to_pythonpath(module_path)
):
return ImportType.BUILTIN
else:
return ImportType.THIRD_PARTY | Classifies an import by its package.
Returns a value in ImportType.__all__
:param text module_name: The dotted notation of a module
:param tuple application_directories: tuple of paths which are considered
application roots. | entailment |
def _arg_parser():
"""Factory for creating the argument parser"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('xml', nargs='*')
return parser | Factory for creating the argument parser | entailment |
def parse_metadata(elm_tree):
"""Given an element-like object (:mod:`lxml.etree`)
lookup the metadata and return the found elements
:param elm_tree: the root xml element
:type elm_tree: an element-like object from :mod:`lxml.etree`
:returns: common metadata properties
:rtype: dict
"""
xpath = make_cnx_xpath(elm_tree)
role_xpath = lambda xp: tuple(xpath(xp)[0].split()) # noqa: E731
props = {
'id': _maybe(xpath('//md:content-id/text()')),
'version': xpath('//md:version/text()')[0],
'created': xpath('//md:created/text()')[0],
'revised': xpath('//md:revised/text()')[0],
'title': xpath('//md:title/text()')[0],
'license_url': xpath('//md:license/@url')[0],
'language': xpath('//md:language/text()')[0],
'authors': role_xpath('//md:role[@type="author"]/text()'),
'maintainers': role_xpath('//md:role[@type="maintainer"]/text()'),
'licensors': role_xpath('//md:role[@type="licensor"]/text()'),
'keywords': tuple(xpath('//md:keywordlist/md:keyword/text()')),
'subjects': tuple(xpath('//md:subjectlist/md:subject/text()')),
'abstract': _squash_to_text(
_maybe(xpath('//md:abstract')),
remove_namespaces=True,
),
'print_style': _maybe(
xpath('//col:param[@name="print-style"]/@value'),
),
'derived_from': {
'uri': _maybe(xpath('//md:derived-from/@url')),
'title': _maybe(xpath('//md:derived-from/md:title/text()')),
},
}
return props | Given an element-like object (:mod:`lxml.etree`)
lookup the metadata and return the found elements
:param elm_tree: the root xml element
:type elm_tree: an element-like object from :mod:`lxml.etree`
:returns: common metadata properties
:rtype: dict | entailment |
def validate_cnxml(*content_filepaths):
"""Validates the given CNXML file against the cnxml-jing.rng RNG."""
content_filepaths = [Path(path).resolve() for path in content_filepaths]
return jing(CNXML_JING_RNG, *content_filepaths) | Validates the given CNXML file against the cnxml-jing.rng RNG. | entailment |
def validate_collxml(*content_filepaths):
"""Validates the given COLLXML file against the collxml-jing.rng RNG."""
content_filepaths = [Path(path).resolve() for path in content_filepaths]
return jing(COLLXML_JING_RNG, *content_filepaths) | Validates the given COLLXML file against the collxml-jing.rng RNG. | entailment |
def action(args):
"""Roll back commands on a refpkg.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on) and n (giving the number of
operations to roll back).
"""
log.info('loading reference package')
r = refpkg.Refpkg(args.refpkg, create=False)
# First check if we can do n rollbacks
q = r.contents
for i in range(args.n):
if q['rollback'] is None:
log.error('Cannot rollback {} changes; '
'refpkg only records {} changes.'.format(args.n, i))
return 1
else:
q = q['rollback']
for i in range(args.n):
r.rollback()
return 0 | Roll back commands on a refpkg.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on) and n (giving the number of
operations to roll back). | entailment |
def set_targets(x, delta=10):
""" Sets target market trend for a date
Args:
x: Pandas DataFrame of market features
delta: Positive number defining a price buffer between what is
classified as a bullish/bearish market for the training set.
delta is equivalent to the total size of the neutral price zone.
delta / 2 is equivalent to either the positive or negative
threshold of the neutral price zone.
Returns:
Pandas Series of numpy int8 market trend targets
"""
data = [] # Keep track of targets
for row, _ in x.iterrows():
if row == x.shape[0] - 1: # Can't predict yet, done.
break
# Get closing prices
curr_close = x.close[row]
next_close = x.close[row + 1]
high_close = next_close + (delta / 2) # Pos. neutral zone threshold
low_close = next_close - (delta / 2) # Neg. neutral zone threshold
# Get target
if curr_close < low_close:
target = TARGET_CODES['bearish']
elif curr_close > high_close:
target = TARGET_CODES['bullish']
else:
target = TARGET_CODES['neutral']
data.append(target)
return pd.Series(data=data, dtype=np.int32, name='target') | Sets target market trend for a date
Args:
x: Pandas DataFrame of market features
delta: Positive number defining a price buffer between what is
classified as a bullish/bearish market for the training set.
delta is equivalent to the total size of the neutral price zone.
delta / 2 is equivalent to either the positive or negative
threshold of the neutral price zone.
Returns:
Pandas Series of numpy int8 market trend targets | entailment |
def eval_features(json):
""" Gets technical analysis features from market data JSONs
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
Returns:
Dict of market features and their values
"""
return {'close' : json[-1]['close'],
'sma' : SMA.eval_from_json(json),
'rsi' : RSI.eval_from_json(json),
'so' : SO.eval_from_json(json),
'obv' : OBV.eval_from_json(json)} | Gets technical analysis features from market data JSONs
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
Returns:
Dict of market features and their values | entailment |
def target_code_to_name(code):
""" Converts an int target code to a target name
Since self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup
to get the more readable name.
Args:
code: Value from self.TARGET_CODES
Returns:
String target name corresponding to the given code.
"""
TARGET_NAMES = {v: k for k, v in TARGET_CODES.items()}
return TARGET_NAMES[code] | Converts an int target code to a target name
Since self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup
to get the more readable name.
Args:
code: Value from self.TARGET_CODES
Returns:
String target name corresponding to the given code. | entailment |
def setup_model(x, y, model_type='random_forest', seed=None, **kwargs):
""" Initializes a machine learning model
Args:
x: Pandas DataFrame, X axis of features
y: Pandas Series, Y axis of targets
model_type: Machine Learning model to use
Valid values: 'random_forest'
seed: Random state to use when splitting sets and creating the model
**kwargs: Scikit Learn's RandomForestClassifier kwargs
Returns:
Trained model instance of model_type
"""
assert len(x) > 1 and len(y) > 1, 'Not enough data objects to train on (minimum is at least two, you have (x: {0}) and (y: {1}))'.format(len(x), len(y))
sets = namedtuple('Datasets', ['train', 'test'])
x_train, x_test, y_train, y_test = train_test_split(x,
y,
random_state=seed,
shuffle=False)
x = sets(x_train, x_test)
y = sets(y_train, y_test)
if model_type == 'random_forest' or model_type == 'rf':
model = rf.RandomForest(x, y, random_state=seed, **kwargs)
elif model_type == 'deep_neural_network' or model_type == 'dnn':
model = dnn.DeepNeuralNetwork(x, y, **kwargs)
else:
raise ValueError('Invalid model type kwarg')
return model | Initializes a machine learning model
Args:
x: Pandas DataFrame, X axis of features
y: Pandas Series, Y axis of targets
model_type: Machine Learning model to use
Valid values: 'random_forest'
seed: Random state to use when splitting sets and creating the model
**kwargs: Scikit Learn's RandomForestClassifier kwargs
Returns:
Trained model instance of model_type | entailment |
def get_json(self):
""" Gets market chart data from today to a previous date """
today = dt.now()
DIRECTION = 'last'
epochs = date.get_end_start_epochs(today.year, today.month, today.day,
DIRECTION, self.unit, self.count)
return poloniex.chart_json(epochs['shifted'], epochs['initial'],
self.period, self.symbol)[0] | Gets market chart data from today to a previous date | entailment |
def set_features(self, partition=1):
""" Parses market data JSON for technical analysis indicators
Args:
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features.
"""
if len(self.json) < partition + 1:
raise ValueError('Not enough dates for the specified partition size: {0}. Try a smaller partition.'.format(partition))
data = []
for offset in range(len(self.json) - partition):
json = self.json[offset : offset + partition]
data.append(eval_features(json))
return pd.DataFrame(data=data, dtype=np.float32) | Parses market data JSON for technical analysis indicators
Args:
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features. | entailment |
def set_long_features(self, features, columns_to_set=[], partition=2):
""" Sets features of double the duration
Example: Setting 14 day RSIs to longer will create add a
feature column of a 28 day RSIs.
Args:
features: Pandas DataFrame instance with columns as numpy.float32 features.
columns_to_set: List of strings of feature names to make longer
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features.
"""
# Create long features DataFrame
features_long = self.set_features(partition=2 * partition)
# Remove features not specified by args.long
unwanted_features = [f for f in features.columns if f not in columns_to_set]
features_long = features_long.drop(unwanted_features, axis=1)
# Prefix long columns with 'long_' to fix naming conflicts
features_long.columns = ['long_{0}'.format(f) for f in features_long.columns]
# Merge the two DataFrames
skip = partition
return pd.concat([features[skip:].reset_index(drop=True),
features_long],
axis=1) | Sets features of double the duration
Example: Setting 14 day RSIs to longer will create add a
feature column of a 28 day RSIs.
Args:
features: Pandas DataFrame instance with columns as numpy.float32 features.
columns_to_set: List of strings of feature names to make longer
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features. | entailment |
def feature_importances(self):
""" Return list of features and their importance in classification """
feature_names = [feature for feature in self.features.train]
return list(zip(feature_names, self.feature_importances_)) | Return list of features and their importance in classification | entailment |
def exitcode(self):
"""
Process exit code. :const:`0` when process exited successfully,
positive number when exception was occurred, negative number when
process was signaled and :data:`None` when process has not exited
yet.
"""
if self._process is None:
raise ProcessError(
"Process '%s' has not been started yet" % self.name)
return self._process.exitcode | Process exit code. :const:`0` when process exited successfully,
positive number when exception was occurred, negative number when
process was signaled and :data:`None` when process has not exited
yet. | entailment |
def start(self):
"""
Run the process.
"""
if self:
raise ProcessError(
"Process '%s' has been already started" % self.name)
first_run = not self.has_started
# Run process
self._process = self._process_cls(*self._process_args)
self._process.daemon = False
self._process.start()
# Wait unless process is successfully started
if first_run and self._wait_unless_ready:
if self._timeout:
stop_time = time.time() + self._timeout
while time.time() < stop_time and not self._process.ready:
time.sleep(0.25)
if not self._process.ready:
raise ProcessError(
"Timeout during start process '%s'" % self.name)
else:
while not self._process.ready:
time.sleep(0.25) | Run the process. | entailment |
def stop(self):
"""
Stop the worker.
"""
if self._http_server is not None:
self._http_server.stop()
tornado.ioloop.IOLoop.instance().add_callback(
tornado.ioloop.IOLoop.instance().stop) | Stop the worker. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.