code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
return GetUsers(settings=self.settings, **kwargs).call(**kwargs)
def get_users(self, **kwargs)
Gets all of the users in the system and their information :param kwargs: :return:
13.283391
23.058619
0.576071
return GetUserInfo(settings=self.settings, **kwargs).call( user_id=user_id, **kwargs )
def get_user_info(self, user_id, **kwargs)
Retrieves information about a user, the result is only limited to what the callee has access to view. :param user_id: :param kwargs: :return:
6.167749
8.876119
0.69487
return CreateUser(settings=self.settings, **kwargs).call( email=email, name=name, password=password, username=username, **kwargs )
def create_user(self, email, name, password, username, **kwargs)
Create user :param email: E-mail :param name: Full name :param password: Password :param username: Username :param kwargs: active: roles: join_default_channels: require_password_change: send_welcome_email: verified: custom_fields: :return:
3.704055
4.364412
0.848695
return DeleteUser(settings=self.settings, **kwargs).call(user_id=user_id, **kwargs)
def delete_user(self, user_id, **kwargs)
Delete user :param user_id: User ID :param kwargs: :return:
6.08542
8.984423
0.67733
''' Returns the index name (as a string) for the given model as a class or a string. :param model: model name or model class if via_class set to True. :param via_class: set to True if parameter model is a class. :raise KeyError: If the provided model does not have any index associated. ''' try: return cls._model_to_index[model] if via_class else cls._model_name_to_index[model] except KeyError: raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model))
def get_index(cls, model, via_class=False)
Returns the index name (as a string) for the given model as a class or a string. :param model: model name or model class if via_class set to True. :param via_class: set to True if parameter model is a class. :raise KeyError: If the provided model does not have any index associated.
5.920731
2.787541
2.123998
''' Returns the default model index for the given model, or the list of indices if default is False. :param model: model name as a string. :raise KeyError: If the provided model does not have any index associated. ''' try: if default: return cls._model_name_to_default_index[model] return cls._model_name_to_model_idx[model] except KeyError: raise KeyError('Could not find any model index defined for model {}.'.format(model))
def get_model_index(cls, model, default=True)
Returns the default model index for the given model, or the list of indices if default is False. :param model: model name as a string. :raise KeyError: If the provided model does not have any index associated.
4.379599
2.17841
2.010457
''' Returns the list of models defined for this index. :param index: index name. :param as_class: set to True to return the model as a model object instead of as a string. ''' try: return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys() except KeyError: raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
def get_models(cls, index, as_class=False)
Returns the list of models defined for this index. :param index: index name. :param as_class: set to True to return the model as a model object instead of as a string.
6.668169
4.240355
1.57255
''' Returns the list of model indices (i.e. ModelIndex objects) defined for this index. :param index: index name. ''' try: return cls._idx_name_to_mdl_to_mdlidx[index].values() except KeyError: raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
def get_model_indices(cls, index)
Returns the list of model indices (i.e. ModelIndex objects) defined for this index. :param index: index name.
10.218358
5.50849
1.85502
''' Maps raw results to database model objects. :param raw_results: list raw results as returned from elasticsearch-dsl-py. :param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex. :return: list of mapped results in the *same* order as returned by elasticsearch. ''' # Let's iterate over the results and determine the appropriate mapping. model_results = defaultdict(list) # Initializing the list to the number of returned results. This allows us to restore each item in its position. if hasattr(raw_results, 'hits'): results = [None] * len(raw_results.hits) else: results = [None] * len(raw_results) found_results = {} for pos, result in enumerate(raw_results): model_name = result.meta.doc_type if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]: logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result)) results[pos] = result else: meta = Bungiesearch.get_model_index(model_name).Meta model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id) found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta) # Now that we have model ids per model name, let's fetch everything at once. for ref_name, ids in iteritems(model_results): index_name, model_name = ref_name.split('.') model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name] model_obj = model_idx.get_model() items = model_obj.objects.filter(pk__in=ids) if instance: if instance._only == '__model' or model_idx.optimize_queries: desired_fields = model_idx.fields_to_fetch elif instance._only == '__fields': desired_fields = instance._fields else: desired_fields = instance._only if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch. 
items = items.only( *[field.name for field in model_obj._meta.get_fields() # For complete backwards compatibility, you may want to exclude # GenericForeignKey from the results. if field.name in desired_fields and \ not (field.many_to_one and field.related_model is None) ] ) # Let's reposition each item in the results and set the _searchmeta meta information. for item in items: pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)] item._searchmeta = meta results[pos] = item return results
def map_raw_results(cls, raw_results, instance=None)
Maps raw results to database model objects. :param raw_results: list raw results as returned from elasticsearch-dsl-py. :param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex. :return: list of mapped results in the *same* order as returned by elasticsearch.
4.637215
3.514424
1.319481
''' Must clone additional fields to those cloned by elasticsearch-dsl-py. ''' instance = super(Bungiesearch, self)._clone() instance._raw_results_only = self._raw_results_only return instance
def _clone(self)
Must clone additional fields to those cloned by elasticsearch-dsl-py.
20.577564
5.268325
3.905902
''' Executes the query and attempts to create model objects from results. ''' if self.results: return self.results if return_results else None self.execute_raw() if self._raw_results_only: self.results = self.raw_results else: self.map_results() if return_results: return self.results
def execute(self, return_results=True)
Executes the query and attempts to create model objects from results.
4.718547
3.288609
1.434816
''' Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex. ''' s = self._clone() if len(fields) == 1 and fields[0] == '__model': s._only = '__model' else: s._only = fields return s
def only(self, *fields)
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
7.510127
2.241961
3.349802
''' Returns the alias function, if it exists and if it can be applied to this model. ''' try: search_alias = self._alias_hooks[alias] except KeyError: raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias)) else: if search_alias._applicable_models and \ ((model_obj and model_obj not in search_alias._applicable_models) or \ not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])): raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type)) return search_alias.prepare(self, model_obj).alias_for
def hook_alias(self, alias, model_obj=None)
Returns the alias function, if it exists and if it can be applied to this model.
5.303283
4.333079
1.223906
''' Performs a search on a custom elasticsearch index and mapping. Will not attempt to map result objects. ''' from bungiesearch import Bungiesearch return Bungiesearch(raw_results=True).index(index).doc_type(doc_type)
def custom_search(self, index, doc_type)
Performs a search on a custom elasticsearch index and mapping. Will not attempt to map result objects.
11.217888
5.072842
2.211362
''' Sets up the signal processor. Since self.model is not available in the constructor, we perform this operation here. ''' super(BungiesearchManager, self).contribute_to_class(cls, name) from . import Bungiesearch from .signals import get_signal_processor settings = Bungiesearch.BUNGIE if 'SIGNALS' in settings: self.signal_processor = get_signal_processor() self.signal_processor.setup(self.model)
def contribute_to_class(self, cls, name)
Sets up the signal processor. Since self.model is not available in the constructor, we perform this operation here.
6.197706
3.532161
1.75465
''' Returns the index field type that would likely be associated with each Django type. ''' dj_type = field.get_internal_type() if dj_type in ('DateField', 'DateTimeField'): return DateField(**attr) elif dj_type in ('BooleanField', 'NullBooleanField'): return BooleanField(**attr) elif dj_type in ('DecimalField', 'FloatField'): return NumberField(coretype='float', **attr) elif dj_type in ('PositiveSmallIntegerField', 'SmallIntegerField'): return NumberField(coretype='short', **attr) elif dj_type in ('IntegerField', 'PositiveIntegerField', 'AutoField'): return NumberField(coretype='integer', **attr) elif dj_type in ('BigIntegerField'): return NumberField(coretype='long', **attr) return StringField(**attr)
def django_field_to_index(field, **attr)
Returns the index field type that would likely be associated with each Django type.
2.517811
2.017549
1.247955
''' Computes the value of this field to update the index. :param obj: object instance, as a dictionary or as a model instance. ''' if self.template_name: t = loader.select_template([self.template_name]) return t.render(Context({'object': obj})) if self.eval_func: try: return eval(self.eval_func) except Exception as e: raise type(e)('Could not compute value of {} field (eval_as=`{}`): {}.'.format(unicode(self), self.eval_func, unicode(e))) elif self.model_attr: if isinstance(obj, dict): return obj[self.model_attr] current_obj = getattr(obj, self.model_attr) if callable(current_obj): return current_obj() else: return current_obj else: raise KeyError('{0} gets its value via a model attribute, an eval function, a template, or is prepared in a method ' 'call but none of `model_attr`, `eval_as,` `template,` `prepare_{0}` is provided.'.format(unicode(self)))
def value(self, obj)
Computes the value of this field to update the index. :param obj: object instance, as a dictionary or as a model instance.
5.284557
4.067437
1.299235
''' - cmd is string list -> nothing to do - cmd is string -> split it using shlex :param cmd: string ('ls -l') or list of strings (['ls','-l']) :rtype: string list ''' if not isinstance(cmd, string_types): # cmd is string list pass else: if not PY3: # cmd is string # The shlex module currently does not support Unicode input (in # 2.x)! if isinstance(cmd, unicode): try: cmd = unicodedata.normalize( 'NFKD', cmd).encode('ascii', 'strict') except UnicodeEncodeError: raise EasyProcessUnicodeError('unicode command "%s" can not be processed.' % cmd + 'Use string list instead of string') log.debug('unicode is normalized') if posix is None: posix = 'win' not in sys.platform cmd = shlex.split(cmd, posix=posix) return cmd
def split_command(cmd, posix=None)
- cmd is string list -> nothing to do - cmd is string -> split it using shlex :param cmd: string ('ls -l') or list of strings (['ls','-l']) :rtype: string list
5.528134
3.781633
1.461838
''' Returns the mapping for the index as a dictionary. :param meta_fields: Also include elasticsearch meta fields in the dictionary. :return: a dictionary which can be used to generate the elasticsearch index mapping for this doctype. ''' return {'properties': dict((name, field.json()) for name, field in iteritems(self.fields) if meta_fields or name not in AbstractField.meta_fields)}
def get_mapping(self, meta_fields=True)
Returns the mapping for the index as a dictionary. :param meta_fields: Also include elasticsearch meta fields in the dictionary. :return: a dictionary which can be used to generate the elasticsearch index mapping for this doctype.
5.847918
2.445656
2.391145
''' :return: a dictionary which is used to get the serialized analyzer definition from the analyzer class. ''' analysis = {} for field in self.fields.values(): for analyzer_name in ('analyzer', 'index_analyzer', 'search_analyzer'): if not hasattr(field, analyzer_name): continue analyzer = getattr(field, analyzer_name) if not isinstance(analyzer, Analyzer): continue definition = analyzer.get_analysis_definition() if definition is None: continue for key in definition: analysis.setdefault(key, {}).update(definition[key]) return analysis
def collect_analysis(self)
:return: a dictionary which is used to get the serialized analyzer definition from the analyzer class.
3.611785
2.34456
1.540496
''' Serializes an object for it to be added to the index. :param obj: Object to be serialized. Optional if obj_pk is passed. :param obj_pk: Object primary key. Superseded by `obj` if available. :return: A dictionary representing the object as defined in the mapping. ''' if not obj: try: # We're using `filter` followed by `values` in order to only fetch the required fields. obj = self.model.objects.filter(pk=obj_pk).values(*self.fields_to_fetch)[0] except Exception as e: raise ValueError('Could not find object of primary key = {} in model {} (model index class {}). (Original exception: {}.)'.format(obj_pk, self.model, self.__class__.__name__, e)) serialized_object = {} for name, field in iteritems(self.fields): if hasattr(self, "prepare_%s" % name): value = getattr(self, "prepare_%s" % name)(obj) else: value = field.value(obj) serialized_object[name] = value return serialized_object
def serialize_object(self, obj, obj_pk=None)
Serializes an object for it to be added to the index. :param obj: Object to be serialized. Optional if obj_pk is passed. :param obj_pk: Object primary key. Superseded by `obj` if available. :return: A dictionary representing the object as defined in the mapping.
3.846772
2.645324
1.454178
''' Given any explicit fields to include and fields to exclude, add additional fields based on the associated model. If the field needs a hotfix, apply it. ''' final_fields = {} fields = fields or [] excludes = excludes or [] for f in self.model._meta.fields: # If the field name is already present, skip if f.name in self.fields: continue # If field is not present in explicit field listing, skip if fields and f.name not in fields: continue # If field is in exclude list, skip if excludes and f.name in excludes: continue # If field is a relation, skip. if getattr(f, 'rel'): continue attr = {'model_attr': f.name} if f.has_default(): attr['null_value'] = f.default if f.name in hotfixes: attr.update(hotfixes[f.name]) final_fields[f.name] = django_field_to_index(f, **attr) return final_fields
def _get_fields(self, fields, excludes, hotfixes)
Given any explicit fields to include and fields to exclude, add additional fields based on the associated model. If the field needs a hotfix, apply it.
3.676116
2.585661
1.421731
logger.debug(fmt("Validating {}", self)) from python_jsonschema_objects import classbuilder if self.__itemtype__ is None: return type_checks = self.__itemtype__ if not isinstance(type_checks, (tuple, list)): # we were given items = {'type': 'blah'} ; thus ensure the type for all data. type_checks = [type_checks] * len(self.data) elif len(type_checks) > len(self.data): raise ValidationError( "{1} does not have sufficient elements to validate against {0}" .format(self.__itemtype__, self.data)) typed_elems = [] for elem, typ in zip(self.data, type_checks): if isinstance(typ, dict): for param, paramval in six.iteritems(typ): validator = registry(param) if validator is not None: validator(paramval, elem, typ) typed_elems.append(elem) elif util.safe_issubclass(typ, classbuilder.LiteralValue): val = typ(elem) val.validate() typed_elems.append(val) elif util.safe_issubclass(typ, classbuilder.ProtocolBase): if not isinstance(elem, typ): try: if isinstance(elem, (six.string_types, six.integer_types, float)): val = typ(elem) else: val = typ(**util.coerce_for_expansion(elem)) except TypeError as e: raise ValidationError("'{0}' is not a valid value for '{1}': {2}" .format(elem, typ, e)) else: val = elem val.validate() typed_elems.append(val) elif util.safe_issubclass(typ, ArrayWrapper): val = typ(elem) val.validate() typed_elems.append(val) elif isinstance(typ, (classbuilder.TypeProxy, classbuilder.TypeRef)): try: if isinstance(elem, (six.string_types, six.integer_types, float)): val = typ(elem) else: val = typ(**util.coerce_for_expansion(elem)) except TypeError as e: raise ValidationError("'{0}' is not a valid value for '{1}': {2}" .format(elem, typ, e)) else: val.validate() typed_elems.append(val) self._dirty = False self._typed = typed_elems return typed_elems
def validate_items(self)
Validates the items in the backing array, including performing type validation. Sets the _typed property and clears the dirty flag as a side effect Returns: The typed array
2.927598
2.840175
1.030781
logger.debug(fmt("Constructing ArrayValidator with {} and {}", item_constraint, addl_constraints)) from python_jsonschema_objects import classbuilder klassbuilder = addl_constraints.pop("classbuilder", None) props = {} if item_constraint is not None: if isinstance(item_constraint, (tuple, list)): for i, elem in enumerate(item_constraint): isdict = isinstance(elem, (dict,)) isklass = isinstance( elem, type) and util.safe_issubclass( elem, (classbuilder.ProtocolBase, classbuilder.LiteralValue)) if not any([isdict, isklass]): raise TypeError( "Item constraint (position {0}) is not a schema".format(i)) elif isinstance(item_constraint, (classbuilder.TypeProxy, classbuilder.TypeRef)): pass elif util.safe_issubclass(item_constraint, ArrayWrapper): pass else: isdict = isinstance(item_constraint, (dict,)) isklass = isinstance( item_constraint, type) and util.safe_issubclass( item_constraint, (classbuilder.ProtocolBase, classbuilder.LiteralValue)) if not any([isdict, isklass]): raise TypeError("Item constraint is not a schema") if isdict and '$ref' in item_constraint: if klassbuilder is None: raise TypeError("Cannot resolve {0} without classbuilder" .format(item_constraint['$ref'])) uri = item_constraint['$ref'] if uri in klassbuilder.resolved: logger.debug(util.lazy_format( "Using previously resolved object for {0}", uri)) else: logger.debug(util.lazy_format("Resolving object for {0}", uri)) with klassbuilder.resolver.resolving(uri) as resolved: # Set incase there is a circular reference in schema definition klassbuilder.resolved[uri] = None klassbuilder.resolved[uri] = klassbuilder.construct( uri, resolved, (classbuilder.ProtocolBase,)) item_constraint = klassbuilder.resolved[uri] elif isdict and item_constraint.get('type') == 'array': # We need to create a sub-array validator. 
item_constraint = ArrayWrapper.create(name + "#sub", item_constraint=item_constraint[ 'items'], addl_constraints=item_constraint) elif isdict and 'oneOf' in item_constraint: # We need to create a TypeProxy validator uri = "{0}_{1}".format(name, "<anonymous_list_type>") type_array = [] for i, item_detail in enumerate(item_constraint['oneOf']): if '$ref' in item_detail: subtype = klassbuilder.construct( util.resolve_ref_uri( klassbuilder.resolver.resolution_scope, item_detail['$ref']), item_detail) else: subtype = klassbuilder.construct( uri + "_%s" % i, item_detail) type_array.append(subtype) item_constraint = classbuilder.TypeProxy(type_array) elif isdict and item_constraint.get('type') == 'object': uri = "{0}_{1}".format(name, "<anonymous_list_type>") item_constraint = klassbuilder.construct( uri, item_constraint) props['__itemtype__'] = item_constraint strict = addl_constraints.pop('strict', False) props['_strict_'] = strict props.update(addl_constraints) validator = type(str(name), (ArrayWrapper,), props) return validator
def create(name, item_constraint=None, **addl_constraints)
Create an array validator based on the passed in constraints. If item_constraint is a tuple, it is assumed that tuple validation is being performed. If it is a class or dictionary, list validation will be performed. Classes are assumed to be subclasses of ProtocolBase, while dictionaries are expected to be basic types ('string', 'number', ...). addl_constraints is expected to be key-value pairs of any of the other constraints permitted by JSON Schema v4.
3.326928
3.254735
1.022181
md.registerExtension(self) md.preprocessors.add('fenced_code_block', SpecialFencePreprocessor(md), ">normalize_whitespace")
def extendMarkdown(self, md, md_globals)
Add FencedBlockPreprocessor to the Markdown instance.
4.892828
3.961432
1.235116
try: print(os.path.join(os.path.dirname(__file__), *path.splitlines())) requirements = map(str.strip, local_file(path).splitlines()) except IOError: raise RuntimeError("Couldn't find the `requirements.txt' file :(") links = [] pkgs = [] for req in requirements: if not req: continue if 'http:' in req or 'https:' in req: links.append(req) name, version = re.findall("\#egg=([^\-]+)-(.+$)", req)[0] pkgs.append('{0}=={1}'.format(name, version)) else: pkgs.append(req) return pkgs, links
def parse_requirements(path)
Rudimentary parser for the `requirements.txt` file We just want to separate regular packages from links to pass them to the `install_requires` and `dependency_links` params of the `setup()` function properly.
3.520312
3.302458
1.065967
newprops = copy.deepcopy(into) for prop, propval in six.iteritems(data_from): if prop not in newprops: newprops[prop] = propval continue new_sp = newprops[prop] for subprop, spval in six.iteritems(propval): if subprop not in new_sp: new_sp[subprop] = spval elif subprop == 'enum': new_sp[subprop] = set(spval) & set(new_sp[subprop]) elif subprop == 'type': if spval != new_sp[subprop]: raise TypeError("Type cannot conflict in allOf'") elif subprop in ('minLength', 'minimum'): new_sp[subprop] = (new_sp[subprop] if new_sp[subprop] > spval else spval) elif subprop in ('maxLength', 'maximum'): new_sp[subprop] = (new_sp[subprop] if new_sp[subprop] < spval else spval) elif subprop == 'multipleOf': if new_sp[subprop] % spval == 0: new_sp[subprop] = spval else: raise AttributeError( "Cannot set conflicting multipleOf values") else: new_sp[subprop] = spval newprops[prop] = new_sp return newprops
def propmerge(into, data_from)
Merge JSON schema requirements into a dictionary
2.226632
2.162862
1.029484
out = {} for prop in self: propval = getattr(self, prop) if hasattr(propval, 'for_json'): out[prop] = propval.for_json() elif isinstance(propval, list): out[prop] = [getattr(x, 'for_json', lambda:x)() for x in propval] elif isinstance(propval, (ProtocolBase, LiteralValue)): out[prop] = propval.as_dict() elif propval is not None: out[prop] = propval return out
def as_dict(self)
Return a dictionary containing the current values of the object. Returns: (dict): The object represented as a dictionary
2.812889
2.989637
0.94088
import json msg = json.loads(jsonmsg) obj = cls(**msg) obj.validate() return obj
def from_json(cls, jsonmsg)
Create an object directly from a JSON string. Applies general validation after creating the object to check whether all required fields are present. Args: jsonmsg (str): An object encoded as a JSON string Returns: An object of the generated type Raises: ValidationError: if `jsonmsg` does not match the schema `cls` was generated from
3.980986
4.127297
0.96455
missing = self.missing_property_names() if len(missing) > 0: raise validators.ValidationError( "'{0}' are required attributes for {1}" .format(missing, self.__class__.__name__)) for prop, val in six.iteritems(self._properties): if val is None: continue if isinstance(val, ProtocolBase): val.validate() elif getattr(val, 'isLiteralClass', None) is True: val.validate() elif isinstance(val, list): for subval in val: subval.validate() else: # This object is of the wrong type, but just try setting it # The property setter will enforce its correctness # and handily coerce its type at the same time setattr(self, prop, val) return True
def validate(self)
Applies all defined validation to the current state of the object, and raises an error if they are not all met. Raises: ValidationError: if validations do not pass
5.73284
5.731899
1.000164
propname = lambda x: self.__prop_names__[x] missing = [] for x in self.__required__: # Allow the null type propinfo = self.propinfo(propname(x)) null_type = False if 'type' in propinfo: type_info = propinfo['type'] null_type = (type_info == 'null' or isinstance(type_info, (list, tuple)) and 'null' in type_info) elif 'oneOf' in propinfo: for o in propinfo['oneOf']: type_info = o.get('type') if type_info and type_info == 'null' \ or isinstance(type_info, (list, tuple)) \ and 'null' in type_info: null_type = True break if (propname(x) not in self._properties and null_type) or \ (self._properties[propname(x)] is None and not null_type): missing.append(x) return missing
def missing_property_names(self)
Returns a list of properties which are required and missing. Properties are excluded from this list if they are allowed to be null. :return: list of missing properties.
2.818087
2.750628
1.024525
logger.debug(util.lazy_format("Constructing {0}", uri)) if ('override' not in kw or kw['override'] is False) \ and uri in self.resolved: logger.debug(util.lazy_format("Using existing {0}", uri)) return self.resolved[uri] else: ret = self._construct(uri, *args, **kw) logger.debug(util.lazy_format("Constructed {0}", ret)) return ret
def construct(self, uri, *args, **kw)
Wrapper to debug things
3.144646
3.16353
0.994031
cls = type(str(nm), tuple((LiteralValue,)), { '__propinfo__': { '__literal__': clsdata, '__title__': clsdata.get('title'), '__default__': clsdata.get('default')} }) return cls
def _build_literal(self, nm, clsdata)
@todo: Docstring for _build_literal :nm: @todo :clsdata: @todo :returns: @todo
9.796777
9.862309
0.993355
kw = {"strict": strict} builder = classbuilder.ClassBuilder(self.resolver) for nm, defn in iteritems(self.schema.get('definitions', {})): uri = python_jsonschema_objects.util.resolve_ref_uri( self.resolver.resolution_scope, "#/definitions/" + nm) builder.construct(uri, defn, **kw) if standardize_names: name_transform = lambda t: inflection.camelize(inflection.parameterize(six.text_type(t), '_')) else: name_transform = lambda t: t nm = self.schema['title'] if 'title' in self.schema else self.schema['id'] nm = inflection.parameterize(six.text_type(nm), '_') builder.construct(nm, self.schema,**kw) self._resolved = builder.resolved classes = {} for uri, klass in six.iteritems(builder.resolved): title = getattr(klass, '__title__', None) if title is not None: classes[name_transform(title)] = klass elif not named_only: classes[name_transform(uri.split('/')[-1])] = klass return python_jsonschema_objects.util.Namespace.from_mapping(classes)
def build_classes(self,strict=False, named_only=False, standardize_names=True)
Build all of the classes named in the JSONSchema. Class names will be transformed using inflection by default, so names with spaces in the schema will be camelcased, while names without spaces will have internal capitalization dropped. Thus "Home Address" becomes "HomeAddress", while "HomeAddress" becomes "Homeaddress" To disable this behavior, pass standardize_names=False, but be aware that accessing names with spaces from the namespace can be problematic. Args: strict: (bool) use this to validate required fields while creating the class named_only: (bool) If true, only properties with an actual title attribute will be included in the resulting namespace (although all will be generated). standardize_names: (bool) If true (the default), class names will be tranformed by camel casing Returns: A namespace containing all the generated classes
3.601914
3.514284
1.024935
row_interpol_data = self._interp_axis(data, 0) interpol_data = self._interp_axis(row_interpol_data, 1) return interpol_data
def _interp(self, data)
The interpolation method implemented here is a kind of a billinear interpolation. The input *data* field is first interpolated along the rows and subsequently along its columns. The final size of the interpolated *data* field is determined by the last indices in self.row_indices and self.col_indices.
4.953596
4.231628
1.170612
if axis == 0: return self._pandas_interp(data, self.row_indices) if axis == 1: data_transposed = data.as_matrix().T data_interpol_transposed = self._pandas_interp(data_transposed, self.col_indices) data_interpol = data_interpol_transposed.as_matrix().T return data_interpol
def _interp_axis(self, data, axis)
The *data* field contains the data to be interpolated. It is expected that values reach out to the *data* boundaries. With *axis*=0 this method interpolates along rows and *axis*=1 it interpolates along colums. For column mode the *data* input is transposed before interpolation and subsequently transposed back.
3.434939
3.332837
1.030635
new_index = np.arange(indices[-1] + 1) data_frame = DataFrame(data, index=indices) data_frame_reindexed = data_frame.reindex(new_index) data_interpol = data_frame_reindexed.apply(Series.interpolate) del new_index del data_frame del data_frame_reindexed return data_interpol
def _pandas_interp(self, data, indices)
The actual transformation based on the following stackoverflow entry: http://stackoverflow.com/a/10465162
3.735434
3.453535
1.081626
self.latitude = self._interp(self.lat_tiepoint) self.longitude = self._interp(self.lon_tiepoint) return self.latitude, self.longitude
def interpolate(self)
Do the interpolation and return resulting longitudes and latitudes.
4.942442
4.005642
1.23387
values = self.load(model, adapter) return IterableStore(values=values)._execute(query, model=model, adapter=None, raw=raw)
def _execute(self, query, model, adapter, raw=False)
We have to override this because in some situation (such as with Filebackend, or any dummy backend) we have to parse / adapt results *before* when can execute the query
10.343676
10.623847
0.973628
def get(self, key, default=None, reraise=False):
    """Return the cached value for *key*.

    :param key: the cache key to look up.
    :param default: value returned when no value is found (unless *reraise*).
    :param reraise: when True, raise instead of returning *default*:
        ``DisabledCache`` if the cache is disabled, ``NotInCache`` on a miss.
    """
    if not self.enabled:
        if reraise:
            raise exceptions.DisabledCache()
        return default
    try:
        value = self._get(key)
    except exceptions.NotInCache:
        if not reraise:
            return default
        raise
    return value
Get the given key from the cache, if present. A default value can be provided in case the requested key is not present, otherwise, None will be returned. :param key: the key to query :type key: str :param default: the value to return if the key does not exist in cache :param reraise: whether an exception should be thrown if no value is found, defaults to False. :type reraise: bool Example usage: .. code-block:: python cache.set('my_key', 'my_value') cache.get('my_key') >>> 'my_value' cache.get('not_present', 'default_value') >>> 'default_value' cache.get('not_present', reraise=True) >>> raise lifter.exceptions.NotInCache
3.353591
5.216292
0.642907
def set(self, key, value, timeout=NotSet):
    """Store *value* under *key*, optionally with an expiration *timeout*.

    Callables are invoked and their result is stored instead. When
    *timeout* is not given, :py:attr:`default_timeout` is used. Returns
    the stored value (None when the cache is disabled).
    """
    if not self.enabled:
        return None
    if hasattr(value, '__call__'):
        value = value()
    effective_timeout = self.default_timeout if timeout == NotSet else timeout
    self._set(key, value, effective_timeout)
    return value
Set the given key to the given value in the cache. A timeout may be provided, otherwise, the :py:attr:`Cache.default_timeout` will be used. :param key: the key to which the value will be bound :type key: str :param value: the value to store in the cache :param timeout: the expiration delay for the value. None means it will never expire. :type timeout: integer or None Example usage: .. code-block:: python # this cached value will expire after half an hour cache.set('my_key', 'value', 1800)
3.329348
5.011981
0.664278
def resolve_attr(obj, name):
    """A custom attrgetter that operates both on dictionaries and objects.

    Lookup order: mapping key, instance ``__dict__``, regular attribute,
    and finally element-wise access for iterables.

    :raises exceptions.MissingField: when nothing matches.
    """
    # `collections.Iterable` was removed in Python 3.10; use the abc module.
    from collections.abc import Iterable

    # TODO: setup some hinting, so we can go directly to the correct lookup.
    # Maybe it's a dict? Dict lookup is the fastest, try it first.
    try:
        return obj[name]
    except TypeError:
        pass
    except KeyError:
        raise exceptions.MissingField('Dict {0} has no attribute or key "{1}"'.format(obj, name))
    # Okay, it's not a dict; try to access the value as a regular attribute.
    try:
        # Slight hack for better speed, since accessing __dict__ is fast.
        return obj.__dict__[name]
    except (KeyError, AttributeError):
        pass
    try:
        # Lookup using regular attribute access.
        return getattr(obj, name)
    except AttributeError:
        pass
    # Last possible choice: element-wise access on an iterable.
    if isinstance(obj, Iterable):
        return IterableAttr(obj, name)
    raise exceptions.MissingField('Object {0} has no attribute or key "{1}"'.format(obj, name))
A custom attrgetter that operates both on dictionaries and objects
6.098398
6.05584
1.007028
def unique_everseen(seq):
    """Deduplicate *seq* (of hashable items) while preserving first-seen order.

    See http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
    """
    # dict preserves insertion order, so fromkeys keeps the first occurrence.
    return list(dict.fromkeys(seq))
Solution found here : http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
1.692878
1.397301
1.211534
def hints(self, **kwargs):
    """Return a clone whose underlying query carries updated hint values.

    Example: ``queryset.hints(permissive=False)``
    """
    cloned_query = self.query.clone()
    cloned_query.hints.update(kwargs)
    return self._clone(query=cloned_query)
Use this method to update hints value of the underlying query example: queryset.hints(permissive=False)
4.039465
3.565179
1.133033
def build_filter_from_kwargs(self, **kwargs):
    """Convert Django-style lookup kwargs (``foo__bar__gte=3``) into
    native query nodes, AND-ing them together."""
    combined = None
    for raw_path, value in kwargs.items():
        parts = raw_path.split('__')
        lookup_class = None
        try:
            # The path may end with a lookup suffix such as __gte or __lte.
            lookup_class = lookups.registry[parts[-1]]
        except KeyError:
            pass
        else:
            raw_path = '__'.join(parts[:-1])
        path = lookup_to_path(raw_path)
        if lookup_class is None:
            node = path == value
        else:
            node = QueryNode(path, lookup=lookup_class(value))
        combined = combined & node if combined else node
    return combined
Convert django-s like lookup to SQLAlchemy ones
3.950774
3.552379
1.112149
def locally(self):
    """Execute this queryset and wrap the results in the local python
    backend, so further queries run against the fetched dataset instead
    of contacting the store."""
    from .backends import python
    from . import models
    backend = python.IterableStore(values=self)
    return backend.query(self.manager.model).all()
Will execute the current queryset and pass it to the python backend so user can run query on the local dataset (instead of contacting the store)
26.548763
12.496693
2.124463
def get_scene_splits(nlines_swath, nlines_scan, n_cpus):
    """Calculate the line numbers at which to split the swath into smaller
    granules for parallel processing.

    Each chunk gets a whole number of scans so no scan is split in half.

    :returns: a range of split line indices.
    """
    total_scans = nlines_swath // nlines_scan
    scans_per_chunk = 1 if total_scans < n_cpus else total_scans // n_cpus
    lines_per_chunk = scans_per_chunk * nlines_scan
    return range(lines_per_chunk, nlines_swath, lines_per_chunk)
Calculate the line numbers where the swath will be split in smaller granules for parallel processing
2.377968
2.381939
0.998333
def metop20kmto1km(lons20km, lats20km):
    """Getting 1km geolocation for Metop AVHRR from 20km tiepoints."""
    cols20km = np.array([0] + list(range(4, 2048, 20)) + [2047])
    cols1km = np.arange(2048)
    nlines = lons20km.shape[0]
    rows20km = np.arange(nlines)
    rows1km = np.arange(nlines)
    # Orders: 1 along track, 3 across track.
    interpolator = SatelliteInterpolator((lons20km, lats20km),
                                         (rows20km, cols20km),
                                         (rows1km, cols1km),
                                         1, 3)
    return interpolator.interpolate()
Getting 1km geolocation for metop avhrr from 20km tiepoints.
3.231642
3.239819
0.997476
def modis5kmto1km(lons5km, lats5km):
    """Getting 1km geolocation for MODIS from 5km tiepoints.

    http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation
    """
    cols5km = np.arange(2, 1354, 5) / 5.0
    cols1km = np.arange(1354) / 5.0
    nlines = lons5km.shape[0] * 5
    rows5km = np.arange(2, nlines, 5) / 5.0
    rows1km = np.arange(nlines) / 5.0
    # Orders: 1 along track, 3 across track.
    interpolator = SatelliteInterpolator((lons5km, lats5km),
                                         (rows5km, cols5km),
                                         (rows1km, cols1km),
                                         1, 3, chunk_size=10)
    interpolator.fill_borders("y", "x")
    return interpolator.interpolate()
Getting 1km geolocation for modis from 5km tiepoints. http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation
3.379723
3.509923
0.962905
def _multi(fun, lons, lats, chunk_size, cores=1):
    """Apply *fun* to (lons, lats) split across *cores* worker processes."""
    pool = Pool(processes=cores)
    splits = get_scene_splits(lons.shape[0], chunk_size, cores)
    lon_chunks = np.vsplit(lons, splits)
    lat_chunks = np.vsplit(lats, splits)
    async_results = [pool.apply_async(fun, (lon_chunk, lat_chunk))
                     for lon_chunk, lat_chunk in zip(lon_chunks, lat_chunks)]
    pool.close()
    pool.join()
    lons_out, lats_out = zip(*(res.get() for res in async_results))
    return np.vstack(lons_out), np.vstack(lats_out)
Work on multiple cores.
2.202434
2.228442
0.988329
def modis1kmto500m(lons1km, lats1km, cores=1):
    """Getting 500m geolocation for MODIS from 1km tiepoints.

    http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation
    """
    if cores > 1:
        return _multi(modis1kmto500m, lons1km, lats1km, 10, cores)
    nlines = lons1km.shape[0]
    cols1km = np.arange(1354)
    cols500m = np.arange(1354 * 2) / 2.0
    rows1km = np.arange(nlines)
    rows500m = (np.arange(nlines * 2) - 0.5) / 2.
    # Orders: 1 along track, 3 across track.
    interpolator = SatelliteInterpolator((lons1km, lats1km),
                                         (rows1km, cols1km),
                                         (rows500m, cols500m),
                                         1, 3, chunk_size=20)
    interpolator.fill_borders("y", "x")
    return interpolator.interpolate()
Getting 500m geolocation for modis from 1km tiepoints. http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation
3.196735
3.305453
0.96711
def modis1kmto250m(lons1km, lats1km, cores=1):
    """Getting 250m geolocation for MODIS from 1km tiepoints.

    http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation
    """
    if cores > 1:
        return _multi(modis1kmto250m, lons1km, lats1km, 10, cores)
    nlines = lons1km.shape[0]
    cols1km = np.arange(1354)
    cols250m = np.arange(1354 * 4) / 4.0
    rows1km = np.arange(nlines)
    rows250m = (np.arange(nlines * 4) - 1.5) / 4.0
    # Orders: 1 along track, 3 across track.
    interpolator = SatelliteInterpolator((lons1km, lats1km),
                                         (rows1km, cols1km),
                                         (rows250m, cols250m),
                                         1, 3, chunk_size=40)
    interpolator.fill_borders("y", "x")
    return interpolator.interpolate()
Getting 250m geolocation for modis from 1km tiepoints. http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation
3.17287
3.28199
0.966752
def generic_modis5kmto1km(*data5km):
    """Getting 1km data for MODIS from 5km tiepoints."""
    nlines = data5km[0].shape[0] * 5
    cols5km = np.arange(2, 1354, 5)
    cols1km = np.arange(1354)
    rows5km = np.arange(2, nlines, 5)
    rows1km = np.arange(nlines)
    # Orders: 1 along track, 3 across track.
    interpolator = Interpolator(list(data5km),
                                (rows5km, cols5km),
                                (rows1km, cols1km),
                                1, 3, chunk_size=10)
    interpolator.fill_borders("y", "x")
    return interpolator.interpolate()
Getting 1km data for modis from 5km tiepoints.
4.421812
4.409586
1.002773
def fill_borders(self, *args):
    """Extrapolate tiepoint lons and lats to fill in the borders of the
    chunks.

    :param args: dimensions to fill, any of "y" (rows) and "x" (columns).
    :raises NameError: on an unrecognized dimension (before running any).
    """
    dispatch = {"y": self._fill_row_borders, "x": self._fill_col_borders}
    # Validate every requested dimension before executing anything.
    fillers = []
    for dim in args:
        try:
            fillers.append(dispatch[dim])
        except KeyError:
            raise NameError("Unrecognized dimension: " + str(dim))
    for filler in fillers:
        filler()
Extrapolate tiepoint lons and lats to fill in the border of the chunks.
3.985721
3.825109
1.041989
def _extrapolate_cols(self, data, first=True, last=True):
    """Extrapolate the columns of *data*, prepending/appending border
    columns as requested."""
    pieces = []
    if first:
        first_col = _linear_extrapolate(self.col_indices[:2],
                                        (data[:, 0], data[:, 1]),
                                        self.hcol_indices[0])
        pieces.append(np.expand_dims(first_col, 1))
    pieces.append(data)
    if last:
        last_col = _linear_extrapolate(self.col_indices[-2:],
                                       (data[:, -2], data[:, -1]),
                                       self.hcol_indices[-1])
        pieces.append(np.expand_dims(last_col, 1))
    if len(pieces) == 1:
        # Nothing to add: return the data unchanged.
        return data
    return np.hstack(pieces)
Extrapolate the column of data, to get the first and last together with the data.
2.007419
2.037034
0.985462
def _fill_col_borders(self):
    """Add the first and last columns to the data by extrapolation."""
    # Skip a side when the tie grid already reaches the fine grid's edge.
    need_first = self.col_indices[0] != self.hcol_indices[0]
    need_last = self.col_indices[-1] != self.hcol_indices[-1]
    for num, data in enumerate(self.tie_data):
        self.tie_data[num] = self._extrapolate_cols(data, need_first, need_last)
    prefix = [np.array([self.hcol_indices[0]])] if need_first else []
    suffix = [np.array([self.hcol_indices[-1]])] if need_last else []
    if prefix or suffix:
        self.col_indices = np.concatenate(
            prefix + [self.col_indices] + suffix)
Add the first and last column to the data by extrapolation.
2.118567
1.882355
1.125488
def _extrapolate_rows(self, data, row_indices, first_index, last_index):
    """Extrapolate the rows of *data*, prepending/appending the border
    rows at *first_index* and *last_index*."""
    top = _linear_extrapolate(row_indices[:2],
                              (data[0, :], data[1, :]),
                              first_index)
    bottom = _linear_extrapolate(row_indices[-2:],
                                 (data[-2, :], data[-1, :]),
                                 last_index)
    return np.vstack((np.expand_dims(top, 0),
                      data,
                      np.expand_dims(bottom, 0)))
Extrapolate the rows of data, to get the first and last together with the data.
2.561077
2.390838
1.071205
def _fill_row_borders(self):
    """Add the first and last rows to the data by extrapolation.

    Works chunk by chunk (``self.chunk_size`` fine-grid lines at a time):
    the tie rows falling inside each chunk are extrapolated out to the
    chunk's first and last fine-grid lines, and ``self.row_indices`` is
    extended accordingly.
    """
    lines = len(self.hrow_indices)
    chunk_size = self.chunk_size or lines
    # Ratio between fine-grid and tie-point row indexing.
    factor = len(self.hrow_indices) / len(self.row_indices)
    tmp_data = []
    for num in range(len(self.tie_data)):
        tmp_data.append([])
    row_indices = []
    for index in range(0, lines, chunk_size):
        # Tie rows that fall inside this chunk (in tie-point index space).
        indices = np.logical_and(self.row_indices >= index / factor,
                                 self.row_indices < (index + chunk_size) / factor)
        ties = np.argwhere(indices).squeeze()
        tiepos = self.row_indices[indices].squeeze()
        for num, data in enumerate(self.tie_data):
            to_extrapolate = data[ties, :]
            if len(to_extrapolate) > 0:
                extrapolated = self._extrapolate_rows(
                    to_extrapolate, tiepos,
                    self.hrow_indices[index],
                    self.hrow_indices[index + chunk_size - 1])
                tmp_data[num].append(extrapolated)
        # Record the chunk's border lines around the tie positions.
        row_indices.append(np.array([self.hrow_indices[index]]))
        row_indices.append(tiepos)
        row_indices.append(np.array([self.hrow_indices[index + chunk_size - 1]]))
    for num in range(len(self.tie_data)):
        self.tie_data[num] = np.vstack(tmp_data[num])
    self.row_indices = np.concatenate(row_indices)
Add the first and last rows to the data by extrapolation.
2.8304
2.677525
1.057096
def _interp(self):
    """Interpolate the cartesian coordinates onto the fine grid."""
    if np.all(self.hrow_indices == self.row_indices):
        # Rows already match the fine grid: only columns need interpolating.
        return self._interp1d()
    xpoints, ypoints = np.meshgrid(self.hrow_indices, self.hcol_indices)
    for num, data in enumerate(self.tie_data):
        spline = RectBivariateSpline(self.row_indices, self.col_indices,
                                     data, s=0, kx=self.kx_, ky=self.ky_)
        values = spline.ev(xpoints.ravel(), ypoints.ravel())
        self.new_data[num] = values.reshape(xpoints.shape).T.copy(order='C')
Interpolate the cartesian coordinates.
3.789863
3.75562
1.009118
def _interp1d(self):
    """Interpolate each scan line along the column (cross-track) dimension."""
    nrows = len(self.hrow_indices)
    for num, data in enumerate(self.tie_data):
        out = np.empty((nrows, len(self.hcol_indices)), data.dtype)
        for row in range(nrows):
            tck = splrep(self.col_indices, data[row, :], k=self.ky_, s=0)
            out[row, :] = splev(self.hcol_indices, tck, der=0)
        self.new_data[num] = out
Interpolate in one dimension.
4.62678
4.459949
1.037406
def get_lons_from_cartesian(x__, y__):
    """Get longitudes (degrees) from cartesian x/y coordinates."""
    azimuth = arccos(x__ / sqrt(x__ ** 2 + y__ ** 2))
    return rad2deg(azimuth) * sign(y__)
Get longitudes from cartesian coordinates.
4.355563
3.907749
1.114596
def get_lats_from_cartesian(x__, y__, z__, thr=0.8):
    """Get latitudes (degrees) from cartesian coordinates.

    At low latitudes (|z| below ``thr * EARTH_RADIUS``) the latitude is
    derived from z alone; close to the poles it is derived from x and y
    instead.
    """
    low_lat = 90 - rad2deg(arccos(z__ / EARTH_RADIUS))
    high_lat = sign(z__) * (
        90 - rad2deg(arcsin(sqrt(x__ ** 2 + y__ ** 2) / EARTH_RADIUS)))
    near_equator = np.logical_and(np.less(z__, thr * EARTH_RADIUS),
                                  np.greater(z__, -1. * thr * EARTH_RADIUS))
    return np.where(near_equator, low_lat, high_lat)
Get latitudes from cartesian coordinates.
4.878928
4.900552
0.995587
def set_tiepoints(self, lon, lat):
    """Store the longitude/latitude tie points on the interpolator."""
    self.lat_tiepoint = lat
    self.lon_tiepoint = lon
Defines the lon,lat tie points.
3.497299
2.650295
1.319588
def compute_expansion_alignment(satz_a, satz_b, satz_c, satz_d):
    """All angles in radians.

    Compute along-scan expansion and alignment coefficients from the
    satellite zenith angles of neighbouring pixels. ``satz_c``/``satz_d``
    are kept for interface compatibility but are unused.
    """
    phi_a = compute_phi(satz_a)
    phi_b = compute_phi(satz_b)
    theta_a = compute_theta(satz_a, phi_a)
    theta_b = compute_theta(satz_b, phi_b)
    # Mid-point geometry between the two pixels.
    phi = (phi_a + phi_b) / 2
    zeta = compute_zeta(phi)
    theta = compute_theta(zeta, phi)
    c_expansion = 4 * (((theta_a + theta_b) / 2 - theta) / (theta_a - theta_b))
    sin_beta_2 = scan_width / (2 * H)
    d__ = ((R + H) / R * np.cos(phi) - np.cos(zeta)) * sin_beta_2
    e__ = np.cos(zeta) - np.sqrt(np.cos(zeta) ** 2 - d__ ** 2)
    c_alignment = 4 * e__ * np.sin(zeta) / (theta_a - theta_b)
    return c_expansion, c_alignment
All angles in radians.
2.770627
2.680867
1.033482
def lonlat2xyz(lons, lats):
    """Convert lons/lats (degrees) to cartesian coordinates on the sphere."""
    radius = 6370997.0
    lats_rad = da.deg2rad(lats)
    lons_rad = da.deg2rad(lons)
    cos_lats = da.cos(lats_rad)
    x_coords = radius * cos_lats * da.cos(lons_rad)
    y_coords = radius * cos_lats * da.sin(lons_rad)
    z_coords = radius * da.sin(lats_rad)
    return x_coords, y_coords, z_coords
Convert lons and lats to cartesian coordinates.
1.626065
1.655649
0.982131
def xyz2lonlat(x__, y__, z__):
    """Get longitudes and latitudes (degrees) from cartesian coordinates."""
    radius = 6370997.0
    dist = da.sqrt(x__ ** 2 + y__ ** 2)
    lons = da.rad2deg(da.arccos(x__ / dist)) * da.sign(y__)
    lats = da.sign(z__) * (90 - da.rad2deg(da.arcsin(dist / radius)))
    return lons, lats
Get longitudes and latitudes from cartesian coordinates.
2.533879
2.549726
0.993785
def setup_fields(attrs):
    """Collect all Field instances declared on the class, removing them
    from *attrs* in place and returning them as a dict."""
    fields = {key: value for key, value in attrs.items()
              if isinstance(value, Field)}
    for key in fields:
        del attrs[key]
    return fields
Collect all fields declared on the class and remove them from attrs
3.366205
2.528485
1.331313
def _parse_jing_line(line):
    """Parse a line of jing output into an ErrorLine of
    (filename, line, column, type, message)."""
    filename, lineno, column, type_, message = (
        part.strip() for part in line.split(':', 4))
    if type_ == 'fatal':
        # Normalize known fatal messages to friendlier text.
        message = KNOWN_FATAL_MESSAGES_MAPPING.get(message, message)
    return ErrorLine(filename, lineno, column, type_, message)
Parse a line of jing output to a list of line, column, type and message.
3.914703
3.50712
1.116216
def _parse_jing_output(output):
    """Parse the jing output into a tuple of ErrorLine entries."""
    lines = output.strip().split('\n')
    return tuple(_parse_jing_line(line) for line in lines if line)
Parse the jing output into a tuple of line, column, type and message.
3.942799
3.02862
1.301847
def jing(rng_filepath, *xml_filepaths):
    """Run jing.jar using the RNG schema against the given XML files and
    return the parsed validation messages."""
    cmd = ['java', '-jar', str(JING_JAR), str(rng_filepath)]
    cmd.extend(str(path) for path in xml_filepaths)
    proc = subprocess.Popen(cmd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            close_fds=True)
    out, _err = proc.communicate()
    return _parse_jing_output(out.decode('utf-8'))
Run jing.jar using the RNG file against the given XML file.
1.934621
1.903265
1.016475
def import_obj_from_str(s):
    """Return an import object (ImportImport or FromImport) parsed from text."""
    node = ast.parse(s).body[0]
    return ast_type_to_import_type[type(node)](node)
Returns an import object (either ImportImport or FromImport) from text.
4.004454
3.728142
1.074115
def from_str(cls, s):
    """Construct an import object from a string, checking that the parsed
    AST node matches the type expected by this class."""
    node = ast.parse(s).body[0]
    if not isinstance(node, cls._expected_ast_type):
        raise AssertionError(
            'Expected ast of type {!r} but got {!r}'.format(
                cls._expected_ast_type, node
            )
        )
    return cls(node)
Construct an import object from a string.
2.852495
2.824964
1.009746
def sort(imports, separate=True, import_before_from=True, **classify_kwargs):
    """Sort import objects into groups.

    :param list imports: FromImport / ImportImport objects
    :param bool separate: whether to classify and return separate segments
        of imports based on classification.
    :param bool import_before_from: whether to sort `import ...` imports
        before `from ...` imports.
    :return: tuple of tuples of import objects, one inner tuple per segment.
    """
    if separate:
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            )
        types = ImportType.__all__
    else:
        # A little cheaty, this allows future imports to sort before others
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            ) == ImportType.FUTURE
        types = [True, False]

    if import_before_from:
        def sort_within(obj):
            return (CLS_TO_INDEX[type(obj)],) + obj.sort_key
    else:
        def sort_within(obj):
            return tuple(obj.sort_key)

    # Partition the imports
    imports_partitioned = collections.defaultdict(list)
    for import_obj in imports:
        imports_partitioned[classify_func(import_obj)].append(import_obj)

    # sort each of the segments
    for segment_key, val in imports_partitioned.items():
        imports_partitioned[segment_key] = sorted(val, key=sort_within)

    return tuple(
        tuple(imports_partitioned[key])
        for key in types if key in imports_partitioned
    )
Sort import objects into groups. :param list imports: FromImport / ImportImport objects :param bool separate: Whether to classify and return separate segments of imports based on classification. :param bool import_before_from: Whether to sort `import ...` imports before `from ...` imports. For example: from os import path from aspy import refactor_imports import sys import pyramid separate = True, import_before_from = True import sys from os import path import pyramid from aspy import refactor_imports separate = True, import_before_from = False from os import path import sys import pyramid from aspy import refactor_imports separate = False, import_before_from = True import pyramid import sys from aspy import refactor_imports from os import path separate = False, import_before_from = False from aspy import refactor_imports from os import path import pyramid import sys
3.404127
3.40904
0.998559
def classify_import(module_name, application_directories=('.',)):
    """Classify an import by its package; returns a value in
    ``ImportType.__all__``.

    :param text module_name: the dotted notation of a module
    :param tuple application_directories: paths considered application roots
    """
    # Only really care about the first part of the path.
    base, _, _ = module_name.partition('.')
    found, module_path, is_builtin = _get_module_info(
        base, application_directories,
    )
    if base == '__future__':
        return ImportType.FUTURE
    if base == '':
        # Relative imports: `from .foo import bar`
        return ImportType.APPLICATION
    if is_builtin:
        # If imp tells us it is builtin, it is builtin.
        return ImportType.BUILTIN
    if _module_path_is_local_and_is_not_symlinked(
            module_path, application_directories,
    ):
        # The module path exists in the project directories.
        return ImportType.APPLICATION
    if (
            found and
            PACKAGES_PATH not in module_path and
            not _due_to_pythonpath(module_path)
    ):
        return ImportType.BUILTIN
    # Otherwise assume it is a third party module.
    return ImportType.THIRD_PARTY
Classifies an import by its package. Returns a value in ImportType.__all__ :param text module_name: The dotted notation of a module :param tuple application_directories: tuple of paths which are considered application roots.
5.204024
5.311116
0.979836
parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('xml', nargs='*') return parser
def _arg_parser()
Factory for creating the argument parser
4.014213
3.831092
1.047799
def parse_metadata(elm_tree):
    """Given an element-like object (:mod:`lxml.etree`) lookup the metadata
    and return the found elements.

    :param elm_tree: the root xml element
    :type elm_tree: an element-like object from :mod:`lxml.etree`
    :returns: common metadata properties
    :rtype: dict
    """
    xpath = make_cnx_xpath(elm_tree)
    # Roles are whitespace-separated ids inside a single text node.
    role_xpath = lambda xp: tuple(xpath(xp)[0].split())  # noqa: E731

    props = {
        'id': _maybe(xpath('//md:content-id/text()')),
        'version': xpath('//md:version/text()')[0],
        'created': xpath('//md:created/text()')[0],
        'revised': xpath('//md:revised/text()')[0],
        'title': xpath('//md:title/text()')[0],
        'license_url': xpath('//md:license/@url')[0],
        'language': xpath('//md:language/text()')[0],
        'authors': role_xpath('//md:role[@type="author"]/text()'),
        'maintainers': role_xpath('//md:role[@type="maintainer"]/text()'),
        'licensors': role_xpath('//md:role[@type="licensor"]/text()'),
        'keywords': tuple(xpath('//md:keywordlist/md:keyword/text()')),
        'subjects': tuple(xpath('//md:subjectlist/md:subject/text()')),
        'abstract': _squash_to_text(
            _maybe(xpath('//md:abstract')),
            remove_namespaces=True,
        ),
        'print_style': _maybe(
            xpath('//col:param[@name="print-style"]/@value'),
        ),
        'derived_from': {
            'uri': _maybe(xpath('//md:derived-from/@url')),
            'title': _maybe(xpath('//md:derived-from/md:title/text()')),
        },
    }
    return props
Given an element-like object (:mod:`lxml.etree`) lookup the metadata and return the found elements :param elm_tree: the root xml element :type elm_tree: an element-like object from :mod:`lxml.etree` :returns: common metadata properties :rtype: dict
2.905043
3.006933
0.966115
def validate_cnxml(*content_filepaths):
    """Validate the given CNXML files against the cnxml-jing.rng schema."""
    resolved = [Path(filepath).resolve() for filepath in content_filepaths]
    return jing(CNXML_JING_RNG, *resolved)
Validates the given CNXML file against the cnxml-jing.rng RNG.
7.462306
3.826015
1.950412
def validate_collxml(*content_filepaths):
    """Validate the given COLLXML files against the collxml-jing.rng schema."""
    resolved = [Path(filepath).resolve() for filepath in content_filepaths]
    return jing(COLLXML_JING_RNG, *resolved)
Validates the given COLLXML file against the collxml-jing.rng RNG.
7.670537
3.785056
2.026532
def action(args):
    """Roll back ``args.n`` commands on the refpkg at ``args.refpkg``.

    :returns: 0 on success, 1 when the refpkg records fewer than n changes.
    """
    log.info('loading reference package')
    package = refpkg.Refpkg(args.refpkg, create=False)
    # First check that the log records enough changes to undo.
    contents = package.contents
    for done in range(args.n):
        if contents['rollback'] is None:
            log.error('Cannot rollback {} changes; '
                      'refpkg only records {} changes.'.format(args.n, done))
            return 1
        contents = contents['rollback']
    for _ in range(args.n):
        package.rollback()
    return 0
Roll back commands on a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on) and n (giving the number of operations to roll back).
7.468396
5.487448
1.360996
def set_targets(x, delta=10):
    """Set the target market trend for each date.

    :param x: DataFrame of market features with a ``close`` column and a
        0..n-1 integer index (as produced by the feature builder).
    :param delta: total width of the neutral price zone; delta/2 is the
        positive/negative threshold around the next close.
    :returns: Series of int32 target codes (bullish/bearish/neutral).
    """
    targets = []
    half_zone = delta / 2
    last_row = x.shape[0] - 1
    for row, _ in x.iterrows():
        if row == last_row:
            # No next close to compare against; done predicting.
            break
        curr_close = x.close[row]
        next_close = x.close[row + 1]
        if curr_close < next_close - half_zone:
            targets.append(TARGET_CODES['bearish'])
        elif curr_close > next_close + half_zone:
            targets.append(TARGET_CODES['bullish'])
        else:
            targets.append(TARGET_CODES['neutral'])
    return pd.Series(data=targets, dtype=np.int32, name='target')
Sets target market trend for a date Args: x: Pandas DataFrame of market features delta: Positive number defining a price buffer between what is classified as a bullish/bearish market for the training set. delta is equivalent to the total size of the neutral price zone. delta / 2 is equivalent to either the positive or negative threshold of the neutral price zone. Returns: Pandas Series of numpy int32 market trend targets
3.702604
3.401102
1.088648
def eval_features(json):
    """Compute technical-analysis features from market-data JSON.

    :param json: list of dict dates keyed by raw market statistics.
    :returns: dict of feature names to values.
    """
    return {
        'close': json[-1]['close'],
        'sma': SMA.eval_from_json(json),
        'rsi': RSI.eval_from_json(json),
        'so': SO.eval_from_json(json),
        'obv': OBV.eval_from_json(json),
    }
Gets technical analysis features from market data JSONs Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. Returns: Dict of market features and their values
2.909192
3.260391
0.892283
def target_code_to_name(code):
    """Reverse-lookup the readable target name for an int target code.

    TARGET_CODES is a 1:1 mapping, so inverting it is unambiguous.
    """
    reverse_mapping = {value: name for name, value in TARGET_CODES.items()}
    return reverse_mapping[code]
Converts an int target code to a target name Since self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup to get the more readable name. Args: code: Value from self.TARGET_CODES Returns: String target name corresponding to the given code.
3.388346
4.698005
0.721231
def setup_model(x, y, model_type='random_forest', seed=None, **kwargs):
    """Initialize a machine-learning model on a train/test split.

    :param x: DataFrame of features.
    :param y: Series of targets.
    :param model_type: 'random_forest'/'rf' or 'deep_neural_network'/'dnn'.
    :param seed: random state for the split and the model.
    :param kwargs: forwarded to the model constructor.
    :raises ValueError: on an unknown *model_type*.
    """
    assert len(x) > 1 and len(y) > 1, 'Not enough data objects to train on (minimum is at least two, you have (x: {0}) and (y: {1}))'.format(len(x), len(y))
    Datasets = namedtuple('Datasets', ['train', 'test'])
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, random_state=seed, shuffle=False)
    x_sets = Datasets(x_train, x_test)
    y_sets = Datasets(y_train, y_test)
    if model_type in ('random_forest', 'rf'):
        return rf.RandomForest(x_sets, y_sets, random_state=seed, **kwargs)
    if model_type in ('deep_neural_network', 'dnn'):
        return dnn.DeepNeuralNetwork(x_sets, y_sets, **kwargs)
    raise ValueError('Invalid model type kwarg')
Initializes a machine learning model Args: x: Pandas DataFrame, X axis of features y: Pandas Series, Y axis of targets model_type: Machine Learning model to use Valid values: 'random_forest' seed: Random state to use when splitting sets and creating the model **kwargs: Scikit Learn's RandomForestClassifier kwargs Returns: Trained model instance of model_type
2.65648
2.851021
0.931765
def get_json(self):
    """Fetch market chart data spanning from a previous date through today."""
    now = dt.now()
    epochs = date.get_end_start_epochs(now.year, now.month, now.day,
                                       'last', self.unit, self.count)
    return poloniex.chart_json(epochs['shifted'], epochs['initial'],
                               self.period, self.symbol)[0]
Gets market chart data from today to a previous date
14.45839
11.414033
1.266721
def set_features(self, partition=1):
    """Build a DataFrame of technical-analysis features over rolling
    windows of *partition* dates.

    :raises ValueError: when there are not enough dates for the partition.
    :returns: DataFrame with numpy.float32 feature columns.
    """
    if len(self.json) < partition + 1:
        raise ValueError('Not enough dates for the specified partition size: {0}. Try a smaller partition.'.format(partition))
    rows = [eval_features(self.json[start:start + partition])
            for start in range(len(self.json) - partition)]
    return pd.DataFrame(data=rows, dtype=np.float32)
Parses market data JSON for technical analysis indicators Args: partition: Int of how many dates to take into consideration when evaluating technical analysis indicators. Returns: Pandas DataFrame instance with columns as numpy.float32 features.
4.578086
3.787918
1.208602
def set_long_features(self, features, columns_to_set=None, partition=2):
    """Add feature columns computed over twice the usual duration.

    Example: with 14-day RSI features this adds a 28-day RSI column
    prefixed with ``long_``.

    :param features: DataFrame of numpy.float32 features.
    :param columns_to_set: feature names for which long versions are kept
        (defaults to none).
    :param partition: number of dates in the (short) indicator window.
    :returns: DataFrame with the original and ``long_``-prefixed columns.
    """
    # Avoid the mutable-default-argument pitfall (was `columns_to_set=[]`).
    if columns_to_set is None:
        columns_to_set = []
    # Create long features DataFrame over a doubled window.
    features_long = self.set_features(partition=2 * partition)
    # Remove features not requested by the caller.
    unwanted_features = [f for f in features.columns if f not in columns_to_set]
    features_long = features_long.drop(unwanted_features, axis=1)
    # Prefix long columns with 'long_' to fix naming conflicts.
    features_long.columns = ['long_{0}'.format(f) for f in features_long.columns]
    # Long features only exist after `partition` dates; align and merge.
    skip = partition
    return pd.concat([features[skip:].reset_index(drop=True), features_long],
                     axis=1)
Sets features of double the duration Example: Setting 14 day RSIs to longer will create add a feature column of a 28 day RSIs. Args: features: Pandas DataFrame instance with columns as numpy.float32 features. columns_to_set: List of strings of feature names to make longer partition: Int of how many dates to take into consideration when evaluating technical analysis indicators. Returns: Pandas DataFrame instance with columns as numpy.float32 features.
4.024715
4.282158
0.93988
def feature_importances(self):
    """Pair each training feature name with its importance in
    classification."""
    names = list(self.features.train)
    return list(zip(names, self.feature_importances_))
Return list of features and their importance in classification
4.965594
4.609257
1.077309
def exitcode(self):
    """Process exit code: 0 on success, positive on exception, negative
    when signaled, None when the process has not exited yet.

    :raises ProcessError: when the process has not been started.
    """
    process = self._process
    if process is None:
        raise ProcessError(
            "Process '%s' has not been started yet" % self.name)
    return process.exitcode
Process exit code. :const:`0` when the process exited successfully, a positive number when an exception occurred, a negative number when the process was signaled, and :data:`None` when the process has not exited yet.
4.355081
3.684859
1.181885
def start(self):
    """Run the process.

    :raises ProcessError: when the process is already running, or when
        the ready flag is not set within ``self._timeout`` seconds.
    """
    if self:
        raise ProcessError(
            "Process '%s' has been already started" % self.name)
    first_run = not self.has_started
    # Run process
    self._process = self._process_cls(*self._process_args)
    self._process.daemon = False
    self._process.start()
    # Wait unless process is successfully started
    if first_run and self._wait_unless_ready:
        if self._timeout:
            # Poll the ready flag until the deadline.
            stop_time = time.time() + self._timeout
            while time.time() < stop_time and not self._process.ready:
                time.sleep(0.25)
            if not self._process.ready:
                raise ProcessError(
                    "Timeout during start process '%s'" % self.name)
        else:
            # No timeout configured: wait indefinitely for readiness.
            while not self._process.ready:
                time.sleep(0.25)
Run the process.
3.112606
2.947494
1.056018
def stop(self):
    """Stop the worker: shut down the HTTP server and stop the IOLoop."""
    if self._http_server is not None:
        self._http_server.stop()
    io_loop = tornado.ioloop.IOLoop.instance()
    io_loop.add_callback(io_loop.stop)
Stop the worker.
2.741032
2.631702
1.041543
def run(self):
    """Tornado worker which handles HTTP requests.

    Sets up the process title and logging, binds the HTTP server to the
    pre-created sockets, installs SIGINT and parent-death handlers, flags
    readiness, and finally blocks in the IOLoop.
    """
    setproctitle.setproctitle("{:s}: worker {:s}".format(
        self.context.config.name,
        self._tornado_app.settings['interface'].name))
    self.logger.info(
        "Worker '%s' has been started with pid %d",
        self._tornado_app.settings['interface'].name, os.getpid())
    # Configure logging
    self.context.config.configure_logging()
    # Create HTTP server instance
    self.http_server = tornado.httpserver.HTTPServer(self._tornado_app)
    # Initialize child
    self.context.initialize_child(TORNADO_WORKER, process=self)

    # Register SIGINT handler which will stop worker
    def sigint_handler(unused_signum, unused_frame):
        io_loop = tornado.ioloop.IOLoop.instance()
        io_loop.add_callback_from_signal(self.stop)
    signal.signal(signal.SIGINT, sigint_handler)

    # Register callback which is called when IOLoop is started;
    # it flips the shared ready flag so the parent knows we are up.
    def run_ioloop_callback():
        self._ready.value = True
    tornado.ioloop.IOLoop.instance().add_callback(run_ioloop_callback)

    # Register job which will stop worker if parent process PID is changed
    # (i.e. the parent died and we were re-parented).
    def check_parent_callback():
        if os.getppid() != self._parent_pid:
            self.stop()
    stop_callback = tornado.ioloop.PeriodicCallback(
        check_parent_callback, 250)
    stop_callback.start()

    # Run HTTP server
    self.http_server.add_sockets(self._sockets)
    # Run IOLoop
    tornado.ioloop.IOLoop.instance().start()
Tornado worker which handles HTTP requests.
3.378746
3.252492
1.038817
def initialize(self):
    """Initialize instance attributes: remember the main pid and build
    the child process list. Override in subclasses to customize."""
    self.main_pid = os.getpid()
    self.processes.extend(
        self.init_service_processes() + self.init_tornado_workers())
Initialize instance attributes. You can override this method in the subclasses.
6.931498
6.673419
1.038673
def sigusr1_handler(self, unused_signum, unused_frame):
    """Handle SIGUSR1.

    In the main process, forward the signal to all child processes; then
    call the function configured in **settings.SIGUSR1_HANDLER**, if any.
    """
    is_main = os.getpid() == self.main_pid
    for process in self.processes:
        if process.pid and is_main:
            try:
                os.kill(process.pid, signal.SIGUSR1)
            except ProcessLookupError:
                # The child already exited; nothing to forward to.
                pass
    if self._sigusr1_handler_func is not None:
        self._sigusr1_handler_func(self.context)
Handle SIGUSR1 signal. Call function which is defined in the **settings.SIGUSR1_HANDLER**. If main process, forward the signal to all child processes.
3.094192
2.88287
1.073303
def init_service_processes(self):
    """Wrap the processes defined in **settings.SERVICE_PROCESSES** into
    :class:`ProcessWrapper` instances and return them as a list."""
    wrappers = []
    service_processes = getattr(
        self.context.config.settings, 'SERVICE_PROCESSES', ())
    for process_struct in service_processes:
        process_cls = import_object(process_struct[0])
        wait_unless_ready, timeout = process_struct[1], process_struct[2]
        self.logger.info("Init service process '%s'", process_cls.__name__)
        wrappers.append(
            ProcessWrapper(
                process_cls, (self.context,),
                wait_unless_ready=wait_unless_ready, timeout=timeout
            )
        )
    return wrappers
Prepare processes defined in the **settings.SERVICE_PROCESSES**. Return :class:`list` of the :class:`ProcessWrapper` instances.
3.860191
3.444832
1.120575
def init_tornado_workers(self):
    """Prepare worker instances for all Tornado applications.

    Binds the listening sockets once per interface so that all workers of
    that interface share them. Return :class:`list` of the
    :class:`ProcessWrapper` instances.

    :raises ValueError: when an interface has neither TCP port nor UNIX
        socket configured.
    """
    workers = []
    for tornado_app in get_tornado_apps(self.context, debug=False):
        interface = tornado_app.settings['interface']
        if not interface.port and not interface.unix_socket:
            raise ValueError(
                'Interface MUST listen either on TCP '
                'or UNIX socket or both')
        name, processes, host, port, unix_socket = (
            interface.name, interface.processes, interface.host,
            interface.port, interface.unix_socket)
        # Non-positive process count means "one worker per CPU".
        if processes <= 0:
            processes = tornado.process.cpu_count()
        sockets = []
        listen_on = []
        if port:
            sockets.extend(tornado.netutil.bind_sockets(port, host))
            listen_on.append("{:s}:{:d}".format(host, port))
        if unix_socket:
            sockets.append(tornado.netutil.bind_unix_socket(unix_socket))
            listen_on.append("{:s}".format(interface.unix_socket))
        self.logger.info(
            "Init %d worker(s) for interface '%s' (%s)",
            processes, name, ", ".join(listen_on))
        # All workers of the same interface share the pre-bound sockets.
        for dummy_i in six.moves.range(processes):
            worker = ProcessWrapper(
                TornadoProcess, (tornado_app, sockets),
                wait_unless_ready=True, timeout=5.0, name=name
            )
            workers.append(worker)
    return workers
Prepare worker instances for all Tornado applications. Return :class:`list` of the :class:`ProcessWrapper` instances.
3.467643
3.35235
1.034392
def start_processes(self, max_restarts=-1):
    """
    Start processes and supervise them, restarting any that stop.

    Loop forever over ``self.processes``: start processes that have
    not been started yet and restart processes that have stopped
    (whether cleanly, by signal, or by crash).

    :param max_restarts: maximum amount of the restarts across all
        processes; a negative value means unlimited. When the budget
        is exhausted, log a fatal message and return.
    """
    while 1:
        for process in self.processes:
            # Falsy process means it is not currently running
            # (NOTE(review): relies on ProcessWrapper truthiness —
            # confirm its __bool__/__nonzero__ semantics).
            if not process:
                # When process has not been started, start it
                if not process.has_started:
                    process.start()
                    continue
                # When process has stopped, start it again
                exitcode = process.exitcode
                if exitcode != 0:
                    # Process has been signaled or crashed
                    if exitcode > 0:
                        self.logger.error(
                            "Process '%s' with pid %d died with exitcode "
                            "%d",
                            process.name, process.pid, exitcode
                        )
                    else:
                        # Negative exitcode encodes the terminating signal
                        self.logger.error(
                            "Process '%s' with pid %d died due to %s",
                            process.name, process.pid,
                            SIGNALS_TO_NAMES_DICT[abs(exitcode)]
                        )
                    # Max restarts has been reached, exit
                    if not max_restarts:
                        self.logger.fatal("Too many child restarts")
                        break
                    # Start process again
                    process.start()
                    # Decrement max_restarts counter
                    if max_restarts > 0:
                        max_restarts -= 1
                else:
                    # Process has stopped without error
                    self.logger.info(
                        "Process '%s' with pid %d has stopped",
                        process.name, process.pid
                    )
                    # Start process again
                    process.start()
                self.logger.info(
                    "Process '%s' has been started with pid %d",
                    process.name, process.pid
                )
        else:
            # for/else: no break occurred — poll again after a short sleep
            time.sleep(0.25)
            continue
        # Reached only via break (restart budget exhausted)
        break
2.726972
2.65678
1.02642
def command(self):
    """
    **runserver** command implementation.

    Set the master process title, start and supervise all child
    processes (allowing up to 100 restarts), and on interruption
    stop every child process before returning.
    """
    setproctitle.setproctitle(
        "{:s}: master process '{:s}'".format(
            self.context.config.name, " ".join(sys.argv)
        ))
    # Init and start processes
    try:
        self.start_processes(max_restarts=100)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server — not an error
        pass
    # Stop processes
    for process in self.processes:
        process.stop()
5.430667
5.180686
1.048253
def taxtable_to_tree(handle):
    """
    Read a CSV taxonomy from *handle* into a Tree.

    The first data row is taken as the root; every subsequent row is
    attached to its parent, which must already be present in the
    root's ``descendents`` mapping (i.e. rows are parent-first).

    :param handle: file-like object with a header row followed by
        rows containing at least ``tax_id``, ``parent_id``, ``rank``
        and ``tax_name`` columns.
    :return: the root :class:`Tree` node.
    """
    reader = csv.reader(handle, quoting=csv.QUOTE_NONNUMERIC)
    header = next(reader)
    # First data row describes the root node.
    root_rec = dict(zip(header, next(reader)))
    tree = Tree(root_rec['tax_id'], rank=root_rec['rank'],
                tax_name=root_rec['tax_name'])
    for row in reader:
        rec = dict(zip(header, row))
        # Parent must already be attached; look it up by tax_id.
        parent = tree.descendents[rec['parent_id']]
        parent(Tree(rec['tax_id'], rank=rec['rank'],
                    tax_name=rec['tax_name']))
    return tree
3.357108
3.319257
1.011403
def lonely_company(taxonomy, tax_ids):
    """
    Return a list of species tax_ids which will make those in
    *tax_ids* not lonely: for each tax_id, a species below its
    sibling. The returned species will probably themselves be lonely.
    """
    companions = []
    for tax_id in tax_ids:
        sibling = taxonomy.sibling_of(tax_id)
        companions.append(taxonomy.species_below(sibling))
    return companions
13.415593
13.831108
0.969958
def solid_company(taxonomy, tax_ids):
    """
    Return a list of non-lonely species tax_ids that will make those
    in *tax_ids* not lonely: for each tax_id, up to two species from
    the subtree under its sibling (skipped when the subtree lookup
    yields nothing).
    """
    return [
        species
        for tax_id in tax_ids
        for species in (
            taxonomy.nary_subtree(taxonomy.sibling_of(tax_id), 2) or []
        )
    ]
7.657255
8.394488
0.912177