sentence1: string (lengths 52 to 3.87M)
sentence2: string (lengths 1 to 47.2k)
label: string (1 class)
def IOR(type, nr, size): """ An ioctl with read parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument. """ return IOC(IOC_READ, type, nr, IOC_TYPECHECK(size))
An ioctl with read parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument.
entailment
def IOW(type, nr, size): """ An ioctl with write parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument. """ return IOC(IOC_WRITE, type, nr, IOC_TYPECHECK(size))
An ioctl with write parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument.
entailment
def IOWR(type, nr, size): """ An ioctl with both read and write parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument. """ return IOC(IOC_READ | IOC_WRITE, type, nr, IOC_TYPECHECK(size))
An ioctl with both read and write parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument.
entailment
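The three helpers above rely on `IOC`, `IOC_READ`/`IOC_WRITE` and `IOC_TYPECHECK`, which are not shown. A hedged, self-contained sketch that fills them in using the bit layout of `asm-generic/ioctl.h` (valid on most, though not all, Linux architectures; the shift constants and the example request are assumptions, not part of the original):

```python
import ctypes

# Bit layout from asm-generic/ioctl.h on most Linux architectures.
IOC_NRSHIFT, IOC_TYPESHIFT, IOC_SIZESHIFT, IOC_DIRSHIFT = 0, 8, 16, 30
IOC_NONE, IOC_WRITE, IOC_READ = 0, 1, 2

def IOC_TYPECHECK(t):
    """Size in bytes of a ctypes type or instance."""
    return ctypes.sizeof(t)

def IOC(dir_, type_, nr, size):
    """Pack direction, type, number and size into one ioctl request code."""
    type_ = ord(type_) if isinstance(type_, str) else type_
    return (dir_ << IOC_DIRSHIFT) | (type_ << IOC_TYPESHIFT) | \
           (nr << IOC_NRSHIFT) | (size << IOC_SIZESHIFT)

def IOR(type, nr, size):
    """An ioctl with read parameters (as in the snippet above)."""
    return IOC(IOC_READ, type, nr, IOC_TYPECHECK(size))

# Example: a read-style request code for a 4-byte integer argument.
print(hex(IOR('V', 0, ctypes.c_int)))  # -> 0x80045600
```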
def _get_last_dirs(path, num=1): """Get a path including only the trailing `num` directories. Returns ------- last_path : str """ head, tail = os.path.split(path) last_path = str(tail) for ii in range(num): head, tail = os.path.split(head) last_path = os.path.join(tail, last_path) last_path = "..." + last_path return last_path
Get a path including only the trailing `num` directories. Returns ------- last_path : str
entailment
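A minimal check of the helper's behavior, restating it so it runs standalone (the example path is made up):

```python
import os

def _get_last_dirs(path, num=1):
    """Keep only the trailing `num` directories plus the filename (copied from above)."""
    head, tail = os.path.split(path)
    last_path = str(tail)
    for ii in range(num):
        head, tail = os.path.split(head)
        last_path = os.path.join(tail, last_path)
    return "..." + last_path

# Keeps the last two directory components and the filename, prefixed with "...".
print(_get_last_dirs("/data/catalogs/supernovae/output/sne-2015.json", num=2))
# -> ...supernovae/output/sne-2015.json
```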
def analyze(self, args): """Run the analysis routines determined from the given `args`. """ self.log.info("Running catalog analysis") if args.count: self.count() return
Run the analysis routines determined from the given `args`.
entailment
def count(self): """Analyze the counts of ...things. Returns ------- retvals : dict Dictionary of 'property-name: counts' pairs for further processing """ self.log.info("Running 'count'") retvals = {} # Numbers of 'tasks' num_tasks = self._count_tasks() retvals['num_tasks'] = num_tasks # Numbers of 'files' num_files = self._count_repo_files() retvals['num_files'] = num_files return retvals
Analyze the counts of ...things. Returns ------- retvals : dict Dictionary of 'property-name: counts' pairs for further processing
entailment
def _count_tasks(self): """Count the number of tasks, both in the json and directory. Returns ------- num_tasks : int The total number of all tasks included in the `tasks.json` file. """ self.log.warning("Tasks:") tasks, task_names = self.catalog._load_task_list_from_file() # Total number of all tasks num_tasks = len(tasks) # Number which are active by default num_tasks_act = len([tt for tt, vv in tasks.items() if vv.active]) # Number of python files in the tasks directory num_task_files = os.path.join(self.catalog.PATHS.tasks_dir, '*.py') num_task_files = len(glob(num_task_files)) tasks_str = "{} ({} default active) with {} task-files.".format( num_tasks, num_tasks_act, num_task_files) self.log.warning(tasks_str) return num_tasks
Count the number of tasks, both in the json and directory. Returns ------- num_tasks : int The total number of all tasks included in the `tasks.json` file.
entailment
def _count_repo_files(self): """Count the number of files in the data repositories. `_COUNT_FILE_TYPES` are used to determine which file types are checked explicitly. `_IGNORE_FILES` determine which files are ignored in (most) counts. Returns ------- repo_files : int Total number of (non-ignored) files in all data repositories. """ self.log.warning("Files:") num_files = 0 repos = self.catalog.PATHS.get_all_repo_folders() num_type = np.zeros(len(self._COUNT_FILE_TYPES), dtype=int) num_ign = 0 for rep in repos: # Get the last portion of the filepath for this repo last_path = _get_last_dirs(rep, 2) # Get counts for different file types n_all = self._count_files_by_type(rep, '*') n_type = np.zeros(len(self._COUNT_FILE_TYPES), dtype=int) for ii, ftype in enumerate(self._COUNT_FILE_TYPES): n_type[ii] = self._count_files_by_type(rep, '*.' + ftype) # Get the number of ignored files # (total including ignore, minus 'all') n_ign = self._count_files_by_type(rep, '*', ignore=False) n_ign -= n_all f_str = self._file_nums_str(n_all, n_type, n_ign) f_str = "{}: {}".format(last_path, f_str) self.log.warning(f_str) # Update cumulative counts num_files += n_all num_type += n_type num_ign += n_ign f_str = self._file_nums_str(num_files, num_type, num_ign) self.log.warning(f_str) return num_files
Count the number of files in the data repositories. `_COUNT_FILE_TYPES` are used to determine which file types are checked explicitly. `_IGNORE_FILES` determine which files are ignored in (most) counts. Returns ------- repo_files : int Total number of (non-ignored) files in all data repositories.
entailment
def _file_nums_str(self, n_all, n_type, n_ign): """Construct a string showing the number of different file types. Returns ------- f_str : str """ # 'other' is the difference between all and named n_oth = n_all - np.sum(n_type) f_str = "{} Files".format(n_all) + " (" if len(n_type): f_str += ", ".join("{} {}".format(name, num) for name, num in zip(self._COUNT_FILE_TYPES, n_type)) f_str += ", " f_str += "other {}; {} ignored)".format(n_oth, n_ign) return f_str
Construct a string showing the number of different file types. Returns ------- f_str : str
entailment
def _count_files_by_type(self, path, pattern, ignore=True): """Count files in the given path, with the given pattern. If `ignore = True`, skip files in the `_IGNORE_FILES` list. Returns ------- num_files : int """ # Get all files matching the given path and pattern files = glob(os.path.join(path, pattern)) # Count the files files = [ff for ff in files if os.path.split(ff)[-1] not in self._IGNORE_FILES or not ignore] num_files = len(files) return num_files
Count files in the given path, with the given pattern. If `ignore = True`, skip files in the `_IGNORE_FILES` list. Returns ------- num_files : int
entailment
def bibcode_from_url(cls, url): """Given a URL, try to find the ADS bibcode. Currently only `ads`-style URLs (containing '/abs/') will work. Returns ------- code : str or None The bibcode if found, otherwise None. """ try: code = url.split('/abs/') code = code[1].strip() return code except Exception: return None
Given a URL, try to find the ADS bibcode. Currently only `ads`-style URLs (containing '/abs/') will work. Returns ------- code : str or None The bibcode if found, otherwise None.
entailment
def _get_save_path(self, bury=False): """Return the path that this Entry should be saved to.""" filename = self.get_filename(self[self._KEYS.NAME]) # Put objects that shouldn't belong in this catalog in the boneyard if bury: outdir = self.catalog.get_repo_boneyard() # Get normal repository save directory else: repo_folders = self.catalog.PATHS.get_repo_output_folders() # If no repo folders exist, raise an error -- cannot save if not len(repo_folders): err_str = ( "No output data repositories found. Cannot save.\n" "Make sure that repo names are correctly configured " "in the `input/repos.json` file, and either manually or " "automatically (using `astrocats CATALOG git-clone`) " "clone the appropriate data repositories.") self.catalog.log.error(err_str) raise RuntimeError(err_str) outdir = repo_folders[0] return outdir, filename
Return the path that this Entry should be saved to.
entailment
def _ordered(self, odict): """Convert the object into a plain OrderedDict.""" ndict = OrderedDict() if isinstance(odict, CatDict) or isinstance(odict, Entry): key = odict.sort_func else: key = None nkeys = list(sorted(odict.keys(), key=key)) for key in nkeys: if isinstance(odict[key], OrderedDict): odict[key] = self._ordered(odict[key]) if isinstance(odict[key], list): if (not (odict[key] and not isinstance(odict[key][0], OrderedDict))): nlist = [] for item in odict[key]: if isinstance(item, OrderedDict): nlist.append(self._ordered(item)) else: nlist.append(item) odict[key] = nlist ndict[key] = odict[key] return ndict
Convert the object into a plain OrderedDict.
entailment
def get_hash(self, keys=[]): """Return a unique hash associated with the listed keys.""" if not len(keys): keys = list(self.keys()) string_rep = '' oself = self._ordered(deepcopy(self)) for key in keys: string_rep += json.dumps(oself.get(key, ''), sort_keys=True) return hashlib.sha512(string_rep.encode()).hexdigest()[:16]
Return a unique hash associated with the listed keys.
entailment
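A standalone sketch of the same idea used by `get_hash`: JSON-serialize the selected keys in a deterministic order and truncate a SHA-512 digest. The dictionary contents and the function name are illustrative, not from the original:

```python
import hashlib
import json

def dict_hash(data, keys=None):
    """Deterministic 16-character hash over selected keys of a plain dict."""
    if not keys:
        keys = sorted(data.keys())
    string_rep = ''.join(json.dumps(data.get(key, ''), sort_keys=True) for key in keys)
    return hashlib.sha512(string_rep.encode()).hexdigest()[:16]

entry = {'name': 'SN1987A', 'ra': '05:35:28.020', 'dec': '-69:16:11.07'}
print(dict_hash(entry))                       # hash over all keys
print(dict_hash(entry, keys=['name', 'ra']))  # hash over a subset of keys
```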
def _clean_quantity(self, quantity): """Clean quantity value before it is added to entry.""" value = quantity.get(QUANTITY.VALUE, '').strip() error = quantity.get(QUANTITY.E_VALUE, '').strip() unit = quantity.get(QUANTITY.U_VALUE, '').strip() kind = quantity.get(QUANTITY.KIND, '') if isinstance(kind, list) and not isinstance(kind, string_types): kind = [x.strip() for x in kind] else: kind = kind.strip() if not value: return False if is_number(value): value = '%g' % Decimal(value) if error: error = '%g' % Decimal(error) if value: quantity[QUANTITY.VALUE] = value if error: quantity[QUANTITY.E_VALUE] = error if unit: quantity[QUANTITY.U_VALUE] = unit if kind: quantity[QUANTITY.KIND] = kind return True
Clean quantity value before it is added to entry.
entailment
def _convert_odict_to_classes(self, data, clean=False, merge=True, pop_schema=True, compare_to_existing=True, filter_on={}): """Convert `OrderedDict` into `Entry` or its derivative classes.""" self._log.debug("_convert_odict_to_classes(): {}".format(self.name())) self._log.debug("This should be a temporary fix. Dont be lazy.") # Setup filters. Currently only used for photometry. fkeys = list(filter_on.keys()) # Handle 'name' name_key = self._KEYS.NAME if name_key in data: self[name_key] = data.pop(name_key) # Handle 'schema' schema_key = self._KEYS.SCHEMA if schema_key in data: # Schema should be re-added every execution (done elsewhere) so # just delete the old entry if pop_schema: data.pop(schema_key) else: self[schema_key] = data.pop(schema_key) # Cleanup 'internal' repository stuff if clean: # Add data to `self` in ways accomodating 'internal' formats and # leeway. Removes each added entry from `data` so the remaining # stuff can be handled normally data = self.clean_internal(data) # Handle 'sources' # ---------------- src_key = self._KEYS.SOURCES if src_key in data: # Remove from `data` sources = data.pop(src_key) self._log.debug("Found {} '{}' entries".format( len(sources), src_key)) self._log.debug("{}: {}".format(src_key, sources)) for src in sources: self.add_source(allow_alias=True, **src) # Handle `photometry` # ------------------- photo_key = self._KEYS.PHOTOMETRY if photo_key in data: photoms = data.pop(photo_key) self._log.debug("Found {} '{}' entries".format( len(photoms), photo_key)) phcount = 0 for photo in photoms: skip = False for fkey in fkeys: if fkey in photo and photo[fkey] not in filter_on[fkey]: skip = True if skip: continue self._add_cat_dict( Photometry, self._KEYS.PHOTOMETRY, compare_to_existing=compare_to_existing, **photo) phcount += 1 self._log.debug("Added {} '{}' entries".format( phcount, photo_key)) # Handle `spectra` # --------------- spec_key = self._KEYS.SPECTRA if spec_key in data: # When we are cleaning internal data, we don't always want to # require all of the normal spectrum data elements. spectra = data.pop(spec_key) self._log.debug("Found {} '{}' entries".format( len(spectra), spec_key)) for spec in spectra: self._add_cat_dict( Spectrum, self._KEYS.SPECTRA, compare_to_existing=compare_to_existing, **spec) # Handle `error` # -------------- err_key = self._KEYS.ERRORS if err_key in data: errors = data.pop(err_key) self._log.debug("Found {} '{}' entries".format( len(errors), err_key)) for err in errors: self._add_cat_dict(Error, self._KEYS.ERRORS, **err) # Handle `models` # --------------- model_key = self._KEYS.MODELS if model_key in data: # When we are cleaning internal data, we don't always want to # require all of the normal spectrum data elements. model = data.pop(model_key) self._log.debug("Found {} '{}' entries".format( len(model), model_key)) for mod in model: self._add_cat_dict( Model, self._KEYS.MODELS, compare_to_existing=compare_to_existing, **mod) # Handle everything else --- should be `Quantity`s # ------------------------------------------------ if len(data): self._log.debug("{} remaining entries, assuming `Quantity`".format( len(data))) # Iterate over remaining keys for key in list(data.keys()): vals = data.pop(key) # All quantities should be in lists of that quantity # E.g. 
`aliases` is a list of alias quantities if not isinstance(vals, list): vals = [vals] self._log.debug("{}: {}".format(key, vals)) for vv in vals: self._add_cat_dict( Quantity, key, check_for_dupes=merge, compare_to_existing=compare_to_existing, **vv) if merge and self.dupe_of: self.merge_dupes() return
Convert `OrderedDict` into `Entry` or its derivative classes.
entailment
def _check_cat_dict_source(self, cat_dict_class, key_in_self, **kwargs): """Check that a source exists and that a quantity isn't erroneous.""" # Make sure that a source is given source = kwargs.get(cat_dict_class._KEYS.SOURCE, None) if source is None: raise CatDictError( "{}: `source` must be provided!".format(self[self._KEYS.NAME]), warn=True) # Check that source is a list of integers for x in source.split(','): if not is_integer(x): raise CatDictError( "{}: `source` is comma-delimited list of " " integers!".format(self[self._KEYS.NAME]), warn=True) # If this source/data is erroneous, skip it if self.is_erroneous(key_in_self, source): self._log.info("This source is erroneous, skipping") return None # If this source/data is private, skip it if (self.catalog.args is not None and not self.catalog.args.private and self.is_private(key_in_self, source)): self._log.info("This source is private, skipping") return None return source
Check that a source exists and that a quantity isn't erroneous.
entailment
def _add_cat_dict(self, cat_dict_class, key_in_self, check_for_dupes=True, compare_to_existing=True, **kwargs): """Add a `CatDict` to this `Entry`. CatDict only added if initialization succeeds and it doesn't already exist within the Entry. """ # Make sure that a source is given, and is valid (nor erroneous) if cat_dict_class != Error: try: source = self._check_cat_dict_source(cat_dict_class, key_in_self, **kwargs) except CatDictError as err: if err.warn: self._log.info("'{}' Not adding '{}': '{}'".format(self[ self._KEYS.NAME], key_in_self, str(err))) return False if source is None: return False # Try to create a new instance of this subclass of `CatDict` new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs) if new_entry is None: return False # Compare this new entry with all previous entries to make sure is new if compare_to_existing and cat_dict_class != Error: for item in self.get(key_in_self, []): if new_entry.is_duplicate_of(item): item.append_sources_from(new_entry) # Return the entry in case we want to use any additional # tags to augment the old entry return new_entry # If this is an alias, add it to the parent catalog's reverse # dictionary linking aliases to names for fast lookup. if key_in_self == self._KEYS.ALIAS: # Check if this adding this alias makes us a dupe, if so mark # ourselves as a dupe. if (check_for_dupes and 'aliases' in dir(self.catalog) and new_entry[QUANTITY.VALUE] in self.catalog.aliases): possible_dupe = self.catalog.aliases[new_entry[QUANTITY.VALUE]] # print(possible_dupe) if (possible_dupe != self[self._KEYS.NAME] and possible_dupe in self.catalog.entries): self.dupe_of.append(possible_dupe) if 'aliases' in dir(self.catalog): self.catalog.aliases[new_entry[QUANTITY.VALUE]] = self[ self._KEYS.NAME] self.setdefault(key_in_self, []).append(new_entry) if (key_in_self == self._KEYS.ALIAS and check_for_dupes and self.dupe_of): self.merge_dupes() return True
Add a `CatDict` to this `Entry`. CatDict only added if initialization succeeds and it doesn't already exist within the Entry.
entailment
def init_from_file(cls, catalog, name=None, path=None, clean=False, merge=True, pop_schema=True, ignore_keys=[], compare_to_existing=True, try_gzip=False, filter_on={}): """Construct a new `Entry` instance from an input file. The input file can be given explicitly by `path`, or a path will be constructed appropriately if possible. Arguments --------- catalog : `astrocats.catalog.catalog.Catalog` instance The parent catalog object of which this entry belongs. name : str or 'None' The name of this entry, e.g. `SN1987A` for a `Supernova` entry. If no `path` is given, a path is constructed by trying to find a file in one of the 'output' repositories with this `name`. note: either `name` or `path` must be provided. path : str or 'None' The absolutely path of the input file. note: either `name` or `path` must be provided. clean : bool Whether special sanitization processing should be done on the input data. This is mostly for input files from the 'internal' repositories. """ if not catalog: from astrocats.catalog.catalog import Catalog log = logging.getLogger() catalog = Catalog(None, log) catalog.log.debug("init_from_file()") if name is None and path is None: err = ("Either entry `name` or `path` must be specified to load " "entry.") log.error(err) raise ValueError(err) # If the path is given, use that to load from load_path = '' if path is not None: load_path = path name = '' # If the name is given, try to find a path for it else: repo_paths = catalog.PATHS.get_repo_output_folders() for rep in repo_paths: filename = cls.get_filename(name) newpath = os.path.join(rep, filename + '.json') if os.path.isfile(newpath): load_path = newpath break if load_path is None or not os.path.isfile(load_path): # FIX: is this warning worthy? return None # Create a new `Entry` instance new_entry = cls(catalog, name) # Check if .gz file if try_gzip and not load_path.endswith('.gz'): try_gzip = False # Fill it with data from json file new_entry._load_data_from_json( load_path, clean=clean, merge=merge, pop_schema=pop_schema, ignore_keys=ignore_keys, compare_to_existing=compare_to_existing, gzip=try_gzip, filter_on=filter_on) return new_entry
Construct a new `Entry` instance from an input file. The input file can be given explicitly by `path`, or a path will be constructed appropriately if possible. Arguments --------- catalog : `astrocats.catalog.catalog.Catalog` instance The parent catalog object to which this entry belongs. name : str or 'None' The name of this entry, e.g. `SN1987A` for a `Supernova` entry. If no `path` is given, a path is constructed by trying to find a file in one of the 'output' repositories with this `name`. note: either `name` or `path` must be provided. path : str or 'None' The absolute path of the input file. note: either `name` or `path` must be provided. clean : bool Whether special sanitization processing should be done on the input data. This is mostly for input files from the 'internal' repositories.
entailment
def add_alias(self, alias, source, clean=True): """Add an alias, optionally 'cleaning' the alias string. Calls the parent `catalog` method `clean_entry_name` - to apply the same name-cleaning as is applied to entry names themselves. Returns ------- alias : str The stored version of the alias (cleaned or not). """ if clean: alias = self.catalog.clean_entry_name(alias) self.add_quantity(self._KEYS.ALIAS, alias, source) return alias
Add an alias, optionally 'cleaning' the alias string. Calls the parent `catalog` method `clean_entry_name` - to apply the same name-cleaning as is applied to entry names themselves. Returns ------- alias : str The stored version of the alias (cleaned or not).
entailment
def add_error(self, value, **kwargs): """Add an `Error` instance to this entry.""" kwargs.update({ERROR.VALUE: value}) self._add_cat_dict(Error, self._KEYS.ERRORS, **kwargs) return
Add an `Error` instance to this entry.
entailment
def add_photometry(self, compare_to_existing=True, **kwargs): """Add a `Photometry` instance to this entry.""" self._add_cat_dict( Photometry, self._KEYS.PHOTOMETRY, compare_to_existing=compare_to_existing, **kwargs) return
Add a `Photometry` instance to this entry.
entailment
def merge_dupes(self): """Merge two entries that correspond to the same entry.""" for dupe in self.dupe_of: if dupe in self.catalog.entries: if self.catalog.entries[dupe]._stub: # merge = False to avoid infinite recursion self.catalog.load_entry_from_name( dupe, delete=True, merge=False) self.catalog.copy_entry_to_entry(self.catalog.entries[dupe], self) del self.catalog.entries[dupe] self.dupe_of = []
Merge two entries that correspond to the same entry.
entailment
def add_quantity(self, quantities, value, source, check_for_dupes=True, compare_to_existing=True, **kwargs): """Add a `Quantity` instance to this entry.""" success = True for quantity in listify(quantities): kwargs.update({QUANTITY.VALUE: value, QUANTITY.SOURCE: source}) cat_dict = self._add_cat_dict( Quantity, quantity, compare_to_existing=compare_to_existing, check_for_dupes=check_for_dupes, **kwargs) if isinstance(cat_dict, CatDict): self._append_additional_tags(quantity, source, cat_dict) success = False return success
Add a `Quantity` instance to this entry.
entailment
def add_self_source(self): """Add a source that refers to the catalog itself. For now this points to the Open Supernova Catalog by default. """ return self.add_source( bibcode=self.catalog.OSC_BIBCODE, name=self.catalog.OSC_NAME, url=self.catalog.OSC_URL, secondary=True)
Add a source that refers to the catalog itself. For now this points to the Open Supernova Catalog by default.
entailment
def add_source(self, allow_alias=False, **kwargs): """Add a `Source` instance to this entry.""" if not allow_alias and SOURCE.ALIAS in kwargs: err_str = "`{}` passed in kwargs, this shouldn't happen!".format( SOURCE.ALIAS) self._log.error(err_str) raise RuntimeError(err_str) # Set alias number to be +1 of current number of sources if SOURCE.ALIAS not in kwargs: kwargs[SOURCE.ALIAS] = str(self.num_sources() + 1) source_obj = self._init_cat_dict(Source, self._KEYS.SOURCES, **kwargs) if source_obj is None: return None for item in self.get(self._KEYS.SOURCES, ''): if source_obj.is_duplicate_of(item): return item[item._KEYS.ALIAS] self.setdefault(self._KEYS.SOURCES, []).append(source_obj) return source_obj[source_obj._KEYS.ALIAS]
Add a `Source` instance to this entry.
entailment
def add_model(self, allow_alias=False, **kwargs): """Add a `Model` instance to this entry.""" if not allow_alias and MODEL.ALIAS in kwargs: err_str = "`{}` passed in kwargs, this shouldn't happen!".format( MODEL.ALIAS) self._log.error(err_str) raise RuntimeError(err_str) # Set alias number to be +1 of current number of models if MODEL.ALIAS not in kwargs: kwargs[MODEL.ALIAS] = str(self.num_models() + 1) model_obj = self._init_cat_dict(Model, self._KEYS.MODELS, **kwargs) if model_obj is None: return None for item in self.get(self._KEYS.MODELS, ''): if model_obj.is_duplicate_of(item): return item[item._KEYS.ALIAS] self.setdefault(self._KEYS.MODELS, []).append(model_obj) return model_obj[model_obj._KEYS.ALIAS]
Add a `Model` instance to this entry.
entailment
def add_spectrum(self, compare_to_existing=True, **kwargs): """Add a `Spectrum` instance to this entry.""" spec_key = self._KEYS.SPECTRA # Make sure that a source is given, and is valid (nor erroneous) source = self._check_cat_dict_source(Spectrum, spec_key, **kwargs) if source is None: return None # Try to create a new instance of `Spectrum` new_spectrum = self._init_cat_dict(Spectrum, spec_key, **kwargs) if new_spectrum is None: return None is_dupe = False for item in self.get(spec_key, []): # Only the `filename` should be compared for duplicates. If a # duplicate is found, that means the previous `exclude` array # should be saved to the new object, and the old deleted if new_spectrum.is_duplicate_of(item): if SPECTRUM.EXCLUDE in new_spectrum: item[SPECTRUM.EXCLUDE] = new_spectrum[SPECTRUM.EXCLUDE] elif SPECTRUM.EXCLUDE in item: item.update(new_spectrum) is_dupe = True break if not is_dupe: self.setdefault(spec_key, []).append(new_spectrum) return
Add a `Spectrum` instance to this entry.
entailment
def check(self): """Check that the entry has the required fields.""" # Make sure there is a schema key in dict if self._KEYS.SCHEMA not in self: self[self._KEYS.SCHEMA] = self.catalog.SCHEMA.URL # Make sure there is a name key in dict if (self._KEYS.NAME not in self or len(self[self._KEYS.NAME]) == 0): raise ValueError("Entry name is empty:\n\t{}".format( json.dumps( self, indent=2))) return
Check that the entry has the required fields.
entailment
def get_aliases(self, includename=True): """Retrieve the aliases of this object as a list of strings. Arguments --------- includename : bool Include the 'name' parameter in the list of aliases. """ # empty list if doesnt exist alias_quanta = self.get(self._KEYS.ALIAS, []) aliases = [aq[QUANTITY.VALUE] for aq in alias_quanta] if includename and self[self._KEYS.NAME] not in aliases: aliases = [self[self._KEYS.NAME]] + aliases return aliases
Retrieve the aliases of this object as a list of strings. Arguments --------- includename : bool Include the 'name' parameter in the list of aliases.
entailment
def get_entry_text(self, fname): """Retrieve the raw text from a file.""" if fname.split('.')[-1] == 'gz': with gz.open(fname, 'rt') as f: filetext = f.read() else: with codecs.open(fname, 'r') as f: filetext = f.read() return filetext
Retrieve the raw text from a file.
entailment
def get_source_by_alias(self, alias): """Given an alias, find the corresponding source in this entry. If the given alias doesn't exist (e.g. there are no sources), then a `ValueError` is raised. Arguments --------- alias : str The str-integer (e.g. '8') of the target source. Returns ------- source : `astrocats.catalog.source.Source` object The source object corresponding to the passed alias. """ for source in self.get(self._KEYS.SOURCES, []): if source[self._KEYS.ALIAS] == alias: return source raise ValueError("Source '{}': alias '{}' not found!".format(self[ self._KEYS.NAME], alias))
Given an alias, find the corresponding source in this entry. If the given alias doesn't exist (e.g. there are no sources), then a `ValueError` is raised. Arguments --------- alias : str The str-integer (e.g. '8') of the target source. Returns ------- source : `astrocats.catalog.source.Source` object The source object corresponding to the passed alias.
entailment
def get_stub(self): """Get a new `Entry` which contains the 'stub' of this one. The 'stub' is only the name and aliases. Usage: ----- To convert a normal entry into a stub (for example), overwrite the entry in place, i.e. >>> entries[name] = entries[name].get_stub() Returns ------- stub : `astrocats.catalog.entry.Entry` subclass object The type of the returned object is this instance's type. """ stub = type(self)(self.catalog, self[self._KEYS.NAME], stub=True) if self._KEYS.ALIAS in self: stub[self._KEYS.ALIAS] = self[self._KEYS.ALIAS] if self._KEYS.DISTINCT_FROM in self: stub[self._KEYS.DISTINCT_FROM] = self[self._KEYS.DISTINCT_FROM] if self._KEYS.RA in self: stub[self._KEYS.RA] = self[self._KEYS.RA] if self._KEYS.DEC in self: stub[self._KEYS.DEC] = self[self._KEYS.DEC] if self._KEYS.DISCOVER_DATE in self: stub[self._KEYS.DISCOVER_DATE] = self[self._KEYS.DISCOVER_DATE] if self._KEYS.SOURCES in self: stub[self._KEYS.SOURCES] = self[self._KEYS.SOURCES] return stub
Get a new `Entry` which contains the 'stub' of this one. The 'stub' is only the name and aliases. Usage: ----- To convert a normal entry into a stub (for example), overwrite the entry in place, i.e. >>> entries[name] = entries[name].get_stub() Returns ------- stub : `astrocats.catalog.entry.Entry` subclass object The type of the returned object is this instance's type.
entailment
def is_erroneous(self, field, sources): """Check if attribute has been marked as being erroneous.""" if self._KEYS.ERRORS in self: my_errors = self[self._KEYS.ERRORS] for alias in sources.split(','): source = self.get_source_by_alias(alias) bib_err_values = [ err[ERROR.VALUE] for err in my_errors if err[ERROR.KIND] == SOURCE.BIBCODE and err[ERROR.EXTRA] == field ] if (SOURCE.BIBCODE in source and source[SOURCE.BIBCODE] in bib_err_values): return True name_err_values = [ err[ERROR.VALUE] for err in my_errors if err[ERROR.KIND] == SOURCE.NAME and err[ERROR.EXTRA] == field ] if (SOURCE.NAME in source and source[SOURCE.NAME] in name_err_values): return True return False
Check if attribute has been marked as being erroneous.
entailment
def is_private(self, key, sources): """Check if attribute is private.""" # aliases are always public. if key == ENTRY.ALIAS: return False return all([ SOURCE.PRIVATE in self.get_source_by_alias(x) for x in sources.split(',') ])
Check if attribute is private.
entailment
def sanitize(self): """Sanitize the data (sort it, etc.) before writing it to disk. Template method that can be overridden in each catalog's subclassed `Entry` object. """ name = self[self._KEYS.NAME] aliases = self.get_aliases(includename=False) if name not in aliases: # Assign the first source to alias, if not available assign us. if self._KEYS.SOURCES in self: self.add_quantity(self._KEYS.ALIAS, name, '1') if self._KEYS.ALIAS not in self: source = self.add_self_source() self.add_quantity(self._KEYS.ALIAS, name, source) else: source = self.add_self_source() self.add_quantity(self._KEYS.ALIAS, name, source) if self._KEYS.ALIAS in self: self[self._KEYS.ALIAS].sort( key=lambda key: alias_priority(name, key[QUANTITY.VALUE])) else: self._log.error( 'There should be at least one alias for `{}`.'.format(name)) if self._KEYS.PHOTOMETRY in self: self[self._KEYS.PHOTOMETRY].sort( key=lambda x: ((float(x[PHOTOMETRY.TIME]) if isinstance(x[PHOTOMETRY.TIME], (basestring, float, int)) else min([float(y) for y in x[PHOTOMETRY.TIME]])) if PHOTOMETRY.TIME in x else 0.0, x[PHOTOMETRY.BAND] if PHOTOMETRY.BAND in x else '', float(x[PHOTOMETRY.MAGNITUDE]) if PHOTOMETRY.MAGNITUDE in x else '')) if (self._KEYS.SPECTRA in self and list( filter(None, [ SPECTRUM.TIME in x for x in self[self._KEYS.SPECTRA] ]))): self[self._KEYS.SPECTRA].sort( key=lambda x: (float(x[SPECTRUM.TIME]) if SPECTRUM.TIME in x else 0.0, x[SPECTRUM.FILENAME] if SPECTRUM.FILENAME in x else '') ) if self._KEYS.SOURCES in self: # Remove orphan sources source_aliases = [ x[SOURCE.ALIAS] for x in self[self._KEYS.SOURCES] ] # Sources with the `PRIVATE` attribute are always retained source_list = [ x[SOURCE.ALIAS] for x in self[self._KEYS.SOURCES] if SOURCE.PRIVATE in x ] for key in self: # if self._KEYS.get_key_by_name(key).no_source: if (key in [ self._KEYS.NAME, self._KEYS.SCHEMA, self._KEYS.SOURCES, self._KEYS.ERRORS ]): continue for item in self[key]: source_list += item[item._KEYS.SOURCE].split(',') new_src_list = sorted( list(set(source_aliases).intersection(source_list))) new_sources = [] for source in self[self._KEYS.SOURCES]: if source[SOURCE.ALIAS] in new_src_list: new_sources.append(source) else: self._log.info('Removing orphaned source from `{}`.' .format(name)) if not new_sources: del self[self._KEYS.SOURCES] self[self._KEYS.SOURCES] = new_sources
Sanitize the data (sort it, etc.) before writing it to disk. Template method that can be overridden in each catalog's subclassed `Entry` object.
entailment
def save(self, bury=False, final=False): """Write entry to JSON file in the proper location. Arguments --------- bury : bool final : bool If this is the 'final' save, perform additional sanitization and cleaning operations. """ outdir, filename = self._get_save_path(bury=bury) if final: self.sanitize() # FIX: use 'dump' not 'dumps' jsonstring = json.dumps( { self[self._KEYS.NAME]: self._ordered(self) }, indent='\t' if sys.version_info[0] >= 3 else 4, separators=(',', ':'), ensure_ascii=False) if not os.path.isdir(outdir): raise RuntimeError("Output directory '{}' for event '{}' does " "not exist.".format(outdir, self[ self._KEYS.NAME])) save_name = os.path.join(outdir, filename + '.json') with codecs.open(save_name, 'w', encoding='utf8') as sf: sf.write(jsonstring) if not os.path.exists(save_name): raise RuntimeError("File '{}' was not saved!".format(save_name)) return save_name
Write entry to JSON file in the proper location. Arguments --------- bury : bool final : bool If this is the 'final' save, perform additional sanitization and cleaning operations.
entailment
def sort_func(self, key): """Used to sort keys when writing Entry to JSON format. Should be supplemented/overridden by inheriting classes. """ if key == self._KEYS.SCHEMA: return 'aaa' if key == self._KEYS.NAME: return 'aab' if key == self._KEYS.SOURCES: return 'aac' if key == self._KEYS.ALIAS: return 'aad' if key == self._KEYS.MODELS: return 'aae' if key == self._KEYS.PHOTOMETRY: return 'zzy' if key == self._KEYS.SPECTRA: return 'zzz' return key
Used to sort keys when writing Entry to JSON format. Should be supplemented/overridden by inheriting classes.
entailment
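The 'aaa'/'zzz' sentinels in `sort_func` simply pin a few keys to the front or back of an otherwise alphabetical ordering. A standalone sketch with plain string keys (the `ORDER` mapping and the sample key list are illustrative):

```python
# Plain-string version of the sort_func idea shown above.
ORDER = {'schema': 'aaa', 'name': 'aab', 'sources': 'aac', 'alias': 'aad',
         'models': 'aae', 'photometry': 'zzy', 'spectra': 'zzz'}

def sort_key(key):
    # Pinned keys map to sentinels; everything else sorts alphabetically in between.
    return ORDER.get(key, key)

keys = ['spectra', 'redshift', 'name', 'photometry', 'alias', 'schema', 'sources']
print(sorted(keys, key=sort_key))
# -> ['schema', 'name', 'sources', 'alias', 'redshift', 'photometry', 'spectra']
```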
def set_pd_mag_from_counts(photodict, c='', ec='', lec='', uec='', zp=DEFAULT_ZP, sig=DEFAULT_UL_SIGMA): """Set photometry dictionary from a counts measurement.""" with localcontext() as ctx: if lec == '' or uec == '': lec = ec uec = ec prec = max( get_sig_digits(str(c), strip_zeroes=False), get_sig_digits(str(lec), strip_zeroes=False), get_sig_digits(str(uec), strip_zeroes=False)) + 1 ctx.prec = prec dlec = Decimal(str(lec)) duec = Decimal(str(uec)) if c != '': dc = Decimal(str(c)) dzp = Decimal(str(zp)) dsig = Decimal(str(sig)) photodict[PHOTOMETRY.ZERO_POINT] = str(zp) if c == '' or float(c) < float(sig) * float(uec): photodict[PHOTOMETRY.UPPER_LIMIT] = True photodict[PHOTOMETRY.UPPER_LIMIT_SIGMA] = str(sig) photodict[PHOTOMETRY.MAGNITUDE] = str(dzp - (D25 * (dsig * duec ).log10())) dnec = Decimal('10.0') ** ( (dzp - Decimal(photodict[PHOTOMETRY.MAGNITUDE])) / D25) photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * ( (dnec + duec).log10() - dnec.log10())) else: photodict[PHOTOMETRY.MAGNITUDE] = str(dzp - D25 * dc.log10()) photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * ( (dc + duec).log10() - dc.log10())) photodict[PHOTOMETRY.E_LOWER_MAGNITUDE] = str(D25 * ( dc.log10() - (dc - dlec).log10()))
Set photometry dictionary from a counts measurement.
entailment
def set_pd_mag_from_flux_density(photodict, fd='', efd='', lefd='', uefd='', sig=DEFAULT_UL_SIGMA): """Set photometry dictionary from a flux density measurement. `fd` is assumed to be in microjanskys. """ with localcontext() as ctx: if lefd == '' or uefd == '': lefd = efd uefd = efd prec = max( get_sig_digits(str(fd), strip_zeroes=False), get_sig_digits(str(lefd), strip_zeroes=False), get_sig_digits(str(uefd), strip_zeroes=False)) + 1 ctx.prec = prec dlefd = Decimal(str(lefd)) duefd = Decimal(str(uefd)) if fd != '': dfd = Decimal(str(fd)) dsig = Decimal(str(sig)) if fd == '' or float(fd) < DEFAULT_UL_SIGMA * float(uefd): photodict[PHOTOMETRY.UPPER_LIMIT] = True photodict[PHOTOMETRY.UPPER_LIMIT_SIGMA] = str(sig) photodict[PHOTOMETRY.MAGNITUDE] = str(Decimal('23.9') - D25 * ( dsig * duefd).log10()) if fd: photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * ( (dfd + duefd).log10() - dfd.log10())) else: photodict[PHOTOMETRY.MAGNITUDE] = str(Decimal('23.9') - D25 * dfd.log10()) photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * ( (dfd + duefd).log10() - dfd.log10())) photodict[PHOTOMETRY.E_LOWER_MAGNITUDE] = str(D25 * ( dfd.log10() - (dfd - dlefd).log10()))
Set photometry dictionary from a flux density measurement. `fd` is assumed to be in microjanskys.
entailment
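The conversion behind the snippet above is the AB-magnitude relation m = 23.9 − 2.5·log10(f/µJy). A plain-float sketch of the same arithmetic (the original uses `Decimal` to control precision; the function name and sample values here are illustrative):

```python
import math

def mag_from_flux_density_ujy(fd, efd):
    """AB magnitude and asymmetric errors from a flux density in microjanskys."""
    mag = 23.9 - 2.5 * math.log10(fd)
    e_upper = 2.5 * (math.log10(fd + efd) - math.log10(fd))
    e_lower = 2.5 * (math.log10(fd) - math.log10(fd - efd))
    return mag, e_upper, e_lower

# 100 +/- 10 uJy -> m ~= 18.9, +0.10 / -0.11 mag
print(mag_from_flux_density_ujy(100.0, 10.0))
```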
def _check(self): """Check that entry attributes are legal.""" # Run the super method super(Photometry, self)._check() err_str = None has_flux = self._KEYS.FLUX in self has_flux_dens = self._KEYS.FLUX_DENSITY in self has_u_flux = self._KEYS.U_FLUX in self has_u_flux_dens = self._KEYS.U_FLUX_DENSITY in self has_freq = self._KEYS.FREQUENCY in self has_band = self._KEYS.BAND in self has_ener = self._KEYS.ENERGY in self has_u_freq = self._KEYS.U_FREQUENCY in self has_u_ener = self._KEYS.U_ENERGY in self if has_flux or has_flux_dens: if not any([has_freq, has_band, has_ener]): err_str = ("Has `{}` or `{}`".format(self._KEYS.FLUX, self._KEYS.FLUX_DENSITY) + " but None of `{}`, `{}`, `{}`".format( self._KEYS.FREQUENCY, self._KEYS.BAND, self._KEYS.ENERGY)) elif has_flux and not has_u_flux: err_str = "`{}` provided without `{}`.".format( self._KEYS.FLUX, self._KEYS.U_FLUX) elif has_flux_dens and not has_u_flux_dens: err_str = "`{}` provided without `{}`.".format( self._KEYS.FLUX_DENSITY, self._KEYS.U_FLUX_DENSITY) elif has_freq and not has_u_freq: err_str = "`{}` provided without `{}`.".format( self._KEYS.FREQUENCY, self._KEYS.U_FREQUENCY) elif has_ener and not has_u_ener: err_str = "`{}` provided without `{}`.".format( self._KEYS.ENERGY, self._KEYS.U_ENERGY) if err_str is not None: raise ValueError(err_str) return
Check that entry attributes are legal.
entailment
def sort_func(self, key): """Specify order for attributes.""" if key == self._KEYS.TIME: return 'aaa' if key == self._KEYS.MODEL: return 'zzy' if key == self._KEYS.SOURCE: return 'zzz' return key
Specify order for attributes.
entailment
def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = get_url() context.configure( url=url, version_table="alembic_ziggurat_foundations_version", transaction_per_migration=True, ) with context.begin_transaction(): context.run_migrations()
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
entailment
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = create_engine(get_url()) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, version_table="alembic_ziggurat_foundations_version", transaction_per_migration=True, ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close()
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
entailment
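These two functions are the standard halves of an Alembic `env.py`. Assuming they live in that file, the module typically ends with a dispatch on the current mode, roughly:

```python
# Typical tail of an Alembic env.py, choosing between the two modes above.
from alembic import context

if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
```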
def by_resource_user_and_perm( cls, user_id, perm_name, resource_id, db_session=None ): """ return the first instance matching user id, perm name and resource id :param user_id: :param perm_name: :param resource_id: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model).filter(cls.model.user_id == user_id) query = query.filter(cls.model.resource_id == resource_id) query = query.filter(cls.model.perm_name == perm_name) return query.first()
return the first instance matching user id, perm name and resource id :param user_id: :param perm_name: :param resource_id: :param db_session: :return:
entailment
def tdSensor(self): """Get the next sensor while iterating. :return: a dict with the keys: protocol, model, id, datatypes. """ protocol = create_string_buffer(20) model = create_string_buffer(20) sid = c_int() datatypes = c_int() self._lib.tdSensor(protocol, sizeof(protocol), model, sizeof(model), byref(sid), byref(datatypes)) return {'protocol': self._to_str(protocol), 'model': self._to_str(model), 'id': sid.value, 'datatypes': datatypes.value}
Get the next sensor while iterating. :return: a dict with the keys: protocol, model, id, datatypes.
entailment
def tdSensorValue(self, protocol, model, sid, datatype): """Get the sensor value for a given sensor. :return: a dict with the keys: value, timestamp. """ value = create_string_buffer(20) timestamp = c_int() self._lib.tdSensorValue(protocol, model, sid, datatype, value, sizeof(value), byref(timestamp)) return {'value': self._to_str(value), 'timestamp': timestamp.value}
Get the sensor value for a given sensor. :return: a dict with the keys: value, timestamp.
entailment
def tdController(self): """Get the next controller while iterating. :return: a dict with the keys: id, type, name, available. """ cid = c_int() ctype = c_int() name = create_string_buffer(255) available = c_int() self._lib.tdController(byref(cid), byref(ctype), name, sizeof(name), byref(available)) return {'id': cid.value, 'type': ctype.value, 'name': self._to_str(name), 'available': available.value}
Get the next controller while iterating. :return: a dict with the keys: id, type, name, available.
entailment
def make_passwordmanager(schemes=None): """ schemes contains a list of the hash schemes to support; replace this list with the hash(es) you wish to support. This example sets pbkdf2_sha256 as the default, with support for legacy bcrypt hashes. :param schemes: :return: CryptContext() """ from passlib.context import CryptContext if not schemes: schemes = ["pbkdf2_sha256", "bcrypt"] pwd_context = CryptContext(schemes=schemes, deprecated="auto") return pwd_context
schemes contains a list of the hash schemes to support; replace this list with the hash(es) you wish to support. This example sets pbkdf2_sha256 as the default, with support for legacy bcrypt hashes. :param schemes: :return: CryptContext()
entailment
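A self-contained usage sketch of the helper above (requires the `passlib` package; the password string is a placeholder):

```python
from passlib.context import CryptContext

def make_passwordmanager(schemes=None):
    """Same helper as above, restated so this sketch runs standalone."""
    if not schemes:
        schemes = ["pbkdf2_sha256", "bcrypt"]
    return CryptContext(schemes=schemes, deprecated="auto")

pwd_context = make_passwordmanager()                     # pbkdf2_sha256 + legacy bcrypt
hashed = pwd_context.hash("correct horse battery staple")
assert pwd_context.verify("correct horse battery staple", hashed)

# Restrict to a single scheme instead of the defaults:
pbkdf2_only = make_passwordmanager(schemes=["pbkdf2_sha256"])
```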
def ziggurat_model_init( user=None, group=None, user_group=None, group_permission=None, user_permission=None, user_resource_permission=None, group_resource_permission=None, resource=None, external_identity=None, *args, **kwargs ): """ This function handles attaching model to service if model has one specified as `_ziggurat_service`, Also attached a proxy object holding all model definitions that services might use :param args: :param kwargs: :param passwordmanager, the password manager to override default one :param passwordmanager_schemes, list of schemes for default passwordmanager to use :return: """ models = ModelProxy() models.User = user models.Group = group models.UserGroup = user_group models.GroupPermission = group_permission models.UserPermission = user_permission models.UserResourcePermission = user_resource_permission models.GroupResourcePermission = group_resource_permission models.Resource = resource models.ExternalIdentity = external_identity model_service_mapping = import_model_service_mappings() if kwargs.get("passwordmanager"): user.passwordmanager = kwargs["passwordmanager"] else: user.passwordmanager = make_passwordmanager( kwargs.get("passwordmanager_schemes") ) for name, cls in models.items(): # if model has a manager attached attached the class also to manager services = model_service_mapping.get(name, []) for service in services: setattr(service, "model", cls) setattr(service, "models_proxy", models)
This function handles attaching a model to its service if the model has one specified as `_ziggurat_service`. It also attaches a proxy object holding all model definitions that services might use. :param args: :param kwargs: :param passwordmanager: the password manager to override the default one :param passwordmanager_schemes: list of schemes for the default passwordmanager to use :return:
entailment
def messages(request, year=None, month=None, day=None, template="gnotty/messages.html"): """ Show messages for the given query or day. """ query = request.REQUEST.get("q") prev_url, next_url = None, None messages = IRCMessage.objects.all() if hide_joins_and_leaves(request): messages = messages.filter(join_or_leave=False) if query: search = Q(message__icontains=query) | Q(nickname__icontains=query) messages = messages.filter(search).order_by("-message_time") elif year and month and day: messages = messages.filter(message_time__year=year, message_time__month=month, message_time__day=day) day_delta = timedelta(days=1) this_date = date(int(year), int(month), int(day)) prev_date = this_date - day_delta next_date = this_date + day_delta prev_url = reverse("gnotty_day", args=prev_date.timetuple()[:3]) next_url = reverse("gnotty_day", args=next_date.timetuple()[:3]) else: return redirect("gnotty_year", year=datetime.now().year) context = dict(settings) context["messages"] = messages context["prev_url"] = prev_url context["next_url"] = next_url return render(request, template, context)
Show messages for the given query or day.
entailment
def calendar(request, year=None, month=None, template="gnotty/calendar.html"): """ Show calendar months for the given year/month. """ try: year = int(year) except TypeError: year = datetime.now().year lookup = {"message_time__year": year} if month: lookup["message_time__month"] = month if hide_joins_and_leaves(request): lookup["join_or_leave"] = False messages = IRCMessage.objects.filter(**lookup) try: dates = messages.datetimes("message_time", "day") except AttributeError: dates = messages.dates("message_time", "day") days = [d.date() for d in dates] months = [] if days: min_date, max_date = days[0], days[-1] days = set(days) calendar = Calendar(SUNDAY) for m in range(1, 13) if not month else [int(month)]: lt_max = m <= max_date.month or year < max_date.year gt_min = m >= min_date.month or year > min_date.year if lt_max and gt_min: weeks = calendar.monthdatescalendar(year, m) for w, week in enumerate(weeks): for d, day in enumerate(week): weeks[w][d] = { "date": day, "in_month": day.month == m, "has_messages": day in days, } months.append({"month": date(year, m, 1), "weeks": weeks}) context = dict(settings) context["months"] = months return render(request, template, context)
Show calendar months for the given year/month.
entailment
def decorate_client(api_client, func, name): """A helper for decorating :class:`bravado.client.SwaggerClient`. :class:`bravado.client.SwaggerClient` can be extended by creating a class which wraps all calls to it. This helper is used in a :func:`__getattr__` to check if the attr exists on the api_client. If the attr does not exist raise :class:`AttributeError`, if it exists and is not callable return it, and if it is callable return a partial function calling `func` with `name`. Example usage: .. code-block:: python class SomeClientDecorator(object): def __init__(self, api_client, ...): self.api_client = api_client # First arg should be suffiently unique to not conflict with any of # the kwargs def wrap_call(self, client_call_name, *args, **kwargs): ... def __getattr__(self, name): return decorate_client(self.api_client, self.wrap_call, name) :param api_client: the client which is being decorated :type api_client: :class:`bravado.client.SwaggerClient` :param func: a callable which accepts `name`, `*args`, `**kwargs` :type func: callable :param name: the attribute being accessed :type name: string :returns: the attribute from the `api_client` or a partial of `func` :raises: :class:`AttributeError` """ client_attr = getattr(api_client, name) if not callable(client_attr): return client_attr return OperationDecorator(client_attr, functools.partial(func, name))
A helper for decorating :class:`bravado.client.SwaggerClient`. :class:`bravado.client.SwaggerClient` can be extended by creating a class which wraps all calls to it. This helper is used in a :func:`__getattr__` to check if the attr exists on the api_client. If the attr does not exist raise :class:`AttributeError`, if it exists and is not callable return it, and if it is callable return a partial function calling `func` with `name`. Example usage: .. code-block:: python class SomeClientDecorator(object): def __init__(self, api_client, ...): self.api_client = api_client # First arg should be suffiently unique to not conflict with any of # the kwargs def wrap_call(self, client_call_name, *args, **kwargs): ... def __getattr__(self, name): return decorate_client(self.api_client, self.wrap_call, name) :param api_client: the client which is being decorated :type api_client: :class:`bravado.client.SwaggerClient` :param func: a callable which accepts `name`, `*args`, `**kwargs` :type func: callable :param name: the attribute being accessed :type name: string :returns: the attribute from the `api_client` or a partial of `func` :raises: :class:`AttributeError`
entailment
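Because `OperationDecorator` and a real `SwaggerClient` are not shown, here is a simplified, self-contained sketch of the wrapping pattern the docstring describes, with `functools.partial` standing in for `OperationDecorator` and a fake client object standing in for bravado (both are assumptions for illustration):

```python
import functools

class FakeApiClient:
    """Stands in for a bravado SwaggerClient in this sketch."""
    version = "1.0"

    def get_pet(self, pet_id):
        return {"id": pet_id, "name": "rex"}

def decorate_client(api_client, func, name):
    client_attr = getattr(api_client, name)   # raises AttributeError if missing
    if not callable(client_attr):
        return client_attr
    return functools.partial(func, name)      # simplified: no OperationDecorator

class LoggingClientDecorator:
    def __init__(self, api_client):
        self.api_client = api_client

    def wrap_call(self, client_call_name, *args, **kwargs):
        print("calling", client_call_name)
        return getattr(self.api_client, client_call_name)(*args, **kwargs)

    def __getattr__(self, name):
        return decorate_client(self.api_client, self.wrap_call, name)

client = LoggingClientDecorator(FakeApiClient())
print(client.version)     # non-callable attrs pass straight through
print(client.get_pet(7))  # callables are routed through wrap_call
```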
def delete_expired_locks(self): """ Deletes all expired mutex locks if a ttl is provided. """ ttl_seconds = self.get_mutex_ttl_seconds() if ttl_seconds is not None: DBMutex.objects.filter(creation_time__lte=timezone.now() - timedelta(seconds=ttl_seconds)).delete()
Deletes all expired mutex locks if a ttl is provided.
entailment
def start(self): """ Acquires the db mutex lock. Takes the necessary steps to delete any stale locks. Throws a DBMutexError if it can't acquire the lock. """ # Delete any expired locks first self.delete_expired_locks() try: with transaction.atomic(): self.lock = DBMutex.objects.create(lock_id=self.lock_id) except IntegrityError: raise DBMutexError('Could not acquire lock: {0}'.format(self.lock_id))
Acquires the db mutex lock. Takes the necessary steps to delete any stale locks. Throws a DBMutexError if it can't acquire the lock.
entailment
def stop(self): """ Releases the db mutex lock. Throws an error if the lock was released before the function finished. """ if not DBMutex.objects.filter(id=self.lock.id).exists(): raise DBMutexTimeoutError('Lock {0} expired before function completed'.format(self.lock_id)) else: self.lock.delete()
Releases the db mutex lock. Throws an error if the lock was released before the function finished.
entailment
def decorate_callable(self, func): """ Decorates a function with the db_mutex decorator by using this class as a context manager around it. """ def wrapper(*args, **kwargs): try: with self: result = func(*args, **kwargs) return result except DBMutexError as e: if self.suppress_acquisition_exceptions: LOG.error(e) else: raise e functools.update_wrapper(wrapper, func) return wrapper
Decorates a function with the db_mutex decorator by using this class as a context manager around it.
entailment
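A hedged sketch of how the lock class above is typically used, both as a context manager (via `start`/`stop`) and as a decorator (via `decorate_callable`). The import paths assume the standard django-db-mutex package layout, and the lock ids and job body are placeholders:

```python
# Assumed import paths; adjust for your project.
from db_mutex.db_mutex import db_mutex
from db_mutex.exceptions import DBMutexError, DBMutexTimeoutError

# As a decorator: decorate_callable() wraps the function in the lock's context manager.
@db_mutex('nightly-report')
def run_nightly_report():
    ...  # hypothetical long-running job

# As a context manager: start() acquires the row-based lock, stop() releases it.
try:
    with db_mutex('cleanup'):
        pass  # critical section goes here
except DBMutexError:
    print('another worker holds the lock')
except DBMutexTimeoutError:
    print('the lock expired before the critical section finished')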
def groupfinder(userid, request): """ Default groupfinder implementation for pyramid applications :param userid: :param request: :return: """ if userid and hasattr(request, "user") and request.user: groups = ["group:%s" % g.id for g in request.user.groups] return groups return []
Default groupfinder implementation for pyramid applications :param userid: :param request: :return:
entailment
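A hedged sketch of how such a groupfinder is typically wired into a pre-2.0 Pyramid authentication policy. The secret, settings, and app factory are placeholders, and `groupfinder` is assumed to be the function shown above:

```python
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator

def main(global_config, **settings):
    # groupfinder is called on every authenticated request to compute principals.
    authn_policy = AuthTktAuthenticationPolicy(
        'change-me-secret', callback=groupfinder, hashalg='sha512')
    authz_policy = ACLAuthorizationPolicy()
    config = Configurator(settings=settings)
    config.set_authentication_policy(authn_policy)
    config.set_authorization_policy(authz_policy)
    return config.make_wsgi_app()
```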
def force_atlas2_layout(graph,
                        pos_list=None,
                        node_masses=None,
                        iterations=100,
                        outbound_attraction_distribution=False,
                        lin_log_mode=False,
                        prevent_overlapping=False,
                        edge_weight_influence=1.0,
                        jitter_tolerance=1.0,
                        barnes_hut_optimize=False,
                        barnes_hut_theta=1.2,
                        scaling_ratio=2.0,
                        strong_gravity_mode=False,
                        multithread=False,
                        gravity=1.0):
    """
    Position nodes using ForceAtlas2 force-directed algorithm

    Parameters
    ----------
    graph: NetworkX graph
        A position will be assigned to every node in G.

    pos_list : dict or None  optional (default=None)
        Initial positions for nodes as a dictionary with node as keys and values as a coordinate list or tuple.
        If None, then use random initial positions.

    node_masses : dict or None  optional (default=None)
        Predefined masses for nodes with node as keys and masses as values.
        If None, then use degree of nodes.

    iterations : int  optional (default=100)
        Number of iterations

    outbound_attraction_distribution : boolean
        Distributes attraction along outbound edges. Hubs attract less and thus are pushed to the borders.
        This mode is meant to grant authorities (nodes with a high indegree) a more central position than
        hubs (nodes with a high outdegree). This is useful for social networks and web networks,
        where authorities are sometimes considered more important than hubs.

    lin_log_mode: boolean
        Switch ForceAtlas model from lin-lin to lin-log (tribute to Andreas Noack). Makes clusters tighter.

    prevent_overlapping: boolean
        With this mode enabled, the repulsion is modified so that the nodes do not overlap.
        The goal is to produce a more readable and aesthetically pleasing image.

    edge_weight_influence: float
        How much influence you give to the edges' weight. 0 is “no influence” and 1 is “normal”.

    jitter_tolerance: float
        How much swinging you allow. Above 1 discouraged. Lower gives less speed and more precision.

    barnes_hut_optimize: boolean
        Barnes Hut optimization: n² complexity to n.ln(n); allows larger graphs.

    barnes_hut_theta: float
        Theta of the Barnes Hut optimization.

    scaling_ratio: float
        How much repulsion you want. More makes a more sparse graph.

    strong_gravity_mode: boolean
        The “Strong gravity” option sets a force that attracts the nodes that are distant from the center more
        (the attraction scales with this distance). This force has the drawback of being so strong that it is
        sometimes stronger than the other forces. It may result in a biased placement of the nodes. However,
        its advantage is to force a very compact layout, which may be useful for certain purposes.

    multithread: boolean
        Not implemented yet.

    gravity: float
        Attracts nodes to the center. Prevents islands from drifting away.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node
    """
    assert isinstance(graph, networkx.classes.graph.Graph), "Not a networkx graph"
    assert isinstance(pos_list, dict) or (pos_list is None), "pos must be specified as a dictionary, as in networkx"
    assert multithread is False, "Not implemented yet"

    G = numpy.asarray(networkx.to_numpy_matrix(graph))

    pos = None
    if pos_list is not None:
        pos = numpy.asarray([pos_list[i] for i in graph.nodes()])

    masses = None
    if node_masses is not None:
        masses = numpy.asarray([node_masses[node] for node in graph.nodes()])

    assert G.shape == (G.shape[0], G.shape[0]), "G is not 2D square"
    assert numpy.all(G.T == G), "G is not symmetric."

    # speed and speed efficiency describe a scaling factor of dx and dy
    # before x and y are adjusted. These are modified as the
    # algorithm runs to help ensure convergence.
    speed = 1
    speed_efficiency = 1

    nodes = []
    for i in range(0, G.shape[0]):
        n = Node()
        if node_masses is None:
            n.mass = 1 + numpy.count_nonzero(G[i])
        else:
            n.mass = masses[i]
        n.old_dx = 0
        n.old_dy = 0
        n.dx = 0
        n.dy = 0
        if pos is None:
            n.x = random.random()
            n.y = random.random()
        else:
            n.x = pos[i][0]
            n.y = pos[i][1]
        nodes.append(n)

    edges = []
    es = numpy.asarray(G.nonzero()).T
    for e in es:
        if e[1] <= e[0]:
            continue  # Avoid duplicate edges
        edge = Edge()
        edge.node1 = e[0]  # The index of the first node in `nodes`
        edge.node2 = e[1]  # The index of the second node in `nodes`
        edge.weight = G[tuple(e)]
        edges.append(edge)

    repulsion = get_repulsion(prevent_overlapping, scaling_ratio)
    if strong_gravity_mode:
        gravity_force = get_strong_gravity(scaling_ratio)
    else:
        gravity_force = repulsion

    if outbound_attraction_distribution:
        outbound_att_compensation = numpy.mean([n.mass for n in nodes])
    attraction_coef = outbound_att_compensation if outbound_attraction_distribution else 1
    attraction = get_attraction(lin_log_mode, outbound_attraction_distribution, prevent_overlapping, attraction_coef)

    # Main loop
    for _i in range(0, iterations):
        for n in nodes:
            n.old_dx = n.dx
            n.old_dy = n.dy
            n.dx = 0
            n.dy = 0

        # Barnes Hut optimization
        root_region = None
        if barnes_hut_optimize:
            root_region = Quadtree(nodes)
            root_region.build()

        apply_repulsion(repulsion, nodes,
                        barnes_hut_optimize=barnes_hut_optimize,
                        barnes_hut_theta=barnes_hut_theta,
                        region=root_region)
        apply_gravity(gravity_force, nodes, gravity, scaling_ratio)
        apply_attraction(attraction, nodes, edges, edge_weight_influence)

        # Auto adjust speed.
        total_swinging = 0.0  # How much irregular movement
        total_effective_traction = 0.0  # How much useful movement
        for n in nodes:
            swinging = math.sqrt((n.old_dx - n.dx) * (n.old_dx - n.dx) + (n.old_dy - n.dy) * (n.old_dy - n.dy))
            total_swinging += n.mass * swinging
            total_effective_traction += .5 * n.mass * math.sqrt(
                (n.old_dx + n.dx) * (n.old_dx + n.dx) + (n.old_dy + n.dy) * (n.old_dy + n.dy))

        # Optimize jitter tolerance.
        # The 'right' jitter tolerance for this network.
        # Bigger networks need more tolerance. Denser networks need less tolerance.
        # Totally empiric.
        estimated_optimal_jitter_tolerance = .05 * math.sqrt(len(nodes))
        min_jt = math.sqrt(estimated_optimal_jitter_tolerance)
        max_jt = 10
        jt = jitter_tolerance * max(min_jt,
                                    min(max_jt,
                                        estimated_optimal_jitter_tolerance *
                                        total_effective_traction / (len(nodes) ** 2)))

        min_speed_efficiency = 0.05

        # Protective against erratic behavior
        if total_swinging / total_effective_traction > 2.0:
            if speed_efficiency > min_speed_efficiency:
                speed_efficiency *= .5
            jt = max(jt, jitter_tolerance)

        target_speed = jt * speed_efficiency * total_effective_traction / total_swinging

        if total_swinging > jt * total_effective_traction:
            if speed_efficiency > min_speed_efficiency:
                speed_efficiency *= .7
        elif speed < 1000:
            speed_efficiency *= 1.3

        # But the speed shouldn't rise too much too quickly, since it would
        # make the convergence drop dramatically.
        max_rise = .5
        speed = speed + min(target_speed - speed, max_rise * speed)

        # Apply forces.
        if prevent_overlapping:
            for n in nodes:
                swinging = n.mass * math.sqrt(
                    (n.old_dx - n.dx) * (n.old_dx - n.dx) + (n.old_dy - n.dy) * (n.old_dy - n.dy))
                factor = 0.1 * speed / (1 + math.sqrt(speed * swinging))
                df = math.sqrt(math.pow(n.dx, 2) + n.dy ** 2)
                factor = min(factor * df, 10.) / df
                # Apply the displacement to the node position, mirroring the
                # non-overlapping branch below.
                n.x = n.x + (n.dx * factor)
                n.y = n.y + (n.dy * factor)
        else:
            for n in nodes:
                swinging = n.mass * math.sqrt(
                    (n.old_dx - n.dx) * (n.old_dx - n.dx) + (n.old_dy - n.dy) * (n.old_dy - n.dy))
                factor = speed / (1.0 + math.sqrt(speed * swinging))
                n.x = n.x + (n.dx * factor)
                n.y = n.y + (n.dy * factor)

    positions = [(n.x, n.y) for n in nodes]
    return dict(zip(graph.nodes(), positions))
Position nodes using ForceAtlas2 force-directed algorithm

Parameters
----------
graph: NetworkX graph
    A position will be assigned to every node in G.

pos_list : dict or None  optional (default=None)
    Initial positions for nodes as a dictionary with node as keys and values as a coordinate list or tuple.
    If None, then use random initial positions.

node_masses : dict or None  optional (default=None)
    Predefined masses for nodes with node as keys and masses as values.
    If None, then use degree of nodes.

iterations : int  optional (default=100)
    Number of iterations

outbound_attraction_distribution : boolean
    Distributes attraction along outbound edges. Hubs attract less and thus are pushed to the borders.
    This mode is meant to grant authorities (nodes with a high indegree) a more central position than
    hubs (nodes with a high outdegree). This is useful for social networks and web networks,
    where authorities are sometimes considered more important than hubs.

lin_log_mode: boolean
    Switch ForceAtlas model from lin-lin to lin-log (tribute to Andreas Noack). Makes clusters tighter.

prevent_overlapping: boolean
    With this mode enabled, the repulsion is modified so that the nodes do not overlap.
    The goal is to produce a more readable and aesthetically pleasing image.

edge_weight_influence: float
    How much influence you give to the edges' weight. 0 is “no influence” and 1 is “normal”.

jitter_tolerance: float
    How much swinging you allow. Above 1 discouraged. Lower gives less speed and more precision.

barnes_hut_optimize: boolean
    Barnes Hut optimization: n² complexity to n.ln(n); allows larger graphs.

barnes_hut_theta: float
    Theta of the Barnes Hut optimization.

scaling_ratio: float
    How much repulsion you want. More makes a more sparse graph.

strong_gravity_mode: boolean
    The “Strong gravity” option sets a force that attracts the nodes that are distant from the center more
    (the attraction scales with this distance). This force has the drawback of being so strong that it is
    sometimes stronger than the other forces. It may result in a biased placement of the nodes. However,
    its advantage is to force a very compact layout, which may be useful for certain purposes.

multithread: boolean
    Not implemented yet.

gravity: float
    Attracts nodes to the center. Prevents islands from drifting away.

Returns
-------
pos : dict
    A dictionary of positions keyed by node
entailment
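A minimal usage sketch of the layout function above, under stated assumptions: the `fa2l` import path is a guess rather than something confirmed by the source, and an older NetworkX release (one that still provides `to_numpy_matrix`) is required by the implementation.

import networkx as nx

from fa2l import force_atlas2_layout  # assumed import path

G = nx.karate_club_graph()
positions = force_atlas2_layout(G,
                                iterations=200,
                                barnes_hut_optimize=False,
                                scaling_ratio=2.0,
                                gravity=1.0)
# `positions` maps each node to an (x, y) pair, e.g. for nx.draw(G, pos=positions).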
def apply_repulsion(repulsion, nodes, barnes_hut_optimize=False, region=None, barnes_hut_theta=1.2): """ Iterate through the nodes or edges and apply the forces directly to the node objects. """ if not barnes_hut_optimize: for i in range(0, len(nodes)): for j in range(0, i): repulsion.apply_node_to_node(nodes[i], nodes[j]) else: for i in range(0, len(nodes)): region.apply_force(nodes[i], repulsion, barnes_hut_theta)
Iterate through the nodes or edges and apply the forces directly to the node objects.
entailment
def apply_gravity(repulsion, nodes, gravity, scaling_ratio): """ Iterate through the nodes or edges and apply the gravity directly to the node objects. """ for i in range(0, len(nodes)): repulsion.apply_gravitation(nodes[i], gravity / scaling_ratio)
Iterate through the nodes or edges and apply the gravity directly to the node objects.
entailment
def get(cls, external_id, local_user_id, provider_name, db_session=None): """ Fetch row using primary key - will use existing object in session if already present :param external_id: :param local_user_id: :param provider_name: :param db_session: :return: """ db_session = get_db_session(db_session) return db_session.query(cls.model).get( [external_id, local_user_id, provider_name] )
Fetch row using primary key - will use existing object in session if already present :param external_id: :param local_user_id: :param provider_name: :param db_session: :return:
entailment
def by_external_id_and_provider(cls, external_id, provider_name, db_session=None): """ Returns ExternalIdentity instance based on search params :param external_id: :param provider_name: :param db_session: :return: ExternalIdentity """ db_session = get_db_session(db_session) query = db_session.query(cls.model) query = query.filter(cls.model.external_id == external_id) query = query.filter(cls.model.provider_name == provider_name) return query.first()
Returns ExternalIdentity instance based on search params :param external_id: :param provider_name: :param db_session: :return: ExternalIdentity
entailment
def user_by_external_id_and_provider( cls, external_id, provider_name, db_session=None ): """ Returns User instance based on search params :param external_id: :param provider_name: :param db_session: :return: User """ db_session = get_db_session(db_session) query = db_session.query(cls.models_proxy.User) query = query.filter(cls.model.external_id == external_id) query = query.filter(cls.model.provider_name == provider_name) query = query.filter(cls.models_proxy.User.id == cls.model.local_user_id) return query.first()
Returns User instance based on search params :param external_id: :param provider_name: :param db_session: :return: User
entailment
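A hedged usage sketch of the two lookups above; the service class name, the import path and the `db_session` object follow ziggurat_foundations conventions and are assumptions here, not taken from this snippet alone.

from ziggurat_foundations.models.services.external_identity import ExternalIdentityService

# db_session: an active SQLAlchemy session (creation not shown here)
identity = ExternalIdentityService.by_external_id_and_provider(
    external_id="10923", provider_name="github", db_session=db_session)
user = ExternalIdentityService.user_by_external_id_and_provider(
    external_id="10923", provider_name="github", db_session=db_session)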
def by_user_and_perm(cls, user_id, perm_name, db_session=None): """ return by user and permission name :param user_id: :param perm_name: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model).filter(cls.model.user_id == user_id) query = query.filter(cls.model.perm_name == perm_name) return query.first()
return by user and permission name :param user_id: :param perm_name: :param db_session: :return:
entailment
def node_is_subclass(cls, *subclass_names): """Checks if cls node has parent with subclass_name.""" if not isinstance(cls, (ClassDef, Instance)): return False # if cls.bases == YES: # return False for base_cls in cls.bases: try: for inf in base_cls.inferred(): # pragma no branch if inf.qname() in subclass_names: return True if inf != cls and node_is_subclass( # pragma no branch inf, *subclass_names): # check up the hierarchy in case we are a subclass of # a subclass of a subclass ... return True except InferenceError: # pragma no cover continue return False
Checks if cls node has parent with subclass_name.
entailment
def is_field_method(node): """Checks if a call to a field instance method is valid. A call is valid if the call is a method of the underlying type. So, in a StringField the methods from str are valid, in a ListField the methods from list are valid and so on...""" name = node.attrname parent = node.last_child() inferred = safe_infer(parent) if not inferred: return False for cls_name, inst in FIELD_TYPES.items(): if node_is_instance(inferred, cls_name) and hasattr(inst, name): return True return False
Checks if a call to a field instance method is valid. A call is valid if the call is a method of the underlying type. So, in a StringField the methods from str are valid, in a ListField the methods from list are valid and so on...
entailment
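To make the idea concrete, here is a small, hypothetical mongoengine model (not part of the source) showing the kind of access the checker treats as valid: methods of the underlying Python type are allowed on field values.

from mongoengine import Document, ListField, StringField

class Person(Document):            # hypothetical model for illustration only
    name = StringField()
    tags = ListField()

p = Person(name="ada", tags=["python"])
p.name.upper()                     # valid: a str method on a StringField value
p.tags.append("pylint")            # valid: a list method on a ListField value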
def get_node_parent_class(node): """Supposes that node is a mongoengine field in a class and tries to get its parent class""" while node.parent: # pragma no branch if isinstance(node, ClassDef): return node node = node.parent
Supposes that node is a mongoengine field in a class and tries to get its parent class
entailment
def get_field_definition(node):
    """node is a class attribute that is a mongoengine field. Returns
    the definition statement for the attribute
    """
    name = node.attrname
    cls = get_node_parent_class(node)
    definition = cls.lookup(name)[1][0].statement()
    return definition
node is a class attribute that is a mongoengine field. Returns the definition statement for the attribute
entailment
def get_field_embedded_doc(node):
    """Returns the ClassDef for the related embedded document in an
    embedded document field."""
    definition = get_field_definition(node)
    cls_name = definition.last_child().last_child()
    cls = next(cls_name.infer())
    return cls
Returns the ClassDef for the related embedded document in an embedded document field.
entailment
def node_is_embedded_doc_attr(node):
    """Checks if a node is a valid field or method in an embedded document.
    """
    embedded_doc = get_field_embedded_doc(node.last_child())
    name = node.attrname
    try:
        r = bool(embedded_doc.lookup(name)[1][0])
    except IndexError:
        r = False
    return r
Checks if a node is a valid field or method in an embedded document.
entailment
def _dispatcher(self, connection, event): """ This is the method in ``SimpleIRCClient`` that all IRC events get passed through. Here we map events to our own custom event handlers, and call them. """ super(BaseBot, self)._dispatcher(connection, event) for handler in self.events[event.eventtype()]: handler(self, connection, event)
This is the method in ``SimpleIRCClient`` that all IRC events get passed through. Here we map events to our own custom event handlers, and call them.
entailment
def message_channel(self, message): """ We won't receive our own messages, so log them manually. """ self.log(None, message) super(BaseBot, self).message_channel(message)
We won't receive our own messages, so log them manually.
entailment
def on_pubmsg(self, connection, event):
    """
    Log any public messages, and also handle the command event.
    """
    for message in event.arguments():
        self.log(event, message)
        # list() so that pop() also works on Python 3, where filter() is lazy.
        command_args = list(filter(None, message.split()))
        command_name = command_args.pop(0)
        for handler in self.events["command"]:
            if handler.event.args["command"] == command_name:
                self.handle_command_event(event, handler, command_args)
Log any public messages, and also handle the command event.
entailment
def handle_command_event(self, event, command, args): """ Command handler - treats each word in the message that triggered the command as an argument to the command, and does some validation to ensure that the number of arguments match. """ argspec = getargspec(command) num_all_args = len(argspec.args) - 2 # Ignore self/event args num_pos_args = num_all_args - len(argspec.defaults or []) if num_pos_args <= len(args) <= num_all_args: response = command(self, event, *args) elif num_all_args == num_pos_args: s = "s are" if num_all_args != 1 else " is" response = "%s arg%s required" % (num_all_args, s) else: bits = (num_pos_args, num_all_args) response = "between %s and %s args are required" % bits response = "%s: %s" % (self.get_nickname(event), response) self.message_channel(response)
Command handler - treats each word in the message that triggered the command as an argument to the command, and does some validation to ensure that the number of arguments match.
entailment
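A worked example of the argument-count arithmetic above, using getfullargspec (the modern spelling of the getargspec call in the source); the greet handler is hypothetical.

from inspect import getfullargspec  # modern equivalent of the source's getargspec

def greet(self, event, name, greeting="hello"):  # hypothetical command handler
    return "%s, %s!" % (greeting, name)

spec = getfullargspec(greet)
num_all_args = len(spec.args) - 2                       # ignore self/event -> 2
num_pos_args = num_all_args - len(spec.defaults or [])  # only 'name' is required -> 1
# "!greet alice" (1 arg) and "!greet alice hi" (2 args) satisfy
# num_pos_args <= len(args) <= num_all_args; "!greet" (0 args) triggers the error reply.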
def handle_timer_event(self, handler): """ Runs each timer handler in a separate greenlet thread. """ while True: handler(self) sleep(handler.event.args["seconds"])
Runs each timer handler in a separate greenlet thread.
entailment
def handle_webhook_event(self, environ, url, params): """ Webhook handler - each handler for the webhook event takes an initial pattern argument for matching the URL requested. Here we match the URL to the pattern for each webhook handler, and bail out if it returns a response. """ for handler in self.events["webhook"]: urlpattern = handler.event.args["urlpattern"] if not urlpattern or match(urlpattern, url): response = handler(self, environ, url, params) if response: return response
Webhook handler - each handler for the webhook event takes an initial pattern argument for matching the URL requested. Here we match the URL to the pattern for each webhook handler, and bail out if it returns a response.
entailment
def DeviceFactory(id, lib=None): """Create the correct device instance based on device type and return it. :return: a :class:`Device` or :class:`DeviceGroup` instance. """ lib = lib or Library() if lib.tdGetDeviceType(id) == const.TELLSTICK_TYPE_GROUP: return DeviceGroup(id, lib=lib) return Device(id, lib=lib)
Create the correct device instance based on device type and return it. :return: a :class:`Device` or :class:`DeviceGroup` instance.
entailment
def process_callback(self, block=True): """Dispatch a single callback in the current thread. :param boolean block: If True, blocks waiting for a callback to come. :return: True if a callback was processed; otherwise False. """ try: (callback, args) = self._queue.get(block=block) try: callback(*args) finally: self._queue.task_done() except queue.Empty: return False return True
Dispatch a single callback in the current thread. :param boolean block: If True, blocks waiting for a callback to come. :return: True if a callback was processed; otherwise False.
entailment
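A minimal sketch of how such a dispatcher might be driven from the main thread, using only the process_callback(block=...) method and return value documented above; how the dispatcher object is created and wired to callbacks is assumed, not shown here.

def drain_pending(dispatcher):
    # Run every queued callback without blocking; process_callback() returns
    # False once the queue is empty.
    while dispatcher.process_callback(block=False):
        pass

def run_forever(dispatcher):
    # Block until a callback is queued, then execute it in this thread.
    while True:
        dispatcher.process_callback(block=True)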
def devices(self): """Return all known devices. :return: list of :class:`Device` or :class:`DeviceGroup` instances. """ devices = [] count = self.lib.tdGetNumberOfDevices() for i in range(count): device = DeviceFactory(self.lib.tdGetDeviceId(i), lib=self.lib) devices.append(device) return devices
Return all known devices. :return: list of :class:`Device` or :class:`DeviceGroup` instances.
entailment
def sensors(self): """Return all known sensors. :return: list of :class:`Sensor` instances. """ sensors = [] try: while True: sensor = self.lib.tdSensor() sensors.append(Sensor(lib=self.lib, **sensor)) except TelldusError as e: if e.error != const.TELLSTICK_ERROR_DEVICE_NOT_FOUND: raise return sensors
Return all known sensors. :return: list of :class:`Sensor` instances.
entailment
def controllers(self): """Return all known controllers. Requires Telldus core library version >= 2.1.2. :return: list of :class:`Controller` instances. """ controllers = [] try: while True: controller = self.lib.tdController() del controller["name"] del controller["available"] controllers.append(Controller(lib=self.lib, **controller)) except TelldusError as e: if e.error != const.TELLSTICK_ERROR_NOT_FOUND: raise return controllers
Return all known controllers. Requires Telldus core library version >= 2.1.2. :return: list of :class:`Controller` instances.
entailment
def add_device(self, name, protocol, model=None, **parameters): """Add a new device. :return: a :class:`Device` or :class:`DeviceGroup` instance. """ device = Device(self.lib.tdAddDevice(), lib=self.lib) try: device.name = name device.protocol = protocol if model: device.model = model for key, value in parameters.items(): device.set_parameter(key, value) # Return correct type return DeviceFactory(device.id, lib=self.lib) except Exception: import sys exc_info = sys.exc_info() try: device.remove() except: pass if "with_traceback" in dir(Exception): raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) else: exec("raise exc_info[0], exc_info[1], exc_info[2]")
Add a new device. :return: a :class:`Device` or :class:`DeviceGroup` instance.
entailment
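A hedged usage sketch of adding a device and grouping it; the tellcore-style import path and entry-point class are assumptions, and the protocol, model and parameter values are examples that depend entirely on the hardware.

import tellcore.telldus as td  # assumed package layout

core = td.TelldusCore()        # assumed entry point exposing add_device()/add_group()
lamp = core.add_device("Living room lamp", "arctech",   # example protocol
                       model="selflearning-switch",     # example model
                       house=12345, unit=1)             # example parameters
group = core.add_group("Downstairs", [lamp])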
def add_group(self, name, devices): """Add a new device group. :return: a :class:`DeviceGroup` instance. """ device = self.add_device(name, "group") device.add_to_group(devices) return device
Add a new device group. :return: a :class:`DeviceGroup` instance.
entailment
def connect_controller(self, vid, pid, serial): """Connect a controller.""" self.lib.tdConnectTellStickController(vid, pid, serial)
Connect a controller.
entailment
def disconnect_controller(self, vid, pid, serial): """Disconnect a controller.""" self.lib.tdDisconnectTellStickController(vid, pid, serial)
Disconnect a controller.
entailment
def parameters(self): """Get dict with all set parameters.""" parameters = {} for name in self.PARAMETERS: try: parameters[name] = self.get_parameter(name) except AttributeError: pass return parameters
Get dict with all set parameters.
entailment
def get_parameter(self, name): """Get a parameter.""" default_value = "$%!)(INVALID)(!%$" value = self.lib.tdGetDeviceParameter(self.id, name, default_value) if value == default_value: raise AttributeError(name) return value
Get a parameter.
entailment
def set_parameter(self, name, value): """Set a parameter.""" self.lib.tdSetDeviceParameter(self.id, name, str(value))
Set a parameter.
entailment
def add_to_group(self, devices): """Add device(s) to the group.""" ids = {d.id for d in self.devices_in_group()} ids.update(self._device_ids(devices)) self._set_group(ids)
Add device(s) to the group.
entailment
def remove_from_group(self, devices): """Remove device(s) from the group.""" ids = {d.id for d in self.devices_in_group()} ids.difference_update(self._device_ids(devices)) self._set_group(ids)
Remove device(s) from the group.
entailment
def devices_in_group(self): """Fetch list of devices in group.""" try: devices = self.get_parameter('devices') except AttributeError: return [] ctor = DeviceFactory return [ctor(int(x), lib=self.lib) for x in devices.split(',') if x]
Fetch list of devices in group.
entailment
def value(self, datatype): """Return the :class:`SensorValue` for the given data type. sensor.value(TELLSTICK_TEMPERATURE) is identical to calling sensor.temperature(). """ value = self.lib.tdSensorValue( self.protocol, self.model, self.id, datatype) return SensorValue(datatype, value['value'], value['timestamp'])
Return the :class:`SensorValue` for the given data type. sensor.value(TELLSTICK_TEMPERATURE) is identical to calling sensor.temperature().
entailment
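A hedged sketch of reading sensor values; the import paths and the TELLSTICK_TEMPERATURE constant mirror the `const.*` references in the code above but are still assumptions about the surrounding package.

import tellcore.constants as const  # assumed import paths
import tellcore.telldus as td

core = td.TelldusCore()
for sensor in core.sensors():
    # Same as sensor.temperature(); may raise if the sensor has no such value.
    reading = sensor.value(const.TELLSTICK_TEMPERATURE)
    print(sensor.id, reading.value, reading.timestamp)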
def _prepPointsForSegments(points): """ Move any off curves at the end of the contour to the beginning of the contour. This makes segmentation easier. """ while 1: point = points[-1] if point.segmentType: break else: point = points.pop() points.insert(0, point) continue break
Move any off curves at the end of the contour to the beginning of the contour. This makes segmentation easier.
entailment
def _reversePoints(points):
    """
    Reverse the points.

    This differs from the reversal point pen in RoboFab in that it doesn't
    worry about maintaining the start point position.
    That has no benefit within the context of this module.
    """
    # copy the points
    points = _copyPoints(points)
    # find the first on curve type and recycle
    # it for the last on curve type
    firstOnCurve = None
    for index, point in enumerate(points):
        if point.segmentType is not None:
            firstOnCurve = index
            break
    lastSegmentType = points[firstOnCurve].segmentType
    # reverse the points
    points = reversed(points)
    # work through the reversed remaining points
    final = []
    for point in points:
        segmentType = point.segmentType
        if segmentType is not None:
            point.segmentType = lastSegmentType
            lastSegmentType = segmentType
        final.append(point)
    # move any offcurves at the end of the points
    # to the start of the points
    _prepPointsForSegments(final)
    # done
    return final
Reverse the points. This differs from the reversal point pen in RoboFab in that it doesn't worry about maintaining the start point position. That has no benefit within the context of this module.
entailment
def _convertPointsToSegments(points, willBeReversed=False): """ Compile points into InputSegment objects. """ # get the last on curve previousOnCurve = None for point in reversed(points): if point.segmentType is not None: previousOnCurve = point.coordinates break assert previousOnCurve is not None # gather the segments offCurves = [] segments = [] for point in points: # off curve, hold. if point.segmentType is None: offCurves.append(point) else: segment = InputSegment( points=offCurves + [point], previousOnCurve=previousOnCurve, willBeReversed=willBeReversed ) segments.append(segment) offCurves = [] previousOnCurve = point.coordinates assert not offCurves return segments
Compile points into InputSegment objects.
entailment
def _tValueForPointOnCubicCurve(point, cubicCurve, isHorizontal=0):
    """
    Finds a t value on a curve from a point.
    The point must originally be a point on the curve.
    This will only back-trace the t value, needed to split the curve in parts.
    """
    pt1, pt2, pt3, pt4 = cubicCurve
    a, b, c, d = bezierTools.calcCubicParameters(pt1, pt2, pt3, pt4)
    solutions = bezierTools.solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal],
                                       d[isHorizontal] - point[isHorizontal])
    solutions = [t for t in solutions if 0 <= t < 1]
    if not solutions and not isHorizontal:
        # can happen that a horizontal line doesn't intersect; try the vertical
        return _tValueForPointOnCubicCurve(point, (pt1, pt2, pt3, pt4), isHorizontal=1)
    if len(solutions) > 1:
        intersectionLenghts = {}
        for t in solutions:
            tp = _getCubicPoint(t, pt1, pt2, pt3, pt4)
            dist = _distance(tp, point)
            intersectionLenghts[dist] = t
        minDist = min(intersectionLenghts.keys())
        solutions = [intersectionLenghts[minDist]]
    return solutions
Finds a t value on a curve from a point. The point must originally be a point on the curve. This will only back-trace the t value, needed to split the curve in parts.
entailment
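For illustration, the same cubic solve can be reproduced directly with fontTools' bezierTools (the module the function above already relies on); the control points and the x-coordinate below are made-up example values, not taken from the source.

from fontTools.misc import bezierTools

pt1, pt2, pt3, pt4 = (0, 0), (30, 100), (70, 100), (100, 0)  # example control points
a, b, c, d = bezierTools.calcCubicParameters(pt1, pt2, pt3, pt4)
x0 = 50.0  # x-coordinate of a point assumed to lie on the curve
roots = bezierTools.solveCubic(a[0], b[0], c[0], d[0] - x0)
ts = [t for t in roots if 0 <= t < 1]  # candidate t values for that point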
def _scalePoints(points, scale=1, convertToInteger=True): """ Scale points and optionally convert them to integers. """ if convertToInteger: points = [ (int(round(x * scale)), int(round(y * scale))) for (x, y) in points ] else: points = [(x * scale, y * scale) for (x, y) in points] return points
Scale points and optionally convert them to integers.
entailment
def _scaleSinglePoint(point, scale=1, convertToInteger=True): """ Scale a single point """ x, y = point if convertToInteger: return int(round(x * scale)), int(round(y * scale)) else: return (x * scale, y * scale)
Scale a single point
entailment
def _flattenSegment(segment, approximateSegmentLength=_approximateSegmentLength):
    """
    Flatten the curve segment into a list of points.
    The first and last points in the segment must be on-curve points.

    The returned list of points will not include the first on-curve point.

    False curves (where the off curves are not any different from the on curves) must not be sent here.
    Duplicate points must not be sent here.
    """
    onCurve1, offCurve1, offCurve2, onCurve2 = segment
    if _pointOnLine(onCurve1, onCurve2, offCurve1) and _pointOnLine(onCurve1, onCurve2, offCurve2):
        return [onCurve2]
    est = _estimateCubicCurveLength(onCurve1, offCurve1, offCurve2, onCurve2) / approximateSegmentLength
    flat = []
    minStep = 0.1564
    step = 1.0 / est
    if step > .3:
        step = minStep
    t = step
    while t < 1:
        pt = _getCubicPoint(t, onCurve1, offCurve1, offCurve2, onCurve2)
        # ignore when point is in the same direction as the on - off curve line
        if not _pointOnLine(offCurve2, onCurve2, pt) and not _pointOnLine(onCurve1, offCurve1, pt):
            flat.append(pt)
        t += step
    flat.append(onCurve2)
    return flat
Flatten the curve segment into a list of points. The first and last points in the segment must be on-curve points. The returned list of points will not include the first on-curve point. False curves (where the off curves are not any different from the on curves) must not be sent here. Duplicate points must not be sent here.
entailment