Columns: code (string, lengths 75 to 104k characters) and docstring (string, lengths 1 to 46.9k characters).
def PluginTagToContent(self, plugin_name): """Returns a dict mapping tags to content specific to that plugin. Args: plugin_name: The name of the plugin for which to fetch plugin-specific content. Raises: KeyError: if the plugin name is not found. Returns: A dict mapping tags to plugin-specific content (which are always strings). Those strings are often serialized protos. """ if plugin_name not in self._plugin_to_tag_to_content: raise KeyError('Plugin %r could not be found.' % plugin_name) return self._plugin_to_tag_to_content[plugin_name]
Returns a dict mapping tags to content specific to that plugin. Args: plugin_name: The name of the plugin for which to fetch plugin-specific content. Raises: KeyError: if the plugin name is not found. Returns: A dict mapping tags to plugin-specific content (which are always strings). Those strings are often serialized protos.
def ask_backend(self): """ Ask the user to choose the backend """ response = self._ask_boolean( "Do you have a local docker daemon (on Linux), do you use docker-machine via a local machine, or do you use " "Docker for macOS?", True) if (response): self._display_info("If you use docker-machine on macOS, please see " "http://inginious.readthedocs.io/en/latest/install_doc/troubleshooting.html") return "local" else: self._display_info( "You will have to run inginious-backend and inginious-agent yourself. Please run the commands without argument " "and/or read the documentation for more info") return self._display_question("Please enter the address of your backend")
Ask the user to choose the backend
def begin_write(self, content_type=None): """Open content as a stream for writing. See DAVResource.begin_write() """ assert not self.is_collection self._check_write_access() mode = "wb" # GC issue 57: always store as binary # if contentType and contentType.startswith("text"): # mode = "w" return open(self.absFilePath, mode, BUFFER_SIZE)
Open content as a stream for writing. See DAVResource.begin_write()
def suggest_move(self, position): """Used for playing a single game. For parallel play, use initialize_move, select_leaf, incorporate_results, and pick_move """ start = time.time() if self.timed_match: while time.time() - start < self.seconds_per_move: self.tree_search() else: current_readouts = self.root.N while self.root.N < current_readouts + self.num_readouts: self.tree_search() if self.verbosity > 0: dbg("%d: Searched %d times in %.2f seconds\n\n" % ( position.n, self.num_readouts, time.time() - start)) # print some stats on moves considered. if self.verbosity > 2: dbg(self.root.describe()) dbg('\n\n') if self.verbosity > 3: dbg(self.root.position) return self.pick_move()
Used for playing a single game. For parallel play, use initialize_move, select_leaf, incorporate_results, and pick_move
def to_internal_value(self, data): """ List of dicts of native values <- List of dicts of primitive datatypes. """ if html.is_html_input(data): data = html.parse_html_list(data) if not isinstance(data, list): message = self.error_messages['not_a_list'].format( input_type=type(data).__name__ ) raise ValidationError({ api_settings.NON_FIELD_ERRORS_KEY: [message] }) if not self.allow_empty and len(data) == 0: message = self.error_messages['empty'] raise ValidationError({ api_settings.NON_FIELD_ERRORS_KEY: [message] }) ret = [] errors = [] for item in data: try: validated = self.child.run_validation(item) except ValidationError as exc: errors.append(exc.detail) else: ret.append(validated) errors.append({}) if any(errors): raise ValidationError(errors) return ret
List of dicts of native values <- List of dicts of primitive datatypes.
def _flush_tile_queue_blits(self, surface): """ Blit the queued tiles and block until the tile queue is empty for pygame 1.9.4 + """ tw, th = self.data.tile_size ltw = self._tile_view.left * tw tth = self._tile_view.top * th self.data.prepare_tiles(self._tile_view) blit_list = [(image, (x * tw - ltw, y * th - tth)) for x, y, l, image in self._tile_queue] surface.blits(blit_list)
Blit the queued tiles and block until the tile queue is empty for pygame 1.9.4 +
def _make_load_template(self): """ Return a function that loads a template by name. """ loader = self._make_loader() def load_template(template_name): return loader.load_name(template_name) return load_template
Return a function that loads a template by name.
def wants(cls, *service_names): """A class decorator to indicate that an XBlock class wants particular services.""" def _decorator(cls_): # pylint: disable=missing-docstring for service_name in service_names: cls_._services_requested[service_name] = "want" # pylint: disable=protected-access return cls_ return _decorator
A class decorator to indicate that an XBlock class wants particular services.
def multiifo_noise_coinc_rate(rates, slop): """ Calculate the expected rate of noise coincidences for multiple detectors Parameters ---------- rates: dict Dictionary keyed on ifo string Value is a sequence of single-detector trigger rates, units assumed to be Hz slop: float time added to maximum time-of-flight between detectors to account for timing error Returns ------- expected_coinc_rates: dict Dictionary keyed on the ifo combination string Value is expected coincidence rate in the combination, units Hz """ ifos = numpy.array(sorted(rates.keys())) rates_raw = list(rates[ifo] for ifo in ifos) expected_coinc_rates = {} # Calculate coincidence for all-ifo combination # multiply product of trigger rates by the overlap time allowed_area = multiifo_noise_coincident_area(ifos, slop) rateprod = [numpy.prod(rs) for rs in zip(*rates_raw)] ifostring = ' '.join(ifos) expected_coinc_rates[ifostring] = allowed_area * numpy.array(rateprod) # if more than one possible coincidence type exists, # calculate coincidence for subsets through recursion if len(ifos) > 2: # Calculate rate for each 'miss-one-out' detector combination subsets = itertools.combinations(ifos, len(ifos) - 1) for subset in subsets: rates_subset = {} for ifo in subset: rates_subset[ifo] = rates[ifo] sub_coinc_rates = multiifo_noise_coinc_rate(rates_subset, slop) # add these sub-coincidences to the overall dictionary for sub_coinc in sub_coinc_rates: expected_coinc_rates[sub_coinc] = sub_coinc_rates[sub_coinc] return expected_coinc_rates
Calculate the expected rate of noise coincidences for multiple detectors Parameters ---------- rates: dict Dictionary keyed on ifo string Value is a sequence of single-detector trigger rates, units assumed to be Hz slop: float time added to maximum time-of-flight between detectors to account for timing error Returns ------- expected_coinc_rates: dict Dictionary keyed on the ifo combination string Value is expected coincidence rate in the combination, units Hz
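A hypothetical usage sketch for the function above, assuming it and its helper multiifo_noise_coincident_area are importable; the detector names, trigger rates, and slop value are made up for illustration:

    import numpy

    # Made-up single-detector trigger rates (Hz), three samples per detector.
    rates = {
        'H1': numpy.array([1.0e-4, 2.0e-4, 1.5e-4]),
        'L1': numpy.array([1.2e-4, 1.8e-4, 1.1e-4]),
        'V1': numpy.array([0.9e-4, 1.3e-4, 1.0e-4]),
    }
    coinc = multiifo_noise_coinc_rate(rates, slop=0.005)
    # Keys: 'H1 L1 V1' plus the three two-detector subsets, e.g. 'H1 L1'.
    for combo in sorted(coinc):
        print(combo, coinc[combo])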
def set_env(self, key, value): """Sets environment variables by prepending the app_name to `key`. Also registers the environment variable with the instance object preventing an otherwise-required call to `reload()`. """ os.environ[make_env_key(self.appname, key)] = str(value) # must coerce to string self._registered_env_keys.add(key) self._clear_memoization()
Sets environment variables by prepending the app_name to `key`. Also registers the environment variable with the instance object preventing an otherwise-required call to `reload()`.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'entity') and self.entity is not None: _dict['entity'] = self.entity if hasattr(self, 'location') and self.location is not None: _dict['location'] = self.location if hasattr(self, 'value') and self.value is not None: _dict['value'] = self.value if hasattr(self, 'confidence') and self.confidence is not None: _dict['confidence'] = self.confidence if hasattr(self, 'metadata') and self.metadata is not None: _dict['metadata'] = self.metadata if hasattr(self, 'groups') and self.groups is not None: _dict['groups'] = [x._to_dict() for x in self.groups] if hasattr(self, '_additionalProperties'): for _key in self._additionalProperties: _value = getattr(self, _key, None) if _value is not None: _dict[_key] = _value return _dict
Return a json dictionary representing this model.
def stop_execution(self): """ Triggers the stopping of the object. """ if not (self._stopping or self._stopped): for actor in self.owner.actors: actor.stop_execution() self._stopping = True
Triggers the stopping of the object.
def explain_weights_dfs(estimator, **kwargs): # type: (...) -> Dict[str, pd.DataFrame] """ Explain weights and export them to a dict with ``pandas.DataFrame`` values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does). All keyword arguments are passed to :func:`eli5.explain_weights`. Weights of all features are exported by default. """ kwargs = _set_defaults(kwargs) return format_as_dataframes( eli5.explain_weights(estimator, **kwargs))
Explain weights and export them to a dict with ``pandas.DataFrame`` values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does). All keyword arguments are passed to :func:`eli5.explain_weights`. Weights of all features are exported by default.
def sync_local_to_remote(force="no"): """ Sync your local postgres database with remote Example: fabrik prod sync_local_to_remote:force=yes """ _check_requirements() if force != "yes": message = "This will replace the remote database '%s' with your "\ "local '%s', are you sure [y/n]" % (env.psql_db, env.local_psql_db) answer = prompt(message, "y") if answer != "y": logger.info("Sync stopped") return init_tasks() # Bootstrap fabrik # Create database dump local_file = "sync_%s.sql.tar.gz" % int(time.time()*1000) local_path = "/tmp/%s" % local_file with context_managers.shell_env(PGPASSWORD=env.local_psql_password): elocal("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % ( local_path, env.local_psql_user, env.local_psql_db )) remote_path = "/tmp/%s" % local_file # Upload sync file put(remote_path, local_path) # Import sync file by performing the following task (drop, create, import) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_restore --clean -h localhost -d %s -U %s '%s'" % ( env.psql_db, env.psql_user, remote_path) ) # Cleanup env.run("rm %s" % remote_path) elocal("rm %s" % local_path) # Trigger hook run_hook("postgres.after_sync_local_to_remote") logger.info("Sync complete")
Sync your local postgres database with remote Example: fabrik prod sync_local_to_remote:force=yes
def _serialize_object(self, response_data, request): """ Override to not serialize doc responses. """ if self._is_doc_request(request): return response_data else: return super(DocumentedResource, self)._serialize_object( response_data, request)
Override to not serialize doc responses.
def count_never_executed(self): """Count statements that were never executed.""" lineno = self.firstlineno counter = 0 for line in self.source: if self.sourcelines.get(lineno) == 0: if not self.blank_rx.match(line): counter += 1 lineno += 1 return counter
Count statements that were never executed.
def _agl_compliant_name(glyph_name): """Return an AGL-compliant name string or None if we can't make one.""" MAX_GLYPH_NAME_LENGTH = 63 clean_name = re.sub("[^0-9a-zA-Z_.]", "", glyph_name) if len(clean_name) > MAX_GLYPH_NAME_LENGTH: return None return clean_name
Return an AGL-compliant name string or None if we can't make one.
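A small standalone restatement of the same cleaning logic (not the original module), to make the behaviour concrete:

    import re

    MAX_GLYPH_NAME_LENGTH = 63

    def agl_compliant_name(glyph_name):
        # Strip characters outside [0-9a-zA-Z_.], matching the logic above.
        clean_name = re.sub("[^0-9a-zA-Z_.]", "", glyph_name)
        if len(clean_name) > MAX_GLYPH_NAME_LENGTH:
            return None
        return clean_name

    print(agl_compliant_name("uni00E9"))   # 'uni00E9' (already compliant)
    print(agl_compliant_name("é-acute!"))  # 'acute' (non-AGL characters removed)
    print(agl_compliant_name("x" * 100))   # None (longer than 63 characters)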
def is_form_get(attr, attrs): """Check if this is a GET form action URL.""" res = False if attr == "action": method = attrs.get_true('method', u'').lower() res = method != 'post' return res
Check if this is a GET form action URL.
def print_input(i): """ Input: { } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 html - input as JSON } """ o=i.get('out','') rx=dumps_json({'dict':i, 'sort_keys':'yes'}) if rx['return']>0: return rx h=rx['string'] if o=='con': out(h) return {'return':0, 'html':h}
Input: { } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 html - input as JSON }
async def set_gs(self, mgr_addr, gs): '''Set the grid size for the :py:class:`GridEnvironment` whose manager is at the given address. :param str mgr_addr: Address of the manager agent :param gs: New grid size of the grid environment, iterable with length 2. ''' remote_manager = await self.env.connect(mgr_addr) await remote_manager.set_gs(gs)
Set the grid size for the :py:class:`GridEnvironment` whose manager is at the given address. :param str mgr_addr: Address of the manager agent :param gs: New grid size of the grid environment, iterable with length 2.
def _parse_query(self, source): """Parse one of the rules as either objectfilter or dottysql. Example: _parse_query("5 + 5") # Returns Sum(Literal(5), Literal(5)) Arguments: source: A rule in either objectfilter or dottysql syntax. Returns: The AST to represent the rule. """ if self.OBJECTFILTER_WORDS.search(source): syntax_ = "objectfilter" else: syntax_ = None # Default it is. return query.Query(source, syntax=syntax_)
Parse one of the rules as either objectfilter or dottysql. Example: _parse_query("5 + 5") # Returns Sum(Literal(5), Literal(5)) Arguments: source: A rule in either objectfilter or dottysql syntax. Returns: The AST to represent the rule.
def to_python(self, value): """Convert value if needed.""" if isinstance(value, GroupDescriptor): value = value._value # pylint: disable=protected-access result = {} for name, field in self.fields.items(): result[name] = field.to_python(value.get(name, None)) return GroupDescriptor(result)
Convert value if needed.
def parse_table_definition_file(file): ''' Read and parse the XML of a table-definition file. @return: an ElementTree object for the table definition ''' logging.info("Reading table definition from '%s'...", file) if not os.path.isfile(file): logging.error("File '%s' does not exist.", file) exit(1) try: tableGenFile = ElementTree.ElementTree().parse(file) except IOError as e: logging.error('Could not read result file %s: %s', file, e) exit(1) except ElementTree.ParseError as e: logging.error('Table file %s is invalid: %s', file, e) exit(1) if 'table' != tableGenFile.tag: logging.error("Table file %s is invalid: Its root element is not named 'table'.", file) exit(1) return tableGenFile
Read and parse the XML of a table-definition file. @return: an ElementTree object for the table definition
def get_if_raw_addr(ifname): """Returns the IPv4 address configured on 'ifname', packed with inet_pton.""" # noqa: E501 # Get ifconfig output try: fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname)) except OSError as msg: warning("Failed to execute ifconfig: (%s)", msg) return b"\0\0\0\0" # Get IPv4 addresses addresses = [l for l in fd if l.find("inet ") >= 0] if not addresses: warning("No IPv4 address found on %s !", ifname) return b"\0\0\0\0" # Pack the first address address = addresses[0].split(' ')[1] if '/' in address: # NetBSD 8.0 address = address.split("/")[0] return socket.inet_pton(socket.AF_INET, address)
Returns the IPv4 address configured on 'ifname', packed with inet_pton.
def _parse(self, threshold): """ internal threshold string parser arguments: threshold: string describing the threshold """ match = re.search(r'^(@?)((~|\d*):)?(\d*)$', threshold) if not match: raise ValueError('Error parsing Threshold: {0}'.format(threshold)) if match.group(1) == '@': self._inclusive = True if match.group(3) == '~': self._min = float('-inf') elif match.group(3): self._min = float(match.group(3)) else: self._min = float(0) if match.group(4): self._max = float(match.group(4)) else: self._max = float('inf') if self._max < self._min: raise ValueError('max must be greater than min')
internal threshold string parser arguments: threshold: string describing the threshold
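To make the accepted threshold syntax concrete, here is a standalone sketch that applies the same regular expression to a few Nagios-style threshold strings (the sample values are chosen only for illustration):

    import re

    # The pattern used by _parse above.
    pattern = re.compile(r'^(@?)((~|\d*):)?(\d*)$')

    for spec in ('10', '10:20', '~:10', '@5:15'):
        m = pattern.search(spec)
        inclusive = m.group(1) == '@'
        low = m.group(3) or '0'                       # missing start defaults to 0
        low = float('-inf') if low == '~' else float(low)
        high = float(m.group(4)) if m.group(4) else float('inf')
        print(spec, '->', inclusive, low, high)
    # 10    -> False 0.0  10.0
    # 10:20 -> False 10.0 20.0
    # ~:10  -> False -inf 10.0
    # @5:15 -> True  5.0  15.0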
def merge(root, head, update, head_source=None): """ This function instantiates a ``Merger`` object using a configuration chosen according to the ``source`` value of the head and update params. Then it runs the merger on the three files provided as input. Params root(dict): the last common parent json of head and update head(dict): the last version of a record in INSPIRE update(dict): the update coming from outside INSPIRE to merge head_source(string): the source of the head record. If ``None``, heuristics are used to derive it from the metadata. This is useful if the HEAD came from legacy and the acquisition_source does not reflect the state of the record. Return A tuple containing the resulting merged record in json format and an object containing all generated conflicts. """ configuration = get_configuration(head, update, head_source) conflicts = [] root, head, update = filter_records(root, head, update, filters=configuration.pre_filters) merger = Merger( root=root, head=head, update=update, default_dict_merge_op=configuration.default_dict_merge_op, default_list_merge_op=configuration.default_list_merge_op, list_dict_ops=configuration.list_dict_ops, list_merge_ops=configuration.list_merge_ops, comparators=configuration.comparators, ) try: merger.merge() except MergeError as e: conflicts = e.content conflicts = filter_conflicts(conflicts, configuration.conflict_filters) conflicts_as_json = [json.loads(c.to_json()) for c in conflicts] flat_conflicts_as_json = list(itertools.chain.from_iterable(conflicts_as_json)) merged = merger.merged_root return merged, flat_conflicts_as_json
This function instantiates a ``Merger`` object using a configuration chosen according to the ``source`` value of the head and update params. Then it runs the merger on the three files provided as input. Params root(dict): the last common parent json of head and update head(dict): the last version of a record in INSPIRE update(dict): the update coming from outside INSPIRE to merge head_source(string): the source of the head record. If ``None``, heuristics are used to derive it from the metadata. This is useful if the HEAD came from legacy and the acquisition_source does not reflect the state of the record. Return A tuple containing the resulting merged record in json format and an object containing all generated conflicts.
def set_control_scheme(self, index): """Sets the control scheme for the agent. See :obj:`ControlSchemes`. Args: index (int): The control scheme to use. Should be set with an enum from :obj:`ControlSchemes`. """ self._current_control_scheme = index % self._num_control_schemes self._control_scheme_buffer[0] = self._current_control_scheme
Sets the control scheme for the agent. See :obj:`ControlSchemes`. Args: index (int): The control scheme to use. Should be set with an enum from :obj:`ControlSchemes`.
def fill_model(self, model=None): """ Populates a model with normalized properties. If no model is provided (None) a new one will be created. :param model: model to be populated :return: populated model """ normalized_dct = self.normalize() if model: if not isinstance(model, self._model_class): raise ModelFormSecurityError('%s should be %s instance' % (model, self._model_class.__name__)) model.populate(**normalized_dct) return model return self._model_class(**normalized_dct)
Populates a model with normalized properties. If no model is provided (None) a new one will be created. :param model: model to be populated :return: populated model
def compute_sims(inputs: mx.nd.NDArray, normalize: bool) -> mx.nd.NDArray: """ Returns a matrix with pair-wise similarity scores between inputs. Similarity score is (normalized) Euclidean distance. 'Similarity with self' is masked to large negative value. :param inputs: NDArray of inputs. :param normalize: Whether to normalize to unit-length. :return: NDArray with pairwise similarities of same shape as inputs. """ if normalize: logger.info("Normalizing embeddings to unit length") inputs = mx.nd.L2Normalization(inputs, mode='instance') sims = mx.nd.dot(inputs, inputs, transpose_b=True) sims_np = sims.asnumpy() np.fill_diagonal(sims_np, -9999999.) sims = mx.nd.array(sims_np) return sims
Returns a matrix with pair-wise similarity scores between inputs. Similarity score is (normalized) Euclidean distance. 'Similarity with self' is masked to large negative value. :param inputs: NDArray of inputs. :param normalize: Whether to normalize to unit-length. :return: NDArray with pairwise similarities of same shape as inputs.
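A minimal NumPy sketch of the same pattern (unit-normalize rows, take pairwise dot products, mask the diagonal), not the MXNet call itself:

    import numpy as np

    x = np.random.rand(4, 8)
    x = x / np.linalg.norm(x, axis=1, keepdims=True)  # unit-length rows
    sims = x @ x.T                                    # (4, 4) similarity matrix
    np.fill_diagonal(sims, -9999999.0)                # mask 'similarity with self'
    print(sims.argmax(axis=1))  # index of the most similar other row, per row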
def bounding_box(alpha, threshold=0.1): """ Returns a bounding box of the support. Parameters ---------- alpha : ndarray, ndim=2 Any one-channel image where the background has zero or low intensity. threshold : float The threshold that divides background from foreground. Returns ------- bounding_box : (top, left, bottom, right) The bounding box describing the smallest rectangle containing the foreground object, as defined by the threshold. """ assert alpha.ndim == 2 # Take the bounding box of the support, with a certain threshold. supp_axs = [alpha.max(axis=1-i) for i in range(2)] # Check first and last value of that threshold bb = [np.where(supp_axs[i] > threshold)[0][[0, -1]] for i in range(2)] return (bb[0][0], bb[1][0], bb[0][1], bb[1][1])
Returns a bounding box of the support. Parameters ---------- alpha : ndarray, ndim=2 Any one-channel image where the background has zero or low intensity. threshold : float The threshold that divides background from foreground. Returns ------- bounding_box : (top, left, bottom, right) The bounding box describing the smallest rectangle containing the foreground object, as defined by the threshold.
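A worked example of the same thresholding logic on a tiny synthetic mask, restated with plain NumPy:

    import numpy as np

    # Toy alpha mask: zeros everywhere except a bright 2x3 patch.
    alpha = np.zeros((6, 8))
    alpha[2:4, 3:6] = 1.0

    supp_axs = [alpha.max(axis=1 - i) for i in range(2)]
    bb = [np.where(supp_axs[i] > 0.1)[0][[0, -1]] for i in range(2)]
    print((bb[0][0], bb[1][0], bb[0][1], bb[1][1]))  # (2, 3, 3, 5) -> top, left, bottom, right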
def _pack(self, msg_type, payload): """ Packs the given message type and payload. Turns the resulting message into a byte string. """ pb = payload.encode('utf-8') s = struct.pack('=II', len(pb), msg_type.value) return self.MAGIC.encode('utf-8') + s + pb
Packs the given message type and payload. Turns the resulting message into a byte string.
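A self-contained sketch of the same framing scheme; the magic prefix and the message-type enum below are placeholders, since the real values live on the class and are not shown above:

    import struct
    from enum import Enum

    MAGIC = 'MAGIC'  # placeholder prefix, not the real class attribute

    class MsgType(Enum):
        COMMAND = 0

    def pack(msg_type, payload):
        pb = payload.encode('utf-8')
        header = struct.pack('=II', len(pb), msg_type.value)  # payload length, then type
        return MAGIC.encode('utf-8') + header + pb

    print(pack(MsgType.COMMAND, 'hello'))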
def phonenumber_validation(data): """ Validates phonenumber Similar to phonenumber_field.validators.validate_international_phonenumber() but uses a different message if the country prefix is absent. """ from phonenumber_field.phonenumber import to_python phone_number = to_python(data) if not phone_number: return data elif not phone_number.country_code: raise serializers.ValidationError(_("Phone number needs to include valid country code (E.g +37255555555).")) elif not phone_number.is_valid(): raise serializers.ValidationError(_('The phone number entered is not valid.')) return data
Validates phonenumber Similar to phonenumber_field.validators.validate_international_phonenumber() but uses a different message if the country prefix is absent.
def mkdir_chown(paths, user_group=None, permissions='ug=rwX,o=rX', create_parent=True, check_if_exists=False, recursive=False): """ Generates a unix command line for creating a directory and assigning permissions to it. Shortcut to a combination of :func:`~mkdir`, :func:`~chown`, and :func:`~chmod`. Note that if `check_if_exists` has been set to ``True``, and the directory is found, `mkdir` is not called, but `user_group` and `permissions` are still applied. :param paths: Can be a single path string, or a list or tuple of path strings. :type paths: unicode | str | tuple[unicode | str] | list[unicode | str] :param user_group: Optional owner of the directory. For notation, see :func:`~get_user_group`. :type user_group: unicode | str | int | tuple :param permissions: Optional permission mode, in any notation accepted by the unix `chmod` command. Default is ``ug=rwX,o=rX``. :type permissions: unicode | str :param create_parent: Parent directories are created if not present (`-p` argument to `mkdir`). :type create_parent: bool :param check_if_exists: Prior to creating the directory, checks if it already exists. :type check_if_exists: bool :param recursive: Apply permissions and owner change recursively. :type recursive: bool :return: Unix shell command line. :rtype: unicode | str """ def _generate_str(path): mkdir_str = mkdir(path, create_parent, check_if_exists) chown_str = chown(user_group, path, recursive) if user_group else None chmod_str = chmod(permissions, path, recursive) if permissions else None return ' && '.join(n for n in (mkdir_str, chown_str, chmod_str) if n) if isinstance(paths, (tuple, list)): return '; '.join((_generate_str(path) for path in paths)) return _generate_str(paths)
Generates a unix command line for creating a directory and assigning permissions to it. Shortcut to a combination of :func:`~mkdir`, :func:`~chown`, and :func:`~chmod`. Note that if `check_if_exists` has been set to ``True``, and the directory is found, `mkdir` is not called, but `user_group` and `permissions` are still applied. :param paths: Can be a single path string, or a list or tuple of path strings. :type paths: unicode | str | tuple[unicode | str] | list[unicode | str] :param user_group: Optional owner of the directory. For notation, see :func:`~get_user_group`. :type user_group: unicode | str | int | tuple :param permissions: Optional permission mode, in any notation accepted by the unix `chmod` command. Default is ``ug=rwX,o=rX``. :type permissions: unicode | str :param create_parent: Parent directories are created if not present (`-p` argument to `mkdir`). :type create_parent: bool :param check_if_exists: Prior to creating the directory, checks if it already exists. :type check_if_exists: bool :param recursive: Apply permissions and owner change recursively. :type recursive: bool :return: Unix shell command line. :rtype: unicode | str
def generate_image_commands(): ''' The Image client holds the Singularity image command group, mainly deprecated commands (image.import) and additional command helpers that are commonly used but not provided by Singularity. The levels of verbosity (debug and quiet) are passed from the main client via the environment variable MESSAGELEVEL. These commands are added to Client.image under main/__init__.py to expose subcommands: Client.image.export Client.image.imprt Client.image.decompress Client.image.create ''' class ImageClient(object): group = "image" from spython.main.base.logger import println from spython.main.base.command import ( init_command, run_command ) from .utils import ( compress, decompress ) from .create import create from .importcmd import importcmd from .export import export ImageClient.create = create ImageClient.imprt = importcmd ImageClient.export = export ImageClient.decompress = decompress ImageClient.compress = compress ImageClient.println = println ImageClient.init_command = init_command ImageClient.run_command = run_command cli = ImageClient() return cli
The Image client holds the Singularity image command group, mainly deprecated commands (image.import) and additional command helpers that are commonly used but not provided by Singularity. The levels of verbosity (debug and quiet) are passed from the main client via the environment variable MESSAGELEVEL. These commands are added to Client.image under main/__init__.py to expose subcommands: Client.image.export Client.image.imprt Client.image.decompress Client.image.create
def sort_dict(d, desc=True): """ Sort an ordered dictionary by value, descending. Args: d (OrderedDict): An ordered dictionary. desc (bool): If true, sort desc. Returns: OrderedDict: The sorted dictionary. """ sort = sorted(d.items(), key=lambda x: x[1], reverse=desc) return OrderedDict(sort)
Sort an ordered dictionary by value, descending. Args: d (OrderedDict): An ordered dictionary. desc (bool): If true, sort desc. Returns: OrderedDict: The sorted dictionary.
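A usage sketch, assuming the sort_dict function above is importable:

    from collections import OrderedDict

    d = OrderedDict([('a', 3), ('b', 10), ('c', 7)])
    print(sort_dict(d))              # OrderedDict([('b', 10), ('c', 7), ('a', 3)])
    print(sort_dict(d, desc=False))  # OrderedDict([('a', 3), ('c', 7), ('b', 10)])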
def find_xenon_grpc_jar(): """Find the Xenon-GRPC jar-file, windows version.""" prefix = Path(sys.prefix) locations = [ prefix / 'lib', prefix / 'local' / 'lib' ] for location in locations: jar_file = location / 'xenon-grpc-{}-all.jar'.format( xenon_grpc_version) if not jar_file.exists(): continue else: return str(jar_file) return None
Find the Xenon-GRPC jar-file, windows version.
def asyncStarCmap(asyncCallable, iterable): """itertools.starmap for deferred callables using cooperative multitasking """ results = [] yield coopStar(asyncCallable, results.append, iterable) returnValue(results)
itertools.starmap for deferred callables using cooperative multitasking
def add_acquisition_source( self, method, submission_number=None, internal_uid=None, email=None, orcid=None, source=None, datetime=None, ): """Add acquisition source. :type submission_number: integer :type email: integer :type source: string :param method: method of acquisition for the suggested document :type method: string :param orcid: orcid of the user that is creating the record :type orcid: string :param internal_uid: id of the user that is creating the record :type internal_uid: string :param datetime: UTC datetime in ISO 8601 format :type datetime: string """ acquisition_source = self._sourced_dict(source) acquisition_source['submission_number'] = str(submission_number) for key in ('datetime', 'email', 'method', 'orcid', 'internal_uid'): if locals()[key] is not None: acquisition_source[key] = locals()[key] self.obj['acquisition_source'] = acquisition_source
Add acquisition source. :type submission_number: integer :type email: integer :type source: string :param method: method of acquisition for the suggested document :type method: string :param orcid: orcid of the user that is creating the record :type orcid: string :param internal_uid: id of the user that is creating the record :type internal_uid: string :param datetime: UTC datetime in ISO 8601 format :type datetime: string
def get_labels(data, centroids,K): """ Returns a label for each piece of data in the dataset Parameters ------------ data: array-like, shape= (m_samples,n_samples) K: integer number of K clusters centroids: array-like, shape=(K, n_samples) returns ------------- labels: array-like, shape (1,n_samples) """ distances = np.sqrt(((data - centroids[:, np.newaxis])**2).sum(axis=2)) return np.argmin(distances, axis=0)
Returns a label for each piece of data in the dataset Parameters ------------ data: array-like, shape= (m_samples,n_samples) K: integer number of K clusters centroids: array-like, shape=(K, n_samples) returns ------------- labels: array-like, shape (1,n_samples)
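The same assignment step restated with plain NumPy on toy data, to show how the broadcasting produces one label per sample:

    import numpy as np

    data = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [4.9, 5.0]])
    centroids = np.array([[0.0, 0.0], [5.0, 5.0]])  # K = 2

    # Broadcasting: (K, 1, n) - (m, n) -> (K, m, n); summing over axis 2 gives (K, m).
    distances = np.sqrt(((data - centroids[:, np.newaxis]) ** 2).sum(axis=2))
    print(np.argmin(distances, axis=0))  # [0 0 1 1]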
def _deserialize_data(self, json_data): """ Deserialize a JSON into a dictionary """ my_dict = json.loads(json_data.decode('utf8').replace("'", '"'), encoding='UTF-8') for item in my_dict: if item == const.ADIF: my_dict[item] = int(my_dict[item]) elif item == const.DELETED: my_dict[item] = self._str_to_bool(my_dict[item]) elif item == const.CQZ: my_dict[item] = int(my_dict[item]) elif item == const.ITUZ: my_dict[item] = int(my_dict[item]) elif item == const.LATITUDE: my_dict[item] = float(my_dict[item]) elif item == const.LONGITUDE: my_dict[item] = float(my_dict[item]) elif item == const.START: my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC) elif item == const.END: my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC) elif item == const.WHITELIST_START: my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC) elif item == const.WHITELIST_END: my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC) elif item == const.WHITELIST: my_dict[item] = self._str_to_bool(my_dict[item]) else: my_dict[item] = unicode(my_dict[item]) return my_dict
Deserialize a JSON into a dictionary
def _process_download_descriptor(self, dd): # type: (Downloader, blobxfer.models.download.Descriptor) -> None """Process download descriptor :param Downloader self: this :param blobxfer.models.download.Descriptor dd: download descriptor """ # update progress bar self._update_progress_bar() # get download offsets offsets, resume_bytes = dd.next_offsets() # add resume bytes to counter if resume_bytes is not None: with self._disk_operation_lock: self._download_bytes_sofar += resume_bytes logger.debug('adding {} sofar {} from {}'.format( resume_bytes, self._download_bytes_sofar, dd.entity.name)) del resume_bytes # check if all operations completed if offsets is None and dd.all_operations_completed: finalize = True sfpath = str(dd.final_path) # finalize integrity dd.finalize_integrity() # vectored io checks if dd.entity.vectored_io is not None: with self._transfer_lock: if sfpath not in self._vio_map: self._vio_map[sfpath] = 1 else: self._vio_map[sfpath] += 1 if (self._vio_map[sfpath] == dd.entity.vectored_io.total_slices): self._vio_map.pop(sfpath) else: finalize = False # finalize file if finalize: dd.finalize_file() # accounting with self._transfer_lock: self._download_sofar += 1 if dd.entity.is_encrypted: self._dd_map.pop(sfpath) self._transfer_set.remove( blobxfer.operations.download.Downloader. create_unique_transfer_operation_id(dd.entity)) self._transfer_cc.pop(dd.entity.path, None) return # re-enqueue for other threads to download if offsets is None: self._transfer_queue.put(dd) return # ensure forthcoming disk operation is accounted for with self._disk_operation_lock: self._disk_set.add( blobxfer.operations.download.Downloader. create_unique_disk_operation_id(dd, offsets)) # check if there are too many concurrent connections with self._transfer_lock: self._transfer_cc[dd.entity.path] += 1 cc_xfer = self._transfer_cc[dd.entity.path] if cc_xfer <= self._spec.options.max_single_object_concurrency: self._transfer_queue.put(dd) # issue get range if dd.entity.mode == blobxfer.models.azure.StorageModes.File: data = blobxfer.operations.azure.file.get_file_range( dd.entity, offsets) else: data = blobxfer.operations.azure.blob.get_blob_range( dd.entity, offsets) with self._transfer_lock: self._transfer_cc[dd.entity.path] -= 1 if cc_xfer > self._spec.options.max_single_object_concurrency: self._transfer_queue.put(dd) # enqueue data for processing self._disk_queue.put((dd, offsets, data))
Process download descriptor :param Downloader self: this :param blobxfer.models.download.Descriptor dd: download descriptor
def _construct_message(self): """Set the message token/channel, then call the base class constructor.""" self.message = {"token": self._auth, "channel": self.channel} super()._construct_message()
Set the message token/channel, then call the base class constructor.
def _resolve_datacenter(dc, pillarenv): ''' If ``dc`` is a string - return it as is. If it's a dict then sort it in descending order by key length and try to use keys as RegEx patterns to match against ``pillarenv``. The value for a matched pattern should be a string (that can use ``str.format`` syntax together with captured variables from the pattern) pointing to the target datacenter to use. If no patterns match, return ``None``, which means use the datacenter of the connected Consul agent. ''' log.debug('Resolving Consul datacenter based on: %s', dc) try: mappings = dc.items() # is it a dict? except AttributeError: log.debug('Using pre-defined DC: \'%s\'', dc) return dc log.debug('Selecting DC based on pillarenv using %d pattern(s)', len(mappings)) log.debug('Pillarenv set to \'%s\'', pillarenv) # sort in reverse based on pattern length # but use alphabetic order within groups of patterns of same length sorted_mappings = sorted(mappings, key=lambda m: (-len(m[0]), m[0])) for pattern, target in sorted_mappings: match = re.match(pattern, pillarenv) if match: log.debug('Matched pattern: \'%s\'', pattern) result = target.format(**match.groupdict()) log.debug('Resolved datacenter: \'%s\'', result) return result log.debug( 'None of following patterns matched pillarenv=%s: %s', pillarenv, ', '.join(repr(x) for x in mappings) )
If ``dc`` is a string - return it as is. If it's a dict then sort it in descending order by key length and try to use keys as RegEx patterns to match against ``pillarenv``. The value for a matched pattern should be a string (that can use ``str.format`` syntax together with captured variables from the pattern) pointing to the target datacenter to use. If no patterns match, return ``None``, which means use the datacenter of the connected Consul agent.
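A standalone sketch of the pattern-matching behaviour described above; the mapping and pillarenv value are hypothetical:

    import re

    # Hypothetical mapping: longer (more specific) patterns are tried first.
    dc = {
        r'(?P<env>dev|prod)-eu': 'eu-{env}',
        r'(?P<env>dev|prod)': 'us-{env}',
    }
    pillarenv = 'prod-eu'

    sorted_mappings = sorted(dc.items(), key=lambda m: (-len(m[0]), m[0]))
    for pattern, target in sorted_mappings:
        match = re.match(pattern, pillarenv)
        if match:
            print(target.format(**match.groupdict()))  # eu-prod
            break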
def enqueue_mod(self, dn, mod): """Enqueue a LDAP modification. Arguments: dn -- the distinguished name of the object to modify mod -- an ldap modfication entry to enqueue """ # mark for update if dn not in self.__pending_mod_dn__: self.__pending_mod_dn__.append(dn) self.__mod_queue__[dn] = [] self.__mod_queue__[dn].append(mod)
Enqueue a LDAP modification. Arguments: dn -- the distinguished name of the object to modify mod -- an ldap modfication entry to enqueue
def create_custom_gradebook_column(self, course_id, column_title, column_hidden=None, column_position=None, column_teacher_notes=None): """ Create a custom gradebook column. Create a custom gradebook column """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - column[title] """no description""" data["column[title]"] = column_title # OPTIONAL - column[position] """The position of the column relative to other custom columns""" if column_position is not None: data["column[position]"] = column_position # OPTIONAL - column[hidden] """Hidden columns are not displayed in the gradebook""" if column_hidden is not None: data["column[hidden]"] = column_hidden # OPTIONAL - column[teacher_notes] """Set this if the column is created by a teacher. The gradebook only supports one teacher_notes column.""" if column_teacher_notes is not None: data["column[teacher_notes]"] = column_teacher_notes self.logger.debug("POST /api/v1/courses/{course_id}/custom_gradebook_columns with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/custom_gradebook_columns".format(**path), data=data, params=params, single_item=True)
Create a custom gradebook column. Create a custom gradebook column
def get_number_of_atoms(self): """Get the number of atoms in the calculated structure. Returns: Property, where number of atoms is a scalar. """ strc = self.get_output_structure() if not strc: return None return Property(scalars=[Scalar(value=len(strc))], units="/unit cell")
Get the number of atoms in the calculated structure. Returns: Property, where number of atoms is a scalar.
def get_preparation_cmd(user, permissions, path): """ Generates the command lines for adjusting a volume's ownership and permission flags. Returns an empty list if there is nothing to adjust. :param user: User to set ownership for on the path via ``chown``. :type user: unicode | str | int | dockermap.functional.AbstractLazyObject :param permissions: Permission flags to set via ``chmod``. :type permissions: unicode | str | dockermap.functional.AbstractLazyObject :param path: Path to adjust permissions on. :type path: unicode | str :return: Iterator over resulting command strings. :rtype: collections.Iterable[unicode | str] """ r_user = resolve_value(user) r_permissions = resolve_value(permissions) if user: yield chown(r_user, path) if permissions: yield chmod(r_permissions, path)
Generates the command lines for adjusting a volume's ownership and permission flags. Returns an empty list if there is nothing to adjust. :param user: User to set ownership for on the path via ``chown``. :type user: unicode | str | int | dockermap.functional.AbstractLazyObject :param permissions: Permission flags to set via ``chmod``. :type permissions: unicode | str | dockermap.functional.AbstractLazyObject :param path: Path to adjust permissions on. :type path: unicode | str :return: Iterator over resulting command strings. :rtype: collections.Iterable[unicode | str]
def get_config(self): """ Get and set config options from config file """ if 'rmq_port' in self.config: self.rmq_port = int(self.config['rmq_port']) if 'rmq_user' in self.config: self.rmq_user = self.config['rmq_user'] if 'rmq_password' in self.config: self.rmq_password = self.config['rmq_password'] if 'rmq_vhost' in self.config: self.rmq_vhost = self.config['rmq_vhost'] if 'rmq_exchange_type' in self.config: self.rmq_exchange_type = self.config['rmq_exchange_type'] if 'rmq_durable' in self.config: self.rmq_durable = bool(self.config['rmq_durable']) if 'rmq_heartbeat_interval' in self.config: self.rmq_heartbeat_interval = int( self.config['rmq_heartbeat_interval'])
Get and set config options from config file
def setValue(self, p_float): """Override method to set a value to show it as 0 to 100. :param p_float: The float number that want to be set. :type p_float: float """ p_float = p_float * 100 super(PercentageSpinBox, self).setValue(p_float)
Override method to set a value to show it as 0 to 100. :param p_float: The float number that want to be set. :type p_float: float
def admin_url(obj): """ Returns the admin URL of the object. No permissions checking is involved, so use with caution to avoid exposing the link to unauthorised users. Example:: {{ foo_obj|admin_url }} renders as:: /admin/foo/123 :param obj: A Django model instance. :return: the admin URL of the object """ if hasattr(obj, 'get_admin_url'): return mark_safe(obj.get_admin_url()) return mark_safe(admin_url_fn(obj))
Returns the admin URL of the object. No permissions checking is involved, so use with caution to avoid exposing the link to unauthorised users. Example:: {{ foo_obj|admin_url }} renders as:: /admin/foo/123 :param obj: A Django model instance. :return: the admin URL of the object
def title(label, style=None): """Sets the title for the current figure. Parameters ---------- label : str The new title for the current figure. style: dict The CSS style to be applied to the figure title """ fig = current_figure() fig.title = label if style is not None: fig.title_style = style
Sets the title for the current figure. Parameters ---------- label : str The new title for the current figure. style: dict The CSS style to be applied to the figure title
def expect_column_kl_divergence_to_be_less_than(self, column, partition_object=None, threshold=None, tail_weight_holdout=0, internal_weight_holdout=0, result_format=None, include_config=False, catch_exceptions=None, meta=None): """Expect the Kullback-Leibler (KL) divergence (relative entropy) of the specified column with respect to the \ partition object to be lower than the provided threshold. KL divergence compares two distributions. The higher the divergence value (relative entropy), the larger the \ difference between the two distributions. A relative entropy of zero indicates that the data are \ distributed identically, `when binned according to the provided partition`. In many practical contexts, choosing a value between 0.5 and 1 will provide a useful test. This expectation works on both categorical and continuous partitions. See notes below for details. expect_column_kl_divergence_to_be_less_than is a :func:`column_aggregate_expectation <great_expectations.data_asset.dataset.Dataset.column_aggregate_expectation>`. Args: column (str): \ The column name. partition_object (dict): \ The expected partition object (see :ref:`partition_object`). threshold (float): \ The maximum KL divergence for which to return `success=True`. If KL divergence is larger than the\ provided threshold, the test will return `success=False`. Keyword Args: internal_weight_holdout (float between 0 and 1 or None): \ The amount of weight to split uniformly among zero-weighted partition bins. internal_weight_holdout \ provides a mechanism to make the test less strict by assigning positive weights to values observed in \ the data for which the partition explicitly expected zero weight. With no internal_weight_holdout, \ any value observed in such a region will cause KL divergence to rise to +Infinity.\ Defaults to 0. tail_weight_holdout (float between 0 and 1 or None): \ The amount of weight to add to the tails of the histogram. Tail weight holdout is split evenly between\ (-Infinity, min(partition_object['bins'])) and (max(partition_object['bins']), +Infinity). \ tail_weight_holdout provides a mechanism to make the test less strict by assigning positive weights to \ values observed in the data that are not present in the partition. With no tail_weight_holdout, \ any value observed outside the provided partition_object will cause KL divergence to rise to +Infinity.\ Defaults to 0. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \ For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. Notes: These fields in the result object are customized for this expectation: :: { "observed_value": (float) The true KL divergence (relative entropy) or None if the value is calculated \ as infinity, -infinity, or NaN "details": { "observed_partition": (dict) The partition observed in the data "expected_partition": (dict) The partition against which the data were compared, after applying specified weight holdouts. } } If the partition_object is categorical, this expectation will expect the values in column to also be \ categorical. * If the column includes values that are not present in the partition, the tail_weight_holdout will be \ equally split among those values, providing a mechanism to weaken the strictness of the expectation \ (otherwise, relative entropy would immediately go to infinity). * If the partition includes values that are not present in the column, the test will simply include \ zero weight for that value. If the partition_object is continuous, this expectation will discretize the values in the column according \ to the bins specified in the partition_object, and apply the test to the resulting distribution. * The internal_weight_holdout and tail_weight_holdout parameters provide a mechanism to weaken the \ expectation, since an expected weight of zero would drive relative entropy to be infinite if any data \ are observed in that interval. * If internal_weight_holdout is specified, that value will be distributed equally among any intervals \ with weight zero in the partition_object. * If tail_weight_holdout is specified, that value will be appended to the tails of the bins \ ((-Infinity, min(bins)) and (max(bins), Infinity). If relative entropy/kl divergence goes to infinity for any of the reasons mentioned above, the observed value\ will be set to None. This is because inf, -inf, and NaN are not json serializable and cause some json parsers to\ crash when encountered. The python None token will be serialized to null in json. See also: expect_column_chisquare_test_p_value_to_be_greater_than expect_column_bootstrapped_ks_test_p_value_to_be_greater_than """ raise NotImplementedError
Expect the Kullback-Leibler (KL) divergence (relative entropy) of the specified column with respect to the \ partition object to be lower than the provided threshold. KL divergence compares two distributions. The higher the divergence value (relative entropy), the larger the \ difference between the two distributions. A relative entropy of zero indicates that the data are \ distributed identically, `when binned according to the provided partition`. In many practical contexts, choosing a value between 0.5 and 1 will provide a useful test. This expectation works on both categorical and continuous partitions. See notes below for details. expect_column_kl_divergence_to_be_less_than is a :func:`column_aggregate_expectation <great_expectations.data_asset.dataset.Dataset.column_aggregate_expectation>`. Args: column (str): \ The column name. partition_object (dict): \ The expected partition object (see :ref:`partition_object`). threshold (float): \ The maximum KL divergence for which to return `success=True`. If KL divergence is larger than the\ provided threshold, the test will return `success=False`. Keyword Args: internal_weight_holdout (float between 0 and 1 or None): \ The amount of weight to split uniformly among zero-weighted partition bins. internal_weight_holdout \ provides a mechanism to make the test less strict by assigning positive weights to values observed in \ the data for which the partition explicitly expected zero weight. With no internal_weight_holdout, \ any value observed in such a region will cause KL divergence to rise to +Infinity.\ Defaults to 0. tail_weight_holdout (float between 0 and 1 or None): \ The amount of weight to add to the tails of the histogram. Tail weight holdout is split evenly between\ (-Infinity, min(partition_object['bins'])) and (max(partition_object['bins']), +Infinity). \ tail_weight_holdout provides a mechanism to make the test less strict by assigning positive weights to \ values observed in the data that are not present in the partition. With no tail_weight_holdout, \ any value observed outside the provided partition_object will cause KL divergence to rise to +Infinity.\ Defaults to 0. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \ For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. Notes: These fields in the result object are customized for this expectation: :: { "observed_value": (float) The true KL divergence (relative entropy) or None if the value is calculated \ as infinity, -infinity, or NaN "details": { "observed_partition": (dict) The partition observed in the data "expected_partition": (dict) The partition against which the data were compared, after applying specified weight holdouts. } } If the partition_object is categorical, this expectation will expect the values in column to also be \ categorical. * If the column includes values that are not present in the partition, the tail_weight_holdout will be \ equally split among those values, providing a mechanism to weaken the strictness of the expectation \ (otherwise, relative entropy would immediately go to infinity). * If the partition includes values that are not present in the column, the test will simply include \ zero weight for that value. If the partition_object is continuous, this expectation will discretize the values in the column according \ to the bins specified in the partition_object, and apply the test to the resulting distribution. * The internal_weight_holdout and tail_weight_holdout parameters provide a mechanism to weaken the \ expectation, since an expected weight of zero would drive relative entropy to be infinite if any data \ are observed in that interval. * If internal_weight_holdout is specified, that value will be distributed equally among any intervals \ with weight zero in the partition_object. * If tail_weight_holdout is specified, that value will be appended to the tails of the bins \ ((-Infinity, min(bins)) and (max(bins), Infinity). If relative entropy/kl divergence goes to infinity for any of the reasons mentioned above, the observed value\ will be set to None. This is because inf, -inf, and NaN are not json serializable and cause some json parsers to\ crash when encountered. The python None token will be serialized to null in json. See also: expect_column_chisquare_test_p_value_to_be_greater_than expect_column_bootstrapped_ks_test_p_value_to_be_greater_than
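For intuition, a small NumPy sketch of the KL divergence (relative entropy) between two binned distributions; the weights are made up and this is not the Great Expectations implementation:

    import numpy as np

    expected = np.array([0.25, 0.50, 0.25])  # partition weights
    observed = np.array([0.30, 0.45, 0.25])  # weights observed in the data

    # A zero expected weight would send this to +inf, hence the holdout parameters.
    kl = np.sum(observed * np.log(observed / expected))
    print(kl)  # small positive number; 0.0 only when the distributions match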
def compile_pythrancode(module_name, pythrancode, specs=None, opts=None, cpponly=False, pyonly=False, output_file=None, module_dir=None, **kwargs): '''Pythran code (string) -> c++ code -> native module if `cpponly` is set to true, return the generated C++ filename if `pyonly` is set to true, prints the generated Python filename, unless `output_file` is set otherwise, return the generated native library filename ''' if pyonly: # Only generate the optimized python code content = generate_py(module_name, pythrancode, opts, module_dir) if output_file is None: print(content) return None else: return _write_temp(content, '.py') # Autodetect the Pythran spec if not given as parameter from pythran.spec import spec_parser if specs is None: specs = spec_parser(pythrancode) # Generate C++, get a PythonModule object module, error_checker = generate_cxx(module_name, pythrancode, specs, opts, module_dir) if 'ENABLE_PYTHON_MODULE' in kwargs.get('undef_macros', []): module.preamble.insert(0, Line('#undef ENABLE_PYTHON_MODULE')) module.preamble.insert(0, Line('#define PY_MAJOR_VERSION {}'. format(sys.version_info.major))) if cpponly: # User wants only the C++ code tmp_file = _write_temp(str(module), '.cpp') if not output_file: output_file = module_name + ".cpp" shutil.move(tmp_file, output_file) logger.info("Generated C++ source file: " + output_file) else: # Compile to binary try: output_file = compile_cxxcode(module_name, str(module), output_binary=output_file, **kwargs) except CompileError: logger.warn("Compilation error, trying hard to find its origin...") error_checker() logger.warn("Nop, I'm going to flood you with C++ errors!") raise return output_file
Pythran code (string) -> c++ code -> native module if `cpponly` is set to true, return the generated C++ filename if `pyonly` is set to true, prints the generated Python filename, unless `output_file` is set otherwise, return the generated native library filename
def write(self, buf): """Write bytes to the stream.""" underflow = self._audio_stream.write(buf) if underflow: logging.warning('SoundDeviceStream write underflow (size: %d)', len(buf)) return len(buf)
Write bytes to the stream.
def plot_fermi_surface(data, structure, cbm, energy_levels=[], multiple_figure=True, mlab_figure=None, kpoints_dict={}, color=(0, 0, 1), transparency_factor=[], labels_scale_factor=0.05, points_scale_factor=0.02, interative=True): """ Plot the Fermi surface at a specific energy value. Args: data: energy values in a 3D grid from a CUBE file via read_cube_file function, or from a BoltztrapAnalyzer.fermi_surface_data structure: structure object of the material energy_levels: list of energy values of the fermi surface. By default 0 eV corresponds to the VBM, as in the plot of band structure along symmetry line. Default: max energy value + 0.01 eV cbm: Boolean value to specify if the considered band is a conduction band or not multiple_figure: if True a figure for each energy level will be shown. If False all the surfaces will be shown in the same figure. In this last case, tune the transparency factor. mlab_figure: provide a previous figure to plot a new surface on it. kpoints_dict: dictionary of kpoints to show in the plot. example: {"K":[0.5,0.0,0.5]}, where the coords are fractional. color: tuple (r,g,b) of integers to define the color of the surface. transparency_factor: list of values in the range [0,1] to tune the opacity of the surfaces. labels_scale_factor: factor to tune the size of the kpoint labels points_scale_factor: factor to tune the size of the kpoint points interative: if True an interactive figure will be shown. If False a non interactive figure will be shown, but it is possible to plot other surfaces on the same figure. To make it interactive, run mlab.show(). Returns: a Mayavi figure and a mlab module to control the plot. Note: Experimental. Please, double check the surface shown by using some other software and report issues. """ try: from mayavi import mlab except ImportError: raise BoltztrapError( "Mayavi package should be installed to use this function") bz = structure.lattice.reciprocal_lattice.get_wigner_seitz_cell() cell = structure.lattice.reciprocal_lattice.matrix fact = 1 if cbm == False else -1 en_min = np.min(fact * data.ravel()) en_max = np.max(fact * data.ravel()) if energy_levels == []: energy_levels = [en_min + 0.01] if cbm == True else \ [en_max - 0.01] print("Energy level set to: " + str(energy_levels[0]) + " eV") else: for e in energy_levels: if e > en_max or e < en_min: raise BoltztrapError("energy level " + str(e) + " not in the range of possible energies: [" + str(en_min) + ", " + str(en_max) + "]") if transparency_factor == []: transparency_factor = [1] * len(energy_levels) if mlab_figure: fig = mlab_figure if mlab_figure == None and not multiple_figure: fig = mlab.figure(size=(1024, 768), bgcolor=(1, 1, 1)) for iface in range(len(bz)): for line in itertools.combinations(bz[iface], 2): for jface in range(len(bz)): if iface < jface and any(np.all(line[0] == x) for x in bz[jface]) and \ any(np.all(line[1] == x) for x in bz[jface]): mlab.plot3d(*zip(line[0], line[1]), color=(0, 0, 0), tube_radius=None, figure=fig) for label, coords in kpoints_dict.items(): label_coords = structure.lattice.reciprocal_lattice \ .get_cartesian_coords(coords) mlab.points3d(*label_coords, scale_factor=points_scale_factor, color=(0, 0, 0), figure=fig) mlab.text3d(*label_coords, text=label, scale=labels_scale_factor, color=(0, 0, 0), figure=fig) for isolevel, alpha in zip(energy_levels, transparency_factor): if multiple_figure: fig = mlab.figure(size=(1024, 768), bgcolor=(1, 1, 1)) for iface in range(len(bz)): for line in itertools.combinations(bz[iface], 2): for jface in range(len(bz)): if iface < jface and any(np.all(line[0] == x) for x in bz[jface]) and \ any(np.all(line[1] == x) for x in bz[jface]): mlab.plot3d(*zip(line[0], line[1]), color=(0, 0, 0), tube_radius=None, figure=fig) for label, coords in kpoints_dict.items(): label_coords = structure.lattice.reciprocal_lattice \ .get_cartesian_coords(coords) mlab.points3d(*label_coords, scale_factor=points_scale_factor, color=(0, 0, 0), figure=fig) mlab.text3d(*label_coords, text=label, scale=labels_scale_factor, color=(0, 0, 0), figure=fig) cp = mlab.contour3d(fact * data, contours=[isolevel], transparent=True, colormap='hot', color=color, opacity=alpha, figure=fig) polydata = cp.actor.actors[0].mapper.input pts = np.array(polydata.points) # - 1 polydata.points = np.dot(pts, cell / np.array(data.shape)[:, np.newaxis]) cx, cy, cz = [np.mean(np.array(polydata.points)[:, i]) for i in range(3)] polydata.points = (np.array(polydata.points) - [cx, cy, cz]) * 2 #mlab.view(distance='auto') fig.scene.isometric_view() if interative == True: mlab.show() return fig, mlab
Plot the Fermi surface at a specific energy value. Args: data: energy values in a 3D grid from a CUBE file via the read_cube_file function, or from a BoltztrapAnalyzer.fermi_surface_data structure: structure object of the material energy_levels: list of energy values of the fermi surface. By default 0 eV corresponds to the VBM, as in the plot of band structure along symmetry lines. Default: max energy value + 0.01 eV cbm: Boolean value to specify if the considered band is a conduction band or not multiple_figure: if True a figure for each energy level will be shown. If False all the surfaces will be shown in the same figure. In this last case, tune the transparency factor. mlab_figure: provide a previous figure to plot a new surface on it. kpoints_dict: dictionary of kpoints to show in the plot. example: {"K":[0.5,0.0,0.5]}, where the coords are fractional. color: tuple (r,g,b) of integers to define the color of the surface. transparency_factor: list of values in the range [0,1] to tune the opacity of the surfaces. labels_scale_factor: factor to tune the size of the kpoint labels points_scale_factor: factor to tune the size of the kpoint points interative: if True an interactive figure will be shown. If False a non-interactive figure will be shown, but it is possible to plot other surfaces on the same figure. To make it interactive, run mlab.show(). Returns: a Mayavi figure and a mlab module to control the plot. Note: Experimental. Please double-check the surface shown by using some other software and report issues.
def feature_importances(data, top_n=None, feature_names=None, ax=None): """ Get and order feature importances from a scikit-learn model or from an array-like structure. If data is a scikit-learn model with sub-estimators (e.g. RandomForest, AdaBoost) the function will compute the standard deviation of each feature. Parameters ---------- data : sklearn model or array-like structure Object to get the data from. top_n : int Only get results for the top_n features. feature_names : array-like Feature names ax : matplotlib Axes Axes object to draw the plot onto, otherwise uses current Axes Returns ------- ax: matplotlib Axes Axes containing the plot Examples -------- .. plot:: ../../examples/feature_importances.py """ if data is None: raise ValueError('data is needed to plot feature importances. ' 'When plotting using the evaluator you need to pass ' 'an estimator ') # If no feature_names is provided, assign numbers res = compute.feature_importances(data, top_n, feature_names) # number of features returned n_feats = len(res) if ax is None: ax = plt.gca() ax.set_title("Feature importances") try: ax.bar(range(n_feats), res.importance, yerr=res.std_, color='red', align="center") except: ax.bar(range(n_feats), res.importance, color='red', align="center") ax.set_xticks(range(n_feats)) ax.set_xticklabels(res.feature_name) ax.set_xlim([-1, n_feats]) return ax
Get and order feature importances from a scikit-learn model or from an array-like structure. If data is a scikit-learn model with sub-estimators (e.g. RandomForest, AdaBoost) the function will compute the standard deviation of each feature. Parameters ---------- data : sklearn model or array-like structure Object to get the data from. top_n : int Only get results for the top_n features. feature_names : array-like Feature names ax : matplotlib Axes Axes object to draw the plot onto, otherwise uses current Axes Returns ------- ax: matplotlib Axes Axes containing the plot Examples -------- .. plot:: ../../examples/feature_importances.py
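A self-contained sketch of what a helper like this typically does under the hood, assuming a fitted scikit-learn ensemble; the dataset, feature names, and colour choice below are illustrative only and not part of the original module:

import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

X, y = load_iris(return_X_y=True)
names = np.array(["sepal len", "sepal wid", "petal len", "petal wid"])
model = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)

# Pull importances from the fitted estimator and the per-tree spread.
importance = model.feature_importances_
std = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)
order = np.argsort(importance)[::-1]

ax = plt.gca()
ax.set_title("Feature importances")
ax.bar(range(len(order)), importance[order], yerr=std[order],
       color="red", align="center")
ax.set_xticks(range(len(order)))
ax.set_xticklabels(names[order])
ax.set_xlim([-1, len(order)])
plt.show()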
def need_latex_rerun(self): ''' Return True if any of the LaTeX rerun patterns match the output. ''' for pattern in LATEX_RERUN_PATTERNS: if pattern.search(self.out): return True return False
Return True if any of the LaTeX rerun patterns match the output.
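A minimal standalone sketch of the same check; the two patterns below are illustrative stand-ins, since the actual LATEX_RERUN_PATTERNS list is not shown here:

import re

LATEX_RERUN_PATTERNS = [
    re.compile(r"Rerun to get cross-references right"),
    re.compile(r"There were undefined references"),
]

def need_latex_rerun(out):
    # True as soon as any rerun pattern matches the LaTeX log output.
    return any(pattern.search(out) for pattern in LATEX_RERUN_PATTERNS)

print(need_latex_rerun("LaTeX Warning: There were undefined references."))  # True
print(need_latex_rerun("Output written on main.pdf (3 pages)."))            # False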
def save_model(self, directory=None, append_timestep=True): """ Save TensorFlow model. If no checkpoint directory is given, the model's default saver directory is used. Optionally appends current timestep to prevent overwriting previous checkpoint files. Turn off to be able to load model from the same given path argument as given here. Args: directory (str): Optional checkpoint directory. append_timestep (bool): Appends the current timestep to the checkpoint file if true. If this is set to True, the load path must include the checkpoint timestep suffix. For example, if stored to models/ and set to true, the exported file will be of the form models/model.ckpt-X where X is the last timestep saved. The load path must precisely match this file name. If this option is turned off, the checkpoint will always overwrite the file specified in path and the model can always be loaded under this path. Returns: Checkpoint path were the model was saved. """ return self.model.save(directory=directory, append_timestep=append_timestep)
Save TensorFlow model. If no checkpoint directory is given, the model's default saver directory is used. Optionally appends the current timestep to prevent overwriting previous checkpoint files. Turn this off to be able to load the model from the same path argument as given here. Args: directory (str): Optional checkpoint directory. append_timestep (bool): Appends the current timestep to the checkpoint file if true. If this is set to True, the load path must include the checkpoint timestep suffix. For example, if stored to models/ and set to true, the exported file will be of the form models/model.ckpt-X where X is the last timestep saved. The load path must precisely match this file name. If this option is turned off, the checkpoint will always overwrite the file specified in path and the model can always be loaded under this path. Returns: Checkpoint path where the model was saved.
def get_division(self, obj): """Division.""" if self.context.get("division"): return DivisionSerializer(self.context.get("division")).data else: if obj.slug == "senate": return DivisionSerializer(obj.jurisdiction.division).data else: us = DivisionSerializer(obj.jurisdiction.division).data us["children"] = [ DivisionSerializer( state, context={"children_level": DivisionLevel.DISTRICT}, ).data for state in obj.jurisdiction.division.children.all() ] return us
Division.
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'): ''' run a command on the local host ''' if not self.runner.sudo or not sudoable: if executable: local_cmd = [executable, '-c', cmd] else: local_cmd = cmd else: local_cmd, prompt = utils.make_sudo_cmd(sudo_user, executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.host) p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring), cwd=self.runner.basedir, executable=executable or None, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if self.runner.sudo and sudoable and self.runner.sudo_pass: fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) sudo_output = '' while not sudo_output.endswith(prompt): rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self.runner.timeout) if p.stdout in rfd: chunk = p.stdout.read() elif p.stderr in rfd: chunk = p.stderr.read() else: stdout, stderr = p.communicate() raise errors.AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output) if not chunk: stdout, stderr = p.communicate() raise errors.AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output) sudo_output += chunk p.stdin.write(self.runner.sudo_pass + '\n') fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) stdout, stderr = p.communicate() return (p.returncode, '', stdout, stderr)
run a command on the local host
def _cho_solve_AATI(A, rho, b, c, lwr, check_finite=True): """Patched version of :func:`sporco.linalg.cho_solve_AATI`.""" N, M = A.shape if N >= M: x = (b - _cho_solve((c, lwr), b.dot(A).T, check_finite=check_finite).T.dot(A.T)) / rho else: x = _cho_solve((c, lwr), b.T, check_finite=check_finite).T return x
Patched version of :func:`sporco.linalg.cho_solve_AATI`.
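A small numpy/scipy sketch of the identity this helper relies on (not the sporco implementation itself): with a Cholesky factorisation of A Aᵀ + ρI, the system X (A Aᵀ + ρI) = B is solved by two triangular solves. The shapes and rho value below are arbitrary test data:

import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(0)
N, M, K, rho = 4, 6, 3, 0.5
A = rng.standard_normal((N, M))
B = rng.standard_normal((K, N))

# Factor (A A^T + rho*I) once, then solve X (A A^T + rho*I) = B.
c, lwr = cho_factor(A @ A.T + rho * np.identity(N))
X = cho_solve((c, lwr), B.T).T

assert np.allclose(X @ (A @ A.T + rho * np.identity(N)), B)
print(X.shape)  # (3, 4)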
def txid_to_block_data(txid, bitcoind_proxy, proxy=None): """ Given a txid, get its block's data. Use SPV to verify the information we receive from the (untrusted) bitcoind host. @bitcoind_proxy must be a BitcoindConnection (from virtualchain.lib.session) Return the (block hash, block data, txdata) on success Return (None, None, None) on error """ proxy = get_default_proxy() if proxy is None else proxy timeout = 1.0 while True: try: untrusted_tx_data = bitcoind_proxy.getrawtransaction(txid, 1) untrusted_block_hash = untrusted_tx_data['blockhash'] untrusted_block_data = bitcoind_proxy.getblock(untrusted_block_hash) break except (OSError, IOError) as ie: log.exception(ie) log.error('Network error; retrying...') timeout = timeout * 2 + random.randint(0, timeout) continue except Exception as e: log.exception(e) return None, None, None bitcoind_opts = get_bitcoin_opts() spv_headers_path = bitcoind_opts['bitcoind_spv_path'] # first, can we trust this block? is it in the SPV headers? untrusted_block_header_hex = virtualchain.block_header_to_hex( untrusted_block_data, untrusted_block_data['previousblockhash'] ) block_id = SPVClient.block_header_index( spv_headers_path, ('{}00'.format(untrusted_block_header_hex)).decode('hex') ) if block_id < 0: # bad header log.error('Block header "{}" is not in the SPV headers ({})'.format( untrusted_block_header_hex, spv_headers_path )) return None, None, None # block header is trusted. Is the transaction data consistent with it? verified_block_header = virtualchain.block_verify(untrusted_block_data) if not verified_block_header: msg = ( 'Block transaction IDs are not consistent ' 'with the Merkle root of the trusted header' ) log.error(msg) return None, None, None # verify block hash verified_block_hash = virtualchain.block_header_verify( untrusted_block_data, untrusted_block_data['previousblockhash'], untrusted_block_hash ) if not verified_block_hash: log.error('Block hash is not consistent with block header') return None, None, None # we trust the block hash, block data, and txids block_hash = untrusted_block_hash block_data = untrusted_block_data tx_data = untrusted_tx_data return block_hash, block_data, tx_data
Given a txid, get its block's data. Use SPV to verify the information we receive from the (untrusted) bitcoind host. @bitcoind_proxy must be a BitcoindConnection (from virtualchain.lib.session) Return the (block hash, block data, txdata) on success Return (None, None, None) on error
async def validate(state, holdout_glob): """Validate the trained model against holdout games. Args: state: the RL loop State instance. holdout_glob: a glob that matches holdout games. """ if not glob.glob(holdout_glob): print('Glob "{}" didn\'t match any files, skipping validation'.format( holdout_glob)) else: await run( 'python3', 'validate.py', holdout_glob, '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'validate.flags')), '--work_dir={}'.format(fsdb.working_dir()))
Validate the trained model against holdout games. Args: state: the RL loop State instance. holdout_glob: a glob that matches holdout games.
def _required_child(parent, tag): """ Add child element with *tag* to *parent* if it doesn't already exist. """ if _child(parent, tag) is None: parent.append(_Element(tag))
Add child element with *tag* to *parent* if it doesn't already exist.
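A runnable sketch of the same idempotent-append pattern using the standard library's ElementTree; the _child and _Element helpers below are simple stand-ins for the private helpers assumed by the original:

from xml.etree.ElementTree import Element, tostring

def _child(parent, tag):
    return parent.find(tag)

def _Element(tag):
    return Element(tag)

def _required_child(parent, tag):
    # Add a child element with *tag* to *parent* only if it doesn't exist yet.
    if _child(parent, tag) is None:
        parent.append(_Element(tag))

root = Element("settings")
_required_child(root, "zoom")
_required_child(root, "zoom")   # second call is a no-op
print(tostring(root))           # b'<settings><zoom /></settings>'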
def get(cls, uuid): """Return a `Resource` instance of this class identified by the given code or UUID. Only `Resource` classes with specified `member_path` attributes can be directly requested with this method. """ if not uuid: raise ValueError("get must have a value passed as an argument") uuid = quote(str(uuid)) url = recurly.base_uri() + (cls.member_path % (uuid,)) _resp, elem = cls.element_for_url(url) return cls.from_element(elem)
Return a `Resource` instance of this class identified by the given code or UUID. Only `Resource` classes with specified `member_path` attributes can be directly requested with this method.
def generate_checker(value): """Generate state checker for given value.""" @property @wraps(can_be_) def checker(self): return self.can_be_(value) return checker
Generate state checker for given value.
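A self-contained sketch of how such generated property checkers are typically attached to a class; the can_be_ method and the Order class below are hypothetical and only show the pattern:

from functools import wraps

def can_be_(self, value):
    # Hypothetical transition test; the real implementation is not shown here.
    return value in self.allowed_states

def generate_checker(value):
    @property
    @wraps(can_be_)
    def checker(self):
        return self.can_be_(value)
    return checker

class Order:
    allowed_states = {"new", "paid"}
    can_be_ = can_be_
    can_be_paid = generate_checker("paid")
    can_be_shipped = generate_checker("shipped")

order = Order()
print(order.can_be_paid)      # True
print(order.can_be_shipped)   # False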
def get_metric_values(self): """ Get the faked metrics, for all metric groups and all resources that have been prepared on the manager object of this context object. Returns: iterable of tuple (group_name, iterable of values): The faked metrics, in the order they had been added, where: group_name (string): Metric group name. values (:class:~zhmcclient.FakedMetricObjectValues`): The metric values for one resource at one point in time. """ group_names = self.properties.get('metric-groups', None) if not group_names: group_names = self.manager.get_metric_values_group_names() ret = [] for group_name in group_names: try: mo_val = self.manager.get_metric_values(group_name) ret_item = (group_name, mo_val) ret.append(ret_item) except ValueError: pass # ignore metric groups without metric values return ret
Get the faked metrics, for all metric groups and all resources that have been prepared on the manager object of this context object. Returns: iterable of tuple (group_name, iterable of values): The faked metrics, in the order they had been added, where: group_name (string): Metric group name. values (:class:`~zhmcclient.FakedMetricObjectValues`): The metric values for one resource at one point in time.
def evaluate(self, x): """TODO: will become _evaluate once polynomial filtering is merged.""" if not hasattr(self, '_coefficients'): # Graph Fourier transform -> modulation -> inverse GFT. c = self.G.igft(self._kernels.evaluate(self.G.e).squeeze()) c = np.sqrt(self.G.n_vertices) * self.G.U * c[:, np.newaxis] self._coefficients = self.G.gft(c) shape = x.shape x = x.flatten() y = np.full((self.n_features_out, x.size), np.nan) for i in range(len(x)): query = self._coefficients[x[i] == self.G.e] if len(query) != 0: y[:, i] = query[0] return y.reshape((self.n_features_out,) + shape)
TODO: will become _evaluate once polynomial filtering is merged.
def _detach_received(self, error): """Callback called when a link DETACH frame is received. This callback will process the received DETACH error to determine if the link is recoverable or whether it should be shutdown. :param error: The error information from the detach frame. :type error: ~uamqp.errors.ErrorResponse """ # pylint: disable=protected-access if error: condition = error.condition description = error.description info = error.info else: condition = b"amqp:unknown-error" description = None info = None self._error = errors._process_link_error(self.error_policy, condition, description, info) _logger.info("Received Link detach event: %r\nLink: %r\nDescription: %r" "\nDetails: %r\nRetryable: %r\nConnection: %r", condition, self.name, description, info, self._error.action.retry, self._session._connection.container_id)
Callback called when a link DETACH frame is received. This callback will process the received DETACH error to determine if the link is recoverable or whether it should be shutdown. :param error: The error information from the detach frame. :type error: ~uamqp.errors.ErrorResponse
def check_auth(self, username, password): ''' This function is called to check whether a username/password combination is valid. ''' return username == self.queryname and password == self.querypw
This function is called to check whether a username/password combination is valid.
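A minimal usage sketch with a hypothetical handler class holding the configured credentials:

class QueryHandler:
    # Hypothetical configuration values; the real attributes come from elsewhere.
    queryname = "admin"
    querypw = "secret"

    def check_auth(self, username, password):
        return username == self.queryname and password == self.querypw

handler = QueryHandler()
print(handler.check_auth("admin", "secret"))   # True
print(handler.check_auth("admin", "wrong"))    # False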
def enable(self): """ Enables WinQuad setting """ nquad = self.nquad.value() for label, xsll, xsul, xslr, xsur, ys, nx, ny in \ zip(self.label[:nquad], self.xsll[:nquad], self.xsul[:nquad], self.xslr[:nquad], self.xsur[:nquad], self.ys[:nquad], self.nx[:nquad], self.ny[:nquad]): label.config(state='normal') for thing in (xsll, xsul, xslr, xsur, ys, nx, ny): thing.enable() for label, xsll, xsul, xslr, xsur, ys, nx, ny in \ zip(self.label[nquad:], self.xsll[nquad:], self.xsul[nquad:], self.xslr[nquad:], self.xsur[nquad:], self.ys[nquad:], self.nx[nquad:], self.ny[nquad:]): label.config(state='disable') for thing in (xsll, xsul, xslr, xsur, ys, nx, ny): thing.disable() self.nquad.enable() self.xbin.enable() self.ybin.enable() self.sbutt.enable()
Enables WinQuad setting
def complete_vhwa_command(self, command): """Signals that the Video HW Acceleration command has completed. in command of type str Pointer to VBOXVHWACMD containing the completed command. """ if not isinstance(command, basestring): raise TypeError("command can only be an instance of type basestring") self._call("completeVHWACommand", in_p=[command])
Signals that the Video HW Acceleration command has completed. in command of type str Pointer to VBOXVHWACMD containing the completed command.
def _run_program(self, bin, fastafile, params=None): """ Run MotifSampler and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) # TODO: test organism #cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s > /dev/null 2>&1" % ( cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s" % ( bin, fastafile, params["background_model"], params["pwmfile"], params["width"], params["number"], params["outfile"], params["strand"], ) #print cmd p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() #stdout,stderr = "","" #p = Popen(cmd, shell=True) #p.wait() motifs = [] if os.path.exists(params["outfile"]): with open(params["outfile"]) as f: motifs = self.parse_out(f) for motif in motifs: motif.id = "%s_%s" % (self.name, motif.id) return motifs, stdout, stderr
Run MotifSampler and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
def iter_org_events(self, org, number=-1, etag=None): """Iterate over events as they appear on the user's organization dashboard. You must be authenticated to view this. :param str org: (required), name of the organization :param int number: (optional), number of events to return. Default: -1 returns all available events :param str etag: (optional), ETag from a previous request to the same endpoint :returns: list of :class:`Event <github3.events.Event>`\ s """ url = '' if org: url = self._build_url('events', 'orgs', org, base_url=self._api) return self._iter(int(number), url, Event, etag=etag)
Iterate over events as they appear on the user's organization dashboard. You must be authenticated to view this. :param str org: (required), name of the organization :param int number: (optional), number of events to return. Default: -1 returns all available events :param str etag: (optional), ETag from a previous request to the same endpoint :returns: list of :class:`Event <github3.events.Event>`\ s
def get_objects_dex(self): """ Yields all dex objects including their Analysis objects :returns: tuple of (sha256, DalvikVMFormat, Analysis) """ # TODO: there is no variant like get_objects_apk for digest, d in self.analyzed_dex.items(): yield digest, d, self.analyzed_vms[digest]
Yields all dex objects including their Analysis objects :returns: tuple of (sha256, DalvikVMFormat, Analysis)
def from_string(cls, s): """ Instantiate a `Derivation` from a UDF or UDX string representation. The UDF/UDX representations are as output by a processor like the `LKB <http://moin.delph-in.net/LkbTop>`_ or `ACE <http://sweaglesw.org/linguistics/ace/>`_, or from the :meth:`UdfNode.to_udf` or :meth:`UdfNode.to_udx` methods. Args: s (str): UDF or UDX serialization """ if not (s.startswith('(') and s.endswith(')')): raise ValueError( 'Derivations must begin and end with parentheses: ( )' ) s_ = s[1:] # get rid of initial open-parenthesis stack = [] deriv = None try: matches = cls.udf_re.finditer(s_) for match in matches: if match.group('done'): node = stack.pop() if len(stack) == 0: deriv = node break else: stack[-1].daughters.append(node) elif match.group('form'): if len(stack) == 0: raise ValueError('Possible leaf node with no parent.') gd = match.groupdict() # ignore LKB-style start/end data if it exists on gd term = UdfTerminal( _unquote(gd['form']), tokens=_udf_tokens(gd.get('tokens')), parent=stack[-1] if stack else None ) stack[-1].daughters.append(term) elif match.group('id'): gd = match.groupdict() head = None entity, _, type = gd['entity'].partition('@') if entity[0] == '^': entity = entity[1:] head = True if type == '': type = None udf = UdfNode(gd['id'], entity, gd['score'], gd['start'], gd['end'], head=head, type=type, parent=stack[-1] if stack else None) stack.append(udf) elif match.group('root'): udf = UdfNode(None, match.group('root')) stack.append(udf) except (ValueError, AttributeError): raise ValueError('Invalid derivation: %s' % s) if stack or deriv is None: raise ValueError('Invalid derivation; possibly unbalanced ' 'parentheses: %s' % s) return cls(*deriv, head=deriv._head, type=deriv.type)
Instantiate a `Derivation` from a UDF or UDX string representation. The UDF/UDX representations are as output by a processor like the `LKB <http://moin.delph-in.net/LkbTop>`_ or `ACE <http://sweaglesw.org/linguistics/ace/>`_, or from the :meth:`UdfNode.to_udf` or :meth:`UdfNode.to_udx` methods. Args: s (str): UDF or UDX serialization
def solar_position_loop(unixtime, loc_args, out): """Loop through the time array and calculate the solar position""" lat = loc_args[0] lon = loc_args[1] elev = loc_args[2] pressure = loc_args[3] temp = loc_args[4] delta_t = loc_args[5] atmos_refract = loc_args[6] sst = loc_args[7] esd = loc_args[8] for i in range(unixtime.shape[0]): utime = unixtime[i] jd = julian_day(utime) jde = julian_ephemeris_day(jd, delta_t) jc = julian_century(jd) jce = julian_ephemeris_century(jde) jme = julian_ephemeris_millennium(jce) R = heliocentric_radius_vector(jme) if esd: out[0, i] = R continue L = heliocentric_longitude(jme) B = heliocentric_latitude(jme) Theta = geocentric_longitude(L) beta = geocentric_latitude(B) x0 = mean_elongation(jce) x1 = mean_anomaly_sun(jce) x2 = mean_anomaly_moon(jce) x3 = moon_argument_latitude(jce) x4 = moon_ascending_longitude(jce) delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4) delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4) epsilon0 = mean_ecliptic_obliquity(jme) epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon) delta_tau = aberration_correction(R) lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau) v0 = mean_sidereal_time(jd, jc) v = apparent_sidereal_time(v0, delta_psi, epsilon) alpha = geocentric_sun_right_ascension(lamd, epsilon, beta) delta = geocentric_sun_declination(lamd, epsilon, beta) if sst: out[0, i] = v out[1, i] = alpha out[2, i] = delta continue m = sun_mean_longitude(jme) eot = equation_of_time(m, alpha, delta_psi, epsilon) H = local_hour_angle(v, lon, alpha) xi = equatorial_horizontal_parallax(R) u = uterm(lat) x = xterm(u, lat, elev) y = yterm(u, lat, elev) delta_alpha = parallax_sun_right_ascension(x, xi, H, delta) delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha, H) H_prime = topocentric_local_hour_angle(H, delta_alpha) e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime, H_prime) delta_e = atmospheric_refraction_correction(pressure, temp, e0, atmos_refract) e = topocentric_elevation_angle(e0, delta_e) theta = topocentric_zenith_angle(e) theta0 = topocentric_zenith_angle(e0) gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat) phi = topocentric_azimuth_angle(gamma) out[0, i] = theta out[1, i] = theta0 out[2, i] = e out[3, i] = e0 out[4, i] = phi out[5, i] = eot
Loop through the time array and calculate the solar position
def _check_input(self, X, R): """Check whether input data and coordinates in right type Parameters ---------- X : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. R : list of 2D arrays, element i has shape=[n_voxel, n_dim] Each element in the list contains the scanner coordinate matrix of fMRI data of one subject. Returns ------- HTFA Returns the instance itself. """ # Check data type if not isinstance(X, list): raise TypeError("Input data should be a list") if not isinstance(R, list): raise TypeError("Coordinates should be a list") # Check the number of subjects if len(X) < 1: raise ValueError("Need at leat one subject to train the model.\ Got {0:d}".format(len(X))) for idx, x in enumerate(X): if not isinstance(x, np.ndarray): raise TypeError("Each subject data should be an array") if x.ndim != 2: raise TypeError("Each subject data should be 2D array") if not isinstance(R[idx], np.ndarray): raise TypeError( "Each scanner coordinate matrix should be an array") if R[idx].ndim != 2: raise TypeError( "Each scanner coordinate matrix should be 2D array") if x.shape[0] != R[idx].shape[0]: raise TypeError( "n_voxel should be the same in X[idx] and R[idx]") return self
Check whether input data and coordinates in right type Parameters ---------- X : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. R : list of 2D arrays, element i has shape=[n_voxel, n_dim] Each element in the list contains the scanner coordinate matrix of fMRI data of one subject. Returns ------- HTFA Returns the instance itself.
def qteStartRecordingHook(self, msgObj): """ Commence macro recording. Macros are recorded by connecting to the 'keypressed' signal it emits. If the recording has already commenced, or if this method was called during a macro replay, then return immediately. """ if self.qteRecording: self.qteMain.qteStatus('Macro recording already enabled') return # Update status flag. self.qteRecording = True # Reset the variables. self.qteMain.qteStatus('Macro recording started') self.recorded_keysequence = QtmacsKeysequence() # Connect the 'keypressed' and 'abort' signals. self.qteMain.qtesigKeyparsed.connect(self.qteKeyPress) self.qteMain.qtesigAbort.connect(self.qteStopRecordingHook)
Commence macro recording. Macros are recorded by connecting to the 'keypressed' signal it emits. If the recording has already commenced, or if this method was called during a macro replay, then return immediately.
def set_continous_wave(self, enabled): """ Enable/disable continuous wave mode on the Crazyflie. """ pk = CRTPPacket() pk.set_header(CRTPPort.PLATFORM, PLATFORM_COMMAND) pk.data = (0, enabled) self._cf.send_packet(pk)
Enable/disable continuous wave mode on the Crazyflie.
def pick_auth(endpoint_context, areq, all=False): """ Pick authentication method :param areq: AuthorizationRequest instance :return: A dictionary with the authentication method and its authn class ref """ acrs = [] try: if len(endpoint_context.authn_broker) == 1: return endpoint_context.authn_broker.default() if "acr_values" in areq: if not isinstance(areq["acr_values"], list): areq["acr_values"] = [areq["acr_values"]] acrs = areq["acr_values"] else: # same as any try: acrs = areq["claims"]["id_token"]["acr"]["values"] except KeyError: try: _ith = areq[verified_claim_name("id_token_hint")] except KeyError: try: _hint = areq['login_hint'] except KeyError: pass else: if endpoint_context.login_hint2acrs: acrs = endpoint_context.login_hint2acrs(_hint) else: try: acrs = [_ith['acr']] except KeyError: pass if not acrs: return endpoint_context.authn_broker.default() for acr in acrs: res = endpoint_context.authn_broker.pick(acr) logger.debug("Picked AuthN broker for ACR %s: %s" % ( str(acr), str(res))) if res: if all: return res else: # Return the first guess by pick. return res[0] except KeyError as exc: logger.debug( "An error occurred while picking the authN broker: %s" % str(exc)) return None
Pick authentication method :param areq: AuthorizationRequest instance :return: A dictionary with the authentication method and its authn class ref
def _get_unicode(data, force=False): """Try to return a text aka unicode object from the given data.""" if isinstance(data, binary_type): return data.decode('utf-8') elif data is None: return '' elif force: if PY2: return unicode(data) else: return str(data) else: return data
Try to return a text aka unicode object from the given data.
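A Python 3-only sketch of the same behaviour (the original also handles Python 2's unicode type behind the PY2 flag):

def _get_unicode(data, force=False):
    if isinstance(data, bytes):
        return data.decode("utf-8")
    elif data is None:
        return ""
    elif force:
        return str(data)
    return data

print(_get_unicode(b"caf\xc3\xa9"))    # café
print(_get_unicode(None))              # '' (empty string)
print(_get_unicode(42, force=True))    # '42'
print(_get_unicode(42))                # 42, left as an int when force is False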
def InteractiveShell(self, cmd=None, strip_cmd=True, delim=None, strip_delim=True): """Get stdout from the currently open interactive shell and optionally run a command on the device, returning all output. Args: cmd: Optional. Command to run on the target. strip_cmd: Optional (default True). Strip command name from stdout. delim: Optional. Delimiter to look for in the output to know when to stop expecting more output (usually the shell prompt) strip_delim: Optional (default True): Strip the provided delimiter from the output Returns: The stdout from the shell command. """ conn = self._get_service_connection(b'shell:') return self.protocol_handler.InteractiveShellCommand( conn, cmd=cmd, strip_cmd=strip_cmd, delim=delim, strip_delim=strip_delim)
Get stdout from the currently open interactive shell and optionally run a command on the device, returning all output. Args: cmd: Optional. Command to run on the target. strip_cmd: Optional (default True). Strip command name from stdout. delim: Optional. Delimiter to look for in the output to know when to stop expecting more output (usually the shell prompt) strip_delim: Optional (default True): Strip the provided delimiter from the output Returns: The stdout from the shell command.
def take_action(self, production_rule: str) -> 'GrammarStatelet': """ Takes an action in the current grammar state, returning a new grammar state with whatever updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS". This will update the non-terminal stack. Updating the non-terminal stack involves popping the non-terminal that was expanded off of the stack, then pushing on any non-terminals in the production rule back on the stack. For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and ``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e", "<e,d>"]``. If ``self._reverse_productions`` is set to ``False`` then we push the non-terminals on in in their given order, which means that the first non-terminal in the production rule gets popped off the stack `last`. """ left_side, right_side = production_rule.split(' -> ') assert self._nonterminal_stack[-1] == left_side, (f"Tried to expand {self._nonterminal_stack[-1]}" f"but got rule {left_side} -> {right_side}") new_stack = self._nonterminal_stack[:-1] productions = self._get_productions_from_string(right_side) if self._reverse_productions: productions = list(reversed(productions)) for production in productions: if self._is_nonterminal(production): new_stack.append(production) return GrammarStatelet(nonterminal_stack=new_stack, valid_actions=self._valid_actions, is_nonterminal=self._is_nonterminal, reverse_productions=self._reverse_productions)
Takes an action in the current grammar state, returning a new grammar state with whatever updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS". This will update the non-terminal stack. Updating the non-terminal stack involves popping the non-terminal that was expanded off the stack, then pushing any non-terminals in the production rule back on the stack. For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and ``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e", "<e,d>"]``. If ``self._reverse_productions`` is set to ``False`` then we push the non-terminals on in their given order, which means that the first non-terminal in the production rule gets popped off the stack `last`.
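A stripped-down sketch of just the stack update described above; the is_nonterminal test here is a crude stand-in (the real one comes from the grammar), and the right-hand-side parsing assumes the simple bracketed format used in the example:

def apply_production(nonterminal_stack, production_rule, reverse_productions=True):
    def is_nonterminal(symbol):
        # Crude stand-in: treat bracketed types and single letters as non-terminals.
        return symbol.startswith("<") or len(symbol) == 1

    left_side, right_side = production_rule.split(" -> ")
    assert nonterminal_stack[-1] == left_side
    new_stack = nonterminal_stack[:-1]
    productions = right_side.strip("[]").split(", ")
    if reverse_productions:
        productions = list(reversed(productions))
    for production in productions:
        if is_nonterminal(production):
            new_stack.append(production)
    return new_stack

print(apply_production(["r", "<e,r>", "d"], "d -> [<e,d>, e]"))
# ['r', '<e,r>', 'e', '<e,d>']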
def init_flatpak(): """ If we are in Flatpak, we must build a tessdata/ directory using the .traineddata files from each locale directory """ tessdata_files = glob.glob("/app/share/locale/*/*.traineddata") if len(tessdata_files) <= 0: return os.path.exists("/app") localdir = os.path.expanduser("~/.local") base_data_dir = os.getenv( "XDG_DATA_HOME", os.path.join(localdir, "share") ) tessdatadir = os.path.join(base_data_dir, "paperwork", "tessdata") logger.info("Assuming we are running in Flatpak." " Building tessdata directory {} ...".format(tessdatadir)) util.rm_rf(tessdatadir) util.mkdir_p(tessdatadir) os.symlink("/app/share/tessdata/eng.traineddata", os.path.join(tessdatadir, "eng.traineddata")) os.symlink("/app/share/tessdata/osd.traineddata", os.path.join(tessdatadir, "osd.traineddata")) os.symlink("/app/share/tessdata/configs", os.path.join(tessdatadir, "configs")) os.symlink("/app/share/tessdata/tessconfigs", os.path.join(tessdatadir, "tessconfigs")) for tessdata in tessdata_files: logger.info("{} found".format(tessdata)) os.symlink(tessdata, os.path.join(tessdatadir, os.path.basename(tessdata))) os.environ['TESSDATA_PREFIX'] = os.path.dirname(tessdatadir) logger.info("Tessdata directory ready") return True
If we are in Flatpak, we must build a tessdata/ directory using the .traineddata files from each locale directory
def drop_prefix_and_return_type(function): """Takes the function value from a frame and drops prefix and return type For example:: static void * Allocator<MozJemallocBase>::malloc(unsigned __int64) ^ ^^^^^^ return type prefix This gets changes to this:: Allocator<MozJemallocBase>::malloc(unsigned __int64) This tokenizes on space, but takes into account types, generics, traits, function arguments, and other parts of the function signature delimited by things like `', <>, {}, [], and () for both C/C++ and Rust. After tokenizing, this returns the last token since that's comprised of the function name and its arguments. :arg function: the function value in a frame to drop bits from :returns: adjusted function value """ DELIMITERS = { '(': ')', '{': '}', '[': ']', '<': '>', '`': "'" } OPEN = DELIMITERS.keys() CLOSE = DELIMITERS.values() # The list of tokens accumulated so far tokens = [] # Keeps track of open delimiters so we can match and close them levels = [] # The current token we're building current = [] for i, char in enumerate(function): if char in OPEN: levels.append(char) current.append(char) elif char in CLOSE: if levels and DELIMITERS[levels[-1]] == char: levels.pop() current.append(char) else: # This is an unmatched close. current.append(char) elif levels: current.append(char) elif char == ' ': tokens.append(''.join(current)) current = [] else: current.append(char) if current: tokens.append(''.join(current)) while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')): # It's possible for the function signature to have a space between # the function name and the parenthesized arguments or [clone ...] # thing. If that's the case, we join the last two tokens. We keep doing # that until the last token is nice. # # Example: # # somefunc (int arg1, int arg2) # ^ # somefunc(int arg1, int arg2) [clone .cold.111] # ^ # somefunc(int arg1, int arg2) [clone .cold.111] [clone .cold.222] # ^ ^ tokens = tokens[:-2] + [' '.join(tokens[-2:])] return tokens[-1]
Takes the function value from a frame and drops prefix and return type For example:: static void * Allocator<MozJemallocBase>::malloc(unsigned __int64) ^ ^^^^^^ return type prefix This gets changed to this:: Allocator<MozJemallocBase>::malloc(unsigned __int64) This tokenizes on space, but takes into account types, generics, traits, function arguments, and other parts of the function signature delimited by things like `', <>, {}, [], and () for both C/C++ and Rust. After tokenizing, this returns the last token since that consists of the function name and its arguments. :arg function: the function value in a frame to drop bits from :returns: adjusted function value
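Assuming the drop_prefix_and_return_type function above is in scope, the examples from its docstring behave as described:

print(drop_prefix_and_return_type(
    "static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)"))
# Allocator<MozJemallocBase>::malloc(unsigned __int64)

print(drop_prefix_and_return_type(
    "somefunc(int arg1, int arg2) [clone .cold.111]"))
# somefunc(int arg1, int arg2) [clone .cold.111]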
def load(self): """ Fetch data about droplet - use this instead of get_data() """ droplets = self.get_data("droplets/%s" % self.id) droplet = droplets['droplet'] for attr in droplet.keys(): setattr(self, attr, droplet[attr]) for net in self.networks['v4']: if net['type'] == 'private': self.private_ip_address = net['ip_address'] if net['type'] == 'public': self.ip_address = net['ip_address'] if self.networks['v6']: self.ip_v6_address = self.networks['v6'][0]['ip_address'] if "backups" in self.features: self.backups = True else: self.backups = False if "ipv6" in self.features: self.ipv6 = True else: self.ipv6 = False if "private_networking" in self.features: self.private_networking = True else: self.private_networking = False if "tags" in droplets: self.tags = droplets["tags"] return self
Fetch data about droplet - use this instead of get_data()
def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''): """ Return a dictionary detailing what, if any, change was made to a record field :param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, normLookup, normRegex, normIncludes, deriveValue, copyValue, deriveRegex :param string fromVal: previous field value :param string toVal: new string value :param dict using: field values used to derive new values; only applicable for deriveValue, copyValue, deriveRegex :param string pattern: which regex pattern was matched to make the change; only applicable for genericRegex, fieldSpecificRegex, deriveRegex """ histObj = {} if fromVal != toVal: histObj[lookupType] = {"from": fromVal, "to": toVal} if lookupType in ['deriveValue', 'deriveRegex', 'copyValue', 'normIncludes', 'deriveIncludes'] and using!='': histObj[lookupType]["using"] = using if lookupType in ['genericRegex', 'fieldSpecificRegex', 'normRegex', 'deriveRegex'] and pattern!='': histObj[lookupType]["pattern"] = pattern return histObj
Return a dictionary detailing what, if any, change was made to a record field :param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, normLookup, normRegex, normIncludes, deriveValue, copyValue, deriveRegex :param string fromVal: previous field value :param string toVal: new string value :param dict using: field values used to derive new values; only applicable for deriveValue, copyValue, deriveRegex :param string pattern: which regex pattern was matched to make the change; only applicable for genericRegex, fieldSpecificRegex, deriveRegex
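Assuming the _CollectHistory_ function above is in scope, a quick example of the history record it produces for a regex-based normalisation (the field values here are made up):

hist = _CollectHistory_('normRegex', 'ACME corp.', 'ACME Corp', pattern=r'corp\.?$')
print(hist)
# {'normRegex': {'from': 'ACME corp.', 'to': 'ACME Corp', 'pattern': 'corp\\.?$'}}

# No change is recorded when the value is unchanged.
print(_CollectHistory_('genericLookup', 'ACME Corp', 'ACME Corp'))   # {}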
def get_direct_queue(self): """Returns a :class:`kombu.Queue` instance to be used to listen for messages sent to this specific Actor instance""" return Queue(self.id, self.inbox_direct, routing_key=self.routing_key, auto_delete=True)
Returns a :class:`kombu.Queue` instance to be used to listen for messages sent to this specific Actor instance
def plot(self, columns=None, loc=None, iloc=None, **kwargs): """" A wrapper around plotting. Matplotlib plot arguments can be passed in, plus: Parameters ----------- columns: string or list-like, optional If not empty, plot a subset of columns from the ``cumulative_hazards_``. Default all. loc: iloc: slice, optional specify a location-based subsection of the curves to plot, ex: ``.plot(iloc=slice(0,10))`` will plot the first 10 time points. """ from matplotlib import pyplot as plt assert loc is None or iloc is None, "Cannot set both loc and iloc in call to .plot" def shaded_plot(ax, x, y, y_upper, y_lower, **kwargs): base_line, = ax.plot(x, y, drawstyle="steps-post", **kwargs) ax.fill_between(x, y_lower, y2=y_upper, alpha=0.25, color=base_line.get_color(), linewidth=1.0, step="post") def create_df_slicer(loc, iloc): get_method = "loc" if loc is not None else "iloc" if iloc is None and loc is None: user_submitted_ix = slice(0, None) else: user_submitted_ix = loc if loc is not None else iloc return lambda df: getattr(df, get_method)[user_submitted_ix] subset_df = create_df_slicer(loc, iloc) if not columns: columns = self.cumulative_hazards_.columns else: columns = _to_list(columns) set_kwargs_ax(kwargs) ax = kwargs.pop("ax") x = subset_df(self.cumulative_hazards_).index.values.astype(float) for column in columns: y = subset_df(self.cumulative_hazards_[column]).values index = subset_df(self.cumulative_hazards_[column]).index y_upper = subset_df(self.confidence_intervals_[column].loc["upper-bound"]).values y_lower = subset_df(self.confidence_intervals_[column].loc["lower-bound"]).values shaded_plot(ax, x, y, y_upper, y_lower, label=column, **kwargs) plt.hlines(0, index.min() - 1, index.max(), color="k", linestyles="--", alpha=0.5) ax.legend() return ax
A wrapper around plotting. Matplotlib plot arguments can be passed in, plus: Parameters ----------- columns: string or list-like, optional If not empty, plot a subset of columns from the ``cumulative_hazards_``. Default all. loc: iloc: slice, optional specify a location-based subsection of the curves to plot, ex: ``.plot(iloc=slice(0,10))`` will plot the first 10 time points.
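A short usage sketch, assuming this is the plot method of a lifelines fitter that exposes cumulative_hazards_ (such as AalenAdditiveFitter); check the lifelines documentation for the exact API of your version:

from lifelines import AalenAdditiveFitter
from lifelines.datasets import load_regression_dataset

df = load_regression_dataset()                     # columns var1..var3, T, E
aaf = AalenAdditiveFitter()
aaf.fit(df, duration_col="T", event_col="E")

# Plot two covariates over the first 15 time points.
ax = aaf.plot(columns=["var1", "var2"], iloc=slice(0, 15))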
def LightcurveHDU(model): ''' Construct the data HDU file containing the arrays and the observing info. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=1) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) cards.append(('MODEL', model.name, 'Name of EVEREST model used')) cards.append(('APNAME', model.aperture_name, 'Name of aperture used')) cards.append(('BPAD', model.bpad, 'Chunk overlap in cadences')) for c in range(len(model.breakpoints)): cards.append( ('BRKPT%02d' % (c + 1), model.breakpoints[c], 'Light curve breakpoint')) cards.append(('CBVNUM', model.cbv_num, 'Number of CBV signals to recover')) cards.append(('CBVNITER', model.cbv_niter, 'Number of CBV SysRem iterations')) cards.append(('CBVWIN', model.cbv_win, 'Window size for smoothing CBVs')) cards.append(('CBVORD', model.cbv_order, 'Order when smoothing CBVs')) cards.append(('CDIVS', model.cdivs, 'Cross-validation subdivisions')) cards.append(('CDPP', model.cdpp, 'Average de-trended CDPP')) cards.append(('CDPPR', model.cdppr, 'Raw CDPP')) cards.append(('CDPPV', model.cdppv, 'Average validation CDPP')) cards.append(('CDPPG', model.cdppg, 'Average GP-de-trended CDPP')) for i in range(99): try: cards.append(('CDPP%02d' % (i + 1), model.cdpp_arr[i] if not np.isnan( model.cdpp_arr[i]) else 0, 'Chunk de-trended CDPP')) cards.append(('CDPPR%02d' % ( i + 1), model.cdppr_arr[i] if not np.isnan( model.cdppr_arr[i]) else 0, 'Chunk raw CDPP')) cards.append(('CDPPV%02d' % (i + 1), model.cdppv_arr[i] if not np.isnan( model.cdppv_arr[i]) else 0, 'Chunk validation CDPP')) except: break cards.append( ('CVMIN', model.cv_min, 'Cross-validation objective function')) cards.append( ('GITER', model.giter, 'Number of GP optimiziation iterations')) cards.append( ('GMAXF', model.giter, 'Max number of GP function evaluations')) cards.append(('GPFACTOR', model.gp_factor, 'GP amplitude initialization factor')) cards.append(('KERNEL', model.kernel, 'GP kernel name')) if model.kernel == 'Basic': cards.append( ('GPWHITE', model.kernel_params[0], 'GP white noise amplitude (e-/s)')) cards.append( ('GPRED', model.kernel_params[1], 'GP red noise amplitude (e-/s)')) cards.append( ('GPTAU', model.kernel_params[2], 'GP red noise timescale (days)')) elif model.kernel == 'QuasiPeriodic': cards.append( ('GPWHITE', model.kernel_params[0], 'GP white noise amplitude (e-/s)')) cards.append( ('GPRED', model.kernel_params[1], 'GP red noise amplitude (e-/s)')) cards.append(('GPGAMMA', model.kernel_params[2], 'GP scale factor')) cards.append(('GPPER', model.kernel_params[3], 'GP period (days)')) for c in range(len(model.breakpoints)): for o in range(model.pld_order): cards.append(('LAMB%02d%02d' % (c + 1, o + 1), model.lam[c][o], 'Cross-validation parameter')) if model.name == 'iPLD': cards.append(('RECL%02d%02d' % (c + 1, o + 1), model.reclam[c][o], 'Cross-validation parameter')) cards.append(('LEPS', model.leps, 'Cross-validation tolerance')) cards.append(('MAXPIX', model.max_pixels, 'Maximum size of TPF aperture')) for i, source in enumerate(model.nearby[:99]): cards.append(('NRBY%02dID' % (i + 1), source['ID'], 'Nearby source ID')) 
cards.append( ('NRBY%02dX' % (i + 1), source['x'], 'Nearby source X position')) cards.append( ('NRBY%02dY' % (i + 1), source['y'], 'Nearby source Y position')) cards.append( ('NRBY%02dM' % (i + 1), source['mag'], 'Nearby source magnitude')) cards.append(('NRBY%02dX0' % (i + 1), source['x0'], 'Nearby source reference X')) cards.append(('NRBY%02dY0' % (i + 1), source['y0'], 'Nearby source reference Y')) for i, n in enumerate(model.neighbors): cards.append( ('NEIGH%02d' % i, model.neighbors[i], 'Neighboring star used to de-trend')) cards.append(('OITER', model.oiter, 'Number of outlier search iterations')) cards.append(('OPTGP', model.optimize_gp, 'GP optimization performed?')) cards.append( ('OSIGMA', model.osigma, 'Outlier tolerance (standard deviations)')) for i, planet in enumerate(model.planets): cards.append( ('P%02dT0' % (i + 1), planet[0], 'Planet transit time (days)')) cards.append( ('P%02dPER' % (i + 1), planet[1], 'Planet transit period (days)')) cards.append( ('P%02dDUR' % (i + 1), planet[2], 'Planet transit duration (days)')) cards.append(('PLDORDER', model.pld_order, 'PLD de-trending order')) cards.append(('SATUR', model.saturated, 'Is target saturated?')) cards.append(('SATTOL', model.saturation_tolerance, 'Fractional saturation tolerance')) # Add the EVEREST quality flags to the QUALITY array quality = np.array(model.quality) quality[np.array(model.badmask, dtype=int)] += 2 ** (QUALITY_BAD - 1) quality[np.array(model.nanmask, dtype=int)] += 2 ** (QUALITY_NAN - 1) quality[np.array(model.outmask, dtype=int)] += 2 ** (QUALITY_OUT - 1) quality[np.array(model.recmask, dtype=int)] += 2 ** (QUALITY_REC - 1) quality[np.array(model.transitmask, dtype=int)] += 2 ** (QUALITY_TRN - 1) # When de-trending, we interpolated to fill in NaN fluxes. Here # we insert the NaNs back in, since there's no actual physical # information at those cadences. flux = np.array(model.flux) flux[model.nanmask] = np.nan # Create the arrays list arrays = [pyfits.Column(name='CADN', format='D', array=model.cadn), pyfits.Column(name='FLUX', format='D', array=flux, unit='e-/s'), pyfits.Column(name='FRAW', format='D', array=model.fraw, unit='e-/s'), pyfits.Column(name='FRAW_ERR', format='D', array=model.fraw_err, unit='e-/s'), pyfits.Column(name='QUALITY', format='J', array=quality), pyfits.Column(name='TIME', format='D', array=model.time, unit='BJD - 2454833')] # Add the CBVs if model.fcor is not None: arrays += [pyfits.Column(name='FCOR', format='D', array=model.fcor, unit='e-/s')] for n in range(model.XCBV.shape[1]): arrays += [pyfits.Column(name='CBV%02d' % (n + 1), format='D', array=model.XCBV[:, n])] # Did we subtract a background term? if hasattr(model.bkg, '__len__'): arrays.append(pyfits.Column(name='BKG', format='D', array=model.bkg, unit='e-/s')) # Create the HDU header = pyfits.Header(cards=cards) cols = pyfits.ColDefs(arrays) hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='ARRAYS') return hdu
Construct the data HDU file containing the arrays and the observing info.
def unpack(self, buff, offset=0): """Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. Args: buff (bytes): Binary data package to be unpacked. offset (int): Where to begin unpacking. Raises: Exception: If there is a struct unpacking error. """ begin = offset hexas = [] while begin < offset + 8: number = struct.unpack("!B", buff[begin:begin+1])[0] hexas.append("%.2x" % number) begin += 1 self._value = ':'.join(hexas)
Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. Args: buff (bytes): Binary data package to be unpacked. offset (int): Where to begin unpacking. Raises: Exception: If there is a struct unpacking error.
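A standalone sketch of the byte walk above: read eight bytes one at a time and join them as colon-separated hex pairs (the attribute assignment is replaced by a return value here):

import struct

def unpack_hex_field(buff, offset=0):
    hexas = []
    for begin in range(offset, offset + 8):
        number = struct.unpack("!B", buff[begin:begin + 1])[0]
        hexas.append("%.2x" % number)
    return ":".join(hexas)

print(unpack_hex_field(b"\x00\x00\x00\x00\x00\x00\x00\x01"))
# 00:00:00:00:00:00:00:01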
def _build_http(http=None): """Construct an http client suitable for googleapiclient usage w/ user agent. """ if not http: http = httplib2.Http( timeout=HTTP_REQUEST_TIMEOUT, ca_certs=HTTPLIB_CA_BUNDLE) user_agent = 'Python-httplib2/{} (gzip), {}/{}'.format( httplib2.__version__, 'custodian-gcp', '0.1') return set_user_agent(http, user_agent)
Construct an http client suitable for googleapiclient usage w/ user agent.
def kill(self) -> None: """Kill ffmpeg job.""" self._proc.kill() self._loop.run_in_executor(None, self._proc.communicate)
Kill ffmpeg job.
def generate_signed_url_v2( credentials, resource, expiration, api_access_endpoint="", method="GET", content_md5=None, content_type=None, response_type=None, response_disposition=None, generation=None, headers=None, query_parameters=None, ): """Generate a V2 signed URL to provide query-string auth'n to a resource. .. note:: Assumes ``credentials`` implements the :class:`google.auth.credentials.Signing` interface. Also assumes ``credentials`` has a ``service_account_email`` property which identifies the credentials. .. note:: If you are on Google Compute Engine, you can't generate a signed URL. Follow `Issue 922`_ for updates on this. If you'd like to be able to generate a signed URL from GCE, you can use a standard service account from a JSON file rather than a GCE service account. See headers `reference`_ for more details on optional arguments. .. _Issue 922: https://github.com/GoogleCloudPlatform/\ google-cloud-python/issues/922 .. _reference: https://cloud.google.com/storage/docs/reference-headers :type credentials: :class:`google.auth.credentials.Signing` :param credentials: Credentials object with an associated private key to sign text. :type resource: str :param resource: A pointer to a specific resource (typically, ``/bucket-name/path/to/blob.txt``). :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] :param expiration: Point in time when the signed URL should expire. :type api_access_endpoint: str :param api_access_endpoint: Optional URI base. Defaults to empty string. :type method: str :param method: The HTTP verb that will be used when requesting the URL. Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the signature will additionally contain the `x-goog-resumable` header, and the method changed to POST. See the signed URL docs regarding this flow: https://cloud.google.com/storage/docs/access-control/signed-urls :type content_md5: str :param content_md5: (Optional) The MD5 hash of the object referenced by ``resource``. :type content_type: str :param content_type: (Optional) The content type of the object referenced by ``resource``. :type response_type: str :param response_type: (Optional) Content type of responses to requests for the signed URL. Used to over-ride the content type of the underlying resource. :type response_disposition: str :param response_disposition: (Optional) Content disposition of responses to requests for the signed URL. :type generation: str :param generation: (Optional) A value that indicates which generation of the resource to fetch. :type headers: Union[dict|List(Tuple(str,str))] :param headers: (Optional) Additional HTTP headers to be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers Requests using the signed URL *must* pass the specified header (name and value) with each request for the URL. :type query_parameters: dict :param query_parameters: (Optional) Additional query paramtersto be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers#query :raises: :exc:`TypeError` when expiration is not a valid type. :raises: :exc:`AttributeError` if credentials is not an instance of :class:`google.auth.credentials.Signing`. :rtype: str :returns: A signed URL you can use to access the resource until expiration. """ expiration_stamp = get_expiration_seconds_v2(expiration) canonical = canonicalize(method, resource, query_parameters, headers) # Generate the string to sign. 
elements_to_sign = [ canonical.method, content_md5 or "", content_type or "", str(expiration_stamp), ] elements_to_sign.extend(canonical.headers) elements_to_sign.append(canonical.resource) string_to_sign = "\n".join(elements_to_sign) # Set the right query parameters. signed_query_params = get_signed_query_params_v2( credentials, expiration_stamp, string_to_sign ) if response_type is not None: signed_query_params["response-content-type"] = response_type if response_disposition is not None: signed_query_params["response-content-disposition"] = response_disposition if generation is not None: signed_query_params["generation"] = generation signed_query_params.update(canonical.query_parameters) sorted_signed_query_params = sorted(signed_query_params.items()) # Return the built URL. return "{endpoint}{resource}?{querystring}".format( endpoint=api_access_endpoint, resource=resource, querystring=six.moves.urllib.parse.urlencode(sorted_signed_query_params), )
Generate a V2 signed URL to provide query-string auth'n to a resource. .. note:: Assumes ``credentials`` implements the :class:`google.auth.credentials.Signing` interface. Also assumes ``credentials`` has a ``service_account_email`` property which identifies the credentials. .. note:: If you are on Google Compute Engine, you can't generate a signed URL. Follow `Issue 922`_ for updates on this. If you'd like to be able to generate a signed URL from GCE, you can use a standard service account from a JSON file rather than a GCE service account. See headers `reference`_ for more details on optional arguments. .. _Issue 922: https://github.com/GoogleCloudPlatform/\ google-cloud-python/issues/922 .. _reference: https://cloud.google.com/storage/docs/reference-headers :type credentials: :class:`google.auth.credentials.Signing` :param credentials: Credentials object with an associated private key to sign text. :type resource: str :param resource: A pointer to a specific resource (typically, ``/bucket-name/path/to/blob.txt``). :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] :param expiration: Point in time when the signed URL should expire. :type api_access_endpoint: str :param api_access_endpoint: Optional URI base. Defaults to empty string. :type method: str :param method: The HTTP verb that will be used when requesting the URL. Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the signature will additionally contain the `x-goog-resumable` header, and the method changed to POST. See the signed URL docs regarding this flow: https://cloud.google.com/storage/docs/access-control/signed-urls :type content_md5: str :param content_md5: (Optional) The MD5 hash of the object referenced by ``resource``. :type content_type: str :param content_type: (Optional) The content type of the object referenced by ``resource``. :type response_type: str :param response_type: (Optional) Content type of responses to requests for the signed URL. Used to over-ride the content type of the underlying resource. :type response_disposition: str :param response_disposition: (Optional) Content disposition of responses to requests for the signed URL. :type generation: str :param generation: (Optional) A value that indicates which generation of the resource to fetch. :type headers: Union[dict|List(Tuple(str,str))] :param headers: (Optional) Additional HTTP headers to be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers Requests using the signed URL *must* pass the specified header (name and value) with each request for the URL. :type query_parameters: dict :param query_parameters: (Optional) Additional query paramtersto be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers#query :raises: :exc:`TypeError` when expiration is not a valid type. :raises: :exc:`AttributeError` if credentials is not an instance of :class:`google.auth.credentials.Signing`. :rtype: str :returns: A signed URL you can use to access the resource until expiration.
def render_field(self, obj, field_name, **options):
    """
    Render a single field of obj: choice fields are rendered via the model's
    get_<field_name>_display() method, other values go through the renderer
    registered for the field's type, falling back to render_value().
    """
    try:
        field = obj._meta.get_field(field_name)
    except FieldDoesNotExist:
        return getattr(obj, field_name, '')
    if hasattr(field, 'choices') and getattr(field, 'choices'):
        return getattr(obj, 'get_{}_display'.format(field_name))()
    value = getattr(obj, field_name, '')
    renderer = self.renderers.get(type(field))
    if renderer:
        return renderer(value, **options)
    if isinstance(value, models.BaseModel):
        value = str(value)
    return self.render_value(value, **options)
Render a single field of obj: choice fields are rendered via the model's get_<field_name>_display() method, other values go through the renderer registered for the field's type, falling back to render_value().
def add_dependency(self, p_from_todo, p_to_todo): """ Adds a dependency from task 1 to task 2. """ def find_next_id(): """ Find a new unused ID. Unused means that no task has it as an 'id' value or as a 'p' value. """ def id_exists(p_id): """ Returns True if there exists a todo with the given parent ID. """ for todo in self._todos: number = str(p_id) if todo.has_tag('id', number) or todo.has_tag('p', number): return True return False new_id = 1 while id_exists(new_id): new_id += 1 return str(new_id) def append_projects_to_subtodo(): """ Appends projects in the parent todo item that are not present in the sub todo item. """ if config().append_parent_projects(): for project in p_from_todo.projects() - p_to_todo.projects(): self.append(p_to_todo, "+{}".format(project)) def append_contexts_to_subtodo(): """ Appends contexts in the parent todo item that are not present in the sub todo item. """ if config().append_parent_contexts(): for context in p_from_todo.contexts() - p_to_todo.contexts(): self.append(p_to_todo, "@{}".format(context)) if p_from_todo != p_to_todo and not self._depgraph.has_edge( hash(p_from_todo), hash(p_to_todo)): dep_id = None if p_from_todo.has_tag('id'): dep_id = p_from_todo.tag_value('id') else: dep_id = find_next_id() p_from_todo.set_tag('id', dep_id) p_to_todo.add_tag('p', dep_id) self._add_edge(p_from_todo, p_to_todo, dep_id) append_projects_to_subtodo() append_contexts_to_subtodo() self.dirty = True
Adds a dependency from task 1 to task 2.
import logging
from base64 import b64decode
from datetime import datetime

from dateutil import tz

logger = logging.getLogger(__name__)


def unpack_kinesis_event(kinesis_event, deserializer=None, unpacker=None,
                         embed_timestamp=False):
    """Extracts events (a list of dicts) from a Kinesis event."""
    records = kinesis_event["Records"]
    events = []
    shard_ids = set()
    for rec in records:
        data = rec["kinesis"]["data"]
        try:
            payload = b64decode(data)
        except TypeError:
            payload = b64decode(data.encode("utf-8"))
        if unpacker:
            payload = unpacker(payload)
        shard_ids.add(rec["eventID"].split(":")[0])
        try:
            payload = payload.decode()
        except AttributeError:
            pass
        if deserializer:
            try:
                payload = deserializer(payload)
            except ValueError:
                try:
                    payload = deserializer(payload.replace("\\'", "'"))
                except Exception:
                    logger.error("Invalid serialized payload: {}".format(
                        payload))
                    raise
        if isinstance(payload, dict) and embed_timestamp:
            ts = rec["kinesis"].get("approximateArrivalTimestamp")
            if ts:
                ts = datetime.fromtimestamp(ts, tz=tz.tzutc())
                ts_str = ("{year:04d}-{month:02d}-{day:02d} "
                          "{hour:02d}:{minute:02d}:{second:02d}").format(
                              year=ts.year, month=ts.month, day=ts.day,
                              hour=ts.hour, minute=ts.minute,
                              second=ts.second)
            else:
                ts_str = ""
            payload[embed_timestamp] = ts_str
        events.append(payload)

    if len(shard_ids) > 1:
        msg = "Kinesis event contains records from several shards: {}".format(
            shard_ids)
        raise BadKinesisEventError(msg)

    return events, shard_ids.pop()
Extracts events (a list of dicts) from a Kinesis event.
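A hedged example call with a minimal hand-built event; the record layout (Records, kinesis.data, eventID, approximateArrivalTimestamp) mirrors what the function reads, while the payload, shard ID, and timestamp below are made up:

import json
from base64 import b64encode

fake_event = {
    "Records": [
        {
            "eventID": "shardId-000000000000:49590338271490256",  # made-up sequence
            "kinesis": {
                # data must be a base64-encoded payload; here a JSON document
                "data": b64encode(json.dumps({"msg": "hello"}).encode()).decode(),
                "approximateArrivalTimestamp": 1545084650.0,
            },
        }
    ]
}

events, shard_id = unpack_kinesis_event(
    fake_event, deserializer=json.loads, embed_timestamp="received_at")
print(events)    # [{'msg': 'hello', 'received_at': '<UTC arrival time>'}]
print(shard_id)  # 'shardId-000000000000'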
def getstate(self):
    """
    Returns a (state, exit_code) tuple: ("RUNNING", -1) while the job is
    still executing, ("COMPLETE", 0) on success, or ("EXECUTOR_ERROR",
    <non-zero exit code>) on failure.
    """
    state = "RUNNING"
    exit_code = -1
    exitcode_file = os.path.join(self.workdir, "exit_code")
    pid_file = os.path.join(self.workdir, "pid")
    if os.path.exists(exitcode_file):
        with open(exitcode_file) as f:
            exit_code = int(f.read())
    elif os.path.exists(pid_file):
        with open(pid_file, "r") as f:
            pid = int(f.read())
        try:
            (_pid, exit_status) = os.waitpid(pid, os.WNOHANG)
            if _pid != 0:
                exit_code = exit_status >> 8
                with open(exitcode_file, "w") as f:
                    f.write(str(exit_code))
                os.unlink(pid_file)
        except OSError:
            os.unlink(pid_file)
            exit_code = 255

    if exit_code == 0:
        state = "COMPLETE"
    elif exit_code != -1:
        state = "EXECUTOR_ERROR"

    return state, exit_code
Returns a (state, exit_code) tuple: ("RUNNING", -1) while the job is still executing, ("COMPLETE", 0) on success, or ("EXECUTOR_ERROR", <non-zero exit code>) on failure.
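The exit-status bookkeeping above relies on the POSIX convention that, for a normally terminated child, the high byte of the raw status word returned by os.waitpid() holds the exit code; a minimal POSIX-only sketch, independent of the class above:

import os

pid = os.fork()
if pid == 0:
    os._exit(3)                      # child terminates with exit code 3
_pid, exit_status = os.waitpid(pid, 0)
exit_code = exit_status >> 8         # recover the code the same way getstate() does
print(exit_code)                     # prints 3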
def find_sample_min_std(self, Intensities):
    '''
    find the best interpretation with the minimum relative standard
    deviation (std/mean)
    '''
    Best_array = []
    best_array_std_perc = inf
    Best_array_tmp = []
    Best_interpretations = {}
    Best_interpretations_tmp = {}
    for this_specimen in list(Intensities.keys()):
        for value in Intensities[this_specimen]:
            Best_interpretations_tmp[this_specimen] = value
            Best_array_tmp = [value]
            all_other_specimens = list(Intensities.keys())
            all_other_specimens.remove(this_specimen)
            for other_specimen in all_other_specimens:
                closest_value = self.find_close_value(
                    Intensities[other_specimen], value)
                Best_array_tmp.append(closest_value)
                Best_interpretations_tmp[other_specimen] = closest_value

            if std(Best_array_tmp, ddof=1) / mean(Best_array_tmp) < best_array_std_perc:
                Best_array = Best_array_tmp
                best_array_std_perc = std(
                    Best_array, ddof=1) / mean(Best_array)
                Best_interpretations = copy.deepcopy(
                    Best_interpretations_tmp)
            Best_interpretations_tmp = {}

    return Best_interpretations, mean(Best_array), std(Best_array, ddof=1)
find the best interpretation with the minimum relative standard deviation (std/mean)
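A self-contained sketch of the same selection idea, not the class method itself (specimen names and intensity values are made up, and closest() stands in for self.find_close_value): for each candidate value of one specimen, take the closest value from every other specimen and keep the combination with the smallest std/mean.

import numpy as np

def closest(values, target):
    # nearest available value to the candidate
    return min(values, key=lambda v: abs(v - target))

intensities = {
    "s1": [41.0, 44.5, 52.0],
    "s2": [43.0, 47.5],
    "s3": [42.5, 60.0],
}

best_picks, best_cv = None, np.inf
for specimen, candidates in intensities.items():
    for value in candidates:
        picks = {specimen: value}
        for other, other_values in intensities.items():
            if other != specimen:
                picks[other] = closest(other_values, value)
        arr = np.array(list(picks.values()))
        cv = arr.std(ddof=1) / arr.mean()   # relative standard deviation
        if cv < best_cv:
            best_picks, best_cv = picks, cv

print(best_picks, best_cv)                  # interpretation with the lowest std/mean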