def colorbar(self, mappable=None, **kwargs):
    """Add a `~matplotlib.colorbar.Colorbar` to these `Axes`

    Parameters
    ----------
    mappable : matplotlib data collection, optional
        collection against which to map the colouring, default will
        be the last added mappable artist (collection or image)

    fraction : `float`, optional
        fraction of space to steal from these `Axes` to make space
        for the new axes, default is ``0.`` if ``use_axesgrid=True``
        is given (default), otherwise default is ``.15`` to match the
        upstream matplotlib default.

    **kwargs
        other keyword arguments to be passed to the
        :meth:`Plot.colorbar` generator

    Returns
    -------
    cbar : `~matplotlib.colorbar.Colorbar`
        the newly added `Colorbar`

    See Also
    --------
    Plot.colorbar
    """
    fig = self.get_figure()
    if kwargs.get('use_axesgrid', True):
        kwargs.setdefault('fraction', 0.)
    if kwargs.get('fraction', 0.) == 0.:
        kwargs.setdefault('use_axesgrid', True)
    mappable, kwargs = gcbar.process_colorbar_kwargs(
        fig, mappable=mappable, ax=self, **kwargs)
    if isinstance(fig, Plot):
        # either we have created colorbar Axes using axesgrid1, or the
        # user already gave use_axesgrid=False, so we forcefully disable
        # axesgrid here in case fraction == 0., which causes gridspec
        # colorbars to fail.
        kwargs['use_axesgrid'] = False
    return fig.colorbar(mappable, **kwargs)
def clear_citation(self):
    """Clear the citation and, if citation clearing is enabled, clear the
    evidence and annotations."""
    self.citation.clear()
    if self.citation_clearing:
        self.evidence = None
        self.annotations.clear()
def handleStatus(self, version, code, message):
    "extends handleStatus to instantiate a local response object"
    proxy.ProxyClient.handleStatus(self, version, code, message)
    # client.Response is currently just a container for needed data
    self._response = client.Response(version, code, message, {}, None)
def start(self):
    """Start daemonization process."""
    # If pidfile already exists, we should read pid from there; to overwrite it, if
    # locking will fail, because locking attempt somehow purges the file contents.
    if os.path.isfile(self.pid):
        with open(self.pid, "r") as old_pidfile:
            old_pid = old_pidfile.read()
    # Create a lockfile so that only one instance of this daemon is running at any time.
    try:
        lockfile = open(self.pid, "w")
    except IOError:
        print("Unable to create the pidfile.")
        sys.exit(1)
    try:
        # Try to get an exclusive lock on the file. This will fail if another process
        # has the file locked.
        fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        print("Unable to lock on the pidfile.")
        # We need to overwrite the pidfile if we got here.
        with open(self.pid, "w") as pidfile:
            pidfile.write(old_pid)
        sys.exit(1)

    # Skip the fork if foreground is specified.
    if not self.foreground:
        # Fork, creating a new process for the child.
        try:
            process_id = os.fork()
        except OSError as e:
            self.logger.error("Unable to fork, errno: {0}".format(e.errno))
            sys.exit(1)
        if process_id != 0:
            if self.keep_fds:
                # This is the parent process. Exit without cleanup,
                # see https://github.com/thesharp/daemonize/issues/46
                os._exit(0)
            else:
                sys.exit(0)
        # This is the child process. Continue.

        # Stop listening for signals that the parent process receives.
        # This is done by getting a new process id.
        # setpgrp() is an alternative to setsid().
        # setsid puts the process in a new parent group and detaches its controlling
        # terminal.
        process_id = os.setsid()
        if process_id == -1:
            # Uh oh, there was a problem.
            sys.exit(1)

        # Add lockfile to self.keep_fds.
        self.keep_fds.append(lockfile.fileno())

        # Close all file descriptors, except the ones mentioned in self.keep_fds.
        devnull = "/dev/null"
        if hasattr(os, "devnull"):
            # Python has set os.devnull on this system, use it instead as it might be
            # different than /dev/null.
            devnull = os.devnull

        if self.auto_close_fds:
            for fd in range(3, resource.getrlimit(resource.RLIMIT_NOFILE)[0]):
                if fd not in self.keep_fds:
                    try:
                        os.close(fd)
                    except OSError:
                        pass

        devnull_fd = os.open(devnull, os.O_RDWR)
        os.dup2(devnull_fd, 0)
        os.dup2(devnull_fd, 1)
        os.dup2(devnull_fd, 2)
        os.close(devnull_fd)

    if self.logger is None:
        # Initialize logging.
        self.logger = logging.getLogger(self.app)
        self.logger.setLevel(logging.DEBUG)
        # Display log messages only on defined handlers.
        self.logger.propagate = False

        # Initialize syslog. It will correctly work on OS X, Linux and FreeBSD.
        if sys.platform == "darwin":
            syslog_address = "/var/run/syslog"
        else:
            syslog_address = "/dev/log"

        # We will continue with syslog initialization only if we actually have such
        # capabilities on the machine we are running this on.
        if os.path.exists(syslog_address):
            syslog = handlers.SysLogHandler(syslog_address)
            if self.verbose:
                syslog.setLevel(logging.DEBUG)
            else:
                syslog.setLevel(logging.INFO)
            # Try to mimic normal syslog messages.
            formatter = logging.Formatter("%(asctime)s %(name)s: %(message)s",
                                          "%b %e %H:%M:%S")
            syslog.setFormatter(formatter)
            self.logger.addHandler(syslog)

    # Set umask to default to safe file permissions when running as a root daemon. 027
    # is an octal number which we are typing as 0o27 for Python3 compatibility.
    os.umask(0o27)

    # Change to a known directory. If this isn't done, starting a daemon in a
    # subdirectory that needs to be deleted results in "directory busy" errors.
    os.chdir(self.chdir)

    # Execute privileged action.
    privileged_action_result = self.privileged_action()
    if not privileged_action_result:
        privileged_action_result = []

    # Change owner of pid file; it's required because the pid file will be removed at
    # exit.
    uid, gid = -1, -1
    if self.group:
        try:
            gid = grp.getgrnam(self.group).gr_gid
        except KeyError:
            self.logger.error("Group {0} not found".format(self.group))
            sys.exit(1)
    if self.user:
        try:
            uid = pwd.getpwnam(self.user).pw_uid
        except KeyError:
            self.logger.error("User {0} not found.".format(self.user))
            sys.exit(1)
    if uid != -1 or gid != -1:
        os.chown(self.pid, uid, gid)

    # Change gid.
    if self.group:
        try:
            os.setgid(gid)
        except OSError:
            self.logger.error("Unable to change gid.")
            sys.exit(1)

    # Change uid.
    if self.user:
        try:
            uid = pwd.getpwnam(self.user).pw_uid
        except KeyError:
            self.logger.error("User {0} not found.".format(self.user))
            sys.exit(1)
        try:
            os.setuid(uid)
        except OSError:
            self.logger.error("Unable to change uid.")
            sys.exit(1)

    try:
        lockfile.write("%s" % (os.getpid()))
        lockfile.flush()
    except IOError:
        self.logger.error("Unable to write pid to the pidfile.")
        print("Unable to write pid to the pidfile.")
        sys.exit(1)

    # Set custom action on SIGTERM.
    signal.signal(signal.SIGTERM, self.sigterm)
    atexit.register(self.exit)

    self.logger.warning("Starting daemon.")

    try:
        self.action(*privileged_action_result)
    except Exception:
        for line in traceback.format_exc().split("\n"):
            self.logger.error(line)
def build(self, format='qcow2', path='/tmp'):
    '''
    Build an image using Kiwi.

    :param format: image format to build (default: ``qcow2``)
    :param path: directory in which to build the image (default: ``/tmp``)
    :return:
    '''
    if kiwi is None:
        msg = ('Unable to build the image due to the missing dependencies: '
               'Kiwi module is not available.')
        log.error(msg)
        raise CommandExecutionError(msg)

    raise CommandExecutionError("Build is not yet implemented")
def get_log_events(awsclient, log_group_name, log_stream_name, start_ts=None):
    """Get log events for the specified log group and stream.
    This is used in tenkai output instance diagnostics.

    :param log_group_name: log group name
    :param log_stream_name: log stream name
    :param start_ts: timestamp
    :return:
    """
    client_logs = awsclient.get_client('logs')
    request = {
        'logGroupName': log_group_name,
        'logStreamName': log_stream_name
    }
    if start_ts:
        request['startTime'] = start_ts
    # TODO exhaust the events!
    # TODO use all_pages!
    response = client_logs.get_log_events(**request)
    if 'events' in response and response['events']:
        return [{'timestamp': e['timestamp'], 'message': e['message']}
                for e in response['events']]
def run_command(self, config_file):
    """
    :param str config_file: The name of the config file.
    """
    config = configparser.ConfigParser()
    config.read(config_file)

    rdbms = config.get('database', 'rdbms').lower()

    wrapper = self.create_routine_wrapper_generator(rdbms)
    wrapper.main(config_file)
def AgregarFlete(self, descripcion, importe):
    "Add the information regarding the freight of the liquidation (optional)"
    flete = dict(descripcion=descripcion, importe=importe)
    self.solicitud['flete'] = flete
    return True
def withNamedValues(cls, **values):
    """Create a subclass with a discrete named-values constraint.

    Reduce fully duplicate enumerations along the way.
    """
    enums = set(cls.namedValues.items())
    enums.update(values.items())

    class X(cls):
        namedValues = namedval.NamedValues(*enums)
        subtypeSpec = cls.subtypeSpec + constraint.SingleValueConstraint(
            *values.values())

    X.__name__ = cls.__name__
    return X
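# A brief usage sketch, assuming this classmethod sits on a pyasn1-style
# Integer type (as the namedval/constraint imports suggest); the name
# 'Status' and its values are hypothetical.
from pyasn1.type import univ

Status = univ.Integer.withNamedValues(ok=0, fail=1)
print(Status.namedValues)  # NamedValues mapping 'ok' -> 0 and 'fail' -> 1
Status(0)                  # accepted: 0 satisfies the SingleValueConstraint
try:
    Status(2)              # 2 is outside the constraint
except Exception as err:
    print("rejected:", err)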
def version(self, content_type="*/*"):
    """
    versioning is based off of this post
    http://urthen.github.io/2013/05/09/ways-to-version-your-api/
    """
    v = ""
    accept_header = self.get_header('accept', "")
    if accept_header:
        a = AcceptHeader(accept_header)
        for mt in a.filter(content_type):
            v = mt[2].get("version", "")
            if v:
                break
    return v
def scan(audio_filepaths, *, album_gain=False, skip_tagged=False,
         thread_count=None, ffmpeg_path=None, executor=None):
    """Analyze files, and return a dictionary of filepath to loudness metadata,
    or filepath to future if executor is not None."""
    r128_data = {}

    with contextlib.ExitStack() as cm:
        if executor is None:
            if thread_count is None:
                try:
                    thread_count = len(os.sched_getaffinity(0))
                except AttributeError:
                    thread_count = os.cpu_count()
            enable_ffmpeg_threading = thread_count > (len(audio_filepaths) + int(album_gain))
            executor = cm.enter_context(
                concurrent.futures.ThreadPoolExecutor(max_workers=thread_count))
            asynchronous = False
        else:
            enable_ffmpeg_threading = False
            asynchronous = True

        loudness_tags = tuple(map(has_loudness_tag, audio_filepaths))

        # remove invalid files
        audio_filepaths = tuple(audio_filepath
                                for (audio_filepath, has_tags)
                                in zip(audio_filepaths, loudness_tags)
                                if has_tags is not None)
        loudness_tags = tuple(filter(None, loudness_tags))

        futures = {}
        if album_gain:
            if skip_tagged and all(map(operator.itemgetter(1), loudness_tags)):
                logger().info("All files already have an album gain tag, "
                              "skipping album gain scan")
            elif audio_filepaths:
                calc_album_peak = any(map(lambda x: os.path.splitext(x)[-1].lower() != ".opus",
                                          audio_filepaths))
                futures[ALBUM_GAIN_KEY] = executor.submit(
                    get_r128_loudness,
                    audio_filepaths,
                    calc_peak=calc_album_peak,
                    enable_ffmpeg_threading=enable_ffmpeg_threading,
                    ffmpeg_path=ffmpeg_path)
        for audio_filepath in audio_filepaths:
            if skip_tagged and has_loudness_tag(audio_filepath)[0]:
                logger().info("File '%s' already has a track gain tag, "
                              "skipping track gain scan" % (audio_filepath))
                continue
            if os.path.splitext(audio_filepath)[-1].lower() == ".opus":
                # http://www.rfcreader.com/#rfc7845_line1060
                calc_peak = False
            else:
                calc_peak = True
            futures[audio_filepath] = executor.submit(
                get_r128_loudness,
                (audio_filepath,),
                calc_peak=calc_peak,
                enable_ffmpeg_threading=enable_ffmpeg_threading,
                ffmpeg_path=ffmpeg_path)

        if asynchronous:
            return futures

        for audio_filepath in audio_filepaths:
            try:
                r128_data[audio_filepath] = futures[audio_filepath].result()
            except KeyError:
                # track gain was skipped
                pass
            except Exception as e:
                logger().warning("Failed to analyze file '%s': %s %s"
                                 % (audio_filepath, e.__class__.__qualname__, e))
        if album_gain and audio_filepaths:
            try:
                r128_data[ALBUM_GAIN_KEY] = futures[ALBUM_GAIN_KEY].result()
            except KeyError:
                # album gain was skipped
                pass
            except Exception as e:
                logger().warning("Failed to analyze files %s: %s %s"
                                 % (", ".join("'%s'" % (audio_filepath)
                                              for audio_filepath in audio_filepaths),
                                    e.__class__.__qualname__, e))

    return r128_data
def normalise_key(self, key):
    """Make sure key is a valid python attribute"""
    key = key.replace('-', '_')
    if key.startswith("noy_"):
        key = key[4:]
    return key
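# The method never touches ``self``, so its behaviour is easy to exercise
# standalone; the "noy_" prefix handling is the only non-obvious rule:
print(normalise_key(None, "noy_some-option"))  # -> 'some_option'
print(normalise_key(None, "plain-key"))        # -> 'plain_key'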
def look_up(self, **keys: Dict[InstanceName, ScalarValue]) -> "ArrayEntry":
    """Return the entry with matching keys.

    Args:
        keys: Keys and values specified as keyword arguments.

    Raises:
        InstanceValueError: If the receiver's value is not a YANG list.
        NonexistentInstance: If no entry with matching keys exists.
    """
    if not isinstance(self.schema_node, ListNode):
        raise InstanceValueError(self.json_pointer(), "lookup on non-list")
    try:
        for i in range(len(self.value)):
            en = self.value[i]
            flag = True
            for k in keys:
                if en[k] != keys[k]:
                    flag = False
                    break
            if flag:
                return self._entry(i)
        raise NonexistentInstance(self.json_pointer(), "entry lookup failed")
    except KeyError:
        raise NonexistentInstance(self.json_pointer(), "entry lookup failed") from None
    except TypeError:
        raise InstanceValueError(self.json_pointer(), "lookup on non-list") from None
def get_service_references(self, clazz, ldap_filter=None):
    # type: (Optional[str], Optional[str]) -> Optional[List[ServiceReference]]
    """
    Returns the service references for services that were registered under
    the specified class by this bundle and matching the given filter

    :param clazz: The class name with which the service was registered.
    :param ldap_filter: A filter on service properties
    :return: The list of references to the services registered by the
             calling bundle and matching the filters.
    """
    refs = self.__framework.find_service_references(clazz, ldap_filter)
    if refs:
        # Keep only the references registered by this bundle; filtering into
        # a new list avoids mutating the list while iterating over it.
        refs = [ref for ref in refs if ref.get_bundle() is self.__bundle]
    return refs
def add_back_ref(self, back_ref, attr=None):
    """Add reference from back_ref to self

    :param back_ref: back_ref to add
    :type back_ref: Resource

    :rtype: Resource
    """
    back_ref.add_ref(self, attr)
    return self.fetch()
def write(data, path, saltenv='base', index=0):
    '''
    Write the named file, by default the first file found is written, but the
    index of the file can be specified to write to a lower priority file root
    '''
    if saltenv not in __opts__['pillar_roots']:
        return 'Named environment {0} is not present'.format(saltenv)
    if len(__opts__['pillar_roots'][saltenv]) <= index:
        return 'Specified index {0} in environment {1} is not present'.format(
            index, saltenv)
    if os.path.isabs(path):
        return ('The path passed in {0} is not relative to the environment '
                '{1}').format(path, saltenv)
    dest = os.path.join(__opts__['pillar_roots'][saltenv][index], path)
    dest_dir = os.path.dirname(dest)
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    with salt.utils.files.fopen(dest, 'w+') as fp_:
        fp_.write(salt.utils.stringutils.to_str(data))
    return 'Wrote data to file {0}'.format(dest)
def _prepare_graph(g, g_colors, q_cls, q_arg, adjust_graph):
    """Prepares a graph for use in :class:`.QueueNetwork`.

    This function is called by ``__init__`` in the :class:`.QueueNetwork`
    class. It creates the :class:`.QueueServer` instances that sit on the
    edges, and sets various edge and node properties that are used when
    drawing the graph.

    Parameters
    ----------
    g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, ``None``, etc.
        Any object that networkx can turn into a
        :any:`DiGraph<networkx.DiGraph>`
    g_colors : dict
        A dictionary of colors. The specific keys used are ``vertex_color``
        and ``vertex_fill_color`` for vertices that do not have any loops.
        See :class:`.QueueNetwork` for the default values passed.
    q_cls : dict
        A dictionary where the keys are integers that represent an edge type,
        and the values are :class:`.QueueServer` classes.
    q_arg : dict
        A dictionary where the keys are integers that represent an edge type,
        and the values are the arguments that are used when creating an
        instance of that :class:`.QueueServer` class.
    adjust_graph : bool
        Specifies whether the graph will be adjusted using
        :func:`.adjacency2graph`.

    Returns
    -------
    g : :class:`.QueueNetworkDiGraph`
    queues : list
        A list of :class:`QueueServers<.QueueServer>` where ``queues[k]`` is
        the ``QueueServer`` that sits on the edge with edge index ``k``.

    Notes
    -----
    The graph ``g`` should have the ``edge_type`` edge property map. If it
    does not then an ``edge_type`` edge property is created and set to 1.

    The following properties are set by each queue: ``vertex_color``,
    ``vertex_fill_color``, ``edge_color``. See :class:`.QueueServer` for
    more on setting these values.

    The following properties are assigned as properties to the graph; their
    default values for each edge or vertex is shown:

        * ``vertex_pen_width``: ``1``,
        * ``vertex_size``: ``8``,
        * ``edge_control_points``: ``[]``
        * ``edge_marker_size``: ``8``
        * ``edge_pen_width``: ``1.25``

    Raises
    ------
    TypeError
        Raised when the parameter ``g`` is not of a type that can be made
        into a :any:`networkx.DiGraph`.
    """
    g = _test_graph(g)

    if adjust_graph:
        pos = nx.get_node_attributes(g, 'pos')
        ans = nx.to_dict_of_dicts(g)
        g = adjacency2graph(ans, adjust=2, is_directed=g.is_directed())
        g = QueueNetworkDiGraph(g)
        if len(pos) > 0:
            g.set_pos(pos)

    g.new_vertex_property('vertex_color')
    g.new_vertex_property('vertex_fill_color')
    g.new_vertex_property('vertex_pen_width')
    g.new_vertex_property('vertex_size')

    g.new_edge_property('edge_control_points')
    g.new_edge_property('edge_color')
    g.new_edge_property('edge_marker_size')
    g.new_edge_property('edge_pen_width')

    queues = _set_queues(g, q_cls, q_arg, 'cap' in g.vertex_properties())

    if 'pos' not in g.vertex_properties():
        g.set_pos()

    for k, e in enumerate(g.edges()):
        g.set_ep(e, 'edge_pen_width', 1.25)
        g.set_ep(e, 'edge_marker_size', 8)
        if e[0] == e[1]:
            g.set_ep(e, 'edge_color', queues[k].colors['edge_loop_color'])
        else:
            g.set_ep(e, 'edge_color', queues[k].colors['edge_color'])

    for v in g.nodes():
        g.set_vp(v, 'vertex_pen_width', 1)
        g.set_vp(v, 'vertex_size', 8)
        e = (v, v)
        if g.is_edge(e):
            g.set_vp(v, 'vertex_color',
                     queues[g.edge_index[e]]._current_color(2))
            g.set_vp(v, 'vertex_fill_color',
                     queues[g.edge_index[e]]._current_color())
        else:
            g.set_vp(v, 'vertex_color', g_colors['vertex_color'])
            g.set_vp(v, 'vertex_fill_color', g_colors['vertex_fill_color'])

    return g, queues
def hkl_transformation(transf, miller_index):
    """
    Returns the Miller index from setting A to B using a transformation
    matrix

    Args:
        transf (3x3 array): The transformation matrix that transforms a
            lattice of A to B
        miller_index ([h, k, l]): Miller index to transform to setting B
    """
    # Get a matrix of whole numbers (ints)
    lcm = lambda a, b: a * b // math.gcd(a, b)
    reduced_transf = reduce(lcm, [int(1 / i) for i in itertools.chain(*transf)
                                  if i != 0]) * transf
    reduced_transf = reduced_transf.astype(int)

    # perform the transformation
    t_hkl = np.dot(reduced_transf, miller_index)
    d = abs(reduce(gcd, t_hkl))
    t_hkl = np.array([int(i / d) for i in t_hkl])

    # get mostly positive oriented Miller index
    if len([i for i in t_hkl if i < 0]) > 1:
        t_hkl *= -1

    return tuple(t_hkl)
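# A worked call, assuming the function above with numpy, math, itertools
# and functools.reduce in scope; the matrix is the standard
# conventional-to-primitive fcc transformation:
import numpy as np

transf = np.array([[0.0, 0.5, 0.5],
                   [0.5, 0.0, 0.5],
                   [0.5, 0.5, 0.0]])
print(hkl_transformation(transf, [1, 1, 1]))  # -> (1, 1, 1)
print(hkl_transformation(transf, [2, 0, 0]))  # -> (0, 1, 1)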
def unlink(self):
    """Remove the daemon's pid file

    :return: None
    """
    logger.debug("Unlinking %s", self.pid_filename)
    try:
        os.unlink(self.pid_filename)
    except OSError as exp:
        logger.debug("Got an error unlinking our pid file: %s", exp)
def _handle_response(response, server_config, synchronous=False, timeout=None):
    """Handle a server's response in a typical fashion.

    Do the following:

    1. Check the server's response for an HTTP status code indicating an
       error.
    2. Poll the server for a foreman task to complete if an HTTP 202
       (accepted) status code is returned and ``synchronous is True``.
    3. Immediately return if an HTTP "NO CONTENT" response is received.
    4. Determine what type of content is returned from the server. Depending
       on the type, return the server's response with all JSON decoded, or
       just the response content itself.

    :param response: A response object as returned by one of the functions in
        :mod:`nailgun.client` or the requests library.
    :param server_config: A `nailgun.config.ServerConfig` object.
    :param synchronous: Should this function poll the server?
    :param timeout: Maximum number of seconds to wait until timing out.
        Defaults to ``nailgun.entity_mixins.TASK_TIMEOUT``.
    """
    response.raise_for_status()
    if synchronous is True and response.status_code == ACCEPTED:
        return ForemanTask(
            server_config, id=response.json()['id']).poll(timeout=timeout)
    if response.status_code == NO_CONTENT:
        return
    if 'application/json' in response.headers.get('content-type', '').lower():
        return response.json()
    elif isinstance(response.content, bytes):
        return response.content.decode('utf-8')
    else:
        return response.content
def avatar_url_from_openid(openid, size=64, default='retro', dns=False):
    """
    Our own implementation since fas doesn't support this nicely yet.
    """
    if dns:
        # This makes an extra DNS SRV query, which can slow down our webapps.
        # It is necessary for libravatar federation, though.
        import libravatar
        return libravatar.libravatar_url(
            openid=openid,
            size=size,
            default=default,
        )
    else:
        params = _ordered_query_params([('s', size), ('d', default)])
        query = parse.urlencode(params)
        hash = sha256(openid.encode('utf-8')).hexdigest()
        return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
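# A self-contained sketch of the non-DNS branch; the plain list of pairs
# stands in for the _ordered_query_params helper (an assumption about what
# that helper returns):
from hashlib import sha256
from urllib import parse

openid = "http://example.id.fedoraproject.org/"
query = parse.urlencode([('s', 64), ('d', 'retro')])
digest = sha256(openid.encode('utf-8')).hexdigest()
print("https://seccdn.libravatar.org/avatar/%s?%s" % (digest, query))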
def write_update(rootfs_filepath: str,
                 progress_callback: Callable[[float], None],
                 chunk_size: int = 1024,
                 file_size: int = None) -> RootPartitions:
    """ Write the new rootfs to the next root partition

    - Figure out, from the system, the correct root partition to write to
    - Write the rootfs at ``rootfs_filepath`` there, with progress

    :param rootfs_filepath: The path to a checked rootfs.ext4
    :param progress_callback: A callback to call periodically with progress
                              between 0 and 1.0. May never reach precisely
                              1.0, best only for user information.
    :param chunk_size: The size of file chunks to copy in between progress
                       notifications
    :param file_size: The total size of the update file (for generating
                      progress percentage). If ``None``, generated with
                      ``seek``/``tell``.
    :returns: The root partition that the rootfs image was written to, e.g.
              ``RootPartitions.TWO`` or ``RootPartitions.THREE``.
    """
    unused = _find_unused_partition()
    part_path = unused.value.path
    write_file(rootfs_filepath, part_path, progress_callback, chunk_size,
               file_size)
    return unused
def _build_response(self, resp):
    """Build internal Response object from given response."""
    # rememberLogin
    # if self.method is 'LOGIN' and resp.json().get('code') == 200:
    #     cookiesJar.save_cookies(resp, NCloudBot.username)
    self.response.content = resp.content
    self.response.status_code = resp.status_code
    self.response.headers = resp.headers
def read(self):
    """
    Load the metrics file from the given path
    """
    # Use a context manager so the file handle is closed after reading.
    with open(self.path, "r") as f:
        self.manifest_json = f.read()
def pOparapar(self, Opar, apar, tdisrupt=None):
    """
    NAME:

       pOparapar

    PURPOSE:

       return the probability of a given parallel (frequency,angle) offset pair

    INPUT:

       Opar - parallel frequency offset (array) (can be Quantity)

       apar - parallel angle offset along the stream (scalar) (can be Quantity)

    OUTPUT:

       p(Opar,apar)

    HISTORY:

       2015-12-07 - Written - Bovy (UofT)

    """
    if _APY_LOADED and isinstance(Opar, units.Quantity):
        Opar = Opar.to(1 / units.Gyr).value \
            / bovy_conversion.freq_in_Gyr(self._vo, self._ro)
    if _APY_LOADED and isinstance(apar, units.Quantity):
        apar = apar.to(units.rad).value
    if tdisrupt is None:
        tdisrupt = self._tdisrupt
    if isinstance(Opar, (int, float, numpy.float32, numpy.float64)):
        Opar = numpy.array([Opar])
    out = numpy.zeros(len(Opar))
    # Compute ts
    ts = apar / Opar
    # Evaluate
    out[(ts < tdisrupt) * (ts >= 0.)] = \
        numpy.exp(-0.5 * (Opar[(ts < tdisrupt) * (ts >= 0.)] - self._meandO)**2.
                  / self._sortedSigOEig[2]) / \
        numpy.sqrt(self._sortedSigOEig[2])
    return out
def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True,
                        role=settings.DEFAULT_ASSISTANT_ROLE):
    """Constructs instance of YamlAssistant loaded from given structure y,
    loaded from source file source.

    Args:
        source: path to assistant source file
        y: loaded yaml structure
        superassistant: superassistant of this assistant
    Returns:
        YamlAssistant instance constructed from y with source file source
    Raises:
        YamlError: if the assistant is malformed
    """
    # In pre-0.9.0, we required assistant to be a mapping of
    # {name: assistant_attributes}; now we allow that, but we also allow
    # omitting the assistant name and putting the attributes to top level,
    # too.
    name = os.path.splitext(os.path.basename(source))[0]
    yaml_checker.check(source, y)
    assistant = yaml_assistant.YamlAssistant(name, y, source, superassistant,
                                             fully_loaded=fully_loaded,
                                             role=role)
    return assistant
def get_viscosity(medium="CellCarrier", channel_width=20.0, flow_rate=0.16,
                  temperature=23.0):
    """Returns the viscosity for RT-DC-specific media

    Parameters
    ----------
    medium: str
        The medium to compute the viscosity for.
        One of ["CellCarrier", "CellCarrier B", "water"].
    channel_width: float
        The channel width in µm
    flow_rate: float
        Flow rate in µl/s
    temperature: float or ndarray
        Temperature in °C

    Returns
    -------
    viscosity: float or ndarray
        Viscosity in mPa*s

    Notes
    -----
    - CellCarrier and CellCarrier B media are optimized for
      RT-DC measurements.
    - Values for the viscosity of water are computed using
      equation (15) from :cite:`Kestin_1978`.
    """
    if medium.lower() not in ["cellcarrier", "cellcarrier b", "water"]:
        raise ValueError("Invalid medium: {}".format(medium))

    # convert flow_rate from µl/s to m³/s
    # convert channel_width from µm to m
    term1 = 1.1856 * 6 * flow_rate * 1e-9 / (channel_width * 1e-6)**3 * 2 / 3

    if medium == "CellCarrier":
        temp_corr = (temperature / 23.2)**-0.866
        term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.677)
        eta = 0.179 * (term1 * term2)**(0.677 - 1) * temp_corr * 1e3
    elif medium == "CellCarrier B":
        temp_corr = (temperature / 23.6)**-0.866
        term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.634)
        eta = 0.360 * (term1 * term2)**(0.634 - 1) * temp_corr * 1e3
    elif medium == "water":
        # see equation (15) in Kestin et al, J. Phys. Chem. 7(3) 1978
        if np.min(temperature) < 0 or np.max(temperature) > 40:
            msg = "For water, the temperature must be in [0, 40] degC! " \
                  "Got min/max values of '{}'/'{}'.".format(np.min(temperature),
                                                            np.max(temperature))
            raise ValueError(msg)
        eta0 = 1.002  # [mPa]
        right = (20 - temperature) / (temperature + 96) \
            * (+ 1.2364
               - 1.37e-3 * (20 - temperature)
               + 5.7e-6 * (20 - temperature)**2
               )
        eta = eta0 * 10**right
    return eta
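# Quick sanity check of the "water" branch (assumes the function above and
# numpy are available); equation (15) of Kestin 1978 should land close to
# the textbook value for water at 24 degC:
eta = get_viscosity(medium="water", temperature=24.0)
print(eta)  # roughly 0.91 mPa*s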
def get_assessment(self, assessment_id):
    """Gets the ``Assessment`` specified by its ``Id``.

    In plenary mode, the exact ``Id`` is found or a ``NotFound`` results.
    Otherwise, the returned ``Assessment`` may have a different ``Id`` than
    requested, such as the case where a duplicate ``Id`` was assigned to a
    ``Assessment`` and retained for compatibility.

    arg:    assessment_id (osid.id.Id): ``Id`` of the ``Assessment``
    return: (osid.assessment.Assessment) - the assessment
    raise:  NotFound - ``assessment_id`` not found
    raise:  NullArgument - ``assessment_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resource
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('assessment',
                                     collection='Assessment',
                                     runtime=self._runtime)
    result = collection.find_one(
        dict({'_id': ObjectId(self._get_id(assessment_id, 'assessment').get_identifier())},
             **self._view_filter()))
    return objects.Assessment(osid_object_map=result,
                              runtime=self._runtime,
                              proxy=self._proxy)
def export(rv, code=None, headers=None):
    """
    Create a suitable response

    Args:
        rv: return value of action
        code: status code
        headers: response headers

    Returns:
        flask.Response
    """
    if isinstance(rv, ResponseBase):
        return make_response(rv, code, headers)
    else:
        if code is None:
            code = 200
        mediatype = request.accept_mimetypes.best_match(
            exporters.keys(), default='application/json')
        return exporters[mediatype](rv, code, headers)
def allow(self, privilege):
    """Add an allowed privilege (read, write, execute, all)."""
    assert privilege in PERMISSIONS['user'].keys()
    reading = (PERMISSIONS['user'][privilege]
               + PERMISSIONS['group'][privilege]
               + PERMISSIONS['other'][privilege])
    os.chmod(self.file_path, reading)
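# The PERMISSIONS table is defined elsewhere in the module; a hypothetical
# shape that makes the sum above a valid chmod mode would be built from
# stat constants:
import stat

PERMISSIONS = {
    'user': {'read': stat.S_IRUSR, 'write': stat.S_IWUSR,
             'execute': stat.S_IXUSR, 'all': stat.S_IRWXU},
    'group': {'read': stat.S_IRGRP, 'write': stat.S_IWGRP,
              'execute': stat.S_IXGRP, 'all': stat.S_IRWXG},
    'other': {'read': stat.S_IROTH, 'write': stat.S_IWOTH,
              'execute': stat.S_IXOTH, 'all': stat.S_IRWXO},
}
# With this table, allow('read') would chmod the file to 0o444.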
def authorize(self, names, payload=None, request_type="push"):
    '''Authorize a client based on encrypting the payload with the client
    token, which should be matched on the receiving server'''
    if self.secrets is not None:
        if "registry" in self.secrets:
            # Use the payload to generate a digest
            #   push|collection|name|tag|user
            timestamp = generate_timestamp()
            credential = generate_credential(self.secrets['registry']['username'])
            credential = "%s/%s/%s" % (request_type, credential, timestamp)
            if payload is None:
                payload = "%s|%s|%s|%s|%s|" % (request_type,
                                               names['collection'],
                                               timestamp,
                                               names['image'],
                                               names['tag'])
            signature = generate_signature(payload, self.secrets['registry']['token'])
            return "SREGISTRY-HMAC-SHA256 Credential=%s,Signature=%s" % (credential, signature)
def fill_treewidget(self, tree, parameters):
    """
    fills a QTreeWidget with nested parameters, in future replace
    QTreeWidget with QTreeView and call fill_treeview

    Args:
        tree: QtWidgets.QTreeWidget
        parameters: dictionary or Parameter object

    Returns:

    """
    tree.clear()
    assert isinstance(parameters, (dict, Parameter))

    for key, value in parameters.items():
        if isinstance(value, Parameter):
            B26QTreeItem(tree, key, value, parameters.valid_values[key],
                         parameters.info[key])
        else:
            B26QTreeItem(tree, key, value, type(value), '')
def walk_oid(self, oid):
    """Get a list of SNMP varbinds in response to a walk for oid.

    Each varbind in the response list has a tag, iid, val and type
    attribute."""
    var = netsnmp.Varbind(oid)
    varlist = netsnmp.VarList(var)
    data = self.walk(varlist)
    if len(data) == 0:
        raise SnmpException("SNMP walk response incomplete")
    return varlist
def handle_pubrel(self):
    """Handle incoming PUBREL packet."""
    self.logger.info("PUBREL received")

    ret, mid = self.in_packet.read_uint16()
    if ret != NC.ERR_SUCCESS:
        return ret

    evt = event.EventPubrel(mid)
    self.push_event(evt)
    return NC.ERR_SUCCESS
def hmset(self, key, value_dict):
    """Sets fields to values as in `value_dict` in the hash stored at `key`.

    Sets the specified fields to their respective values in the hash stored
    at `key`. This command overwrites any specified fields already existing
    in the hash. If `key` does not exist, a new key holding a hash is
    created.

    .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of
       fields being set.

    :param key: The key of the hash
    :type key: :class:`str`, :class:`bytes`
    :param value_dict: field to value mapping
    :type value_dict: :class:`dict`
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.RedisError`

    """
    if not value_dict:
        future = concurrent.TracebackFuture()
        future.set_result(False)
    else:
        command = [b'HMSET', key]
        command.extend(sum(value_dict.items(), ()))
        future = self._execute(command)
    return future
def password_enter(self, wallet, password):
    """
    Enters the **password** in to **wallet**

    :param wallet: Wallet to enter password for
    :type wallet: str

    :param password: Password to enter
    :type password: str

    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.password_enter(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
    ...     password="test"
    ... )
    True

    """
    wallet = self._process_value(wallet, 'wallet')

    payload = {"wallet": wallet, "password": password}

    resp = self.call('password_enter', payload)

    return resp['valid'] == '1'
def onFolderTreeClicked(self, proxyIndex):
    """What to do when a Folder in the tree is clicked"""
    if not proxyIndex.isValid():
        return

    index = self.proxyFileModel.mapToSource(proxyIndex)
    settings = QSettings()
    folder_path = self.fileModel.filePath(index)
    settings.setValue('mainwindow/workingDirectory', folder_path)
def _push_next(self):
    """Assign next batch workload to workers."""
    r = next(self._iter, None)
    if r is None:
        return
    async_ret = self._worker_pool.apply_async(
        self._worker_fn, (r, self._batchify_fn, self._dataset))
    self._data_buffer[self._sent_idx] = async_ret
    self._sent_idx += 1
def get_frames(self, frames='all', override=False, **kwargs):
    """
    Extract frames from the trajectory file.

    Depending on the passed parameters a frame, a list of particular frames,
    a range of frames (from, to), or all frames can be extracted with this
    function.

    Parameters
    ----------
    frames : :class:`int` or :class:`list` or :class:`tuple` or :class:`str`
        Specified frame (:class:`int`), or frames (:class:`list`), or range
        (:class:`tuple`), or `all`/`everything` (:class:`str`).
        (default=`all`)

    override : :class:`bool`
        If True, a frame already stored in :attr:`frames` can be overridden.
        (default=False)

    extract_data : :class:`bool`, optional
        If False, a frame is returned as a :class:`str` block as in the
        trajectory file. Otherwise, it is extracted and returned as
        :class:`pywindow.molecular.MolecularSystem`. (default=True)

    swap_atoms : :class:`dict`, optional
        If this kwarg is passed with an appropriate dictionary a
        :func:`pywindow.molecular.MolecularSystem.swap_atom_keys()` will be
        applied to the extracted frame.

    forcefield : :class:`str`, optional
        If this kwarg is passed with appropriate forcefield keyword a
        :func:`pywindow.molecular.MolecularSystem.decipher_atom_keys()` will
        be applied to the extracted frame.

    Returns
    -------
    :class:`pywindow.molecular.MolecularSystem`
        If a single frame is extracted.

    None : :class:`NoneType`
        If more than one frame is extracted, the frames are returned to
        :attr:`frames`

    """
    if override is True:
        self.frames = {}
    if isinstance(frames, int):
        frame = self._get_frame(
            self.trajectory_map[frames], frames, **kwargs)
        if frames not in self.frames.keys():
            self.frames[frames] = frame
        return frame
    if isinstance(frames, list):
        for frame in frames:
            if frame not in self.frames.keys():
                self.frames[frame] = self._get_frame(
                    self.trajectory_map[frame], frame, **kwargs)
    if isinstance(frames, tuple):
        for frame in range(frames[0], frames[1]):
            if frame not in self.frames.keys():
                self.frames[frame] = self._get_frame(
                    self.trajectory_map[frame], frame, **kwargs)
    if isinstance(frames, str):
        if frames in ['all', 'everything']:
            for frame in range(0, self.no_of_frames):
                if frame not in self.frames.keys():
                    self.frames[frame] = self._get_frame(
                        self.trajectory_map[frame], frame, **kwargs)
def merged(self):
    '''The clean stats from all the hosts reporting to this host.'''
    stats = {}
    for topic in self.client.topics()['topics']:
        for producer in self.client.lookup(topic)['producers']:
            hostname = producer['broadcast_address']
            port = producer['http_port']
            host = '%s_%s' % (hostname, port)
            stats[host] = nsqd.Client(
                'http://%s:%s/' % (hostname, port)).clean_stats()
    return stats
def delete(self, request, *args, **kwargs):
    """
    Calls the delete() method on the fetched object and then redirects to
    the success URL.
    """
    self.object = self.get_object()
    success_url = self.get_success_url()
    self.object.delete()

    if self.request.is_ajax():
        return JSONResponseMixin.render_to_response(self, context={})

    return HttpResponseRedirect(success_url)
def get_nodes_with_recipe(recipe_name, environment=None):
    """Get all nodes which include a given recipe,
    prefix-searches are also supported
    """
    prefix_search = recipe_name.endswith("*")
    if prefix_search:
        recipe_name = recipe_name.rstrip("*")
    for n in get_nodes(environment):
        recipes = get_recipes_in_node(n)
        for role in get_roles_in_node(n, recursive=True):
            recipes.extend(get_recipes_in_role(role))
        if prefix_search:
            if any(recipe.startswith(recipe_name) for recipe in recipes):
                yield n
        else:
            if recipe_name in recipes:
                yield n
def getfo(self, remotepath, fl, callback=None):
    """
    Copy a remote file (``remotepath``) from the SFTP server and write to
    an open file or file-like object, ``fl``.  Any exception raised by
    operations will be passed through.  This method is primarily provided
    as a convenience.

    :param str remotepath: the remote path of the file to copy
    :param object fl: the open file or file-like object to write to
    :param callable callback:
        optional callback function (form: ``func(int, int)``) that accepts
        the bytes transferred so far and the total bytes to be transferred
    :return: the `number <int>` of bytes written to the opened file object

    .. versionadded:: 1.10
    """
    file_size = self.stat(remotepath).st_size
    with self.open(remotepath, "rb") as fr:
        fr.prefetch(file_size)
        return self._transfer_with_callback(
            reader=fr, writer=fl, file_size=file_size, callback=callback
        )
def setHint(self, hint):
    """
    Sets the hint for this line edit that will be displayed when in
    editable mode.

    :param      hint | <str>
    """
    self._hint = hint

    lineEdit = self.lineEdit()
    if isinstance(lineEdit, XLineEdit):
        lineEdit.setHint(hint)
def trace_symlink_target(link):
    """
    Given a file that is known to be a symlink, trace it to its ultimate
    target.

    Raises TargetNotPresent when the target cannot be determined.
    Raises ValueError when the specified link is not a symlink.
    """
    if not is_symlink(link):
        raise ValueError("link must point to a symlink on the system")
    while is_symlink(link):
        orig = os.path.dirname(link)
        link = readlink(link)
        link = resolve_path(link, orig)
    return link
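# A POSIX-flavoured usage sketch, assuming the module's is_symlink/readlink
# helpers behave like os.path.islink/os.readlink:
import os
import tempfile

d = tempfile.mkdtemp()
target = os.path.join(d, "target.txt")
open(target, "w").close()
os.symlink(target, os.path.join(d, "link1"))                    # link1 -> target.txt
os.symlink(os.path.join(d, "link1"), os.path.join(d, "link2"))  # link2 -> link1
print(trace_symlink_target(os.path.join(d, "link2")))           # ends at target.txt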
def find_token(request, token_type, service, **kwargs):
    """
    The access token can be in a number of places. There are priority rules
    as to which one to use, abide by those:

    1. If it's among the request parameters, use that
    2. If among the extra keyword arguments
    3. Acquired by a previously run service.

    :param request:
    :param token_type:
    :param service:
    :param kwargs:
    :return:
    """
    if request is not None:
        try:
            _token = request[token_type]
        except KeyError:
            pass
        else:
            del request[token_type]
            # Required under certain circumstances :-) not under others
            request.c_param[token_type] = SINGLE_OPTIONAL_STRING
            return _token

    try:
        return kwargs["access_token"]
    except KeyError:
        # I should pick the latest acquired token; this should be the right
        # order for that.
        _arg = service.multiple_extend_request_args(
            {}, kwargs['state'], ['access_token'],
            ['auth_response', 'token_response', 'refresh_token_response'])
        return _arg['access_token']
def set_channel_locations(self, channel_ids, locations):
    '''This function sets the location properties of each specified channel
    id with the corresponding locations of the passed in locations list.

    Parameters
    ----------
    channel_ids: array_like
        The channel ids (ints) for which the locations will be specified
    locations: array_like
        A list of corresponding locations (array_like) for the given
        channel_ids
    '''
    if len(channel_ids) == len(locations):
        for i in range(len(channel_ids)):
            if isinstance(locations[i], (list, np.ndarray)):
                location = np.asarray(locations[i])
                self.set_channel_property(channel_ids[i], 'location',
                                          location.astype(float))
            else:
                raise ValueError(str(locations[i]) + " must be an array_like")
    else:
        raise ValueError("channel_ids and locations must have same length")
def last(self):
    """
    The last of a GridSpace is another GridSpace constituted of the last of
    the individual elements. To access the elements by their X,Y position,
    either index the position directly or use the items() method.
    """
    if self.type == HoloMap:
        last_items = [(k, v.last if isinstance(v, HoloMap) else v)
                      for (k, v) in self.data.items()]
    else:
        last_items = self.data
    return self.clone(last_items)
def __set_ethernet_uris(self, ethernet_names, operation="add"):
    """Updates network uris."""
    if not isinstance(ethernet_names, list):
        ethernet_names = [ethernet_names]

    associated_enets = self.data.get('networkUris', [])
    ethernet_uris = []

    for i, enet in enumerate(ethernet_names):
        enet_exists = self._ethernet_networks.get_by_name(enet)
        if enet_exists:
            ethernet_uris.append(enet_exists.data['uri'])
        else:
            raise HPOneViewResourceNotFound("Ethernet: {} does not exist".format(enet))

    if operation == "remove":
        enets_to_update = sorted(list(set(associated_enets) - set(ethernet_uris)))
    elif operation == "add":
        enets_to_update = sorted(list(set(associated_enets).union(set(ethernet_uris))))
    else:
        raise ValueError("Value {} is not supported as operation. "
                         "The supported values are: ['add', 'remove']".format(operation))

    if set(enets_to_update) != set(associated_enets):
        updated_network = {'networkUris': enets_to_update}
        self.update(updated_network)
def _append(self, menu):
    '''append this menu item to a menu'''
    menu.AppendCheckItem(self.id(), self.name, self.description)
    menu.Check(self.id(), self.checked)
def save_artists(self, artists, filename="artist_lyrics", overwrite=False):
    """Save lyrics from multiple Artist objects as JSON object

    :param artists: List of Artist objects to save lyrics from
    :param filename: Name of output file (json)
    :param overwrite: Overwrites preexisting file if True
    """
    if isinstance(artists, Artist):
        artists = [artists]

    # Create a temporary directory for lyrics
    start = time.time()
    tmp_dir = 'tmp_lyrics'
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)
        count = 0
    else:
        count = len(os.listdir(tmp_dir))

    # Check if file already exists
    if os.path.isfile(filename + ".json") and not overwrite:
        msg = "{f} already exists. Overwrite?\n(y/n): ".format(f=filename)
        if input(msg).lower() != "y":
            print("Leaving file in place. Exiting.")
            os.rmdir(tmp_dir)
            return

    # Extract each artist's lyrics in json format
    all_lyrics = {'artists': []}
    for n, artist in enumerate(artists):
        if isinstance(artist, Artist):
            all_lyrics['artists'].append({})
            f = "tmp_{n}_{a}".format(n=count + n,
                                     a=artist.name.replace(" ", ""))
            tmp_file = os.path.join(tmp_dir, f)
            if self.verbose:
                print(tmp_file)
            all_lyrics['artists'][-1] = artist.save_lyrics(overwrite=True)

    # Save all of the lyrics
    with open(filename + '.json', 'w') as outfile:
        json.dump(all_lyrics, outfile)

    # Delete the temporary directory
    shutil.rmtree(tmp_dir)
    elapsed = (time.time() - start) / 60 / 60
    print("Time elapsed: {t} hours".format(t=elapsed))
def get_allproductandrelease(self):
    """
    Get All ProductAndReleases

    :return: A tuple: all product and releases from the SDC Catalog as a
        dict, and the 'Request' response
    """
    logger.info("Get all ProductAndReleases")
    response = self.get(PRODUCTANDRELEASE_RESOURCE_ROOT_URI,
                        headers=self.headers)
    sr_response = response_body_to_dict(
        response, self.headers[HEADER_CONTENT_TYPE],
        xml_root_element_name=PRODUCTANDRELEASE_BODY_ROOT)
    return sr_response, response
def vectorize_range(values):
    """
    This function is for url encoding.

    Takes a value or a tuple or list of tuples and returns a single result;
    tuples are joined by "," if necessary, elements in a tuple are joined
    by '_'
    """
    if isinstance(values, tuple):
        return '_'.join(str(i) for i in values)
    if isinstance(values, list):
        if not all([isinstance(item, tuple) for item in values]):
            raise TypeError('Items in the list must be tuples')
        return ','.join('_'.join(str(i) for i in v) for v in values)
    return str(values)
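# The three accepted input shapes, worked through:
print(vectorize_range(5))                 # '5'
print(vectorize_range((1, 2)))            # '1_2'
print(vectorize_range([(1, 2), (3, 4)]))  # '1_2,3_4'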
def dictify(r, root=True):
    """http://stackoverflow.com/a/30923963/2946714"""
    if root:
        return {r.tag: dictify(r, False)}
    d = copy(r.attrib)
    if r.text:
        d["_text"] = r.text
    for x in r.findall("./*"):
        if x.tag not in d:
            d[x.tag] = []
        d[x.tag].append(dictify(x, False))
    return d
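# Usage with the standard library's ElementTree (the function above assumes
# ``from copy import copy`` is in scope):
import xml.etree.ElementTree as ET

root = ET.fromstring('<book id="1"><title>Dune</title></book>')
print(dictify(root))
# {'book': {'id': '1', 'title': [{'_text': 'Dune'}]}}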
def allconcat(self, x, mesh_axis, concat_axis):
    """Grouped allconcat (like MPI allgather followed by concat).

    Args:
        x: a LaidOutTensor
        mesh_axis: an integer - the mesh axis along which to group
        concat_axis: an integer (the Tensor axis along which to concatenate)

    Returns:
        a LaidOutTensor
    """
    return self._collective_with_groups(
        x, [mesh_axis],
        functools.partial(allconcat_ring, concat_axis=concat_axis))
Grouped allconcat (like MPI allgather followed by concat). Args: x: a LaidOutTensor mesh_axis: an integer - the mesh axis along which to group concat_axis: an integer (the Tensor axis along which to concatenate) Returns: a LaidOutTensor
def bods2c(name):
    """
    Translate a string containing a body name or ID code to an integer code.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bods2c_c.html

    :param name: String to be translated to an ID code.
    :type name: str
    :return: Integer ID code corresponding to name, and a flag indicating
        whether the translation succeeded.
    :rtype: tuple of (int, bool)
    """
    name = stypes.stringToCharP(name)
    code = ctypes.c_int(0)
    found = ctypes.c_int(0)
    libspice.bods2c_c(name, ctypes.byref(code), ctypes.byref(found))
    return code.value, bool(found.value)
Translate a string containing a body name or ID code to an integer code. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bods2c_c.html :param name: String to be translated to an ID code. :type name: str :return: Integer ID code corresponding to name, and a flag indicating whether the translation succeeded. :rtype: tuple of (int, bool)
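Called exactly as defined above, the wrapper returns the code together with a found flag; EARTH maps to the built-in NAIF code 399 (note that newer spiceypy releases raise an exception instead of returning the flag):

code, found = bods2c('EARTH')        # -> (399, True)
code, found = bods2c('NOT_A_BODY')   # found is False; ignore code in that case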
def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs): """Called to initialize the HTTPAdapter when no proxy is used.""" try: pool_kwargs['ssl_version'] = ssl.PROTOCOL_TLS except AttributeError: pool_kwargs['ssl_version'] = ssl.PROTOCOL_SSLv23 return super(SSLAdapter, self).init_poolmanager(connections, maxsize, block, **pool_kwargs)
Called to initialize the HTTPAdapter when no proxy is used.
def sg_arg():
    r"""Gets current command line options

    Returns:
      tf.sg_opt instance that is updated with current command line options.
    """
    if not tf.app.flags.FLAGS.__dict__['__parsed']:
        tf.app.flags.FLAGS._parse_flags()
    return tf.sg_opt(tf.app.flags.FLAGS.__dict__['__flags'])
r"""Gets current command line options Returns: tf.sg_opt instance that is updated with current command line options.
def set_params(self, **params): """ Set the parameters of this estimator. Returns ------- self """ valid_params = self.get_params() for key, value in params.items(): if key not in valid_params: raise ValueError( "Invalid parameter %s for estimator %s. " "Check the list of available parameters " "with `estimator.get_params().keys()`." % (key, self.__class__.__name__) ) setattr(self, key, value) return self
Set the parameters of this estimator. Returns ------- self
def get_term_agents(self):
    """Return dict of INDRA Agents keyed by corresponding TERMs in the EKB.

    This is meant to be used when entities, e.g. "phosphorylated ERK",
    rather than events, need to be extracted from processed natural
    language. These entities with their respective states are represented
    as INDRA Agents. Further, each key of the dictionary corresponds to the
    ID assigned by TRIPS to the given TERM that the Agent was extracted
    from.

    Returns
    -------
    agents : dict[str, indra.statements.Agent]
        Dict of INDRA Agents extracted from EKB.
    """
    terms = self.tree.findall('TERM')
    agents = {}
    assoc_links = []
    for term in terms:
        term_id = term.attrib.get('id')
        if term_id:
            agent = self._get_agent_by_id(term_id, None)
            agents[term_id] = agent
            # Handle assoc-with links
            aw = term.find('assoc-with')
            if aw is not None:
                aw_id = aw.attrib.get('id')
                if aw_id:
                    assoc_links.append((term_id, aw_id))
    # We only keep the target end of assoc-with links if both the
    # source and the target are in the list
    for source, target in assoc_links:
        if target in agents and source in agents:
            agents.pop(source)
    return agents
Return dict of INDRA Agents keyed by corresponding TERMs in the EKB. This is meant to be used when entities, e.g. "phosphorylated ERK", rather than events, need to be extracted from processed natural language. These entities with their respective states are represented as INDRA Agents. Further, each key of the dictionary corresponds to the ID assigned by TRIPS to the given TERM that the Agent was extracted from. Returns ------- agents : dict[str, indra.statements.Agent] Dict of INDRA Agents extracted from EKB.
def to_bytes(self):
    '''
    Takes a list of IPOption objects and returns a packed byte string
    of options, padded to a 4-byte boundary if necessary.
    '''
    raw = b''
    if not self._options:
        return raw
    for ipopt in self._options:
        raw += ipopt.to_bytes()
    # The trailing % 4 avoids appending a full word of padding
    # when the options are already 4-byte aligned.
    padbytes = (4 - len(raw) % 4) % 4
    raw += b'\x00' * padbytes
    return raw
Takes a list of IPOption objects and returns a packed byte string of options, padded to a 4-byte boundary if necessary.
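The corrected padding arithmetic can be sanity-checked in isolation; the trailing modulo is what keeps already-aligned option lists from growing by a full word:

for length in range(8):
    padbytes = (4 - length % 4) % 4
    print(length, padbytes)  # 0->0, 1->3, 2->2, 3->1, 4->0, ...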
def wait_for_jobs(jobs):
    """Waits for all the jobs to be running.

    Args:
        jobs(list): list of the python-grid5000 jobs to wait for


    Raises:
        Exception: if one of the jobs gets into the error state.
    """
    all_running = False
    while not all_running:
        all_running = True
        time.sleep(5)
        for job in jobs:
            job.refresh()
            scheduled = getattr(job, "scheduled_at", None)
            if scheduled is not None:
                logger.info("Waiting for %s on %s [%s]" % (job.uid,
                                                           job.site,
                                                           _date2h(scheduled)))
            all_running = all_running and job.state == "running"
            if job.state == "error":
                raise Exception("The job %s is in error state" % job)
    logger.info("All jobs are running!")
Waits for all the jobs to be running. Args: jobs(list): list of the python-grid5000 jobs to wait for Raises: Exception: if one of the jobs gets into the error state.
def authenticated( method: Callable[..., Optional[Awaitable[None]]] ) -> Callable[..., Optional[Awaitable[None]]]: """Decorate methods with this to require that the user be logged in. If the user is not logged in, they will be redirected to the configured `login url <RequestHandler.get_login_url>`. If you configure a login url with a query parameter, Tornado will assume you know what you're doing and use it as-is. If not, it will add a `next` parameter so the login page knows where to send you once you're logged in. """ @functools.wraps(method) def wrapper( # type: ignore self: RequestHandler, *args, **kwargs ) -> Optional[Awaitable[None]]: if not self.current_user: if self.request.method in ("GET", "HEAD"): url = self.get_login_url() if "?" not in url: if urllib.parse.urlsplit(url).scheme: # if login url is absolute, make next absolute too next_url = self.request.full_url() else: assert self.request.uri is not None next_url = self.request.uri url += "?" + urlencode(dict(next=next_url)) self.redirect(url) return None raise HTTPError(403) return method(self, *args, **kwargs) return wrapper
Decorate methods with this to require that the user be logged in. If the user is not logged in, they will be redirected to the configured `login url <RequestHandler.get_login_url>`. If you configure a login url with a query parameter, Tornado will assume you know what you're doing and use it as-is. If not, it will add a `next` parameter so the login page knows where to send you once you're logged in.
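Canonical Tornado usage: the handler supplies current_user and the application supplies a login_url setting; the handler, route, and cookie names here are illustrative:

import tornado.web

class ProfileHandler(tornado.web.RequestHandler):
    def get_current_user(self):
        # A falsy value here triggers the redirect / 403 logic above.
        return self.get_secure_cookie("user")

    @tornado.web.authenticated
    def get(self):
        self.write("logged in")

app = tornado.web.Application([(r"/profile", ProfileHandler)],
                              login_url="/login",
                              cookie_secret="CHANGE_ME")  # placeholder secret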
def load(filenames, prepare_data_iterator=True, batch_size=None, exclude_parameter=False, parameter_only=False):
    '''load
    Load network information from files.

    Args:
        filenames (list): List of filenames.
    Returns:
        dict: Network information.
    '''
    class Info:
        pass
    info = Info()

    proto = nnabla_pb2.NNablaProtoBuf()
    for filename in filenames:
        _, ext = os.path.splitext(filename)

        # TODO: There are some known problems.
        #   - Even when a protobuf file includes a network structure,
        #     it will not be loaded.
        #   - Even when a prototxt file includes parameters,
        #     they will not be loaded.

        if ext in ['.nntxt', '.prototxt']:
            if not parameter_only:
                with open(filename, 'rt') as f:
                    try:
                        text_format.Merge(f.read(), proto)
                    except:
                        logger.critical('Failed to read {}.'.format(filename))
                        logger.critical(
                            'Double-byte characters may have been used in the file or folder name.')
                        raise
            if len(proto.parameter) > 0:
                if not exclude_parameter:
                    nn.load_parameters(filename)
        elif ext in ['.protobuf', '.h5']:
            if not exclude_parameter:
                nn.load_parameters(filename)
            else:
                logger.info('Skip loading parameter.')

        elif ext == '.nnp':
            try:
                tmpdir = tempfile.mkdtemp()
                with zipfile.ZipFile(filename, 'r') as nnp:
                    for name in nnp.namelist():
                        _, ext = os.path.splitext(name)
                        if name == 'nnp_version.txt':
                            nnp.extract(name, tmpdir)
                            with open(os.path.join(tmpdir, name), 'rt') as f:
                                pass  # TODO currently do nothing with version.
                        elif ext in ['.nntxt', '.prototxt']:
                            nnp.extract(name, tmpdir)
                            if not parameter_only:
                                with open(os.path.join(tmpdir, name), 'rt') as f:
                                    text_format.Merge(f.read(), proto)
                            if len(proto.parameter) > 0:
                                if not exclude_parameter:
                                    nn.load_parameters(
                                        os.path.join(tmpdir, name))
                        elif ext in ['.protobuf', '.h5']:
                            nnp.extract(name, tmpdir)
                            if not exclude_parameter:
                                nn.load_parameters(os.path.join(tmpdir, name))
                            else:
                                logger.info('Skip loading parameter.')
            finally:
                shutil.rmtree(tmpdir)

    default_context = None
    if proto.HasField('global_config'):
        info.global_config = _global_config(proto)
        default_context = info.global_config.default_context
        # Check the more specific 'cuda:float' backend before the bare
        # 'cuda' substring test, which would otherwise match it as well.
        if 'cuda:float' in default_context.backend:
            try:
                import nnabla_ext.cudnn
            except:
                pass
        elif 'cuda' in default_context.backend:
            import nnabla_ext.cudnn
    else:
        import nnabla_ext.cpu
        default_context = nnabla_ext.cpu.context()

    comm = current_communicator()
    if comm:
        default_context.device_id = str(comm.rank)

    if proto.HasField('training_config'):
        info.training_config = _training_config(proto)

    info.datasets = _datasets(
        proto, prepare_data_iterator if prepare_data_iterator is not None else info.training_config.max_epoch > 0)

    info.networks = _networks(proto, default_context, batch_size)

    info.optimizers = _optimizers(
        proto, default_context, info.networks, info.datasets)

    info.monitors = _monitors(
        proto, default_context, info.networks, info.datasets)

    info.executors = _executors(proto, info.networks)

    return info
load Load network information from files. Args: filenames (list): List of filenames. Returns: dict: Network information.
def recursive_operation_ls( self, endpoint_id, depth=3, filter_after_first=True, **params ): """ Makes recursive calls to ``GET /operation/endpoint/<endpoint_id>/ls`` Does not preserve access to top level operation_ls fields, but adds a "path" field for every item that represents the full path to that item. :rtype: iterable of :class:`GlobusResponse <globus_sdk.response.GlobusResponse>` **Parameters** ``endpoint_id`` (*string*) The endpoint being recursively ls'ed. If no "path" is given in params, the start path is determined by this endpoint. ``depth`` (*int*) The maximum file depth the recursive ls will go to. ``filter_after_first`` (*bool*) If False, any "filter" in params will only be applied to the first, top level ls, all results beyond that will be unfiltered. ``params`` Parameters that will be passed through as query params. **Examples** >>> tc = globus_sdk.TransferClient(...) >>> for entry in tc.recursive_operation_ls(ep_id, path="/~/project1/"): >>> print(entry["path"], entry["type"]) **External Documentation** See `List Directory Contents \ <https://docs.globus.org/api/transfer/file_operations/#list_directory_contents>`_ in the REST documentation for details, but note that top level data fields are no longer available and an additional per item "path" field is added. """ endpoint_id = safe_stringify(endpoint_id) self.logger.info( "TransferClient.recursive_operation_ls({}, {}, {})".format( endpoint_id, depth, params ) ) return RecursiveLsResponse(self, endpoint_id, depth, filter_after_first, params)
Makes recursive calls to ``GET /operation/endpoint/<endpoint_id>/ls`` Does not preserve access to top level operation_ls fields, but adds a "path" field for every item that represents the full path to that item. :rtype: iterable of :class:`GlobusResponse <globus_sdk.response.GlobusResponse>` **Parameters** ``endpoint_id`` (*string*) The endpoint being recursively ls'ed. If no "path" is given in params, the start path is determined by this endpoint. ``depth`` (*int*) The maximum file depth the recursive ls will go to. ``filter_after_first`` (*bool*) If False, any "filter" in params will only be applied to the first, top level ls, all results beyond that will be unfiltered. ``params`` Parameters that will be passed through as query params. **Examples** >>> tc = globus_sdk.TransferClient(...) >>> for entry in tc.recursive_operation_ls(ep_id, path="/~/project1/"): >>> print(entry["path"], entry["type"]) **External Documentation** See `List Directory Contents \ <https://docs.globus.org/api/transfer/file_operations/#list_directory_contents>`_ in the REST documentation for details, but note that top level data fields are no longer available and an additional per item "path" field is added.
def sam2fastq(line):
    """
    Convert a SAM alignment line (split into fields) into the four lines
    of a FASTQ record.
    """
    fastq = []
    fastq.append('@%s' % line[0])
    fastq.append(line[9])
    fastq.append('+%s' % line[0])
    fastq.append(line[10])
    return fastq
Convert a SAM alignment line (split into fields) into the four lines of a FASTQ record.
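An illustrative record: only fields 0 (QNAME), 9 (SEQ) and 10 (QUAL) of the split SAM line are used:

sam = "read1\t0\tchr1\t100\t60\t4M\t*\t0\t0\tACGT\tIIII".split('\t')
print('\n'.join(sam2fastq(sam)))
# @read1
# ACGT
# +read1
# IIII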
def get_public_key(self): """Get the PublicKey for this PrivateKey.""" return PublicKey.from_verifying_key( self._private_key.get_verifying_key(), network=self.network, compressed=self.compressed)
Get the PublicKey for this PrivateKey.
def assess_angmom(X):
    """
    Checks for a change of sign in each component of the angular momentum
    along the orbit. Returns an array whose ith entry is 1 if component i
    never changes sign and 0 if it does.
        Box      = (0,0,0)
        S.A loop = (0,0,1)
        L.A loop = (1,0,0)
    """
    L = angmom(X[0])
    loop = np.array([1, 1, 1])
    for i in X[1:]:
        L0 = angmom(i)
        if L0[0] * L[0] < 0.:
            loop[0] = 0
        if L0[1] * L[1] < 0.:
            loop[1] = 0
        if L0[2] * L[2] < 0.:
            loop[2] = 0
    return loop
Checks for a change of sign in each component of the angular momentum along the orbit. Returns an array whose ith entry is 1 if component i never changes sign and 0 if it does. Box = (0,0,0) S.A loop = (0,0,1) L.A loop = (1,0,0)
def create_handler(target: str): """Create a handler for logging to ``target``""" if target == 'stderr': return logging.StreamHandler(sys.stderr) elif target == 'stdout': return logging.StreamHandler(sys.stdout) else: return logging.handlers.WatchedFileHandler(filename=target)
Create a handler for logging to ``target``
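A quick sketch wiring all three target kinds into one logger; the file path is a placeholder:

import logging
import logging.handlers  # WatchedFileHandler lives here
import sys

log = logging.getLogger("app")
log.addHandler(create_handler("stderr"))
log.addHandler(create_handler("/var/log/app.log"))  # placeholder path
log.warning("routed to stderr and to the watched file")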
def decision_function(self, pairs): """Returns the decision function used to classify the pairs. Returns the opposite of the learned metric value between samples in every pair, to be consistent with scikit-learn conventions. Hence it should ideally be low for dissimilar samples and high for similar samples. This is the decision function that is used to classify pairs as similar (+1), or dissimilar (-1). Parameters ---------- pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2) 3D Array of pairs to predict, with each row corresponding to two points, or 2D array of indices of pairs if the metric learner uses a preprocessor. Returns ------- y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,) The predicted decision function value for each pair. """ pairs = check_input(pairs, type_of_inputs='tuples', preprocessor=self.preprocessor_, estimator=self, tuple_size=self._tuple_size) return - self.score_pairs(pairs)
Returns the decision function used to classify the pairs. Returns the opposite of the learned metric value between samples in every pair, to be consistent with scikit-learn conventions. Hence it should ideally be low for dissimilar samples and high for similar samples. This is the decision function that is used to classify pairs as similar (+1), or dissimilar (-1). Parameters ---------- pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2) 3D Array of pairs to predict, with each row corresponding to two points, or 2D array of indices of pairs if the metric learner uses a preprocessor. Returns ------- y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,) The predicted decision function value for each pair.
def render(template: typing.Union[str, Template], **kwargs): """ Renders a template string using Jinja2 and the Cauldron templating environment. :param template: The string containing the template to be rendered :param kwargs: Any named arguments to pass to Jinja2 for use in rendering :return: The rendered template string """ if not hasattr(template, 'render'): template = get_environment().from_string(textwrap.dedent(template)) return template.render( cauldron_template_uid=make_template_uid(), **kwargs )
Renders a template string using Jinja2 and the Cauldron templating environment. :param template: The string containing the template to be rendered :param kwargs: Any named arguments to pass to Jinja2 for use in rendering :return: The rendered template string
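An illustrative call with an inline template; the variable names are arbitrary:

html = render("<p>{{ name }} scored {{ score }}</p>", name="demo", score=42)
# -> '<p>demo scored 42</p>'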
def solve_with_sdpa(sdp, solverparameters=None): """Helper function to write out the SDP problem to a temporary file, call the solver, and parse the output. :param sdp: The SDP relaxation to be solved. :type sdp: :class:`ncpol2sdpa.sdp`. :param solverparameters: Optional parameters to SDPA. :type solverparameters: dict of str. :returns: tuple of float and list -- the primal and dual solution of the SDP, respectively, and a status string. """ solverexecutable = detect_sdpa(solverparameters) if solverexecutable is None: raise OSError("SDPA is not in the path or the executable provided is" + " not correct") primal, dual = 0, 0 tempfile_ = tempfile.NamedTemporaryFile() tmp_filename = tempfile_.name tempfile_.close() tmp_dats_filename = tmp_filename + ".dat-s" tmp_out_filename = tmp_filename + ".out" write_to_sdpa(sdp, tmp_dats_filename) command_line = [solverexecutable, "-ds", tmp_dats_filename, "-o", tmp_out_filename] if solverparameters is not None: for key, value in list(solverparameters.items()): if key == "executable": continue elif key == "paramsfile": command_line.extend(["-p", value]) else: raise ValueError("Unknown parameter for SDPA: " + key) if sdp.verbose < 1: with open(os.devnull, "w") as fnull: call(command_line, stdout=fnull, stderr=fnull) else: call(command_line) primal, dual, x_mat, y_mat, status = read_sdpa_out(tmp_out_filename, True, True) if sdp.verbose < 2: os.remove(tmp_dats_filename) os.remove(tmp_out_filename) return primal+sdp.constant_term, \ dual+sdp.constant_term, x_mat, y_mat, status
Helper function to write out the SDP problem to a temporary file, call the solver, and parse the output. :param sdp: The SDP relaxation to be solved. :type sdp: :class:`ncpol2sdpa.sdp`. :param solverparameters: Optional parameters to SDPA. :type solverparameters: dict of str. :returns: tuple of float and list -- the primal and dual solution of the SDP, respectively, and a status string.
def set_ntp_server(server): """Sets the NTP server on Linux :param server: (str) NTP server IP or hostname :return: None :raises CommandError """ log = logging.getLogger(mod_logger + '.set_ntp_server') # Ensure the hostname is a str if not isinstance(server, basestring): msg = 'server argument must be a string' log.error(msg) raise CommandError(msg) # Ensure the ntp.conf file exists ntp_conf = '/etc/ntp.conf' if not os.path.isfile(ntp_conf): msg = 'File not found: {f}'.format(f=ntp_conf) log.error(msg) raise CommandError(msg) log.info('Clearing out existing server entries from %s...', ntp_conf) try: sed(ntp_conf, '^server.*', '', g=0) except CommandError: _, ex, trace = sys.exc_info() msg = 'Unable to update file: {f}\n{e}'.format(f=ntp_conf, e=str(ex)) log.error(msg) raise CommandError, msg, trace out_str = 'server ' + server log.info('Appending server: %s', out_str) with open(ntp_conf, 'a') as f: f.write(out_str) log.info('Successfully updated file: {f}'.format(f=ntp_conf))
Sets the NTP server on Linux :param server: (str) NTP server IP or hostname :return: None :raises CommandError
def kill_line(event): """ Kill the text from the cursor to the end of the line. If we are at the end of the line, this should remove the newline. (That way, it is possible to delete multiple lines by executing this command multiple times.) """ buff = event.current_buffer if event.arg < 0: deleted = buff.delete_before_cursor(count=-buff.document.get_start_of_line_position()) else: if buff.document.current_char == '\n': deleted = buff.delete(1) else: deleted = buff.delete(count=buff.document.get_end_of_line_position()) event.cli.clipboard.set_text(deleted)
Kill the text from the cursor to the end of the line. If we are at the end of the line, this should remove the newline. (That way, it is possible to delete multiple lines by executing this command multiple times.)
def input_dir(self, dirname):
    """Check all files in this directory and all subdirectories."""
    dirname = dirname.rstrip('/')
    if self.excluded(dirname):
        return 0
    counters = self.options.report.counters
    verbose = self.options.verbose
    filepatterns = self.options.filename
    runner = self.runner
    for root, dirs, files in os.walk(dirname):
        if verbose:
            print('directory ' + root)
        counters['directories'] += 1
        for subdir in sorted(dirs):
            if self.excluded(subdir, root):
                dirs.remove(subdir)
        for filename in sorted(files):
            # run only on files that match a pattern and are not excluded
            if ((filename_match(filename, filepatterns) and
                 not self.excluded(filename, root))):
                runner(os.path.join(root, filename))
Check all files in this directory and all subdirectories.
def flushall(self, async_op=False): """ Remove all keys from all databases. :param async_op: lets the entire dataset to be freed asynchronously. \ Defaults to False """ if async_op: fut = self.execute(b'FLUSHALL', b'ASYNC') else: fut = self.execute(b'FLUSHALL') return wait_ok(fut)
Remove all keys from all databases. :param async_op: lets the entire dataset to be freed asynchronously. \ Defaults to False
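A usage sketch against an aioredis 1.x style pool; the URL and setup are assumptions:

import asyncio
import aioredis

async def wipe():
    redis = await aioredis.create_redis_pool('redis://localhost')  # placeholder URL
    await redis.flushall(async_op=True)  # dataset is freed in the background
    redis.close()
    await redis.wait_closed()

asyncio.get_event_loop().run_until_complete(wipe())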
def get_grade_entries_by_query(self, grade_entry_query): """Gets a list of entries matching the given grade entry query. arg: grade_entry_query (osid.grading.GradeEntryQuery): the grade entry query return: (osid.grading.GradeEntryList) - the returned ``GradeEntryList`` raise: NullArgument - ``grade_entry_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``grade_entry_query`` is not of this service *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceQuerySession.get_resources_by_query and_list = list() or_list = list() for term in grade_entry_query._query_terms: if '$in' in grade_entry_query._query_terms[term] and '$nin' in grade_entry_query._query_terms[term]: and_list.append( {'$or': [{term: {'$in': grade_entry_query._query_terms[term]['$in']}}, {term: {'$nin': grade_entry_query._query_terms[term]['$nin']}}]}) else: and_list.append({term: grade_entry_query._query_terms[term]}) for term in grade_entry_query._keyword_terms: or_list.append({term: grade_entry_query._keyword_terms[term]}) if or_list: and_list.append({'$or': or_list}) view_filter = self._view_filter() if view_filter: and_list.append(view_filter) if and_list: query_terms = {'$and': and_list} collection = JSONClientValidated('grading', collection='GradeEntry', runtime=self._runtime) result = collection.find(query_terms).sort('_id', DESCENDING) else: result = [] return objects.GradeEntryList(result, runtime=self._runtime, proxy=self._proxy)
Gets a list of entries matching the given grade entry query. arg: grade_entry_query (osid.grading.GradeEntryQuery): the grade entry query return: (osid.grading.GradeEntryList) - the returned ``GradeEntryList`` raise: NullArgument - ``grade_entry_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``grade_entry_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
def to_pb(self): """Converts the garbage collection rule to a protobuf. :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. """ max_age = _helpers._timedelta_to_duration_pb(self.max_age) return table_v2_pb2.GcRule(max_age=max_age)
Converts the garbage collection rule to a protobuf. :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object.
def _fetch_targets(self, api_client, q, target): ''' Make an API call defined in metadata.json. Parse the returned object as implemented in the "parse_[object name]" method. :param api_client: :param q: :param target: :return: ''' # Handle & format the target type target_type, response_attribute, list_method_name, list_params, ignore_list_error = target list_method = getattr(api_client, list_method_name) try: targets = handle_truncated_response(list_method, list_params, [response_attribute])[response_attribute] except Exception as e: if not ignore_list_error: printException(e) targets = [] setattr(self, '%s_count' % target_type, len(targets)) self.fetchstatuslogger.counts[target_type]['discovered'] += len(targets) region = api_client._client_config.region_name # Queue resources for target in targets: # call callback methods try: callback = getattr(self, 'parse_%s' % target_type[0:-1]) except: callback = self.store_target target['scout2_target_type'] = target_type if q: # Add to the queue q.put((callback, region, target))
Make an API call defined in metadata.json. Parse the returned object as implemented in the "parse_[object name]" method. :param api_client: :param q: :param target: :return:
def open_spec(f):
    """
    :param f: file object with spec data

    spec file is a yaml document that specifies which modules
    can be loaded.

        modules - list of base modules that can be loaded
        pths - list of .pth files to load

    """
    import ruamel.yaml as yaml
    keys = ['modules', 'pths', 'test_import', 'install_hints', 'extra_paths']

    data = yaml.safe_load(f)

    parsed = dict()
    for k in keys:
        v = data.get(k, [])
        # Items are always lists
        if isinstance(v, basestring):
            parsed[k] = [m for m in re.split(r",| ", v)]
        else:
            parsed[k] = v

    return parsed
:param f: file object with spec data spec file is a yaml document that specifies which modules can be loaded. modules - list of base modules that can be loaded pths - list of .pth files to load
def _get_rows(self, table): """Returns rows from table""" childnodes = table.childNodes qname_childnodes = [(s.qname[1], s) for s in childnodes] return [node for name, node in qname_childnodes if name == u'table-row']
Returns rows from table
def prepare_axes(wave, flux, fig=None, ax_lower=(0.1, 0.1), ax_dim=(0.85, 0.65)): """Create fig and axes if needed and layout axes in fig.""" # Axes location in figure. if not fig: fig = plt.figure() ax = fig.add_axes([ax_lower[0], ax_lower[1], ax_dim[0], ax_dim[1]]) ax.plot(wave, flux) return fig, ax
Create fig and axes if needed and layout axes in fig.
def get_extents(self, element, ranges, range_type='combined'): """ A Chord plot is always drawn on a unit circle. """ xdim, ydim = element.nodes.kdims[:2] if range_type not in ('combined', 'data', 'extents'): return xdim.range[0], ydim.range[0], xdim.range[1], ydim.range[1] no_labels = (element.nodes.get_dimension(self.label_index) is None and self.labels is None) rng = 1.1 if no_labels else 1.4 x0, x1 = max_range([xdim.range, (-rng, rng)]) y0, y1 = max_range([ydim.range, (-rng, rng)]) return (x0, y0, x1, y1)
A Chord plot is always drawn on a unit circle.
def compute(self, *inputs, **kwargs):
    """
    Compute based on NeuralVariable.
    :type inputs: list of NeuralVariable
    :return: NeuralVariable
    """
    from deepy.core.neural_var import NeuralVariable
    from deepy.core.graph import graph
    if type(inputs[0]) != NeuralVariable:
        raise SystemError("The input of `compute` must be a NeuralVariable")
    dims = [t.dim() for t in inputs]
    if len(inputs) == 1:
        self.init(input_dim=dims[0])
    else:
        self.init(input_dims=dims)
    # Check block
    if self.parameters and not self._linked_block:
        self.belongs_to(graph.default_block())
    # convert kwargs
    train_kwargs, _, _ = convert_to_theano_var(kwargs)

    output = self.compute_tensor(*[t.tensor for t in inputs], **train_kwargs)

    if type(output) != list and type(output) != tuple:
        return NeuralVariable(output, dim=self.output_dim)
    else:
        return [NeuralVariable(*item) for item in zip(output, self.output_dims)]
Compute based on NeuralVariable. :type inputs: list of NeuralVariable :return: NeuralVariable
def watched(self, option): """ Set whether to filter by a user's watchlist. Options available are user.ONLY, user.NOT, and None; default is None. """ params = join_params(self.parameters, {"watched": option}) return self.__class__(**params)
Set whether to filter by a user's watchlist. Options available are user.ONLY, user.NOT, and None; default is None.
def xpath(self, selector: str, *, first: bool = False, _encoding: str = None) -> _XPath: """Given an XPath selector, returns a list of :class:`Element <Element>` objects or a single one. :param selector: XPath Selector to use. :param first: Whether or not to return just the first result. :param _encoding: The encoding format. If a sub-selector is specified (e.g. ``//a/@href``), a simple list of results is returned. See W3School's `XPath Examples <https://www.w3schools.com/xml/xpath_examples.asp>`_ for more details. If ``first`` is ``True``, only returns the first :class:`Element <Element>` found. """ selected = self.lxml.xpath(selector) elements = [ Element(element=selection, default_encoding=_encoding or self.encoding) if not isinstance(selection, etree._ElementUnicodeResult) else str(selection) for selection in selected ] return _get_first_or_list(elements, first)
Given an XPath selector, returns a list of :class:`Element <Element>` objects or a single one. :param selector: XPath Selector to use. :param first: Whether or not to return just the first result. :param _encoding: The encoding format. If a sub-selector is specified (e.g. ``//a/@href``), a simple list of results is returned. See W3School's `XPath Examples <https://www.w3schools.com/xml/xpath_examples.asp>`_ for more details. If ``first`` is ``True``, only returns the first :class:`Element <Element>` found.
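requests-html style usage of the method above; the URL is illustrative:

from requests_html import HTMLSession

session = HTMLSession()
r = session.get('https://example.org/')       # illustrative URL
hrefs = r.html.xpath('//a/@href')             # sub-selector -> list of str
title = r.html.xpath('//title', first=True)   # single Element (or None)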
def run(self):
    '''
        run - The thread main. Will attempt to stop and join the attached thread.
    '''
    # Try to silence default exception printing.
    self.otherThread._Thread__stderr = self._stderr
    if hasattr(self.otherThread, '_Thread__stop'):
        # If py2, call this first to start thread termination cleanly.
        #   Python3 does not need such ( nor does it provide.. )
        self.otherThread._Thread__stop()
    while self.otherThread.isAlive():
        # We keep raising the exception in a loop in case it gets caught;
        # hopefully this propagates far enough out to terminate the thread.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self.otherThread.ident), ctypes.py_object(self.exception))
        self.otherThread.join(self.repeatEvery)

    try:
        self._stderr.close()
    except:
        pass
run - The thread main. Will attempt to stop and join the attached thread.
def securityEventWS(symbols=None, on_data=None): '''https://iextrading.com/developer/docs/#security-event''' symbols = _strToList(symbols) sendinit = ({'symbols': symbols, 'channels': ['securityevent']},) return _stream(_wsURL('deep'), sendinit, on_data)
https://iextrading.com/developer/docs/#security-event
def is_all_field_none(self): """ :rtype: bool """ if self._id_ is not None: return False if self._created is not None: return False if self._updated is not None: return False if self._avatar is not None: return False if self._currency is not None: return False if self._description is not None: return False if self._daily_limit is not None: return False if self._daily_spent is not None: return False if self._overdraft_limit is not None: return False if self._balance is not None: return False if self._alias is not None: return False if self._public_uuid is not None: return False if self._status is not None: return False if self._sub_status is not None: return False if self._reason is not None: return False if self._reason_description is not None: return False if self._all_co_owner is not None: return False if self._user_id is not None: return False if self._monetary_account_profile is not None: return False if self._notification_filters is not None: return False if self._setting is not None: return False return True
:rtype: bool
def make_sub_call(id_, lineno, params): """ This will return an AST node for a sub/procedure call. """ return symbols.CALL.make_node(id_, params, lineno)
This will return an AST node for a sub/procedure call.
def chain_HSPs(blast, xdist=100, ydist=100):
    """
    Takes a list of BlastLines (or a BlastSlow instance) and returns a list
    of chained BlastLines.
    """
    key = lambda x: (x.query, x.subject)
    blast.sort(key=key)

    clusters = Grouper()
    for qs, points in groupby(blast, key=key):
        points = sorted(list(points),
                        key=lambda x: (x.qstart, x.qstop, x.sstart, x.sstop))

        n = len(points)
        for i in xrange(n):
            a = points[i]
            clusters.join(a)
            for j in xrange(i + 1, n):
                b = points[j]

                # x-axis distance
                del_x = get_distance(a, b)
                if del_x > xdist:
                    break
                # y-axis distance
                del_y = get_distance(a, b, xaxis=False)
                if del_y > ydist:
                    continue
                # otherwise join
                clusters.join(a, b)

    chained_hsps = [combine_HSPs(x) for x in clusters]
    key = lambda x: (x.query, -x.score if x.has_score else 0)
    chained_hsps = sorted(chained_hsps, key=key)

    return chained_hsps
Takes a list of BlastLines (or a BlastSlow instance) and returns a list of chained BlastLines.
def get(self, path, data=None, return_fields=None):
    """Call the Infoblox device to get the obj for the data passed in

    :param str path: The object reference path
    :param dict data: The data for the get request
    :rtype: requests.Response

    """
    return self.session.get(self._request_url(path, return_fields),
                            data=json.dumps(data),
                            auth=self.auth,
                            verify=False)
Call the Infoblox device to get the obj for the data passed in :param str path: The object reference path :param dict data: The data for the get request :rtype: requests.Response
def guest_session_new(self, **kwargs):
    """
    Generate a guest session id.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_path('guest_session_new')

    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Generate a guest session id. Returns: A dict representation of the JSON returned from the API.
def _all_params(arr):
    """
    Ensures that the argument is a list that either is empty or contains
    only GPParamSpec instances
    :param arr: list
    :return:
    """
    # Check the argument itself, not the literal [] (which is always a list).
    if not isinstance(arr, list):
        raise TypeError("non-list value found for parameters")
    return all(isinstance(x, GPParamSpec) for x in arr)
Ensures that the argument is a list that either is empty or contains only GPParamSpec instances :param arr: list :return:
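With the isinstance check pointed at arr (rather than the literal []), the guard behaves as documented:

_all_params([])            # -> True: an empty list is accepted
_all_params("not a list")  # raises TypeError after the fix
# _all_params([spec_a, spec_b]) is True only when every item is a GPParamSpec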
def set_page_property(self, page_id, data): """ Set the page (content) property e.g. add hash parameters :param page_id: content_id format :param data: data should be as json data :return: """ url = 'rest/api/content/{page_id}/property'.format(page_id=page_id) json_data = data return self.post(path=url, data=json_data)
Set the page (content) property e.g. add hash parameters :param page_id: content_id format :param data: data should be as json data :return:
def _find_supported_challenge(authzr, responders):
    """
    Find a challenge combination that consists of a single challenge that the
    responder can satisfy.

    :param ~acme.messages.AuthorizationResource authzr:
        The authorization to examine.

    :type responders: List[`~txacme.interfaces.IResponder`]
    :param responders: The possible responders to use.

    :raises NoSupportedChallenges: When a suitable challenge combination is
        not found.

    :rtype: Tuple[`~txacme.interfaces.IResponder`,
            `~acme.messages.ChallengeBody`]
    :return: The responder and challenge that were found.
    """
    matches = [
        (responder, challbs[0])
        for challbs in authzr.body.resolved_combinations
        for responder in responders
        if [challb.typ for challb in challbs] == [responder.challenge_type]]
    if len(matches) == 0:
        raise NoSupportedChallenges(authzr)
    else:
        return matches[0]
Find a challenge combination that consists of a single challenge that the responder can satisfy. :param ~acme.messages.AuthorizationResource authzr: The authorization to examine. :type responders: List[`~txacme.interfaces.IResponder`] :param responders: The possible responders to use. :raises NoSupportedChallenges: When a suitable challenge combination is not found. :rtype: Tuple[`~txacme.interfaces.IResponder`, `~acme.messages.ChallengeBody`] :return: The responder and challenge that were found.
def Route(resource=None, methods=["get", "post", "put", "delete"], schema=None):
    """
    Class-method decorator that registers the wrapped handler on the router
    for ``resource`` under each HTTP method in ``methods``.
    """
    def _route(func):
        def wrapper(self, *args, **kwargs):
            # A "test" argument means: skip the routing wrapper this time
            # and return the original func immediately.
            if kwargs.get("test", False):
                kwargs.pop("test")
                return func(self, *args, **kwargs)
            _methods = methods
            if isinstance(methods, str):
                _methods = [methods]
            route = self.router.route(resource)
            for method in _methods:
                getattr(route, method)(func, schema)

        # Ordered by declaration sequence
        # http://stackoverflow.com/questions/4459531/how-to-read-class-attributes-in-the-same-order-as-declared
        f_locals = sys._getframe(1).f_locals
        _order = len([v for v in f_locals.itervalues()
                      if hasattr(v, '__call__') and hasattr(v, '__name__') and
                      v.__name__ == "wrapper"])
        wrapper.__dict__["_order"] = _order

        return wrapper
    return _route
Class-method decorator that registers the wrapped handler on the router for ``resource`` under each HTTP method in ``methods``.
def build_info_string(info):
    """Build a new vcf INFO string based on the information in the info dict.

    The info is a dictionary with vcf info keys as keys and lists of vcf
    values as values. If an annotation has no value, its entry in info is
    False and only the key is emitted.

    Args:
        info (dict): A dictionary with information from the vcf file

    Returns:
        String: A string that is on the proper vcf format for the INFO column
    """
    info_list = []
    for annotation in info:
        if info[annotation]:
            info_list.append('='.join([annotation, ','.join(info[annotation])]))
        else:
            info_list.append(annotation)
    return ';'.join(info_list)
Build a new vcf INFO string based on the information in the info dict. The info is a dictionary with vcf info keys as keys and lists of vcf values as values. If an annotation has no value, its entry in info is False and only the key is emitted. Args: info (dict): A dictionary with information from the vcf file Returns: String: A string that is on the proper vcf format for the INFO column
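For example, with arbitrary INFO keys (a False value emits the bare flag):

info = {'AF': ['0.5'], 'DP': ['10', '12'], 'DB': False}
build_info_string(info)  # -> 'AF=0.5;DP=10,12;DB'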
def list_components(self): """List all of the registered component names. This list will include all of the permanently stored components as well as any temporary components that were added with a temporary=True flag in this session. Returns: list of str: The list of component names. Any of these names can be passed to get_component as is to get the corresponding IOTile object. """ overlays = list(self._component_overlays) items = self.kvstore.get_all() return overlays + [x[0] for x in items if not x[0].startswith('config:')]
List all of the registered component names. This list will include all of the permanently stored components as well as any temporary components that were added with a temporary=True flag in this session. Returns: list of str: The list of component names. Any of these names can be passed to get_component as is to get the corresponding IOTile object.
def simxGetJointPosition(clientID, jointHandle, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' position = ct.c_float() return c_GetJointPosition(clientID, jointHandle, ct.byref(position), operationMode), position.value
Please have a look at the function description/documentation in the V-REP user manual