def make_error_redirect(self, authorization_error=None):
    """
    Return a Django ``HttpResponseRedirect`` describing the request failure.

    If the :py:meth:`validate` method raises an error, the authorization
    endpoint should return the result of calling this method like so:

        >>> auth_code_generator = (
        >>>     AuthorizationCodeGenerator('/oauth2/missing_redirect_uri/'))
        >>> try:
        >>>     auth_code_generator.validate(request)
        >>> except AuthorizationError as authorization_error:
        >>>     return auth_code_generator.make_error_redirect(authorization_error)

    If there is no known Client ``redirect_uri`` (because it is malformed, or
    the Client is invalid, or if the supplied ``redirect_uri`` does not match
    the registered value, or some other request failure) then the response
    will redirect to the ``missing_redirect_uri`` passed to the
    :py:meth:`__init__` method.

    Also used to signify user denial; call this method without passing in the
    optional ``authorization_error`` argument to return a generic
    :py:class:`AccessDenied` message.

        >>> if not user_accepted_request:
        >>>     return auth_code_generator.make_error_redirect()

    """
    if not self.redirect_uri:
        return HttpResponseRedirect(self.missing_redirect_uri)

    authorization_error = (authorization_error
                           or AccessDenied('user denied the request'))
    response_params = get_error_details(authorization_error)
    # From http://tools.ietf.org/html/rfc6749#section-4.1.2.1 :
    #
    #     REQUIRED if the "state" parameter was present in the client
    #     authorization request. The exact value received from the
    #     client.
    #
    if self.state is not None:
        response_params['state'] = self.state

    return HttpResponseRedirect(
        update_parameters(self.redirect_uri, response_params))
def option(default_value):
    '''
    The @option(x) decorator, usable in an immutable class (see immutable),
    is identical to the @param decorator except that the parameter is not
    required and instead takes on the default value x when the immutable is
    created.
    '''
    def _option(f):
        (args, varargs, kwargs, dflts) = getargspec_py27like(f)
        if varargs is not None or kwargs is not None or dflts:
            raise ValueError(
                'Options may not accept variable, variadic keyword, or default arguments')
        if len(args) != 1:
            raise ValueError('Parameter transformation functions must take exactly one argument')
        f._pimms_immutable_data_ = {}
        f._pimms_immutable_data_['is_param'] = True
        f._pimms_immutable_data_['default_value'] = default_value
        f._pimms_immutable_data_['name'] = f.__name__
        f = staticmethod(f)
        return f
    return _option
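A minimal usage sketch, assuming the companion ``immutable`` class decorator referenced in the docstring; the class and parameter names here are illustrative, not taken from the source:

# Hypothetical usage sketch; `immutable` is the companion class decorator
# mentioned in the docstring, and Point/z are illustrative names.
@immutable
class Point(object):
    @option(0.0)
    def z(z):
        # the single-argument transformation function required by @option
        return float(z)

p = Point()       # z falls back to the default value 0.0
q = Point(z=2)    # z is transformed by the function above to 2.0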
def nginx_web_ssl_config(self):
    """
    Nginx web ssl config
    """
    dt = [self.nginx_web_dir, self.nginx_ssl_dir]
    return nginx_conf_string.simple_ssl_web_conf.format(dt=dt)
def send_game(self, *args, **kwargs):
    """See :func:`send_game`"""
    return send_game(*args, **self._merge_overrides(**kwargs)).run()
def sequence_diversity(pos, ac, start=None, stop=None, is_accessible=None):
    """Estimate nucleotide diversity within a given region, which is the
    average proportion of sites (including monomorphic sites not present in
    the data) that differ between randomly chosen pairs of chromosomes.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    pi : ndarray, float, shape (n_windows,)
        Nucleotide diversity.

    Notes
    -----
    If start and/or stop are not provided, uses the difference between the
    last and the first position as a proxy for the total number of sites,
    which can overestimate the sequence diversity.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> pi = allel.sequence_diversity(pos, ac, start=1, stop=31)
    >>> pi
    0.13978494623655915

    """
    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    ac = asarray_ndim(ac, 2)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # deal with subregion
    if start is not None or stop is not None:
        loc = pos.locate_range(start, stop)
        pos = pos[loc]
        ac = ac[loc]
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # sum differences over variants
    mpd_sum = np.sum(mpd)

    # calculate value per base
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start-1:stop])

    pi = mpd_sum / n_bases
    return pi
def reqHistoricalData(self, id, contract, endDateTime, durationStr,
                      barSizeSetting, whatToShow, useRTH, formatDate,
                      chartOptions):
    """reqHistoricalData(EClientSocketBase self, TickerId id, Contract contract,
    IBString const & endDateTime, IBString const & durationStr,
    IBString const & barSizeSetting, IBString const & whatToShow,
    int useRTH, int formatDate, TagValueListSPtr const & chartOptions)"""
    return _swigibpy.EClientSocketBase_reqHistoricalData(
        self, id, contract, endDateTime, durationStr, barSizeSetting,
        whatToShow, useRTH, formatDate, chartOptions)
def _get_qgrams(self, src, tar, qval=0, skip=0):
    """Return the Q-Grams in src & tar.

    Parameters
    ----------
    src : str
        Source string (or QGrams/Counter objects) for comparison
    tar : str
        Target string (or QGrams/Counter objects) for comparison
    qval : int
        The length of each q-gram; 0 for non-q-gram version
    skip : int
        The number of characters to skip (only works when src and tar are
        strings)

    Returns
    -------
    tuple of Counters
        Q-Grams

    Examples
    --------
    >>> pe = _TokenDistance()
    >>> pe._get_qgrams('AT', 'TT', qval=2)
    (QGrams({'$A': 1, 'AT': 1, 'T#': 1}),
     QGrams({'$T': 1, 'TT': 1, 'T#': 1}))

    """
    if isinstance(src, Counter) and isinstance(tar, Counter):
        return src, tar
    if qval > 0:
        return QGrams(src, qval, '$#', skip), QGrams(tar, qval, '$#', skip)
    return Counter(src.strip().split()), Counter(tar.strip().split())
def top_commenters(self, num):
    """Return a markdown representation of the top commenters."""
    num = min(num, len(self.commenters))
    if num <= 0:
        return ''

    top_commenters = sorted(
        iteritems(self.commenters),
        key=lambda x: (-sum(y.score for y in x[1]),
                       -len(x[1]), str(x[0])))[:num]

    retval = self.post_header.format('Top Commenters')
    for author, comments in top_commenters:
        retval += '1. {} ({}, {} comment{})\n'.format(
            self._user(author),
            self._points(sum(x.score for x in comments)),
            len(comments), 's' if len(comments) != 1 else '')
    return '{}\n'.format(retval)
def salt_ssh():
    '''
    Execute the salt-ssh system
    '''
    import salt.cli.ssh

    if '' in sys.path:
        sys.path.remove('')
    try:
        client = salt.cli.ssh.SaltSSH()
        _install_signal_handlers(client)
        client.run()
    except SaltClientError as err:
        trace = traceback.format_exc()
        try:
            hardcrash = client.options.hard_crash
        except (AttributeError, KeyError):
            hardcrash = False
        _handle_interrupt(
            SystemExit(err),
            err,
            hardcrash, trace=trace)
def attach_events(*args):
    """Class decorator for SQLAlchemy models to attach listeners on class
    methods decorated with :func:`.on`

    Usage::

        @attach_events
        class User(Model):
            email = Column(String(50))

            @on('email', 'set')
            def lowercase_email(self, new_value, old_value, initiating_event):
                self.email = new_value.lower()

    """
    def wrapper(cls):
        for name, fn in cls.__dict__.items():
            if not name.startswith('__') and hasattr(fn, _SQLAlchemyEvent.ATTR):
                e = getattr(fn, _SQLAlchemyEvent.ATTR)
                if e.field_name:
                    event.listen(getattr(cls, e.field_name), e.event_name, fn,
                                 **e.listen_kwargs)
                else:
                    event.listen(cls, e.event_name, fn, **e.listen_kwargs)
        return cls

    if args and callable(args[0]):
        return wrapper(args[0])
    return wrapper
def set_description(self):
    """
    Set the node description
    """
    if self.device_info['type'] == 'Router':
        self.node['description'] = '%s %s' % (self.device_info['type'],
                                              self.device_info['model'])
    else:
        self.node['description'] = self.device_info['desc']
def piper(self, in_sock, out_sock, out_addr, onkill):
    "Worker thread for data reading"
    try:
        while True:
            written = in_sock.recv(32768)
            if not written:
                try:
                    out_sock.shutdown(socket.SHUT_WR)
                except socket.error:
                    self.threads[onkill].kill()
                break
            try:
                out_sock.sendall(written)
            except socket.error:
                pass
            self.data_handled += len(written)
    except greenlet.GreenletExit:
        return
def eigb(A, y0, eps, rmax=150, nswp=20, max_full_size=1000, verb=1):
    """Approximate computation of minimal eigenvalues in tensor train format

    This function uses the alternating least-squares algorithm for the
    computation of several minimal eigenvalues. If you want maximal
    eigenvalues, just send -A to the function.

    :Reference:

        S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V.
        Savostyanov. Computation of extreme eigenvalues in higher dimensions
        using block tensor train format. Computer Phys. Comm.,
        185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017

    :param A: Matrix in the TT-format
    :type A: matrix
    :param y0: Initial guess in the block TT-format, r(d+1) is the number of
        eigenvalues sought
    :type y0: tensor
    :param eps: Accuracy required
    :type eps: float
    :param rmax: Maximal rank
    :type rmax: int
    :param kickrank: Addition rank; the larger, the more robust the method
    :type kickrank: int
    :rtype: A tuple (ev, tensor), where ev is a list of eigenvalues and
        tensor is an approximation to the eigenvectors.

    :Example:

        >>> import tt
        >>> import tt.eigb
        >>> d = 8; f = 3
        >>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1
        >>> x = tt.rand(n, d * f, r)
        >>> a = tt.qlaplace_dd([8, 8, 8])
        >>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0)
        Solving a block eigenvalue problem
        Looking for 8 eigenvalues with accuracy 1E-06
        swp: 1 er = 35.93 rmax:19
        swp: 2 er = 4.51015E-04 rmax:18
        swp: 3 er = 1.87584E-12 rmax:17
        Total number of matvecs: 0
        >>> print ev
        [ 0.00044828  0.00089654  0.00089654  0.00089654  0.0013448
          0.0013448   0.0013448   0.00164356]

    """
    ry = y0.r.copy()
    lam = tt_eigb.tt_block_eig.tt_eigb(y0.d, A.n, A.m, A.tt.r,
                                       A.tt.core, y0.core, ry, eps,
                                       rmax, ry[y0.d], 0, nswp,
                                       max_full_size, verb)
    y = tensor()
    y.d = y0.d
    y.n = A.n.copy()
    y.r = ry
    y.core = tt_eigb.tt_block_eig.result_core.copy()
    tt_eigb.tt_block_eig.deallocate_result()
    y.get_ps()
    return y, lam
def get_nameserver_detail_output_show_nameserver_nameserver_xlatedomain(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_nameserver_detail = ET.Element("get_nameserver_detail")
    config = get_nameserver_detail
    output = ET.SubElement(get_nameserver_detail, "output")
    show_nameserver = ET.SubElement(output, "show-nameserver")
    nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
    nameserver_portid_key.text = kwargs.pop('nameserver_portid')
    nameserver_xlatedomain = ET.SubElement(show_nameserver, "nameserver-xlatedomain")
    nameserver_xlatedomain.text = kwargs.pop('nameserver_xlatedomain')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def run(self):
    """Run the simulation."""
    # Define the resource requirements of each component in the simulation.
    vertices_resources = {
        # Every component runs on exactly one core and consumes a certain
        # amount of SDRAM to hold configuration data.
        component: {Cores: 1, SDRAM: component._get_config_size()}
        for component in self._components
    }

    # Work out what SpiNNaker application needs to be loaded for each
    # component
    vertices_applications = {component: component._get_kernel()
                             for component in self._components}

    # Convert the Wire objects into Rig Net objects and create a lookup
    # from Net to the (key, mask) to use.
    net_keys = {Net(wire.source, wire.sinks): (wire.routing_key, 0xFFFFFFFF)
                for wire in self._wires}
    nets = list(net_keys)

    # Boot the SpiNNaker machine and interrogate it to determine what
    # resources (e.g. cores, SDRAM etc.) are available.
    mc = MachineController(self._hostname)
    mc.boot()
    system_info = mc.get_system_info()

    # Automatically choose which chips and cores to use for each component
    # and generate routing tables.
    placements, allocations, application_map, routing_tables = \
        place_and_route_wrapper(vertices_resources,
                                vertices_applications,
                                nets, net_keys,
                                system_info)

    with mc.application():
        # Allocate memory for configuration data, tagged by core number.
        memory_allocations = sdram_alloc_for_vertices(mc, placements,
                                                      allocations)

        # Load the configuration data for all components
        for component, memory in memory_allocations.items():
            component._write_config(memory)

        # Load all routing tables
        mc.load_routing_tables(routing_tables)

        # Load all SpiNNaker application kernels
        mc.load_application(application_map)

        # Wait for all six cores to reach the 'sync0' barrier
        mc.wait_for_cores_to_reach_state("sync0", len(self._components))

        # Send the 'sync0' signal to start execution and wait for the
        # simulation to finish.
        mc.send_signal("sync0")
        time.sleep(self.length * 0.001)
        mc.wait_for_cores_to_reach_state("exit", len(self._components))

        # Retrieve result data
        for component, memory in memory_allocations.items():
            component._read_results(memory)
def regroup_vectorized(srccat, eps, far=None, dist=norm_dist):
    """
    Regroup the islands of a catalog according to their normalised distance.

    Assumes srccat is recarray-like for efficiency.
    Return a list of island groups.

    Parameters
    ----------
    srccat : np.rec.array or pd.DataFrame
        Should have the following fields[units]:
        ra[deg], dec[deg], a[arcsec], b[arcsec], pa[deg], peak_flux[any]

    eps : float
        maximum normalised distance within which sources are considered to be
        grouped

    far : float
        (degrees) sources that are further than this distance apart will not
        be grouped, and will not be tested. Default = 0.5.

    dist : func
        a function that calculates the distance between a source and each
        element of an array of sources.
        Default = :func:`AegeanTools.cluster.norm_dist`

    Returns
    -------
    islands : list of lists
        Each island contains integer indices for members from srccat
        (in descending dec order).

    """
    if far is None:
        far = 0.5  # 10*max(a.a/3600 for a in srccat)

    # most negative declination first
    # XXX: kind='mergesort' ensures stable sorting for determinism.
    # Do we need this?
    order = np.argsort(srccat.dec, kind='mergesort')[::-1]

    # TODO: is it better to store groups as arrays even if appends are more
    # costly?
    groups = [[order[0]]]
    for idx in order[1:]:
        rec = srccat[idx]
        # TODO: Find out if groups are big enough for this to give us a speed
        # gain. If not, get distance to all entries in groups above
        # decmin simultaneously.
        decmin = rec.dec - far
        for group in reversed(groups):
            # when an island's largest (last) declination is smaller than
            # decmin, we don't need to look at any more islands
            if srccat.dec[group[-1]] < decmin:
                # new group; break so the for/else below does not add it twice
                groups.append([idx])
                break
            rafar = far / np.cos(np.radians(rec.dec))
            group_recs = np.take(srccat, group, mode='clip')
            group_recs = group_recs[abs(rec.ra - group_recs.ra) <= rafar]
            if len(group_recs) and dist(rec, group_recs).min() < eps:
                group.append(idx)
                break
        else:
            # new group
            groups.append([idx])

    # TODO?: a more numpy-like interface would return only an array providing
    # the mapping:
    #    group_idx = np.empty(len(srccat), dtype=int)
    #    for i, group in enumerate(groups):
    #        group_idx[group] = i
    #    return group_idx
    return groups
def start(self, wait_for_completion=True, operation_timeout=None,
          status_timeout=None):
    """
    Start (activate) this Partition, using the HMC operation
    "Start Partition".

    This HMC operation has deferred status behavior: If the asynchronous job
    on the HMC is complete, it takes a few seconds until the partition status
    has reached the desired value (it still may show status "paused"). If
    `wait_for_completion=True`, this method repeatedly checks the status of
    the partition after the HMC operation has completed, and waits until the
    status is in one of the desired states "active" or "degraded".

    TODO: Describe what happens if the maximum number of active partitions
    is exceeded.

    Authorization requirements:

    * Object-access permission to this Partition.
    * Object-access permission to the CPC containing this Partition.
    * Task permission to the "Start Partition" task.

    Parameters:

      wait_for_completion (bool):
        Boolean controlling whether this method should wait for completion of
        the requested asynchronous HMC operation, as follows:

        * If `True`, this method will wait for completion of the asynchronous
          job performing the operation.

        * If `False`, this method will return immediately once the HMC has
          accepted the request to perform the operation.

      operation_timeout (:term:`number`):
        Timeout in seconds, for waiting for completion of the asynchronous
        job performing the operation. The special value 0 means that no
        timeout is set. `None` means that the default async operation timeout
        of the session is used. If the timeout expires when
        `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout`
        is raised.

      status_timeout (:term:`number`):
        Timeout in seconds, for waiting that the status of the partition has
        reached the desired status, after the HMC operation has completed.
        The special value 0 means that no timeout is set. `None` means that
        the default async operation timeout of the session is used. If the
        timeout expires when `wait_for_completion=True`, a
        :exc:`~zhmcclient.StatusTimeout` is raised.

    Returns:

      :class:`py:dict` or :class:`~zhmcclient.Job`:

        If `wait_for_completion` is `True`, returns an empty
        :class:`py:dict` object.

        If `wait_for_completion` is `False`, returns a
        :class:`~zhmcclient.Job` object representing the asynchronously
        executing job on the HMC.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
      :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting
        for completion of the operation.
      :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting
        for the desired partition status.
    """
    result = self.manager.session.post(
        self.uri + '/operations/start',
        wait_for_completion=wait_for_completion,
        operation_timeout=operation_timeout)
    if wait_for_completion:
        statuses = ["active", "degraded"]
        self.wait_for_status(statuses, status_timeout)
    return result
def get_hull_energy(self, comp):
    """
    Args:
        comp (Composition): Input composition

    Returns:
        Energy of lowest energy equilibrium at desired composition. Not
        normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
    """
    e = 0
    for k, v in self.get_decomposition(comp).items():
        e += k.energy_per_atom * v
    return e * comp.num_atoms
def write_xsd(cls) -> None:
    """Write the complete base schema file `HydPyConfigBase.xsd` based on
    the template file `HydPyConfigBase.xsdt`.

    Method |XSDWriter.write_xsd| adds model specific information to the
    general information of template file `HydPyConfigBase.xsdt` regarding
    reading and writing of time series data and exchanging parameter and
    sequence values e.g. during calibration.

    The following example shows that after writing a new schema file, method
    |XMLInterface.validate_xml| does not raise an error when either applied
    on the XML configuration files `single_run.xml` or `multiple_runs.xml`
    of the `LahnH` example project:

    >>> import os
    >>> from hydpy.auxs.xmltools import XSDWriter, XMLInterface
    >>> if os.path.exists(XSDWriter.filepath_target):
    ...     os.remove(XSDWriter.filepath_target)
    >>> os.path.exists(XSDWriter.filepath_target)
    False
    >>> XSDWriter.write_xsd()
    >>> os.path.exists(XSDWriter.filepath_target)
    True

    >>> from hydpy import data
    >>> for configfile in ('single_run.xml', 'multiple_runs.xml'):
    ...     XMLInterface(configfile, data.get_path('LahnH')).validate_xml()
    """
    with open(cls.filepath_source) as file_:
        template = file_.read()
    template = template.replace(
        '<!--include model sequence groups-->', cls.get_insertion())
    template = template.replace(
        '<!--include exchange items-->', cls.get_exchangeinsertion())
    with open(cls.filepath_target, 'w') as file_:
        file_.write(template)
def freeze(self):
    """
    Freeze all settings so that they can't be altered
    """
    self.target.disable()
    self.filter.configure(state='disable')
    self.prog_ob.configure(state='disable')
    self.pi.configure(state='disable')
    self.observers.configure(state='disable')
    self.comment.configure(state='disable')
def merge(self, config):
    """ Load configuration from given configuration.

    :param config: config to load. If config is a string type, then it's
        treated as .ini filename
    :return: None
    """
    if isinstance(config, ConfigParser) is True:
        self.update(config)
    elif isinstance(config, str):
        self.read(config)
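A minimal usage sketch showing both accepted argument types; ``WConfig`` stands for whatever ConfigParser subclass defines this ``merge()`` method, and the file and section names are illustrative:

# Hypothetical sketch; WConfig and the .ini contents are illustrative.
from configparser import ConfigParser

cfg = WConfig()
cfg.merge('defaults.ini')          # a string is treated as an .ini filename

overrides = ConfigParser()
overrides.read_string('[server]\nport = 8080\n')
cfg.merge(overrides)               # a parser object is merged via update()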
def isPow2(num) -> bool:
    """
    Check if number or constant is power of two
    """
    if not isinstance(num, int):
        num = int(num)
    return num != 0 and ((num & (num - 1)) == 0)
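The check relies on a standard bit trick: a power of two has exactly one set bit, so subtracting 1 flips that bit and sets all lower bits, making the AND zero; the ``num != 0`` guard excludes zero. For example:

>>> isPow2(8)    # 0b1000 & 0b0111 == 0
True
>>> isPow2(6)    # 0b0110 & 0b0101 == 0b0100, non-zero
False
>>> isPow2(0)    # excluded by the num != 0 guard
False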
def _clean_key_type(key_name, escape_char=ESCAPE_SEQ):
    """Removes type specifier returning detected type and a key name without
    type specifier.

    :param str key_name: A key name containing type postfix.

    :rtype: tuple[type|None, str]
    :returns: Type definition and cleaned key name.
    """
    for i in (2, 1):
        if len(key_name) < i:
            return None, key_name

        type_v = key_name[-i:]
        if type_v in _KEY_SPLIT:
            if len(key_name) <= i:
                return _KEY_SPLIT[type_v], ''

            esc_cnt = 0
            for pos in range(-i - 1, -len(key_name) - 1, -1):
                if key_name[pos] == escape_char:
                    esc_cnt += 1
                else:
                    break
            if esc_cnt % 2 == 0:
                return _KEY_SPLIT[type_v], key_name[:-i]
            else:
                return None, key_name
    return None, key_name
def _str_replace(txt):
    """Makes a small text amenable to being used in a filename."""
    txt = txt.replace(",", "")
    txt = txt.replace(" ", "_")
    txt = txt.replace(":", "")
    txt = txt.replace(".", "")
    txt = txt.replace("/", "")
    txt = txt.replace("", "")
    return txt
def combine(self, members, output_file, dimension=None, start_index=None,
            stop_index=None, stride=None):
    """ Combine many files into a single file on disk.
    Defaults to using the 'time' dimension. """
    nco = None
    try:
        nco = Nco()
    except BaseException:
        # This is not necessarily an import error (could be wrong PATH)
        raise ImportError("NCO not found. The NCO python bindings are required to use 'Collection.combine'.")

    if len(members) > 0 and hasattr(members[0], 'path'):
        # A member DotDict was passed in, we only need the paths
        members = [m.path for m in members]

    options = ['-4']        # NetCDF4
    options += ['-L', '3']  # Level 3 compression
    options += ['-h']       # Don't append to the history global attribute
    if dimension is not None:
        if start_index is None:
            start_index = 0
        if stop_index is None:
            stop_index = ''
        if stride is None:
            stride = 1
        options += ['-d', '{0},{1},{2},{3}'.format(dimension, start_index,
                                                   stop_index, stride)]
    nco.ncrcat(input=members, output=output_file, options=options)
def dead(name, enable=None, sig=None, init_delay=None, **kwargs):
    '''
    Ensure that the named service is dead by stopping the service if it is
    running

    name
        The name of the init or rc script used to manage the service

    enable
        Set the service to be enabled at boot time, ``True`` sets the service
        to be enabled, ``False`` sets the named service to be disabled. The
        default is ``None``, which does not enable or disable anything.

    sig
        The string to search for when looking for the service process with ps

    init_delay
        Add a sleep command (in seconds) before the check to make sure
        service is killed.

        .. versionadded:: 2017.7.0

    no_block : False
        **For systemd minions only.** Stops the service using ``--no-block``.

        .. versionadded:: 2017.7.0
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # Check for common error: using enabled option instead of enable
    if 'enabled' in kwargs:
        return _enabled_used_error(ret)

    # Convert enable to boolean in case user passed a string value
    if isinstance(enable, six.string_types):
        enable = salt.utils.data.is_true(enable)

    # Check if the service is available
    try:
        if not _available(name, ret):
            if __opts__.get('test'):
                ret['result'] = None
                ret['comment'] = ('Service {0} not present; if created in '
                                  'this state run, it would have been '
                                  'stopped'.format(name))
            else:
                # A non-available service is OK here, don't let the state fail
                # because of it.
                ret['result'] = True
            return ret
    except CommandExecutionError as exc:
        ret['result'] = False
        ret['comment'] = exc.strerror
        return ret

    # lot of custom init script won't or mis implement the status
    # command, so it is just an indicator but can not be fully trusted
    before_toggle_status = __salt__['service.status'](name, sig)
    if 'service.enabled' in __salt__:
        if salt.utils.platform.is_windows():
            # service.enabled in Windows returns True for services that are
            # set to Auto start, but services set to Manual can also be
            # disabled
            before_toggle_enable_status = \
                __salt__['service.info'](name)['StartType'] in ['Auto', 'Manual']
        else:
            before_toggle_enable_status = __salt__['service.enabled'](name)
    else:
        before_toggle_enable_status = True

    # See if the service is already dead
    if not before_toggle_status:
        ret['comment'] = 'The service {0} is already dead'.format(name)
        if enable is True and not before_toggle_enable_status:
            ret.update(_enable(name, None, skip_verify=False, **kwargs))
        elif enable is False and before_toggle_enable_status:
            ret.update(_disable(name, None, skip_verify=False, **kwargs))
        return ret

    # Run the tests
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Service {0} is set to be killed'.format(name)
        return ret

    # Conditionally add systemd-specific args to call to service.start
    stop_kwargs, warnings = _get_systemd_only(__salt__['service.stop'], kwargs)
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)

    if salt.utils.platform.is_windows():
        for arg in ['timeout', 'with_deps', 'with_parents']:
            if kwargs.get(arg, False):
                stop_kwargs.update({arg: kwargs.get(arg)})

    func_ret = __salt__['service.stop'](name, **stop_kwargs)
    if not func_ret:
        ret['result'] = False
        ret['comment'] = 'Service {0} failed to die'.format(name)
        if enable is True:
            ret.update(_enable(name, True, result=False, skip_verify=False,
                               **kwargs))
        elif enable is False:
            ret.update(_disable(name, True, result=False, skip_verify=False,
                                **kwargs))
        return ret

    if init_delay:
        time.sleep(init_delay)

    # only force a change state if we have explicitly detected them
    after_toggle_status = __salt__['service.status'](name)
    if 'service.enabled' in __salt__:
        after_toggle_enable_status = __salt__['service.enabled'](name)
    else:
        after_toggle_enable_status = True
    if (
            (before_toggle_enable_status != after_toggle_enable_status) or
            (before_toggle_status != after_toggle_status)
    ) and not ret.get('changes', {}):
        ret['changes'][name] = after_toggle_status

    # be sure to stop, in case we mis detected in the check
    if after_toggle_status:
        ret['result'] = False
        ret['comment'] = 'Service {0} failed to die'.format(name)
    else:
        ret['comment'] = 'Service {0} was killed'.format(name)

    if enable is True:
        ret.update(_enable(name, after_toggle_status,
                           result=not after_toggle_status,
                           skip_verify=False, **kwargs))
    elif enable is False:
        ret.update(_disable(name, after_toggle_status,
                            result=not after_toggle_status,
                            skip_verify=False, **kwargs))

    return ret
def linear_interpolation(first, last, steps):
    """Interpolates all missing values using linear interpolation.

    :param numeric first: Start value for the interpolation.
    :param numeric last: End value for the interpolation.
    :param integer steps: Number of missing values that have to be calculated.
    :return: Returns a list of floats containing only the missing values.
    :rtype: list

    :todo: Define a more general interface!
    """
    result = []
    for step in xrange(0, steps):
        fpart = (steps - step) * first
        lpart = (step + 1) * last
        value = (fpart + lpart) / float(steps + 1)
        result.append(value)
    return result
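For example, filling four missing samples between a measured 0 and 10 yields the evenly spaced intermediate values:

>>> linear_interpolation(0, 10, 4)
[2.0, 4.0, 6.0, 8.0]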
def move_images(self, image_directory):
    """ Moves png-files one directory up from path/image/*.png -> path/*.png"""
    image_paths = glob(image_directory + "/**/*.png", recursive=True)
    for image_path in image_paths:
        destination = image_path.replace("\\image\\", "\\")
        shutil.move(image_path, destination)
    image_folders = glob(image_directory + "/**/image", recursive=True)
    for image_folder in image_folders:
        os.removedirs(image_folder)
def transform_metadata(blob):
    """
    Transforms metadata types about channels / users / bots / etc. into a
    dict rather than a list in order to enable faster lookup.
    """
    o = {}
    for e in blob:
        i = e[u'id']
        o[i] = e
    return o
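A small illustration with made-up channel records, keyed by their ``id`` field:

>>> channels = [{u'id': u'C123', u'name': u'general'},
...             {u'id': u'C456', u'name': u'random'}]
>>> lookup = transform_metadata(channels)
>>> lookup[u'C456'][u'name']
u'random'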
def cli(ctx, project_dir):
    """Launch the verilog simulation."""
    exit_code = SCons(project_dir).sim()
    ctx.exit(exit_code)
def get_consul(self, resource_type):
    """
    Returns an object that a :class:`~bang.deployers.deployer.Deployer`
    uses to control resources of :attr:`resource_type`.

    :param str resource_type: Any of the resources defined in
        :mod:`bang.resources`.
    """
    consul = self.CONSUL_MAP.get(resource_type)
    if consul:
        return consul(self)
def check_mq_connection(self):
    """
    Checks the RabbitMQ connection and displays on the screen whether or
    not you have a connection.
    """
    import pika
    from zengine.client_queue import BLOCKING_MQ_PARAMS
    from pika.exceptions import ProbableAuthenticationError, ConnectionClosed

    try:
        connection = pika.BlockingConnection(BLOCKING_MQ_PARAMS)
        channel = connection.channel()
        if channel.is_open:
            print(__(u"{0}RabbitMQ is working{1}").format(
                CheckList.OKGREEN, CheckList.ENDC))
        elif channel.is_closed or channel.is_closing:
            print(__(u"{0}RabbitMQ is not working!{1}").format(
                CheckList.FAIL, CheckList.ENDC))
    except ConnectionClosed as e:
        print(__(u"{0}RabbitMQ is not working!{1}").format(
            CheckList.FAIL, CheckList.ENDC), e)
    except ProbableAuthenticationError as e:
        print(__(u"{0}RabbitMQ username and password wrong{1}").format(
            CheckList.FAIL, CheckList.ENDC))
def import_activity_to_graph(diagram_graph, process_id, process_attributes, element):
    """
    Method that adds the new element that represents BPMN activity.
    Should not be used directly, only as a part of method that imports an
    element which extends Activity element (task, subprocess etc.)

    :param diagram_graph: NetworkX graph representing a BPMN process diagram,
    :param process_id: string object, representing an ID of process element,
    :param process_attributes: dictionary that holds attribute values of
        'process' element, which is parent of imported flow node,
    :param element: object representing a BPMN XML element which extends
        'activity'.
    """
    BpmnDiagramGraphImport.import_flow_node_to_graph(diagram_graph, process_id,
                                                     process_attributes, element)

    element_id = element.getAttribute(consts.Consts.id)
    diagram_graph.node[element_id][consts.Consts.default] = \
        element.getAttribute(consts.Consts.default) \
        if element.hasAttribute(consts.Consts.default) else None
def _getInstrumentsVoc(self):
    """
    This function returns the registered instruments in the system as a
    vocabulary. The instruments are filtered by the selected method.
    """
    cfilter = {'portal_type': 'Instrument', 'is_active': True}
    if self.getMethod():
        cfilter['getMethodUIDs'] = {"query": self.getMethod().UID(),
                                    "operator": "or"}
    bsc = getToolByName(self, 'bika_setup_catalog')
    items = [('', 'No instrument')] + [
        (o.UID, o.Title) for o in bsc(cfilter)]
    o = self.getInstrument()
    if o and o.UID() not in [i[0] for i in items]:
        items.append((o.UID(), o.Title()))
    items.sort(lambda x, y: cmp(x[1], y[1]))
    return DisplayList(list(items))
def create_replication_interface(self, sp, ip_port, ip_address,
                                 netmask=None, v6_prefix_length=None,
                                 gateway=None, vlan_id=None):
    """
    Creates a replication interface.

    :param sp: `UnityStorageProcessor` object. Storage processor on which
        the replication interface is running.
    :param ip_port: `UnityIpPort` object. Physical port or link aggregation
        on the storage processor on which the interface is running.
    :param ip_address: IP address of the replication interface.
    :param netmask: IPv4 netmask for the replication interface, if it uses
        an IPv4 address.
    :param v6_prefix_length: IPv6 prefix length for the interface, if it
        uses an IPv6 address.
    :param gateway: IPv4 or IPv6 gateway address for the replication
        interface.
    :param vlan_id: VLAN identifier for the interface.
    :return: the newly created replication interface.
    """
    return UnityReplicationInterface.create(
        self._cli, sp, ip_port, ip_address, netmask=netmask,
        v6_prefix_length=v6_prefix_length, gateway=gateway,
        vlan_id=vlan_id)
def to_json(self):
    """ Serializes the event to JSON.

    :returns: a string
    """
    event_as_dict = copy.deepcopy(self.event_body)
    if self.timestamp:
        if "keen" in event_as_dict:
            event_as_dict["keen"]["timestamp"] = self.timestamp.isoformat()
        else:
            event_as_dict["keen"] = {"timestamp": self.timestamp.isoformat()}
    return json.dumps(event_as_dict)
def post_message2(consumers, lti_key, url, body,
                  method='POST', content_type='application/xml'):
    """
    Posts a signed message to LTI consumer using LTI 2.0 format

    :param: consumers: consumers from config
    :param: lti_key: key to find appropriate consumer
    :param: url: post url
    :param: body: xml body
    :return: success
    """
    # pylint: disable=too-many-arguments
    (response, _) = _post_patched_request(
        consumers,
        lti_key,
        body,
        url,
        method,
        content_type,
    )

    is_success = response.status == 200
    log.debug("is success %s", is_success)
    return is_success
def group_and_sort_nodes(self):
    """
    Groups and then sorts the nodes according to the criteria passed into
    the Plot constructor.
    """
    if self.node_grouping and not self.node_order:
        if self.group_order == "alphabetically":
            self.nodes = [
                n
                for n, d in sorted(
                    self.graph.nodes(data=True),
                    key=lambda x: x[1][self.node_grouping],
                )
            ]
        elif self.group_order == "default":
            grp = [
                d[self.node_grouping] for _, d in self.graph.nodes(data=True)
            ]
            grp_name = list(unique_everseen(grp))
            nodes = []
            for key in grp_name:
                nodes.extend(
                    [
                        n
                        for n, d in self.graph.nodes(data=True)
                        if key in d.values()
                    ]
                )
            self.nodes = nodes

    elif self.node_order and not self.node_grouping:
        self.nodes = [
            n
            for n, _ in sorted(
                self.graph.nodes(data=True),
                key=lambda x: x[1][self.node_order],
            )
        ]

    elif self.node_grouping and self.node_order:
        if self.group_order == "alphabetically":
            self.nodes = [
                n
                for n, d in sorted(
                    self.graph.nodes(data=True),
                    key=lambda x: (
                        x[1][self.node_grouping],
                        x[1][self.node_order],
                    ),
                )
            ]
        elif self.group_order == "default":
            grp = [
                d[self.node_grouping] for _, d in self.graph.nodes(data=True)
            ]
            grp_name = list(unique_everseen(grp))
            nodes = []
            for key in grp_name:
                nodes.extend(
                    [
                        n
                        for n, d in sorted(
                            self.graph.nodes(data=True),
                            key=lambda x: x[1][self.node_order],
                        )
                        if key in d.values()
                    ]
                )
            self.nodes = nodes
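A standalone sketch of the "group then sort" ordering this method produces in the alphabetical case, assuming node_grouping='group' and node_order='value'; the graph and attribute names are illustrative:

# Illustrative sketch of grouping nodes by one attribute and sorting by
# another, the same key used in the alphabetical branch above.
import networkx as nx

G = nx.Graph()
G.add_node('a', group='x', value=3)
G.add_node('b', group='y', value=1)
G.add_node('c', group='x', value=2)

ordered = [
    n for n, d in sorted(
        G.nodes(data=True),
        key=lambda x: (x[1]['group'], x[1]['value']),
    )
]
print(ordered)  # ['c', 'a', 'b'] -- grouped by 'group', sorted by 'value'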
def consumer(cfg_uri, queue, logger=None, fetchsize=1):
    """
    Consumer side of the distributed crawler (the crawling part itself).

    The wrapped function must satisfy the following requirements:

    1. It takes exactly one argument.
    2. For each task it returns two values: code, message.

    :param cfg_uri: URI from which tasks are read
    :param queue: name of the queue
    :param logger: logging utility
    :param fetchsize: number of messages to fetch at a time
    """
    from stompest.protocol import StompSpec

    _info, _exception = _deal_logger(logger)
    cfg_uri = _build_uri(cfg_uri)

    def decorator(function):
        def _build_conn():
            client = _conn(cfg_uri, queue, _info)
            client.subscribe(queue, {
                StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL,
                'activemq.prefetchSize': fetchsize
            })
            return client

        @wraps(function)
        def wapper():
            client = _build_conn()
            while True:
                try:
                    frame = client.receiveFrame()
                    _info('got new frame %s' % frame)
                    param = loads(frame.body)
                    code, msg = function(param)
                    _info('result of task [%s]: [%s]-[%s]' % (frame.body, code, msg))
                except (KeyboardInterrupt, AssertionError, ConsumerFatalError), e:
                    _exception(e)
                    break
                except Exception, e:
                    _exception(e)
                finally:
                    try:
                        client.ack(frame)
                    except Exception, e:
                        _exception(e)
                        client.close()
                        client = _build_conn()
            client.disconnect()
            _info('disconnected %s' % cfg_uri)
        return wapper
    return decorator
def pckw02(handle, classid, frname, first, last, segid, intlen, n, polydg,
           cdata, btime):
    """
    Write a type 2 segment to a PCK binary file given the file handle,
    frame class ID, base frame, time range covered by the segment, and
    the Chebyshev polynomial coefficients.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pckw02_c.html

    :param handle: Handle of binary PCK file open for writing.
    :type handle: int
    :param classid: Frame class ID of body-fixed frame.
    :type classid: int
    :param frname: Name of base reference frame.
    :type frname: str
    :param first: Start time of interval covered by segment.
    :type first: float
    :param last: End time of interval covered by segment.
    :type last: float
    :param segid: Segment identifier.
    :type segid: str
    :param intlen: Length of time covered by logical record.
    :type intlen: float
    :param n: Number of logical records in segment.
    :type n: int
    :param polydg: Chebyshev polynomial degree.
    :type polydg: int
    :param cdata: Array of Chebyshev coefficients.
    :type cdata: N-Element Array of floats
    :param btime: Begin time of first logical record.
    :type btime: float
    """
    handle = ctypes.c_int(handle)
    classid = ctypes.c_int(classid)
    frame = stypes.stringToCharP(frname)
    first = ctypes.c_double(first)
    last = ctypes.c_double(last)
    segid = stypes.stringToCharP(segid)
    intlen = ctypes.c_double(intlen)
    n = ctypes.c_int(n)
    polydg = ctypes.c_int(polydg)
    cdata = stypes.toDoubleVector(cdata)
    btime = ctypes.c_double(btime)
    libspice.pckw02_c(handle, classid, frame, first, last, segid, intlen, n,
                      polydg, cdata, btime)
def is_all_field_none(self):
    """
    :rtype: bool
    """
    if self._id_ is not None:
        return False

    if self._created is not None:
        return False

    if self._updated is not None:
        return False

    if self._year is not None:
        return False

    if self._alias_user is not None:
        return False

    return True
def load(stream, container=dict):
    """
    Load and parse a file or file-like object 'stream' provides simple shell
    variables' definitions.

    :param stream: A file or file like object
    :param container:
        Factory function to create a dict-like object to store properties
    :return: Dict-like object holding shell variables' definitions

    >>> from anyconfig.compat import StringIO as to_strm
    >>> load(to_strm(''))
    {}
    >>> load(to_strm("# "))
    {}
    >>> load(to_strm("aaa="))
    {'aaa': ''}
    >>> load(to_strm("aaa=bbb"))
    {'aaa': 'bbb'}
    >>> load(to_strm("aaa=bbb # ..."))
    {'aaa': 'bbb'}
    """
    ret = container()

    for line in stream.readlines():
        line = line.rstrip()
        if line is None or not line:
            continue

        (key, val) = _parseline(line)
        if key is None:
            LOGGER.warning("Empty val in the line: %s", line)
            continue

        ret[key] = val

    return ret
def _handle_socket(self, event: int, fd: int, multi: Any, data: bytes) -> None:
    """Called by libcurl when it wants to change the file descriptors
    it cares about.
    """
    event_map = {
        pycurl.POLL_NONE: ioloop.IOLoop.NONE,
        pycurl.POLL_IN: ioloop.IOLoop.READ,
        pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
        pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE,
    }
    if event == pycurl.POLL_REMOVE:
        if fd in self._fds:
            self.io_loop.remove_handler(fd)
            del self._fds[fd]
    else:
        ioloop_event = event_map[event]
        # libcurl sometimes closes a socket and then opens a new
        # one using the same FD without giving us a POLL_NONE in
        # between. This is a problem with the epoll IOLoop,
        # because the kernel can tell when a socket is closed and
        # removes it from the epoll automatically, causing future
        # update_handler calls to fail. Since we can't tell when
        # this has happened, always use remove and re-add
        # instead of update.
        if fd in self._fds:
            self.io_loop.remove_handler(fd)
        self.io_loop.add_handler(fd, self._handle_events, ioloop_event)
        self._fds[fd] = ioloop_event
def stretch(arr, fields=None, return_indices=False):
    """Stretch an array.

    Stretch an array by ``hstack()``-ing multiple array fields while
    preserving column names and record array structure. If a scalar field is
    specified, it will be stretched along with array fields.

    Parameters
    ----------
    arr : NumPy structured or record array
        The array to be stretched.
    fields : list of strings or string, optional (default=None)
        A list of column names or a single column name to stretch. If
        ``fields`` is a string, then the output array is a one-dimensional
        unstructured array containing only the stretched elements of that
        field. If None, then stretch all fields.
    return_indices : bool, optional (default=False)
        If True, the array index of each stretched array entry will be
        returned in addition to the stretched array. This changes the return
        type of this function to a tuple consisting of a structured array
        and a numpy int64 array.

    Returns
    -------
    ret : A NumPy structured array
        The stretched array.

    Examples
    --------
    >>> import numpy as np
    >>> from root_numpy import stretch
    >>> arr = np.empty(2, dtype=[('scalar', np.int), ('array', 'O')])
    >>> arr[0] = (0, np.array([1, 2, 3], dtype=np.float))
    >>> arr[1] = (1, np.array([4, 5, 6], dtype=np.float))
    >>> stretch(arr, ['scalar', 'array'])
    array([(0, 1.0), (0, 2.0), (0, 3.0), (1, 4.0), (1, 5.0), (1, 6.0)],
        dtype=[('scalar', '<i8'), ('array', '<f8')])

    """
    dtype = []
    len_array = None
    flatten = False

    if fields is None:
        fields = arr.dtype.names
    elif isinstance(fields, string_types):
        fields = [fields]
        flatten = True

    # Construct dtype and check consistency
    for field in fields:
        dt = arr.dtype[field]
        if dt == 'O' or len(dt.shape):
            if dt == 'O':
                # Variable-length array field
                lengths = VLEN(arr[field])
            else:
                # Fixed-length array field
                lengths = np.repeat(dt.shape[0], arr.shape[0])
            if len_array is None:
                len_array = lengths
            elif not np.array_equal(lengths, len_array):
                raise ValueError(
                    "inconsistent lengths of array columns in input")
            if dt == 'O':
                dtype.append((field, arr[field][0].dtype))
            else:
                dtype.append((field, arr[field].dtype, dt.shape[1:]))
        else:
            # Scalar field
            dtype.append((field, dt))

    if len_array is None:
        raise RuntimeError("no array column in input")

    # Build stretched output
    ret = np.empty(np.sum(len_array), dtype=dtype)
    for field in fields:
        dt = arr.dtype[field]
        if dt == 'O' or len(dt.shape) == 1:
            # Variable-length or 1D fixed-length array field
            ret[field] = np.hstack(arr[field])
        elif len(dt.shape):
            # Multidimensional fixed-length array field
            ret[field] = np.vstack(arr[field])
        else:
            # Scalar field
            ret[field] = np.repeat(arr[field], len_array)

    if flatten:
        ret = ret[fields[0]]

    if return_indices:
        idx = np.concatenate(list(map(np.arange, len_array)))
        return ret, idx

    return ret
Stretch an array. Stretch an array by ``hstack()``-ing multiple array fields while preserving column names and record array structure. If a scalar field is specified, it will be stretched along with array fields. Parameters ---------- arr : NumPy structured or record array The array to be stretched. fields : list of strings or string, optional (default=None) A list of column names or a single column name to stretch. If ``fields`` is a string, then the output array is a one-dimensional unstructured array containing only the stretched elements of that field. If None, then stretch all fields. return_indices : bool, optional (default=False) If True, the array index of each stretched array entry will be returned in addition to the stretched array. This changes the return type of this function to a tuple consisting of a structured array and a numpy int64 array. Returns ------- ret : A NumPy structured array The stretched array. Examples -------- >>> import numpy as np >>> from root_numpy import stretch >>> arr = np.empty(2, dtype=[('scalar', np.int), ('array', 'O')]) >>> arr[0] = (0, np.array([1, 2, 3], dtype=np.float)) >>> arr[1] = (1, np.array([4, 5, 6], dtype=np.float)) >>> stretch(arr, ['scalar', 'array']) array([(0, 1.0), (0, 2.0), (0, 3.0), (1, 4.0), (1, 5.0), (1, 6.0)], dtype=[('scalar', '<i8'), ('array', '<f8')])
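A standalone numpy sketch of the `return_indices` bookkeeping above: the indices are one `arange` per input row, concatenated, so each stretched element records its offset within its source row (no root_numpy required):

```python
import numpy as np

# Per-row lengths of the array field, as computed in stretch() above.
len_array = np.array([3, 3])

# One arange per row, concatenated.
idx = np.concatenate(list(map(np.arange, len_array)))
print(idx)  # [0 1 2 0 1 2]
```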
def get(ctx, job): """Get experiment or experiment job. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment: \b ```bash $ polyaxon experiment get # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get ``` Examples for getting an experiment job: \b ```bash $ polyaxon experiment get -j 1 # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get --job=10 ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2 ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2 ``` """ def get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not load experiment `{}` info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(), header="Job resources:") response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources'] )) Printer.print_header("Job info:") dict_tabulate(response) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job() else: get_experiment()
Get experiment or experiment job. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment: \b ```bash $ polyaxon experiment get # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get ``` Examples for getting an experiment job: \b ```bash $ polyaxon experiment get -j 1 # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get --job=10 ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2 ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2 ```
def vrrpe_vip(self, **kwargs): """Set vrrpe VIP. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet, ve, etc). name (str): Name of interface. (1/0/5, 1/0/10, VE name etc). vrid (str): vrrpev3 ID. get (bool): Get config instead of editing config. (True, False) delete (bool): True if the VIP address should be deleted, False otherwise. Default value will be False if not specified. vip (str): IPv4/IPv6 Virtual IP Address. rbridge_id (str): rbridge-id for device. Only required when type is `ve`. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Raises: KeyError: if `int_type`, `name`, `vrid`, or `vip` is not passed. ValueError: if `int_type`, `name`, `vrid`, or `vip` is invalid. Returns: Return value of `callback`. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.vrrpe_vip(int_type='ve', ... name='89', rbridge_id='1', ... vrid='11', vip='10.0.1.10') ... output = dev.interface.vrrpe_vip(get=True, ... int_type='ve', name='89', rbridge_id='1') ... output = dev.interface.vrrpe_vip(delete=True, ... int_type='ve', name='89', rbridge_id='1', vrid='1', ... vip='10.0.0.10') """ int_type = kwargs.pop('int_type').lower() name = kwargs.pop('name') vip = kwargs.pop('vip', '') get = kwargs.pop('get', False) delete = kwargs.pop('delete', False) callback = kwargs.pop('callback', self._callback) valid_int_types = ['gigabitethernet', 'tengigabitethernet', 'fortygigabitethernet', 'hundredgigabitethernet', 'port_channel', 've'] if vip != '': ipaddress = ip_interface(unicode(vip)) version = ipaddress.version else: version = 4 if int_type not in valid_int_types: raise ValueError('`int_type` must be one of: %s' % repr(valid_int_types)) if delete: vrid = kwargs.pop('vrid') rbridge_id = kwargs.pop('rbridge_id', '1') vrrpe_args = dict(rbridge_id=rbridge_id, name=name, vrid=vrid, virtual_ipaddr=vip) elif get: rbridge_id = kwargs.pop('rbridge_id', '1') vrrpe_args = dict(name=name, vrid='', virtual_ipaddr='') else: vrid = kwargs.pop('vrid') ipaddress = ip_interface(unicode(vip)) if int_type == 've': rbridge_id = kwargs.pop('rbridge_id', '1') vrrpe_args = dict(name=name, vrid=vrid, virtual_ipaddr=str(ipaddress.ip)) method_name = None method_class = self._interface if version == 4: vrrpe_args['version'] = '3' method_name = 'interface_%s_vrrpe_virtual_ip_virtual_' \ 'ipaddr' % int_type elif version == 6: method_name = 'interface_%s_ipv6_vrrpv3e_group_virtual_ip_' \ 'virtual_ipaddr' % int_type if int_type == 've': method_name = 'rbridge_id_%s' % method_name if version == 6: method_name = method_name.replace('group_', '') method_class = self._rbridge vrrpe_args['rbridge_id'] = rbridge_id if not pynos.utilities.valid_vlan_id(name): raise InvalidVlanId("`name` must be between `1` and `8191`") elif not pynos.utilities.valid_interface(int_type, name): raise ValueError('`name` must be in the format of x/y/z for ' 'physical interfaces or x for port channel.') vrrpe_vip = getattr(method_class, method_name) config = vrrpe_vip(**vrrpe_args) result = [] if delete: config.find('.//*virtual-ip').set('operation', 'delete') if get: output = callback(config, handler='get_config') for item in output.data.findall('.//{*}vrrpe'): vrid = item.find('.//{*}vrid').text if item.find('.//{*}virtual-ipaddr') is not None: vip = item.find('.//{*}virtual-ipaddr').text else: vip = '' tmp = {"vrid": vrid, "vip": vip} result.append(tmp) else: result = callback(config) return result
Set vrrpe VIP. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet, ve, etc). name (str): Name of interface. (1/0/5, 1/0/10, VE name etc). vrid (str): vrrpev3 ID. get (bool): Get config instead of editing config. (True, False) delete (bool): True if the VIP address should be deleted, False otherwise. Default value will be False if not specified. vip (str): IPv4/IPv6 Virtual IP Address. rbridge_id (str): rbridge-id for device. Only required when type is `ve`. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Raises: KeyError: if `int_type`, `name`, `vrid`, or `vip` is not passed. ValueError: if `int_type`, `name`, `vrid`, or `vip` is invalid. Returns: Return value of `callback`. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.vrrpe_vip(int_type='ve', ... name='89', rbridge_id='1', ... vrid='11', vip='10.0.1.10') ... output = dev.interface.vrrpe_vip(get=True, ... int_type='ve', name='89', rbridge_id='1') ... output = dev.interface.vrrpe_vip(delete=True, ... int_type='ve', name='89', rbridge_id='1', vrid='1', ... vip='10.0.0.10')
def lt(lt_value): """ Validates that a field value is less than the value given to this validator. """ def validate(value): if value >= lt_value: return e("{} is not less than {}", value, lt_value) return validate
Validates that a field value is less than the value given to this validator.
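A self-contained usage sketch of this validator factory; the error helper `e` is defined elsewhere in the library, so a trivial stand-in is assumed here:

```python
def e(fmt, *args):
    # Stand-in for the library's error-message helper (assumption).
    return fmt.format(*args)

def lt(lt_value):
    def validate(value):
        if value >= lt_value:
            return e("{} is not less than {}", value, lt_value)
    return validate

check = lt(10)
assert check(5) is None                       # 5 < 10: no error
assert check(12) == "12 is not less than 10"  # bound violated
```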
def lock_excl(self, timeout='default'): """Establish an exclusive lock to the resource. :param timeout: Absolute time period (in milliseconds) that a resource waits to get unlocked by the locking session before returning an error. (Defaults to self.timeout) """ timeout = self.timeout if timeout == 'default' else timeout timeout = self._cleanup_timeout(timeout) self.visalib.lock(self.session, constants.AccessModes.exclusive_lock, timeout, None)
Establish an exclusive lock to the resource. :param timeout: Absolute time period (in milliseconds) that a resource waits to get unlocked by the locking session before returning an error. (Defaults to self.timeout)
def backup(self, paths=None): """Backup method driver.""" if not paths: paths = self._get_paths() try: self._backup_compresslevel(paths) except TypeError: try: self._backup_pb_gui(paths) except ImportError: self._backup_pb_tqdm(paths) # Delete source if specified if self.delete_source: shutil.rmtree(self.source) return self.zip_filename
Backup method driver.
def write_xml(self): ''' Writes a VocabularyKey Xml as per Healthvault schema. :returns: lxml.etree.Element representing a single VocabularyKey ''' key = None if self.language is not None: lang = {} lang['{http://www.w3.org/XML/1998/namespace}lang'] = self.language key = etree.Element('vocabulary-key', attrib=lang) else: key = etree.Element('vocabulary-key') name = etree.Element('name') name.text = self.name key.append(name) if self.family is not None: family = etree.Element('family') family.text = self.family key.append(family) if self.version is not None: version = etree.Element('version') version.text = self.version key.append(version) if self.code_value is not None: code_value = etree.Element('code-value') code_value.text = self.code_value key.append(code_value) return key
Writes a VocabularyKey Xml as per Healthvault schema. :returns: lxml.etree.Element representing a single VocabularyKey
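A minimal lxml sketch of the `xml:lang` handling above; lxml accepts Clark-notation keys for namespaced attributes (the element text is a hypothetical value):

```python
from lxml import etree

attrib = {'{http://www.w3.org/XML/1998/namespace}lang': 'en'}
key = etree.Element('vocabulary-key', attrib=attrib)
name = etree.SubElement(key, 'name')
name.text = 'example-vocabulary'  # hypothetical vocabulary name

# Prints the key with xml:lang="en" and a nested <name> element.
print(etree.tostring(key, pretty_print=True).decode())
```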
def yesterday(hour=None, minute=None): """ Gives the ``datetime.datetime`` object corresponding to yesterday. The default value for optional parameters is the current value of hour and minute. I.e: when called without specifying values for parameters, the resulting object will refer to the time = now - 24 hours; when called with only hour specified, the resulting object will refer to yesterday at the specified hour and at the current minute. :param hour: the hour for yesterday, in the format *0-23* (defaults to ``None``) :type hour: int :param minute: the minute for yesterday, in the format *0-59* (defaults to ``None``) :type minute: int :returns: a ``datetime.datetime`` object :raises: *ValueError* when hour or minute have bad values """ if hour is None: hour = datetime.now().hour if minute is None: minute = datetime.now().minute yesterday_date = date.today() + timedelta(days=-1) return datetime(yesterday_date.year, yesterday_date.month, yesterday_date.day, hour, minute, 0)
Gives the ``datetime.datetime`` object corresponding to yesterday. The default value for optional parameters is the current value of hour and minute. I.e: when called without specifying values for parameters, the resulting object will refer to the time = now - 24 hours; when called with only hour specified, the resulting object will refer to yesterday at the specified hour and at the current minute. :param hour: the hour for yesterday, in the format *0-23* (defaults to ``None``) :type hour: int :param minute: the minute for yesterday, in the format *0-59* (defaults to ``None``) :type minute: int :returns: a ``datetime.datetime`` object :raises: *ValueError* when hour or minute have bad values
def _build_kwargs(keys, input_dict): """ Parameters ---------- keys : iterable Typically a list of strings. input_dict : dict-like A dictionary from which to attempt to pull each key. Returns ------- kwargs : dict A dictionary with only the keys that were in input_dict """ kwargs = {} for key in keys: try: kwargs[key] = input_dict[key] except KeyError: pass return kwargs
Parameters ---------- keys : iterable Typically a list of strings. input_dict : dict-like A dictionary from which to attempt to pull each key. Returns ------- kwargs : dict A dictionary with only the keys that were in input_dict
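The try/except filtering above is equivalent to a dictionary comprehension; a quick sketch with hypothetical keys:

```python
keys = ['color', 'weight']
input_dict = {'color': 'red', 'size': 3}

# Keep only the requested keys that are actually present.
kwargs = {k: input_dict[k] for k in keys if k in input_dict}
print(kwargs)  # {'color': 'red'}
```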
def _linux_stp(br, state): ''' Internal, sets STP state ''' brctl = _tool_path('brctl') return __salt__['cmd.run']('{0} stp {1} {2}'.format(brctl, br, state), python_shell=False)
Internal, sets STP state
def _take_screenshot(self, screenshot=False, name_prefix='unknown'): """ This is different from _save_screenshot. The return value may be None or the screenshot path Args: screenshot: bool or PIL image """ if isinstance(screenshot, bool): if not screenshot: return return self._save_screenshot(name_prefix=name_prefix) if isinstance(screenshot, Image.Image): return self._save_screenshot(screen=screenshot, name_prefix=name_prefix) raise TypeError("invalid type for func _take_screenshot: " + str(type(screenshot)))
This is different from _save_screenshot. The return value may be None or the screenshot path Args: screenshot: bool or PIL image
def _start_thread(self): """Start an enqueueing thread.""" self._stopping_event = Event() self._enqueueing_thread = Thread(target=self._enqueue_batches, args=(self._stopping_event,)) self._enqueueing_thread.start()
Start an enqueueing thread.
def qteDefVar(self, varName: str, value, module=None, doc: str=None): """ Define and document ``varName`` in an arbitrary name space. If ``module`` is **None** then ``qte_global`` will be used. .. warning:: If the ``varName`` was already defined in ``module`` then its value and documentation are overwritten without warning. |Args| * ``varName`` (**str**): variable name. * ``value`` (**object**): arbitrary data to store. * ``module`` (**Python module**): the module in which the variable should be defined. * ``doc`` (**str**): documentation string for variable. |Returns| **bool**: **True** if ``varName`` could be defined in ``module``. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type. """ # Use the global name space per default. if module is None: module = qte_global # Create the documentation dictionary if it does not exist # already. if not hasattr(module, '_qte__variable__docstring__dictionary__'): module._qte__variable__docstring__dictionary__ = {} # Set the variable value and documentation string. setattr(module, varName, value) module._qte__variable__docstring__dictionary__[varName] = doc return True
Define and document ``varName`` in an arbitrary name space. If ``module`` is **None** then ``qte_global`` will be used. .. warning:: If the ``varName`` was already defined in ``module`` then its value and documentation are overwritten without warning. |Args| * ``varName`` (**str**): variable name. * ``value`` (**object**): arbitrary data to store. * ``module`` (**Python module**): the module in which the variable should be defined. * ``doc`` (**str**): documentation string for variable. |Returns| **bool**: **True** if ``varName`` could be defined in ``module``. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
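A standalone sketch of the documented-variable pattern above, using a throwaway module object instead of `qte_global` (names here are illustrative):

```python
import types

ns = types.ModuleType('scratch')
if not hasattr(ns, '_qte__variable__docstring__dictionary__'):
    ns._qte__variable__docstring__dictionary__ = {}

# Store the value and its documentation side by side.
setattr(ns, 'answer', 42)
ns._qte__variable__docstring__dictionary__['answer'] = 'An example variable.'
print(ns.answer, '-', ns._qte__variable__docstring__dictionary__['answer'])
```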
def collect_diagnostic_data_45(self, end_datetime, bundle_size_bytes, cluster_name=None, roles=None, collect_metrics=False, start_datetime=None): """ Issue the command to collect diagnostic data. If start_datetime is specified, diagnostic data is collected for the entire period between start_datetime and end_datetime provided that bundle size is less than or equal to bundle_size_bytes. Diagnostic data collection fails if the bundle size is greater than bundle_size_bytes. If start_datetime is not specified, diagnostic data is collected starting from end_datetime and collecting backwards up to a maximum of bundle_size_bytes. @param end_datetime: The end of the collection period. Type datetime. @param bundle_size_bytes: The target size for the support bundle in bytes @param cluster_name: The cluster to collect or None for all clusters @param roles: Role ids of roles to restrict log and metric collection to. Valid since v10. @param collect_metrics: Whether to collect metrics for viewing as charts. Valid since v13. @param start_datetime: The start of the collection period. Type datetime. Valid since v13. """ args = { 'endTime': end_datetime.isoformat(), 'bundleSizeBytes': bundle_size_bytes, 'clusterName': cluster_name } if self._get_resource_root().version >= 10: args['roles'] = roles if self._get_resource_root().version >= 13: args['enableMonitorMetricsCollection'] = collect_metrics if start_datetime is not None: args['startTime'] = start_datetime.isoformat() return self._cmd('collectDiagnosticData', data=args)
Issue the command to collect diagnostic data. If start_datetime is specified, diagnostic data is collected for the entire period between start_datetime and end_datetime provided that bundle size is less than or equal to bundle_size_bytes. Diagnostic data collection fails if the bundle size is greater than bundle_size_bytes. If start_datetime is not specified, diagnostic data is collected starting from end_datetime and collecting backwards up to a maximum of bundle_size_bytes. @param end_datetime: The end of the collection period. Type datetime. @param bundle_size_bytes: The target size for the support bundle in bytes @param cluster_name: The cluster to collect or None for all clusters @param roles: Role ids of roles to restrict log and metric collection to. Valid since v10. @param collect_metrics: Whether to collect metrics for viewing as charts. Valid since v13. @param start_datetime: The start of the collection period. Type datetime. Valid since v13.
def unregisterWalkthrough(self, walkthrough): """ Unregisters the given walkthrough from the application walkthrough list. :param walkthrough | <XWalkthrough> """ if type(walkthrough) in (str, unicode): walkthrough = self.findWalkthrough(walkthrough) try: self._walkthroughs.remove(walkthrough) except ValueError: pass
Unregisters the given walkthrough from the application walkthrough list. :param walkthrough | <XWalkthrough>
def run_script(self, script, identifier=_DEFAULT_SCRIPT_NAME): """ Run a JS script within the context.\ All code is run synchronously,\ there is no event loop. It's thread-safe :param script: utf-8 encoded or unicode string :type script: bytes or str :param identifier: utf-8 encoded or unicode string.\ This is used as the name of the script\ (ie: in stack-traces) :type identifier: bytes or str :return: Result of running the JS script :rtype: str :raises V8Error: if there was\ an error running the JS script """ assert isinstance(script, six.text_type) or _is_utf_8(script) assert isinstance(identifier, six.text_type) or _is_utf_8(identifier) if isinstance(script, six.text_type): script = script.encode('utf-8') if isinstance(identifier, six.text_type): identifier = identifier.encode('utf-8') with _String() as output: with _String() as error: code = lib.v8cffi_run_script( self._c_context[0], script, len(script), identifier, len(identifier), output.string_ptr, output.len_ptr, error.string_ptr, error.len_ptr) if code != lib.E_V8_OK: raise exceptions.get_exception(code)(six.text_type(error)) return six.text_type(output)
Run a JS script within the context.\ All code is run synchronously,\ there is no event loop. It's thread-safe :param script: utf-8 encoded or unicode string :type script: bytes or str :param identifier: utf-8 encoded or unicode string.\ This is used as the name of the script\ (ie: in stack-traces) :type identifier: bytes or str :return: Result of running the JS script :rtype: str :raises V8Error: if there was\ an error running the JS script
def _interpret_as_minutes(sval, mdict): """ Times like "1:22" are ambiguous; do they represent minutes and seconds or hours and minutes? By default, timeparse assumes the latter. Call this function after parsing out a dictionary to change that assumption. >>> import pprint >>> pprint.pprint(_interpret_as_minutes('1:24', {'secs': '24', 'mins': '1'})) {'hours': '1', 'mins': '24'} """ if ( sval.count(':') == 1 and '.' not in sval and (('hours' not in mdict) or (mdict['hours'] is None)) and (('days' not in mdict) or (mdict['days'] is None)) and (('weeks' not in mdict) or (mdict['weeks'] is None)) #and (('months' not in mdict) or (mdict['months'] is None)) #and (('years' not in mdict) or (mdict['years'] is None)) ): mdict['hours'] = mdict['mins'] mdict['mins'] = mdict['secs'] mdict.pop('secs') pass return mdict
Times like "1:22" are ambiguous; do they represent minutes and seconds or hours and minutes? By default, timeparse assumes the latter. Call this function after parsing out a dictionary to change that assumption. >>> import pprint >>> pprint.pprint(_interpret_as_minutes('1:24', {'secs': '24', 'mins': '1'})) {'hours': '1', 'mins': '24'}
def retrieve_version(self, obj, version): """Retrieve the version of the object """ current_version = getattr(obj, VERSION_ID, None) if current_version is None: # No initial version return obj if str(current_version) == str(version): # Same version return obj # Retrieve the object from the repository pr = api.get_tool("portal_repository") # bypass permission check to AccessPreviousVersions result = pr._retrieve( obj, selector=version, preserve=(), countPurged=True) return result.object
Retrieve the version of the object
def _build_input_args(input_filepath_list, input_format_list): ''' Builds input arguments by stitching input filepaths and input formats together. ''' if len(input_format_list) != len(input_filepath_list): raise ValueError( "input_format_list & input_filepath_list are not the same size" ) input_args = [] zipped = zip(input_filepath_list, input_format_list) for input_file, input_fmt in zipped: input_args.extend(input_fmt) input_args.append(input_file) return input_args
Builds input arguments by stitching input filepaths and input formats together.
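A worked example of the stitching above, with hypothetical sox-style per-file format flags:

```python
input_filepath_list = ['a.wav', 'b.wav']
input_format_list = [['-t', 'wav'], ['-t', 'wav']]

input_args = []
for input_file, input_fmt in zip(input_filepath_list, input_format_list):
    input_args.extend(input_fmt)   # format flags first...
    input_args.append(input_file)  # ...then the file they apply to
print(input_args)  # ['-t', 'wav', 'a.wav', '-t', 'wav', 'b.wav']
```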
def add(self, response, condition=None): """ Add a new Response object :param response: The Response object :type response: parser.trigger.response.Response :param condition: An optional Conditional statement for the Response :type condition: parser.condition.Condition or None """ self._log.info('Adding a new response with the priority level {priority}'.format(priority=response.priority)) # If this is the first time we are seeing this priority level, ready a new response list if response.priority not in self._responses: self.sorted = False # Only reset sorted flag on a new priority definition for efficiency self._responses[response.priority] = [] # If this response requires a condition be met, assign it to the Response object directly if condition: self._log.debug('Response has a condition defined') self._conditionals[response] = condition self._responses[response.priority].append(response)
Add a new Response object :param response: The Response object :type response: parser.trigger.response.Response :param condition: An optional Conditional statement for the Response :type condition: parser.condition.Condition or None
def execute(self): """ Execute the actions necessary to perform a `molecule lint` and return None. :return: None """ self.print_info() linters = [ l for l in [ self._config.lint, self._config.verifier.lint, self._config.provisioner.lint, ] if l ] for l in linters: l.execute()
Execute the actions necessary to perform a `molecule lint` and return None. :return: None
def _pop_params(cls, kwargs): """ Pop entries from the `kwargs` passed to cls.__new__ based on the values in `cls.params`. Parameters ---------- kwargs : dict The kwargs passed to cls.__new__. Returns ------- params : list[(str, object)] A list of string, value pairs containing the entries in cls.params. Raises ------ TypeError Raised if any parameter values are not passed or not hashable. """ params = cls.params if not isinstance(params, Mapping): params = {k: NotSpecified for k in params} param_values = [] for key, default_value in params.items(): try: value = kwargs.pop(key, default_value) if value is NotSpecified: raise KeyError(key) # Check here that the value is hashable so that we fail here # instead of trying to hash the param values tuple later. hash(value) except KeyError: raise TypeError( "{typename} expected a keyword parameter {name!r}.".format( typename=cls.__name__, name=key ) ) except TypeError: # Value wasn't hashable. raise TypeError( "{typename} expected a hashable value for parameter " "{name!r}, but got {value!r} instead.".format( typename=cls.__name__, name=key, value=value, ) ) param_values.append((key, value)) return tuple(param_values)
Pop entries from the `kwargs` passed to cls.__new__ based on the values in `cls.params`. Parameters ---------- kwargs : dict The kwargs passed to cls.__new__. Returns ------- params : list[(str, object)] A list of string, value pairs containing the entries in cls.params. Raises ------ TypeError Raised if any parameter values are not passed or not hashable.
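A minimal sketch of the popping contract above, with a local sentinel standing in for the library's `NotSpecified` and hypothetical parameter names:

```python
NotSpecified = object()  # stand-in sentinel (assumption)
params = {'window': NotSpecified, 'threshold': 0.5}
kwargs = {'window': 20}

param_values = []
for key, default in params.items():
    value = kwargs.pop(key, default)
    if value is NotSpecified:
        raise TypeError('expected a keyword parameter %r' % key)
    hash(value)  # fail fast on unhashable parameter values
    param_values.append((key, value))
print(tuple(param_values))  # (('window', 20), ('threshold', 0.5))
```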
def set_cache_implementation(self, cache_name, impl_name, maxsize, **kwargs): """ Changes the cache implementation for the named cache """ self._get_cache(cache_name).set_cache_impl(impl_name, maxsize, **kwargs)
Changes the cache implementation for the named cache
def extract_yaml(yaml_files): """ Take a list of yaml_files, load them, and return the parsed contents to the testing program """ loaded_yaml = [] for yaml_file in yaml_files: try: with open(yaml_file, 'r') as fd: loaded_yaml.append(yaml.safe_load(fd)) except IOError as e: print('Error reading file', yaml_file) raise e except yaml.YAMLError as e: print('Error parsing file', yaml_file) raise e except Exception as e: print('General error') raise e return loaded_yaml
Take a list of yaml_files, load them, and return the parsed contents to the testing program
def per_chunk(iterable, n=1, fillvalue=None): """ From http://stackoverflow.com/a/8991553/610569 >>> list(per_chunk('abcdefghi', n=2)) [('a', 'b'), ('c', 'd'), ('e', 'f'), ('g', 'h'), ('i', None)] >>> list(per_chunk('abcdefghi', n=3)) [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'h', 'i')] """ args = [iter(iterable)] * n return zip_longest(*args, fillvalue=fillvalue)
From http://stackoverflow.com/a/8991553/610569 >>> list(per_chunk('abcdefghi', n=2)) [('a', 'b'), ('c', 'd'), ('e', 'f'), ('g', 'h'), ('i', None)] >>> list(per_chunk('abcdefghi', n=3)) [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'h', 'i')]
def argument_types(self): """Retrieve a container for the non-variadic arguments for this type. The returned object is iterable and indexable. Each item in the container is a Type instance. """ class ArgumentsIterator(collections.Sequence): def __init__(self, parent): self.parent = parent self.length = None def __len__(self): if self.length is None: self.length = conf.lib.clang_getNumArgTypes(self.parent) return self.length def __getitem__(self, key): # FIXME Support slice objects. if not isinstance(key, int): raise TypeError("Must supply a non-negative int.") if key < 0: raise IndexError("Only non-negative indexes are accepted.") if key >= len(self): raise IndexError("Index greater than container length: " "%d > %d" % ( key, len(self) )) result = conf.lib.clang_getArgType(self.parent, key) if result.kind == TypeKind.INVALID: raise IndexError("Argument could not be retrieved.") return result assert self.kind == TypeKind.FUNCTIONPROTO return ArgumentsIterator(self)
Retrieve a container for the non-variadic arguments for this type. The returned object is iterable and indexable. Each item in the container is a Type instance.
def create(self, data): """ Add a new store to your MailChimp account. Error checking on the currency code verifies that it is in the correct three-letter, all-caps format as specified by ISO 4217 but does not check that it is a valid code as the list of valid codes changes over time. :param data: The request body parameters :type data: :py:class:`dict` data = { "id": string*, "list_id": string*, "name": string*, "currency_code": string* } """ if 'id' not in data: raise KeyError('The store must have an id') if 'list_id' not in data: raise KeyError('The store must have a list_id') if 'name' not in data: raise KeyError('The store must have a name') if 'currency_code' not in data: raise KeyError('The store must have a currency_code') if not re.match(r"^[A-Z]{3}$", data['currency_code']): raise ValueError('The currency_code must be a valid 3-letter ISO 4217 currency code') response = self._mc_client._post(url=self._build_path(), data=data) if response is not None: self.store_id = response['id'] else: self.store_id = None return response
Add a new store to your MailChimp account. Error checking on the currency code verifies that it is in the correct three-letter, all-caps format as specified by ISO 4217 but does not check that it is a valid code as the list of valid codes changes over time. :param data: The request body parameters :type data: :py:class:`dict` data = { "id": string*, "list_id": string*, "name": string*, "currency_code": string* }
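The currency-code gate above is format-only; a few probes against the same regex:

```python
import re

for code in ('USD', 'usd', 'US', 'USDX'):
    print(code, bool(re.match(r"^[A-Z]{3}$", code)))
# USD True, usd False, US False, USDX False
```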
def plot(self): """ Plots reaction energy as a function of mixing ratio x in self.c1 - self.c2 tie line using pylab. Returns: Pylab object that plots reaction energy as a function of mixing ratio x. """ plt.rcParams['xtick.major.pad'] = '6' plt.rcParams['ytick.major.pad'] = '6' plt.rcParams['axes.linewidth'] = 2 npoint = 1000 xs = np.linspace(0, 1, npoint) # Converts sampling points in self.c1 - self.c2 tie line to those in # self.comp1 - self.comp2 tie line. xs_reverse_converted = InterfacialReactivity._reverse_convert( xs, self.factor1, self.factor2) energies = [self._get_energy(x) for x in xs_reverse_converted] plt.plot(xs, energies, 'k-') # Marks kinks and minimum energy point. kinks = self.get_kinks() _, x_kink, energy_kink, _, _ = zip(*kinks) plt.scatter(x_kink, energy_kink, marker='o', c='blue', s=20) plt.scatter(self.minimum()[0], self.minimum()[1], marker='*', c='red', s=300) # Labels kinks with indices. Labels are made draggable # in case of overlapping. for index, x, energy, _, _ in kinks: plt.annotate( index, xy=(x, energy), xytext=(5, 30), textcoords='offset points', ha='right', va='bottom', arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')).draggable() plt.xlim([-0.05, 1.05]) if self.norm: plt.ylabel('Energy (eV/atom)') else: plt.ylabel('Energy (eV/f.u.)') plt.xlabel('$x$ in $x$ {} + $(1-x)$ {}'.format( self.c1.reduced_formula, self.c2.reduced_formula)) return plt
Plots reaction energy as a function of mixing ratio x in self.c1 - self.c2 tie line using pylab. Returns: Pylab object that plots reaction energy as a function of mixing ratio x.
def watch_and_wait(self, poll_interval=10, idle_log_timeout=None, kill_on_timeout=False, stash_log_method=None, tag_instances=False, **kwargs): """This provides shortcut access to the `wait_for_complete` function.""" return wait_for_complete(self._job_queue, job_list=self.job_list, job_name_prefix=self.basename, poll_interval=poll_interval, idle_log_timeout=idle_log_timeout, kill_on_log_timeout=kill_on_timeout, stash_log_method=stash_log_method, tag_instances=tag_instances, **kwargs)
This provides shortcut access to the `wait_for_complete` function.
def _translate_response(self, response, state): """ Translates a SAML authentication response to an internal response :type response: saml2.response.AuthnResponse :rtype: satosa.internal.InternalData :param response: The SAML authentication response :return: A translated internal response """ # The response may have been encrypted by the IdP so if we have an # encryption key, try it. if self.encryption_keys: response.parse_assertion(self.encryption_keys) authn_info = response.authn_info()[0] auth_class_ref = authn_info[0] timestamp = response.assertion.authn_statement[0].authn_instant issuer = response.response.issuer.text auth_info = AuthenticationInformation( auth_class_ref, timestamp, issuer, ) # The SAML response may not include a NameID. subject = response.get_subject() name_id = subject.text if subject else None name_id_format = subject.format if subject else None attributes = self.converter.to_internal( self.attribute_profile, response.ava, ) internal_resp = InternalData( auth_info=auth_info, attributes=attributes, subject_type=name_id_format, subject_id=name_id, ) satosa_logging(logger, logging.DEBUG, "backend received attributes:\n%s" % json.dumps(response.ava, indent=4), state) return internal_resp
Translates a SAML authentication response to an internal response :type response: saml2.response.AuthnResponse :rtype: satosa.internal.InternalData :param response: The SAML authentication response :return: A translated internal response
def remove(self, experiment): """Remove the configuration of an experiment""" try: project_path = self.projects[self[experiment]['project']]['root'] except KeyError: return config_path = osp.join(project_path, '.project', experiment + '.yml') for f in [config_path, config_path + '~', config_path + '.lck']: if os.path.exists(f): os.remove(f) del self[experiment]
Remove the configuration of an experiment
def mapping(self, struct, key_depth=1000, tree_depth=1, update_callable=None): """ Generates random values for dict-like objects @struct: the dict-like structure you want to fill with random data @key_depth: #int number of random keys to include at each @tree_depth level @tree_depth: #int dict tree dimensions size, i.e. 1=|{key: value}| 2=|{key: {key: value}, key2: {key2: value2}}| @update_callable: #callable method which updates data in your dict-like structure - e.g. :meth:builtins.dict.update -> random @struct .. from collections import UserDict from vital.debug import RandData class MyDict(UserDict): pass rd = RandData(int) my_dict = MyDict() rd.mapping(my_dict, 3, 1, my_dict.update) # -> { # 'SE0ZNy0F6O': 42078648993195761, # 'pbK': 70822820981335987, # '0A5Aa7': 17503122029338459} .. """ if not tree_depth: return self._map_type() _struct = struct() # Look up the like-named bound method on the fresh instance; # update_callable itself is bound to the caller's object. add_struct = _struct.update if not update_callable \ else getattr(_struct, update_callable.__name__) for x in range(key_depth): add_struct({ self.randstr: self.mapping( struct, key_depth, tree_depth-1, update_callable) }) return _struct
Generates random values for dict-like objects @struct: the dict-like structure you want to fill with random data @key_depth: #int number of random keys to include at each @tree_depth level @tree_depth: #int dict tree dimensions size, i.e. 1=|{key: value}| 2=|{key: {key: value}, key2: {key2: value2}}| @update_callable: #callable method which updates data in your dict-like structure - e.g. :meth:builtins.dict.update -> random @struct .. from collections import UserDict from vital.debug import RandData class MyDict(UserDict): pass rd = RandData(int) my_dict = MyDict() rd.mapping(my_dict, 3, 1, my_dict.update) # -> { # 'SE0ZNy0F6O': 42078648993195761, # 'pbK': 70822820981335987, # '0A5Aa7': 17503122029338459} ..
def forwards(self, orm): "Write your forwards methods here." orm['avocado.DataField'].objects.get_or_create( app_name='variants', model_name='variantphenotype', field_name='hgmd_id', defaults={ 'name': 'HGMD', 'published': True })
Write your forwards methods here.
def as_binary(s, encoding='utf-8'): """Force conversion of given string to binary type. Binary is ``bytes`` type for Python 3.x and ``str`` for Python 2.x . If the string is already in binary, then no conversion is done and the same string is returned and ``encoding`` argument is ignored. Parameters ---------- s: str or bytes (Python3), str or unicode (Python2) The string to convert to binary. encoding: str The encoding of the resulting binary string (default: utf-8) Raises ------ ValueError In case an input of invalid type was passed to the function. Returns ------- ``bytes`` for Python3 or ``str`` for Python 2. """ if isinstance(s, six.text_type): return s.encode(encoding) elif isinstance(s, six.binary_type): # make sure the binary is in required encoding return s.decode(encoding).encode(encoding) else: raise ValueError('Can only convert types {0} and {1}'.format(six.text_type, six.binary_type))
Force conversion of given string to binary type. Binary is ``bytes`` type for Python 3.x and ``str`` for Python 2.x . If the string is already in binary, then no conversion is done and the same string is returned and ``encoding`` argument is ignored. Parameters ---------- s: str or bytes (Python3), str or unicode (Python2) The string to convert to binary. encoding: str The encoding of the resulting binary string (default: utf-8) Raises ------ ValueError In case an input of invalid type was passed to the function. Returns ------- ``bytes`` for Python3 or ``str`` for Python 2.
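Round trips for the conversion rules above, assuming the `as_binary` defined here is in scope:

```python
assert as_binary(u'caf\xe9') == b'caf\xc3\xa9'  # text -> utf-8 bytes
assert as_binary(b'abc') == b'abc'              # bytes pass through unchanged
try:
    as_binary(42)                               # neither text nor bytes
except ValueError as err:
    print(err)
```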
def setup_pilotpoints_grid(ml=None,sr=None,ibound=None,prefix_dict=None, every_n_cell=4, use_ibound_zones=False, pp_dir='.',tpl_dir='.', shapename="pp.shp"): """ setup regularly-spaced (gridded) pilot point parameterization Parameters ---------- ml : flopy.mbase a flopy mbase derived type. If None, sr must not be None. sr : flopy.utils.reference.SpatialReference a spatial reference used to locate the model grid in space. If None, ml must not be None. Default is None ibound : numpy.ndarray the modflow ibound integer array. Used to set pilot points only in active areas. If None and ml is None, then pilot points are set in all rows and columns according to every_n_cell. Default is None. prefix_dict : dict a dictionary of pilot point parameter prefix, layer pairs. example : {"hk":[0,1,2,3]} would setup pilot points with the prefix "hk" for model layers 1 - 4 (zero based). If None, a generic set of pilot points with the "pp" prefix are setup for a generic nrowXncol grid. Default is None use_ibound_zones : bool a flag to use the greater-than-zero values in the ibound as pilot point zones. If False, ibound values greater than zero are treated as a single zone. Default is False. pp_dir : str directory to write pilot point files to. Default is '.' tpl_dir : str directory to write pilot point template file to. Default is '.' shapename : str name of shapefile to write that contains pilot point information. Default is "pp.shp" Returns ------- pp_df : pandas.DataFrame a dataframe summarizing pilot point information (same information written to shapename) """ from . import pp_utils warnings.warn("setup_pilotpoint_grid has moved to pp_utils...",PyemuWarning) return pp_utils.setup_pilotpoints_grid(ml=ml,sr=sr,ibound=ibound, prefix_dict=prefix_dict, every_n_cell=every_n_cell, use_ibound_zones=use_ibound_zones, pp_dir=pp_dir,tpl_dir=tpl_dir, shapename=shapename)
setup regularly-spaced (gridded) pilot point parameterization Parameters ---------- ml : flopy.mbase a flopy mbase derived type. If None, sr must not be None. sr : flopy.utils.reference.SpatialReference a spatial reference used to locate the model grid in space. If None, ml must not be None. Default is None ibound : numpy.ndarray the modflow ibound integer array. Used to set pilot points only in active areas. If None and ml is None, then pilot points are set in all rows and columns according to every_n_cell. Default is None. prefix_dict : dict a dictionary of pilot point parameter prefix, layer pairs. example : {"hk":[0,1,2,3]} would setup pilot points with the prefix "hk" for model layers 1 - 4 (zero based). If None, a generic set of pilot points with the "pp" prefix are setup for a generic nrowXncol grid. Default is None use_ibound_zones : bool a flag to use the greater-than-zero values in the ibound as pilot point zones. If False, ibound values greater than zero are treated as a single zone. Default is False. pp_dir : str directory to write pilot point files to. Default is '.' tpl_dir : str directory to write pilot point template file to. Default is '.' shapename : str name of shapefile to write that contains pilot point information. Default is "pp.shp" Returns ------- pp_df : pandas.DataFrame a dataframe summarizing pilot point information (same information written to shapename)
def job_count_enabled(self): """ Return the number of enabled jobs. :return: The number of jobs that are enabled. :rtype: int """ enabled = 0 for job_desc in self._jobs.values(): if job_desc['enabled']: enabled += 1 return enabled
Return the number of enabled jobs. :return: The number of jobs that are enabled. :rtype: int
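The loop above is a plain predicate count; an equivalent one-liner over hypothetical job descriptors:

```python
jobs = {'a': {'enabled': True}, 'b': {'enabled': False}, 'c': {'enabled': True}}
print(sum(1 for desc in jobs.values() if desc['enabled']))  # 2
```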
def upload(self, remote, reader): """ Uploads a file :param remote: remote file name :param reader: an object that implements the read(size) method (typically a file descriptor) :return: """ fd = self.open(remote, 'w') while True: chunk = reader.read(512 * 1024) if chunk == b'': break self.write(fd, chunk) self.close(fd)
Uploads a file :param remote: remote file name :param reader: an object that implements the read(size) method (typically a file descriptor) :return:
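A standalone sketch of the 512 KiB chunked copy loop above, writing to an in-memory buffer instead of a remote file descriptor:

```python
import io

reader = io.BytesIO(b'x' * (512 * 1024 + 10))
out = io.BytesIO()
while True:
    chunk = reader.read(512 * 1024)
    if chunk == b'':
        break
    out.write(chunk)
print(out.tell())  # 524298 bytes copied
```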
def restore_all_edges(self): """ Restores all hidden edges. """ # Iterate over a copy: restore_edge mutates hidden_edges. for edge in list(self.hidden_edges.keys()): try: self.restore_edge(edge) except GraphError: pass
Restores all hidden edges.
def nodata(self): """Returns read only property for band nodata value, assuming single band rasters for now. """ if self._nodata is None: self._nodata = self[0].GetNoDataValue() return self._nodata
Returns read only property for band nodata value, assuming single band rasters for now.
def msg_filter(self): """Validate/return msg_filter from request (e.g. 'fuzzy', 'untranslated'), or a default. If a query is also specified in the request, then return None. """ if self.query: msg_filter = None else: msg_filter = self._request_request('msg_filter', 'all') available_msg_filters = {'untranslated', 'translated', 'fuzzy', 'all'} if msg_filter not in available_msg_filters: msg_filter = 'all' return msg_filter
Validate/return msg_filter from request (e.g. 'fuzzy', 'untranslated'), or a default. If a query is also specified in the request, then return None.
def spec_from_thresh(thresh, labels, scores, *args, **kwargs): r"""Compute the specificity that a particular threshold on the scores can achieve specificity = Num_True_Negative / (Num_True_Negative + Num_False_Positive) >>> scores = np.arange(0, 1, 0.1) >>> spec_from_thresh(0.5, labels=(scores > .5).astype(int), scores=scores) 1.0 >>> spec_from_thresh(0.5, labels=(scores > .4).astype(int), scores=scores) 1.0 >>> spec_from_thresh(0.5, labels=(scores > .9).astype(int), scores=scores) 0.6 """ df = pd.DataFrame(list(zip(labels, np.array(scores > thresh).astype(int)))) c = Confusion(df, *args, **kwargs) return c._binary_specificity
r"""Compute the specifity that a particular threshold on the scores can acheive specificity = Num_True_Negative / (Num_True_Negative + Num_False_Positive) >>> scores = np.arange(0, 1, 0.1) >>> spec_from_thresh(0.5, labels=(scores > .5).astype(int), scores=scores) 1.0 >>> spec_from_thresh(0.5, labels=(scores > .4).astype(int), scores=scores) 1.0 >>> spec_from_thresh(0.5, labels=(scores > .9).astype(int), scores=scores) 0.6
def put(self, bucket=None, key=None, upload_id=None): """Upload a new object or a part of a multipart upload. :param bucket: The bucket (instance or id) to get the object from. (Default: ``None``) :param key: The file key. (Default: ``None``) :param upload_id: The upload ID. (Default: ``None``) :returns: A Flask response. """ if upload_id is not None: return self.multipart_uploadpart(bucket, key, upload_id) else: return self.create_object(bucket, key)
Upload a new object or a part of a multipart upload. :param bucket: The bucket (instance or id) to get the object from. (Default: ``None``) :param key: The file key. (Default: ``None``) :param upload_id: The upload ID. (Default: ``None``) :returns: A Flask response.
def ipv4_reassembly(packet, *, count=NotImplemented): """Make data for IPv4 reassembly.""" if 'IP' in packet: ipv4 = packet['IP'] if ipv4.flags.DF: # dismiss not fragmented packet return False, None data = dict( bufid=( ipaddress.ip_address(ipv4.src), # source IP address ipaddress.ip_address(ipv4.dst), # destination IP address ipv4.id, # identification TP_PROTO.get(ipv4.proto).name, # payload protocol type ), num=count, # original packet range number fo=ipv4.frag, # fragment offset ihl=ipv4.ihl, # internet header length mf=bool(ipv4.flags.MF), # more fragment flag tl=ipv4.len, # total length, header included header=bytearray(ipv4.raw_packet_cache), # raw bytearray type header payload=bytearray(bytes(ipv4.payload)), # raw bytearray type payload ) return True, data return False, None
Make data for IPv4 reassembly.
def find_pore_to_pore_distance(network, pores1=None, pores2=None): r''' Find the distance between each pore in set one and each pore in set two Parameters ---------- network : OpenPNM Network Object The network object containing the pore coordinates pores1 : array_like The pore indices of the first set pores2 : array_like The pore indices of the second set. It's OK if these indices partially or completely duplicate ``pores1``. Returns ------- A distance matrix with ``len(pores1)`` rows and ``len(pores2)`` columns. The distance between pore *i* in ``pores1`` and *j* in ``pores2`` is located at *(i, j)* and *(j, i)* in the distance matrix. ''' from scipy.spatial.distance import cdist p1 = sp.array(pores1, ndmin=1) p2 = sp.array(pores2, ndmin=1) coords = network['pore.coords'] return cdist(coords[p1], coords[p2])
r''' Find the distance between each pore in set one and each pore in set two Parameters ---------- network : OpenPNM Network Object The network object containing the pore coordinates pores1 : array_like The pore indices of the first set pores2 : array_like The pore indices of the second set. It's OK if these indices partially or completely duplicate ``pores1``. Returns ------- A distance matrix with ``len(pores1)`` rows and ``len(pores2)`` columns. The distance between pore *i* in ``pores1`` and *j* in ``pores2`` is located at *(i, j)* and *(j, i)* in the distance matrix.
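A runnable sketch of the distance matrix above with three hypothetical pore coordinates:

```python
import numpy as np
from scipy.spatial.distance import cdist

coords = np.array([[0., 0., 0.], [1., 0., 0.], [0., 2., 0.]])
p1, p2 = [0, 1], [2]
print(cdist(coords[p1], coords[p2]))
# [[2.        ]
#  [2.23606798]]
```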
def _convert_bundle(bundle): """ Converts ambry bundle to dict ready to send to CKAN API. Args: bundle (ambry.bundle.Bundle): bundle to convert. Returns: dict: dict to send to CKAN to create dataset. See http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.package_create """ # shortcut for metadata meta = bundle.dataset.config.metadata notes = '' for f in bundle.dataset.files: if f.path.endswith('documentation.md'): contents = f.unpacked_contents if isinstance(contents, six.binary_type): contents = contents.decode('utf-8') notes = json.dumps(contents) break ret = { 'name': bundle.dataset.vid.lower(), 'title': meta.about.title, 'author': meta.contacts.wrangler.name, 'author_email': meta.contacts.wrangler.email, 'maintainer': meta.contacts.maintainer.name, 'maintainer_email': meta.contacts.maintainer.email, 'license_id': '', 'notes': notes, 'url': meta.identity.source, 'version': bundle.dataset.version, 'state': 'active', 'owner_org': CKAN_CONFIG['organization'], } return ret
Converts ambry bundle to dict ready to send to CKAN API. Args: bundle (ambry.bundle.Bundle): bundle to convert. Returns: dict: dict to send to CKAN to create dataset. See http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.package_create
def save_session(self, sid, session, namespace=None): """Store the user session for a client. :param sid: The session id of the client. :param session: The session dictionary. :param namespace: The Socket.IO namespace. If this argument is omitted the default namespace is used. """ namespace = namespace or '/' eio_session = self.eio.get_session(sid) eio_session[namespace] = session
Store the user session for a client. :param sid: The session id of the client. :param session: The session dictionary. :param namespace: The Socket.IO namespace. If this argument is omitted the default namespace is used.
def abbreviate(labels, rfill=' '): """ Abbreviate labels without introducing ambiguities. """ max_len = max(len(l) for l in labels) # Include max_len itself so a full-length label is used when no # shorter prefix is unambiguous (and abbrev is always defined). for i in range(1, max_len + 1): abbrev = [l[:i].ljust(i, rfill) for l in labels] if len(abbrev) == len(set(abbrev)): break return abbrev
Abbreviate labels without introducing ambiguities.
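A usage sketch, assuming the `abbreviate` above is in scope; the shortest prefix length that keeps every label distinct is used for all of them:

```python
print(abbreviate(['apple', 'apricot', 'banana']))  # ['app', 'apr', 'ban']
```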
def duplicate(self): ''' Returns a copy of the current group, including its lines. @returns: Group ''' instance = self.__class__(name=self.name, description=self.description) for line in self.lines: instance.lines.append(line.duplicate()) return instance
Returns a copy of the current group, including its lines. @returns: Group
def detect_mean_shift(self, ts, B=1000): """ Detect mean shift in a time series. B is number of bootstrapped samples to draw. """ x = np.arange(0, len(ts)) stat_ts_func = self.compute_balance_mean_ts null_ts_func = self.shuffle_timeseries stats_ts, pvals, nums = self.get_ts_stats_significance(x, ts, stat_ts_func, null_ts_func, B=B, permute_fast=True) return stats_ts, pvals, nums
Detect mean shift in a time series. B is number of bootstrapped samples to draw.
def maintainer(self): """ >>> package = yarg.get('yarg') >>> package.maintainer Maintainer(name=u'Kura', email=u'kura@kura.io') """ maintainer = namedtuple('Maintainer', 'name email') return maintainer(name=self._package['maintainer'], email=self._package['maintainer_email'])
>>> package = yarg.get('yarg') >>> package.maintainer Maintainer(name=u'Kura', email=u'kura@kura.io')
def process_climis_livestock_data(data_dir: str): """ Process CliMIS livestock data. """ records = [] livestock_data_dir = f"{data_dir}/Climis South Sudan Livestock Data" for filename in glob( f"{livestock_data_dir}/Livestock Body Condition/*2017.csv" ): records += process_file_with_single_table( filename, lambda ind: f"Percentage of {filename.split('_')[-3].lower()} with body condition {ind.lower()}", lambda f: f.split("_")[-2], ) for filename in glob( f"{livestock_data_dir}/Livestock Production/*2017.csv" ): records += process_file_with_single_table( filename, lambda ind: "Percentage of householding at least milking one of their livestocks", lambda f: f.split("_")[1], ) disease_acronym_dict = { "FMD": "Foot and Mouth Disease (FMD)", "LSD": "Lumpy Skin Disease (LSD)", "CBPP": "Contagious Bovine Pleuropneumonia (CBPP)", "CCPP": "Contagious Caprine Pleuropneumonia (CCPP)", "NC": "NC", "PPR": "Peste des Petits Ruminants (PPR)", "Others": "Other diseases", } func = ( lambda k, i: f"Percentage of livestock with {disease_acronym_dict[k]} that are {i.lower().strip()}" ) livestock_disease_header_dict = { k: partial(func, k) for k in disease_acronym_dict } livestock_migration_header_dict = { "Livestock migration": lambda i: f"Percentage of livestock migrating {i.split()[-1].lower()}", "Distance covered": lambda i: "Distance covered by migrating livestock", "Proportion of livestock that migrated": lambda i: "Percentage of livestock that migrated", "Migration normal at this time of the year": lambda i: f"Migration normal at this time of year, {i}", "Duration in months when the migrated animals are expected to be back after": lambda i: "Duration in months when the migrated animals are expected to be back after", "Reasons for livestock migration": lambda i: f"Percentage of livestock migrating due to {i.lower()}", } def process_directory(dirname, header_dict): return pd.concat( [ df for df in [ process_file_with_multiple_tables(f, header_dict) for f in glob(f"{livestock_data_dir}/{dirname}/*2017.csv") ] if df is not None ] ) func2 = ( lambda k, i: f"{k.replace('animals', i.lower()).replace('stock', 'stock of '+i.lower()).replace('animal', i.lower())}" ) livestock_ownership_headers = [ "Average current stock per household", "Average number of animals born per household during last 4 weeks", "Average number of animals acquired per household during last 4 weeks (dowry, purchase, gift)", "Average number of animals given out as bride price/gift per household during last 4 weeks per household", "Average number of animals sold per household during last 4 weeks household", "Average price of animal sold (SSP)", "Average number of animals exchanged for grain per household during last 4 weeks", "Average number of animals died/slaughtered/lost per household during last 4 weeks", ] livestock_ownership_header_dict = { k: partial(func2, k) for k in livestock_ownership_headers } ownership_df = process_directory( "Livestock Ownership", livestock_ownership_header_dict ) disease_df = process_directory( "Livestock Diseases", livestock_disease_header_dict ) livestock_migration_df = process_directory( "Livestock Migration", livestock_migration_header_dict ) livestock_pasture_header_dict = { "Pasture condtion": lambda i: f"Percentage of livestock pasture in {i.lower()} condition", "Pasture condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} condition compared to a similar time in a normal year", "Browse condition": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition", "Browse condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition compared to a similar time in a normal year", "Presence of constraints in accessing forage": lambda i: f"Percentage reporting the {('presence' if i=='Yes' else 'absence')} of constraints in accessing forage", "Main forage constraints": lambda i: f"Percentage reporting {i.lower()} as the main forage constraint", } livestock_pasture_df = process_directory( "Livestock Pasture", livestock_pasture_header_dict ) livestock_water_sources_header_dict = { "Main water sources": lambda i: f"Percentage of livestock whose main water source is {i.lower()}", "Number of days livestock have been watered in the last 7 days": lambda i: f"Number of days {i.lower()} have been watered in the last 7 days", } livestock_water_sources_df = process_directory( "Livestock Water Sources", livestock_water_sources_header_dict ) for filename in glob(f"{livestock_data_dir}/Livestock Loss/*2017.csv"): records += process_file_with_single_table( filename, lambda ind: f"Percentage of {filename.split('_')[-3].lower()} loss accounted for by {ind.lower()}", lambda f: f.split("_")[-2], ) for record in records: if isinstance(record["Value"], str): record["Value"] = record["Value"].replace("%", "") livestock_prices_df = pd.concat( [ make_livestock_prices_table(f) for f in glob( f"{livestock_data_dir}/Livestock Market Prices/*2017.csv" ) ] ) climis_livestock_data_df = pd.concat( [ pd.DataFrame(records), disease_df, ownership_df, livestock_prices_df, livestock_migration_df, livestock_pasture_df, livestock_water_sources_df, ] ) return climis_livestock_data_df
Process CliMIS livestock data.
def read(self, filepath): """Read the metadata values from a file path.""" fp = codecs.open(filepath, 'r', encoding='utf-8') try: self.read_file(fp) finally: fp.close()
Read the metadata values from a file path.
def down_alpha_beta(returns, factor_returns, **kwargs): """ Computes alpha and beta for periods when the benchmark return is negative. Parameters ---------- see documentation for `alpha_beta`. Returns ------- alpha : float beta : float """ return down(returns, factor_returns, function=alpha_beta_aligned, **kwargs)
Computes alpha and beta for periods when the benchmark return is negative. Parameters ---------- see documentation for `alpha_beta`. Returns ------- alpha : float beta : float
def add_toc_entry(self, title, level, slide_number): """ Adds a new entry to current presentation Table of Contents. """ self.__toc.append({'title': title, 'number': slide_number, 'level': level})
Adds a new entry to current presentation Table of Contents.
def arp_suppression(self, **kwargs): """ Enable ARP suppression on a VLAN. Args: name: VLAN name on which ARP suppression needs to be enabled. enable (bool): If ARP suppression should be enabled or disabled. Default: ``True``. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `name` is not passed. ValueError: if `name` is invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.arp_suppression( ... name='89') ... output = dev.interface.arp_suppression( ... get=True,name='89') ... output = dev.interface.arp_suppression( ... enable=False,name='89') ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError """ name = kwargs.pop('name') enable = kwargs.pop('enable', True) get = kwargs.pop('get', False) callback = kwargs.pop('callback', self._callback) method_class = self._interface arp_args = dict(name=name) if name: if not pynos.utilities.valid_vlan_id(name): raise InvalidVlanId("`name` must be between `1` and `8191`") arp_suppression = getattr(method_class, 'interface_vlan_interface_vlan_suppress_' 'arp_suppress_arp_enable') config = arp_suppression(**arp_args) if get: return callback(config, handler='get_config') if not enable: config.find('.//*suppress-arp').set('operation', 'delete') return callback(config)
Enable ARP suppression on a VLAN. Args: name: VLAN name on which ARP suppression needs to be enabled. enable (bool): If ARP suppression should be enabled or disabled. Default: ``True``. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `name` is not passed. ValueError: if `name` is invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.arp_suppression( ... name='89') ... output = dev.interface.arp_suppression( ... get=True,name='89') ... output = dev.interface.arp_suppression( ... enable=False,name='89') ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError
def _check_events(tk): """Checks events in the queue on a given Tk instance""" used = False try: # Process all enqueued events, then exit. while True: try: # Get an event request from the queue. method, args, kwargs, response_queue = tk.tk._event_queue.get_nowait() except queue.Empty: # No more events to process. break else: # Call the event with the given arguments, and then return # the result back to the caller via the response queue. used = True if tk.tk._debug >= 2: print('Calling event from main thread:', method.__name__, args, kwargs) try: response_queue.put((False, method(*args, **kwargs))) except SystemExit: raise # Raises original SystemExit except Exception: # Calling the event caused an exception; return the # exception back to the caller so that it can be raised # in the caller's thread. from sys import exc_info # Python 2 requirement ex_type, ex_value, ex_tb = exc_info() response_queue.put((True, (ex_type, ex_value, ex_tb))) finally: # Schedule to check again. If we just processed an event, check # immediately; if we didn't, check later. if used: tk.after_idle(_check_events, tk) else: tk.after(tk.tk._check_period, _check_events, tk)
Checks events in the queue on a given Tk instance
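A minimal, Tk-free sketch of the request/response handshake above: a worker enqueues `(method, args, kwargs, reply_queue)` and the main thread services it, shipping back either a result or the exception:

```python
import queue

event_q = queue.Queue()
reply_q = queue.Queue()
event_q.put((sorted, ([3, 1, 2],), {}, reply_q))

# Main-thread side: drain one request and answer it.
method, args, kwargs, out = event_q.get_nowait()
try:
    out.put((False, method(*args, **kwargs)))
except Exception as exc:  # ship the error back to the caller
    out.put((True, exc))

is_error, result = reply_q.get()
print(is_error, result)  # False [1, 2, 3]
```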
def raise_thread_exception(thread_id, exception): """Raise an exception in a thread. Currently, this is only available on CPython. Note: This works by setting an async exception in the thread. This means that the exception will only get called the next time that thread acquires the GIL. Concretely, this means that this middleware can't cancel system calls. """ if current_platform == "CPython": _raise_thread_exception_cpython(thread_id, exception) else: message = "Setting thread exceptions (%s) is not supported for your current platform (%r)." exctype = (exception if inspect.isclass(exception) else type(exception)).__name__ logger.critical(message, exctype, current_platform)
Raise an exception in a thread. Currently, this is only available on CPython. Note: This works by setting an async exception in the thread. This means that the exception will only get called the next time that thread acquires the GIL. Concretely, this means that this middleware can't cancel system calls.
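A hedged sketch of what the CPython-only helper referenced above (`_raise_thread_exception_cpython`) typically does; this illustrates the well-known `PyThreadState_SetAsyncExc` pattern and is not the library's actual code:

```python
import ctypes

def set_async_exc(thread_id, exctype):
    # Ask CPython to raise exctype in the target thread the next time
    # it acquires the GIL.
    count = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(thread_id), ctypes.py_object(exctype))
    if count > 1:
        # More than one thread state was modified: revert to be safe.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(thread_id), None)
```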