code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def _mature(subseq, absolute, c, size=33, total=5000): """Create mature sequences around start/end""" reads = dict() probs = [0.1, 0.2, 0.4, 0.2, 0.1] end = 5 + size error = [-2, -1, 0, 1, 2] for error5 in error: for error3 in error: s = 5 - error5 e = end - error3 seen = subseq[s:e] counts = int(probs[error5 + 2] * probs[error3 + 2] * total) + 1 name = "seq_%s_%s_%s_x%s" % (c, s + absolute, e + absolute, counts) reads[name] = (seen, counts) return reads
Create mature sequences around start/end
def ReadItem(self, document_link, options=None):
    """Reads a document.

    :param str document_link:
        The link to the document.
    :param dict options:
        The request options for the request.

    :return:
        The read Document.
    :rtype:
        dict
    """
    request_options = options if options is not None else {}
    document_path = base.GetPathFromLink(document_link)
    document_id = base.GetResourceIdOrFullNameFromLink(document_link)
    return self.Read(document_path, 'docs', document_id, None, request_options)
Reads a document. :param str document_link: The link to the document. :param dict options: The request options for the request. :return: The read Document. :rtype: dict
def show_firmware_version_output_show_firmware_version_control_processor_memory(self, **kwargs):
    """Auto Generated Code.

    Builds the XML payload for the
    show-firmware-version/output/control-processor-memory leaf and hands
    it to the callback.

    :param control_processor_memory: text for the control-processor-memory
        leaf (required keyword argument; popped from ``kwargs``)
    :param callback: optional callable receiving the built element tree;
        defaults to ``self._callback``
    :return: whatever the callback returns
    """
    config = ET.Element("config")
    show_firmware_version = ET.Element("show_firmware_version")
    # Generated-code quirk: the "config" element created above is discarded
    # and the show_firmware_version element becomes the payload root.
    config = show_firmware_version
    output = ET.SubElement(show_firmware_version, "output")
    show_firmware_version = ET.SubElement(output, "show-firmware-version")
    control_processor_memory = ET.SubElement(show_firmware_version, "control-processor-memory")
    control_processor_memory.text = kwargs.pop('control_processor_memory')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def set_folder(self, folder='assets'):
    """
    Changes the file folder to look at

    :param folder: str [images, assets]
    :return: None
    """
    # Slash-separated paths are flattened to the API's dotted form.
    normalized = folder.replace('/', '.')
    self._endpoint = 'files/{folder}'.format(folder=normalized)
Changes the file folder to look at :param folder: str [images, assets] :return: None
def team_billableInfo(self, **kwargs) -> SlackResponse:
    """Gets billable users information for the current team."""
    # Billable-info queries require a user (xoxp) token.
    self._validate_xoxp_token()
    return self.api_call(
        "team.billableInfo",
        http_verb="GET",
        params=kwargs,
    )
Gets billable users information for the current team.
def restore_from_snapshot(self, volume_id, snapshot_id):
    """Restores a specific volume from a snapshot

    :param integer volume_id: The id of the volume
    :param integer snapshot_id: The id of the restore point
    :return: Returns whether successfully restored or not
    """
    return self.client.call('Network_Storage',
                            'restoreFromSnapshot',
                            snapshot_id,
                            id=volume_id)
Restores a specific volume from a snapshot :param integer volume_id: The id of the volume :param integer snapshot_id: The id of the restore point :return: Returns whether successfully restored or not
def parallel_beam_geometry(space, num_angles=None, det_shape=None):
    r"""Create default parallel beam geometry from ``space``.

    Intended for simple test cases where a working geometry is wanted
    without the full flexibility of manual construction.  The returned
    geometry gives a fully sampled sinogram according to the Nyquist
    criterion (see Notes), which in general means a very large number of
    samples; in particular, a ``space`` that is not centered at the
    origin can result in a very large detector.

    Parameters
    ----------
    space : `DiscreteLp`
        Reconstruction space, the space of the volumetric data to be
        projected.  Needs to be 2d or 3d.
    num_angles : int, optional
        Number of angles.  Default: enough to fully sample the data.
    det_shape : int or sequence of int, optional
        Number of detector pixels.  Default: enough to fully sample the
        data.

    Returns
    -------
    geometry : `ParallelBeamGeometry`
        `Parallel2dGeometry` for a 2d ``space``, `Parallel3dAxisGeometry`
        for a 3d one.

    Examples
    --------
    Create a parallel beam geometry from a 2d space:

    >>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20))
    >>> geometry = parallel_beam_geometry(space)
    >>> geometry.angles.size
    45
    >>> geometry.detector.size
    31

    Notes
    -----
    Following [NW2001]_, pages 72--74, a function supported in the ball
    of radius :math:`\rho` and essentially bandlimited by :math:`\Omega`
    can be fully reconstructed from a parallel beam ray transform if the
    angular spacing satisfies
    :math:`\Delta \psi \leq \pi / (\rho \Omega)` and the detector
    sampling interval satisfies :math:`\Delta s \leq \pi / \Omega`.
    The geometry returned by this function satisfies these conditions
    exactly.

    If the domain is 3-dimensional, the geometry is "separable", in that
    each slice along the z-dimension of the data is treated as
    independent 2d data.

    References
    ----------
    .. [NW2001] Natterer, F and Wuebbeling, F. *Mathematical Methods in
       Image Reconstruction*. SIAM, 2001.
       https://dx.doi.org/10.1137/1.9780898718324
    """
    # Radius of the smallest origin-centered cylinder containing the
    # volume; only the first two axes matter for the rotation.
    xy_corners = space.domain.corners()[:, :2]
    rho = np.max(np.linalg.norm(xy_corners, axis=1))

    # Essential bandwidth: assume the function is bandlimited by a wave
    # along x or y; the highest measurable frequency is a standing wave
    # with period twice the smallest in-plane cell side.
    omega = np.pi / min(space.partition.cell_sides[:2])

    # Nyquist-sampled number of horizontal detector pixels.
    num_px_horiz = 2 * int(np.ceil(rho * omega / np.pi)) + 1

    if space.ndim == 2:
        det_min_pt = -rho
        det_max_pt = rho
        if det_shape is None:
            det_shape = num_px_horiz
    elif space.ndim == 3:
        num_px_vert = space.shape[2]
        min_h = space.domain.min_pt[2]
        max_h = space.domain.max_pt[2]
        det_min_pt = [-rho, min_h]
        det_max_pt = [rho, max_h]
        if det_shape is None:
            det_shape = [num_px_horiz, num_px_vert]

    if num_angles is None:
        num_angles = int(np.ceil(omega * rho))

    angle_partition = uniform_partition(0, np.pi, num_angles)
    det_partition = uniform_partition(det_min_pt, det_max_pt, det_shape)

    if space.ndim == 2:
        return Parallel2dGeometry(angle_partition, det_partition)
    elif space.ndim == 3:
        return Parallel3dAxisGeometry(angle_partition, det_partition)
    else:
        raise ValueError('``space.ndim`` must be 2 or 3.')
r"""Create default parallel beam geometry from ``space``. This is intended for simple test cases where users do not need the full flexibility of the geometries, but simply want a geometry that works. This default geometry gives a fully sampled sinogram according to the Nyquist criterion, which in general results in a very large number of samples. In particular, a ``space`` that is not centered at the origin can result in very large detectors. Parameters ---------- space : `DiscreteLp` Reconstruction space, the space of the volumetric data to be projected. Needs to be 2d or 3d. num_angles : int, optional Number of angles. Default: Enough to fully sample the data, see Notes. det_shape : int or sequence of int, optional Number of detector pixels. Default: Enough to fully sample the data, see Notes. Returns ------- geometry : `ParallelBeamGeometry` If ``space`` is 2d, return a `Parallel2dGeometry`. If ``space`` is 3d, return a `Parallel3dAxisGeometry`. Examples -------- Create a parallel beam geometry from a 2d space: >>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20)) >>> geometry = parallel_beam_geometry(space) >>> geometry.angles.size 45 >>> geometry.detector.size 31 Notes ----- According to [NW2001]_, pages 72--74, a function :math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support .. math:: \| x \| > \rho \implies f(x) = 0, and is essentially bandlimited .. math:: \| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0, can be fully reconstructed from a parallel beam ray transform if (1) the projection angles are sampled with a spacing of :math:`\Delta \psi` such that .. math:: \Delta \psi \leq \frac{\pi}{\rho \Omega}, and (2) the detector is sampled with an interval :math:`\Delta s` that satisfies .. math:: \Delta s \leq \frac{\pi}{\Omega}. The geometry returned by this function satisfies these conditions exactly. 
If the domain is 3-dimensional, the geometry is "separable", in that each slice along the z-dimension of the data is treated as independent 2d data. References ---------- .. [NW2001] Natterer, F and Wuebbeling, F. *Mathematical Methods in Image Reconstruction*. SIAM, 2001. https://dx.doi.org/10.1137/1.9780898718324
def handle_directive(self, words):
    """Handle a directive line: register its reference and allocate space.

    ``words[0]`` names the directive, ``words[1]`` the reference being
    defined, and the remaining words are the directive's arguments.

    :param words: tokenized directive line
    :return: tuple ``(line_count, "data", directive, args)``
    :raises ReferenceError: if the reference name was already defined
    """
    # Hoisted: the original resolved the directive (and its word count)
    # repeatedly; resolve each exactly once.
    directive = self.getdirective(words[0])
    refname = words[1]
    logging.debug("Handling directive " + str(directive))
    logging.debug("First argument is " + str(words[1]))
    # Static and non-static directives live in separate reference tables.
    refs = self.static_refs if directive.isstatic() else self.refs
    if refname in refs:
        raise ReferenceError(
            "[line {}]:{} already defined here (word) {} (line) {}".format(
                self.line_count, refname,
                refs[refname][0], refs[refname][1]))
    refs[refname] = (self.word_count, self.line_count)
    allocated = directive.get_word_count(words[2:])
    self.word_count += allocated
    logging.debug("Directive allocates {} words.".format(allocated))
    return (self.line_count, "data", directive, words[2:])
handles directives: adds the reference and allocates space for the content
def get_sub_dsp(self, nodes_bunch, edges_bunch=None):
    """
    Returns the sub-dispatcher induced by given node and edge bunches.

    The induced sub-dispatcher contains the available nodes in
    nodes_bunch and edges between those nodes, excluding those that are
    in edges_bunch.

    The available nodes are non isolated nodes and function nodes that
    have all inputs and at least one output.

    :param nodes_bunch:
        A container of node ids which will be iterated through once.
    :type nodes_bunch: list[str], iterable

    :param edges_bunch:
        A container of edge ids that will be removed.
    :type edges_bunch: list[(str, str)], iterable, optional

    :return:
        A dispatcher.
    :rtype: Dispatcher

    .. seealso:: :func:`get_sub_dsp_from_workflow`

    .. note::
        The sub-dispatcher edge or node attributes just point to the
        original dispatcher. So changes to the node or edge structure
        will not be reflected in the original dispatcher map while
        changes to the attributes will.

    **--------------------------------------------------------------------**

    **Example**:

    A dispatcher with a two functions `fun1` and `fun2`:

    .. dispatcher:: dsp
       :opt: graph_attr={'ratio': '1'}

        >>> dsp = Dispatcher(name='Dispatcher')
        >>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
        ...                  outputs=['c', 'd'])
        'fun1'
        >>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
        ...                  outputs=['c', 'e'])
        'fun2'

    Get the sub-dispatcher induced by given nodes bunch::

        >>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])

    .. dispatcher:: sub_dsp
       :opt: graph_attr={'ratio': '1'}

        >>> sub_dsp.name = 'Sub-Dispatcher'
    """
    # Resolve each requested node id to its real (canonical) path.
    nodes_bunch = [self.get_node(u)[1][0] for u in nodes_bunch]

    # New dispatcher over the induced subgraph of the requested nodes.
    sub_dsp = self.copy_structure(
        dmap=self.dmap.subgraph(nodes_bunch).copy()
    )

    # Namespace shortcuts for speed.
    nodes, dmap_out_degree = sub_dsp.nodes, sub_dsp.dmap.out_degree
    dmap_dv, dmap_rm_edge = self.default_values, sub_dsp.dmap.remove_edge
    dmap_rm_node = sub_dsp.dmap.remove_node

    # Remove function nodes whose inputs are not all inside nodes_bunch.
    for u in nodes_bunch:
        n = nodes[u].get('inputs', None)  # Function inputs.
        # Not all inputs are available.
        if n is not None and not set(n).issubset(nodes_bunch):
            dmap_rm_node(u)  # Remove function node.

    # Remove the explicitly excluded edges.
    if edges_bunch is not None:
        for e in edges_bunch:  # Iterate sub-graph edges.
            dmap_rm_edge(*e)  # Remove edge.

    # Remove function nodes left with no outputs after the pruning above.
    for u in [u for u, n in sub_dsp.dmap.nodes.items()
              if n['type'] == 'function']:
        # noinspection PyCallingNonCallable
        if not dmap_out_degree(u):  # No outputs.
            dmap_rm_node(u)  # Remove function node.

    from networkx import isolates

    # Remove isolate nodes from sub-graph.
    sub_dsp.dmap.remove_nodes_from(list(isolates(sub_dsp.dmap)))

    # Keep only the default values of nodes that survived the pruning.
    sub_dsp.default_values = {k: dmap_dv[k] for k in dmap_dv if k in nodes}

    return sub_dsp
Returns the sub-dispatcher induced by given node and edge bunches. The induced sub-dispatcher contains the available nodes in nodes_bunch and edges between those nodes, excluding those that are in edges_bunch. The available nodes are non isolated nodes and function nodes that have all inputs and at least one output. :param nodes_bunch: A container of node ids which will be iterated through once. :type nodes_bunch: list[str], iterable :param edges_bunch: A container of edge ids that will be removed. :type edges_bunch: list[(str, str)], iterable, optional :return: A dispatcher. :rtype: Dispatcher .. seealso:: :func:`get_sub_dsp_from_workflow` .. note:: The sub-dispatcher edge or node attributes just point to the original dispatcher. So changes to the node or edge structure will not be reflected in the original dispatcher map while changes to the attributes will. **--------------------------------------------------------------------** **Example**: A dispatcher with a two functions `fun1` and `fun2`: .. dispatcher:: dsp :opt: graph_attr={'ratio': '1'} >>> dsp = Dispatcher(name='Dispatcher') >>> dsp.add_function(function_id='fun1', inputs=['a', 'b'], ... outputs=['c', 'd']) 'fun1' >>> dsp.add_function(function_id='fun2', inputs=['a', 'd'], ... outputs=['c', 'e']) 'fun2' Get the sub-dispatcher induced by given nodes bunch:: >>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2']) .. dispatcher:: sub_dsp :opt: graph_attr={'ratio': '1'} >>> sub_dsp.name = 'Sub-Dispatcher'
def from_string(cls, constraint):
    """
    :param str constraint: The string representation of a constraint
    :rtype: :class:`MarathonConstraint`
    """
    parts = constraint.split(':')
    parsed = cls.from_json(parts)
    if parsed:
        return parsed
    raise ValueError("Invalid string format. "
                     "Expected `field:operator:value`")
:param str constraint: The string representation of a constraint :rtype: :class:`MarathonConstraint`
def eval_command(self, cmd):
    """
    Thread func to allow restarting / stopping of threads, for example
    when receiving a connection reset info message from the wss server.

    Unknown commands are silently ignored.

    :return:
    """
    if cmd == 'stop':
        self.stop()
    elif cmd == 'restart':
        self.restart(soft=True)
Thread func to allow restarting / stopping of threads, for example when receiving a connection reset info message from the wss server. :return:
def _module_name(self) -> str: """Module name of the wrapped function.""" name = self.f.__module__ if name == '__main__': return importer.main_module_name() return name
Module name of the wrapped function.
def mappedPolygon(self, polygon, path=None, percent=0.5):
    """
    Maps the input polygon to the input path; used when drawing items
    along the path.  If no specific path is supplied, then this object's
    own path will be used.  It will rotate and move the polygon according
    to the input percentage.

    :param      polygon | <QPolygonF>
                path    | <QPainterPath>
                percent | <float>

    :return     <QPolygonF> mapped_poly
    """
    translatePerc = percent
    anglePerc = percent

    # Clamp the angle percentage into [0.05, 0.98]: sampling the tangent
    # angle at the extreme ends of the path gives a poor rotation angle.
    if 0.95 <= anglePerc:
        anglePerc = 0.98
    elif anglePerc <= 0.05:
        anglePerc = 0.05

    if not path:
        path = self.path()
    # Nothing to map onto: return an empty polygon.
    if not (path and path.length()):
        return QPolygonF()

    # transform the polygon to the path
    point = path.pointAtPercent(translatePerc)
    angle = path.angleAtPercent(anglePerc)

    # rotate about the 0 axis
    transform = QTransform().rotate(-angle)
    polygon = transform.map(polygon)

    # move to the translation point
    transform = QTransform().translate(point.x(), point.y())

    # create the rotated polygon
    mapped_poly = transform.map(polygon)
    # NOTE(review): every mapped polygon is retained on self._polygons --
    # presumably cleared elsewhere; confirm this does not grow unbounded.
    self._polygons.append(mapped_poly)

    return mapped_poly
Maps the inputed polygon to the inputed path \ used when drawing items along the path. If no \ specific path is supplied, then this object's own \ path will be used. It will rotate and move the \ polygon according to the inputed percentage. :param polygon <QPolygonF> :param path <QPainterPath> :param percent <float> :return <QPolygonF> mapped_poly
def create_gallery_folder(self, folder_name, scope='content/write'):
    """
    Create a new folder in the Mxit user's gallery

    User authentication required with the following scope: 'content/write'
    """
    user_token = self.oauth.get_user_token(scope)
    # URL-encode the folder name before embedding it in the path.
    return _post(
        token=user_token,
        uri='/user/media/' + urllib.quote(folder_name)
    )
Create a new folder in the Mxit user's gallery User authentication required with the following scope: 'content/write'
def accept(self):
    """Method invoked when OK button is clicked.

    Generates the impact report, then opens it either as a PDF or in the
    composer depending on which button triggered the dialog.

    :return: ``(ANALYSIS_FAILED_BAD_CODE, message)`` if report generation
        failed, otherwise the Qt base-class accept() result.
        NOTE(review): as a Qt slot the return value is normally ignored
        by the caller -- confirm whether anything consumes it.
    """
    self.save_state()
    self.dock.show_busy()

    # The order of the components matters for report generation.
    components = self.prepare_components()

    error_code, message = self.impact_function.generate_report(
        components, iface=self.iface)
    if error_code == ImpactReport.REPORT_GENERATION_FAILED:
        self.dock.hide_busy()
        LOGGER.info(tr(
            'The impact report could not be generated.'))
        send_error_message(self, message)
        LOGGER.info(message.to_text())
        return ANALYSIS_FAILED_BAD_CODE, message

    # Which button was clicked decides the output format.
    sender_name = self.sender().objectName()
    try:
        if sender_name == 'button_print_pdf':
            self.create_pdf = True
            self.open_as_pdf()
        else:
            self.create_pdf = False
            self.open_in_composer()
        self.dock.hide_busy()
    except Exception:
        # Best effort: clear the busy indicator even if opening the
        # report fails.  NOTE(review): the exception itself is swallowed
        # silently here -- consider at least logging it.
        self.dock.hide_busy()

    QtWidgets.QDialog.accept(self)
Method invoked when OK button is clicked.
def _forward_pass(self, images): """ Forward pass a list of images through the CNN """ # form image array num_images = len(images) if num_images == 0: return None for image in images: if not isinstance(image, Image): new_images = [] for image in images: if len(image.shape) > 2: new_images.append(ColorImage(image, frame='unspecified')) elif image.dtype == np.float32 or image.dtype == np.float64: new_images.append(DepthImage(image, frame='unspecified')) else: raise ValueError('Image type not understood') images = new_images break im_height = images[0].height im_width = images[0].width channels = images[0].channels tensor_channels = 3 image_arr = np.zeros([num_images, im_height, im_width, tensor_channels]) for j, image in enumerate(images): if channels == 3: image_arr[j,:,:,:] = image.raw_data else: image_arr[j,:,:,:] = np.tile(image.raw_data, [1,1,1,3]) # predict fp_start = time.time() final_blobs = self.cnn_.featurize(image_arr) fp_stop = time.time() logging.debug('Featurization took %f sec per image' %((fp_stop - fp_start) / len(images))) return final_blobs.reshape(final_blobs.shape[0], -1)
Forward pass a list of images through the CNN
def _get_cifar(directory, url):
    """Download and extract CIFAR to directory unless it is there.

    :param directory: target directory for the download and extraction
    :param url: URL of the gzipped CIFAR tarball
    """
    filename = os.path.basename(url)
    path = generator_utils.maybe_download(directory, filename, url)
    # Context manager so the archive handle is closed even if extraction
    # fails (the original leaked the open tarfile).
    with tarfile.open(path, "r:gz") as cifar_tar:
        cifar_tar.extractall(directory)
Download and extract CIFAR to directory unless it is there.
def get_job(
    self,
    name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Retrieves the specified job, whose status is OPEN or recently EXPIRED
    within the last 90 days.

    Example:
        >>> from google.cloud import talent_v4beta1
        >>>
        >>> client = talent_v4beta1.JobServiceClient()
        >>>
        >>> name = client.job_path('[PROJECT]', '[JOBS]')
        >>>
        >>> response = client.get_job(name)

    Args:
        name (str): Required. The resource name of the job to retrieve.

            The format is "projects/{project\_id}/jobs/{job\_id}", for
            example, "projects/api-test-project/jobs/1234".
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.talent_v4beta1.types.Job` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method with retry/timeout handling on first use
    # and memoize it for subsequent calls.
    if "get_job" not in self._inner_api_calls:
        wrapped = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_job,
            default_retry=self._method_configs["GetJob"].retry,
            default_timeout=self._method_configs["GetJob"].timeout,
            client_info=self._client_info,
        )
        self._inner_api_calls["get_job"] = wrapped

    request = job_service_pb2.GetJobRequest(name=name)
    return self._inner_api_calls["get_job"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Example: >>> from google.cloud import talent_v4beta1 >>> >>> client = talent_v4beta1.JobServiceClient() >>> >>> name = client.job_path('[PROJECT]', '[JOBS]') >>> >>> response = client.get_job(name) Args: name (str): Required. The resource name of the job to retrieve. The format is "projects/{project\_id}/jobs/{job\_id}", for example, "projects/api-test-project/jobs/1234". retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.talent_v4beta1.types.Job` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def gen_table(self):
    """Generate the lookup table for the table-driven CRC algorithm.

    The Python version cannot handle tables of an index width other
    than 8; see the generated C code for tables with different sizes.

    :return: list of ``1 << TableIdxWidth`` register values
    """
    table_length = 1 << self.TableIdxWidth
    top_bit = self.MSB_Mask << self.CrcShift
    shifted_poly = self.Poly << self.CrcShift
    tbl = []
    for idx in range(table_length):
        reg = idx
        if self.ReflectIn:
            reg = self.reflect(reg, self.TableIdxWidth)
        # Align the index with the register's most significant bits.
        reg <<= self.Width - self.TableIdxWidth + self.CrcShift
        for _ in range(self.TableIdxWidth):
            if reg & top_bit:
                reg = (reg << 1) ^ shifted_poly
            else:
                reg <<= 1
        if self.ReflectIn:
            reg = self.reflect(reg >> self.CrcShift, self.Width) << self.CrcShift
        tbl.append(reg & (self.Mask << self.CrcShift))
    return tbl
This function generates the CRC table used for the table_driven CRC algorithm. The Python version cannot handle tables of an index width other than 8. See the generated C code for tables with different sizes instead.
def send(self):
    """Sends the broadcast message.

    Assembles the message payload (text, link-parsing entities and any
    photo / attachment / headline / read-more annotations), uploading
    files as needed, and posts it to the broadcast channel.

    :returns: tuple of (:class:`adnpy.models.Message`, :class:`adnpy.models.APIMeta`)
    """
    # Markdown link parsing implies plain link parsing as well.
    parse_links = self.parse_links or self.parse_markdown_links
    message = {
        'annotations': [],
        'entities': {
            'parse_links': parse_links,
            'parse_markdown_links': self.parse_markdown_links,
        }
    }
    # Upload the photo and attach it as an oembed annotation.
    if self.photo:
        photo, photo_meta = _upload_file(self.api, self.photo)
        message['annotations'].append({
            'type': 'net.app.core.oembed',
            'value': {
                '+net.app.core.file': {
                    'file_id': photo.id,
                    'file_token': photo.file_token,
                    'format': 'oembed',
                }
            }
        })
    # Upload the attachment and reference it as a file-list annotation.
    if self.attachment:
        attachment, attachment_meta = _upload_file(self.api, self.attachment)
        message['annotations'].append({
            'type': 'net.app.core.attachments',
            'value': {
                '+net.app.core.file_list': [
                    {
                        'file_id': attachment.id,
                        'file_token': attachment.file_token,
                        'format': 'metadata',
                    }
                ]
            }
        })
    # A message without text is flagged as machine-only.
    if self.text:
        message['text'] = self.text
    else:
        message['machine_only'] = True
    if self.headline:
        message['annotations'].append({
            'type': 'net.app.core.broadcast.message.metadata',
            'value': {
                'subject': self.headline,
            },
        })
    if self.read_more_link:
        message['annotations'].append({
            'type': 'net.app.core.crosspost',
            'value': {
                'canonical_url': self.read_more_link,
            }
        })
    return self.api.create_message(self.channel_id, data=message)
Sends the broadcast message. :returns: tuple of (:class:`adnpy.models.Message`, :class:`adnpy.models.APIMeta`)
def is_expanded(request, key):
    """Return True if ``key`` is listed in the request's expand parameter.

    The ``expand`` query parameter is a comma-separated list of (possibly
    dotted) field paths; every dotted component counts as expanded.  The
    special value ``~all`` expands everything.

    :param request: request object exposing ``query_params``
    :param key: field name to check
    :return: bool
    """
    expand = request.query_params.get("expand", "")
    expand_fields = []
    # Fix: the original shadowed the loop variable (`for e in e.split`)
    # inside a pointless identity comprehension.
    for field_path in expand.split(","):
        # "a.b.c" marks "a", "b" and "c" all as expanded.
        expand_fields.extend(field_path.split("."))
    return "~all" in expand_fields or key in expand_fields
Examines request object to return boolean of whether passed field is expanded.
def _cache(self, response, key):
    """Cache a retrieved template for subsequent 304 checking.

    Stores the response payload together with its retrieval time in
    ``self.templates`` under ``key``.

    :param response: HTTP response whose ``.json()`` body is the template
    :param key: name under which to cache the template
    :return: a deep copy of the template payload
    """
    # Parse the body once instead of three separate .json() calls.
    template = response.json()
    # cache template and retrieval time for subsequent calls
    thetime = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone("GMT"))
    self.templates[key] = {"tmplt": template, "updated": thetime}
    # Deep-copy so callers cannot mutate the cached template.
    return copy.deepcopy(template)
Add a retrieved template to the cache for 304 checking accepts a dict and key name, adds the retrieval time, and adds both to self.templates as a new dict using the specified key
def load_user():
    """Read user config file and return it as a dict.

    The config file is executed as Python; names starting with an
    underscore (including the ``__builtins__`` entry that ``exec``
    injects) are stripped from the result.

    NOTE: executing the config file runs arbitrary code -- the file must
    be trusted.
    """
    config_path = get_user_config_path()
    namespace = {}
    # TODO: This may be overkill and too slow just for reading a config file
    with open(config_path) as f:
        code = compile(f.read(), config_path, 'exec')
        exec(code, namespace)
    # Keep only public settings (drops the six dependency the original
    # used just to list the keys).
    return {k: v for k, v in namespace.items() if not k.startswith('_')}
Read user config file and return it as a dict.
async def Actions(self, entities):
    '''
    entities : typing.Sequence[~Entity]
    Returns -> typing.Sequence[~ActionResult]
    '''
    # Build the RPC request for the Action facade (version 2).
    params = {'entities': entities}
    msg = dict(type='Action',
               request='Actions',
               version=2,
               params=params)
    reply = await self.rpc(msg)
    return reply
entities : typing.Sequence[~Entity] Returns -> typing.Sequence[~ActionResult]
def open_remote_url(urls, **kwargs):
    """Open the url and check that it stores a file.

    Tries each candidate URL in turn and returns the first streaming
    response that is not an HTML page; returns None when all fail.

    Args:
        :urls: Endpoint to take the file
    """
    candidates = [urls] if isinstance(urls, str) else urls
    for url in candidates:
        try:
            web_file = requests.get(url, stream=True, **kwargs)
            # An HTML content type means we got a web page, not the file.
            if 'html' in web_file.headers['content-type']:
                raise ValueError("HTML source file retrieved.")
            return web_file
        except Exception as ex:
            logger.error('Fail to open remote url - {}'.format(ex))
            continue
Open the url and check that it stores a file. Args: :urls: Endpoint to take the file
def touch(self, edited=False):
    """Mark the node as dirty.

    Args:
        edited (bool): Whether to set the edited time.
    """
    self._dirty = True
    now = datetime.datetime.utcnow()
    self.timestamps.updated = now
    if edited:
        self.timestamps.edited = now
Mark the node as dirty. Args: edited (bool): Whether to set the edited time.
def proc_request(self, req):
    """
    Pre-process a request through all processors in the stack, in
    order.  If any processor's proc_request() method returns a value
    other than None, that value is treated as a response and
    post-processed through the proc_response() methods of the
    processors preceding that processor in the stack.  (Note that the
    response returned this way is not passed to the processor's
    proc_response() method.)  Such a response will then be attached to
    a ShortCircuit exception.

    For convenience, returns the request passed to the method.

    :raises exc.ShortCircuit: when a processor short-circuits with a
        response
    """
    # enumerate() instead of the index-loop anti-pattern; idx is still
    # needed to post-process through the *preceding* processors only.
    for idx, processor in enumerate(self):
        resp = _safe_call(processor, 'proc_request', req)

        # Do we have a response?
        if resp is not None:
            # Short-circuit
            raise exc.ShortCircuit(self.proc_response(resp, idx - 1))

    # Return the request we were passed
    return req
Pre-process a request through all processors in the stack, in order. If any processor's proc_request() method returns a value other than None, that value is treated as a response and post-processed through the proc_response() methods of the processors preceding that processor in the stack. (Note that the response returned this way is not passed to the processor's proc_response() method.) Such a response will then be attached to a ShortCircuit exception. For convenience, returns the request passed to the method.
def retrieve_interval(self, start_time, end_time, compute_missing=False):
    """
    Return the results for `query_function` on every `bucket_width` time
    period between `start_time` and `end_time`. Look for previously
    cached results to avoid recomputation.

    :param start_time: A datetime for the beginning of the range,
    aligned with `bucket_width`.
    :param end_time: A datetime for the end of the range, aligned with
    `bucket_width`.
    :param compute_missing: A boolean that, if True, will compute any
    non-cached results.
    """
    yield from self._compute_buckets(start_time, end_time,
                                     compute_missing=compute_missing)
Return the results for `query_function` on every `bucket_width` time period between `start_time` and `end_time`. Look for previously cached results to avoid recomputation. :param start_time: A datetime for the beginning of the range, aligned with `bucket_width`. :param end_time: A datetime for the end of the range, aligned with `bucket_width`. :param compute_missing: A boolean that, if True, will compute any non-cached results.
def filter_threshold(self, inst_rc, threshold, num_occur=1):
    '''
    Filter the matrix rows or columns based on num_occur values being
    above a threshold (in absolute value).
    '''
    frame = self.dat_to_df()
    filtered = run_filter.filter_threshold(frame, inst_rc, threshold,
                                           num_occur)
    self.df_to_dat(filtered)
Filter the matrix rows or columns based on num_occur values being above a threshold (in absolute value).
def validate_keys(self, *keys):
    """Validation helper to ensure that keys are present in data

    This method makes sure that all of keys received here are present in
    the data received from the caller.

    It is better to call this method in the `validate()` method of your
    event.  Not in the `clean()` one, since the first will be called
    locally, making it easier to debug things and find problems.
    """
    missing = set(keys).difference(set(self.data.keys()))
    if missing:
        raise ValidationError(
            'One of the following keys are missing from the '
            'event\'s data: {}'.format(', '.join(missing)))
    return True
Validation helper to ensure that keys are present in data This method makes sure that all of keys received here are present in the data received from the caller. It is better to call this method in the `validate()` method of your event. Not in the `clean()` one, since the first will be called locally, making it easier to debug things and find problems.
def is_marginable(self):
    """True if adding counts across this dimension axis is meaningful."""
    non_marginable_types = {DT.CA, DT.MR, DT.MR_CAT, DT.LOGICAL}
    return self.dimension_type not in non_marginable_types
True if adding counts across this dimension axis is meaningful.
def put(self, urls=None, **overrides):
    """Sets the acceptable HTTP method to PUT"""
    options = dict(overrides)
    if urls is not None:
        options['urls'] = urls
    return self.where(accept='PUT', **options)
Sets the acceptable HTTP method to PUT
def are_ilx(self, ilx_ids):
    ''' Checks list of objects to see if they are usable ILX IDs '''
    results = []
    for raw_id in ilx_ids:
        # Strip URL noise ("http", dots, slashes) before the lookup.
        cleaned = raw_id.replace('http', '').replace('.', '').replace('/', '')
        data, success = self.get_data_from_ilx(cleaned)
        # Failed lookups contribute an empty dict so positions line up
        # with the input list.
        results.append(data['data'] if success else {})
    return results
Checks list of objects to see if they are usable ILX IDs
def meanvR(self,R,t=0.,nsigma=None,deg=False,phi=0.,
           epsrel=1.e-02,epsabs=1.e-05,
           grid=None,gridpoints=101,returnGrid=False,
           surfacemass=None,
           hierarchgrid=False,nlevels=2,integrate_method='dopr54_c'):
    """
    NAME:

       meanvR

    PURPOSE:

       calculate the mean vR of the velocity distribution at (R,phi)

    INPUT:

       R - radius at which to calculate the moment(/ro) (can be Quantity)

       phi= azimuth (rad unless deg=True; can be Quantity)

       t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)

       surfacemass= if set use this pre-calculated surfacemass

       nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)

       deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity

       epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF)

       grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid

       gridpoints= number of points to use for the grid in 1D (default=101)

       returnGrid= if True, return the grid object (default=False)

       hierarchgrid= if True, use a hierarchical grid (default=False)

       nlevels= number of hierarchical levels for the hierarchical grid

       integrate_method= orbit.integrate method argument

    OUTPUT:

       mean vR

    HISTORY:

       2011-03-31 - Written - Bovy (NYU)
    """
    # The mean is computed as <vR> = M(1,0) / M(0,0), where M(n,m) is the
    # (vR^n, vT^m) velocity moment of the surface mass returned by
    # self.vmomentsurfacemass.
    if isinstance(grid,evolveddiskdfGrid) or \
            isinstance(grid,evolveddiskdfHierarchicalGrid):
        # A pre-built grid object was passed in; re-use it directly.
        grido= grid
        vmomentR= self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
                                          nsigma=nsigma,
                                          epsrel=epsrel,
                                          epsabs=epsabs,grid=grid,
                                          gridpoints=gridpoints,
                                          returnGrid=False,
                                          hierarchgrid=hierarchgrid,
                                          nlevels=nlevels,
                                          integrate_method=integrate_method)
    elif isinstance(grid,bool) and grid:
        #Precalculate the grid: ask vmomentsurfacemass to also return the
        #grid object so it can be re-used for the M(0,0) moment below.
        (vmomentR,grido)= self.vmomentsurfacemass(R,1,0,deg=deg,t=t,
                                                  nsigma=nsigma,phi=phi,
                                                  epsrel=epsrel,
                                                  epsabs=epsabs,grid=grid,
                                                  gridpoints=gridpoints,
                                                  returnGrid=True,
                                                  hierarchgrid=hierarchgrid,
                                                  nlevels=nlevels,
                                                  integrate_method=integrate_method)
    else:
        # No grid requested: direct integration.
        grido= False
        vmomentR= self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
                                          nsigma=nsigma,
                                          epsrel=epsrel,
                                          epsabs=epsabs,grid=grid,
                                          gridpoints=gridpoints,
                                          returnGrid=False,
                                          hierarchgrid=hierarchgrid,
                                          nlevels=nlevels,integrate_method=integrate_method)
    if surfacemass is None:
        # Zeroth moment (the surface mass itself), re-using any grid
        # computed above via grido.
        surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
                                             nsigma=nsigma,epsrel=epsrel,
                                             epsabs=epsabs,grid=grido,
                                             gridpoints=gridpoints,
                                             returnGrid=False,
                                             hierarchgrid=hierarchgrid,
                                             nlevels=nlevels,integrate_method=integrate_method)
    out= vmomentR/surfacemass
    # Only hand the grid back when the caller either asked us to build one
    # or supplied one in the first place.
    if returnGrid and ((isinstance(grid,bool) and grid) or
                       isinstance(grid,evolveddiskdfGrid) or
                       isinstance(grid,evolveddiskdfHierarchicalGrid)):
        return (out,grido)
    else:
        return out
NAME: meanvR PURPOSE: calculate the mean vR of the velocity distribution at (R,phi) INPUT: R - radius at which to calculate the moment(/ro) (can be Quantity) phi= azimuth (rad unless deg=True; can be Quantity) t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity) surfacemass= if set use this pre-calculated surfacemass nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous) deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF) grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid gridpoints= number of points to use for the grid in 1D (default=101) returnGrid= if True, return the grid object (default=False) hierarchgrid= if True, use a hierarchical grid (default=False) nlevels= number of hierarchical levels for the hierarchical grid integrate_method= orbit.integrate method argument OUTPUT: mean vR HISTORY: 2011-03-31 - Written - Bovy (NYU)
def serialize(self, obj, level=0, objname=None, topLevelKeysToIgnore=None,
              toBytes=True):
    """
    Create a string representation of the given object.

    Examples:
    ::
    >>> serialize("str")
    'str'
    >>> serialize([1,2,3,4,5])
    '1,2,3,4,5'
    >>> signing.serialize({1:'a', 2:'b'})
    '1:a|2:b'
    >>> signing.serialize({1:'a', 2:'b', 3:[1,{2:'k'}]})
    '1:a|2:b|3:1,2:k'

    :param obj: the object to serialize
    :param level: a parameter used internally for recursion to serialize nested
    data structures
    :param topLevelKeysToIgnore: the list of top level keys to ignore for
    serialization
    :param toBytes: if True (default), encode the final result as UTF-8 bytes;
    recursive calls pass False so intermediate results stay str
    :return: a string representation of `obj`
    """
    res = None
    if not isinstance(obj, acceptableTypes):
        # Unsupported type: report it and fall through (res stays None).
        error("invalid type found {}: {}".format(objname, obj))
    elif isinstance(obj, str):
        res = obj
    elif isinstance(obj, dict):
        if level > 0:
            keys = list(obj.keys())
        else:
            # Top-level dict only: filter out the ignored keys.
            topLevelKeysToIgnore = topLevelKeysToIgnore or []
            keys = [k for k in obj.keys() if k not in topLevelKeysToIgnore]
        # Sort keys so the serialization is deterministic.
        keys.sort()
        strs = []
        for k in keys:
            # Build a dotted path name for error reporting in recursion.
            onm = ".".join([str(objname), str(k)]) if objname else k
            strs.append(
                str(k) + ":" + self.serialize(obj[k], level + 1, onm,
                                              toBytes=False))
        res = "|".join(strs)
    elif isinstance(obj, Iterable):
        strs = []
        for o in obj:
            strs.append(self.serialize(
                o, level + 1, objname, toBytes=False))
        res = ",".join(strs)
    elif obj is None:
        res = ""
    else:
        res = str(obj)
    # logger.trace("serialized msg {} into {}".format(obj, res))
    if not toBytes:
        return res
    return res.encode('utf-8')
Create a string representation of the given object. Examples: :: >>> serialize("str") 'str' >>> serialize([1,2,3,4,5]) '1,2,3,4,5' >>> signing.serialize({1:'a', 2:'b'}) '1:a|2:b' >>> signing.serialize({1:'a', 2:'b', 3:[1,{2:'k'}]}) '1:a|2:b|3:1,2:k' :param obj: the object to serialize :param level: a parameter used internally for recursion to serialize nested data structures :param topLevelKeysToIgnore: the list of top level keys to ignore for serialization :return: a string representation of `obj`
def Pack(cls, obj, version):
    """Pack the given object using Ad Manager-specific logic.

    Args:
      cls: the class providing the AdManagerDateTimePacker hook.
      obj: an object to be packed for SOAP using Ad Manager-specific logic, if
          applicable.
      version: the version of the current API, e.g. 'v201811'

    Returns:
      The given object packed with Ad Manager-specific logic for SOAP, if
      applicable. Otherwise, returns the given object unmodified.
    """
    # Dates and datetimes need Ad Manager's custom SOAP representation;
    # everything else passes through untouched.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        obj = cls.AdManagerDateTimePacker(obj, version)
    return obj
Pack the given object using Ad Manager-specific logic. Args: obj: an object to be packed for SOAP using Ad Manager-specific logic, if applicable. version: the version of the current API, e.g. 'v201811' Returns: The given object packed with Ad Manager-specific logic for SOAP, if applicable. Otherwise, returns the given object unmodified.
def get_dataset(self, key, info):
    """Load a dataset."""
    # Read the raw band, then wrap it in a DataArray with the
    # conventional (y, x) dimension names.
    band = self.read_band(key, info)
    return xr.DataArray(band, dims=['y', 'x'])
Load a dataset.
def set_expected_length(self, expected_length):
    """stub"""
    # Validate against this field's metadata before storing.
    metadata = self.get_expected_length_metadata()
    valid = self.my_osid_object_form._is_valid_integer(expected_length,
                                                       metadata)
    if not valid:
        raise InvalidArgument('expectedLength')
    self.my_osid_object_form._my_map['expectedLength'] = expected_length
stub
def load_toolbar_item(import_path, *args, **kwargs):
    """
    Load an item in the toolbar

    :param import_path: the dotted python path to class or function.
    :param args: For classes, any arguments to pass to the constructor.
    :param kwargs: For classes, any keyword arguments to pass to the
        constructor.
    """
    # A sequence of paths becomes a Group of recursively-loaded children.
    if isinstance(import_path, (tuple, list)):
        return Group(*[load_toolbar_item(path) for path in import_path])

    if isinstance(import_path, basestring):
        symbol = _import_symbol(import_path, 'STAFF_TOOLBAR_ITEMS')
    else:
        symbol = import_path

    if inspect.isclass(symbol):
        # Instantiate the class.
        symbol = symbol(*args, **kwargs)

    if not callable(symbol):
        raise ImproperlyConfigured(
            "The {0} in {1} is not callable!".format(
                import_path, 'STAFF_TOOLBAR_ITEMS'))
    return symbol
Load an item in the toolbar :param import_path: the dotted python path to class or function. :param args: For classes, any arguments to pass to the constructor. :param kwargs: For classes, any keyword arguments to pass to the constructor.
async def send_command(self, command):
    """
    Sends the given command to the server.

    Args:
        command (str): Command to send to the server.

    Raises:
        ConnectionResetError: If the connection with the server is lost.
            (Shouldn't it raise BrokenPipeError too ?)
    """
    # Commands are CRLF-terminated ASCII; unencodable characters are
    # backslash-escaped rather than raising.
    payload = "{}\r\n".format(command)
    self.write(payload.encode("ascii", errors="backslashreplace"))
    # Don't forget to drain or the command will stay buffered:
    await self.drain()
Sends the given command to the server. Args: command (str): Command to send to the server. Raises: ConnectionResetError: If the connection with the server is lost. (Shouldn't it raise BrokenPipeError too ?)
def stop(name, call=None):
    '''
    Stop a VM.

    .. versionadded:: 2016.3.0

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop my-vm
    '''
    if call != 'action':
        # BUG FIX: the message previously said "The start action ...",
        # copied from the start function; it now names the stop action.
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    log.info('Stopping node %s', name)

    return vm_action(name, kwargs={'action': 'stop'}, call=call)
Stop a VM. .. versionadded:: 2016.3.0 name The name of the VM to stop. CLI Example: .. code-block:: bash salt-cloud -a stop my-vm
def get_value(record, key, default=None):
    """Return item as `dict.__getitem__` but using 'smart queries'.

    .. note::

        Accessing one value in a normal way, meaning d['a'], is almost as
        fast as accessing a regular dictionary. But using the special
        name convention is a bit slower than using the regular access:

        .. code-block:: python

            >>> %timeit x = dd['a[0].b']
            100000 loops, best of 3: 3.94 us per loop

            >>> %timeit x = dd['a'][0]['b']
            1000000 loops, best of 3: 598 ns per loop

    :param record: mapping (or top-level list) to query
    :param key: plain key or dotted path, e.g. ``'a[0].b'``
    :param default: value returned when the path cannot be resolved
    """
    def getitem(k, v, default):
        # Resolve one path component `k` against the current value `v`.
        if isinstance(v, string_types):
            # Strings cannot be descended into; signal "not found".
            raise KeyError
        elif isinstance(v, dict):
            return v[k]
        elif ']' in k:
            # `k` looks like an index/slice component such as '0]' or '1:3]'.
            # 'n' is shorthand for the last element (-1).
            k = k[:-1].replace('n', '-1')
            # Work around for list indexes and slices
            try:
                return v[int(k)]
            except IndexError:
                return default
            except ValueError:
                # Not a plain integer: treat it as a slice 'start:stop[:step]'.
                return v[slice(*map(
                    lambda x: int(x.strip()) if x.strip() else None,
                    k.split(':')
                ))]
        else:
            # `v` is a list of containers: apply the key to every element,
            # silently skipping those that do not have it.
            tmp = []
            for inner_v in v:
                try:
                    tmp.append(getitem(k, inner_v, default))
                except KeyError:
                    continue
            return tmp

    # Wrap a top-level list in a dict
    if isinstance(record, list):
        record = {'record': record}
        key = '.'.join(['record', key])

    # Check if we are using python regular keys
    try:
        return record[key]
    except KeyError:
        pass

    # Fall back to the smart-query path: split on the separators defined by
    # SPLIT_KEY_PATTERN and walk the structure one component at a time.
    keys = SPLIT_KEY_PATTERN.split(key)
    value = record
    for k in keys:
        try:
            value = getitem(k, value, default)
        except KeyError:
            return default
    return value
Return item as `dict.__getitem__` but using 'smart queries'. .. note:: Accessing one value in a normal way, meaning d['a'], is almost as fast as accessing a regular dictionary. But using the special name convention is a bit slower than using the regular access: .. code-block:: python >>> %timeit x = dd['a[0].b'] 100000 loops, best of 3: 3.94 us per loop >>> %timeit x = dd['a'][0]['b'] 1000000 loops, best of 3: 598 ns per loop
def create_content_instance(content_plugin_class, page, placeholder_name='main', **kwargs):
    """
    Creates a content instance from a content plugin class.

    :param content_plugin_class: The class of the content plugin.
    :param page: The fluent_page instance to create the content instance on.
    :param placeholder_name: The placeholder name defined in the template.
        [DEFAULT: main]
    :param kwargs: Additional keyword arguments to be used in the content
        instance creation.
    :return: The content instance created.
    """
    # Re-use the first placeholder already bound to this slot, or create
    # a new one if the slot is empty.
    existing = page.get_placeholder_by_slot(placeholder_name)
    if existing.exists():
        placeholder = existing[0]
    else:
        placeholder = page.create_placeholder(placeholder_name)

    # Obtain the content type for the page instance class.
    parent_ct = ContentType.objects.get_for_model(type(page))

    # Create the actual plugin instance.
    try:
        return content_plugin_class.objects.create(
            parent_type=parent_ct,
            parent_id=page.id,
            placeholder=placeholder,
            **kwargs
        )
    except TypeError:
        raise Exception(
            'Could not create content item instance, ensure you '
            'have all required field values for the Model.'
        )
Creates a content instance from a content plugin class. :param content_plugin_class: The class of the content plugin. :param page: The fluent_page instance to create the content instance one. :param placeholder_name: The placeholder name defined in the template. [DEFAULT: main] :param kwargs: Additional keyword arguments to be used in the content instance creation. :return: The content instance created.
def run_postdecode_hooks(decode_hook_args, dataset_split):
    """Run hooks after decodes have run.

    Each hook is called with `decode_hook_args` inside its own fresh TF
    graph; any summaries a hook returns are written to a FileWriter under
    the decode hparams' summaries log dir, tagged with the latest
    checkpoint's global step.
    """
    hooks = decode_hook_args.problem.decode_hooks
    if not hooks:
        return
    # Summaries are tagged with the step of the most recent checkpoint;
    # without a checkpoint there is nothing meaningful to tag against.
    global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir)
    if global_step is None:
        tf.logging.info(
            "Skipping decode hooks because no checkpoint yet available.")
        return
    tf.logging.info("Running decode hooks.")
    # Log dir is a sibling of the first output dir, optionally suffixed
    # with the dataset split name.
    parent_dir = os.path.join(decode_hook_args.output_dirs[0], os.pardir)
    child_dir = decode_hook_args.decode_hparams.summaries_log_dir
    if dataset_split is not None:
        child_dir += "_{}".format(dataset_split)
    final_dir = os.path.join(parent_dir, child_dir)
    summary_writer = tf.summary.FileWriter(final_dir)

    for hook in hooks:
        # Isolate each hook in case it creates TF ops
        with tf.Graph().as_default():
            summaries = hook(decode_hook_args)
        if summaries:
            summary = tf.Summary(value=list(summaries))
            summary_writer.add_summary(summary, global_step)
    summary_writer.close()
    tf.logging.info("Decode hooks done.")
Run hooks after decodes have run.
def __deal_with_pagination(self, url, method, params, data):
    """
    Perform multiple calls in order to have a full list of elements
    when the API are "paginated". (content list is divided in more
    than one page)
    """
    all_data = data
    # Keep following the "next" page link until the API stops providing one.
    while data.get("links", {}).get("pages", {}).get("next"):
        next_link = data["links"]["pages"]["next"]
        url, query = next_link.split("?", 1)
        # Merge the query parameters
        params.update(urlparse.parse_qs(query))
        data = self.__perform_request(url, method, params).json()
        # Merge the dictionaries
        for key, value in data.items():
            if isinstance(value, list) and key in all_data:
                all_data[key] += value
            else:
                all_data[key] = value
    return all_data
Perform multiple calls in order to have a full list of elements when the API are "paginated". (content list is divided in more than one page)
def param_texture_from_rasters(script, textName="TEMP3D.png", texsize=1024,
                               colorCorrection=True,
                               colorCorrectionFilterSize=1,
                               useDistanceWeight=True,
                               useImgBorderWeight=True, useAlphaWeight=False,
                               cleanIsolatedTriangles=True,
                               stretchingAllowed=False, textureGutter=4):
    """Set texture

    Builds the XML for MeshLab's "Parameterization + texturing from
    registered rasters" filter and appends it to `script` via
    util.write_filter. Boolean arguments are lowered to "true"/"false"
    for the XML; parameter meanings follow the tooltips embedded below.
    """
    filter_xml = ''.join([
        ' <filter name="Parameterization + texturing from registered rasters">\n',
        ' <Param name="textureSize"',
        'value="%d"' % texsize,
        'description="Texture size"',
        'type="RichInt"',
        'tooltip="Specifies the dimension of the generated texture"',
        '/>\n',
        ' <Param name="textureName"',
        'value="%s"' % textName,
        'description="Texture name"',
        'type="RichString"',
        'tooltip="Specifies the name of the file into which the texture image will be saved"',
        '/>\n',
        ' <Param name="colorCorrection"',
        'value="%s"' % str(colorCorrection).lower(),
        'description="Color correction"',
        'type="RichBool"',
        'tooltip="If true, the final texture is corrected so as to ensure seamless transitions"',
        '/>\n',
        ' <Param name="colorCorrectionFilterSize"',
        'value="%d"' % colorCorrectionFilterSize,
        'description="Color correction filter"',
        'type="RichInt"',
        'tooltip="It is the radius (in pixel) of the kernel that is used to compute the difference between corresponding texels in different rasters. Default is 1 that generate a 3x3 kernel. Highest values increase the robustness of the color correction process in the case of strong image-to-geometry misalignments"',
        '/>\n',
        ' <Param name="useDistanceWeight"',
        'value="%s"' % str(useDistanceWeight).lower(),
        'description="Use distance weight"',
        'type="RichBool"',
        'tooltip="Includes a weight accounting for the distance to the camera during the computation of reference images"',
        '/>\n',
        ' <Param name="useImgBorderWeight"',
        'value="%s"' % str(useImgBorderWeight).lower(),
        'description="Use image border weight"',
        'type="RichBool"',
        'tooltip="Includes a weight accounting for the distance to the image border during the computation of reference images"',
        '/>\n',
        ' <Param name="useAlphaWeight"',
        'value="%s"' % str(useAlphaWeight).lower(),
        'description="Use image alpha weight"',
        'type="RichBool"',
        'tooltip="If true, alpha channel of the image is used as additional weight. In this way it is possible to mask-out parts of the images that should not be projected on the mesh. Please note this is not a transparency effect, but just influences the weigthing between different images"',
        '/>\n',
        ' <Param name="cleanIsolatedTriangles"',
        'value="%s"' % str(cleanIsolatedTriangles).lower(),
        'description="Clean isolated triangles"',
        'type="RichBool"',
        'tooltip="Remove all patches compound of a single triangle by aggregating them to adjacent patches"',
        '/>\n',
        ' <Param name="stretchingAllowed"',
        'value="%s"' % str(stretchingAllowed).lower(),
        'description="UV stretching"',
        'type="RichBool"',
        'tooltip="If true, texture coordinates are stretched so as to cover the full interval [0,1] for both directions"',
        '/>\n',
        ' <Param name="textureGutter"',
        'value="%d"' % textureGutter,
        'description="Texture gutter"',
        'type="RichInt"',
        'tooltip="Extra boundary to add to each patch before packing in texture space (in pixels)"',
        '/>\n',
        ' </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
Set texture
def is_valid(self, csdl):
    """ Checks if the given CSDL is valid.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/validate

        :param csdl: CSDL to validate
        :type csdl: str
        :returns: Boolean indicating the validity of the CSDL
        :rtype: bool
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    try:
        self.validate(csdl)
    except DataSiftApiException as e:
        # A 400 response means "invalid CSDL"; anything else is a real
        # API failure and is re-raised.
        if e.response.status_code == 400:
            return False
        raise e
    return True
Checks if the given CSDL is valid. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/validate :param csdl: CSDL to validate :type csdl: str :returns: Boolean indicating the validity of the CSDL :rtype: bool :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def ensure_dir_exists(directory):
    "Creates local directories if they don't exist."
    # Cloud-storage paths are not local; there is nothing to create.
    if directory.startswith('gs://'):
        return
    if os.path.exists(directory):
        return
    dbg("Making dir {}".format(directory))
    os.makedirs(directory, exist_ok=True)
Creates local directories if they don't exist.
def save_facts(self, path, mode=SaveMode.LOCAL_SAVE):
    """Save the facts in the system to the specified file.

    The Python equivalent of the CLIPS save-facts command.
    """
    count = lib.EnvSaveFacts(self._env, path.encode(), mode)
    # CLIPS reports failure with -1.
    if count == -1:
        raise CLIPSError(self._env)
    return count
Save the facts in the system to the specified file. The Python equivalent of the CLIPS save-facts command.
def show_tooltip(self, title, text, color=_DEFAULT_TITLE_COLOR,
                 at_line=None, at_position=None, at_point=None):
    """Show tooltip.

    Tooltips will disappear if mouse hovers them. They are meant for
    quick inspections.
    """
    # Nothing to show for missing or empty text.
    if text is None or len(text) == 0:
        return
    # Find position of calltip
    point = self._calculate_position(
        at_line=at_line,
        at_position=at_position,
        at_point=at_point,
    )
    # Format text
    tiptext = self._format_text(title, text, color, ellide=True)

    self._update_stylesheet(self.tooltip_widget)

    # Display tooltip
    self.tooltip_widget.show_tip(point, tiptext)
    self.tooltip_widget.show()
Show tooltip. Tooltips will disappear if mouse hovers them. They are meant for quick inspections.
def register(self, mimetype, processor):
    """Register passed `processor` for passed `mimetype`."""
    # Skip only when this exact processor is already registered for the
    # mimetype; otherwise append (creating the list on first use).
    already_registered = mimetype in self and processor in self[mimetype]
    if not already_registered:
        self.setdefault(mimetype, []).append(processor)
Register passed `processor` for passed `mimetype`.
def deserialize(cls, key, network="bitcoin_testnet"):
    """Load the ExtendedBip32Key from a hex key.

    The key consists of

        * 4 byte version bytes (network key)
        * 1 byte depth:
            - 0x00 for master nodes,
            - 0x01 for level-1 descendants, ....
        * 4 byte fingerprint of the parent's key (0x00000000 if master key)
        * 4 byte child number. This is the number i in x_i = x_{par}/i,
          with x_i the key being serialized. This is encoded in MSB order.
          (0x00000000 if master key)
        * 32 bytes: the chain code
        * 33 bytes: the public key or private key data
          (0x02 + X or 0x03 + X for public keys, 0x00 + k for private keys)
          (Note that this also supports 0x04 + X + Y uncompressed points,
          but this is totally non-standard and this library won't even
          generate such data.)

    `key` may be raw bytes (78 or 110 bytes), their hexlified form, or a
    Base58Check string (111 chars); the two latter forms are normalized
    to bytes first.
    """
    network = Wallet.get_network(network)

    if len(key) in [78, (78 + 32)]:
        # we have a byte array, so pass
        pass
    else:
        key = ensure_bytes(key)
        if len(key) in [78 * 2, (78 + 32) * 2]:
            # we have a hexlified non-base58 key, continue!
            key = unhexlify(key)
        elif len(key) == 111:
            # We have a base58 encoded string
            key = base58.b58decode_check(key)
    # Now that we double checked the values, convert back to bytes because
    # they're easier to slice
    version, depth, parent_fingerprint, child, chain_code, key_data = (
        key[:4], key[4], key[5:9], key[9:13], key[13:45], key[45:])

    version_long = long_or_int(hexlify(version), 16)
    exponent = None
    pubkey = None
    point_type = key_data[0]
    # On Python 3, indexing bytes yields an int already; on Python 2 it
    # yields a 1-char string that must be ord()'d.
    if not isinstance(point_type, six.integer_types):
        point_type = ord(point_type)
    if point_type == 0:
        # Private key
        if version_long != network.EXT_SECRET_KEY:
            raise incompatible_network_exception_factory(
                network.NAME, network.EXT_SECRET_KEY,
                version)
        exponent = key_data[1:]
    elif point_type in [2, 3, 4]:
        # Compressed public coordinates
        if version_long != network.EXT_PUBLIC_KEY:
            raise incompatible_network_exception_factory(
                network.NAME, network.EXT_PUBLIC_KEY, version)
        pubkey = PublicKey.from_hex_key(key_data, network=network)
        # Even though this was generated from a compressed pubkey, we
        # want to store it as an uncompressed pubkey
        pubkey.compressed = False
    else:
        raise ValueError("Invalid key_data prefix, got %s" % point_type)

    def l(byte_seq):
        # Normalize a byte sequence (or pass through None/int) to a long.
        if byte_seq is None:
            return byte_seq
        elif isinstance(byte_seq, six.integer_types):
            return byte_seq
        return long_or_int(hexlify(byte_seq), 16)

    return cls(depth=l(depth),
               parent_fingerprint=l(parent_fingerprint),
               child_number=l(child),
               chain_code=l(chain_code),
               private_exponent=l(exponent),
               public_key=pubkey,
               network=network)
Load the ExtendedBip32Key from a hex key. The key consists of * 4 byte version bytes (network key) * 1 byte depth: - 0x00 for master nodes, - 0x01 for level-1 descendants, .... * 4 byte fingerprint of the parent's key (0x00000000 if master key) * 4 byte child number. This is the number i in x_i = x_{par}/i, with x_i the key being serialized. This is encoded in MSB order. (0x00000000 if master key) * 32 bytes: the chain code * 33 bytes: the public key or private key data (0x02 + X or 0x03 + X for public keys, 0x00 + k for private keys) (Note that this also supports 0x04 + X + Y uncompressed points, but this is totally non-standard and this library won't even generate such data.)
def _lookup(self, fail_on_missing=False, fail_on_found=False,
            include_debug_header=True, **kwargs):
    """
    =====API DOCS=====
    Attempt to perform a lookup that is expected to return a single result, and return the record.

    This method is a wrapper around `get` that strips out non-unique keys, and is used internally by
    `write` and `delete`.

    :param fail_on_missing: Flag that raise exception if no resource is found.
    :type fail_on_missing: bool
    :param fail_on_found: Flag that raise exception if a resource is found.
    :type fail_on_found: bool
    :param include_debug_header: Flag determining whether to print debug messages when querying
                                 Tower backend.
    :type include_debug_header: bool
    :param `**kwargs`: Keyword arguments list of available fields used for searching resource.
    :returns: A JSON object containing details of the resource returned by Tower backend.
    :rtype: dict
    :raises tower_cli.exceptions.BadRequest: When no field are provided in kwargs.
    :raises tower_cli.exceptions.Found: When a resource is found and fail_on_found flag is on.
    :raises tower_cli.exceptions.NotFound: When no resource is found and fail_on_missing flag is on.

    =====API DOCS=====
    """
    # Keep only the identity (unique) fields from kwargs for the lookup.
    read_params = {}
    for field_name in self.identity:
        if field_name in kwargs:
            read_params[field_name] = kwargs[field_name]

    # When 'id' is the sole identity field (and was not supplied above),
    # there is nothing meaningful to look up.
    if 'id' in self.identity and len(self.identity) == 1:
        return {}

    if not read_params:
        raise exc.BadRequest('Cannot reliably determine which record to write. Include an ID or unique '
                             'fields.')

    try:
        existing_data = self.get(include_debug_header=include_debug_header,
                                 **read_params)
        if fail_on_found:
            raise exc.Found('A record matching %s already exists, and you requested a failure in that case.' %
                            read_params)
        return existing_data
    except exc.NotFound:
        if fail_on_missing:
            # Re-raise NotFound with a caller-friendly message.
            raise exc.NotFound('A record matching %s does not exist, and you requested a failure in that case.' %
                               read_params)
        return {}
=====API DOCS===== Attempt to perform a lookup that is expected to return a single result, and return the record. This method is a wrapper around `get` that strips out non-unique keys, and is used internally by `write` and `delete`. :param fail_on_missing: Flag that raise exception if no resource is found. :type fail_on_missing: bool :param fail_on_found: Flag that raise exception if a resource is found. :type fail_on_found: bool :param include_debug_header: Flag determining whether to print debug messages when querying Tower backend. :type include_debug_header: bool :param `**kwargs`: Keyword arguments list of available fields used for searching resource. :returns: A JSON object containing details of the resource returned by Tower backend. :rtype: dict :raises tower_cli.exceptions.BadRequest: When no field are provided in kwargs. :raises tower_cli.exceptions.Found: When a resource is found and fail_on_found flag is on. :raises tower_cli.exceptions.NotFound: When no resource is found and fail_on_missing flag is on. =====API DOCS=====
def _validate_key(self, key):
    """
    Make sure the supplied key values are within the bounds specified
    by the corresponding dimension range and soft_range.
    """
    # The empty key is valid only for an object with no key dimensions.
    if key == () and len(self.kdims) == 0:
        return ()
    key = util.wrap_tuple(key)
    assert len(key) == len(self.kdims)
    for kdim, val in zip(self.kdims, key):
        # Effective bounds combine the hard range with the soft range.
        low, high = util.max_range([kdim.range, kdim.soft_range])
        if util.is_number(low) and util.isfinite(low) and val < low:
            raise KeyError("Key value %s below lower bound %s"
                           % (val, low))
        if util.is_number(high) and util.isfinite(high) and val > high:
            raise KeyError("Key value %s above upper bound %s"
                           % (val, high))
Make sure the supplied key values are within the bounds specified by the corresponding dimension range and soft_range.
def _write_standard(self, message, extra):
    '''
    Writes a standard log statement
    @param message: The message to write
    @param extra: The object to pull defaults from
    '''
    level = extra['level']
    if self.include_extra:
        del extra['timestamp']
        del extra['level']
        del extra['logger']
        if len(extra) > 0:
            message += " " + str(extra)
    # Dispatch to the logger method matching the level; unknown levels
    # fall back to debug, exactly like the original if/elif chain.
    writers = {
        'INFO': self.logger.info,
        'DEBUG': self.logger.debug,
        'WARNING': self.logger.warning,
        'ERROR': self.logger.error,
        'CRITICAL': self.logger.critical,
    }
    writers.get(level, self.logger.debug)(message)
Writes a standard log statement @param message: The message to write @param extra: The object to pull defaults from
def add_task(self, name=None, desc=None):
    """
    Adds object to list of grindstone['tasks'].

    :param name: unique task name; required.
    :param desc: optional description (may be None).
    :raises ValueError: if `name` is None or a task with that name
        already exists.
    """
    # BUG FIX: validate `name` before scanning existing tasks; previously
    # a None name was compared against every existing task's keys first,
    # which could misreport the error as "Task already exists".
    if name is None:
        # Raising errors is good, and makes tests easy.
        raise ValueError('Tasks `name` cannot be None')
    for task in self.get_tasks():
        if name in keys_of(task):
            raise ValueError('Task already exists')
    # desc can be None, so we can just append whatever we have
    self.grindstone['tasks'].append({name: desc})
Adds object to list of grindstone['tasks'].
def del_subkey(self, name):
    """Delete the named subkey, and any values or keys it contains."""
    # Make sure our handle has write access before attempting the delete.
    self.sam |= KEY_WRITE
    child = self.get_subkey(name)
    # Empty the subkey first; the registry refuses to delete non-empty keys.
    child.clear()
    _winreg.DeleteKey(child.parent.hkey, child.name)
Delete the named subkey, and any values or keys it contains.
def _CaptureRequestLogId(self):
    """Captures the request log id if possible.

    The request log id is stored inside the breakpoint labels.
    """
    # pylint: disable=not-callable
    if not callable(request_log_id_collector):
        return
    request_log_id = request_log_id_collector()
    if request_log_id:
        # We have a request_log_id, save it into the breakpoint labels
        self.breakpoint['labels'][
            labels.Breakpoint.REQUEST_LOG_ID] = request_log_id
Captures the request log id if possible. The request log id is stored inside the breakpoint labels.
def inspect_config(app):
    """Inspect the Sphinx configuration and update for slide-linking.

    If links from HTML to slides are enabled, make sure the sidebar
    configuration includes the template and add the necessary theme
    directory as a loader so the sidebar template can be located.

    If the sidebar configuration already includes ``slidelink.html``
    (in any key), the configuration will not be changed.

    If the configuration is not specified, we'll attempt to emulate
    what Sphinx does by default.
    """
    # avoid import cycles :/
    from hieroglyph import writer

    # only reconfigure Sphinx if we're generating HTML
    if app.builder.name not in HTML_BUILDERS:
        return

    if app.config.slide_link_html_to_slides:

        # add the slide theme dir as a Loader
        app.builder.templates.loaders.append(
            SphinxFileSystemLoader(
                os.path.join(
                    os.path.dirname(__file__),
                    'themes', 'slides',
                )
            )
        )

        # add the "show slides" sidebar template
        if not app.config.html_sidebars:
            # no sidebars explicitly defined, mimic the old style
            # behavior + slide links
            app.config.html_sidebars = {
                '**': [
                    'localtoc.html',
                    'relations.html',
                    'sourcelink.html',
                    SLIDELINK_TEMPLATE,
                    'searchbox.html',
                ],
            }
        else:
            # sidebars defined, add the template if needed
            included = False
            # BUG FIX: html_sidebars is a dict; iterating it directly
            # yielded keys (strings) and the (glob, templates) unpacking
            # failed/misbehaved. Iterate .items() to get real pairs.
            for glob, templates in app.config.html_sidebars.items():
                if SLIDELINK_TEMPLATE in templates:
                    included = True
                    break

            if not included:
                # the slidelink template was not included; append it
                # to the list of sidebars for all templates
                app.config.html_sidebars.setdefault('**', []).append(
                    SLIDELINK_TEMPLATE,
                )

    if app.config.slide_link_html_sections_to_slides:
        # fix up the HTML Translator
        if sphinx.version_info >= (1, 6, 0):
            override_translator = type(
                'SlideLinkTranslator',
                (app.builder.get_translator_class(), object),
                {
                    'depart_title': writer.depart_title,
                },
            )
            app.set_translator(app.builder, override_translator)
        else:
            app.builder.translator_class = type(
                'SlideLinkTranslator',
                (app.builder.translator_class, object),
                {
                    'depart_title': writer.depart_title,
                },
            )
Inspect the Sphinx configuration and update for slide-linking. If links from HTML to slides are enabled, make sure the sidebar configuration includes the template and add the necessary theme directory as a loader so the sidebar template can be located. If the sidebar configuration already includes ``slidelink.html`` (in any key), the configuration will not be changed. If the configuration is not specified, we'll attempt to emulate what Sphinx does by default.
def transform(self, jam):
    '''Bypass transformations.

    The input object is yielded first, untouched, followed by every
    output of the wrapped transformer.

    Parameters
    ----------
    jam : pyjams.JAMS
        A muda-enabled JAMS object

    Yields
    ------
    jam_out : pyjams.JAMS iterator
        The first result is `jam` (unmodified), by reference.
        All subsequent results are generated by `transformer`.
    '''
    # Pass the original through before any transformation output.
    yield jam

    # Delegate the remaining outputs to the wrapped transformer.
    for transformed in self.transformer.transform(jam):
        yield transformed
Bypass transformations. Parameters ---------- jam : pyjams.JAMS A muda-enabled JAMS object Yields ------ jam_out : pyjams.JAMS iterator The first result is `jam` (unmodified), by reference All subsequent results are generated by `transformer`
def clear(self, match="*", count=1000):
    """Delete every key under this map's prefix via iterative SCAN.

    :see:meth:RedisMap.clear

    :param match: glob-style pattern forwarded to SCAN.
    :param count: SCAN hint for keys fetched per iteration.
    :return: ``True`` on completion.
    """
    # The cursor starts as the string '0' so the first `!= 0` test is
    # truthy; redis-py then returns integer cursors, and the loop exits
    # when the server reports cursor 0 (scan complete).
    # NOTE(review): assumes self.scan mirrors redis-py SCAN and returns
    # (int cursor, list of keys) -- confirm.
    cursor = '0'
    # transaction=False pipelines commands for round-trip savings
    # without MULTI/EXEC atomicity.
    pipe = self._client.pipeline(transaction=False)
    while cursor != 0:
        cursor, keys = self.scan(cursor=cursor, match=match, count=count)
        if keys:
            pipe.delete(*keys)
    # Also remove this map's prefix from the bucket of known prefixes.
    pipe.hdel(self._bucket_key, self.key_prefix)
    pipe.execute()
    return True
:see:meth:RedisMap.clear
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n

    :param instance_id: the QingCloud instance id to describe.
    :param call: must be 'action' per salt-cloud convention (enforced below).
    :param kwargs: accepted for interface compatibility; not used here.
    :raises SaltCloudSystemExit: when not invoked as an action.
    :raises SaltCloudNotFound: when no instance matches ``instance_id``.
    :return: the raw node dict merged with its normalized representation.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    params = {
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        # NOTE(review): kwargs=None is passed explicitly even though this
        # function receives a `kwargs` argument -- confirm whether the
        # caller-supplied kwargs should be forwarded instead.
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    }
    items = query(params=params)

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    full_node = items['instance_set'][0]
    normalized_node = _show_normalized_node(full_node)
    # Merge in place: normalized keys overwrite the raw node's keys.
    full_node.update(normalized_node)

    result = full_node

    return result
Show the details from QingCloud concerning an instance. CLI Examples: .. code-block:: bash salt-cloud -a show_instance i-2f733r5n
def remove_reader(self, fd):
    " Remove read file descriptor from the event loop. "
    fd = fd_to_int(fd)
    # Only unregister descriptors we are actually tracking; anything
    # else is silently ignored, matching add/remove symmetry.
    if fd not in self._read_fds:
        return
    del self._read_fds[fd]
    self.selector.unregister(fd)
Remove read file descriptor from the event loop.
def _build_prior(self, prior_tensors):
    """
    Build a tf expression for the prior by summing all child-parameter priors.
    """
    # TODO(@awav): What prior must represent empty list of parameters?
    if prior_tensors:
        return tf.add_n(prior_tensors, name='prior')
    # No parameters carry priors: contribute a zero constant instead.
    return tf.constant(0, dtype=settings.float_type)
Build a tf expression for the prior by summing all child-parameter priors.
def _check_header_lines(self, header_lines):
    """Check header lines, in particular for starting file "##fileformat"

    :param header_lines: parsed header line objects; each exposes ``key``
        and ``value`` attributes.
    :raises exceptions.InvalidHeaderException: if there are no header
        lines, or the first one is not ``##fileformat``.
    """
    if not header_lines:
        raise exceptions.InvalidHeaderException(
            "The VCF file did not contain any header lines!"
        )
    first = header_lines[0]
    if first.key != "fileformat":
        raise exceptions.InvalidHeaderException("The VCF file did not start with ##fileformat")
    # Unknown versions are tolerated with a warning rather than an error.
    if first.value not in SUPPORTED_VCF_VERSIONS:
        warnings.warn("Unknown VCF version {}".format(first.value), UnknownVCFVersion)
Check header lines, in particular for starting file "##fileformat"
def jdn_to_hdate(jdn): """Convert from the Julian day to the Hebrew day.""" # calculate Gregorian date date = jdn_to_gdate(jdn) # Guess Hebrew year is Gregorian year + 3760 year = date.year + 3760 jdn_tishrey1 = hdate_to_jdn(HebrewDate(year, 1, 1)) jdn_tishrey1_next_year = hdate_to_jdn(HebrewDate(year + 1, 1, 1)) # Check if computed year was underestimated if jdn_tishrey1_next_year <= jdn: year = year + 1 jdn_tishrey1 = jdn_tishrey1_next_year jdn_tishrey1_next_year = hdate_to_jdn(HebrewDate(year + 1, 1, 1)) size_of_year = get_size_of_hebrew_year(year) # days into this year, first month 0..29 days = jdn - jdn_tishrey1 # last 8 months always have 236 days if days >= (size_of_year - 236): # in last 8 months days = days - (size_of_year - 236) month = days * 2 // 59 day = days - (month * 59 + 1) // 2 + 1 month = month + 4 + 1 # if leap if size_of_year > 355 and month <= 6: month = month + 8 else: # in 4-5 first months # Special cases for this year if size_of_year % 10 > 4 and days == 59: # long Heshvan (day 30) month = 1 day = 30 elif size_of_year % 10 > 4 and days > 59: # long Heshvan month = (days - 1) * 2 // 59 day = days - (month * 59 + 1) // 2 elif size_of_year % 10 < 4 and days > 87: # short kislev month = (days + 1) * 2 // 59 day = days - (month * 59 + 1) // 2 + 2 else: # regular months month = days * 2 // 59 day = days - (month * 59 + 1) // 2 + 1 month = month + 1 return HebrewDate(year, month, day)
Convert from the Julian day to the Hebrew day.
def plot(self, sig_style='', title=None, figsize=None, return_fig=False):
    """
    Plot the comparison of two sets of annotations, possibly
    overlaid on their original signal.

    Parameters
    ----------
    sig_style : str, optional
        The matplotlib style of the signal
    title : str, optional
        The title of the plot
    figsize : tuple, optional
        Tuple pair specifying the width and height of the figure. It is
        the 'figsize' argument passed into matplotlib.pyplot's `figure`
        function.
    return_fig : bool, optional
        Whether the figure is to be returned as an output argument.

    Returns
    -------
    (fig, ax) : tuple
        The figure and axes, only when `return_fig` is True.
    """
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(1, 1, 1)
    legend = ['Signal',
              'Matched Reference Annotations (%d/%d)' % (self.tp, self.n_ref),
              'Unmatched Reference Annotations (%d/%d)' % (self.fn, self.n_ref),
              'Matched Test Annotations (%d/%d)' % (self.tp, self.n_test),
              'Unmatched Test Annotations (%d/%d)' % (self.fp, self.n_test)
              ]

    # Plot the signal if any; annotations are drawn as markers on top of
    # the signal samples they point at.
    if self.signal is not None:
        ax.plot(self.signal, sig_style)

        # Plot reference annotations
        ax.plot(self.matched_ref_sample,
                self.signal[self.matched_ref_sample], 'ko')
        ax.plot(self.unmatched_ref_sample,
                self.signal[self.unmatched_ref_sample], 'ko',
                fillstyle='none')
        # Plot test annotations
        ax.plot(self.matched_test_sample,
                self.signal[self.matched_test_sample], 'g+')
        ax.plot(self.unmatched_test_sample,
                self.signal[self.unmatched_test_sample], 'rx')
        ax.legend(legend)

    # Just plot annotations: reference markers at y=1, test markers at
    # y=0.5 so the two sets stay visually separated.
    else:
        # Plot reference annotations
        ax.plot(self.matched_ref_sample, np.ones(self.tp), 'ko')
        ax.plot(self.unmatched_ref_sample, np.ones(self.fn), 'ko',
                fillstyle='none')
        # Plot test annotations
        ax.plot(self.matched_test_sample, 0.5 * np.ones(self.tp), 'g+')
        ax.plot(self.unmatched_test_sample, 0.5 * np.ones(self.fp), 'rx')
        # no 'Signal' legend entry in this branch
        ax.legend(legend[1:])

    if title:
        ax.set_title(title)

    ax.set_xlabel('time/sample')

    fig.show()

    if return_fig:
        return fig, ax
Plot the comparison of two sets of annotations, possibly overlaid on their original signal. Parameters ---------- sig_style : str, optional The matplotlib style of the signal title : str, optional The title of the plot figsize : tuple, optional Tuple pair specifying the width and height of the figure. It is the 'figsize' argument passed into matplotlib.pyplot's `figure` function. return_fig : bool, optional Whether the figure is to be returned as an output argument.
def parametric_mean_function(max_iters=100, optimize=True, plot=True):
    """
    A linear mean function with parameters that we'll learn alongside the kernel

    :param max_iters: maximum optimizer iterations.
    :param optimize: whether to optimize the model hyperparameters.
    :param plot: whether to plot the fitted model.
    :return: the fitted GPy.core.GP model.
    """
    # Synthetic data: sinusoid + noise + a strong linear trend that the
    # mean function should absorb.
    X = np.linspace(0, 10, 50).reshape(-1, 1)
    Y = np.sin(X) + 0.5 * np.cos(3 * X) + 0.1 * np.random.randn(*X.shape) + 3 * X

    # Linear mean function whose parameters are learned with the kernel.
    # (The previous GPy.core.Mapping/np.sin assignment was dead code --
    # it was immediately overwritten -- and has been removed.)
    mf = GPy.mappings.Linear(1, 1)

    k = GPy.kern.RBF(1)
    lik = GPy.likelihoods.Gaussian()
    m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)

    if optimize:
        m.optimize(max_iters=max_iters)
    if plot:
        m.plot()

    return m
A linear mean function with parameters that we'll learn alongside the kernel
def check_presence_of_mandatory_args(args, mandatory_args):
    '''
    Checks whether all mandatory arguments are passed.

    This function aims at methods with many arguments
    which are passed as kwargs so that the order
    in which they are passed does not matter.

    :args: The dictionary passed as args.
    :mandatory_args: A list of keys that have to be
                     present in the dictionary.
    :raise: :exc:`~ValueError`
    :returns: True, if all mandatory args are passed. If not,
              an exception is raised.
    '''
    # Collect every required key that is absent, so the error message
    # names all missing arguments at once.
    missing_args = [name for name in mandatory_args if name not in args]
    if missing_args:
        raise ValueError('Missing mandatory arguments: ' + ', '.join(missing_args))
    return True
Checks whether all mandatory arguments are passed. This function aims at methods with many arguments which are passed as kwargs so that the order in which they are passed does not matter. :args: The dictionary passed as args. :mandatory_args: A list of keys that have to be present in the dictionary. :raise: :exc:`~ValueError` :returns: True, if all mandatory args are passed. If not, an exception is raised.
def from_torch_layers(cls, module_graph, variable):
    """Recover something like neural net layers from PyTorch Module's and the
    compute graph from a Variable.

    Example output for a multi-layer RNN. We confusingly assign shared embedding values
    to the encoder, but ordered next to the decoder.

    rnns.0.linear.module.weight_raw rnns.0
    rnns.0.linear.module.bias rnns.0
    rnns.1.linear.module.weight_raw rnns.1
    rnns.1.linear.module.bias rnns.1
    rnns.2.linear.module.weight_raw rnns.2
    rnns.2.linear.module.bias rnns.2
    rnns.3.linear.module.weight_raw rnns.3
    rnns.3.linear.module.bias rnns.3
    decoder.weight encoder
    decoder.bias decoder
    """
    # TODO: We're currently not using this, but I left it here incase we want to resurrect! - CVP
    torch = util.get_module("torch", "Could not import torch")

    # Index module-graph nodes by object identity for O(1) lookups below.
    module_nodes_by_hash = {id(n): n for n in module_graph.nodes}
    module_parameter_nodes = [
        n for n in module_graph.nodes if isinstance(n.obj, torch.nn.Parameter)]

    names_by_pid = {id(n.obj): n.name for n in module_parameter_nodes}

    # For every Module node, collect the set of Parameters reachable from it.
    reachable_param_nodes = module_graph[0].reachable_descendents()
    reachable_params = {}
    module_reachable_params = {}
    names = {}
    for pid, reachable_nodes in reachable_param_nodes.items():
        node = module_nodes_by_hash[pid]
        if not isinstance(node.obj, torch.nn.Module):
            continue
        module = node.obj
        reachable_params = {}  # by object id
        module_reachable_params[id(module)] = reachable_params
        names[node.name] = set()
        for reachable_hash in reachable_nodes:
            reachable = module_nodes_by_hash[reachable_hash]
            if isinstance(reachable.obj, torch.nn.Parameter):
                param = reachable.obj
                reachable_params[id(param)] = param
                names[node.name].add(names_by_pid[id(param)])

    # we look for correspondences between sets of parameters used in subtrees of the
    # computation graph and sets of parameters contained in subtrees of the module
    # graph
    node_depths = {id(n): d for n, d in module_graph[0].descendent_bfs()}
    parameter_module_names = {}
    parameter_modules = {}
    for param_node in (n for n in module_graph.nodes if
                       isinstance(n.obj, torch.nn.Parameter)):
        pid = id(param_node.obj)
        best_node = None
        best_depth = None
        best_reachable_params = None
        for node in module_graph.nodes:
            if not isinstance(node.obj, torch.nn.Module):
                continue
            module = node.obj
            reachable_params = module_reachable_params[id(module)]
            if pid in reachable_params:
                depth = node_depths[id(node)]
                # Prefer the module containing the fewest parameters (most
                # specific container), breaking ties toward deeper nodes.
                if best_node is None or (len(reachable_params), depth) <= (len(best_reachable_params), best_depth):
                    best_node = node
                    best_depth = depth
                    best_reachable_params = reachable_params

        parameter_modules[pid] = best_node
        parameter_module_names[param_node.name] = best_node.name

    # contains all parameters but only a minimal set of modules necessary
    # to contain them (and which ideally correspond to conceptual layers)
    reduced_module_graph = cls()
    rmg_ids = itertools.count()
    rmg_root = Node(id=next(rmg_ids), node=module_graph[0])
    reduced_module_graph.add_node(rmg_root)
    reduced_module_graph.root = rmg_root
    rmg_nodes_by_pid = {}

    module_nodes_by_pid = {id(n.obj): n for n in module_graph.nodes}

    compute_graph, compute_node_vars = cls.from_torch_compute_graph(
        variable)
    for node, _ in reversed(list(compute_graph[0].ancestor_bfs())):
        param = compute_node_vars.get(node.id)
        pid = id(param)
        if not isinstance(param, torch.nn.Parameter):
            continue
        if pid not in module_nodes_by_pid:
            # not all Parameters that occur in the compute graph come from the Module graph
            continue

        # add the nodes in the order we want to display them on the frontend
        mid = id(parameter_modules[pid].obj)
        if mid in rmg_nodes_by_pid:
            rmg_module = rmg_nodes_by_pid[mid]
        else:
            rmg_module = rmg_nodes_by_pid[mid] = Node(
                id=next(rmg_ids), node=module_nodes_by_pid[mid])
            reduced_module_graph.add_node(rmg_module)
            reduced_module_graph.add_edge(rmg_root, rmg_module)

        rmg_param = Node(id=next(rmg_ids), node=module_nodes_by_pid[pid])
        rmg_nodes_by_pid[pid] = rmg_param
        reduced_module_graph.add_node(rmg_param)

        reduced_module_graph.add_edge(rmg_module, rmg_param)
    return reduced_module_graph
Recover something like neural net layers from PyTorch Module's and the compute graph from a Variable. Example output for a multi-layer RNN. We confusingly assign shared embedding values to the encoder, but ordered next to the decoder. rnns.0.linear.module.weight_raw rnns.0 rnns.0.linear.module.bias rnns.0 rnns.1.linear.module.weight_raw rnns.1 rnns.1.linear.module.bias rnns.1 rnns.2.linear.module.weight_raw rnns.2 rnns.2.linear.module.bias rnns.2 rnns.3.linear.module.weight_raw rnns.3 rnns.3.linear.module.bias rnns.3 decoder.weight encoder decoder.bias decoder
def flatsize(obj, align=0, **opts):
    '''Return the flat size of an object (in bytes), optionally
    aligned to a given power of 2.

    See function **basicsize** for a description of
    the other options.

    See the documentation of this module for the
    definition of flat size.

    :param align: power-of-two alignment boundary, or 0/1 for none.
    :raises ValueError: if *align* is greater than 1 but not a power of 2.
    '''
    v = _typedefof(obj, **opts)
    if v:
        if align > 1:
            m = align - 1
            # A power of two shares no bits with (itself - 1); anything
            # else is rejected as an invalid alignment.
            if (align & m) != 0:
                raise ValueError('invalid option: %s=%r' % ('align', align))
        else:
            m = 0
        # m is the alignment mask handed down to the typedef's flat().
        v = v.flat(obj, m)
    return v
Return the flat size of an object (in bytes), optionally aligned to a given power of 2. See function **basicsize** for a description of the other options. See the documentation of this module for the definition of flat size.
def delete_channel_cb(self, viewer, channel):
    """Called when a channel is deleted from the main interface.
    Parameter is channel (a bunch).

    Removes every thumb belonging to the deleted channel from the
    ordered thumb list and the thumb dict, un-highlights them, then
    schedules a reorder of the remaining thumbs.
    """
    chname_del = channel.name
    # TODO: delete thumbs for this channel!
    self.logger.debug("deleting thumbs for channel '%s'" % (chname_del))

    # All thumb bookkeeping happens under the lock to stay consistent
    # with concurrent GUI updates.
    with self.thmblock:
        new_thumb_list = []
        un_hilite_set = set([])
        for thumbkey in self.thumb_list:
            # thumbkey is a tuple whose first item is the channel name
            chname = thumbkey[0]
            if chname != chname_del:
                new_thumb_list.append(thumbkey)
            else:
                if thumbkey in self.thumb_dict:
                    del self.thumb_dict[thumbkey]
                un_hilite_set.add(thumbkey)

        self.thumb_list = new_thumb_list
        self._tkf_highlight -= un_hilite_set  # Unhighlight

    self.fv.gui_do_oneshot('thumbs-reorder', self.reorder_thumbs)
Called when a channel is deleted from the main interface. Parameter is channel (a bunch).
def _load(target, **vars): """ Fetch something from a module. The exact behaviour depends on the the target string: If the target is a valid python import path (e.g. `package.module`), the rightmost part is returned as a module object. If the target contains a colon (e.g. `package.module:var`) the module variable specified after the colon is returned. If the part after the colon contains any non-alphanumeric characters (e.g. `package.module:func(var)`) the result of the expression is returned. The expression has access to keyword arguments supplied to this function. Example:: >>> _load('bottle') <module 'bottle' from 'bottle.py'> >>> _load('bottle:Bottle') <class 'bottle.Bottle'> >>> _load('bottle:cookie_encode(v, secret)', v='foo', secret='bar') '!F+hN4dQxaDJ4QxxaZ+Z3jw==?gAJVA2Zvb3EBLg==' """ module, target = target.split(":", 1) if ':' in target else (target, None) if module not in sys.modules: __import__(module) if not target: return sys.modules[module] if target.isalnum(): return getattr(sys.modules[module], target) package_name = module.split('.')[0] vars[package_name] = sys.modules[package_name] return eval('%s.%s' % (module, target), vars)
Fetch something from a module. The exact behaviour depends on the the target string: If the target is a valid python import path (e.g. `package.module`), the rightmost part is returned as a module object. If the target contains a colon (e.g. `package.module:var`) the module variable specified after the colon is returned. If the part after the colon contains any non-alphanumeric characters (e.g. `package.module:func(var)`) the result of the expression is returned. The expression has access to keyword arguments supplied to this function. Example:: >>> _load('bottle') <module 'bottle' from 'bottle.py'> >>> _load('bottle:Bottle') <class 'bottle.Bottle'> >>> _load('bottle:cookie_encode(v, secret)', v='foo', secret='bar') '!F+hN4dQxaDJ4QxxaZ+Z3jw==?gAJVA2Zvb3EBLg=='
def add_errors(self, *errors: Union[BaseSchemaError, List[BaseSchemaError]]) -> None:
    """
    Adds errors to the error repository in schema loader

    :param errors: one or more schema errors (or lists of them);
        forwarded unchanged to ``self.schema_loader.add_errors``.
    """
    self.schema_loader.add_errors(*errors)
Adds errors to the error repository in schema loader
def check_dependencies():
    """Make sure virtualenv is in the path.

    If virtualenv is missing, attempts installation via easy_install;
    aborts the program (die) when it cannot be installed or found.
    """
    # NOTE: this module uses Python 2 print statements.
    print 'Checking dependencies...'
    if not HAS_VIRTUALENV:
        print 'Virtual environment not found.'
        # Try installing it via easy_install...
        if HAS_EASY_INSTALL:
            # trailing comma: suppress the newline (Python 2 idiom)
            print 'Installing virtualenv via easy_install...',
            run_command(['easy_install', 'virtualenv'],
                        die_message='easy_install failed to install virtualenv'
                                    '\ndevelopment requires virtualenv, please'
                                    ' install it using your favorite tool')
            # Verify the install actually put virtualenv on the PATH.
            if not run_command(['which', 'virtualenv']):
                die('ERROR: virtualenv not found in path.\n\ndevelopment '
                    ' requires virtualenv, please install it using your'
                    ' favorite package management tool and ensure'
                    ' virtualenv is in your path')
            print 'virtualenv installation done.'
        else:
            die('easy_install not found.\n\nInstall easy_install'
                ' (python-setuptools in ubuntu) or virtualenv by hand,'
                ' then rerun.')
    print 'dependency check done.'
Make sure virtualenv is in the path.
def first(args):
    """
    %prog first N fastqfile(s)

    Get first N reads from file.
    """
    from jcvi.apps.base import need_update

    p = OptionParser(first.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    N = int(args[0])
    # Each FASTQ record spans exactly four lines.
    nlines = N * 4

    fastqfiles = args[1:]
    outfile = opts.outfile
    if not need_update(fastqfiles, outfile):
        logging.debug("File `{0}` exists. Will not overwrite.".format(outfile))
        return

    for fastqfile in fastqfiles:
        # BUG FIX: determine compression per input file; previously the
        # check was made on the first file only and applied to all.
        if fastqfile.endswith(".gz"):
            cmd = "zcat {0} | head -n {1}".format(fastqfile, nlines)
        else:
            cmd = "head -n {0} {1}".format(nlines, fastqfile)

        # Append so the heads of all input files land in one output file.
        sh(cmd, outfile=outfile, append=True)
%prog first N fastqfile(s) Get first N reads from file.
def from_compressed_buffer(cls, compressed_buffer, point_format, count, laszip_vlr):
    """ Construct the point record by reading and decompressing the points
    data from the input buffer
    """
    # Decompress straight into the point format's numpy dtype layout.
    raw_points = decompress_buffer(
        compressed_buffer, point_format.dtype, count, laszip_vlr
    )
    return cls(raw_points, point_format)
Construct the point record by reading and decompressing the points data from the input buffer
def step(self, metrics):
    """
    EarlyStopping step on each epoch

    Arguments:
        metrics {float} -- metric value

    Returns:
        bool -- True when training should stop (metric is NaN, or no
        improvement for `patience` consecutive epochs).
    """
    # First observation ever: record it and keep training.
    if self.best is None:
        self.best = metrics
        return False

    # A NaN metric means the run has diverged; stop immediately.
    if np.isnan(metrics):
        return True

    if self.is_better(metrics, self.best):
        # Improvement: reset the patience counter.
        self.num_bad_epochs = 0
        self.best = metrics
    else:
        self.num_bad_epochs += 1

    # Stop once patience is exhausted.
    return self.num_bad_epochs >= self.patience
EarlyStopping step on each epoch Arguments: metrics {float} -- metric value
def vcfunpackinfo(table, *keys):
    """
    Unpack the INFO field into separate fields, one per INFO key.

    `table` is a petl table as produced by ``fromvcf`` with the petlx bio
    extensions active; `keys` optionally restricts which INFO keys are
    unpacked (all keys when omitted). Returns a new petl table with the
    INFO dict expanded into individual columns.
    """
    return etl.unpackdict(table, 'INFO', keys=keys)
Unpack the INFO field into separate fields. E.g.:: >>> import petl as etl >>> # activate bio extensions ... import petlx.bio >>> table1 = ( ... etl ... .fromvcf('fixture/sample.vcf', samples=None) ... .vcfunpackinfo() ... ) >>> table1 +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | CHROM | POS | ID | REF | ALT | QUAL | FILTER | AA | AC | AF | AN | DB | DP | H2 | NS | +=======+=========+=============+=====+========+======+=========+======+======+================+======+======+======+======+======+ | '19' | 111 | None | 'A' | [C] | 9.6 | None | None | None | None | None | None | None | None | None | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | '19' | 112 | None | 'A' | [G] | 10 | None | None | None | None | None | None | None | None | None | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | '20' | 14370 | 'rs6054257' | 'G' | [A] | 29 | [] | None | None | [0.5] | None | True | 14 | True | 3 | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | '20' | 17330 | None | 'T' | [A] | 3 | ['q10'] | None | None | [0.017] | None | None | 11 | None | 3 | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | '20' | 1110696 | 'rs6040355' | 'A' | [G, T] | 67 | [] | 'T' | None | [0.333, 0.667] | None | True | 10 | None | 2 | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ ...
def add_action_rule(self, rule_config_name):
    """
    Adds the given action rule to this ruleset's action rules

    :param rule_config_name: either a bare rule name (str), e.g.
        'stop-processing', or a mapping of rule name -> rule config.
    :return: None
    """
    self.logger.debug('Adding action rule {0}'.format(rule_config_name))

    # isinstance() instead of type() == so str/dict subclasses are
    # handled too.
    if isinstance(rule_config_name, str):
        # Handle rules that are just a string, like 'stop-processing'
        self.add_rule(rule_config_name, None,
                      self.available_action_plugins, self.action_rules)
    elif isinstance(rule_config_name, dict):
        # Handle rules built from key-value pairs
        for rule_name, rule_config in rule_config_name.items():
            self.add_rule(rule_name, rule_config,
                          self.available_action_plugins, self.action_rules)
Adds the given action rule to this ruleset's action rules :param rule_config_name: :return:
def _get_standalone_app_url(self, app_id, spark_master_address, requests_config, tags):
    """
    Return the application URL from the app info page on the Spark master.
    Due to a bug, we need to parse the HTML manually because we cannot
    fetch JSON data from HTTP interface.

    :param app_id: Spark application id, forwarded as the ``appId``
        query parameter.
    :return: the href of the "Application Detail UI" link, or ``None``
        (implicitly) when the link is absent or ambiguous.
    """
    app_page = self._rest_request(
        spark_master_address,
        SPARK_MASTER_APP_PATH,
        SPARK_STANDALONE_SERVICE_CHECK,
        requests_config,
        tags,
        appId=app_id,
    )

    dom = BeautifulSoup(app_page.text, 'html.parser')
    # Locate the anchor(s) whose visible text is exactly
    # 'Application Detail UI'.
    app_detail_ui_links = dom.find_all('a', string='Application Detail UI')

    # Only trust the result when exactly one matching link exists;
    # otherwise fall through and return None.
    if app_detail_ui_links and len(app_detail_ui_links) == 1:
        return app_detail_ui_links[0].attrs['href']
Return the application URL from the app info page on the Spark master. Due to a bug, we need to parse the HTML manually because we cannot fetch JSON data from HTTP interface.
def check_for_input_len_diff(*args):
    """ Check for Input Length Difference.

    This method checks if multiple data sets that are inputted are all the
    same size. If they are not the same length an error is raised with a
    custom message that informs the developer that the data set's lengths
    are not the same.

    :param args: any number of sized data sets (lists, arrays, ...).
    :raises ValueError: if the inputs do not all share the same length.
        (ValueError subclasses Exception, so existing broad handlers
        keep working.)
    """
    # A single set of distinct lengths is cheaper and clearer than
    # comparing every length to the first.
    lengths = {len(arr) for arr in args}
    if len(lengths) > 1:
        err_msg = ("Error: mismatched data lengths, check to ensure that all "
                   "input data is the same length and valid")
        raise ValueError(err_msg)
Check for Input Length Difference. This method checks if multiple data sets that are inputted are all the same size. If they are not the same length an error is raised with a custom message that informs the developer that the data set's lengths are not the same.
def _replace_args_with_defaults(self, _args=None, **kwargs): """Internal method to fill absent values in the kwargs with the defaults. Args: _args: A list of arguments to replace if a subset is required. Name chosen to prevent conflicts with kwargs. **kwargs: The arguments to replace with defaults. Returns: A map with the same fields as kwargs, but absent values are filled with defaults. """ if _args is None: _args = six.iterkeys(kwargs) my_defaults = self.defaults for k in _args: if k not in kwargs: if k in my_defaults: kwargs[k] = my_defaults[k] elif k in _defaults: kwargs[k] = _defaults[k] return kwargs
Internal method to fill absent values in the kwargs with the defaults. Args: _args: A list of arguments to replace if a subset is required. Name chosen to prevent conflicts with kwargs. **kwargs: The arguments to replace with defaults. Returns: A map with the same fields as kwargs, but absent values are filled with defaults.
def destroy(name, call=None):
    '''
    Destroy a node.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine

    :param name: name of the VM to destroy.
    :param call: salt-cloud invocation mode; must not be 'function'.
    :raises SaltCloudSystemExit: when invoked with -f/--function.
    :return: a dict reporting the destroyed VM, or an error dict when
        the VM could not be stopped in time.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Announce the impending destruction on the salt event bus.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    vmobj = _get_vm_by_name(name)
    if vmobj is not None:
        # stop the vm if it is still running
        if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped':
            stop(name, vmobj['vmid'], 'action')

            # wait until stopped
            if not wait_for_state(vmobj['vmid'], 'stopped'):
                return {'Error': 'Unable to stop {0}, command timed out'.format(name)}

        # required to wait a bit here, otherwise the VM is sometimes
        # still locked and destroy fails.
        time.sleep(3)

        query('delete', 'nodes/{0}/{1}'.format(
            vmobj['node'], vmobj['id']
        ))

        # Announce completion on the event bus.
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )

        # Drop the minion's cached data if cache updates are enabled.
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name, __active_provider_name__.split(':')[0], __opts__)

        return {'Destroyed': '{0} was destroyed.'.format(name)}
Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine
def harmonic_mean(data):
    """Return the harmonic mean of data

    Zero values are skipped when accumulating reciprocals; if every
    value is zero (reciprocal sum is 0) the result is 0.0.
    """
    if not data:
        raise StatisticsError('harmonic_mean requires at least one data point')
    # Accumulate the sum of reciprocals, treating zeros as contributing
    # nothing (matching the original behavior).
    reciprocal_sum = 0.0
    for value in data:
        if value:
            reciprocal_sum += 1.0 / value
    return len(data) / reciprocal_sum if reciprocal_sum else 0.0
Return the harmonic mean of data
def glob(self, pat):
    """`pat` can be an extended glob pattern, e.g. `'**/*.less'`
    This code handles negations similarly to node.js' minimatch,
    i.e. a leading `!` will negate the entire pattern.
    """
    r = ""
    negate = int(pat.startswith('!'))
    i = negate  # skip the leading '!' when present
    while i < len(pat):
        if pat[i:i + 3] == '**/':
            # '**/' matches any (possibly empty) directory prefix.
            r += "(?:.*/)?"
            i += 3
        elif pat[i] == "*":
            # '*' matches anything except a path separator.
            r += "[^/]*"
            i += 1
        elif pat[i] == ".":
            r += "[.]"
            i += 1
        elif pat[i] == "?":
            r += "."
            i += 1
        else:
            # NOTE: regex metacharacters other than . * ? pass through
            # unescaped, as in the original implementation.
            r += pat[i]
            i += 1
    # BUG FIX: the previous code appended '\Z(?ms)' to the pattern, but
    # global inline flags not at the start of a pattern are an error on
    # Python 3.11+. Pass the flags to re.compile instead; behavior is
    # unchanged.
    rx = re.compile(r + r'\Z', re.MULTILINE | re.DOTALL)

    def match(d):
        m = rx.match(d)
        return not m if negate else m

    return [s for s in self if match(s.relpath(self).replace('\\', '/'))]
`pat` can be an extended glob pattern, e.g. `'**/*.less'` This code handles negations similarly to node.js' minimatch, i.e. a leading `!` will negate the entire pattern.
def log_type(args_kw, ret, func, slf=False, prop_getter=False, clss=None,
             argspecs=None, args_kw_type=None, ret_type=None):
    """Stores information of a function or method call into a cache, so
    pytypes can create a PEP 484 stubfile from this information later on
    (see dump_cache).

    :param args_kw: the actual call arguments (positional + keyword).
    :param ret: the actual return value of the call.
    :param func: the function or method being observed.
    :param slf: True when `func` is a bound/instance method.
    :param prop_getter: True when `func` is a property getter.
    :param clss: the class defining `func`, if known; resolved lazily
        below when omitted.
    :param argspecs: pre-computed argspecs, to avoid re-introspection.
    :param args_kw_type: pre-computed deep type of `args_kw` (optional).
    :param ret_type: pre-computed deep type of `ret` (optional).
    """
    # Compute deep (structural) types only when the caller did not
    # already supply them.
    if args_kw_type is None:
        args_kw_type = deep_type(args_kw)
    if ret_type is None:
        ret_type = deep_type(ret)
    if argspecs is None:
        argspecs = getargspecs(func)
    node = _register_logged_func(func, slf, prop_getter, clss, argspecs)
    node.add_observation(args_kw_type, ret_type)

    # Remember which file the defining module lives in, for stub output.
    md = util.getmodule_for_member(func, prop_getter)
    if not md.__name__ in _module_file_map:
        _module_file_map[md.__name__] = md.__file__

    # Best effort: resolve the defining class so its source line can be
    # recorded; a ValueError here just means it could not be determined.
    if clss is None:
        try:
            clss = util.get_class_that_defined_method(func)
        except ValueError:
            pass
    if not clss is None and not clss in _member_line_map:
        _member_line_map[clss] = findsource(clss)[1]
Stores information of a function or method call into a cache, so pytypes can create a PEP 484 stubfile from this information later on (see dump_cache).
def parse_args(mixed=None):
    """
    Context manager to extract and apply environment related options from the
    provided argparser result.

    A dictionary with unknown options will be yielded, so the remaining
    options can be used by the caller.

    :api: bonobo.patch_environ

    :param mixed: ArgumentParser instance, Namespace, or dict.
    :return:
    """
    if mixed is None:
        global _parser
        if _parser is not None:
            warnings.warn(
                "You are calling bonobo.parse_args() without a parser argument, but it looks like you created a parser before. You probably want to pass your parser to this call, or if creating a new parser here is really what you want to do, please create a new one explicitely to silence this warning."
            )
        # use the api from bonobo namespace, in case a command patched it.
        import bonobo

        mixed = bonobo.get_argument_parser()

    if isinstance(mixed, argparse.ArgumentParser):
        options = mixed.parse_args()
    else:
        options = mixed

    # Namespace (or any object): fall back to its attribute dict.
    if not isinstance(options, dict):
        options = options.__dict__

    # make a copy so we don't pollute our parent variables.
    options = dict(options)

    # storage for values before patch.
    _backup = {}

    # Priority order: --env > --env-file > system > --default-env > --default-env-file
    #
    # * The code below is reading default-env before default-env-file as if the first sets something, default-env-file
    #   won't override it.
    # * Then, env-file is read from before env, as the behaviour will be the opposite (env will override a var even if
    #   env-file sets something.)

    try:
        # Set default environment (only for names not already present).
        for name, value in map(parse_var, options.pop("default_env", []) or []):
            if not name in os.environ:
                if not name in _backup:
                    _backup[name] = os.environ.get(name, None)
                os.environ[name] = value

        # Read and set default environment from file(s)
        for filename in options.pop("default_env_file", []) or []:
            for name, value in load_env_from_file(filename):
                if not name in os.environ:
                    if not name in _backup:
                        _backup[name] = os.environ.get(name, None)
                    os.environ[name] = value

        # Read and set environment from file(s); unlike the defaults
        # above these override existing values.
        for filename in options.pop("env_file", []) or []:
            for name, value in load_env_from_file(filename):
                if not name in _backup:
                    _backup[name] = os.environ.get(name, None)
                os.environ[name] = value

        # Set environment (highest priority, overrides everything).
        for name, value in map(parse_var, options.pop("env", []) or []):
            if not name in _backup:
                _backup[name] = os.environ.get(name, None)
            os.environ[name] = value

        yield options
    finally:
        # Restore os.environ exactly as it was before entering.
        for name, value in _backup.items():
            if value is None:
                del os.environ[name]
            else:
                os.environ[name] = value
Context manager to extract and apply environment related options from the provided argparser result. A dictionary with unknown options will be yielded, so the remaining options can be used by the caller. :api: bonobo.patch_environ :param mixed: ArgumentParser instance, Namespace, or dict. :return:
def write(self, text, add_p_style=True, add_t_style=True):
    """
    Write *text* into the current node, collapsing runs of spaces into
    ODF ``text:s`` elements.

    See "mixed content": http://effbot.org/zone/element-infoset.htm#mixed-content

    Writing is complicated by ODP's requirement that consecutive duplicate
    spaces be collapsed, so runs of spaces are buffered and emitted as a
    single literal space plus a ``text:s`` element carrying the count of the
    remaining spaces.

    :param text: the string to write.
    :param add_p_style: apply the pending paragraph style first.
    :param add_t_style: apply the pending text style first.
    """
    self._add_styles(add_p_style, add_t_style)
    self._add_pending_nodes()
    # Buffer of consecutive space characters not yet emitted.
    spaces = []
    # NOTE: `i` from enumerate is unused; only `letter` matters here.
    for i, letter in enumerate(text):
        if letter == " ":
            # Accumulate the run; emit nothing until a non-space arrives.
            spaces.append(letter)
            continue
        elif len(spaces) == 1:
            # A single space needs no text:s element — write it literally.
            self._write(" ")
            self._write(letter)
            spaces = []
            continue
        elif spaces:
            # Run of 2+ spaces: one literal space, then a text:s element
            # standing in for the rest of the run.
            num_spaces = len(spaces) - 1
            # write just a plain space at the start
            self._write(" ")
            if num_spaces > 1:
                # write the attrib only if more than one space
                self.add_node("text:s", {"text:c": str(num_spaces)})
            else:
                self.add_node("text:s")
            self.pop_node()
            self._write(letter)
            spaces = []
            continue
        self._write(letter)
    # Trailing spaces at end of text: no literal space is written here,
    # the whole run becomes a text:s element (count attrib when > 1).
    if spaces:
        num_spaces = len(spaces)
        if num_spaces > 1:
            self.add_node("text:s", {"text:c": str(num_spaces)})
        else:
            self.add_node("text:s")
        self.pop_node()
See "mixed content": http://effbot.org/zone/element-infoset.htm#mixed-content Writing is complicated by ODP's requirement that consecutive duplicate spaces be collapsed. Deal with this by splitting on whitespace and then handling the '' (empty strings) which stand in for the extra spaces
def build_filtered_queryset(self, query, **kwargs):
    """Return the fully-filtered queryset for *query*.

    The base queryset is narrowed by the query conditions, then handed to
    ``build_extra_filtered_queryset`` for any additional keyword-driven
    filtering.
    """
    base_qs = self.get_queryset().filter(self.get_queryset_filters(query))
    return self.build_extra_filtered_queryset(base_qs, **kwargs)
Build and return the fully-filtered queryset
def _set_fcoe_fabric_map_fcmap(self, v, load=False):
    """
    Setter method for fcoe_fabric_map_fcmap, mapped from YANG variable
    /fcoe/fcoe_fabric_map/fcoe_fabric_map_fcmap (fcoe-fcmap-type).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_fcoe_fabric_map_fcmap is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_fcoe_fabric_map_fcmap() directly.

    YANG Description: This specifies the FCMAP value for a FCoE Fabric-map.

    :param v: value to assign; must match the FC-MAP pattern ``0E:FC:xx``.
    :param load: presumably True when populating from stored config rather
        than user input — TODO confirm against pyangbind conventions.
    :raises ValueError: if *v* is not compatible with fcoe-fcmap-type.
    """
    # Auto-generated (pyangbind-style) setter: wrap the value in a
    # YANGDynClass restricted to the FC-MAP string pattern.
    if hasattr(v, "_utype"):
        # Value carries its own YANG type converter; unwrap through it first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0][Ee]:[Ff][Cc]:[0-9a-fA-F]{2}', 'length': [u'1..9']}), is_leaf=True, yang_name="fcoe-fabric-map-fcmap", rest_name="fcmap", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure the FCMAP value for a FCoE Fabric-map', u'alt-name': u'fcmap', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='fcoe-fcmap-type', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """fcoe_fabric_map_fcmap must be of a type compatible with fcoe-fcmap-type""",
            'defined-type': "brocade-fcoe:fcoe-fcmap-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0][Ee]:[Ff][Cc]:[0-9a-fA-F]{2}', 'length': [u'1..9']}), is_leaf=True, yang_name="fcoe-fabric-map-fcmap", rest_name="fcmap", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure the FCMAP value for a FCoE Fabric-map', u'alt-name': u'fcmap', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='fcoe-fcmap-type', is_config=True)""",
        })

    # Store the validated value and notify the parent object, if it
    # participates in change tracking.
    self.__fcoe_fabric_map_fcmap = t
    if hasattr(self, '_set'):
        self._set()
Setter method for fcoe_fabric_map_fcmap, mapped from YANG variable /fcoe/fcoe_fabric_map/fcoe_fabric_map_fcmap (fcoe-fcmap-type) If this variable is read-only (config: false) in the source YANG file, then _set_fcoe_fabric_map_fcmap is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fcoe_fabric_map_fcmap() directly. YANG Description: This specifies the FCMAP value for a FCoE Fabric-map.
def get_linked_version(doi):
    """
    Get the original link behind the DOI.

    :param doi: A canonical DOI.
    :returns: The canonical URL behind the DOI, or ``None``.

    >>> get_linked_version('10.1209/0295-5075/111/40005')
    'http://stacks.iop.org/0295-5075/111/i=4/a=40005?key=crossref.9ad851948a976ecdf216d4929b0b6f01'
    """
    try:
        # A HEAD request is enough: we only need the redirect target from
        # the "location" header, not the body. Without a timeout this call
        # could hang indefinitely on an unresponsive resolver; Timeout is a
        # RequestException subclass, so failures still return None.
        request = requests.head(to_url(doi), timeout=10)
        return request.headers.get("location")
    except RequestException:
        return None
Get the original link behind the DOI. :param doi: A canonical DOI. :returns: The canonical URL behind the DOI, or ``None``. >>> get_linked_version('10.1209/0295-5075/111/40005') 'http://stacks.iop.org/0295-5075/111/i=4/a=40005?key=crossref.9ad851948a976ecdf216d4929b0b6f01'
def perform_permissions_check(self, user, obj, perms):
    """Delegate the poll-vote permission check to the forum permission handler."""
    handler = self.request.forum_permission_handler
    return handler.can_vote_in_poll(obj, user)
Performs the permission check.
def handle(self, *args, **options):
    """
    This management command gets data for a given election date from elex.
    Then, it loops through each row of the data and calls `process_row`.

    In order for this command to work, you must have bootstrapped all of
    the dependent apps: entity, geography, government, election, vote,
    and almanac.
    """
    self.senate_class = options["senate_class"]

    elex_args = [
        "elex",
        "results",
        options["election_date"],
        "-o",
        "json",
        "--national-only",
    ]
    if options["test"]:
        elex_args.append("-t")

    # Use context managers so the output handle is flushed and closed before
    # the file is read back (the original leaked both file handles).
    with open("bootstrap.json", "w") as writefile:
        subprocess.run(elex_args, stdout=writefile)

    with open("bootstrap.json", "r") as readfile:
        data = json.load(readfile)

    # Group the result rows by candidate+office so each candidate's
    # contests are processed together.
    candidates = collections.defaultdict(list)
    for d in data:
        key = "{0} {1}: {2}, {3}".format(
            d["officename"], d["statename"], d["last"], d["first"]
        )
        candidates[key].append(d)

    for candidate_races in tqdm(
        candidates.values(), desc="Candidates"
    ):
        tqdm.write(
            "{0} {1}: {2}, {3}".format(
                candidate_races[0]["statename"],
                candidate_races[0]["officename"],
                candidate_races[0]["last"],
                candidate_races[0]["first"],
            )
        )
        for race in tqdm(
            candidate_races, desc="Contests", leave=False
        ):
            # Township-level results are never processed.
            if race["level"] == geography.DivisionLevel.TOWNSHIP:
                continue
            # TODO: Check this with Tyler
            if not race.get("level", None):
                continue
            # Ballot measures are out of scope for candidate bootstrapping.
            if race["is_ballot_measure"]:
                continue
            self.process_row(race)
This management command gets data for a given election date from elex. Then, it loops through each row of the data and calls `process_row`. In order for this command to work, you must have bootstrapped all of the dependent apps: entity, geography, government, election, vote, and almanac.
def Downsampled(cls, stats, interval=None):
    """Constructs a copy of given stats but downsampled to given interval.

    Args:
      stats: A `ClientStats` instance.
      interval: A downsampling interval; falls back to
        `DEFAULT_SAMPLING_INTERVAL` when not provided.

    Returns:
      A downsampled `ClientStats` instance.
    """
    interval = interval or cls.DEFAULT_SAMPLING_INTERVAL

    result = cls(stats)
    # Downsample both sample streams the same way, pairing each attribute
    # with its sample kind.
    for attr_name, sample_kind in (("cpu_samples", CpuSample),
                                   ("io_samples", IOSample)):
        downsampled = cls._Downsample(
            kind=sample_kind,
            samples=getattr(stats, attr_name),
            interval=interval)
        setattr(result, attr_name, downsampled)
    return result
Constructs a copy of given stats but downsampled to given interval. Args: stats: A `ClientStats` instance. interval: A downsampling interval. Returns: A downsampled `ClientStats` instance.
def updateDefaults(self, name, body, verbose=None):
    """
    Updates the default values for the Visual Properties in the Visual Style
    specified by the `name` parameter. Additional details on common Visual
    Properties can be found in the
    [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html)

    :param name: Name of the Visual Style
    :param body: A list of Visual Property values to update.
    :param verbose: print more
    :returns: default: successful operation
    """
    endpoint = self.___url + 'styles/' + str(name) + '/defaults'
    return api(url=endpoint, method="PUT", body=body, verbose=verbose)
Updates the default values for the Visual Properties in the Visual Style specified by the `name` parameter. Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html) :param name: Name of the Visual Style :param body: A list of Visual Property values to update. :param verbose: print more :returns: default: successful operation
def main():
    """
    iodp_samples_magic.py
    OPTIONS:
        -f FILE, input csv file
        -Fsa FILE, output samples file for updating, default is to overwrite existing samples file
    """
    # NOTE: the docstring above doubles as the CLI help text (printed
    # verbatim on -h), so its wording is user-facing.
    if "-h" in sys.argv:
        print(main.__doc__)
        sys.exit()
    # Declare the accepted flags: [name, required?, default].
    dataframe = extractor.command_line_dataframe([['WD', False, '.'], ['ID', False, '.'],
                                                  ['f', True, ''], ['Fsa', False, 'samples.txt'],
                                                  ['DM', False, 3]])
    args = sys.argv
    checked_args = extractor.extract_and_check_args(args, dataframe)
    samp_file, output_samp_file, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(
        ['f', 'Fsa', 'WD', 'ID', 'DM'], checked_args)
    # DM may arrive as a string like "3.0"; normalize to an int.
    data_model_num = int(float(data_model_num))
    # Data model 2 uses the legacy er_samples.txt name unless the user
    # explicitly chose an output file.
    if '-Fsa' not in args and data_model_num == 2:
        output_samp_file = "er_samples.txt"
    ran, error = convert.iodp_samples(samp_file, output_samp_file, output_dir_path,
                                      input_dir_path, data_model_num=data_model_num)
    if not ran:
        # Conversion failed; report the warning message from the converter.
        print("-W- " + error)
iodp_samples_magic.py OPTIONS: -f FILE, input csv file -Fsa FILE, output samples file for updating, default is to overwrite existing samples file
def sine_psd(data, delta, number_of_tapers=None, number_of_iterations=2, degree_of_smoothing=1.0, statistics=False, verbose=False): """ Wrapper method for the sine_psd subroutine in the library by German A. Prieto. The subroutine is in charge of estimating the adaptive sine multitaper as in Riedel and Sidorenko (1995). It outputs the power spectral density (PSD). This is done by performing a MSE adaptive estimation. First a pilot spectral estimate is used, and S" is estimated, in order to get te number of tapers to use, using (13) of Riedel and Sidorenko for a min square error spectrum. Unlike the prolate spheroidal multitapers, the sine multitaper adaptive process introduces a variable resolution and error in the frequency domain. Complete error information is contained in the output variables as the corridor of 1-standard-deviation errors, and in the number of tapers used at each frequency. The errors are estimated in the simplest way, from the number of degrees of freedom (two per taper), not by jack-knifing. The frequency resolution is found from K*fN/Nf where fN is the Nyquist frequency and Nf is the number of frequencies estimated. The adaptive process used is as follows. A quadratic fit to the log PSD within an adaptively determined frequency band is used to find an estimate of the local second derivative of the spectrum. This is used in an equation like R & S (13) for the MSE taper number, with the difference that a parabolic weighting is applied with increasing taper order. Because the FFTs of the tapered series can be found by resampling the FFT of the original time series (doubled in length and padded with zeros) only one FFT is required per series, no matter how many tapers are used. This makes the program fast. Compared with the Thomson multitaper programs, this code is not only fast but simple and short. The spectra associated with the sine tapers are weighted before averaging with a parabolically varying weight. 
The expression for the optimal number of tapers given by R & S must be modified since it gives an unbounded result near points where S" vanishes, which happens at many points in most spectra. This program restricts the rate of growth of the number of tapers so that a neighboring covering interval estimate is never completely contained in the next such interval. This method SHOULD not be used for sharp cutoffs or deep valleys, or small sample sizes. Instead use Thomson multitaper in mtspec in this same library. :param data: :class:`numpy.ndarray` Array with the data. :param delta: float Sample spacing of the data. :param number_of_tapers: integer/None, optional Number of tapers to use. If none is given, the library will perform an adaptive taper estimation with a varying number of tapers for each frequency. Defaults to None. :param number_of_iterations: integer, optional Number of iterations to perform. Values less than 2 will be set to 2. Defaults to 2. :param degree_of_smoothing: float, optional Degree of smoothing. Defaults to 1.0. :param statistics: bool, optional Calculates and returns statistics. See the notes in the docstring for further details. :param verbose: bool, optional Passed to the fortran library. Defaults to False. :return: Returns a list with :class:`numpy.ndarray`. See the note below for details. .. note:: This method will at return at least two arrays: The calculated spectrum and the corresponding frequencies. If statistics is True is will also return (in the given order) (multidimensional) arrays containing the 1-std errors (a simple dof estimate) and the number of tapers used for each frequency point. """ # Verbose mode on or off. if verbose is True: verbose = C.byref(C.c_char('y')) else: verbose = None # Set the number of tapers so it can be read by the library. if number_of_tapers is None: number_of_tapers = 0 # initialize _MtspecType to save some space mt = _MtspecType("float32") # Transform the data to work with the library. 
data = np.require(data, dtype=mt.float, requirements=[mt.order]) # Some variables necessary to call the library. npts = len(data) number_of_frequency_bins = int(npts / 2) + 1 # Create output arrays. frequency_bins = mt.empty(number_of_frequency_bins) spectrum = mt.empty(number_of_frequency_bins) # Create optional arrays or set to None. if statistics is True: # here an exception, mt sets the type float32, here we need int32 # that is do all the type and POINTER definition once by hand tapers_per_freq_point = np.empty(number_of_frequency_bins, dtype='int32', order=mt.order) tapers_per_freq_point_p = \ tapers_per_freq_point.ctypes.data_as(C.POINTER(C.c_int)) errors = mt.empty((number_of_frequency_bins, 2)) else: tapers_per_freq_point_p = errors = None # Call the library. Fortran passes pointers! mtspeclib.sine_psd_( C.byref(C.c_int(npts)), C.byref(C.c_float(delta)), mt.p(data), C.byref(C.c_int(number_of_tapers)), C.byref(C.c_int(number_of_iterations)), C.byref(C.c_float(degree_of_smoothing)), C.byref(C.c_int(number_of_frequency_bins)), mt.p(frequency_bins), mt.p(spectrum), tapers_per_freq_point_p, mt.p(errors), verbose) # Calculate return values. return_values = [spectrum, frequency_bins] if statistics is True: return_values.extend([errors, tapers_per_freq_point]) return return_values
Wrapper method for the sine_psd subroutine in the library by German A. Prieto. The subroutine is in charge of estimating the adaptive sine multitaper as in Riedel and Sidorenko (1995). It outputs the power spectral density (PSD). This is done by performing a MSE adaptive estimation. First a pilot spectral estimate is used, and S" is estimated, in order to get the number of tapers to use, using (13) of Riedel and Sidorenko for a min square error spectrum. Unlike the prolate spheroidal multitapers, the sine multitaper adaptive process introduces a variable resolution and error in the frequency domain. Complete error information is contained in the output variables as the corridor of 1-standard-deviation errors, and in the number of tapers used at each frequency. The errors are estimated in the simplest way, from the number of degrees of freedom (two per taper), not by jack-knifing. The frequency resolution is found from K*fN/Nf where fN is the Nyquist frequency and Nf is the number of frequencies estimated. The adaptive process used is as follows. A quadratic fit to the log PSD within an adaptively determined frequency band is used to find an estimate of the local second derivative of the spectrum. This is used in an equation like R & S (13) for the MSE taper number, with the difference that a parabolic weighting is applied with increasing taper order. Because the FFTs of the tapered series can be found by resampling the FFT of the original time series (doubled in length and padded with zeros) only one FFT is required per series, no matter how many tapers are used. This makes the program fast. Compared with the Thomson multitaper programs, this code is not only fast but simple and short. The spectra associated with the sine tapers are weighted before averaging with a parabolically varying weight.
The expression for the optimal number of tapers given by R & S must be modified since it gives an unbounded result near points where S" vanishes, which happens at many points in most spectra. This program restricts the rate of growth of the number of tapers so that a neighboring covering interval estimate is never completely contained in the next such interval. This method SHOULD not be used for sharp cutoffs or deep valleys, or small sample sizes. Instead use Thomson multitaper in mtspec in this same library. :param data: :class:`numpy.ndarray` Array with the data. :param delta: float Sample spacing of the data. :param number_of_tapers: integer/None, optional Number of tapers to use. If none is given, the library will perform an adaptive taper estimation with a varying number of tapers for each frequency. Defaults to None. :param number_of_iterations: integer, optional Number of iterations to perform. Values less than 2 will be set to 2. Defaults to 2. :param degree_of_smoothing: float, optional Degree of smoothing. Defaults to 1.0. :param statistics: bool, optional Calculates and returns statistics. See the notes in the docstring for further details. :param verbose: bool, optional Passed to the fortran library. Defaults to False. :return: Returns a list with :class:`numpy.ndarray`. See the note below for details. .. note:: This method will return at least two arrays: The calculated spectrum and the corresponding frequencies. If statistics is True it will also return (in the given order) (multidimensional) arrays containing the 1-std errors (a simple dof estimate) and the number of tapers used for each frequency point.
def p_expr_unary_op(p):
    '''expr : PLUS expr
            | MINUS expr
            | NOT expr
            | BOOLEAN_NOT expr'''
    # NOTE: the docstring above is the PLY grammar specification for this
    # rule; yacc parses it at runtime, so its token names must not change.
    # Build a unary-op AST node: p[1] is the operator token, p[2] the operand.
    p[0] = ast.UnaryOp(p[1], p[2], lineno=p.lineno(1))
expr : PLUS expr | MINUS expr | NOT expr | BOOLEAN_NOT expr
def convert(model, input_features, output_features):
    """Convert a StandardScaler model to the protobuf spec.

    (The previous docstring said "Imputer", but this converter validates and
    translates a ``sklearn.preprocessing.StandardScaler`` — it reads
    ``mean_`` and ``scale_`` and emits a Scaler transform.)

    Parameters
    ----------
    model: StandardScaler
        A trained StandardScaler model.

    input_features: str
        Name of the input column.

    output_features: str
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
        Protobuf representation of the model
    """
    if not _HAS_SKLEARN:
        raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')

    # Test the scikit-learn model
    _sklearn_util.check_expected_type(model, StandardScaler)
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'mean_'))
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'scale_'))

    # Set the interface params.
    spec = _Model_pb2.Model()
    spec.specificationVersion = SPECIFICATION_VERSION
    spec = _set_transform_interface_params(spec, input_features, output_features)

    # Set the parameters: Core ML applies (x + shift) * scale, so
    # standardization (x - mean) / std becomes shift=-mean, scale=1/std.
    tr_spec = spec.scaler
    for x in model.mean_:
        tr_spec.shiftValue.append(-x)

    for x in model.scale_:
        tr_spec.scaleValue.append(1.0 / x)

    return _MLModel(spec)
Convert a _imputer model to the protobuf spec. Parameters ---------- model: Imputer A trained Imputer model. input_features: str Name of the input column. output_features: str Name of the output column. Returns ------- model_spec: An object of type Model_pb. Protobuf representation of the model