code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def by_classifiers(cls, session, classifiers):
    """
    Look up the releases tagged with the given classifiers.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param classifiers: classifier names to filter on
    :type classifiers: unicode
    :return: release instances
    :rtype: generator of :class:`pyshop.models.Release`
    """
    # Join through the classifier relation and keep releases whose
    # classifier name appears in the requested collection.
    criteria = (Classifier.name.in_(classifiers),)
    return cls.find(session, join=(cls.classifiers,), where=criteria)
def get_template_loaders():
    """
    Compatibility helper returning the configured template loaders.

    Uses ``Engine.get_default()`` on modern Django and falls back to
    ``find_template_loader`` + ``settings.TEMPLATE_LOADERS`` on Django < 1.8.
    Source: https://github.com/django-debug-toolbar/django-debug-toolbar/blob/ece1c2775af108a92a0ef59636266b49e286e916/debug_toolbar/compat.py
    """
    try:
        from django.template.engine import Engine
    except ImportError:  # Django < 1.8
        Engine = None
    if Engine is None:  # Django < 1.8 code path
        from django.template.loader import find_template_loader
        return [find_template_loader(loader_name)
                for loader_name in settings.TEMPLATE_LOADERS]
    try:
        engine = Engine.get_default()
    except ImproperlyConfigured:
        # No default engine configured: report no loaders.
        return []
    return engine.template_loaders
def min(self):
    """ -> #float :func:numpy.min of the timing intervals,
    rounded to ``self.precision``; None when no samples exist """
    if not len(self.array):
        return None
    return round(np.min(self.array), self.precision)
def register(cls):
    '''Decorator adding a Message class (keyed by its msgtype and
    revision) to the Protocol index; duplicate registrations raise
    :class:`ProtocolError`.

    Example:
    .. code-block:: python
        @register
        class some_msg_1(Message):
            msgtype = 'SOME-MSG'
            revision = 1
            @classmethod
            def create(cls, **metadata):
                header = cls.create_header()
                content = {}
                return cls(header, metadata, content)
    '''
    spec_key = (cls.msgtype, cls.revision)
    if spec_key in index:
        raise ProtocolError("Duplicate message specification encountered: %r" % spec_key)
    index[spec_key] = cls
    return cls
def get_user_deliveryserver(self, domainid, serverid):
    """Fetch a single user delivery server for the given domain/server ids."""
    params = dict(domainid=domainid, serverid=serverid)
    return self.api_call(ENDPOINTS['userdeliveryservers']['get'], params)
def __autoconnect_signals(self):
    """Autoconnect glade/builder signals with controller methods.

    Called during view registration.  Builds a name -> callable map of
    every callable attribute on the controller and hands it to the
    view's glade XML widgets and/or its GtkBuilder, if present.
    """
    dic = {}
    for name in dir(self):
        method = getattr(self, name)
        # `collections.Callable` was removed in Python 3.10; the builtin
        # callable() performs the same check on every Python version.
        if not callable(method):
            continue
        assert(name not in dic)  # not already connected!
        dic[name] = method
    # autoconnects glade in the view (if available any)
    for xml in self.view.glade_xmlWidgets:
        xml.signal_autoconnect(dic)
    # autoconnects builder if available
    if self.view._builder is not None:
        self.view._builder_connect_signals(dic)
def get_rtc_table(self):
    """Return the global RTC table.

    The table is created lazily on first access and registered in both
    the global and the per-key table maps.
    """
    table = self._global_tables.get(RF_RTC_UC)
    if not table:
        # First access: build and register the table.
        table = RtcTable(self._core_service, self._signal_bus)
        self._global_tables[RF_RTC_UC] = table
        self._tables[(None, RF_RTC_UC)] = table
    return table
def _dir2(obj, pref='', excl=(), slots=None, itor=''):
    '''Yield (attribute name, object) 2-tuples for certain attributes
    or for the ``__slots__`` attributes of the given object, but not
    both.  Iterator referent objects are yielded under the name given
    by *itor* if the latter is non-empty.
    '''
    if slots:  # __slots__ attrs
        if hasattr(obj, slots):
            # Gather all inherited __slots__ attrs across the MRO
            # (list, tuple or dict __slots__), dropping duplicates.
            collected = {}
            for klass in type(obj).mro():
                for attr in getattr(klass, slots, ()):
                    if hasattr(obj, attr):
                        collected.setdefault(attr, getattr(obj, attr))
            # assume the __slots__ tuple/list holds the attr values
            yield slots, _Slots(collected)
            for pair in _items(collected):
                yield pair  # attr name, value
    elif itor:  # iterator referents
        for referent in obj:
            yield itor, referent
    else:  # regular attrs
        for attr in dir(obj):
            if attr.startswith(pref) and attr not in excl and hasattr(obj, attr):
                yield attr, getattr(obj, attr)
def validate_config(self, organization, config, actor=None):
    """
    Validate the plugin config, e.g.::

        if config['foo'] and not config['bar']:
            raise PluginError('You cannot configure foo with bar')
        return config

    When a repo name is configured, the repo is resolved through the
    API client and its id is pinned into ``config['external_id']``.
    """
    repo_name = config.get('name')
    if repo_name:
        client = self.get_client(actor)
        try:
            repo = client.get_repo(repo_name)
        except Exception as e:
            self.raise_error(e)
        else:
            # Pin the resolved repository id for later lookups.
            config['external_id'] = six.text_type(repo['id'])
    return config
def http_request(self, path="/", method="GET", host=None, port=None, json=False, data=None):
    """
    perform a HTTP request

    :param path: str, path within the request, e.g. "/api/version"
    :param method: str, HTTP method
    :param host: str, if None, set to 127.0.0.1
    :param port: str or int, if None, set to 8080
    :param json: bool, should we expect json?
    :param data: data to send (can be dict, list, str)
    :return: dict
    """
    # Fall back to the local development defaults when unspecified.
    url = get_url(host=host or '127.0.0.1',
                  port=port or 8080,
                  path=path)
    return self.http_session.request(method, url, json=json, data=data)
def scale_WCS(self,pixel_scale,retain=True):
    ''' Scale the WCS to a new pixel_scale. The 'retain' parameter
    [default value: True] controls whether or not to retain the original
    distortion solution in the CD matrix.
    '''
    # Scale factor between the requested and the current pixel scale.
    _ratio = pixel_scale / self.pscale
    # Correct the size of the image and CRPIX values for scaled WCS
    self.naxis1 /= _ratio
    self.naxis2 /= _ratio
    # Re-center the reference pixel on the rescaled image.
    self.crpix1 = self.naxis1/2.
    self.crpix2 = self.naxis2/2.
    if retain:
        # Correct the WCS while retaining original distortion information
        self.cd11 *= _ratio
        self.cd12 *= _ratio
        self.cd21 *= _ratio
        self.cd22 *= _ratio
    else:
        # Rebuild a clean CD matrix from the new scale (degrees/pixel).
        pscale = pixel_scale / 3600.
        # NOTE(review): neither `pa` nor `N` is defined in this method's
        # scope ('N' is presumably numpy imported at module level, but
        # `pa` looks like a missing position-angle value and would raise
        # NameError here) -- confirm where `pa` should come from.
        self.cd11 = -pscale * N.cos(pa)
        self.cd12 = pscale * N.sin(pa)
        self.cd21 = self.cd12
        self.cd22 = -self.cd11
    # Now make sure that all derived values are really up-to-date based
    # on these changes
    self.update()
def inline(self) -> str:
    """
    Build the inline document string:
    ``pubkey_from:pubkey_to:timestamp_number:first_signature``.

    :return: colon-separated inline representation
    """
    fields = (self.pubkey_from, self.pubkey_to,
              self.timestamp.number, self.signatures[0])
    return "{0}:{1}:{2}:{3}".format(*fields)
def apply_uncertainties(self, branch_ids, source_group):
    """
    Parse the path through the source model logic tree and return
    "apply uncertainties" function.

    :param branch_ids:
        List of string identifiers of branches, representing the path
        through source model logic tree.
    :param source_group:
        A group of sources
    :return:
        A copy of the original group with modified sources
    """
    # Walk the logic tree along the given path, collecting every
    # (branchset, branch value) pair except the source-model choice.
    pending = list(branch_ids[::-1])
    collected = []
    branchset = self.root_branchset
    while branchset is not None:
        branch = branchset.get_branch_by_id(pending.pop(-1))
        if branchset.uncertainty_type != 'sourceModel':
            collected.append((branchset, branch.value))
        branchset = branch.child_branchset
    if not collected:
        return source_group  # nothing changed
    # Work on a deep copy so the caller's group stays untouched.
    sg = copy.deepcopy(source_group)
    sg.applied_uncertainties = []
    sg.changed = numpy.zeros(len(sg.sources), int)
    for branchset, value in collected:
        for idx, source in enumerate(sg.sources):
            changed = branchset.apply_uncertainty(value, source)
            if changed:
                sg.changed[idx] += changed
                sg.applied_uncertainties.append(
                    (branchset.uncertainty_type, value))
    return sg
def best_matches(self, target, choices):
    """\
    Yield the members of the best-scoring group of matches for *target*,
    as produced by :meth:`all_matches` with grouping enabled; yields
    nothing when there are no matches at all.

    :param target: item to match against
    :param choices: candidate items to rank
    """
    # Bind the method under a name that does not shadow the builtin
    # ``all``, then take only the first (best) group it yields.
    all_matches = self.all_matches
    try:
        best_group = next(all_matches(target, choices, group=True))
    except StopIteration:
        # No matches: terminate the generator without yielding.
        return
    for match in best_group:
        yield match
def reload_pimms():
    '''
    reload_pimms() reloads the entire pimms module and returns it.
    '''
    import sys
    # importlib.reload exists on Python 3; older interpreters expose
    # it via the deprecated imp module.  (The original also imported
    # `six` here but never used it, and caught every exception with a
    # bare except; both are tightened.)
    try:
        from importlib import reload
    except ImportError:
        from imp import reload
    # Reload the submodules first so the final package reload sees the
    # refreshed versions.
    for modname in ('pimms.util', 'pimms.table', 'pimms.immutable',
                    'pimms.calculation', 'pimms.cmdline', 'pimms'):
        reload(sys.modules[modname])
    return sys.modules['pimms']
def complete_credit_note(self, credit_note_it, complete_dict):
    """
    Completes an credit note

    :param complete_dict: the complete dict with the template id
    :param credit_note_it: the credit note id
    :return: Response
    """
    put_kwargs = {
        'resource': CREDIT_NOTES,
        'billomat_id': credit_note_it,
        'command': COMPLETE,
        'send_data': complete_dict,
    }
    return self._create_put_request(**put_kwargs)
def str_between(str_, startstr, endstr):
    r"""
    Return the substring of ``str_`` between two sentinel strings.

    A ``startstr`` of None anchors at the start of the string; an
    ``endstr`` of None (or one that is not found) anchors at the end.

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_str import *  # NOQA
        >>> import utool as ut
        >>> str_ = '\n    INSERT INTO vsone(\n'
        >>> startstr = 'INSERT'
        >>> endstr = '('
        >>> result = str_between(str_, startstr, endstr)
        >>> print(result)
    """
    # Start slicing just past the first occurrence of startstr.
    startpos = 0 if startstr is None else str_.find(startstr) + len(startstr)
    if endstr is None:
        endpos = None
    else:
        found = str_.find(endstr)
        # A missing end sentinel behaves like None (slice to the end).
        endpos = None if found == -1 else found
    return str_[startpos:endpos]
def UsesArtifact(self, artifacts):
    """Determines if the check uses the specified artifact.

    Args:
      artifacts: Either a single artifact name, or a list of artifact names

    Returns:
      True if the check uses a specific artifact.
    """
    # A bare string is tested as-is; for a collection, membership of
    # any one of the names in self.artifacts is enough.
    if isinstance(artifacts, string_types):
        return artifacts in self.artifacts
    return any(artifact in self.artifacts for artifact in artifacts)
def add_slave_server(self):
    """
    Add a slave server to the cluster: install prerequisites on the
    slave, wire up passwordless SSH between master and slave, then
    configure the big-data application (e.g. spark) on both ends.
    Every remote step is gated behind an interactive prompt_check.

    :return: None
    """
    master = self.args.config[1]
    slave = self.args.add_slave
    # install java at slave server
    self.reset_server_env(slave, self.configure)
    if self.prompt_check("Update package at slave server"):
        self.update_source_list()
        self.common_update_sys()
    if self.prompt_check("Install java and python at slave server"):
        self.java_install()
        sudo('apt-get install -y python python3 python-dev python3-dev')
    # generate ssh key at master server
    if not self.args.skip_master:
        if self.prompt_check("Generate ssh key at Master Server"):
            self.generate_ssh(master, self.args, self.configure)
    # generate ssh key at slave server and make slave connect with master
    if self.prompt_check("Make ssh connection within master and slave"):
        self.generate_ssh(slave, self.args, self.configure)
        # scp slave server ssh key to master server
        run('scp ~/.ssh/id_rsa.pub {0}@{1}:~/.ssh/id_rsa.pub.{2}'.format(
            self.configure[master]['user'],
            self.configure[master]['host'],
            slave
        ))
        # add slave ssh key to master authorized_keys
        self.reset_server_env(master, self.configure)
        run('cat ~/.ssh/id_rsa.pub* >> ~/.ssh/authorized_keys')
        # scp master authorized_keys to slave authorized_keys
        run('scp ~/.ssh/authorized_keys {0}@{1}:~/.ssh'.format(
            self.configure[slave]['user'],
            self.configure[slave]['host']
        ))
    # config slave and master
    if self.prompt_check("Configure master and slave server"):
        # NOTE(review): `master` and `slave` are re-read here although
        # unchanged since the top of the method -- redundant.
        master = self.args.config[1]
        slave = self.args.add_slave
        if self.args.bigdata_app == 'spark':
            self.add_spark_slave(master, slave, self.configure)
def iwls(y, x, family, offset, y_fix,
         ini_betas=None, tol=1.0e-8, max_iter=200, wi=None):
    """
    Iteratively re-weighted least squares estimation routine

    Parameters
    ----------
    y : array
        n*1, dependent variable
    x : array
        n*k, designs matrix of k independent variables
    family : family object
        probability models: Gaussian, Poisson, or Binomial
    offset : array
        n*1, the offset variable for each observation.
    y_fix : array
        n*1, the fixed intercept value of y for each observation
    ini_betas : array
        1*k, starting values for the k betas within the iteratively
        weighted least squares routine
    tol : float
        tolerance for estimation convergence
    max_iter : integer
        maximum number of iterations if convergence not met
    wi : array
        n*1, weights to transform observations from location i in GWR

    Returns
    -------
    betas : array
        k*1, estimated coefficients
    mu : array
        n*1, predicted y values
    wx : array
        n*1, final weights used for iwls for GLM
    n_iter : integer
        number of iterations that when iwls algorithm terminates
    w : array
        n*1, final weights used for iwls for GWR
    z : array
        iwls throughput
    v : array
        iwls throughput
    xtx_inv_xt : array
        iwls throughout to compute GWR hat matrix
        [X'X]^-1 X'
    """
    n_iter = 0
    diff = 1.0e6
    if ini_betas is None:
        # `np.float` was removed in NumPy 1.24; the builtin `float` is
        # the documented replacement dtype.
        betas = np.zeros((x.shape[1], 1), float)
    else:
        betas = ini_betas
    if isinstance(family, Binomial):
        y = family.link._clean(y)
    if isinstance(family, Poisson):
        y_off = y / offset
        y_off = family.starting_mu(y_off)
        v = family.predict(y_off)
        mu = family.starting_mu(y)
    else:
        mu = family.starting_mu(y)
        v = family.predict(mu)
    while diff > tol and n_iter < max_iter:
        n_iter += 1
        w = family.weights(mu)
        # Working response for the current linearization.
        z = v + (family.link.deriv(mu) * (y - mu))
        w = np.sqrt(w)
        if not isinstance(x, np.ndarray):
            w = sp.csr_matrix(w)
            z = sp.csr_matrix(z)
        wx = spmultiply(x, w, array_out=False)
        wz = spmultiply(z, w, array_out=False)
        if wi is None:
            n_betas = _compute_betas(wz, wx)
        else:
            n_betas, xtx_inv_xt = _compute_betas_gwr(wz, wx, wi)
        v = spdot(x, n_betas)
        mu = family.fitted(v)
        if isinstance(family, Poisson):
            # Scale fitted values back by the offset.
            mu = mu * offset
        # NOTE(review): convergence is judged on the *smallest* change
        # across coefficients; confirm `min` (rather than `max`) is the
        # intended criterion.
        diff = min(abs(n_betas - betas))
        betas = n_betas
    if wi is None:
        return betas, mu, wx, n_iter
    else:
        return betas, mu, v, w, z, xtx_inv_xt, n_iter
def pid(self):
    """Return the pid of the daemonized process, or None if not running.

    Reads the pidfile, validates its contents, and probes the process
    with signal 0.  Exits the program when the pidfile exists but is
    unreadable, or when ``os.kill`` fails in an unexpected way.
    """
    try:
        with open(self.pidfile, 'r') as handle:
            raw = handle.read().strip()
    except IOError:
        if not os.path.isfile(self.pidfile):
            # No pidfile at all: the daemon is simply not running.
            return None
        LOG.exception("Failed to read pidfile {0}.".format(self.pidfile))
        sys.exit(exit.PIDFILE_INACCESSIBLE)
    try:
        pid = int(raw)
    except ValueError:
        # Pidfile holds garbage: treat as not running.
        return None
    try:
        os.kill(pid, 0)
    except OSError as e:
        if e.errno == errno.EPERM:
            # Process exists but is owned by someone else.
            return pid
        elif e.errno == errno.ESRCH:
            # No such process.
            return None
        LOG.exception(
            "os.kill returned unhandled error "
            "{0}".format(e.strerror)
        )
        sys.exit(exit.PIDFILE_ERROR)
    return pid
def channel(self, channel_id=None, auto_encode_decode=True):
    """Return the Channel registered under ``channel_id``, creating one
    via ``self.Channel`` when absent.  See Channel for the meaning of
    ``auto_encode_decode``; for an already-existing channel the auto_*
    flag is left untouched."""
    try:
        return self.channels[channel_id]
    except KeyError:
        pass
    # Unknown id: build a fresh Channel with the requested codec flag.
    return self.Channel(self, channel_id,
                        auto_encode_decode=auto_encode_decode)
def remove_remap_file(filename):
    """Remove any mapping for *filename* and return that if it exists"""
    # dict.pop with a default is exactly remove-and-return semantics,
    # yielding None when no mapping was present.
    return file2file_remap.pop(filename, None)
def consumer(self, name):
    """
    Create a new consumer for the :py:class:`ConsumerGroup`.

    :param name: name of consumer
    :returns: a :py:class:`ConsumerGroup` using the given consumer name.
    """
    group_class = type(self)
    # Same database, group name and keys -- only the consumer differs.
    return group_class(self.database, self.name, self.keys, name)
def try_enqueue(conn, queue_name, msg):
    """
    Try to enqueue a message. If it succeeds, return the message ID.

    :param conn: SQS API connection
    :type conn: :py:class:`botocore:SQS.Client`
    :param queue_name: name of queue to put message in
    :type queue_name: str
    :param msg: JSON-serialized message body
    :type msg: str
    :return: message ID
    :rtype: str
    """
    logger.debug('Getting Queue URL for queue %s', queue_name)
    queue_url = conn.get_queue_url(QueueName=queue_name)['QueueUrl']
    logger.debug('Sending message to queue at: %s', queue_url)
    response = conn.send_message(
        QueueUrl=queue_url,
        MessageBody=msg,
        DelaySeconds=0
    )
    message_id = response['MessageId']
    logger.debug('Enqueued message in %s with ID %s', queue_name, message_id)
    return message_id
def add_threat_list(self, threat_list):
    """Insert a threat_list row unless an identical entry already exists."""
    q = '''INSERT OR IGNORE INTO threat_list
            (threat_type, platform_type, threat_entry_type, timestamp)
        VALUES
            (?, ?, ?, current_timestamp)
    '''
    # Bind the three identifying columns; the timestamp comes from SQL.
    params = [threat_list.threat_type,
              threat_list.platform_type,
              threat_list.threat_entry_type]
    with self.get_cursor() as dbc:
        dbc.execute(q, params)
def search(term, lang=None):
    """
    As a convenient alternative to downloading and parsing a dump,
    this function will instead query the AID search provided by Eloyard.
    This is the same information available at http://anisearch.outrance.pl/.

    :param str term: Search Term
    :param list lang: A list of language codes which determines what titles are returned
    :return: dict mapping anime id -> {language code: title}
    :raises ServerError: when the service does not answer with HTTP 200
    """
    r = requests.get(
        "http://anisearch.outrance.pl/index.php",
        params={
            "task": "search",
            "query": term,
            "langs": "ja,x-jat,en" if lang is None else ','.join(lang)
        }
    )
    if r.status_code != 200:
        raise ServerError
    # ET.fromstring already returns the root Element; the original
    # called .getroot() on it, which only exists on ElementTree objects.
    root = ET.fromstring(r.text)
    # The original referenced `results` and `aid` without ever defining
    # them; initialize the dict and read the id from the anime element.
    results = {}
    for item in root.iter("anime"):
        # Parse XML http://wiki.anidb.net/w/User:Eloyard/anititles_dump
        aid = int(item.attrib['aid'])
        results[aid] = {}
        for title in item.iter('title'):
            if title.attrib['type'] in ['official', 'main']:
                # NOTE(review): ElementTree exposes xml:lang under its
                # expanded namespace key
                # ('{http://www.w3.org/XML/1998/namespace}lang');
                # confirm 'xml:lang' actually matches here.
                results[aid][title.attrib['xml:lang']] = title.text
    return results
def save(self, *args, **kwargs):
    """Saving ensures that the slug, if not set, is set to the slugified name."""
    # Run model validation before touching any field.
    self.clean()
    # Derive the slug from the name only when the caller left it empty.
    if not self.slug:
        self.slug = slugify(self.name)
    super(SpecialCoverage, self).save(*args, **kwargs)
    # Push a non-empty query to the percolator after the row is saved.
    if self.query and self.query != {}:
        # Always save and require client to filter active date range
        self._save_percolator()
def add_new_refs(cls, manifest, current_project, node, macros):
    """Given a new node that is not in the manifest, copy the manifest and
    insert the new node into it as if it were part of regular ref
    processing

    :param manifest: the manifest to copy (the caller's copy is not mutated)
    :param current_project: project config used for the deep copy
    :param node: the new node to insert; its unique_id must be unused
    :param macros: extra macros merged into the copied manifest
    :return: the copied manifest with the node's sources/refs/docs processed
    """
    # Deep-copy first so the caller's manifest is never mutated.
    manifest = manifest.deepcopy(config=current_project)
    # it's ok for macros to silently override a local project macro name
    manifest.macros.update(macros)
    if node.unique_id in manifest.nodes:
        # this should be _impossible_ due to the fact that rpc calls get
        # a unique ID that starts with 'rpc'!
        # NOTE(review): `raise_duplicate_resource_name` appears to raise
        # internally, making the enclosing `raise` on its (never
        # produced) return value redundant -- confirm and simplify.
        raise dbt.exceptions.raise_duplicate_resource_name(
            manifest.nodes[node.unique_id], node
        )
    manifest.nodes[node.unique_id] = node
    # Resolve sources, refs and docs for the new node in place.
    cls.process_sources_for_node(manifest, current_project, node)
    cls.process_refs_for_node(manifest, current_project, node)
    cls.process_docs_for_node(manifest, current_project, node)
    return manifest
def cleanup(self, **kwargs):
    """Tear down the ASA context for an edge tenant pair.

    Removes the tenant context, both sub-interfaces, the saved context
    config on disk and any firewall rules tracked for the tenant, then
    pushes the whole CLI batch to the device in one REST call.
    """
    params = kwargs.get('params')
    LOG.info("asa_cleanup: tenant %(tenant)s %(in_vlan)d %(out_vlan)d"
             " %(in_ip)s %(in_mask)s %(out_ip)s %(out_mask)s",
             {'tenant': params.get('tenant_name'),
              'in_vlan': params.get('in_vlan'),
              'out_vlan': params.get('out_vlan'),
              'in_ip': params.get('in_ip'),
              'in_mask': params.get('in_mask'),
              'out_ip': params.get('out_ip'),
              'out_mask': params.get('out_mask')})
    inside_vlan = str(params.get('in_vlan'))
    outside_vlan = str(params.get('out_vlan'))
    context = params.get('tenant_name')
    # Sub-interface names: <physical intf>.<vlan>.
    inside_int = params.get('intf_in') + '.' + inside_vlan
    outside_int = params.get('intf_out') + '.' + outside_vlan
    cmds = [
        "conf t",
        "changeto system",
        "no context " + context + " noconfirm",
        "no interface " + inside_int,
        "no interface " + outside_int,
        "write memory",
        "del /noconfirm disk0:/" + context + ".cfg",
    ]
    # Drop every firewall rule tracked for this tenant.
    if context in self.tenant_rule:
        for rule in self.tenant_rule[context].get('rule_lst'):
            del self.rule_tbl[rule]
        del self.tenant_rule[context]
    return self.rest_send_cli({"commands": cmds})
def _validate_config(self, config):
    """
    Validates that all necessary config parameters are specified.

    :type config: dict[str, dict[str, Any] | str]
    :param config: the module config
    :raises ValueError: when config is None or a required key is missing
    """
    if config is None:
        raise ValueError("OIDCFrontend conf can't be 'None'.")
    for required in ("signing_key_path", "provider"):
        if required not in config:
            raise ValueError("Missing configuration parameter '{}' for OpenID Connect frontend.".format(required))
def manage_beacons(self, tag, data):
    '''
    Manage Beacons: dispatch the requested management action to the
    matching method on ``self.beacons``.
    '''
    func = data.get('func', None)
    name = data.get('name', None)
    beacon_data = data.get('beacon_data', None)
    include_pillar = data.get('include_pillar', None)
    include_opts = data.get('include_opts', None)
    # Map each action name to (beacons-method name, positional args).
    dispatch = {
        'add': ('add_beacon', (name, beacon_data)),
        'modify': ('modify_beacon', (name, beacon_data)),
        'delete': ('delete_beacon', (name,)),
        'enable': ('enable_beacons', ()),
        'disable': ('disable_beacons', ()),
        'enable_beacon': ('enable_beacon', (name,)),
        'disable_beacon': ('disable_beacon', (name,)),
        'list': ('list_beacons', (include_opts, include_pillar)),
        'list_available': ('list_available_beacons', ()),
        'validate_beacon': ('validate_beacon', (name, beacon_data)),
        'reset': ('reset', ()),
    }
    # Call the appropriate beacon function
    try:
        method_name, call_args = dispatch.get(func)
        getattr(self.beacons, method_name)(*call_args)
    except AttributeError:
        log.error('Function "%s" is unavailable in salt.beacons', func)
    except TypeError as exc:
        # An unknown `func` unpacks None and lands here as well.
        log.info(
            'Failed to handle %s with data(%s). Error: %s',
            tag, data, exc,
            exc_info_on_loglevel=logging.DEBUG
        )
def set_char(key, value):
    """ Update a character used to render components.

    Raises KeyError when *key* belongs to no known character category.
    """
    category = _get_char_category(key)
    if not category:
        raise KeyError
    _chars[category][key] = value
def callers(variant_obj, category='snv'):
    """Return info about callers."""
    # Deduplicate (caller name, call value) pairs with a set comprehension.
    found = {
        (caller['name'], variant_obj[caller['id']])
        for caller in CALLERS[category]
        if variant_obj.get(caller['id'])
    }
    return list(found)
def to_dict(self, ignore_none: bool=True, force_value: bool=True, ignore_empty: bool=False) -> dict:
    """Convert this instance into a plain dict.

    :param ignore_none: Properties which is None are excluded if True
    :param force_value: Transform to value using to_value (default: str()) of ValueTransformer which inherited if True
    :param ignore_empty: Properties which is empty are excluded if True
    :return: Dict

    Usage:

        >>> from owlmixin.samples import Human, Food
        >>> human_dict = {
        ...     "id": 1,
        ...     "name": "Tom",
        ...     "favorites": [
        ...         {"name": "Apple", "names_by_lang": {"en": "Apple"}}
        ...     ]
        ... }
        >>> Human.from_dict(human_dict).to_dict() == human_dict
        True

        You can include None properties by specifying False for ignore_none:

        >>> f = Food.from_dict({"name": "Apple"}).to_dict(ignore_none=False)
        >>> "names_by_lang" in f
        True

        You can exclude empty properties by specifying True for ignore_empty:

        >>> f = Human.from_dict({"id": 1, "name": "Ichiro", "favorites": []}).to_dict(ignore_empty=True)
        >>> "favorites" in f
        False
    """
    # All filtering/conversion is delegated to the shared traversal.
    return traverse_dict(self._dict, ignore_none, force_value, ignore_empty)
def getmessage(self) -> str:
    """ Serialize this message's public class attributes into a unicode
    string usable as message content.

    Date/datetime-typed defaults are rendered with the configured
    date/datetime formats; every other attribute is taken verbatim.
    """
    image = {}
    for key, default in vars(self.__class__).items():
        # NOTE(review): comparing `key` against vars(QueueMessage).items()
        # (an iterable of (name, value) tuples) can never match, so that
        # guard is a no-op as written -- confirm whether plain
        # `vars(QueueMessage)` was intended.
        if not key.startswith('_') and key != '' and (not key in vars(QueueMessage).items()):
            # datetime must be tested before date: datetime is a subclass
            # of date, and the original pair of independent `if`s let the
            # trailing `else` clobber already-formatted date values.
            if isinstance(default, datetime.datetime):
                image[key] = safe_cast(getattr(self, key, default), str, dformat=self._datetimeformat)
            elif isinstance(default, datetime.date):
                image[key] = safe_cast(getattr(self, key, default), str, dformat=self._dateformat)
            else:
                image[key] = getattr(self, key, default)
    return str(image)
def BackAssign(cls,
               other_entity_klass,
               this_entity_backpopulate_field,
               other_entity_backpopulate_field,
               is_many_to_one=False):
    """
    Assign defined one side mapping relationship to other side.

    For example, each employee belongs to one department, then one department
    includes many employees. If you defined each employee's department,
    this method will assign employees to ``Department.employees`` field.
    This is an one to many (department to employee) example.

    Another example would be, each employee has multiple tags. If you defined
    tags for each employee, this method will assign employees to
    ``Tag.employees`` field. This is and many to many (employee to tag) example.

    Support:

    - many to many mapping
    - one to many mapping

    :param other_entity_klass: a :class:`Constant` class.
    :param this_entity_backpopulate_field: str
    :param other_entity_backpopulate_field: str
    :param is_many_to_one: bool
    :return:
    """
    # Map: this-entity subclass name -> list of other-entity subclasses
    # that reference it via `this_entity_backpopulate_field`.
    data = dict()
    for _, other_klass in other_entity_klass.Subclasses():
        other_field_value = getattr(
            other_klass, this_entity_backpopulate_field)
        if isinstance(other_field_value, (tuple, list)):
            # The field lists several related classes (many-to-many).
            for self_klass in other_field_value:
                self_key = self_klass.__name__
                try:
                    data[self_key].append(other_klass)
                except KeyError:
                    data[self_key] = [other_klass, ]
        else:
            # The field points at a single related class (one-to-many).
            if other_field_value is not None:
                self_klass = other_field_value
                self_key = self_klass.__name__
                try:
                    data[self_key].append(other_klass)
                except KeyError:
                    data[self_key] = [other_klass, ]
    if is_many_to_one:
        # Collapse each collected list to its single element.
        new_data = dict()
        for key, value in data.items():
            try:
                new_data[key] = value[0]
            except: # pragma: no cover
                pass
        data = new_data
    # Write the collected back-references onto this entity's subclasses.
    for self_key, other_klass_list in data.items():
        setattr(getattr(cls, self_key),
                other_entity_backpopulate_field, other_klass_list)
def lp7(self, reaction_subset):
    """Approximately maximize the number of reaction with flux.

    This is similar to FBA but approximately maximizing the number of
    reactions in subset with flux > epsilon, instead of just maximizing the
    flux of one particular reaction. LP7 prefers "flux splitting" over
    "flux concentrating".

    :param reaction_subset: iterable of reaction IDs whose number of
        active fluxes should be (approximately) maximized.
    """
    # Lazily create the auxiliary maximization variables (zl) on first use.
    if self._zl is None:
        self._add_maximization_vars()
    # Reactions whose direction has been flipped get the mirrored
    # inequality (v <= -zl) below.
    positive = set(reaction_subset) - self._flipped
    negative = set(reaction_subset) & self._flipped
    v = self._v.set(positive)
    zl = self._zl.set(positive)
    cs = self._prob.add_linear_constraints(v >= zl)
    # Constraints are recorded as temporary so they can be removed after
    # the solve.
    self._temp_constr.extend(cs)
    v = self._v.set(negative)
    zl = self._zl.set(negative)
    cs = self._prob.add_linear_constraints(v <= -zl)
    self._temp_constr.extend(cs)
    # Objective: maximize the sum of the zl variables over the subset.
    self._prob.set_objective(self._zl.sum(reaction_subset))
    self._solve()
This is similar to FBA but approximately maximizing the number of
reactions in subset with flux > epsilon, instead of just maximizing the
flux of one particular reaction. LP7 prefers "flux splitting" over
"flux concentrating". |
def bandpass(s, f1, f2, order=2, fs=1000.0, use_filtfilt=False):
    """Apply a Butterworth band-pass filter to a signal.

    Frequencies between ``f1`` and ``f2`` are passed while frequencies
    outside that band are attenuated. Higher orders attenuate frequencies
    near the cutoffs more sharply, at the cost of computational complexity
    and potential filter instability.

    Parameters
    ----------
    s: array-like
        signal
    f1: int
        the lower cutoff frequency
    f2: int
        the upper cutoff frequency
    order: int
        Butterworth filter order
    fs: float
        sampling frequency
    use_filtfilt: boolean
        If True, the signal will be filtered once forward and then
        backwards. The result will have zero phase and twice the order
        chosen.

    Returns
    -------
    signal: array-like
        filtered signal
    """
    # Cutoffs are normalized to the Nyquist frequency (fs / 2).
    normalized_band = [f1 * 2 / fs, f2 * 2 / fs]
    numer, denom = butter(order, normalized_band, btype='bandpass')
    apply_filter = filtfilt if use_filtfilt else lfilter
    return apply_filter(numer, denom, s)
Brief
-----
For a given signal s passes the frequencies within a certain range (between f1 and f2) and rejects (attenuates) the
frequencies outside that range by applying a Butterworth digital filter.
-----------
Description
-----------
Signals may have frequency components of multiple bands. If our interest is to have an idea about the behaviour
of a specific frequency band, we should apply a band pass filter, which would attenuate all the remaining
frequencies of the signal. The degree of attenuation is controlled by the parameter "order", that as it increases,
allows to better attenuate frequencies closer to the cutoff frequency. Notwithstanding, the higher the order, the
higher the computational complexity and the higher the instability of the filter that may compromise the results.
This function allows to apply a band pass Butterworth digital filter and returns the filtered signal.
----------
Parameters
----------
s: array-like
signal
f1: int
the lower cutoff frequency
f2: int
the upper cutoff frequency
order: int
Butterworth filter order
fs: float
sampling frequency
use_filtfilt: boolean
If True, the signal will be filtered once forward and then backwards. The result will have zero phase and twice
the order chosen.
Returns
-------
signal: array-like
filtered signal |
def add_called_sequence(self, section, name, sequence, qstring):
    """ Add basecalled sequence data

    Stores the called sequence as a FASTQ record in the analysis group,
    creating the ``BaseCalled_<section>`` subgroup on first use.

    :param section: ['template', 'complement' or '2D']
    :param name: The record ID to use for the fastq.
    :param sequence: The called sequence.
    :param qstring: The quality string.
    """
    event_group = 'BaseCalled_{}'.format(section)
    # 'x not in y' is the idiomatic (PEP 8) spelling of 'not x in y'.
    if event_group not in self.handle.handle['Analyses/{}'.format(self.group_name)]:
        self.handle.add_analysis_subgroup(self.group_name, event_group)
    fastq_text = '@{}\n{}\n+\n{}\n'.format(name, sequence, qstring)
    fastq_arr = np.array(fastq_text, dtype=str)
    self.handle.add_analysis_dataset('{}/{}'.format(self.group_name, event_group), 'Fastq', fastq_arr)
:param section: ['template', 'complement' or '2D']
:param name: The record ID to use for the fastq.
:param sequence: The called sequence.
:param qstring: The quality string. |
def info(self, section=None):
    """Fetch server information and statistics via the INFO command.

    The reply format is simple for computers to parse and easy for humans
    to read. The optional ``section`` narrows output to one section:
    ``server``, ``clients``, ``memory``, ``persistence``, ``stats``,
    ``replication``, ``cpu``, ``commandstats``, ``cluster`` or
    ``keyspace``. The special values ``all`` and ``default`` return all
    sections or only the default set. When no parameter is provided, the
    default option is assumed.

    :param str section: Optional
    :return: dict
    """
    command = [b'INFO', section] if section else [b'INFO']
    return self._execute(command,
                         format_callback=common.format_info_response)
in a format that is simple to parse by computers and easy to read by
humans.
The optional parameter can be used to select a specific section of
information:
- server: General information about the Redis server
- clients: Client connections section
- memory: Memory consumption related information
- persistence: RDB and AOF related information
- stats: General statistics
- replication: Master/slave replication information
- cpu: CPU consumption statistics
- commandstats: Redis command statistics
- cluster: Redis Cluster section
- keyspace: Database related statistics
It can also take the following values:
- all: Return all sections
- default: Return only the default set of sections
When no parameter is provided, the default option is assumed.
:param str section: Optional
:return: dict |
def removereadergroup(self, group):
    """Remove a reader group from the smart card subsystem."""
    hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
    if hresult != 0:
        raise error(
            'Failed to establish context: ' +
            SCardGetErrorMessage(hresult))
    try:
        hresult = SCardForgetReaderGroup(hcontext, group)
        if 0 != hresult:
            raise error(
                'Unable to forget reader group: ' +
                SCardGetErrorMessage(hresult))
        # Only update the in-memory group list once the system call
        # succeeded.
        innerreadergroups.removereadergroup(self, group)
    finally:
        # Always release the context, even on failure.
        hresult = SCardReleaseContext(hcontext)
        if hresult != 0:
            raise error(
                'Failed to release context: ' +
                SCardGetErrorMessage(hresult))
def _filter_repeating_items(download_list):
""" Because of data_filter some requests in download list might be the same. In order not to download them again
this method will reduce the list of requests. It will also return a mapping list which can be used to
reconstruct the previous list of download requests.
:param download_list: List of download requests
:type download_list: list(sentinelhub.DownloadRequest)
:return: reduced download list with unique requests and mapping list
:rtype: (list(sentinelhub.DownloadRequest), list(int))
"""
unique_requests_map = {}
mapping_list = []
unique_download_list = []
for download_request in download_list:
if download_request not in unique_requests_map:
unique_requests_map[download_request] = len(unique_download_list)
unique_download_list.append(download_request)
mapping_list.append(unique_requests_map[download_request])
return unique_download_list, mapping_list | Because of data_filter some requests in download list might be the same. In order not to download them again
this method will reduce the list of requests. It will also return a mapping list which can be used to
reconstruct the previous list of download requests.
:param download_list: List of download requests
:type download_list: list(sentinelhub.DownloadRequest)
:return: reduced download list with unique requests and mapping list
:rtype: (list(sentinelhub.DownloadRequest), list(int)) |
def find_minimal_node(self, node_head, discriminator):
    """!
    @brief Find minimal node in line with coordinate that is defined by discriminator.

    @param[in] node_head (node): Node of KD tree from that search should be started.
    @param[in] discriminator (uint): Coordinate number that is used for comparison.

    @return (node) Minimal node in line with discriminator from the specified node.
    """
    # Iterative in-order traversal collecting every node of the subtree.
    pending = []
    visited = []
    current = node_head
    while pending or current is not None:
        if current is not None:
            pending.append(current)
            current = current.left
        else:
            current = pending.pop()
            visited.append(current)
            current = current.right
    # Pick the node whose coordinate along the discriminator axis is
    # smallest (first such node in in-order on ties).
    return min(visited, key=lambda item: item.data[discriminator])
@brief Find minimal node in line with coordinate that is defined by discriminator.
@param[in] node_head (node): Node of KD tree from that search should be started.
@param[in] discriminator (uint): Coordinate number that is used for comparison.
@return (node) Minimal node in line with descriminator from the specified node. |
def login(self, username=None, password=None, login_url=None,
          auth_url=None):
    """
    This will automatically log the user into the pre-defined account
    Feel free to overwrite this with an endpoint on endpoint load

    :param username: str of the user name to login in as
    :param password: str of the password to login as
    :param login_url: str of the url for the server's login
    :param auth_url: str of the url for the server's authorization login
    :return: str of self._status
    """
    # Arguments override previously stored credentials/URLs.
    self._username = username or self._username
    self._password = password or self._password
    self.login_url = login_url or self.login_url
    self.auth_url = auth_url or self.auth_url
    if self._username and self._password and self.login_url:
        if '%(username)s' in self.login_url:
            # BUG FIX: interpolate the resolved self.login_url, not the raw
            # login_url argument, which may be None when the URL was set on
            # the instance earlier.
            ret = self.post(_url=self.login_url % dict(
                username=self._username,
                password=self._password,
                api_key=self._api_key))
        else:
            ret = self.post(_url=self.login_url, username=self._username,
                            password=self._password, api_key=self._api_key)
        if isinstance(ret, basestring) and 'Burp' in ret:
            raise Exception("Failed to Login because the "
                            "server is not responding")
        self.user_id = ret.get('user_id', None)
        if self.auth_url:
            # list(ret)[0] works on both Python 2 and 3; ret.keys()[0]
            # raises TypeError on Python 3 (dict_keys is not indexable).
            if ('%(' + list(ret)[0] + ')s') in self.auth_url:
                ret = self.post(_url=self.auth_url % ret)
            else:
                ret = self.post(_url=self.auth_url, **ret)
        log.info('Successfully logged in with response: %s' % ret)
        if self._login_clears_history:
            self.clear()
        self._status = STATUS_LOGIN_SUCCESS
    else:
        self._status = STATUS_LOGIN_FAIL
    return self._status
Feel free to overwrite this with an endpoint on endpoint load
:param username: str of the user name to login in as
:param password: str of the password to login as
:param login_url: str of the url for the server's login
:param auth_url: str of the url for the server's authorization login
:return: str of self._status |
def cancelScannerSubscription(self, dataList: ScanDataList):
    """
    Cancel market data subscription.

    https://interactivebrokers.github.io/tws-api/market_scanners.html

    Args:
        dataList: The scan data list that was obtained from
            :meth:`.reqScannerSubscription`.
    """
    # Ask the broker to stop the scanner identified by this request id...
    self.client.cancelScannerSubscription(dataList.reqId)
    # ...then drop the local bookkeeping for the subscription.
    self.wrapper.endSubscription(dataList)
https://interactivebrokers.github.io/tws-api/market_scanners.html
Args:
dataList: The scan data list that was obtained from
:meth:`.reqScannerSubscription`. |
def build_state_assignment(self, runnable, regime, state_assignment):
    """
    Build state assignment code.

    @param state_assignment: State assignment object
    @type state_assignment: lems.model.dynamics.StateAssignment

    @return: Generated state assignment code
    @rtype: string
    """
    expression_code = self.build_expression_from_tree(
        runnable, regime, state_assignment.expression_tree)
    assignment = 'self.{0} = {1}'.format(state_assignment.variable,
                                         expression_code)
    return [assignment]
@param state_assignment: State assignment object
@type state_assignment: lems.model.dynamics.StateAssignment
@return: Generated state assignment code
@rtype: string |
def create_api_key(self, body, **kwargs):  # noqa: E501
    """Create a new API key.  # noqa: E501

    An endpoint for creating a new API key. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys -d '{\"name\": \"MyKey1\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'`  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.create_api_key(body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param ApiKeyInfoReq body: The details of the API key to be created. (required)
    :return: ApiKeyInfoResp
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both branches delegate to the same call; asynchronous mode returns
    # the request thread, synchronous mode returns the response data.
    return self.create_api_key_with_http_info(body, **kwargs)  # noqa: E501
An endpoint for creating a new API key. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys -d '{\"name\": \"MyKey1\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_api_key(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param ApiKeyInfoReq body: The details of the API key to be created. (required)
:return: ApiKeyInfoResp
If the method is called asynchronously,
returns the request thread. |
def build_tree(self, data, tagname, attrs=None, depth=0):
    r"""Build xml tree.

    Recursively appends XML fragments for ``data`` to the internal tree
    buffer.

    :param data: data for build xml.
    :param tagname: element tag name.
    :param attrs: element attributes. Default:``None``.
    :type attrs: dict or None
    :param depth: element depth of the hierarchy. Default:``0``.
    :type depth: int
    """
    if data is None:
        data = ''
    indent = ('\n%s' % (self.__options['indent'] * depth)) if self.__options['indent'] else ''
    if isinstance(data, utils.DictTypes):
        if self.__options['hasattr'] and self.check_structure(data.keys()):
            attrs, values = self.pickdata(data)
            self.build_tree(values, tagname, attrs, depth)
        else:
            self.__tree.append('%s%s' % (indent, self.tag_start(tagname, attrs)))
            # dict.items() works on both Python 2 and 3; the original
            # iteritems() call raises AttributeError on Python 3. The name
            # also no longer shadows the builtin ``iter``.
            items = data.items()
            if self.__options['ksort']:
                items = sorted(items, key=lambda x: x[0], reverse=self.__options['reverse'])
            for k, v in items:
                attrs = {}
                if self.__options['hasattr'] and isinstance(v, utils.DictTypes) and self.check_structure(v.keys()):
                    attrs, v = self.pickdata(v)
                self.build_tree(v, k, attrs, depth + 1)
            self.__tree.append('%s%s' % (indent, self.tag_end(tagname)))
    elif utils.is_iterable(data):
        # Sequences produce one sibling element per item, same tag.
        for v in data:
            self.build_tree(v, tagname, attrs, depth)
    else:
        self.__tree.append(indent)
        data = self.safedata(data, self.__options['cdata'])
        self.__tree.append(self.build_tag(tagname, data, attrs))
:param data: data for build xml.
:param tagname: element tag name.
:param attrs: element attributes. Default:``None``.
:type attrs: dict or None
:param depth: element depth of the hierarchy. Default:``0``.
:type depth: int |
def get_queryset(self):
    "Restrict to a single kind of event, if any, and include Venue data."
    queryset = super().get_queryset()
    event_kind = self.get_event_kind()
    if event_kind is not None:
        queryset = queryset.filter(kind=event_kind)
    # select_related avoids an extra query per event for the venue.
    return queryset.select_related('venue')
def get(self):
    """Gets the current evaluation result.

    Returns
    -------
    (name, value) : tuple of (str, float)
        Name of the metric and its current value; the value is NaN when
        no instances have been seen yet.
    """
    count = self.num_inst
    value = float('nan') if count == 0 else self.sum_metric / count
    return (self.name, value)
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations. |
def build_from_file(self, dockerfile, tag, **kwargs):
    """
    Builds a docker image from the given :class:`~dockermap.build.dockerfile.DockerFile`.
    Shortcut for :meth:`build_from_context` when no extra data is added to
    the context.

    :param dockerfile: An instance of :class:`~dockermap.build.dockerfile.DockerFile`.
    :type dockerfile: dockermap.build.dockerfile.DockerFile
    :param tag: New image tag.
    :type tag: unicode | str
    :param kwargs: See :meth:`docker.client.Client.build`.
    :return: New, generated image id or ``None``.
    :rtype: unicode | str
    """
    context = DockerContext(dockerfile, finalize=True)
    with context as ctx:
        return self.build_from_context(ctx, tag, **kwargs)
:meth:`build_from_context`, if no extra data is added to the context.
:param dockerfile: An instance of :class:`~dockermap.build.dockerfile.DockerFile`.
:type dockerfile: dockermap.build.dockerfile.DockerFile
:param tag: New image tag.
:type tag: unicode | str
:param kwargs: See :meth:`docker.client.Client.build`.
:return: New, generated image id or ``None``.
:rtype: unicode | str |
def get_topology(self):
    """
    Get the converted topology ready for JSON encoding

    :return: converted topology assembled into a single dict
    :rtype: dict
    """
    topology_data = {}
    result = {'name': self._name,
              'resources_type': 'local',
              'topology': topology_data,
              'type': 'topology',
              'version': '1.0'}
    # Only non-empty sections are emitted, in this fixed order.
    optional_sections = (
        ('links', self._links),
        ('nodes', self._nodes),
        ('servers', self._servers),
        ('notes', self._notes),
        ('ellipses', self._shapes['ellipse']),
        ('rectangles', self._shapes['rectangle']),
        ('images', self._images),
    )
    for section_name, section_value in optional_sections:
        if section_value:
            topology_data[section_name] = section_value
    return result
:return: converted topology assembled into a single dict
:rtype: dict |
def port_profile_vlan_profile_switchport_mode_vlan_mode(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF config element setting the switchport VLAN mode of
    a port-profile's vlan-profile, then passes it to the callback.
    Required kwargs: ``name`` and ``vlan_mode``; optional ``callback``.
    """
    config = ET.Element("config")
    port_profile = ET.SubElement(
        config, "port-profile",
        xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(port_profile, "name").text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    mode = ET.SubElement(switchport, "mode")
    vlan_mode_elem = ET.SubElement(mode, "vlan-mode")
    vlan_mode_elem.text = kwargs.pop('vlan_mode')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def lang_items(self, lang=None):
    """Yield pairs of (id, string) for the given language.

    Falls back to ``self.language`` when ``lang`` is None; an empty cache
    entry is created for unknown languages.
    """
    selected = lang if lang is not None else self.language
    for item_id, text in self.cache.setdefault(selected, {}).items():
        yield item_id, text
def bullet_ant():
    """Configuration for PyBullet's ant task.

    Returns a dict of hyperparameters: the shared ``default()`` settings
    plus the ant-specific overrides below.
    """
    # Pull the shared defaults into local scope so the locals() snapshot
    # returned below includes them.
    locals().update(default())
    # Environment
    # Importing pybullet_envs registers the Bullet gym environments.
    import pybullet_envs  # noqa pylint: disable=unused-import
    env = 'AntBulletEnv-v0'
    max_length = 1000
    steps = 3e7  # 30M
    update_every = 60
    return locals()
def to_reminders(self, ical, label=None, priority=None, tags=None,
                 tail=None, sep=" ", postdate=None, posttime=None):
    """Return Remind commands for all events of a iCalendar"""
    # A calendar without any vevent entries produces no output.
    if not hasattr(ical, 'vevent_list'):
        return ''
    parts = []
    for vevent in ical.vevent_list:
        parts.append(self.to_remind(vevent, label, priority, tags, tail,
                                    sep, postdate, posttime))
    return ''.join(parts)
def x_at_y(self, y, reverse=False):
    """
    Calculates inverse profile - for given y returns x such that f(x) = y
    If given y is not found in the self.y, then interpolation is used.
    By default returns first result looking from left,
    if reverse argument set to True,
    looks from right. If y is outside range of self.y
    then np.nan is returned.

    Use inverse lookup to get x-coordinate of first point:
    >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
            .x_at_y(5.))
    0.0

    Use inverse lookup to get x-coordinate of second point,
    looking from left:
    >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
            .x_at_y(10.))
    0.1

    Use inverse lookup to get x-coordinate of fourth point,
    looking from right:
    >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
            .x_at_y(10., reverse=True))
    0.3

    Use interpolation between first two points:
    >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
            .x_at_y(7.5))
    0.05

    Looking for y below self.y range:
    >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
            .x_at_y(2.0))
    nan

    Looking for y above self.y range:
    >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
            .x_at_y(22.0))
    nan

    :param y: reference value
    :param reverse: boolean value - direction of lookup
    :return: x value corresponding to given y or NaN if not found
    """
    # BUG FIX: log message previously named the method "y_at_x".
    logger.info('Running %(name)s.x_at_y(y=%(y)s, reverse=%(rev)s)',
                {"name": self.__class__, "y": y, "rev": reverse})
    # positive or negative direction handles
    x_handle, y_handle = self.x, self.y
    if reverse:
        x_handle, y_handle = self.x[::-1], self.y[::-1]
    # find the index of first value in self.y greater or equal than y
    cond = y_handle >= y
    ind = np.argmax(cond)
    # two boundary conditions where x cannot be found:
    # A) y > max(self.y)
    # B) y < min(self.y)
    # A) if y > max(self.y) then condition self.y >= y
    # will never be satisfied
    # np.argmax( cond ) will be equal 0 and cond[ind] will be False
    if not cond[ind]:
        return np.nan
    # B) if y < min(self.y) then condition self.y >= y
    # will be satisfied on first item
    # np.argmax(cond) will be equal 0,
    # to exclude situation that y_handle[0] = y
    # we also check if y < y_handle[0]
    if ind == 0 and y < y_handle[0]:
        return np.nan
    # use lookup if y in self.y:
    if cond[ind] and y_handle[ind] == y:
        return x_handle[ind]
    # use linear interpolation between the two samples bracketing y
    sl = slice(ind - 1, ind + 1)
    return np.interp(y, y_handle[sl], x_handle[sl])
If given y is not found in the self.y, then interpolation is used.
By default returns first result looking from left,
if reverse argument set to True,
looks from right. If y is outside range of self.y
then np.nan is returned.
Use inverse lookup to get x-coordinate of first point:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(5.))
0.0
Use inverse lookup to get x-coordinate of second point,
looking from left:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(10.))
0.1
Use inverse lookup to get x-coordinate of fourth point,
looking from right:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(10., reverse=True))
0.3
Use interpolation between first two points:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(7.5))
0.05
Looking for y below self.y range:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(2.0))
nan
Looking for y above self.y range:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(22.0))
nan
:param y: reference value
:param reverse: boolean value - direction of lookup
:return: x value corresponding to given y or NaN if not found |
def digital_write_pullup(pin_num, value, hardware_addr=0):
    """Writes the value to the input pullup specified.

    .. note:: This function is for familiarality with users of other types
       of IO board. Consider accessing the ``gppub`` attribute of a
       PiFaceDigital object:

       >>> pfd = PiFaceDigital(hardware_addr)
       >>> hex(pfd.gppub.value)
       0xff
       >>> pfd.gppub.bits[pin_num].value = 1

    :param pin_num: The pin number to write to.
    :type pin_num: int
    :param value: The value to write.
    :type value: int
    :param hardware_addr: The board to read from (default: 0)
    :type hardware_addr: int
    """
    board = _get_pifacedigital(hardware_addr)
    pullup_bit = board.gppub.bits[pin_num]
    pullup_bit.value = value
.. note:: This function is for familiarality with users of other types of
IO board. Consider accessing the ``gppub`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> hex(pfd.gppub.value)
0xff
>>> pfd.gppub.bits[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int |
def assign_interval(data):
    """Identify coverage based on percent of genome covered and relation to targets.

    Classifies coverage into 3 categories:
      - genome: Full genome coverage
      - regional: Regional coverage, like exome capture, with off-target reads
      - amplicon: Amplication based regional coverage without off-target reads

    :param data: sample data dictionary; the decision is written into
        ``data["config"]["algorithm"]["coverage_interval"]``.
    :return: the (possibly updated) sample data dictionary.
    """
    # Respect an explicitly configured coverage interval; only classify
    # when none is set.
    if not dd.get_coverage_interval(data):
        vrs = dd.get_variant_regions_merged(data)
        callable_file = dd.get_sample_callable(data)
        # Prefer the variant-regions BED for the covered size; fall back
        # to the callable-regions file when no target BED exists.
        if vrs:
            callable_size = pybedtools.BedTool(vrs).total_coverage()
        else:
            callable_size = pybedtools.BedTool(callable_file).total_coverage()
        total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])])
        genome_cov_pct = callable_size / float(total_size)
        # High fraction of the genome covered -> whole-genome sample.
        if genome_cov_pct > GENOME_COV_THRESH:
            cov_interval = "genome"
            offtarget_pct = 0.0
        elif not vrs:
            # Callable-only data without a target BED is treated as
            # regional capture.
            cov_interval = "regional"
            offtarget_pct = 0.0
        else:
            # Off-target read fraction separates hybrid capture
            # (regional) from amplicon-based targeting.
            offtarget_pct = _count_offtarget(data, dd.get_align_bam(data) or dd.get_work_bam(data),
                                             vrs or callable_file, "variant_regions")
            if offtarget_pct > OFFTARGET_THRESH:
                cov_interval = "regional"
            else:
                cov_interval = "amplicon"
        logger.info("%s: Assigned coverage as '%s' with %.1f%% genome coverage and %.1f%% offtarget coverage"
                    % (dd.get_sample_name(data), cov_interval, genome_cov_pct * 100.0, offtarget_pct * 100.0))
        data["config"]["algorithm"]["coverage_interval"] = cov_interval
    return data
Classifies coverage into 3 categories:
- genome: Full genome coverage
- regional: Regional coverage, like exome capture, with off-target reads
- amplicon: Amplication based regional coverage without off-target reads |
def translate_src(src, cortex):
    """
    Convert source nodes to new surface (without medial wall).

    Returns the int32 indices of ``cortex`` entries that appear in ``src``.
    """
    membership = np.in1d(cortex, src)
    kept_indices = np.nonzero(membership)[0]
    return np.asarray(kept_indices, dtype=np.int32)
def _tot_unhandled_hosts_by_state(self, state):
"""Generic function to get the number of unhandled problem hosts in the specified state
:param state: state to filter on
:type state:
:return: number of host in state *state* and which are not acknowledged problems
:rtype: int
"""
return sum(1 for h in self.hosts if h.state == state and h.state_type == u'HARD' and
h.is_problem and not h.problem_has_been_acknowledged) | Generic function to get the number of unhandled problem hosts in the specified state
:param state: state to filter on
:type state:
:return: number of host in state *state* and which are not acknowledged problems
:rtype: int |
def encode_xml(obj, E=None):
    """ Encodes an OpenMath object as an XML node.

    :param obj: OpenMath object (or related item) to encode as XML.
    :type obj: OMAny
    :param E: Element factory to use, or a namespace prefix (str) for
        "http://www.openmath.org/OpenMath", or None for the default
        factory.
    :type E: ElementMaker, str, None
    :return: The XML node representing the OpenMath data structure.
    :rtype: etree._Element
    :raises TypeError: if ``obj`` is not a recognized OpenMath type.
    """
    # Resolve the element factory: default, or one bound to the OpenMath
    # namespace under the given prefix.
    if E is None:
        E = default_E
    elif isinstance(E, str):
        E = ElementMaker(namespace=xml.openmath_ns,
                         nsmap={ E: xml.openmath_ns })
    name = ""
    attr = {}
    children = []
    # Shared attributes present on several OpenMath types.
    if isinstance(obj, om.CDBaseAttribute) and obj.cdbase is not None:
        attr["cdbase"] = obj.cdbase
    if isinstance(obj, om.CommonAttributes) and obj.id is not None:
        attr["id"] = obj.id
    # Wrapper object
    if isinstance(obj, om.OMObject):
        children.append(encode_xml(obj.omel, E))
        attr["version"] = obj.version
    # Derived Objects
    elif isinstance(obj, om.OMReference):
        attr["href"] = obj.href
    # Basic Objects
    elif isinstance(obj, om.OMInteger):
        children.append(str(obj.integer))
    elif isinstance(obj, om.OMFloat):
        attr["dec"] = obj.double
    elif isinstance(obj, om.OMString):
        if obj.string is not None:
            children.append(str(obj.string))
    elif isinstance(obj, om.OMBytes):
        # Bytes are carried as base64 text content.
        children.append(base64.b64encode(obj.bytes).decode('ascii'))
    elif isinstance(obj, om.OMSymbol):
        attr["name"] = obj.name
        attr["cd"] = obj.cd
    elif isinstance(obj, om.OMVariable):
        attr["name"] = obj.name
    # Derived Elements
    elif isinstance(obj, om.OMForeign):
        attr["encoding"] = obj.encoding
        children.append(str(obj.obj))
    # Compound Elements (children encoded recursively)
    elif isinstance(obj, om.OMApplication):
        children = [encode_xml(obj.elem, E)]
        children.extend(encode_xml(x, E) for x in obj.arguments)
    elif isinstance(obj, om.OMAttribution):
        children = [encode_xml(obj.pairs, E), encode_xml(obj.obj, E)]
    elif isinstance(obj, om.OMAttributionPairs):
        # Key/value pairs are flattened into alternating children.
        for (k, v) in obj.pairs:
            children.append(encode_xml(k, E))
            children.append(encode_xml(v, E))
    elif isinstance(obj, om.OMBinding):
        children = [
            encode_xml(obj.binder, E),
            encode_xml(obj.vars, E),
            encode_xml(obj.obj, E)
        ]
    elif isinstance(obj, om.OMBindVariables):
        children = [encode_xml(x, E) for x in obj.vars]
    elif isinstance(obj, om.OMAttVar):
        children = [encode_xml(obj.pairs, E), encode_xml(obj.obj, E)]
    elif isinstance(obj, om.OMError):
        children = [encode_xml(obj.name, E)]
        children.extend(encode_xml(x, E) for x in obj.params)
    else:
        raise TypeError("Expected obj to be of type OMAny, found %s." % obj.__class__.__name__)
    # Drop attributes whose value is None and stringify the rest.
    attr = dict((k,str(v)) for k, v in attr.items() if v is not None)
    return E(xml.object_to_tag(obj), *children, **attr)
:param obj: OpenMath object (or related item) to encode as XML.
:type obj: OMAny
:param ns: Namespace prefix to use for
http://www.openmath.org/OpenMath", or None if default namespace.
:type ns: str, None
:return: The XML node representing the OpenMath data structure.
:rtype: etree._Element |
def match(self, node):
    u"""
    Since the tree needs to be fixed once and only once if and only if it
    matches, we can start discarding matches after the first.
    """
    # Guard clauses: only term nodes that actually contain a division
    # are candidates.
    if node.type != self.syms.term:
        return False
    div_idx = find_division(node)
    if div_idx is False:
        return False
    # When either operand is obviously a float, Python 2 and 3 division
    # already agree, so no old_div wrapping is needed.
    if is_floaty(node, div_idx):
        return False
    return clone_div_operands(node, div_idx)
Since the tree needs to be fixed once and only once if and only if it
matches, we can start discarding matches after the first. |
def delete_autostart_entry():
    """Remove a present autostart entry. If none is found, nothing happens."""
    autostart_file = Path(common.AUTOSTART_DIR) / "autokey.desktop"
    # EAFP: unlink directly instead of exists()+unlink(), which is racy if
    # the file disappears between the two calls (TOCTOU).
    try:
        autostart_file.unlink()
    except FileNotFoundError:
        pass  # No entry present; nothing to do.
    else:
        _logger.info("Deleted old autostart entry: {}".format(autostart_file))
def make_compute_file(self):
    """Build the LaTeX "compute" file from ``self.vardict``.

    For every (key, value) pair in ``self.vardict`` a
    ``\\newcommand{\\<key>}{<formatted value>}`` line is emitted, using the
    format from ``self.vardictformat`` when present and a default format
    derived from the value otherwise.  The result is written to the
    compute file configured in ``SETTINGS["COMPUTE"]``.
    """
    # Python 2 dicts expose iteritems(); Python 3 only items().
    try:
        vardict_items = self.vardict.iteritems()
    except AttributeError:
        vardict_items = self.vardict.items()
    string = ""
    for key, val in vardict_items:
        default_format = get_default_format(val)
        string_format = "\\newcommand{{\\{}}}{{" + self.vardictformat.get(
            key, default_format) + "}}\n"
        # "+" signs are stripped from the emitted line -- presumably so
        # LaTeX never sees explicit plus signs; TODO confirm intent.
        string += string_format.format(key, val).replace("+", "")
    compute_path = "{root_dir}{os_sep}{name}{os_extsep}{ext}".format(
        root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep,
        name=SETTINGS["COMPUTE"]["name"],
        ext=SETTINGS["COMPUTE"]["extension"]
    )
    # BUG FIX: the file was opened in binary mode ("wb") while writing a
    # str, which raises TypeError on Python 3.  Text mode plus a context
    # manager (guaranteed close even if write() fails) fixes both issues.
    with open(compute_path, "w") as compute_file:
        compute_file.write(string)
def flush(self):
    """Send the currently batched rows to Cloud Bigtable and reset counters.

    For example:

    .. literalinclude:: snippets.py
        :start-after: [START bigtable_batcher_flush]
        :end-before: [END bigtable_batcher_flush]
    """
    if not self.rows:
        return
    self.table.mutate_rows(self.rows)
    self.total_mutation_count = 0
    self.total_size = 0
    self.rows = []
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_batcher_flush]
:end-before: [END bigtable_batcher_flush] |
def show_download_links(self):
    """Print PyPI download URIs for the requested package.

    @returns: 0
    """
    # A version of 'dev' means the SVN trunk; collapse 'all' to 'svn' so
    # we don't print three separate SVN URIs.
    if self.options.file_type == "all" and self.version == "dev":
        self.options.file_type = "svn"
    if self.options.file_type == "svn":
        version = "dev"
    elif self.version:
        version = self.version
    else:
        version = self.all_versions[0]
    if self.options.file_type == "all":
        # Show source, egg and svn URIs.
        self.print_download_uri(version, True)
        self.print_download_uri(version, False)
        self.print_download_uri("dev", True)
    else:
        self.print_download_uri(version, self.options.file_type == "source")
    return 0
@returns: 0 |
def initialize(self):
    """Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.initialize`.

    Called once by NuPIC before the first call to compute().  Lazily
    creates ``self._sdrClassifier`` on the first invocation; subsequent
    calls are no-ops.
    """
    if self._sdrClassifier is not None:
        return
    self._sdrClassifier = SDRClassifierFactory.create(
        steps=self.stepsList,
        alpha=self.alpha,
        verbosity=self.verbosity,
        implementation=self.implementation,
    )
Is called once by NuPIC before the first call to compute().
Initializes self._sdrClassifier if it is not already initialized. |
def display_required_items(msg_type):
    """Print the items needed to configure a profile for ``msg_type``.

    Args:
        :msg_type: (str) message type to create config entry.
    """
    print("Configure a profile for: " + msg_type)
    print("You will need the following information:")
    for setting in CONFIG[msg_type]["settings"].values():
        print(" * " + setting)
    print("Authorization/credentials required:")
    for auth_item in CONFIG[msg_type]["auth"].values():
        print(" * " + auth_item)
message type.
Args:
:msg_type: (str) message type to create config entry. |
def shard_filename(path, tag, shard_num, total_shards):
    """Create the filename for one data shard (zero-padded M-of-N suffix)."""
    basename = "%s-%s-%s-%.5d-of-%.5d" % (
        _PREFIX, _ENCODE_TAG, tag, shard_num, total_shards)
    return os.path.join(path, basename)
def cmd_arp_sniff(iface):
    """Listen for ARP packets and show information for each device.
    Columns: Seconds from last packet | IP | MAC | Vendor
    Example:
    \b
    1   192.168.0.1   a4:08:f5:19:17:a4  Sagemcom Broadband SAS
    7   192.168.0.2   64:bc:0c:33:e5:57  LG Electronics (Mobile Communications)
    2   192.168.0.5   00:c2:c6:30:2c:58  Intel Corporate
    6   192.168.0.7   54:f2:01:db:35:58  Samsung Electronics Co.,Ltd
    """
    # Silence scapy's default per-packet chatter.
    conf.verb = False
    if iface:
        # Restrict capture to the user-supplied interface; otherwise
        # scapy's default interface is used.
        conf.iface = iface
    # Status message goes to stderr so stdout stays clean for the table.
    print("Waiting for ARP packets...", file=sys.stderr)
    # store=False keeps memory flat for long captures; each ARP packet is
    # handed to procpkt (defined elsewhere in this module) for display.
    sniff(filter="arp", store=False, prn=procpkt)
Columns: Seconds from last packet | IP | MAC | Vendor
Example:
\b
1 192.168.0.1 a4:08:f5:19:17:a4 Sagemcom Broadband SAS
7 192.168.0.2 64:bc:0c:33:e5:57 LG Electronics (Mobile Communications)
2 192.168.0.5 00:c2:c6:30:2c:58 Intel Corporate
6 192.168.0.7 54:f2:01:db:35:58 Samsung Electronics Co.,Ltd |
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_management_address(self, **kwargs):
    """Auto Generated Code
    """
    # NOTE: the initial <config> element is created and immediately
    # replaced; the request root is <get_lldp_neighbor_detail> itself.
    config = ET.Element("config")
    get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
    config = get_lldp_neighbor_detail
    output = ET.SubElement(get_lldp_neighbor_detail, "output")
    lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
    # Two key leaves plus the payload leaf, in schema order.
    for kwarg_name, xml_tag in (
            ("local_interface_name", "local-interface-name"),
            ("remote_interface_name", "remote-interface-name"),
            ("remote_management_address", "remote-management-address")):
        ET.SubElement(lldp_neighbor_detail, xml_tag).text = kwargs.pop(kwarg_name)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def consumption(self):
    """Consumption in kWh per sector for the whole grid.

    Returns
    -------
    :pandas:`pandas.Series<series>`
        Indexed by demand sector
    """
    # Sum the per-sector consumption over every load node in the grid.
    totals = defaultdict(float)
    for load_node in self.graph.nodes_by_attribute('load'):
        for sector, amount in load_node.consumption.items():
            totals[sector] += amount
    return pd.Series(totals)
Returns
-------
:pandas:`pandas.Series<series>`
Indexed by demand sector |
def open(self, *, autocommit=False):
    """Open the underlying connection via the core's ``open`` method.

    :param autocommit: the default autocommit state
    :type autocommit: boolean
    :return: self
    """
    if self.connection is not None:
        raise Exception("Connection already set")
    self.connection = self.core.open()
    self.autocommit = autocommit
    # Only configure search_path when one was provided.
    if self._search_path:
        self._configure_connection("search_path", self._search_path)
    return self
:param autocommit: the default autocommit state
:type autocommit: boolean
:return: self |
def image_members(self):
    """Return the json-schema document representing an image members
    entity (a container of member entities).
    """
    members_uri = "/%s/members" % self.uri_base
    _, body = self.api.method_get(members_uri)
    return body
(a container of member entities). |
def woodbury_chol(self):
    """
    return $L_{W}$ where L is the lower triangular Cholesky decomposition of the Woodbury matrix
    $$
    L_{W}L_{W}^{\top} = W^{-1}
    W^{-1} := \texttt{Woodbury inv}
    $$
    """
    # Lazily computed; cached in self._woodbury_chol after the first call.
    if self._woodbury_chol is None:
        # compute woodbury chol from
        if self._woodbury_inv is not None:
            # Treat the inverse as a stack of matrices (last axis indexes
            # independent outputs) and factor each slice; pdinv(...)[2]
            # returns the lower Cholesky factor of the inverse of its
            # argument.
            winv = np.atleast_3d(self._woodbury_inv)
            self._woodbury_chol = np.zeros(winv.shape)
            for p in range(winv.shape[-1]):
                self._woodbury_chol[:, :, p] = pdinv(winv[:, :, p])[2]
            # Li = jitchol(self._woodbury_inv)
            # self._woodbury_chol, _ = dtrtri(Li)
            # W, _, _, _, = pdinv(self._woodbury_inv)
            # symmetrify(W)
            # self._woodbury_chol = jitchol(W)
        # try computing woodbury chol from cov
        elif self._covariance is not None:
            # NOTE(review): everything after this raise is unreachable;
            # kept as a sketch of the intended covariance-based path.
            raise NotImplementedError("TODO: check code here")
            B = self._K - self._covariance
            tmp, _ = dpotrs(self.K_chol, B)
            self._woodbury_inv, _ = dpotrs(self.K_chol, tmp.T)
            _, _, self._woodbury_chol, _ = pdinv(self._woodbury_inv)
        else:
            raise ValueError("insufficient information to compute posterior")
    return self._woodbury_chol
$$
L_{W}L_{W}^{\top} = W^{-1}
W^{-1} := \texttt{Woodbury inv}
$$ |
def read_pot_status(self):
    """Read the status of the digital pot. Firmware v18+ only.
    The return value is a dictionary containing the following as
    unsigned 8-bit integers: FanON, LaserON, FanDACVal, LaserDACVal.
    :rtype: dict
    :Example:
    >>> alpha.read_pot_status()
    {
        'LaserDACVal': 230,
        'FanDACVal': 255,
        'FanON': 0,
        'LaserON': 0
    }
    """
    # Send the command byte and wait 10 ms
    # 0x13 is the command this method issues over SPI; the byte clocked
    # back with it is not part of the payload and is discarded.
    a = self.cnxn.xfer([0x13])[0]
    sleep(10e-3)
    # Build an array of the results
    # Clock out the four payload bytes by transferring 0x00 dummy bytes,
    # pausing between reads to let the device prepare the next byte.
    res = []
    for i in range(4):
        res.append(self.cnxn.xfer([0x00])[0])
        sleep(0.1)
    # Payload order: FanON, LaserON, FanDACVal, LaserDACVal.
    return {
        'FanON': res[0],
        'LaserON': res[1],
        'FanDACVal': res[2],
        'LaserDACVal': res[3]
    }
The return value is a dictionary containing the following as
unsigned 8-bit integers: FanON, LaserON, FanDACVal, LaserDACVal.
:rtype: dict
:Example:
>>> alpha.read_pot_status()
{
'LaserDACVal': 230,
'FanDACVal': 255,
'FanON': 0,
'LaserON': 0
} |
def delete(self):
    """ Delete the table.

    Returns:
        True if the Table no longer exists; False otherwise.
    """
    try:
        self._api.table_delete(self._name_parts)
    except google.datalab.utils.RequestException:
        # TODO(gram): May want to check the error reasons here and if it is not
        # because the file didn't exist, return an error.
        pass
    # BUG FIX: the previous `except Exception as e: raise e` clause was a
    # no-op that only mangled the traceback; any other exception now
    # propagates unchanged.
    return not self.exists()
Returns:
True if the Table no longer exists; False otherwise. |
def token(self, value):
    """Setter that normalizes any raw token dict into a Token instance."""
    needs_wrapping = bool(value) and not isinstance(value, Token)
    self._token = Token(value) if needs_wrapping else value
async def auto_add(self, device, recursive=None, automount=True):
    """
    Automatically attempt to mount or unlock a device, but be quiet if the
    device is not supported.
    :param device: device object, block device path or mount path
    :param bool recursive: recursively mount and unlock child devices
    :param bool automount: respect automount rules when deciding whether to act
    :returns: whether all attempted operations succeeded
    """
    # Resolve path-like input to a device object, creating a loop device
    # if needed; `created` is True when a loop device was set up.
    device, created = await self._find_device_losetup(device)
    if created and recursive is False:
        # Freshly created loop device with recursion explicitly disabled:
        # return the device itself rather than descending into it.
        return device
    if device.is_luks_cleartext and self.udisks.version_info >= (2, 7, 0):
        await sleep(1.5)  # temporary workaround for #153, unreliable
    success = True
    if not self.is_automount(device, automount):
        # Not eligible for automatic handling; report success unchanged.
        pass
    elif device.is_filesystem:
        if not device.is_mounted:
            success = await self.mount(device)
    elif device.is_crypto:
        # Only attempt to unlock when a password prompt is available.
        if self._prompt and not device.is_unlocked:
            success = await self.unlock(device)
        if success and recursive:
            # Re-fetch the device after a sync: unlocking changes its
            # state and the cleartext holder only appears afterwards.
            await self.udisks._sync()
            device = self.udisks[device.object_path]
            success = await self.auto_add(
                device.luks_cleartext_holder,
                recursive=True)
    elif recursive and device.is_partition_table:
        # Recurse into every partition of this table concurrently; the
        # overall result is the conjunction of the per-partition results.
        tasks = [
            self.auto_add(dev, recursive=True)
            for dev in self.get_all_handleable()
            if dev.is_partition and dev.partition_slave == device
        ]
        results = await gather(*tasks)
        success = all(results)
    else:
        self._log.debug(_('not adding {0}: unhandled device', device))
    return success
device is not supported.
:param device: device object, block device path or mount path
:param bool recursive: recursively mount and unlock child devices
:returns: whether all attempted operations succeeded |
def _default_capacity(self, value):
""" Get the value for ReturnConsumedCapacity from provided value """
if value is not None:
return value
if self.default_return_capacity or self.rate_limiters:
return INDEXES
return NONE | Get the value for ReturnConsumedCapacity from provided value |
def _reader_thread_func(self, read_stdout: bool) -> None:
    """
    Thread function that reads a stream from the process.

    :param read_stdout: if True, then this thread deals with stdout. Otherwise it deals with stderr.
    """
    # Select the (process pipe, destination stream) pair for this thread.
    if read_stdout:
        read_stream = self._proc.stdout
        write_stream = self._stdout
    else:
        read_stream = self._proc.stderr
        write_stream = self._stderr
    # The thread should have been started only if this stream was a pipe
    assert read_stream is not None
    # Run until process completes
    while self._proc.poll() is None:
        # peek() reports currently buffered bytes without consuming them.
        # noinspection PyUnresolvedReferences
        available = read_stream.peek()
        if available:
            # Consume exactly what peek() reported, then forward it.
            read_stream.read(len(available))
            self._write_bytes(write_stream, available)
    # NOTE(review): bytes still buffered when poll() turns non-None are
    # not drained here -- possible loss of trailing output at process
    # exit; confirm this is handled by the caller.
:param read_stdout: if True, then this thread deals with stdout. Otherwise it deals with stderr. |
def create_gtk_grid(self, row_spacing=6, col_spacing=6, row_homogenous=False, col_homogenous=True):
    """Create a Gtk.Grid with the given spacing and homogeneity settings.

    :param row_spacing: spacing between rows, in pixels
    :param col_spacing: spacing between columns, in pixels
    :param row_homogenous: whether all rows get the same height
    :param col_homogenous: whether all columns get the same width
    :return: the configured Gtk.Grid
    """
    grid_lang = Gtk.Grid()
    # BUG FIX: row_spacing previously fed set_column_spacing and
    # col_spacing fed set_row_spacing.  Harmless with the equal defaults,
    # but wrong whenever callers pass distinct values.
    grid_lang.set_row_spacing(row_spacing)
    grid_lang.set_column_spacing(col_spacing)
    grid_lang.set_border_width(12)
    grid_lang.set_row_homogeneous(row_homogenous)
    grid_lang.set_column_homogeneous(col_homogenous)
    return grid_lang
and homogeous tags |
def get_reply(self, method, reply):
    """
    Process the I{reply} for the specified I{method} by sax parsing the
    I{reply} and then unmarshalling into python object(s).
    @param method: The name of the invoked method.
    @type method: str
    @param reply: The reply XML received after invoking the specified
        method.
    @type reply: str
    @return: The unmarshalled reply. The returned value is an L{Object}
        for a I{list} depending on whether the service returns a single
        object or a collection.
    @rtype: tuple ( L{Element}, L{Object} )
    """
    reply = self.replyfilter(reply)
    sax = Parser()
    replyroot = sax.parse(string=reply)
    # Give registered plugins a chance to inspect the parsed reply.
    plugins = PluginContainer(self.options().plugins)
    plugins.message.parsed(reply=replyroot)
    soapenv = replyroot.getChild('Envelope')
    soapenv.promotePrefixes()
    soapbody = soapenv.getChild('Body')
    # Raises if the body carries a SOAP fault.
    self.detect_fault(soapbody)
    # Resolve multiref (href/id) encoded bodies into inline XML.
    soapbody = self.multiref.process(soapbody)
    nodes = self.replycontent(method, soapbody)
    rtypes = self.returned_types(method)
    if len(rtypes) > 1:
        # Multiple declared return parts: build a composite object.
        result = self.replycomposite(rtypes, nodes)
        return (replyroot, result)
    if len(rtypes) == 1:
        if rtypes[0].unbounded():
            # maxOccurs > 1: always unmarshal to a list.
            result = self.replylist(rtypes[0], nodes)
            return (replyroot, result)
        if len(nodes):
            unmarshaller = self.unmarshaller()
            resolved = rtypes[0].resolve(nobuiltin=True)
            result = unmarshaller.process(nodes[0], resolved)
            return (replyroot, result)
    # Void method, or no content returned.
    return (replyroot, None)
I{reply} and then unmarshalling into python object(s).
@param method: The name of the invoked method.
@type method: str
@param reply: The reply XML received after invoking the specified
method.
@type reply: str
@return: The unmarshalled reply. The returned value is an L{Object}
for a I{list} depending on whether the service returns a single
object or a collection.
@rtype: tuple ( L{Element}, L{Object} ) |
def update_or_create(cls, **kwargs):
    """Checks if an instance already exists by filtering with the
    kwargs. If yes, updates the instance with new kwargs and
    returns that instance. If not, creates a new
    instance with kwargs and returns it.

    Args:
        **kwargs: The keyword arguments which are used for filtering
            and initialization.
        keys (list, optional): A special keyword argument. If passed,
            only the set of keys mentioned here will be used for filtering.
            Useful when we want to 'filter' based on a subset of the keys
            and create with all the keys.

    Examples:
        >>> customer = Customer.update_or_create(
        ...     name="vicky", email="vicky@h.com", country="India")
        >>> customer.id
        45
        >>> customer1 = Customer.update_or_create(
        ...     name="vicky", email="vicky@h.com", country="India")
        >>> customer1==customer
        True
        >>> customer2 = Customer.update_or_create(
        ...     name="vicky", email="vicky@h.com", country="Russia")
        >>> customer2==customer
        False
        >>> customer3 = Customer.update_or_create(
        ...     name="vicky", email="vicky@h.com", country="Russia",
        ...     keys=['name', 'email'])
        >>> customer3==customer
        True
    """
    keys = kwargs.pop('keys', [])
    filter_kwargs = subdict(kwargs, keys)
    if filter_kwargs == {}:
        obj = None
    else:
        obj = cls.first(**filter_kwargs)
    if obj is not None:
        # BUG FIX: kwargs.iteritems() is Python-2-only and raises
        # AttributeError on Python 3; items() behaves the same on both.
        for key, value in kwargs.items():
            if (key not in keys and
                    key not in cls._no_overwrite_):
                setattr(obj, key, value)
        try:
            cls.session.commit()
        except:
            # Intentionally broad: always roll back before re-raising so
            # the session is left usable.
            cls.session.rollback()
            raise
    else:
        obj = cls.create(**kwargs)
    return obj
kwargs. If yes, updates the instance with new kwargs and
returns that instance. If not, creates a new
instance with kwargs and returns it.
Args:
**kwargs: The keyword arguments which are used for filtering
and initialization.
keys (list, optional): A special keyword argument. If passed,
only the set of keys mentioned here will be used for filtering.
Useful when we want to 'filter' based on a subset of the keys
and create with all the keys.
Examples:
>>> customer = Customer.update_or_create(
... name="vicky", email="vicky@h.com", country="India")
>>> customer.id
45
>>> customer1 = Customer.update_or_create(
... name="vicky", email="vicky@h.com", country="India")
>>> customer1==customer
True
>>> customer2 = Customer.update_or_create(
... name="vicky", email="vicky@h.com", country="Russia")
>>> customer2==customer
False
>>> customer3 = Customer.update_or_create(
... name="vicky", email="vicky@h.com", country="Russia",
... keys=['name', 'email'])
>>> customer3==customer
True |
def process(self):
    """Call the asset processing endpoint for every locale of the asset,
    then reload it.

    API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets/asset-processing
    """
    for locale in self._fields:
        # base_url is resolved per locale, matching the original flow.
        base = self.__class__.base_url(
            self.space.id,
            self.id,
            environment_id=self._environment_id
        )
        url = "{0}/files/{1}/process".format(base, locale)
        self._client._put(url, {}, headers=self._update_headers())
    return self.reload()
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets/asset-processing |
def select_segments_by_definer(segment_file, segment_name=None, ifo=None):
    """ Return the list of segments that match the segment name
    Parameters
    ----------
    segment_file: str
        path to segment xml file
    segment_name: str, optional
        Name of segment; when None, all segment names match.
    ifo: str, optional
        Detector prefix; when None, all detectors match.
    Returns
    -------
    seg: list of segments
    """
    # A content handler is required by the ligolw loader; use_in registers
    # the lsctables row classes with it.
    from glue.ligolw.ligolw import LIGOLWContentHandler as h; lsctables.use_in(h)
    indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
    segment_table = table.get_table(indoc, 'segment')
    seg_def_table = table.get_table(indoc, 'segment_definer')
    def_ifos = seg_def_table.getColumnByName('ifos')
    def_names = seg_def_table.getColumnByName('name')
    def_ids = seg_def_table.getColumnByName('segment_def_id')
    # Collect the definer ids whose ifo/name pass the optional filters.
    valid_id = []
    for def_ifo, def_name, def_id in zip(def_ifos, def_names, def_ids):
        if ifo and ifo != def_ifo:
            continue
        if segment_name and segment_name != def_name:
            continue
        valid_id += [def_id]
    # Combine integer seconds and nanoseconds into float GPS times.
    start = numpy.array(segment_table.getColumnByName('start_time'))
    start_ns = numpy.array(segment_table.getColumnByName('start_time_ns'))
    end = numpy.array(segment_table.getColumnByName('end_time'))
    end_ns = numpy.array(segment_table.getColumnByName('end_time_ns'))
    start, end = start + 1e-9 * start_ns, end + 1e-9 * end_ns
    # Keep only the segments belonging to a matching definer.
    did = segment_table.getColumnByName('segment_def_id')
    keep = numpy.array([d in valid_id for d in did])
    if sum(keep) > 0:
        return start_end_to_segments(start[keep], end[keep])
    else:
        return segmentlist([])
Parameters
----------
segment_file: str
path to segment xml file
segment_name: str
Name of segment
ifo: str, optional
Returns
-------
seg: list of segments |
def _parse_time_to_freeze(time_to_freeze_str):
    """Parse any accepted freeze_time input into a naive datetime.

    :returns: a naive ``datetime.datetime`` object
    """
    spec = time_to_freeze_str
    if spec is None:
        spec = datetime.datetime.utcnow()
    if isinstance(spec, datetime.datetime):
        result = spec
    elif isinstance(spec, datetime.date):
        result = datetime.datetime.combine(spec, datetime.time())
    elif isinstance(spec, datetime.timedelta):
        result = datetime.datetime.utcnow() + spec
    else:
        # Fall back to dateutil's string parser for everything else.
        result = parser.parse(spec)
    return convert_to_timezone_naive(result)
:returns: a naive ``datetime.datetime`` object |
def import_all_modules():
    """
    <Purpose>
      Imports all modules within the modules folder. This should only be called
      once throughout the entire execution of seash.
    <Side Effects>
      Modules that don't have collisions will have their commanddicts and
      helptexts loaded and returned.
    <Exceptions>
      ImportError: There is an existing module with the same name already
      imported.
    <Return>
      The seashcommanddict that contains the imported commands on top of the
      passed in commanddict.
    """
    for module_folder in get_installed_modules():
        try:
            if module_folder in module_data:
                raise seash_exceptions.ModuleImportError("Module already imported")
            module_data[module_folder] = import_module(module_folder)
        # FIX: `except X, e` and `print str(e)` are Python-2-only syntax;
        # the forms below behave identically on Python 2.6+ and Python 3.
        except seash_exceptions.ModuleImportError as e:
            print(str(e))
Imports all modules within the modules folder. This should only be called once
throughout the entire execution of seash.
<Side Effects>
Modules that don't have collisions will have their commanddicts and
helptexts loaded and returned.
<Exceptions>
ImportError: There is an existing module with the same name already imported.
<Return>
The seashcommanddict that contains the imported commands on top of the
passed in commanddict. |
def backspace(self, n=1, interval=0, pre_dl=None, post_dl=None):
    """Press the backspace key ``n`` times.

    :param n: number of key presses.
    :param interval: delay between the individual presses.
    :param pre_dl: delay before pressing, forwarded to ``self.delay``.
    :param post_dl: delay after pressing, forwarded to ``self.delay``.
    """
    self.delay(pre_dl)
    self.k.tap_key(self.k.backspace_key, n, interval)
    self.delay(post_dl)
**中文文档**
按退格键 n 次。 |
def proto_01_01_HP010(abf=exampleABF):
    """hyperpolarization step. Use to calculate tau and stuff."""
    # NOTE: the default ABF is bound to exampleABF once, at import time.
    swhlab.memtest.memtest(abf) #knows how to do IC memtest
    swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did
    swhlab.plot.save(abf,tag="tau")  # persist the resulting figure tagged "tau"
def register_all_shape_checker(shape_checker_function,
                               arg_types,
                               exclude=(),
                               ignore_existing=False):
    """Register a shape checker for every ordered pair of the given types.

    Convenience shorthand around register_shape_checker for families of
    types that are interchangeable for the purpose of addition.

    Args:
      shape_checker_function: A shape checker, see register_shape_checker.
      arg_types: List of Python type objects. The shape checker will be
        registered for all ordered pairs of these types.
      exclude: Optional collection of type tuples to skip.
      ignore_existing: Boolean. Whether to silently skip argument pairs
        that were already registered.
    """
    for left_type in arg_types:
        for right_type in arg_types:
            pair = (left_type, right_type)
            if pair in exclude:
                continue
            if ignore_existing and pair in shape_checkers:
                continue
            register_shape_checker(left_type, right_type, shape_checker_function)
This is a convenience shorthand for calling register_add_grad when registering
gradient adders for multiple types that can be interchanged for the purpose
of addition.
Args:
shape_checker_function: A shape checker, see register_shape_checker.
arg_types: List of Python type objects. The shape checker will be
registered for all pairs of these types.
exclude: Optional list of type tuples to exclude.
ignore_existing: Boolean. Whether to silently skip argument pairs that were
already registered. |
def _apply_axes_mapping(self, target, inverse=False):
"""
Apply the transposition to the target iterable.
Parameters
----------
target - iterable
The iterable to transpose. This would be suitable for things
such as a shape as well as a list of ``__getitem__`` keys.
inverse - bool
Whether to map old dimension to new dimension (forward), or
new dimension to old dimension (inverse). Default is False
(forward).
Returns
-------
A tuple derived from target which has been ordered based on the new
axes.
"""
if len(target) != self.ndim:
raise ValueError('The target iterable is of length {}, but '
'should be of length {}.'.format(len(target),
self.ndim))
if inverse:
axis_map = self._inverse_axes_map
else:
axis_map = self._forward_axes_map
result = [None] * self.ndim
for axis, item in enumerate(target):
result[axis_map[axis]] = item
return tuple(result) | Apply the transposition to the target iterable.
Parameters
----------
target - iterable
The iterable to transpose. This would be suitable for things
such as a shape as well as a list of ``__getitem__`` keys.
inverse - bool
Whether to map old dimension to new dimension (forward), or
new dimension to old dimension (inverse). Default is False
(forward).
Returns
-------
A tuple derived from target which has been ordered based on the new
axes. |
def gatk_type(self):
    """Retrieve the flavour of the GATK jar in use.

    Returns "gatk4" for GATK 4.x, "restricted" for the 2.4+ restricted
    releases, and "lite" for the old GATK-lite 2.3.x builds.
    """
    major = LooseVersion(self.gatk_major_version())
    if major > LooseVersion("3.9"):
        return "gatk4"
    if major > LooseVersion("2.3"):
        return "restricted"
    return "lite"
Returns either `lite` (targeting GATK-lite 2.3.9) or `restricted`,
the latest 2.4+ restricted version of GATK. |
def get(self, path):
    """ Perform a GET request with GSSAPI authentication """
    # Generate token
    # The Kerberos service principal is HTTP/<host>; step() yields the
    # initial SPNEGO token for the Negotiate header.
    service_name = gssapi.Name('HTTP@{0}'.format(self.url.netloc),
                               gssapi.NameType.hostbased_service)
    ctx = gssapi.SecurityContext(usage="initiate", name=service_name)
    data = b64encode(ctx.step()).decode()
    # Make the connection
    connection = httplib.HTTPSConnection(self.url.netloc, 443)
    log.debug("GET {0}".format(path))
    connection.putrequest("GET", path)
    connection.putheader("Authorization", "Negotiate {0}".format(data))
    connection.putheader("Referer", self.url_string)
    connection.endheaders()
    # Perform the request, convert response into lines
    response = connection.getresponse()
    if response.status != 200:
        raise ReportError(
            "Failed to fetch tickets: {0}".format(response.status))
    # The leading line is dropped -- presumably a header row in the
    # service's response format; confirm against the endpoint.
    lines = response.read().decode("utf8").strip().split("\n")[1:]
    log.debug("Tickets fetched:")
    log.debug(pretty(lines))
    return lines
def create_namespaced_ingress(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_ingress  # noqa: E501

    create an Ingress  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_namespaced_ingress(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1beta1Ingress body: (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1beta1Ingress
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand back the request thread directly.
        return self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs)  # noqa: E501
    (data) = self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs)  # noqa: E501
    return data
create an Ingress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_ingress(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1Ingress body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1Ingress
If the method is called asynchronously,
returns the request thread. |
def do_dice_roll():
    """Roll n-sided dice, printing each result and (for >1 rolls) the total."""
    options = get_options()
    die = Dice(options.sides)
    rolls = [die.roll() for _ in range(options.number)]
    for value in rolls:
        print('rolled', value)
    if options.number > 1:
        print('total', sum(rolls))
def set_contributor_details(self, contdetails):
    """Set the 'contributor_details' API parameter.

    When enabled, the contributors element of the status response also
    includes the contributor's screen_name instead of only the user_id.

    :param contdetails: Boolean triggering the usage of the parameter
    :raises: TwitterSearchException
    """
    if not isinstance(contdetails, bool):
        raise TwitterSearchException(1008)
    flag = 'true' if contdetails else 'false'
    self.arguments.update({'contributor_details': flag})
contributors element of the status response to include \
the screen_name of the contributor. By default only \
the user_id of the contributor is included
:param contdetails: Boolean triggering the usage of the parameter
:raises: TwitterSearchException |
def get_languages(self):
    """
    Return a list of all used languages for this page.
    """
    # First level: per-instance memoization.
    if self._languages:
        return self._languages
    # Second level: shared cache backend keyed on the page id.
    self._languages = cache.get(self.PAGE_LANGUAGES_KEY % (self.id))
    if self._languages is not None:
        return self._languages
    # Cache miss: derive the languages from the page's 'slug' Content rows.
    languages = [c['language'] for
                 c in Content.objects.filter(page=self,
                                             type="slug").values('language')]
    # remove duplicates
    languages = list(set(languages))
    languages.sort()
    cache.set(self.PAGE_LANGUAGES_KEY % (self.id), languages)
    self._languages = languages
    return languages
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.