| code (string, length 75–104k) | docstring (string, length 1–46.9k) |
|---|---|
async def unsubscribe(self, container, *keys):
    '''
    Unsubscribe the specified channels. Every subscribed key should be
    unsubscribed exactly once, even if it was subscribed multiple times;
    the reference count is decremented and the channel is only really
    unsubscribed when the count drops to zero.

    :param container: routine container
    :param \\*keys: subscribed channels
    '''
    await self._get_subscribe_connection(container)
    expired = []
    for key in keys:
        refcount = self._subscribecounter.get(key, 0)
        if refcount > 1:
            # Still referenced elsewhere; just decrement.
            self._subscribecounter[key] = refcount - 1
        else:
            # Last reference: drop the counter and really unsubscribe.
            expired.append(key)
            self._subscribecounter.pop(key, None)
    if expired:
        await self._protocol.execute_command(
            self._subscribeconn, container, 'UNSUBSCRIBE', *expired)
def _parse_to_recoverable_signature(sig):
    """
    Parse a 65-byte compact signature into a secp256k1 recoverable
    signature object.

    The last byte of ``sig`` carries the recovery id; the first 64 bytes
    are the compact signature itself. Raises ``InvalidSignatureError``
    when the library cannot parse the input.
    """
    assert isinstance(sig, bytes)
    assert len(sig) == 65
    # Output buffer for the parsed recoverable signature.
    recoverable_sig = ffi.new("secp256k1_ecdsa_recoverable_signature *")
    # Recovery id is stored in the trailing byte.
    recovery_id = ord(sig[64:65])
    parsed_ok = lib.secp256k1_ecdsa_recoverable_signature_parse_compact(
        ctx, recoverable_sig, sig, recovery_id)
    if not parsed_ok:
        raise InvalidSignatureError()
    return recoverable_sig
def _get_machines_cache_from_srv(self, srv):
    """Fetch list of etcd-cluster member by resolving _etcd-server._tcp. SRV record.

    This record should contain list of host and peer ports which could be used to run
    'GET http://{host}:{port}/members' request (peer protocol)"""
    ret = []
    # SRV record name variants, tried in order of preference: client-facing
    # records first, then the plain record, then peer ("-server") records.
    for r in ['-client-ssl', '-client', '-ssl', '', '-server-ssl', '-server']:
        protocol = 'https' if '-ssl' in r else 'http'
        # Peer ("-server") records expose the members API, so for them we
        # must query /members to discover the client URLs.
        endpoint = '/members' if '-server' in r else ''
        for host, port in self.get_srv_record('_etcd{0}._tcp.{1}'.format(r, srv)):
            url = uri(protocol, host, port, endpoint)
            if endpoint:
                try:
                    # NOTE(review): TLS verification is disabled here --
                    # confirm this is intentional for peer discovery.
                    response = requests.get(url, timeout=self.read_timeout, verify=False)
                    if response.ok:
                        for member in response.json():
                            ret.extend(member['clientURLs'])
                        # One successful peer response is enough.
                        break
                except RequestException:
                    logger.exception('GET %s', url)
            else:
                ret.append(url)
        if ret:
            self._protocol = protocol
            break
    else:  # the loop ran to completion: no SRV variant produced any member
        logger.warning('Can not resolve SRV for %s', srv)
    # Deduplicate; note that ordering is not preserved.
    return list(set(ret))
def split_by_idxs(seq, idxs):
    '''A generator yielding successive pieces of `seq`, cut at each index in `idxs`.'''
    prev = 0
    for i in idxs:
        bound = len(seq)
        if not (-bound <= i < bound):
            raise KeyError(f'Idx {i} is out-of-bounds')
        yield seq[prev:i]
        prev = i
    # Trailing piece after the final cut point (the whole of seq if idxs is empty).
    yield seq[prev:]
def handle_noargs(self, **options):
    """
    Rebuild the MPTT tree columns and cached URLs for DRAFT nodes only.

    By default this function runs on all objects.
    As we are using a publishing system it should only update draft
    objects which can be modified in the tree structure.
    Once published the tree preferences should remain the same to
    ensure the tree data structure is consistent with what was
    published by the user.
    """
    # NOTE(review): Django normally normalizes CLI options to underscored
    # keys ('dry_run', 'mptt_only'); confirm these dashed keys really occur
    # in `options`, otherwise both flags are always False.
    is_dry_run = options.get('dry-run', False)
    mptt_only = options.get('mptt-only', False)
    slugs = {}
    overrides = {}
    # MODIFIED
    # This was modified to filter draft objects only.
    # ORIGINAL LINE -> `parents = dict(UrlNode.objects.values_list('id', 'parent_id'))`
    parents = dict(
        UrlNode.objects.filter(status=UrlNode.DRAFT).values_list('id', 'parent_id')
    )
    # END MODIFIED
    # NOTE(review): this message is printed again after the rebuild below;
    # this first write looks premature -- confirm it is intentional.
    self.stdout.write("Updated MPTT columns")
    if is_dry_run and mptt_only:
        # Can't really do anything
        return
    if not is_dry_run:
        # Fix MPTT first, that is the basis for walking through all nodes.
        # MODIFIED
        # Original line -> `UrlNode.objects.rebuild()`
        # The `rebuild` function works on the manager. As we need to filter the queryset first
        # it does not play nicely. The code for `rebuild` was brought in here and modified to
        # work with the current context.
        # Get opts from `UrlNode` rather than `self.model`.
        opts = UrlNode._mptt_meta
        # Add a queryset parameter will draft objects only.
        qs = UrlNode.objects._mptt_filter(
            qs=UrlNode.objects.filter(status=UrlNode.DRAFT),
            parent=None
        )
        if opts.order_insertion_by:
            qs = qs.order_by(*opts.order_insertion_by)
        pks = qs.values_list('pk', flat=True)
        # Obtain the `rebuild_helper` from `UrlNode.objects` rather than `self`.
        rebuild_helper = UrlNode.objects._rebuild_helper
        idx = 0
        # Each root node gets a fresh tree id (1-based running index).
        for pk in pks:
            idx += 1
            rebuild_helper(pk, 1, idx)
        # END MODIFIED
        self.stdout.write("Updated MPTT columns")
    if mptt_only:
        return
    self.stdout.write("Updating cached URLs")
    self.stdout.write("Page tree nodes:\n\n")
    col_style = u"| {0:6} | {1:6} | {2:6} | {3}"
    header = col_style.format("Site", "Page", "Locale", "URL")
    sep = '-' * (len(header) + 40)
    self.stdout.write(sep)
    self.stdout.write(header)
    self.stdout.write(sep)
    # MODIFIED
    # Modified to add the filter for draft objects only.
    for translation in UrlNode_Translation.objects.filter(
        master__status=UrlNode.DRAFT
    ).select_related('master').order_by(
        'master__parent_site__id', 'master__tree_id', 'master__lft', 'language_code'
    ):
        # END MODIFIED
        slugs.setdefault(translation.language_code, {})[translation.master_id] = translation.slug
        overrides.setdefault(translation.language_code, {})[translation.master_id] = translation.override_url
        old_url = translation._cached_url
        try:
            new_url = self._construct_url(translation.language_code, translation.master_id, parents, slugs, overrides)
        except KeyError:
            if is_dry_run:
                # When the mptt tree is broken, some URLs can't be correctly generated yet.
                self.stderr.write("Failed to determine new URL for {0}, please run with --mptt-only first.".format(old_url))
                return
            raise
        if old_url != new_url:
            translation._cached_url = new_url
            if not is_dry_run:
                translation.save()
        if old_url != new_url:
            self.stdout.write(smart_text(u"{0} {1} {2}\n".format(
                col_style.format(translation.master.parent_site_id, translation.master_id, translation.language_code, translation._cached_url),
                "WILL CHANGE from" if is_dry_run else "UPDATED from",
                old_url
            )))
        else:
            self.stdout.write(smart_text(col_style.format(
                translation.master.parent_site_id, translation.master_id, translation.language_code, translation._cached_url
            )))
def _initialized(self, partitioner):
    """Record the freshly created partitioner and reset internal state.

    Once we successfully obtain a real
    :class:`kazoo.recipe.partitioner.SetPartitioner`, keep a reference to
    it, wrap its blocking methods in a Thimble, and clear ``_state`` so
    the ``state`` property delegates to the partitioner from now on.
    """
    self._thimble = Thimble(
        self.reactor, self.pool, partitioner, _blocking_partitioner_methods)
    self._partitioner = partitioner
    self._state = None
def removeRef(self, attr):
    """Remove the given attribute from the internally maintained Ref table."""
    # libxml2 expects the raw C object (or NULL) rather than the wrapper.
    attr__o = None if attr is None else attr._o
    return libxml2mod.xmlRemoveRef(self._o, attr__o)
def connect_child(self, node):
    """Attach ``node`` as a child of this node.

    Only the parent/child links are updated; no new nodes are created.

    :param node:
        a ``Node`` object to connect
    """
    if node.graph != self.graph:
        raise AttributeError('cannot connect nodes from different graphs')
    self.children.add(node)
    node.parents.add(self)
def get_account(self, address, id=None, endpoint=None):
    """
    Look up an account on the blockchain.

    Args:
        address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')
        id: (int, optional) id to use for response tracking
        endpoint: (RPCEndpoint, optional) endpoint to specify to use

    Returns:
        json object of the result or the error encountered in the RPC call
    """
    params = [address]
    return self._call_endpoint(
        GET_ACCOUNT_STATE, params=params, id=id, endpoint=endpoint)
def send_request(ndex_service_url, params, is_json=True, use_get=False):
    """Send a request to the NDEx server.

    Parameters
    ----------
    ndex_service_url : str
        The URL of the service to use for the request.
    params : dict
        A dictionary of parameters to send with the request. Parameter keys
        differ based on the type of request.
    is_json : bool
        True if the response is in json format, otherwise it is assumed to be
        text. Default: True
    use_get : bool
        True if the request needs to use GET instead of POST.

    Returns
    -------
    res : str or dict or None
        Depending on the type of service and the is_json parameter, this
        function either returns a text string or a json dict. None is
        returned when the request fails.
    """
    if use_get:
        res = requests.get(ndex_service_url, json=params)
    else:
        res = requests.post(ndex_service_url, json=params)
    status = res.status_code
    # If the response is immediate, we get 200.
    if status == 200:
        return res.json() if is_json else res.text
    # Status 300 means the result is not ready yet and must be polled for
    # via a task id; any other status is an error.
    if status != 300:
        logger.error('Request returned with code %d' % status)
        return None
    task_id = res.json().get('task_id')
    if task_id is None:
        # Bug fix: previously a missing task id fell through and crashed with
        # a TypeError when building the polling URL; fail gracefully instead.
        logger.error('NDEx returned status 300 without a task_id.')
        return None
    logger.info('NDEx task submitted...')
    time_used = 0
    # Poll the task endpoint every 5 seconds until the result is ready.
    while status != 200:
        res = requests.get(ndex_base_url + '/task/' + task_id)
        status = res.status_code
        if status != 200:
            time.sleep(5)
            time_used += 5
    logger.info('NDEx task complete.')
    return res.json() if is_json else res.text
def json_encode(self, out, limit=None, sort_keys=False, indent=None):
    '''Stream the results of this paged response as JSON into the
    file-like `out` object. As many pages as are present are read
    iteratively and the encoded chunks are written out incrementally.

    :param file-like out: an object with a `write` function
    :param int limit: optional maximum number of items to write
    :param bool sort_keys: if True, output keys sorted, default is False
    :param bool indent: if True, indent output, default is False
    '''
    encoder = json.JSONEncoder(indent=indent, sort_keys=sort_keys)
    write = out.write
    for piece in encoder.iterencode(self._json_stream(limit)):
        write(u'%s' % piece)
def bind(self):
    """Bind the FBO. Anything drawn afterward will be stored in the FBO's texture."""
    # This is called simply to deal with anything that might be currently bound (for example, Pyglet objects),
    gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
    # Store current viewport size for later
    # NOTE(review): callers presumably restore self._old_viewport when
    # unbinding -- confirm the matching unbind does so.
    self._old_viewport = get_viewport()
    # Bind the FBO, and change the viewport to fit its texture.
    gl.glBindFramebufferEXT(gl.GL_FRAMEBUFFER_EXT, self.id) # Rendering off-screen
    gl.glViewport(0, 0, self.texture.width, self.texture.height)
def monkey_patch_override_instance_method(instance):
    """
    Decorator factory that overrides an instance method with a new version
    of the same name. The original method implementation remains available
    on the instance as `_original_<METHOD_NAME>`.

    :param instance: the object whose method is being overridden
    :return: a decorator that installs the override and returns the
        override function unchanged
    """
    def perform_override(override_fn):
        fn_name = override_fn.__name__
        original_fn_name = '_original_' + fn_name
        # Stash the original bound method only once, so repeated overrides
        # keep the very first implementation as the `_original_` fallback.
        if not hasattr(instance, original_fn_name):
            original_fn = getattr(instance, fn_name)
            setattr(instance, original_fn_name, original_fn)
        # Bind the override to the instance so it receives `self`.
        bound_override_fn = override_fn.__get__(instance)
        setattr(instance, fn_name, bound_override_fn)
        # Bug fix: return the function so that decorator usage
        # (`@monkey_patch_override_instance_method(obj)`) does not rebind
        # the decorated name to None.
        return override_fn
    return perform_override
def url(self, key, includeToken=None):
    """ Build a URL string with proper token argument. Token will be appended to the URL
        if either includeToken is True or CONFIG.log.show_secrets is 'true'.
    """
    show_token = self._token and (includeToken or self._showSecrets)
    if show_token:
        # Append the token with '&' when the key already has a query string.
        delim = '&' if '?' in key else '?'
        return f'{self._baseurl}{key}{delim}X-Plex-Token={self._token}'
    return f'{self._baseurl}{key}'
def get_fast_scanner(self):
    """
    Return :class:`.FastScanner` for association scan.

    Returns
    -------
    :class:`.FastScanner`
        Instance of a class designed to perform very fast association scan.
    """
    return KronFastScanner(
        self._Y, self._mean.A, self._mean.X, self._cov.Ge, self._terms
    )
def selected_attributes(self):
    """
    Return the attributes selected by the last run.

    :return: the Numpy array of 0-based indices, or None if unavailable
    :rtype: ndarray
    """
    array = javabridge.call(self.jobject, "selectedAttributes", "()[I")
    if array is None:
        return None
    return javabridge.get_env().get_int_array_elements(array)
def _structure(self, source_code):
"""return structure in ACDP format."""
# define cutter as a per block reader
def cutter(seq, block_size):
for index in range(0, len(seq), block_size):
lexem = seq[index:index+block_size]
if len(lexem) == block_size:
yield self.table_struct[seq[index:index+block_size]]
return tuple(cutter(source_code, self.idnt_struct_size)) | return structure in ACDP format. |
def getInterfaceAddresses(self, node, interface):
    '''
    Return the Ethernet and IP+mask addresses assigned to a given
    interface on a node, as an (ethaddr, ipaddr, netmask) tuple.
    '''
    nodeobj = self.getNode(node)['nodeobj']
    intf = nodeobj.getInterface(interface)
    return (intf.ethaddr, intf.ipaddr, intf.netmask)
def fit(self, **kwargs):
    """
    This will try to determine fit parameters using scipy.optimize.leastsq
    algorithm. This function relies on a previous call of set_data() and
    set_functions().

    Notes
    -----
    results of the fit algorithm are stored in self.results.
    See scipy.optimize.leastsq for more information.
    Optional keyword arguments are sent to self.set() prior to
    fitting.
    """
    # Guard clauses: both data and model functions must be supplied first.
    if len(self._set_xdata)==0 or len(self._set_ydata)==0:
        return self._error("No data. Please use set_data() prior to fitting.")
    if self._f_raw is None:
        return self._error("No functions. Please use set_functions() prior to fitting.")
    # Do the processing once, to increase efficiency
    self._massage_data()
    # Send the keyword arguments to the settings
    self.set(**kwargs)
    # do the actual optimization; full_output=1 keeps the covariance and
    # diagnostic information in self.results.
    self.results = _opt.leastsq(self._studentized_residuals_concatenated, self._pguess, full_output=1)
    # plot if necessary
    if self['autoplot']: self.plot()
    # Return self so calls can be chained.
    return self
def calculate_reshape_output_shapes(operator):
    '''
    Infer the output shape of a reshape operator.

    Allowed input/output patterns are
      1. [N, C, H, W] ---> [N, C', H', W']
    Note that C*H*W should equal to C'*H'*W'.
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])
    target = operator.raw_operator.reshape.targetShape
    output_shape = [int(dim) for dim in target]
    if len(output_shape) == 3:
        # A 3-element target shape omits the batch axis; prepend it from the input.
        output_shape.insert(0, operator.inputs[0].type.shape[0])
    operator.outputs[0].type.shape = output_shape
def MakeCdf(self, steps=101):
    """Return the CDF of this Beta distribution.

    Args:
        steps: number of evenly spaced points in [0, 1] to evaluate.

    Returns:
        Cdf object sampling the regularized incomplete beta function.
    """
    # Bug fix: `xrange` does not exist in Python 3; use `range`.
    xs = [i / (steps - 1.0) for i in range(steps)]
    ps = [scipy.special.betainc(self.alpha, self.beta, x) for x in xs]
    cdf = Cdf(xs, ps)
    return cdf
def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS):
    """
    Cluster the embedded coordinates using multidimensional scaling

    Parameters
    ----------
    n: int
        The number of clusters to return
    embed_dim: int
        The dimensionality of the underlying coordinates
        Defaults to same value as n
    algo: enum value (mds.CLASSICAL | mds.METRIC)
        The multidimensional scaling algorithm used to embed the
        distance matrix before clustering
    method: enum value (methods.KMEANS | methods.GMM | methods.WARD)
        The clustering method to use

    Returns
    -------
    Partition: Partition object describing the data partition
    """
    # A single cluster is trivial: every point belongs to group 1.
    if n == 1:
        return Partition([1] * len(self.get_dm(False)))
    if embed_dim is None:
        embed_dim = n
    # Embed the distance matrix into `embed_dim` coordinates.
    if algo == mds.CLASSICAL:
        self._coords = self.dm.embedding(embed_dim, 'cmds')
    elif algo == mds.METRIC:
        self._coords = self.dm.embedding(embed_dim, 'mmds')
    else:
        raise OptionError(algo, list(mds.reverse.values()))
    # Cluster the embedded coordinates with the requested method.
    if method == methods.KMEANS:
        p = self.kmeans(n, self._coords.values)
    elif method == methods.GMM:
        p = self.gmm(n, self._coords.values)
    elif method == methods.WARD:
        linkmat = fastcluster.linkage(self._coords.values, 'ward')
        p = _hclust(linkmat, n)
    else:
        raise OptionError(method, list(methods.reverse.values()))
    #if self._verbosity > 0:
    #    print('Using clustering method: {}'.format(methods.reverse[method]))
    return p
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template.

    The tied covariance `tied_cv` is replicated into the representation
    required by `covariance_type` ('spherical', 'tied', 'diag' or 'full')
    for `n_components` mixture components.
    """
    if covariance_type == 'spherical':
        return np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
                       (n_components, 1))
    if covariance_type == 'tied':
        return tied_cv
    if covariance_type == 'diag':
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def exists(self, workflow_id):
    """ Checks whether a document with the specified workflow id already exists.

    Args:
        workflow_id (str): The workflow id that should be checked.

    Raises:
        DataStoreNotConnected: If the data store is not connected to the server.

    Returns:
        bool: ``True`` if a document with the specified workflow id exists.
    """
    try:
        collection = self._client[self.database][WORKFLOW_DATA_COLLECTION_NAME]
        return collection.find_one({"_id": ObjectId(workflow_id)}) is not None
    except ConnectionFailure:
        raise DataStoreNotConnected()
def main():
    """Entry point for the :command:`fabulous-image` command."""
    import optparse
    parser = optparse.OptionParser()
    parser.add_option(
        "-w", "--width", dest="width", type="int", default=None,
        help=("Width of printed image in characters. Default: %default"))
    options, args = parser.parse_args(args=sys.argv[1:])
    # Render every image path given on the command line, one row at a time.
    for path in args:
        for row in Image(path, options.width):
            printy(row)
def getChild(self, name, request):
    """
    Route every request to the wrapped WSGI resource.

    Postpath needs to contain all segments of the url; if it is
    incomplete then that incomplete url will be passed on to the child
    resource (in this case our wsgi application).
    """
    # Re-establish request.postpath so it carries the entire path.
    request.postpath.insert(0, name)
    request.prepath = []
    return self.wsgi_resource
def login_with_api_token(api_token):
    """Login to Todoist using a user's api token.

    .. note:: It is up to you to obtain the api token.

    :param api_token: A Todoist user's api token.
    :type api_token: str
    :return: The Todoist user.
    :rtype: :class:`pytodoist.todoist.User`

    >>> from pytodoist import todoist
    >>> api_token = 'api_token'
    >>> user = todoist.login_with_api_token(api_token)
    >>> print(user.full_name)
    John Doe
    """
    response = API.sync(api_token, '*', '["user"]')
    _fail_if_contains_errors(response)
    user_info = response.json()['user']
    # The sync endpoint does not echo the api_token back, so copy it over.
    user_info['api_token'] = user_info['token']
    return User(user_info)
def find_consensus(bases):
    """
    Determine the consensus base from nucleotide frequencies.

    :param bases: dict mapping nucleotides ('A', 'T', 'G', 'C', 'N') to
        counts; may also carry a 'ref' key naming the reference base.
    :return: the same dict, augmented with 'consensus' (a (base, count)
        tuple), 'consensus frequency' and 'reference frequency' keys.
    """
    nucs = ['A', 'T', 'G', 'C', 'N']
    counts = [bases[nuc] for nuc in nucs if nuc in bases]
    total = sum(counts)
    # Bug fix: catch only the ValueError raised by max() on an empty
    # sequence, instead of a bare `except:` that hid every other error.
    try:
        best_count = max(counts)
    except ValueError:
        bases['consensus'] = ('N', 'n/a')
        bases['consensus frequency'] = 'n/a'
        bases['reference frequency'] = 'n/a'
        return bases
    # Save most common base as consensus (random nuc if there is a tie).
    candidates = [(nuc, bases[nuc]) for nuc in bases if bases[nuc] == best_count]
    if best_count == 0:
        bases['consensus'] = ('n/a', 0)
    else:
        bases['consensus'] = random.choice(candidates)
    if total == 0:
        c_freq = 'n/a'
        ref_freq = 'n/a'
    else:
        c_freq = float(bases['consensus'][1]) / float(total)
        if bases['ref'] not in bases:
            ref_freq = 0
        else:
            ref_freq = float(bases[bases['ref']]) / float(total)
    bases['consensus frequency'] = c_freq
    bases['reference frequency'] = ref_freq
    return bases
def dump_json(d: dict) -> str:
    """Serialize a dict whose keys may be tuples to a JSON string.

    JSON object keys must be strings, so every key is first converted
    with ``str()``.

    :param d: dictionary to serialize (keys may be tuples).
    :return: the JSON document as a string, indented with 4 spaces.
    """
    import json
    # Bug fix: the annotation previously claimed -> None although the
    # function returns the string produced by json.dumps.
    return json.dumps({str(key): value for key, value in d.items()}, indent=4)
def _GetNormalizedTimestamp(self):
    """Retrieves the normalized timestamp.

    Returns:
      decimal.Decimal: normalized timestamp, which contains the number of
          seconds since January 1, 1970 00:00:00 and a fraction of second
          used for increased precision, or None if the normalized timestamp
          cannot be determined.
    """
    # Lazily compute and cache; stays None while _number_of_seconds is unset.
    if self._normalized_timestamp is None and self._number_of_seconds is not None:
        fraction = (
            decimal.Decimal(self._microseconds) /
            definitions.MICROSECONDS_PER_SECOND)
        self._normalized_timestamp = (
            fraction + decimal.Decimal(self._number_of_seconds))
    return self._normalized_timestamp
def create(self, instance, cidr_mask, description, **kwargs):
    """Create an ACL entry for the specified instance.

    :param str instance: The name of the instance to associate the new ACL entry with.
    :param str cidr_mask: The IPv4 CIDR mask for the new ACL entry.
    :param str description: A short description for the new ACL entry.
    :param collector kwargs: (optional) Additional key=value pairs to be supplied to the
        creation payload. **Caution:** fields unrecognized by the API will cause this request
        to fail with a 400 from the API.
    :return: a concrete ACL object built from the API response data.
    """
    # Build up request data.
    url = self._url.format(instance=instance)
    request_data = {
        'cidr_mask': cidr_mask,
        'description': description
    }
    request_data.update(kwargs)
    # Call to create an instance.
    response = requests.post(
        url,
        data=json.dumps(request_data),
        **self._default_request_kwargs
    )
    # Log outcome of instance creation request.
    # NOTE(review): the failure branch also logs at INFO level -- consider
    # whether it should be WARNING/ERROR instead.
    if response.status_code == 200:
        logger.info('Successfully created a new ACL for instance {} with: {}.'
                    .format(instance, request_data))
    else:
        logger.info('Failed to create a new ACL for instance {} with: {}.'
                    .format(instance, request_data))
    # Response data is parsed regardless of status; presumably
    # _get_response_data surfaces API errors -- TODO confirm.
    data = self._get_response_data(response)
    return self._concrete_acl(data)
def encrypt_dir(self,
                path,
                output_path=None,
                overwrite=False,
                stream=True,
                enable_verbose=True):
    """
    Encrypt everything in a directory.

    :param path: path of the dir you need to encrypt
    :param output_path: encrypted dir output path
    :param overwrite: if True, then silently overwrite output file if exists
    :param stream: if it is a very big file, stream mode can avoid using
        too much memory
    :param enable_verbose: boolean, trigger on/off the help information
    :return: the output directory path actually used
    """
    path, output_path = files.process_dst_overwrite_args(
        src=path, dst=output_path, overwrite=overwrite,
        src_to_dst_func=files.get_encrpyted_path,
    )
    self._show("--- Encrypt directory '%s' ---" % path,
               enable_verbose=enable_verbose)
    # Bug fix: time.clock() was removed in Python 3.8; perf_counter() is
    # the documented replacement for measuring elapsed time.
    st = time.perf_counter()
    for current_dir, _, file_list in os.walk(path):
        # Mirror the source directory layout under the output path.
        # os.walk is top-down, so parent directories are created first.
        new_dir = current_dir.replace(path, output_path)
        if not os.path.exists(new_dir):  # pragma: no cover
            os.mkdir(new_dir)
        for basename in file_list:
            old_path = os.path.join(current_dir, basename)
            new_path = os.path.join(new_dir, basename)
            self.encrypt_file(old_path, new_path,
                              overwrite=overwrite,
                              stream=stream,
                              enable_verbose=enable_verbose)
    self._show("Complete! Elapse %.6f seconds" % (time.perf_counter() - st,),
               enable_verbose=enable_verbose)
    return output_path
def html_dataset_type(is_binary, is_imbalanced):
    """
    Return HTML report file dataset type.

    :param is_binary: is_binary flag (binary : True , multi-class : False)
    :type is_binary: bool
    :param is_imbalanced: is_imbalanced flag (imbalance : True , balance : False)
    :type is_imbalanced: bool
    :return: dataset_type as str
    """
    class_type = "Binary Classification" if is_binary else "Multi-Class Classification"
    balance_type = "Imbalanced" if is_imbalanced else "Balanced"
    parts = [
        "<h2>Dataset Type : </h2>\n",
        "<ul>\n\n<li>{0}</li>\n\n<li>{1}</li>\n</ul>\n".format(
            class_type, balance_type),
        "<p>{0}</p>\n".format(RECOMMEND_HTML_MESSAGE),
        "<p>{0}</p>\n".format(RECOMMEND_HTML_MESSAGE2),
    ]
    return "".join(parts)
def get_bip32_address(self, ecdh=False):
    """Compute BIP32 derivation address according to SLIP-0013/0017."""
    index = struct.pack('<L', self.identity_dict.get('index', 0))
    addr = index + self.to_bytes()
    log.debug('bip32 address string: %r', addr)
    digest = hashlib.sha256(addr).digest()
    stream = io.BytesIO(bytearray(digest))
    HARDENED = 0x80000000
    # Purpose field: 17 for ECDH (SLIP-0017), 13 for signing (SLIP-0013).
    purpose = 17 if ecdh else 13
    address_n = [purpose] + list(util.recv(stream, '<LLLL'))
    return [HARDENED | item for item in address_n]
def revert_to_max(self):
    """
    N.revert_to_max()

    Sets all N's stochastics to their MAP values.
    """
    try:
        # Common case: self.mu supports per-stochastic indexing.
        self._set_stochastics([self.mu[s] for s in self.stochastics])
    except KeyError:
        # Fallback: presumably self.mu can be indexed by the whole
        # stochastics container at once -- TODO confirm.
        self._set_stochastics(self.mu[self.stochastics])
def get_cache(self, namespace, query_hash, length, start, end):
    """Return the cached (start, value) rows for the given query and date range."""
    sql = ('SELECT start, value FROM gauged_cache WHERE namespace = ? '
           'AND hash = ? AND length = ? AND start BETWEEN ? AND ?')
    # Grab the cursor once in case `cursor` is a property.
    cursor = self.cursor
    cursor.execute(sql, (namespace, query_hash, length, start, end))
    return tuple(cursor.fetchall())
def get(self, container):
    """
    Returns a Container matching the specified container name. If no such
    container exists, a NoSuchContainer exception is raised.
    """
    name = utils.get_name(container)
    resp, resp_body = self.api.method_head("/%s" % name)
    headers = resp.headers
    # Byte/object counts come back as response headers on the HEAD call.
    data = {
        "name": name,
        "total_bytes": int(headers.get("x-container-bytes-used", "0")),
        "object_count": int(headers.get("x-container-object-count", "0")),
    }
    return Container(self, data, loaded=False)
def handle_receive_lock_expired(
channel_state: NettingChannelState,
state_change: ReceiveLockExpired,
block_number: BlockNumber,
) -> TransitionResult[NettingChannelState]:
"""Remove expired locks from channel states."""
# Validate the LockExpired message against the partner's (sender's)
# current state at the current block.
is_valid, msg, merkletree = is_valid_lock_expired(
state_change=state_change,
channel_state=channel_state,
sender_state=channel_state.partner_state,
receiver_state=channel_state.our_state,
block_number=block_number,
)
events: List[Event] = list()
if is_valid:
assert merkletree, 'is_valid_lock_expired should return merkletree if valid'
# Accept the sender's new balance proof and merkle tree, then drop
# the expired lock from the partner's unclaimed locks.
channel_state.partner_state.balance_proof = state_change.balance_proof
channel_state.partner_state.merkletree = merkletree
_del_unclaimed_lock(channel_state.partner_state, state_change.secrethash)
# Acknowledge the processed message back to the sender.
send_processed = SendProcessed(
recipient=state_change.balance_proof.sender,
channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
message_identifier=state_change.message_identifier,
)
events = [send_processed]
else:
assert msg, 'is_valid_lock_expired should return error msg if not valid'
# Invalid message: record why it was rejected; channel state is left
# unchanged.
invalid_lock_expired = EventInvalidReceivedLockExpired(
secrethash=state_change.secrethash,
reason=msg,
)
events = [invalid_lock_expired]
return TransitionResult(channel_state, events) | Remove expired locks from channel states. |
def format_help(self, ctx, formatter):
"""Writes the help into the formatter if it exists.
This calls into the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
"""
self.format_usage(ctx, formatter)
self.format_help_text(ctx, formatter)
self.format_options(ctx, formatter)
self.format_epilog(ctx, formatter) | Writes the help into the formatter if it exists.
This calls into the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog` |
def timed(name=None, file=sys.stdout, callback=None, wall_clock=True):
"""
Context manager to make it easy to time the execution of a piece of code.
This timer will never run your code several times and is meant more for
simple in-production timing, instead of benchmarking. Reports the
wall-clock time (using `time.time`) and not the processor time.
Parameters
----------
name : str
Name of the timing block, to identify it.
file : file handler
Which file handler to print the results to. Default is standard output.
If a numpy array and size 1 is given, the time in seconds will be
stored inside it. Ignored if `callback` is set.
callback : callable
This offer even more flexibility than `file`. The callable will be
called at the end of the execution with a single floating point
argument with the elapsed time in seconds.
Examples
--------
>>> import deepdish as dd
>>> import time
The `timed` function is a context manager, so everything inside the
``with`` block will be timed. The results will be printed by default to
standard output:
>>> with dd.timed('Sleep'): # doctest: +SKIP
... time.sleep(1)
[timed] Sleep: 1.001035451889038 s
Using the `callback` parameter, we can accumulate multiple runs into a
list:
>>> times = []
>>> for i in range(3): # doctest: +SKIP
... with dd.timed(callback=times.append):
... time.sleep(1)
>>> times # doctest: +SKIP
[1.0035350322723389, 1.0035550594329834, 1.0039470195770264]
"""
# NOTE(review): this is a generator (single yield), so it is presumably
# wrapped by contextlib.contextmanager at the definition site — confirm.
# NOTE(review): the `wall_clock` parameter is never read in this body.
start = time.time()
yield
end = time.time()
delta = end - start
# Exactly one reporting sink is used, in priority order:
# callback, then a size-1 numpy array passed as `file`, then printing.
if callback is not None:
callback(delta)
elif isinstance(file, np.ndarray) and len(file) == 1:
file[0] = delta
else:
name_str = ' {}'.format(name) if name is not None else ''
print(("[timed]{0}: {1} s".format(name_str, delta)), file=file) | Context manager to make it easy to time the execution of a piece of code.
This timer will never run your code several times and is meant more for
simple in-production timing, instead of benchmarking. Reports the
wall-clock time (using `time.time`) and not the processor time.
Parameters
----------
name : str
Name of the timing block, to identify it.
file : file handler
Which file handler to print the results to. Default is standard output.
If a numpy array and size 1 is given, the time in seconds will be
stored inside it. Ignored if `callback` is set.
callback : callable
This offer even more flexibility than `file`. The callable will be
called at the end of the execution with a single floating point
argument with the elapsed time in seconds.
Examples
--------
>>> import deepdish as dd
>>> import time
The `timed` function is a context manager, so everything inside the
``with`` block will be timed. The results will be printed by default to
standard output:
>>> with dd.timed('Sleep'): # doctest: +SKIP
... time.sleep(1)
[timed] Sleep: 1.001035451889038 s
Using the `callback` parameter, we can accumulate multiple runs into a
list:
>>> times = []
>>> for i in range(3): # doctest: +SKIP
... with dd.timed(callback=times.append):
... time.sleep(1)
>>> times # doctest: +SKIP
[1.0035350322723389, 1.0035550594329834, 1.0039470195770264] |
def json(self):
"""Returns the json-encoded content of a response, if any."""
# NOTE(review): this guard looks inverted — it only consults the headers
# for an encoding when `self.headers` is falsy (empty), in which case
# get_encoding_from_headers(self.headers) has nothing to inspect.
# Compare requests' Response.json, which keys off `not self.encoding`.
# Confirm intent before changing behavior.
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
# Fallback: let json.loads infer the encoding from the raw content.
return json.loads(self.content)
def sum_table(records):
"""
Used to compute summaries. The records are assumed to have numeric
fields, except the first field which is ignored, since it typically
contains a label. Here is an example:
>>> sum_table([('a', 1), ('b', 2)])
['total', 3]
"""
size = len(records[0])
result = [None] * size
firstrec = records[0]
for i in range(size):
if isinstance(firstrec[i], (numbers.Number, numpy.ndarray)):
result[i] = sum(rec[i] for rec in records)
else:
result[i] = 'total'
return result | Used to compute summaries. The records are assumed to have numeric
fields, except the first field which is ignored, since it typically
contains a label. Here is an example:
>>> sum_table([('a', 1), ('b', 2)])
['total', 3] |
def bipole(src, rec, depth, res, freqtime, signal=None, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, msrc=False,
srcpts=1, mrec=False, recpts=1, strength=0, xdirect=False,
ht='fht', htarg=None, ft='sin', ftarg=None, opt=None, loop=None,
verb=2):
r"""Return the electromagnetic field due to an electromagnetic source.
Calculate the electromagnetic frequency- or time-domain field due to
arbitrary finite electric or magnetic bipole sources, measured by arbitrary
finite electric or magnetic bipole receivers. By default, the
electromagnetic response is normalized to to source and receiver of 1 m
length, and source strength of 1 A.
See Also
--------
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m):
- [x0, x1, y0, y1, z0, z1] (bipole of finite length)
- [x, y, z, azimuth, dip] (dipole, infinitesimal small)
Dimensions:
- The coordinates x, y, and z (dipole) or x0, x1, y0, y1, z0, and
z1 (bipole) can be single values or arrays.
- The variables x and y (dipole) or x0, x1, y0, and y1 (bipole)
must have the same dimensions.
- The variable z (dipole) or z0 and z1 (bipole) must either be
single values or having the same dimension as the other
coordinates.
- The variables azimuth and dip must be single values. If they have
different angles, you have to use the bipole-method (with
srcpts/recpts = 1, so it is calculated as dipoles).
Angles (coordinate system is left-handed, positive z down
(East-North-Depth):
- azimuth (°): horizontal deviation from x-axis, anti-clockwise.
- dip (°): vertical deviation from xy-plane downwards.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
Alternatively, res can be a dictionary. See the main manual of empymod
too see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
zetaV, which can be used to, for instance, use the Cole-Cole model for
IP.
freqtime : array_like
Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
signal : {None, 0, 1, -1}, optional
Source signal, default is None:
- None: Frequency-domain response
- -1 : Switch-off time-domain response
- 0 : Impulse time-domain response
- +1 : Switch-on time-domain response
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
msrc, mrec : boolean, optional
If True, source/receiver (msrc/mrec) is magnetic, else electric.
Default is False.
srcpts, recpts : int, optional
Number of integration points for bipole source/receiver, default is 1:
- srcpts/recpts < 3 : bipole, but calculated as dipole at centre
- srcpts/recpts >= 3 : bipole
strength : float, optional
Source strength (A):
- If 0, output is normalized to source and receiver of 1 m length,
and source strength of 1 A.
- If != 0, output is returned for given source and receiver length,
and source strength.
Default is 0.
xdirect : bool or None, optional
Direct field calculation (only if src and rec are in the same layer):
- If True, direct field is calculated analytically in the frequency
domain.
- If False, direct field is calculated in the wavenumber domain.
- If None, direct field is excluded from the calculation, and only
reflected fields are returned (secondary field).
Defaults to False.
ht : {'fht', 'qwe', 'quad'}, optional
Flag to choose either the *Digital Linear Filter* method (FHT, *Fast
Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a
simple *Quadrature* (QUAD) for the Hankel transform. Defaults to
'fht'.
htarg : dict or list, optional
Depends on the value for ``ht``:
- If ``ht`` = 'fht': [fhtfilt, pts_per_dec]:
- fhtfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(default: ``empymod.filters.key_201_2009()``)
- pts_per_dec: points per decade; (default: 0)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec,
diff_quad, a, b, limit]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-30)
- nquad: order of Gaussian quadrature (default: 51)
- maxint: maximum number of partial integral intervals
(default: 40)
- pts_per_dec: points per decade; (default: 0)
- If 0, no interpolation is used.
- If > 0, interpolation is used.
- diff_quad: criteria when to swap to QUAD (only relevant if
opt='spline') (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-20)
- limit: An upper bound on the number of subintervals used in
the adaptive algorithm (default: 500)
- lmin: Minimum wavenumber (default 1e-6)
- lmax: Maximum wavenumber (default 0.1)
- pts_per_dec: points per decade (default: 40)
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
A few examples, assuming ``ht`` = ``qwe``:
- Only changing rtol:
{'rtol': 1e-4} or [1e-4] or 1e-4
- Changing rtol and nquad:
{'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101]
- Only changing diff_quad:
{'diffquad': 10} or ['', '', '', '', '', 10]
ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional
Only used if ``signal`` != None. Flag to choose either the Digital
Linear Filter method (Sine- or Cosine-Filter), the
Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the
Fourier transform. Defaults to 'sin'.
ftarg : dict or list, optional
Only used if ``signal`` !=None. Depends on the value for ``ft``:
- If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]:
- fftfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(Default: ``empymod.filters.key_201_CosSin_2012()``)
- pts_per_dec: points per decade; (default: -1)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]:
- rtol: relative tolerance (default: 1e-8)
- atol: absolute tolerance (default: 1e-20)
- nquad: order of Gaussian quadrature (default: 21)
- maxint: maximum number of partial integral intervals
(default: 200)
- pts_per_dec: points per decade (default: 20)
- diff_quad: criteria when to swap to QUAD (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]:
- pts_per_dec: sampels per decade (default: 10)
- add_dec: additional decades [left, right] (default: [-2, 1])
- q: exponent of power law bias (default: 0); -1 <= q <= 1
- If ``ft`` = 'fft': [dfreq, nfreq, ntot]:
- dfreq: Linear step-size of frequencies (default: 0.002)
- nfreq: Number of frequencies (default: 2048)
- ntot: Total number for FFT; difference between nfreq and
ntot is padded with zeroes. This number is ideally a
power of 2, e.g. 2048 or 4096 (default: nfreq).
- pts_per_dec : points per decade (default: None)
Padding can sometimes improve the result, not always. The
default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set
to an integer, calculated frequencies are logarithmically
spaced with the given number per decade, and then interpolated
to yield the required frequencies for the FFT.
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
See ``htarg`` for a few examples.
opt : {None, 'parallel'}, optional
Optimization flag. Defaults to None:
- None: Normal case, no parallelization nor interpolation is used.
- If 'parallel', the package ``numexpr`` is used to evaluate the
most expensive statements. Always check if it actually improves
performance for a specific problem. It can speed up the
calculation for big arrays, but will most likely be slower for
small arrays. It will use all available cores for these specific
statements, which all contain ``Gamma`` in one way or another,
which has dimensions (#frequencies, #offsets, #layers, #lambdas),
therefore can grow pretty big. The module ``numexpr`` uses by
default all available cores up to a maximum of 8. You can change
this behaviour to your desired number of threads ``nthreads``
with ``numexpr.set_num_threads(nthreads)``.
- The value 'spline' is deprecated and will be removed. See
``htarg`` instead for the interpolated versions.
The option 'parallel' only affects speed and memory usage, whereas
'spline' also affects precision! Please read the note in the *README*
documentation for more information.
loop : {None, 'freq', 'off'}, optional
Define if to calculate everything vectorized or if to loop over
frequencies ('freq') or over offsets ('off'), default is None. It
always loops over frequencies if ``ht = 'qwe'`` or if ``opt =
'spline'``. Calculating everything vectorized is fast for few offsets
OR for few frequencies. However, if you calculate many frequencies for
many offsets, it might be faster to loop over frequencies. Only
comparing the different versions will yield the answer for your
specific problem at hand!
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
EM : ndarray, (nfreq, nrec, nsrc)
Frequency- or time-domain EM field (depending on ``signal``):
- If rec is electric, returns E [V/m].
- If rec is magnetic, returns B [T] (not H [A/m]!).
However, source and receiver are normalised (unless strength != 0). So
for instance in the electric case the source strength is 1 A and its
length is 1 m. So the electric field could also be written as
[V/(A.m2)].
In the magnetic case the source strength is given by
:math:`i\omega\mu_0 A I^e`, where A is the loop area (m2), and
:math:`I^e` the electric source strength. For the normalized magnetic
source :math:`A=1m^2` and :math:`I^e=1 Ampere`. A magnetic source is
therefore frequency dependent.
The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
are removed.
Examples
--------
>>> import numpy as np
>>> from empymod import bipole
>>> # x-directed bipole source: x0, x1, y0, y1, z0, z1
>>> src = [-50, 50, 0, 0, 100, 100]
>>> # x-directed dipole source-array: x, y, z, azimuth, dip
>>> rec = [np.arange(1, 11)*500, np.zeros(10), 200, 0, 0]
>>> # layer boundaries
>>> depth = [0, 300, 1000, 1050]
>>> # layer resistivities
>>> res = [1e20, .3, 1, 50, 1]
>>> # Frequency
>>> freq = 1
>>> # Calculate electric field due to an electric source at 1 Hz.
>>> # [msrc = mrec = True (default)]
>>> EMfield = bipole(src, rec, depth, res, freq, verb=4)
:: empymod START ::
~
depth [m] : 0 300 1000 1050
res [Ohm.m] : 1E+20 0.3 1 50 1
aniso [-] : 1 1 1 1 1
epermH [-] : 1 1 1 1 1
epermV [-] : 1 1 1 1 1
mpermH [-] : 1 1 1 1 1
mpermV [-] : 1 1 1 1 1
frequency [Hz] : 1
Hankel : DLF (Fast Hankel Transform)
> Filter : Key 201 (2009)
> DLF type : Standard
Kernel Opt. : None
Loop over : None (all vectorized)
Source(s) : 1 bipole(s)
> intpts : 1 (as dipole)
> length [m] : 100
> x_c [m] : 0
> y_c [m] : 0
> z_c [m] : 100
> azimuth [°] : 0
> dip [°] : 0
Receiver(s) : 10 dipole(s)
> x [m] : 500 - 5000 : 10 [min-max; #]
: 500 1000 1500 2000 2500 3000 3500 4000 4500 5000
> y [m] : 0 - 0 : 10 [min-max; #]
: 0 0 0 0 0 0 0 0 0 0
> z [m] : 200
> azimuth [°] : 0
> dip [°] : 0
Required ab's : 11
~
:: empymod END; runtime = 0:00:00.005536 :: 1 kernel call(s)
~
>>> print(EMfield)
[ 1.68809346e-10 -3.08303130e-10j -8.77189179e-12 -3.76920235e-11j
-3.46654704e-12 -4.87133683e-12j -3.60159726e-13 -1.12434417e-12j
1.87807271e-13 -6.21669759e-13j 1.97200208e-13 -4.38210489e-13j
1.44134842e-13 -3.17505260e-13j 9.92770406e-14 -2.33950871e-13j
6.75287598e-14 -1.74922886e-13j 4.62724887e-14 -1.32266600e-13j]
"""
# === 1. LET'S START ============
t0 = printstartfinish(verb)
# === 2. CHECK INPUT ============
# Backwards compatibility
htarg, opt = spline_backwards_hankel(ht, htarg, opt)
# Check times and Fourier Transform arguments and get required frequencies
if signal is None:
freq = freqtime
else:
time, freq, ft, ftarg = check_time(freqtime, signal, ft, ftarg, verb)
# Check layer parameters
model = check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV,
xdirect, verb)
depth, res, aniso, epermH, epermV, mpermH, mpermV, isfullspace = model
# Check frequency => get etaH, etaV, zetaH, and zetaV
frequency = check_frequency(freq, res, aniso, epermH, epermV, mpermH,
mpermV, verb)
freq, etaH, etaV, zetaH, zetaV = frequency
# Update etaH/etaV and zetaH/zetaV according to user-provided model
if isinstance(res, dict) and 'func_eta' in res:
etaH, etaV = res['func_eta'](res, locals())
if isinstance(res, dict) and 'func_zeta' in res:
zetaH, zetaV = res['func_zeta'](res, locals())
# Check Hankel transform parameters
ht, htarg = check_hankel(ht, htarg, verb)
# Check optimization
use_ne_eval, loop_freq, loop_off = check_opt(opt, loop, ht, htarg, verb)
# Check src and rec, get flags if dipole or not
# nsrcz/nrecz are number of unique src/rec-pole depths
src, nsrc, nsrcz, srcdipole = check_bipole(src, 'src')
rec, nrec, nrecz, recdipole = check_bipole(rec, 'rec')
# === 3. EM-FIELD CALCULATION ============
# Pre-allocate output EM array
EM = np.zeros((freq.size, nrec*nsrc), dtype=complex)
# Initialize kernel count, conv (only for QWE)
# (how many times the wavenumber-domain kernel was calld)
kcount = 0
conv = True
# Define some indeces
isrc = int(nsrc/nsrcz) # this is either 1 or nsrc
irec = int(nrec/nrecz) # this is either 1 or nrec
isrz = int(isrc*irec) # this is either 1, nsrc, nrec, or nsrc*nrec
# The kernel handles only 1 ab with one srcz-recz combination at once.
# Hence we have to loop over every different depth of src or rec, and
# over all required ab's.
for isz in range(nsrcz): # Loop over source depths
# Get this source
srcazmdip = get_azm_dip(src, isz, nsrcz, srcpts, srcdipole, strength,
'src', verb)
tsrc, srcazm, srcdip, srcg_w, srcpts, src_w = srcazmdip
for irz in range(nrecz): # Loop over receiver depths
# Get this receiver
recazmdip = get_azm_dip(rec, irz, nrecz, recpts, recdipole,
strength, 'rec', verb)
trec, recazm, recdip, recg_w, recpts, rec_w = recazmdip
# Get required ab's
ab_calc = get_abs(msrc, mrec, srcazm, srcdip, recazm, recdip, verb)
# Pre-allocate temporary source-EM array for integration loop
sEM = np.zeros((freq.size, isrz), dtype=complex)
for isg in range(srcpts): # Loop over src integration points
# This integration source
tisrc = [tsrc[0][isg::srcpts], tsrc[1][isg::srcpts],
tsrc[2][isg]]
# Get layer number in which src resides
lsrc, zsrc = get_layer_nr(tisrc, depth)
# Pre-allocate temporary receiver EM arrays for integr. loop
rEM = np.zeros((freq.size, isrz), dtype=complex)
for irg in range(recpts): # Loop over rec integration pts
# Note, if source or receiver is a bipole, but horizontal
# (dip=0), then calculation could be sped up by not looping
# over the bipole elements, but calculate it all in one go.
# This integration receiver
tirec = [trec[0][irg::recpts], trec[1][irg::recpts],
trec[2][irg]]
# Get src-rec offsets and angles
off, angle = get_off_ang(tisrc, tirec, isrc, irec, verb)
# Get layer number in which rec resides
lrec, zrec = get_layer_nr(tirec, depth)
# Gather variables
finp = (off, angle, zsrc, zrec, lsrc, lrec, depth, freq,
etaH, etaV, zetaH, zetaV, xdirect, isfullspace, ht,
htarg, use_ne_eval, msrc, mrec, loop_freq,
loop_off, conv)
# Pre-allocate temporary EM array for ab-loop
abEM = np.zeros((freq.size, isrz), dtype=complex)
for iab in ab_calc: # Loop over required ab's
# Carry-out the frequency-domain calculation
out = fem(iab, *finp)
# Get geometrical scaling factor
tfact = get_geo_fact(iab, srcazm, srcdip, recazm,
recdip, msrc, mrec)
# Add field to EM with geometrical factor
abEM += out[0]*np.squeeze(tfact)
# Update kernel count
kcount += out[1]
# Update conv (QWE convergence)
conv *= out[2]
# Add this receiver element, with weight from integration
rEM += abEM*recg_w[irg]
# Add this source element, with weight from integration
sEM += rEM*srcg_w[isg]
# Scale signal for src-strength and src/rec-lengths
src_rec_w = 1
if strength > 0:
src_rec_w *= np.repeat(src_w, irec)
src_rec_w *= np.tile(rec_w, isrc)
sEM *= src_rec_w
# Add this src-rec signal
if nrec == nrecz:
if nsrc == nsrcz: # Case 1: Looped over each src and each rec
EM[:, isz*nrec+irz:isz*nrec+irz+1] = sEM
else: # Case 2: Looped over each rec
EM[:, irz:nsrc*nrec:nrec] = sEM
else:
if nsrc == nsrcz: # Case 3: Looped over each src
EM[:, isz*nrec:nrec*(isz+1)] = sEM
else: # Case 4: All in one go
EM = sEM
# In case of QWE/QUAD, print Warning if not converged
conv_warning(conv, htarg, 'Hankel', verb)
# Do f->t transform if required
if signal is not None:
EM, conv = tem(EM, EM[0, :], freq, time, signal, ft, ftarg)
# In case of QWE/QUAD, print Warning if not converged
conv_warning(conv, ftarg, 'Fourier', verb)
# Reshape for number of sources
EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order='F'))
# === 4. FINISHED ============
printstartfinish(verb, t0, kcount)
return EM | r"""Return the electromagnetic field due to an electromagnetic source.
Calculate the electromagnetic frequency- or time-domain field due to
arbitrary finite electric or magnetic bipole sources, measured by arbitrary
finite electric or magnetic bipole receivers. By default, the
electromagnetic response is normalized to to source and receiver of 1 m
length, and source strength of 1 A.
See Also
--------
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m):
- [x0, x1, y0, y1, z0, z1] (bipole of finite length)
- [x, y, z, azimuth, dip] (dipole, infinitesimal small)
Dimensions:
- The coordinates x, y, and z (dipole) or x0, x1, y0, y1, z0, and
z1 (bipole) can be single values or arrays.
- The variables x and y (dipole) or x0, x1, y0, and y1 (bipole)
must have the same dimensions.
- The variable z (dipole) or z0 and z1 (bipole) must either be
single values or having the same dimension as the other
coordinates.
- The variables azimuth and dip must be single values. If they have
different angles, you have to use the bipole-method (with
srcpts/recpts = 1, so it is calculated as dipoles).
Angles (coordinate system is left-handed, positive z down
(East-North-Depth):
- azimuth (°): horizontal deviation from x-axis, anti-clockwise.
- dip (°): vertical deviation from xy-plane downwards.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
Alternatively, res can be a dictionary. See the main manual of empymod
too see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
zetaV, which can be used to, for instance, use the Cole-Cole model for
IP.
freqtime : array_like
Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
signal : {None, 0, 1, -1}, optional
Source signal, default is None:
- None: Frequency-domain response
- -1 : Switch-off time-domain response
- 0 : Impulse time-domain response
- +1 : Switch-on time-domain response
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
msrc, mrec : boolean, optional
If True, source/receiver (msrc/mrec) is magnetic, else electric.
Default is False.
srcpts, recpts : int, optional
Number of integration points for bipole source/receiver, default is 1:
- srcpts/recpts < 3 : bipole, but calculated as dipole at centre
- srcpts/recpts >= 3 : bipole
strength : float, optional
Source strength (A):
- If 0, output is normalized to source and receiver of 1 m length,
and source strength of 1 A.
- If != 0, output is returned for given source and receiver length,
and source strength.
Default is 0.
xdirect : bool or None, optional
Direct field calculation (only if src and rec are in the same layer):
- If True, direct field is calculated analytically in the frequency
domain.
- If False, direct field is calculated in the wavenumber domain.
- If None, direct field is excluded from the calculation, and only
reflected fields are returned (secondary field).
Defaults to False.
ht : {'fht', 'qwe', 'quad'}, optional
Flag to choose either the *Digital Linear Filter* method (FHT, *Fast
Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a
simple *Quadrature* (QUAD) for the Hankel transform. Defaults to
'fht'.
htarg : dict or list, optional
Depends on the value for ``ht``:
- If ``ht`` = 'fht': [fhtfilt, pts_per_dec]:
- fhtfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(default: ``empymod.filters.key_201_2009()``)
- pts_per_dec: points per decade; (default: 0)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec,
diff_quad, a, b, limit]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-30)
- nquad: order of Gaussian quadrature (default: 51)
- maxint: maximum number of partial integral intervals
(default: 40)
- pts_per_dec: points per decade; (default: 0)
- If 0, no interpolation is used.
- If > 0, interpolation is used.
- diff_quad: criteria when to swap to QUAD (only relevant if
opt='spline') (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]:
- rtol: relative tolerance (default: 1e-12)
- atol: absolute tolerance (default: 1e-20)
- limit: An upper bound on the number of subintervals used in
the adaptive algorithm (default: 500)
- lmin: Minimum wavenumber (default 1e-6)
- lmax: Maximum wavenumber (default 0.1)
- pts_per_dec: points per decade (default: 40)
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
A few examples, assuming ``ht`` = ``qwe``:
- Only changing rtol:
{'rtol': 1e-4} or [1e-4] or 1e-4
- Changing rtol and nquad:
{'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101]
- Only changing diff_quad:
{'diffquad': 10} or ['', '', '', '', '', 10]
ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional
Only used if ``signal`` != None. Flag to choose either the Digital
Linear Filter method (Sine- or Cosine-Filter), the
Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the
Fourier transform. Defaults to 'sin'.
ftarg : dict or list, optional
Only used if ``signal`` !=None. Depends on the value for ``ft``:
- If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]:
- fftfilt: string of filter name in ``empymod.filters`` or
the filter method itself.
(Default: ``empymod.filters.key_201_CosSin_2012()``)
- pts_per_dec: points per decade; (default: -1)
- If 0: Standard DLF.
- If < 0: Lagged Convolution DLF.
- If > 0: Splined DLF
- If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]:
- rtol: relative tolerance (default: 1e-8)
- atol: absolute tolerance (default: 1e-20)
- nquad: order of Gaussian quadrature (default: 21)
- maxint: maximum number of partial integral intervals
(default: 200)
- pts_per_dec: points per decade (default: 20)
- diff_quad: criteria when to swap to QUAD (default: 100)
- a: lower limit for QUAD (default: first interval from QWE)
- b: upper limit for QUAD (default: last interval from QWE)
- limit: limit for quad (default: maxint)
- If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]:
- pts_per_dec: sampels per decade (default: 10)
- add_dec: additional decades [left, right] (default: [-2, 1])
- q: exponent of power law bias (default: 0); -1 <= q <= 1
- If ``ft`` = 'fft': [dfreq, nfreq, ntot]:
- dfreq: Linear step-size of frequencies (default: 0.002)
- nfreq: Number of frequencies (default: 2048)
- ntot: Total number for FFT; difference between nfreq and
ntot is padded with zeroes. This number is ideally a
power of 2, e.g. 2048 or 4096 (default: nfreq).
- pts_per_dec : points per decade (default: None)
Padding can sometimes improve the result, not always. The
default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set
to an integer, calculated frequencies are logarithmically
spaced with the given number per decade, and then interpolated
to yield the required frequencies for the FFT.
The values can be provided as dict with the keywords, or as list.
However, if provided as list, you have to follow the order given above.
See ``htarg`` for a few examples.
opt : {None, 'parallel'}, optional
Optimization flag. Defaults to None:
- None: Normal case, no parallelization nor interpolation is used.
- If 'parallel', the package ``numexpr`` is used to evaluate the
most expensive statements. Always check if it actually improves
performance for a specific problem. It can speed up the
calculation for big arrays, but will most likely be slower for
small arrays. It will use all available cores for these specific
statements, which all contain ``Gamma`` in one way or another,
which has dimensions (#frequencies, #offsets, #layers, #lambdas),
therefore can grow pretty big. The module ``numexpr`` uses by
default all available cores up to a maximum of 8. You can change
this behaviour to your desired number of threads ``nthreads``
with ``numexpr.set_num_threads(nthreads)``.
- The value 'spline' is deprecated and will be removed. See
``htarg`` instead for the interpolated versions.
The option 'parallel' only affects speed and memory usage, whereas
'spline' also affects precision! Please read the note in the *README*
documentation for more information.
loop : {None, 'freq', 'off'}, optional
Define if to calculate everything vectorized or if to loop over
frequencies ('freq') or over offsets ('off'), default is None. It
always loops over frequencies if ``ht = 'qwe'`` or if ``opt =
'spline'``. Calculating everything vectorized is fast for few offsets
OR for few frequencies. However, if you calculate many frequencies for
many offsets, it might be faster to loop over frequencies. Only
comparing the different versions will yield the answer for your
specific problem at hand!
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
EM : ndarray, (nfreq, nrec, nsrc)
Frequency- or time-domain EM field (depending on ``signal``):
- If rec is electric, returns E [V/m].
- If rec is magnetic, returns B [T] (not H [A/m]!).
However, source and receiver are normalised (unless strength != 0). So
for instance in the electric case the source strength is 1 A and its
length is 1 m. So the electric field could also be written as
[V/(A.m2)].
In the magnetic case the source strength is given by
:math:`i\omega\mu_0 A I^e`, where A is the loop area (m2), and
:math:`I^e` the electric source strength. For the normalized magnetic
source :math:`A=1m^2` and :math:`I^e=1 Ampere`. A magnetic source is
therefore frequency dependent.
The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
are removed.
Examples
--------
>>> import numpy as np
>>> from empymod import bipole
>>> # x-directed bipole source: x0, x1, y0, y1, z0, z1
>>> src = [-50, 50, 0, 0, 100, 100]
>>> # x-directed dipole source-array: x, y, z, azimuth, dip
>>> rec = [np.arange(1, 11)*500, np.zeros(10), 200, 0, 0]
>>> # layer boundaries
>>> depth = [0, 300, 1000, 1050]
>>> # layer resistivities
>>> res = [1e20, .3, 1, 50, 1]
>>> # Frequency
>>> freq = 1
>>> # Calculate electric field due to an electric source at 1 Hz.
>>> # [msrc = mrec = True (default)]
>>> EMfield = bipole(src, rec, depth, res, freq, verb=4)
:: empymod START ::
~
depth [m] : 0 300 1000 1050
res [Ohm.m] : 1E+20 0.3 1 50 1
aniso [-] : 1 1 1 1 1
epermH [-] : 1 1 1 1 1
epermV [-] : 1 1 1 1 1
mpermH [-] : 1 1 1 1 1
mpermV [-] : 1 1 1 1 1
frequency [Hz] : 1
Hankel : DLF (Fast Hankel Transform)
> Filter : Key 201 (2009)
> DLF type : Standard
Kernel Opt. : None
Loop over : None (all vectorized)
Source(s) : 1 bipole(s)
> intpts : 1 (as dipole)
> length [m] : 100
> x_c [m] : 0
> y_c [m] : 0
> z_c [m] : 100
> azimuth [°] : 0
> dip [°] : 0
Receiver(s) : 10 dipole(s)
> x [m] : 500 - 5000 : 10 [min-max; #]
: 500 1000 1500 2000 2500 3000 3500 4000 4500 5000
> y [m] : 0 - 0 : 10 [min-max; #]
: 0 0 0 0 0 0 0 0 0 0
> z [m] : 200
> azimuth [°] : 0
> dip [°] : 0
Required ab's : 11
~
:: empymod END; runtime = 0:00:00.005536 :: 1 kernel call(s)
~
>>> print(EMfield)
[ 1.68809346e-10 -3.08303130e-10j -8.77189179e-12 -3.76920235e-11j
-3.46654704e-12 -4.87133683e-12j -3.60159726e-13 -1.12434417e-12j
1.87807271e-13 -6.21669759e-13j 1.97200208e-13 -4.38210489e-13j
1.44134842e-13 -3.17505260e-13j 9.92770406e-14 -2.33950871e-13j
6.75287598e-14 -1.74922886e-13j 4.62724887e-14 -1.32266600e-13j] |
def get_manual_intervention(self, project, release_id, manual_intervention_id):
    """GetManualIntervention.
    [Preview API] Get manual intervention for a given release and manual intervention id.
    :param str project: Project ID or project name
    :param int release_id: Id of the release.
    :param int manual_intervention_id: Id of the manual intervention.
    :rtype: :class:`<ManualIntervention> <azure.devops.v5_1.release.models.ManualIntervention>`
    """
    route_values = {}
    # Serialize each route parameter only when it was actually supplied.
    for route_key, param_name, value, kind in (
            ('project', 'project', project, 'str'),
            ('releaseId', 'release_id', release_id, 'int'),
            ('manualInterventionId', 'manual_intervention_id',
             manual_intervention_id, 'int')):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, kind)
    response = self._send(http_method='GET',
                          location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e',
                          version='5.1-preview.1',
                          route_values=route_values)
    return self._deserialize('ManualIntervention', response)
[Preview API] Get manual intervention for a given release and manual intervention id.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int manual_intervention_id: Id of the manual intervention.
:rtype: :class:`<ManualIntervention> <azure.devops.v5_1.release.models.ManualIntervention>` |
def hash32(data: Any, seed=0) -> int:
    """
    Non-cryptographic, deterministic, fast hash.

    Args:
        data: data to hash (converted to a string representation first)
        seed: seed for the hash function
    Returns:
        signed 32-bit integer
    """
    with MultiTimerContext(timer, TIMING_HASH):
        c_data = to_str(data)
        if mmh3:
            # Prefer the C-accelerated mmh3 binding when it imported OK.
            return mmh3.hash(c_data, seed=seed)
        # Fallback: pure-Python MurmurHash3 (x86 32-bit variant); its
        # unsigned result is converted to signed 32-bit so both paths are
        # presumably consistent -- confirm against mmh3's return convention.
        py_data = to_bytes(c_data)
        py_unsigned = murmur3_x86_32(py_data, seed=seed)
        return twos_comp_to_signed(py_unsigned, n_bits=32) | Non-cryptographic, deterministic, fast hash.
Args:
data: data to hash
seed: seed
Returns:
signed 32-bit integer |
def read(self):
    """Return the LED's current brightness as an int.

    Reads up to 8 bytes from the brightness file descriptor, then rewinds
    the descriptor so the next call re-reads from the start.

    Returns:
        int: Current brightness.
    Raises:
        LEDError: if an I/O or OS error occurs.
    """
    try:
        raw = os.read(self._fd, 8)
    except OSError as e:
        raise LEDError(e.errno, "Reading LED brightness: " + e.strerror)
    # Rewind so subsequent reads see the current value again.
    try:
        os.lseek(self._fd, 0, os.SEEK_SET)
    except OSError as e:
        raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
    return int(raw)
Returns:
int: Current brightness.
Raises:
LEDError: if an I/O or OS error occurs. |
def make_thematic_png(self, outpath=None):
    """
    Convert a thematic map into png format with a legend
    :param outpath: if specified, will save the image instead of showing it
    """
    from matplotlib.patches import Patch
    fig, previewax = plt.subplots()
    shape = self.thmap.shape
    # Render the class map with the configured solar colormap; vmin/vmax pin
    # the color scale to the full range of configured solar classes.
    previewax.imshow(self.thmap,
                     origin='lower',
                     interpolation='nearest',
                     cmap=self.config.solar_cmap,
                     vmin=-1, vmax=len(self.config.solar_classes)-1)
    # One legend patch per (class label, color) pair from the config.
    legend_elements = [Patch(facecolor=c, label=sc, edgecolor='k')
                       for sc, c in self.config.solar_colors.items()]
    previewax.legend(handles=legend_elements, fontsize='x-small',
                     bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                     ncol=2, mode="expand", borderaxespad=0.)
    # NOTE(review): both limits use shape[0]; for non-square maps the x limit
    # should presumably be shape[1] -- confirm intended behavior.
    previewax.set_xlim([0, shape[0]])
    previewax.set_ylim([0, shape[0]])
    previewax.set_aspect("equal")
    previewax.set_axis_off()
    if outpath:
        # Save to disk instead of opening an interactive window.
        fig.savefig(outpath, dpi=300,
                    transparent=True,
                    bbox_inches='tight',
                    pad_inches=0.)
        plt.close()
    else:
        plt.show() | Convert a thematic map into png format with a legend
:param outpath: if specified, will save the image instead of showing it |
def validate_all_keys(obj_name, obj, validation_fun):
    """Recursively validate every (nested) key in `obj` with `validation_fun`.

    Args:
        obj_name (str): name for `obj` being validated.
        obj (dict): dictionary object.
        validation_fun (function): function used to validate the value
            of `key`.
    Returns:
        None: indicates validation successful
    Raises:
        ValidationError: `validation_fun` will raise this error on failure
    """
    for key in obj:
        validation_fun(obj_name, key)
        nested = obj[key]
        # Descend into nested dicts so every level is checked.
        if isinstance(nested, dict):
            validate_all_keys(obj_name, nested, validation_fun)
Args:
obj_name (str): name for `obj` being validated.
obj (dict): dictionary object.
validation_fun (function): function used to validate the value
of `key`.
Returns:
None: indicates validation successful
Raises:
ValidationError: `validation_fun` will raise this error on failure |
def encode_int(self, n):
    """Encodes an integer into a short Base64 string.

    Repeatedly divides ``n`` by ``self.BASE`` and maps each remainder onto
    ``self.ALPHABET``, emitting the most significant digit first.

    Example:
        ``encode_int(123)`` returns ``'B7'``.
    """
    # Renamed from `str`, which shadowed the builtin.
    digits = []  # least significant digit first; reversed on output
    while True:
        n, remainder = divmod(n, self.BASE)
        digits.append(self.ALPHABET[remainder])
        if n == 0:
            break
    return ''.join(reversed(digits))
Example:
``encode_int(123)`` returns ``'B7'``. |
def list_public_containers(self):
    """
    Returns a list of the names of all CDN-enabled containers.
    """
    # Only the response body matters here; the raw response is discarded.
    _, body = self.api.cdn_request("", "GET")
    return [container["name"] for container in body]
def _set_scores(self):
    """
    Compute anomaly scores for the time series
    This algorithm just takes the diff of threshold with current value as anomaly score
    """
    anom_scores = {}
    # Compare each point against the baseline series positionally; the two
    # series are assumed to be aligned index-for-index -- TODO confirm.
    for i, (timestamp, value) in enumerate(self.time_series.items()):
        baseline_value = self.baseline_time_series[i]
        if baseline_value > 0:
            # Percentage deviation from the baseline value.
            diff_percent = 100 * (value - baseline_value) / baseline_value
        elif value > 0:
            # Baseline is zero/negative but value positive: cap at +100%.
            diff_percent = 100.0
        else:
            diff_percent = 0.0
        anom_scores[timestamp] = 0.0
        # Only deviations beyond the configured thresholds score; downward
        # deviations are negated so scores are always non-negative.
        if self.percent_threshold_upper and diff_percent > 0 and diff_percent > self.percent_threshold_upper:
            anom_scores[timestamp] = diff_percent
        if self.percent_threshold_lower and diff_percent < 0 and diff_percent < self.percent_threshold_lower:
            anom_scores[timestamp] = -1 * diff_percent
    self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | Compute anomaly scores for the time series
This algorithm just takes the diff of threshold with current value as anomaly score |
def create_box_field(self, box_key, name, field_type, **kwargs):
    '''Creates a box field with the provided attributes.
    Args:
        box_key specifying the box to add the field to
        name required name string
        field_type required type string [TEXT_INPUT, DATE or PERSON]
        kwargs {}
    return (status code, field dict)
    '''
    # does not work -- this always raises before reaching the request below.
    self._raise_unimplemented_error()
    # Unreachable until implemented; kept so the intended request shape
    # is documented.
    uri = '/'.join([self.api_uri,
                    self.boxes_suffix,
                    box_key,
                    self.fields_suffix
                    ])
    code, data = self._create_field(uri, name, field_type, **kwargs)
    return code, data | Creates a box field with the provided attributes.
Args:
box_key specifying the box to add the field to
name required name string
field_type required type string [TEXT_INPUT, DATE or PERSON]
kwargs {}
return (status code, field dict) |
def run(self, args):
    """
    Download a project based on passed in args.
    :param args: Namespace arguments parsed from the command line.
    """
    project_name_or_id = self.create_project_name_or_id_from_args(args)
    folder = args.folder  # path to a folder to download data into
    # Default to project name with spaces replaced with '_' if not specified
    if not folder:
        folder = replace_invalid_path_chars(project_name_or_id.value.replace(' ', '_'))
    destination_path = format_destination_path(folder)
    # Only paths matching the include/exclude filters will be downloaded.
    path_filter = PathFilter(args.include_paths, args.exclude_paths)
    project = self.fetch_project(args, must_exist=True)
    project_download = ProjectDownload(self.remote_store, project, destination_path, path_filter)
    project_download.run() | Download a project based on passed in args.
:param args: Namespace arguments parsed from the command line. |
def generate_single_seasonal_average(args):
    """
    This function calculates the seasonal average for a single day of the year
    for all river segments

    ``args`` is a 4-tuple packed for use with multiprocessing:
    (qout_file, seasonal_average_file, day_of_year, mp_lock).
    """
    qout_file = args[0]
    seasonal_average_file = args[1]
    day_of_year = args[2]
    mp_lock = args[3]
    # The "season" is a 7-day window centred on day_of_year.
    min_day = day_of_year - 3
    max_day = day_of_year + 3
    with RAPIDDataset(qout_file) as qout_nc_file:
        time_indices = []
        for idx, t in enumerate(qout_nc_file.get_time_array()):
            var_time = gmtime(t)
            compare_yday = var_time.tm_yday
            # move day back one past because of leap year adds
            # a day after feb 29 (day 60)
            if isleap(var_time.tm_year) and compare_yday > 60:
                compare_yday -= 1
            # check if date within range of season
            if max_day > compare_yday >= min_day:
                time_indices.append(idx)
        if not time_indices:
            raise IndexError("No time steps found within range ...")
        streamflow_array = qout_nc_file.get_qout(time_index_array=time_indices)
        # Statistics are taken across the selected time steps (axis=1).
        avg_streamflow_array = np.mean(streamflow_array, axis=1)
        std_streamflow_array = np.std(streamflow_array, axis=1)
        max_streamflow_array = np.amax(streamflow_array, axis=1)
        min_streamflow_array = np.min(streamflow_array, axis=1)
        # Serialise writes to the shared netCDF file across worker processes.
        mp_lock.acquire()
        seasonal_avg_nc = Dataset(seasonal_average_file, 'a')
        seasonal_avg_nc.variables['average_flow'][:, day_of_year-1] = \
            avg_streamflow_array
        seasonal_avg_nc.variables['std_dev_flow'][:, day_of_year-1] = \
            std_streamflow_array
        seasonal_avg_nc.variables['max_flow'][:, day_of_year-1] = \
            max_streamflow_array
        seasonal_avg_nc.variables['min_flow'][:, day_of_year-1] = \
            min_streamflow_array
        seasonal_avg_nc.close()
        mp_lock.release() | This function calculates the seasonal average for a single day of the year
for all river segments |
def search_records(self, domain, record_type, name=None, data=None):
    """
    Returns a list of all records configured for the specified domain
    that match the supplied search criteria.
    """
    # Pure delegation: the domain object implements the actual search.
    return domain.search_records(
        record_type=record_type, name=name, data=data)
that match the supplied search criteria. |
def _parse_astorb_database_file(
self,
astorbgz):
"""* parse astorb database file*
**Key Arguments:**
- ``astorbgz`` -- path to the downloaded astorb database file
**Return:**
- ``astorbDictList`` -- the astorb database parsed as a list of dictionaries
"""
self.log.info('starting the ``_parse_astorb_database_file`` method')
print "Parsing the astorb.dat orbital elements file"
with gzip.open(astorbgz, 'rb') as f:
thisData = f.read()
astorbDictList = []
lines = thisData.split("\n")
for l in lines:
if len(l) < 50:
continue
d = {}
# PARSE THE LINE FROM astorb.dat (UNNEEDED VALUES COMMENTED OUT)
d["mpc_number"] = l[0:7].strip()
d["name"] = l[7:26].strip()
d["discoverer"] = l[26:41].strip()
d["H_abs_mag"] = l[41:48].strip()
d["G_slope"] = l[48:54].strip()
d["color_b_v"] = l[54:59].strip()
d["diameter_km"] = l[59:65].strip()
d["class"] = l[65:71].strip()
# d["int1"] = l[71:75].strip()
# d["int2"] = l[75:79].strip()
# d["int3"] = l[79:83].strip()
# d["int4"] = l[83:87].strip()
# d["int5"] = l[87:91].strip()
# d["int6"] = l[91:95].strip()
d["orbital_arc_days"] = l[95:101].strip()
d["number_obs"] = l[101:106].strip()
d["epoch"] = l[106:115].strip()
d["M_mean_anomaly_deg"] = l[115:126].strip()
d["o_arg_peri_deg"] = l[126:137].strip()
d["O_long_asc_node_deg"] = l[137:148].strip()
d["i_inclination_deg"] = l[148:158].strip()
d["e_eccentricity"] = l[158:169].strip()
d["a_semimajor_axis"] = l[169:182].strip()
d["orbit_comp_date"] = l[182:191].strip()
d["ephem_uncertainty_arcsec"] = l[191:199].strip()
d["ephem_uncertainty_change_arcsec_day"] = l[199:208].strip()
d["ephem_uncertainty_date"] = l[208:217].strip()
# d["peak_ephem_uncertainty_next_arcsec"] = l[217:225].strip()
# d["peak_ephem_uncertainty_next_date"] = l[225:234].strip()
# d["peak_ephem_uncertainty_10_yrs_from_ceu_arcsec"] = l[
# 217:225].strip()
# d["peak_ephem_uncertainty_10_yrs_from_ceu_date"] = l[
# 242:251].strip()
# d["peak_ephem_uncertainty_10_yrs_from_peu_arcsec"] = l[
# 251:259].strip()
# d["peak_ephem_uncertainty_10_yrs_from_peu_date"] = l[
# 259:].strip()
yyyy = int(d["epoch"][:4])
mm = int(d["epoch"][4:6])
dd = int(d["epoch"][6:])
d["epoch_xeph"] = "%(mm)s/%(dd)s/%(yyyy)s" % locals()
# CONVERT ASTORB DATABASE LINE TO XEPHEM DATABASE FORMAT
xephemStr = "%(mpc_number)s %(name)s,e,%(i_inclination_deg)s,%(O_long_asc_node_deg)s,%(o_arg_peri_deg)s,%(a_semimajor_axis)s,0,%(e_eccentricity)s,%(M_mean_anomaly_deg)s,%(epoch_xeph)s,2000.0,%(H_abs_mag)s,%(G_slope)s" % d
xephemStr = xephemStr.strip()
d["pyephem_string"] = xephemStr
d["astorb_string"] = l
# TIDYUP
if len(d["mpc_number"]) == 0:
d["mpc_number"] = None
for k, v in d.iteritems():
if v != None and len(v) == 0:
d[k] = None
astorbDictList.append(d)
print "Finshed parsing the astorb.dat orbital elements file"
self.log.info('completed the ``_parse_astorb_database_file`` method')
return astorbDictList | * parse astorb database file*
**Key Arguments:**
- ``astorbgz`` -- path to the downloaded astorb database file
**Return:**
- ``astorbDictList`` -- the astorb database parsed as a list of dictionaries |
def getFeatureID(self, location):
    """
    Returns the feature index associated with the provided location.
    In the case of a sphere, it is always the same if the location is valid.
    """
    feature = self.contains(location)
    if not feature:
        return self.EMPTY_FEATURE
    # Map each named feature onto its index; any other truthy value falls
    # back to EMPTY_FEATURE, matching the original branch chain.
    lookup = {
        'face': self.FLAT,
        'vertex': self.POINTY,
        'edge': self.EDGE,
        'surface': self.SURFACE,
    }
    return lookup.get(feature, self.EMPTY_FEATURE)
In the case of a sphere, it is always the same if the location is valid. |
def fuzzy_search_by_title(self, title, ignore_groups=None):
    """Find an entry by fuzzy match.
    This will check things such as:
    * case insensitive matching
    * typo checks
    * prefix matches
    If the ``ignore_groups`` argument is provided, then any matching
    entries in the ``ignore_groups`` list will not be returned. This
    argument can be used to filter out groups you are not interested in.
    Returns a list of matches (an empty list is returned if no matches are
    found).
    """
    entries = []
    # Exact matches trump
    for entry in self.entries:
        if entry.title == title:
            entries.append(entry)
    if entries:
        return self._filter_entries(entries, ignore_groups)
    # Case insensitive matches next.
    title_lower = title.lower()
    for entry in self.entries:
        # NOTE(review): title.lower() is recomputed here even though
        # title_lower is already available above.
        if entry.title.lower() == title.lower():
            entries.append(entry)
    if entries:
        return self._filter_entries(entries, ignore_groups)
    # Subsequence/prefix matches next.
    for entry in self.entries:
        if self._is_subsequence(title_lower, entry.title.lower()):
            entries.append(entry)
    if entries:
        return self._filter_entries(entries, ignore_groups)
    # Finally close matches that might have mispellings.
    # difflib scores similarity in [0, 1]; cutoff=0.7 keeps only titles
    # that are nearly identical to the query.
    entry_map = {entry.title.lower(): entry for entry in self.entries}
    matches = difflib.get_close_matches(
        title.lower(), entry_map.keys(), cutoff=0.7)
    if matches:
        return self._filter_entries(
            [entry_map[name] for name in matches], ignore_groups)
    return [] | Find an entry by by fuzzy match.
This will check things such as:
* case insensitive matching
* typo checks
* prefix matches
If the ``ignore_groups`` argument is provided, then any matching
entries in the ``ignore_groups`` list will not be returned. This
argument can be used to filter out groups you are not interested in.
Returns a list of matches (an empty list is returned if no matches are
found). |
def render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name, root_output_file_path,
                                                res_x=150, res_y=150, extra_args=None):
    """Use the pdftoppm program to render a PDF file to .png images. The
    root_output_file_path is prepended to all the output files, which have numbers
    and extensions added. Extra arguments can be passed as a list in extra_args.
    Return the command output.

    NOTE(review): the function name says ppm while this docstring says .png;
    the actual output format depends on the flags in extra_args -- confirm.
    """
    # Fresh list per call avoids the mutable-default-argument pitfall.
    if extra_args is None: extra_args = []
    # Lazily locate pdftoppm on first use; exits the program if unavailable.
    if not pdftoppm_executable:
        init_and_test_pdftoppm_executable(prefer_local=False, exit_on_fail=True)
    if old_pdftoppm_version:
        # We only have -r, not -rx and -ry.
        command = [pdftoppm_executable] + extra_args + ["-r", res_x, pdf_file_name,
                                                        root_output_file_path]
    else:
        command = [pdftoppm_executable] + extra_args + ["-rx", res_x, "-ry", res_y,
                                                        pdf_file_name, root_output_file_path]
    comm_output = get_external_subprocess_output(command)
    return comm_output | Use the pdftoppm program to render a PDF file to .png images. The
root_output_file_path is prepended to all the output files, which have numbers
and extensions added. Extra arguments can be passed as a list in extra_args.
Return the command output. |
def predict(self, n_periods=10, exogenous=None,
            return_conf_int=False, alpha=0.05, **kwargs):
    """Forecast future (transformed) values
    Generate predictions (forecasts) ``n_periods`` in the future.
    Note that if ``exogenous`` variables were used in the model fit, they
    will be expected for the predict procedure and will fail otherwise.
    Forecasts may be transformed by the endogenous steps along the way and
    might be on a different scale than raw training/test data.

    Parameters
    ----------
    n_periods : int, optional (default=10)
        The number of periods in the future to forecast.
    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If provided, these
        variables are used as additional features in the regression
        operation. This should not include a constant or trend. Note that
        if an ``ARIMA`` is fit on exogenous features, it must be provided
        exogenous features for making predictions.
    return_conf_int : bool, optional (default=False)
        Whether to get the confidence intervals of the forecasts.
    alpha : float, optional (default=0.05)
        The confidence intervals for the forecasts are (1 - alpha) %
    **kwargs : keyword args
        Extra keyword arguments used for each stage's ``transform`` stage
        and the estimator's ``predict`` stage. Similar to scikit-learn
        pipeline keyword args, the keys are compound, comprised of the
        stage name and the argument name separated by a "__". For instance,
        if you have a FourierFeaturizer whose stage is named
        "fourier", your transform kwargs could resemble::
            {"fourier__n_periods": 50}

    Returns
    -------
    forecasts : array-like, shape=(n_periods,)
        The array of transformed, forecasted values.
    conf_int : array-like, shape=(n_periods, 2), optional
        The confidence intervals for the forecasts. Only returned if
        ``return_conf_int`` is True.
    """
    # The pipeline must have been fit before forecasting.
    check_is_fitted(self, "steps_")
    # Push the arrays through the transformer stages, but ONLY the exog
    # transformer stages since we don't have a Y...
    Xt = exogenous
    named_kwargs = self._get_kwargs(**kwargs)
    for step_idx, name, transformer in self._iter(with_final=False):
        if isinstance(transformer, BaseExogTransformer):
            kw = named_kwargs[name]
            # If it's a featurizer, we may also need to add 'n_periods'
            if isinstance(transformer, BaseExogFeaturizer):
                num_p = kw.get("n_periods", None)
                # A user-supplied n_periods must agree with the forecast
                # horizon or the generated features would not align.
                if num_p is not None and num_p != n_periods:
                    raise ValueError("Manually set 'n_periods' kwarg for "
                                     "step '%s' differs from forecasting "
                                     "n_periods (%r != %r)"
                                     % (name, num_p, n_periods))
                kw["n_periods"] = n_periods
            _, Xt = transformer.transform(y=None, exogenous=Xt, **kw)
    # Now we should be able to run the prediction
    # The final step is the estimator; forward its stage-specific kwargs.
    nm, est = self.steps_[-1]
    return est.predict(
        n_periods=n_periods, exogenous=Xt,
        return_conf_int=return_conf_int,
        alpha=alpha, **named_kwargs[nm]) | Forecast future (transformed) values
Generate predictions (forecasts) ``n_periods`` in the future.
Note that if ``exogenous`` variables were used in the model fit, they
will be expected for the predict procedure and will fail otherwise.
Forecasts may be transformed by the endogenous steps along the way and
might be on a different scale than raw training/test data.
Parameters
----------
n_periods : int, optional (default=10)
The number of periods in the future to forecast.
exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables. If provided, these
variables are used as additional features in the regression
operation. This should not include a constant or trend. Note that
if an ``ARIMA`` is fit on exogenous features, it must be provided
exogenous features for making predictions.
return_conf_int : bool, optional (default=False)
Whether to get the confidence intervals of the forecasts.
alpha : float, optional (default=0.05)
The confidence intervals for the forecasts are (1 - alpha) %
**kwargs : keyword args
Extra keyword arguments used for each stage's ``transform`` stage
and the estimator's ``predict`` stage. Similar to scikit-learn
pipeline keyword args, the keys are compound, comprised of the
stage name and the argument name separated by a "__". For instance,
if you have a FourierFeaturizer whose stage is named
"fourier", your transform kwargs could resemble::
{"fourier__n_periods": 50}
Returns
-------
forecasts : array-like, shape=(n_periods,)
The array of transformed, forecasted values.
conf_int : array-like, shape=(n_periods, 2), optional
The confidence intervals for the forecasts. Only returned if
``return_conf_int`` is True. |
def mass_3d(self, R, Rs, rho0, r_trunc):
    """
    mass enclosed a 3d sphere or radius r

    :param R: radius at which the enclosed mass is evaluated
    :param Rs: scale radius
    :param rho0: characteristic density normalisation
    :param r_trunc: truncation radius -- NOTE(review): combined directly
        with the dimensionless x = R/Rs below, so it is presumably expressed
        in units of Rs; confirm.
    :return: mass enclosed within a sphere of radius R
    """
    # Dimensionless radius in units of the scale radius.
    x = R * Rs ** -1
    func = (r_trunc ** 2 * (-2 * x * (1 + r_trunc ** 2) + 4 * (1 + x) * r_trunc * np.arctan(x / r_trunc) -
            2 * (1 + x) * (-1 + r_trunc ** 2) * np.log(Rs) + 2 * (1 + x) * (-1 + r_trunc ** 2) * np.log(Rs * (1 + x)) +
            2 * (1 + x) * (-1 + r_trunc ** 2) * np.log(Rs * r_trunc) -
            (1 + x) * (-1 + r_trunc ** 2) * np.log(Rs ** 2 * (x ** 2 + r_trunc ** 2)))) / (2. * (1 + x) * (1 + r_trunc ** 2) ** 2)
    m_3d = 4*np.pi*Rs ** 3 * rho0 * func
    return m_3d | mass enclosed a 3d sphere or radius r
:param r:
:param Ra:
:param Rs:
:return: |
def to_schema(self):
    """Return process schema for this process."""
    # The process type must be colon-terminated in the schema.
    process_type = self.metadata.process_type
    if not process_type.endswith(':'):
        process_type = '{}:'.format(process_type)
    schema = {
        'slug': self.metadata.slug,
        'name': self.metadata.name,
        'type': process_type,
        'version': self.metadata.version,
        'data_name': '',
        'requirements': {
            'executor': {
                'docker': {
                    'image': 'resolwe/base:ubuntu-18.04',
                },
            },
        },
    }
    # Optional metadata attributes are copied verbatim when present
    # (overriding the defaults above for 'data_name' and 'requirements').
    for attribute in ('description', 'category', 'scheduling_class',
                      'persistence', 'requirements', 'data_name', 'entity'):
        value = getattr(self.metadata, attribute)
        if value is not None:
            schema[attribute] = value
    if self.inputs:
        schema['input'] = [field.to_schema() for field in self.inputs.values()]
    if self.outputs:
        schema['output'] = [field.to_schema() for field in self.outputs.values()]
    schema['run'] = {
        'language': 'python',
        'program': self.source or '',
    }
    return schema
def create_calc_dh_d_shape(estimator):
    """
    Return the function that can be used in the various gradient and hessian
    calculations to calculate the derivative of the transformation with respect
    to the shape parameters.
    Parameters
    ----------
    estimator : an instance of the estimation.LogitTypeEstimator class.
        Should contain a `rows_to_alts` attribute that is a 2D scipy sparse
        matrix that maps the rows of the `design` matrix to the alternatives
        available in this dataset.
    Returns
    -------
    Callable.
        Will accept a 1D array of systematic utility values, a 1D array of
        alternative IDs, (shape parameters if there are any) and miscellaneous
        args and kwargs. Should return a 2D array whose elements contain the
        derivative of the tranformed utility vector with respect to the vector
        of shape parameters. The dimensions of the returned vector should
        be `(design.shape[0], num_alternatives)`.
    """
    # Copy so the sparse mapping owned by the estimator is never mutated;
    # the partial below overwrites this copy's data in place on each call.
    dh_d_shape = estimator.rows_to_alts.copy()
    # Create a function that will take in the pre-formed matrix, replace its
    # data in-place with the new data, and return the correct dh_dshape on each
    # iteration of the minimizer
    calc_dh_d_shape = partial(_uneven_transform_deriv_shape,
                              output_array=dh_d_shape)
    return calc_dh_d_shape | Return the function that can be used in the various gradient and hessian
calculations to calculate the derivative of the transformation with respect
to the shape parameters.
Parameters
----------
estimator : an instance of the estimation.LogitTypeEstimator class.
Should contain a `rows_to_alts` attribute that is a 2D scipy sparse
matrix that maps the rows of the `design` matrix to the alternatives
available in this dataset.
Returns
-------
Callable.
Will accept a 1D array of systematic utility values, a 1D array of
alternative IDs, (shape parameters if there are any) and miscellaneous
args and kwargs. Should return a 2D array whose elements contain the
derivative of the tranformed utility vector with respect to the vector
of shape parameters. The dimensions of the returned vector should
be `(design.shape[0], num_alternatives)`. |
def wait_for(self, text, seconds):
    """
    Returns True when the specified text has appeared in a line of the
    output, or False when the specified number of seconds have passed
    without that occurring.
    """
    found = False
    stream = self.stream
    start_time = time.time()
    while not found:
        # Overall deadline check before waiting on new data.
        if time.time() - start_time > seconds:
            break
        # Wait briefly for the producer to signal fresh data, then claim
        # the buffer while draining it.
        stream.data_available.wait(0.5)
        stream.data_unoccupied.clear()
        while stream.data:
            line = stream.data.pop(0)
            value = line.getvalue()
            if text in value:
                found = True
            # Every drained line is recorded, match or not.
            self.lines.append(value)
        stream.data_available.clear()
        stream.data_unoccupied.set()
        # Re-check the deadline after draining in case that took a while.
        if time.time() - start_time > seconds:
            break
    return found | Returns True when the specified text has appeared in a line of the
output, or False when the specified number of seconds have passed
without that occurring. |
def point_before_card(self, card, x, y):
    """Return whether ``(x, y)`` is somewhere before ``card``, given how I
    know cards to be arranged.
    If the cards are being stacked down and to the right, that
    means I'm testing whether ``(x, y)`` is above or to the left
    of the card.
    """
    def before_on_y():
        # With no vertical stacking direction, y can never decide.
        if self.card_y_hint_step == 0:
            return False
        if self.card_y_hint_step > 0:
            return y < card.y       # stacking upward
        return y > card.top         # stacking downward

    step_x = self.card_x_hint_step
    if step_x > 0:
        # Stacking to the right: anything left of the card is "before".
        return True if x < card.x else before_on_y()
    if step_x < 0:
        # Stacking to the left: anything right of the card is "before".
        return True if x > card.right else before_on_y()
    # No horizontal stacking: fall back to the vertical test.
    return before_on_y()
know cards to be arranged.
If the cards are being stacked down and to the right, that
means I'm testing whether ``(x, y)`` is above or to the left
of the card. |
def find_largest_contig(self):
    """
    Determine the largest contig for each strain
    """
    for sample in self.metadata:
        analysis = sample[self.analysistype]
        # The lengths list is sorted in descending order, so its head is the
        # longest contig. NOTE(review): as in the original, the WHOLE
        # contig_lengths value is assigned here, not just the first entry.
        analysis.longest_contig = analysis.contig_lengths
def Stiel_Thodos(T, Tc, Pc, MW):
    r'''Calculate the low-pressure gas viscosity with the empirical
    Stiel-Thodos correlation [1]_.

    Parameters
    ----------
    T : float
        Temperature of the fluid [K]
    Tc : float
        Critical temperature of the fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    MW : float
        Molecular weight of fluid [g/mol]

    Returns
    -------
    mu_g : float
        Viscosity of gas, [Pa*s]

    Notes
    -----
    Untested. Claimed applicability from 0.2 to 5 atm; developed with data
    from 52 nonpolar and 53 polar gases. Internal units are poise and atm.
    Seems to give reasonable results.

    Examples
    --------
    >>> Stiel_Thodos(300., 556.35, 4.5596E6, 153.8) #CCl4
    1.0408926223608723e-05

    References
    ----------
    .. [1] Stiel, Leonard I., and George Thodos. "The Viscosity of Nonpolar
       Gases at Normal Pressures." AIChE Journal 7, no. 4 (1961): 611-15.
       doi:10.1002/aic.690070416.
    '''
    # Work in the correlation's native pressure unit (atm).
    Pc_atm = Pc / 101325.
    Tr = T / Tc
    # Inverse-viscosity group xi; unit handling is embedded in the constants.
    xi = Tc ** (1. / 6.) / (MW ** 0.5 * Pc_atm ** (2. / 3.))
    # Two regimes, split at a reduced temperature of 1.5.
    if Tr > 1.5:
        mu_poise = 17.78E-5 * (4.58 * Tr - 1.67) ** 0.625 / xi
    else:
        mu_poise = 34E-5 * Tr ** 0.94 / xi
    # Convert the internal value to Pa*s.
    return mu_poise / 1000.
developed in [1]_.
.. math::
TODO
Parameters
----------
T : float
Temperature of the fluid [K]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
MW : float
Molwcular weight of fluid [g/mol]
Returns
-------
mu_g : float
Viscosity of gas, [Pa*S]
Notes
-----
Untested.
Claimed applicability from 0.2 to 5 atm.
Developed with data from 52 nonpolar, and 53 polar gases.
internal units are poise and atm.
Seems to give reasonable results.
Examples
--------
>>> Stiel_Thodos(300., 556.35, 4.5596E6, 153.8) #CCl4
1.0408926223608723e-05
References
----------
.. [1] Stiel, Leonard I., and George Thodos. "The Viscosity of Nonpolar
Gases at Normal Pressures." AIChE Journal 7, no. 4 (1961): 611-15.
doi:10.1002/aic.690070416. |
def add(self, name, path=None, **kwargs):
"""add new project with given name and path to database
if the path is not given, current working directory will be taken
...as default
"""
# A default path may alternatively be supplied via the 'default_path' kwarg.
path = path or kwargs.pop('default_path', None)
if not self._path_is_valid(path):
return
if not self._is_unique(name, path):
# Fetch the conflicting project so its path can be shown in the error.
p = Project.select().where(
(Project.name == name) |
(Project.path == path)
)[0]
self._print(self._ERROR_PROJECT_EXISTS.format(name, p.path), 'red')
return
Project.create(name=name, path=path)
self._print(self._SUCCESS_PROJECT_ADDED.format(name), 'green') | add new project with given name and path to database
if the path is not given, current working directory will be taken
...as default |
def get_dashboard_panels_visibility_by_section(section_name):
"""
Return a list of pairs as values that represents the role-permission
view relation for the panel section passed in.
:param section_name: the panels section id.
:return: a list of tuples.
"""
registry_info = get_dashboard_registry_record()
if section_name not in registry_info:
# Registry hasn't been set, do it at least for this section
registry_info = \
setup_dashboard_panels_visibility_registry(section_name)
pairs = registry_info.get(section_name)
pairs = get_strings(pairs)
if pairs is None:
# In the registry, but with None value?
# Re-initialize the entry and retry once via recursion.
setup_dashboard_panels_visibility_registry(section_name)
return get_dashboard_panels_visibility_by_section(section_name)
pairs = pairs.split(',')
if len(pairs) == 0 or len(pairs) % 2 != 0:
# Non-valid or malformed value
setup_dashboard_panels_visibility_registry(section_name)
return get_dashboard_panels_visibility_by_section(section_name)
# Fold the flat comma-separated list into consecutive 2-tuples.
result = [
(pairs[i], pairs[i + 1]) for i in range(len(pairs)) if i % 2 == 0]
return result | Return a list of pairs as values that represents the role-permission
view relation for the panel section passed in.
:param section_name: the panels section id.
:return: a list of tuples. |
def _make_request(self, bbox, meta_info, timestamps):
""" Make OGC request to create input for cloud detector classifier
:param bbox: Bounding box
:param meta_info: Meta-info dictionary of input eopatch
:param timestamps: Timestamps of the input eopatch; only frames with
matching dates are downloaded.
:return: Requested data
"""
service_type = ServiceType(meta_info['service_type'])
# Raise error if resolutions are not specified
if self.cm_size_x is None and self.cm_size_y is None:
raise ValueError("Specify size_x and size_y for data request")
# If WCS request, make sure both resolutions are set
if service_type == ServiceType.WCS:
if self.cm_size_y is None:
self.cm_size_y = self.cm_size_x
elif self.cm_size_x is None:
self.cm_size_x = self.cm_size_y
custom_url_params = {CustomUrlParam.SHOWLOGO: False,
CustomUrlParam.TRANSPARENT: False,
CustomUrlParam.EVALSCRIPT: self.model_evalscript}
# Dispatch on service type to build either a WMS or a WCS request object.
request = {ServiceType.WMS: self._get_wms_request,
ServiceType.WCS: self._get_wcs_request}[service_type](bbox,
meta_info['time_interval'],
self.cm_size_x,
self.cm_size_y,
meta_info['maxcc'],
meta_info['time_difference'],
custom_url_params)
# Only download frames whose dates also occur in the eopatch timestamps.
request_dates = request.get_dates()
download_frames = get_common_timestamps(request_dates, timestamps)
request_return = request.get_data(raise_download_errors=False, data_filter=download_frames)
# Drop frames that failed to download (None entries), highest index first
# so earlier indices stay valid while deleting.
bad_data = [idx for idx, value in enumerate(request_return) if value is None]
for idx in reversed(sorted(bad_data)):
LOGGER.warning('Data from %s could not be downloaded for %s!', str(request_dates[idx]), self.data_feature)
del request_return[idx]
del request_dates[idx]
return np.asarray(request_return), request_dates | Make OGC request to create input for cloud detector classifier
:param bbox: Bounding box
:param meta_info: Meta-info dictionary of input eopatch
:return: Requested data |
def force_encoding(self, encoding):
"""Sets a fixed encoding. The change is emitted right away.
From now on, this buffer will not switch the code page anymore.
However, it will still keep track of the current code page.
"""
if not encoding:
# A falsy encoding re-enables automatic code page switching.
self.disabled = False
else:
# Emit the encoding change once, then pin it.
self.write_with_encoding(encoding, None)
self.disabled = True | Sets a fixed encoding. The change is emitted right away.
From now on, this buffer will not switch the code page anymore.
However, it will still keep track of the current code page. |
def calc_in_wc_v1(self):
"""Calculate the actual water release from the snow layer due to the
exceedance of the snow layers capacity for (liquid) water.
Required control parameters:
|NmbZones|
|ZoneType|
|WHC|
Required state sequence:
|SP|
Required flux sequence
|TF|
Calculated fluxes sequences:
|In_|
Updated state sequence:
|WC|
Basic equations:
:math:`\\frac{dWC}{dt} = -In` \n
:math:`-In = max(WC - WHC \\cdot SP, 0)`
Examples:
Initialize six zones of different types and frozen water
contents of the snow layer and set the relative water holding
capacity to 20% of the respective frozen water content:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(6)
>>> zonetype(ILAKE, GLACIER, FIELD, FOREST, FIELD, FIELD)
>>> whc(0.2)
>>> states.sp = 0.0, 10.0, 10.0, 10.0, 5.0, 0.0
Also set the actual value of stand precipitation to 5 mm/d:
>>> fluxes.tf = 5.0
When there is no (liquid) water content in the snow layer, no water
can be released:
>>> states.wc = 0.0
>>> model.calc_in_wc_v1()
>>> fluxes.in_
in_(5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
>>> states.wc
wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
When there is a (liquid) water content in the snow layer, the water
release depends on the frozen water content. Note the special
cases of the first zone being an internal lake, for which the snow
routine does not apply, and of the last zone, which has no ice
content and thus effectively not really a snow layer:
>>> states.wc = 5.0
>>> model.calc_in_wc_v1()
>>> fluxes.in_
in_(5.0, 3.0, 3.0, 3.0, 4.0, 5.0)
>>> states.wc
wc(0.0, 2.0, 2.0, 2.0, 1.0, 0.0)
When the relative water holding capacity is assumed to be zero,
all liquid water is released:
>>> whc(0.0)
>>> states.wc = 5.0
>>> model.calc_in_wc_v1()
>>> fluxes.in_
in_(5.0, 5.0, 5.0, 5.0, 5.0, 5.0)
>>> states.wc
wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
Note that for the single lake zone, stand precipitation is
directly passed to `in_` in all three examples.
"""
# Shortcut references to the fast-access parameter/sequence containers.
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nmbzones):
if con.zonetype[k] != ILAKE:
# Release only the liquid water exceeding the capacity WHC*SP.
flu.in_[k] = max(sta.wc[k]-con.whc[k]*sta.sp[k], 0.)
sta.wc[k] -= flu.in_[k]
else:
# Internal lakes: no snow layer; pass stand precipitation through.
flu.in_[k] = flu.tf[k]
sta.wc[k] = 0. | Calculate the actual water release from the snow layer due to the
exceedance of the snow layers capacity for (liquid) water.
Required control parameters:
|NmbZones|
|ZoneType|
|WHC|
Required state sequence:
|SP|
Required flux sequence
|TF|
Calculated fluxes sequences:
|In_|
Updated state sequence:
|WC|
Basic equations:
:math:`\\frac{dWC}{dt} = -In` \n
:math:`-In = max(WC - WHC \\cdot SP, 0)`
Examples:
Initialize six zones of different types and frozen water
contents of the snow layer and set the relative water holding
capacity to 20% of the respective frozen water content:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(6)
>>> zonetype(ILAKE, GLACIER, FIELD, FOREST, FIELD, FIELD)
>>> whc(0.2)
>>> states.sp = 0.0, 10.0, 10.0, 10.0, 5.0, 0.0
Also set the actual value of stand precipitation to 5 mm/d:
>>> fluxes.tf = 5.0
When there is no (liquid) water content in the snow layer, no water
can be released:
>>> states.wc = 0.0
>>> model.calc_in_wc_v1()
>>> fluxes.in_
in_(5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
>>> states.wc
wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
When there is a (liquid) water content in the snow layer, the water
release depends on the frozen water content. Note the special
cases of the first zone being an internal lake, for which the snow
routine does not apply, and of the last zone, which has no ice
content and thus effectively not really a snow layer:
>>> states.wc = 5.0
>>> model.calc_in_wc_v1()
>>> fluxes.in_
in_(5.0, 3.0, 3.0, 3.0, 4.0, 5.0)
>>> states.wc
wc(0.0, 2.0, 2.0, 2.0, 1.0, 0.0)
When the relative water holding capacity is assumed to be zero,
all liquid water is released:
>>> whc(0.0)
>>> states.wc = 5.0
>>> model.calc_in_wc_v1()
>>> fluxes.in_
in_(5.0, 5.0, 5.0, 5.0, 5.0, 5.0)
>>> states.wc
wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
Note that for the single lake zone, stand precipitation is
directly passed to `in_` in all three examples. |
def fit(self, X, y, model_filename=None):
"""Fit FastText according to X, y
Parameters:
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem)
"""
# NOTE(review): hard-coded temp file in the working directory; concurrent
# fits would clobber each other. Consider the tempfile module instead.
train_file = "temp.train"
# fasttext's file format is line-based, so newlines inside texts must go.
X = [x.replace("\n", " ") for x in X]
# Only the first label of each item is used for training.
y = [item[0] for item in y]
# Labels may not contain spaces in the fasttext training format.
y = [_.replace(" ", "-") for _ in y]
lines = ["__label__{} , {}".format(j, i) for i, j in zip(X, y)]
content = "\n".join(lines)
write(train_file, content)
if model_filename:
self.estimator = fasttext.supervised(train_file, model_filename)
else:
self.estimator = fasttext.supervised(train_file)
# NOTE(review): the temp file is not removed if fasttext.supervised
# raises; a try/finally would be safer.
os.remove(train_file) | Fit FastText according to X, y
Parameters:
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem) |
def random(self, shape, tf_fn, kwargs):
"""Call a random tf operation (e.g. tf.random.uniform).
Args:
shape: a Shape
tf_fn: a function such as tf.random.uniform
kwargs: kwargs to pass to tf_fn, except for seed
Returns:
a LaidOutTensor
"""
slice_shape = self.slice_shape(shape)
# One per-op seed, combined below with each slice's begin offset.
op_seed = random.random()
def my_fn(pnum):
# seeds are necessary to make sure that slices that should have the
# same values actually do have the same values.
seed = hash("%s,%s" % (op_seed, self.slice_begin(shape, pnum)))
return tf_fn(slice_shape, seed=seed, **kwargs)
return self.slicewise(my_fn, self.laid_out_pnum()) | Call a random tf operation (e.g. tf.random.uniform).
Args:
shape: a Shape
tf_fn: a function such as tf.random.uniform
kwargs: kwargs to pass to tf_fn, except for seed
Returns:
a LaidOutTensor |
def dist_between(h,seg1,seg2):
"""
Calculates the distance between two segments. I stole this function from
a post by Michael Hines on the NEURON forum
(www.neuron.yale.edu/phpbb/viewtopic.php?f=2&t=2114)
"""
# Set the origin of NEURON's path-distance measurement at seg1 ...
h.distance(0, seg1.x, sec=seg1.sec)
# ... then measure the path distance from that origin to seg2.
return h.distance(seg2.x, sec=seg2.sec) | Calculates the distance between two segments. I stole this function from
a post by Michael Hines on the NEURON forum
(www.neuron.yale.edu/phpbb/viewtopic.php?f=2&t=2114) |
def doc_dir(self):
"""The absolute directory of the document, or None if it has no ref."""
from os.path import abspath
if not self.ref:
# No reference set: the document has no directory.
return None
u = parse_app_url(self.ref)
return abspath(dirname(u.path)) | The absolute directory of the document
def _wait_for_macaroon(wait_url):
''' Returns a macaroon from a legacy wait endpoint.
:raises InteractionError: if the endpoint does not answer with HTTP 200.
'''
# Advertise the newest bakery protocol version we speak.
headers = {
BAKERY_PROTOCOL_HEADER: str(bakery.LATEST_VERSION)
}
resp = requests.get(url=wait_url, headers=headers)
if resp.status_code != 200:
raise InteractionError('cannot get {}'.format(wait_url))
return bakery.Macaroon.from_dict(resp.json().get('Macaroon')) | Returns a macaroon from a legacy wait endpoint.
def tool_factory(clsname, name, driver, base=GromacsCommand):
""" Factory for GromacsCommand derived types. """
clsdict = {
'command_name': name,
'driver': driver,
# __doc__ is a property so the gmx docs are resolved lazily per class.
'__doc__': property(base._get_gmx_docs)
}
return type(clsname, (base,), clsdict) | Factory for GromacsCommand derived types.
async def get_power_parameters_for(
cls, system_ids: typing.Sequence[str]):
"""
Get a list of power parameters for specified systems.
*WARNING*: This method is considered 'alpha' and may be modified
in future.
:param system_ids: The system IDs to get power parameters for
"""
# Avoid a round trip to the handler for an empty query.
if len(system_ids) == 0:
return {}
data = await cls._handler.power_parameters(id=system_ids)
return data | Get a list of power parameters for specified systems.
*WARNING*: This method is considered 'alpha' and may be modified
in future.
:param system_ids: The system IDs to get power parameters for |
def _to_url(self):
""" Serialises this query into a request-able URL including parameters """
url = self._target_url
# Deep-copy the filters so the query object itself is not mutated below.
params = collections.defaultdict(list, copy.deepcopy(self._filters))
if self._order_by is not None:
params['sort'] = self._order_by
for k, vl in self._extra.items():
params[k] += vl
if params:
# doseq=True expands list values into repeated query parameters.
url += "?" + urllib.parse.urlencode(params, doseq=True)
return url | Serialises this query into a request-able URL including parameters
def is_table_existed(self, tablename):
"""
Check whether the given table name exists in this database. Return boolean.
"""
all_tablenames = self.list_tables()
# Compare case-insensitively.
# NOTE(review): assumes list_tables() returns lowercase names — confirm.
tablename = tablename.lower()
if tablename in all_tablenames:
return True
else:
return False | Check whether the given table name exists in this database. Return boolean.
def encode_plus(s):
"""
Literally encodes the plus sign
input is a string
returns the string with plus signs encoded as '%2B'
"""
# '+' is a regex metacharacter, so it must be escaped in the pattern.
regex = r"\+"
pat = re.compile(regex)
return pat.sub("%2B", s) | Literally encodes the plus sign
input is a string
returns the string with plus signs encoded |
def popitem(self):
"""D.popitem() -> (k, v)
Remove and return some (key, value) pair
as a 2-tuple; but raise KeyError if D is empty.
"""
try:
# Iteration yields row-like values; the key lives at self._keycol.
value = next(iter(self))
key = value[self._keycol]
except StopIteration:
# Empty container: match dict.popitem()'s contract.
raise KeyError
del self[key]
return key, value | D.popitem() -> (k, v)
Remove and return some (key, value) pair
as a 2-tuple; but raise KeyError if D is empty. |
def canonicalize(cls, dataset, data, data_coords=None, virtual_coords=[]):
"""
Canonicalize takes an array of values as input and reorients
and transposes it to match the canonical format expected by
plotting functions. In certain cases the dimensions defined
via the kdims of an Element may not match the dimensions of
the underlying data. A set of data_coords may be passed in to
define the dimensionality of the data, which can then be used
to np.squeeze the data to remove any constant dimensions. If
the data is also irregular, i.e. contains multi-dimensional
coordinates, a set of virtual_coords can be supplied, required
by some interfaces (e.g. xarray) to index irregular datasets
with a virtual integer index. This ensures these coordinates
are not simply dropped.
"""
# NOTE(review): mutable default argument (virtual_coords=[]); harmless
# only as long as it is never mutated inside this function.
if data_coords is None:
data_coords = dataset.dimensions('key', label='name')[::-1]
# Transpose data
dims = [name for name in data_coords
if isinstance(cls.coords(dataset, name), get_array_types())]
dropped = [dims.index(d) for d in dims
if d not in dataset.kdims+virtual_coords]
if dropped:
data = np.squeeze(data, axis=tuple(dropped))
if not any(cls.irregular(dataset, d) for d in dataset.kdims):
inds = [dims.index(kd.name) for kd in dataset.kdims]
# Shift indices to account for the axes squeezed out above.
inds = [i - sum([1 for d in dropped if i>=d]) for i in inds]
if inds:
data = data.transpose(inds[::-1])
# Reorient data
invert = False
slices = []
for d in dataset.kdims[::-1]:
coords = cls.coords(dataset, d)
# Descending 1D coordinate arrays are flipped to ascending order.
if np.all(coords[1:] < coords[:-1]) and not coords.ndim > 1:
slices.append(slice(None, None, -1))
invert = True
else:
slices.append(slice(None))
data = data[tuple(slices)] if invert else data
# Allow lower dimensional views into data
if len(dataset.kdims) < 2:
data = data.flatten()
return data | Canonicalize takes an array of values as input and reorients
and transposes it to match the canonical format expected by
plotting functions. In certain cases the dimensions defined
via the kdims of an Element may not match the dimensions of
the underlying data. A set of data_coords may be passed in to
define the dimensionality of the data, which can then be used
to np.squeeze the data to remove any constant dimensions. If
the data is also irregular, i.e. contains multi-dimensional
coordinates, a set of virtual_coords can be supplied, required
by some interfaces (e.g. xarray) to index irregular datasets
with a virtual integer index. This ensures these coordinates
are not simply dropped. |
def get_buildroot(self, build_id):
"""
Build the buildroot entry of the metadata.
:param build_id: the OSBS build id recorded under extra.osbs.
:return: dict, partial metadata
"""
docker_info = self.tasker.get_info()
host_arch, docker_version = get_docker_architecture(self.tasker)
buildroot = {
'id': 1,
'host': {
'os': docker_info['OperatingSystem'],
'arch': host_arch,
},
'content_generator': {
'name': PROG,
'version': atomic_reactor_version,
},
'container': {
'type': 'docker',
# os.uname()[4] is the machine hardware name (e.g. 'x86_64').
'arch': os.uname()[4],
},
'tools': [
{
'name': tool['name'],
'version': tool['version'],
}
for tool in get_version_of_tools()] + [
{
'name': 'docker',
'version': docker_version,
},
],
'components': self.get_rpms(),
'extra': {
'osbs': {
'build_id': build_id,
'builder_image_id': self.get_builder_image_id(),
}
},
}
return buildroot | Build the buildroot entry of the metadata.
:return: dict, partial metadata |
def info(self):
"""Retrieve information about the SD interface.
Args::
no argument
Returns::
2-element tuple holding:
number of datasets inside the file
number of file attributes
C library equivalent : SDfileinfo
"""
status, n_datasets, n_file_attrs = _C.SDfileinfo(self._id)
# _checkErr raises if the C call reported failure via its status code.
_checkErr('info', status, "cannot execute")
return n_datasets, n_file_attrs | Retrieve information about the SD interface.
Args::
no argument
Returns::
2-element tuple holding:
number of datasets inside the file
number of file attributes
C library equivalent : SDfileinfo |
def create_token_mapping(docgraph_with_old_names, docgraph_with_new_names,
verbose=False):
"""
given two document graphs which annotate the same text and which use the
same tokenization, creates a dictionary with a mapping from the token
IDs used in the first graph to the token IDs used in the second graph.
Parameters
----------
docgraph_with_old_names : DiscourseDocumentGraph
a document graph with token IDs that will be replaced later on
docgraph_with_new_names : DiscourseDocumentGraph
a document graph with token IDs that will replace the token IDs
used in ``docgraph_with_old_names`` later on
verbose : bool
if True, mismatches are reported with keyword-in-context lines
Returns
-------
old2new : dict
maps from a token ID used in ``docgraph_with_old_names`` to the token
ID used in ``docgraph_with_new_names`` to reference the same token
"""
def kwic_string(docgraph, keyword_index):
# Keyword-in-context line used for verbose mismatch reporting.
tokens = [tok for (tokid, tok) in list(docgraph.get_tokens())]
before, keyword, after = get_kwic(tokens, keyword_index)
return "{0} (Index: {1}): {2} [[{3}]] {4}\n".format(
docgraph.name, keyword_index, ' '.join(before), keyword,
' '.join(after))
# generators of (token ID, token) tuples
old_token_gen = docgraph_with_old_names.get_tokens()
new_token_gen = docgraph_with_new_names.get_tokens()
old2new = {}
for i, (new_tok_id, new_tok) in enumerate(new_token_gen):
# NOTE(review): generator.next() is Python 2 only; under Python 3
# this would have to be next(old_token_gen).
old_tok_id, old_tok = old_token_gen.next()
if new_tok != old_tok: # token mismatch
if verbose:
raise ValueError(u"Tokenization mismatch:\n{0}{1}".format(
kwic_string(docgraph_with_old_names, i),
kwic_string(docgraph_with_new_names, i)))
raise ValueError(
u"Tokenization mismatch: {0} ({1}) vs. {2} ({3})\n"
"\t{4} != {5}".format(
docgraph_with_new_names.name, docgraph_with_new_names.ns,
docgraph_with_old_names.name, docgraph_with_old_names.ns,
new_tok, old_tok).encode('utf-8'))
else:
old2new[old_tok_id] = new_tok_id
return old2new | given two document graphs which annotate the same text and which use the
same tokenization, creates a dictionary with a mapping from the token
IDs used in the first graph to the token IDs used in the second graph.
Parameters
----------
docgraph_with_old_names : DiscourseDocumentGraph
a document graph with token IDs that will be replaced later on
docgraph_with_new_names : DiscourseDocumentGraph
a document graph with token IDs that will replace the token IDs
used in ``docgraph_with_old_names`` later on
Returns
-------
old2new : dict
maps from a token ID used in ``docgraph_with_old_names`` to the token
ID used in ``docgraph_with_new_names`` to reference the same token |
def get_issue_comments(self):
"""
:calls: `GET /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment`
"""
# NOTE(review): PaginatedList presumably defers HTTP requests until
# iterated — confirm against the PaginatedList implementation.
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self.issue_url + "/comments",
None
) | :calls: `GET /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment` |
def fn_from_str(name: str) -> Callable[..., Any]:
"""Returns a function object with the name given in string.
:raises ConfigError: if ``name`` is not of the form ``module:function``.
"""
try:
# ValueError: zero or more than one ':' separator in the spec.
module_name, fn_name = name.split(':')
except ValueError:
raise ConfigError('Expected function description in a `module.submodules:function_name` form, but got `{}`'
.format(name))
return getattr(importlib.import_module(module_name), fn_name) | Returns a function object with the name given in string.
def to_new(self, data, k=None, sigma=None, return_distances=False):
"""Compute the affinities of new samples to the initial samples.
This is necessary for embedding new data points into an existing
embedding.
Parameters
----------
data: np.ndarray
The data points to be added to the existing embedding.
k: int
The number of nearest neighbors to consider for each kernel.
sigma: float
The bandwidth to use for the Gaussian kernels in the ambient space.
return_distances: bool
If needed, the function can return the indices of the nearest
neighbors and their corresponding distances.
Returns
-------
P: array_like
An :math:`N \\times M` affinity matrix expressing interactions
between :math:`N` new data points the initial :math:`M` data
samples.
indices: np.ndarray
Returned if ``return_distances=True``. The indices of the :math:`k`
nearest neighbors in the existing embedding for every new data
point.
distances: np.ndarray
Returned if ``return_distances=True``. The distances to the
:math:`k` nearest neighbors in the existing embedding for every new
data point.
"""
n_samples = data.shape[0]
n_reference_samples = self.n_samples
if k is None:
k = self.k
elif k >= n_reference_samples:
raise ValueError(
"`k` (%d) cannot be larger than the number of reference "
"samples (%d)." % (k, self.n_samples)
)
if sigma is None:
sigma = self.sigma
# Find nearest neighbors and the distances to the new points
neighbors, distances = self.knn_index.query(data, k)
# Compute asymmetric pairwise input similarities
conditional_P = np.exp(-distances ** 2 / (2 * sigma ** 2))
# Convert weights to probabilities
conditional_P /= np.sum(conditional_P, axis=1)[:, np.newaxis]
# CSR with exactly k entries per row: data, column indices, row pointers.
P = sp.csr_matrix(
(conditional_P.ravel(), neighbors.ravel(), range(0, n_samples * k + 1, k)),
shape=(n_samples, n_reference_samples),
)
if return_distances:
return P, neighbors, distances
return P | Compute the affinities of new samples to the initial samples.
This is necessary for embedding new data points into an existing
embedding.
Parameters
----------
data: np.ndarray
The data points to be added to the existing embedding.
k: int
The number of nearest neighbors to consider for each kernel.
sigma: float
The bandwidth to use for the Gaussian kernels in the ambient space.
return_distances: bool
If needed, the function can return the indices of the nearest
neighbors and their corresponding distances.
Returns
-------
P: array_like
An :math:`N \\times M` affinity matrix expressing interactions
between :math:`N` new data points the initial :math:`M` data
samples.
indices: np.ndarray
Returned if ``return_distances=True``. The indices of the :math:`k`
nearest neighbors in the existing embedding for every new data
point.
distances: np.ndarray
Returned if ``return_distances=True``. The distances to the
:math:`k` nearest neighbors in the existing embedding for every new
data point. |
def create_saml_provider(name, saml_metadata_document, region=None, key=None, keyid=None, profile=None):
'''
Create SAML provider
Returns True on success, False on any boto server error.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.create_saml_provider my_saml_provider_name saml_metadata_document
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
# boto's call takes the metadata document first, then the provider name.
conn.create_saml_provider(saml_metadata_document, name)
log.info('Successfully created %s SAML provider.', name)
return True
except boto.exception.BotoServerError as e:
aws = __utils__['boto.get_error'](e)
log.debug(aws)
log.error('Failed to create SAML provider %s.', name)
return False | Create SAML provider
CLI Example:
.. code-block:: bash
salt myminion boto_iam.create_saml_provider my_saml_provider_name saml_metadata_document |
def bounding_box(img):
r"""
Return the bounding box incorporating all non-zero values in the image.
Parameters
----------
img : array_like
An array containing non-zero objects.
Returns
-------
bbox : a list of slicer objects defining the bounding box
"""
# Coordinates of all non-zero elements, one row per location.
locations = numpy.argwhere(img)
mins = locations.min(0)
# +1 because slice stops are exclusive.
maxs = locations.max(0) + 1
return [slice(x, y) for x, y in zip(mins, maxs)] | r"""
Return the bounding box incorporating all non-zero values in the image.
Parameters
----------
img : array_like
An array containing non-zero objects.
Returns
-------
bbox : a list of slicer objects defining the bounding box |
def search_account_domains(self, domain=None, latitude=None, longitude=None, name=None):
"""
Search account domains.
Returns a list of up to 5 matching account domains
Partial match on name / domain are supported
"""
# Only the filters that were actually supplied become query parameters.
path = {}
data = {}
params = {}
# OPTIONAL - name
"""campus name"""
if name is not None:
params["name"] = name
# OPTIONAL - domain
"""no description"""
if domain is not None:
params["domain"] = domain
# OPTIONAL - latitude
"""no description"""
if latitude is not None:
params["latitude"] = latitude
# OPTIONAL - longitude
"""no description"""
if longitude is not None:
params["longitude"] = longitude
self.logger.debug("GET /api/v1/accounts/search with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/search".format(**path), data=data, params=params, no_data=True) | Search account domains.
Returns a list of up to 5 matching account domains
Partial match on name / domain are supported |
def ascend_bip32(bip32_pub_node, secret_exponent, child):
"""
Given a BIP32Node with public derivation child "child" with a known private key,
return the secret exponent for the bip32_pub_node.
"""
# Child index packed big-endian, as in BIP32 derivation.
i_as_bytes = struct.pack(">l", child)
sec = public_pair_to_sec(bip32_pub_node.public_pair(), compressed=True)
data = sec + i_as_bytes
I64 = hmac.HMAC(key=bip32_pub_node._chain_code, msg=data, digestmod=hashlib.sha512).digest()
I_left_as_exponent = from_bytes_32(I64[:32])
# Public derivation adds I_left to the parent exponent, so subtracting
# it modulo the curve order recovers the parent's secret exponent.
return (secret_exponent - I_left_as_exponent) % bip32_pub_node._generator.order() | Given a BIP32Node with public derivation child "child" with a known private key,
return the secret exponent for the bip32_pub_node. |
def _get_next_available_channel_id(self):
"""Returns the next available channel id.
:raises AMQPConnectionError: Raises if there is no available channel.
:rtype: int
"""
# Scan upward from the last id handed out (or 1 on a fresh connection).
for index in compatibility.RANGE(self._last_channel_id or 1,
self.max_allowed_channels + 1):
if index in self._channels:
continue
self._last_channel_id = index
return index
# Reached the top without a free slot: retry once from the beginning.
if self._last_channel_id:
self._last_channel_id = None
return self._get_next_available_channel_id()
raise AMQPConnectionError(
'reached the maximum number of channels %d' %
self.max_allowed_channels) | Returns the next available available channel id.
:raises AMQPConnectionError: Raises if there is no available channel.
:rtype: int |
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
# NOTE(review): `unicode` exists only on Python 2; this module appears to
# target py2 or define a compat alias elsewhere — confirm.
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
# Standard '$7$'-style hashes use the dedicated checker; anything else
# falls back to the generic MCF implementation.
if mcf_mod._scrypt_mcf_7_is_standard(mcf) and not _scrypt_ll:
return _scrypt_str_chk(mcf, password, len(password)) == 0
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password) | Returns True if the password matches the given MCF hash
def clock(rpc):
"""
This task runs forever and notifies all clients subscribed to
'clock' once a second.
"""
# Old-style (generator-based) coroutine using `yield from`.
while True:
yield from rpc.notify('clock', str(datetime.datetime.now()))
yield from asyncio.sleep(1) | This task runs forever and notifies all clients subscribed to
'clock' once a second. |
def merge_dict(dict1, dict2):
# type: (dict, dict) -> dict
"""Recursively merge dictionaries: dict2 on to dict1. This differs
from dict.update() in that values that are dicts are recursively merged.
Note that only dict value types are merged, not lists, etc.
:param dict dict1: dictionary to merge to
:param dict dict2: dictionary to merge with
:rtype: dict
:return: merged dictionary
"""
if not isinstance(dict1, dict) or not isinstance(dict2, dict):
raise ValueError('dict1 or dict2 is not a dictionary')
# Deep-copy so neither input is mutated by the merge.
result = copy.deepcopy(dict1)
for k, v in dict2.items():
if k in result and isinstance(result[k], dict):
result[k] = merge_dict(result[k], v)
else:
result[k] = copy.deepcopy(v)
return result | Recursively merge dictionaries: dict2 on to dict1. This differs
from dict.update() in that values that are dicts are recursively merged.
Note that only dict value types are merged, not lists, etc.
:param dict dict1: dictionary to merge to
:param dict dict2: dictionary to merge with
:rtype: dict
:return: merged dictionary |
def prime_field_inv(a: int, n: int) -> int:
"""
Extended euclidean algorithm to find modular inverses for integers
"""
# By convention 0 maps to 0 (it has no true modular inverse).
if a == 0:
return 0
lm, hm = 1, 0
low, high = a % n, n
# Invariant of the extended Euclid iteration: lm*a ≡ low (mod n).
while low > 1:
r = high // low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % n | Extended euclidean algorithm to find modular inverses for integers
def init_logger(v_num: int):
"""
Call when initializing Jumeaux.
:param v_num: verbosity level (0-3) selecting the log level below.
:return:
"""
# Register the custom INFO sub-levels so dictConfig can refer to them.
logging.addLevelName(LogLevel.INFO_LV1.value, 'INFO_LV1')  # type: ignore # Prevent for enum problem
logging.addLevelName(LogLevel.INFO_LV2.value, 'INFO_LV2')  # type: ignore # Prevent for enum problem
logging.addLevelName(LogLevel.INFO_LV3.value, 'INFO_LV3')  # type: ignore # Prevent for enum problem
# Map the verbosity count to a log level (KeyError if v_num not in 0-3).
logging.config.dictConfig(create_logger_config({  # type: ignore # Prevent for enum problem
0: LogLevel.INFO_LV1,
1: LogLevel.INFO_LV2,
2: LogLevel.INFO_LV3,
3: LogLevel.DEBUG,
}[v_num])) | Call when initialize Jumeaux !!
:return: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.