code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def dump_stats(self, fdump, close=True):
    """
    Dump the logged data to a file.

    The argument `fdump` can be either a filename or an open file object
    that requires write access. `close` controls if the file is closed
    before leaving this method (the default behaviour).
    """
    # Stop background snapshotting first so the dumped data is consistent.
    if self.tracker:
        self.tracker.stop_periodic_snapshots()
    # A string argument is treated as a filename; `type('')` was a dated
    # spelling of `str`.
    if isinstance(fdump, str):
        fdump = open(fdump, 'wb')
    pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
    pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
    if close:
        fdump.close()
def read_byte(self, address):
    """Read a single unaddressed byte from the device at *address*."""
    LOGGER.debug("Reading byte from device %s!", hex(address))
    driver = self.driver
    return driver.read_byte(address)
def merge_translations(localization_bundle_path):
    """ Merges the new translation with the old one.
    The translated files are saved as '.translated' file, and are merged with old translated file.
    Args:
        localization_bundle_path (str): The path to the localization bundle.
    """
    logging.info("Merging translations")
    # Each language lives in its own sub-directory of the bundle; the default
    # (source) language directory is the merge reference, not a translation.
    for lang_dir in os.listdir(localization_bundle_path):
        if lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME:
            continue
        # Every "<name><TRANSLATED_SUFFIX>" file gets merged back into "<name>".
        for translated_path in glob.glob(os.path.join(localization_bundle_path, lang_dir, "*" + TRANSLATED_SUFFIX)):
            # Strip the suffix to recover the target strings file path.
            strings_path = translated_path[:-1 * len(TRANSLATED_SUFFIX)]
            # Reference (untranslated) file from the default-language directory.
            localizable_path = os.path.join(localization_bundle_path,
                                            DEFAULT_LANGUAGE_DIRECTORY_NAME,
                                            os.path.basename(strings_path))
            localization_merge_back(localizable_path, strings_path, translated_path, strings_path) | Merges the new translation with the old one.
The translated files are saved as '.translated' file, and are merged with old translated file.
Args:
localization_bundle_path (str): The path to the localization bundle. |
def create_oqhazardlib_source(self, tom, mesh_spacing, use_defaults=False):
        """
        Creates an instance of the source model as :class:
        openquake.hazardlib.source.complex_fault.ComplexFaultSource

        :param tom: temporal occurrence model for the source
        :param mesh_spacing: fault mesh spacing passed through to hazardlib
        :param use_defaults: if True, conversion helpers may fall back to
            default magnitude scaling relation / aspect ratio
        :raises ValueError: if no magnitude-frequency distribution is set
        """
        if not self.mfd:
            raise ValueError("Cannot write to hazardlib without MFD")
        return ComplexFaultSource(
            self.id,
            self.name,
            self.trt,
            self.mfd,
            mesh_spacing,
            conv.mag_scale_rel_to_hazardlib(self.mag_scale_rel, use_defaults),
            conv.render_aspect_ratio(self.rupt_aspect_ratio, use_defaults),
            tom,
            self.fault_edges,
            self.rake) | Creates an instance of the source model as :class:
openquake.hazardlib.source.complex_fault.ComplexFaultSource |
def best_model(self):
    """Rebuild and return the top scoring model from an optimisation.

    Returns
    -------
    model: AMPAL
        An AMPAL model built from the top scoring parameters.

    Raises
    ------
    AttributeError
        If the optimiser has not been run (no hall of fame recorded).
    """
    if not hasattr(self, 'halloffame'):
        raise AttributeError(
            'No best model found, have you ran the optimiser?')
    top_parameters = self.parse_individual(self.halloffame[0])
    return self.build_fn(
        (self.specification, self.sequences, top_parameters))
Returns
-------
model: AMPAL
Returns an AMPAL model of the top scoring parameters.
Raises
------
AttributeError
Raises a name error if the optimiser has not been run. |
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tag description; the tag name itself when the description is
        empty or the tag is not fully qualified; None when the point does
        not exist or is not found in the service
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # To get the point information for the service, we need the Site.Service
    split_tag = tag_name.split(".")
    # If the full Site.Service.Tag was not supplied, return the tag_name
    if len(split_tag) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # The Site.Service will be the first two split strings
    site_service = split_tag[0] + "." + split_tag[1]
    # GetPoints will return a DataFrame with point information
    points = GetPoints(site_service)
    if tag_name in points.Tag.values:
        description = points[points.Tag == tag_name].Description.values[0]
        # A falsy (empty/NaN) description falls back to the tag name.
        if description:
            return description
        else:
            return tag_name
    else:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None | Gets the current description of a point configured in a real-time eDNA
service.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tag description |
def init_app(self, app, configstore):
    """Initialise the extension for the given application and store.

    The configuration values named by the ``WAFFLE_CONFS`` setting are
    parsed from the database through the supplied store.

    Arguments:
        app: Flask application instance
        configstore (WaffleStore): database store.
    """
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    state = _WaffleState(app, configstore)
    self.state = state
    app.extensions['waffleconf'] = state
Parse the configuration values stored in the database obtained from
the ``WAFFLE_CONFS`` value of the configuration.
Arguments:
app: Flask application instance
configstore (WaffleStore): database store. |
def buyQuestItems(self):
    """ Attempts to buy every quest item, returns result

    Returns
       bool - True if all items were bought, otherwise False
    """
    for quest_item in self.items:
        shop = UserShopFront(self.usr, quest_item.owner, quest_item.id,
                             str(quest_item.price))
        shop.loadInventory()
        if quest_item.name not in shop.inventory:
            return False
        if not shop.inventory[quest_item.name].buy():
            return False
    return True
Returns
bool - True if successful, otherwise False |
def update_instance(
        self, model_name, pk, instance=None, version=None,
        update_only=False):
        """Create or update a cached instance.
        Keyword arguments are:
        model_name - The name of the model
        pk - The primary key of the instance
        instance - The Django model instance, or None to load it
        version - Version to update, or None for all
        update_only - If False (default), then missing cache entries will be
            populated and will cause follow-on invalidation. If True, then
            only entries already in the cache will be updated and cause
            follow-on invalidation.
        Return is a list of tuples (model name, pk, version) that also needs
        to be updated.
        """
        versions = [version] if version else self.versions
        invalid = []
        for version in versions:
            serializer = self.model_function(model_name, version, 'serializer')
            loader = self.model_function(model_name, version, 'loader')
            invalidator = self.model_function(
                model_name, version, 'invalidator')
            # Versions that define no cache behaviour for this model are skipped.
            if serializer is None and loader is None and invalidator is None:
                continue
            if self.cache is None:
                continue
            # Try to load the instance
            # NOTE(review): an instance loaded for the first version is reused
            # for subsequent versions -- confirm the loaders agree across versions.
            if not instance:
                instance = loader(pk)
            if serializer:
                # Get current value, if in cache
                key = self.key_for(version, model_name, pk)
                current_raw = self.cache.get(key)
                current = json.loads(current_raw) if current_raw else None
                # Get new value
                if update_only and current_raw is None:
                    new = None
                else:
                    new = serializer(instance)
                deleted = not instance
                # If cache is invalid, update cache
                invalidate = (current != new) or deleted
                if invalidate:
                    if deleted:
                        self.cache.delete(key)
                    else:
                        self.cache.set(key, json.dumps(new))
            else:
                # No serializer: always propagate invalidation downstream.
                invalidate = True
            # Invalidate upstream caches
            if instance and invalidate:
                for upstream in invalidator(instance):
                    if isinstance(upstream, str):
                        # Raw cache key: delete directly.
                        self.cache.delete(upstream)
                    else:
                        m, i, immediate = upstream
                        if immediate:
                            invalidate_key = self.key_for(version, m, i)
                            self.cache.delete(invalidate_key)
                        # Queued for follow-on update regardless of `immediate`.
                        invalid.append((m, i, version))
        return invalid | Create or update a cached instance.
Keyword arguments are:
model_name - The name of the model
pk - The primary key of the instance
instance - The Django model instance, or None to load it
versions - Version to update, or None for all
update_only - If False (default), then missing cache entries will be
populated and will cause follow-on invalidation. If True, then
only entries already in the cache will be updated and cause
follow-on invalidation.
Return is a list of tuples (model name, pk, immediate) that also needs
to be updated. |
def get_port_profile_for_intf_input_rbridge_id(self, **kwargs):
    """Auto Generated Code

    Build the ``get_port_profile_for_intf`` RPC element with an
    ``input/rbridge-id`` leaf and hand it to the callback.

    Keyword Args:
        rbridge_id: value for the ``rbridge-id`` leaf (required).
        callback: callable receiving the assembled element; defaults
            to ``self._callback``.

    Returns:
        Whatever the callback returns.
    """
    # The original also built an unused ET.Element("config") that was
    # immediately overwritten; the RPC element itself is the payload.
    get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
    config = get_port_profile_for_intf
    # Renamed from `input`, which shadowed the builtin.
    input_elem = ET.SubElement(get_port_profile_for_intf, "input")
    rbridge_id = ET.SubElement(input_elem, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _parse_bbox_grid(bbox_grid):
    """ Helper method for parsing bounding box grid. It will try to parse it into `BBoxCollection`

    :param bbox_grid: a `BBoxCollection` (returned unchanged) or a list of
        bounding boxes to wrap into one
    :raises ValueError: for any other input type
    """
    if isinstance(bbox_grid, BBoxCollection):
        return bbox_grid
    if isinstance(bbox_grid, list):
        return BBoxCollection(bbox_grid)
    raise ValueError("Parameter 'bbox_grid' should be an instance of {}".format(BBoxCollection.__name__)) | Helper method for parsing bounding box grid. It will try to parse it into `BBoxCollection`
def recent(self, include=None):
        """
        Retrieve the most recent tickets

        :param include: optional sideloads to pass through to the query
        """
        # `id=None` requests the collection endpoint rather than one ticket.
        return self._query_zendesk(self.endpoint.recent, 'ticket', id=None, include=include) | Retrieve the most recent tickets
def render(self, request, collect_render_data=True, **kwargs):
    """
    Render this view using the renderer registered for ``render_type``.

    :param request: The request object
    :param collect_render_data: When True, ``get_render_data`` is called \
        to build the complete context handed to the renderer.
    :param kwargs: Any other keyword arguments forwarded to the renderer.
    """
    assert self.render_type in self.renders
    renderer = self.renders[self.render_type]
    if collect_render_data:
        kwargs = self.get_render_data(**kwargs)
    return renderer.render(request, **kwargs)
on the render class specified.
:param request: The request object
:param collect_render_data: If True we will call \
the get_render_data method to pass a complete context \
to the renderer.
:param kwargs: Any other keyword arguments that should \
be passed to the renderer. |
def DownloadResource(url, path):
    '''Download a zip resource from *url* (s3) and extract it into *path*.'''
    import requests
    from six import BytesIO
    import zipfile
    print("Downloading... {} to {}".format(url, path))
    response = requests.get(url, stream=True)
    archive = zipfile.ZipFile(BytesIO(response.content))
    archive.extractall(path)
    print("Completed download and extraction.")
def is_separator(self, char):
"""
Test if a character is a separator.
Parameters
----------
char : str
The character to test.
Returns
-------
ret : bool
True if character is a separator, False otherwise.
"""
if len(char) > 1:
raise TypeError("Expected a char.")
if char in self.separators:
return True
return False | Test if a character is a separator.
Parameters
----------
char : str
The character to test.
Returns
-------
ret : bool
True if character is a separator, False otherwise. |
def is_valid_url(self, url, non_blocking=True):
    """Check whether *url* is valid, optionally in a background worker."""
    logger.debug(str((url)))
    if not non_blocking:
        return self._is_valid_url(url)
    return self._create_worker(self._is_valid_url, url)
def wcomplex(wave):
    r"""
    Convert a waveform's dependent variable vector to complex.
    :param wave: Waveform
    :type wave: :py:class:`peng.eng.Waveform`
    :rtype: :py:class:`peng.eng.Waveform`
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.wave_functions.wcomplex
    :raises: RuntimeError (Argument \`wave\` is not valid)
    .. [[[end]]]
    """
    # Shallow copy so the caller's waveform keeps its original dtype.
    ret = copy.copy(wave)
    # `np.complex` was removed in NumPy 1.24 (it was only an alias of the
    # builtin `complex`, i.e. complex128); use the explicit dtype.
    ret._dep_vector = ret._dep_vector.astype(np.complex128)
    return ret
Convert a waveform's dependent variable vector to complex.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.wcomplex
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]] |
def expand(cls, match, expand):
    """
    If `expand` were used directly, the url-decoded context would be decoded
    again, which creates a security issue. Hack expand to quote the text
    before expanding.
    """
    # _EncodedMatch re-quotes group values so the template substitution cannot
    # inject already-decoded text. NOTE(review): `re._expand` is a private
    # CPython helper; confirm it exists on the supported Python versions.
    return re._expand(match.re, cls._EncodedMatch(match), expand) | If use expand directly, the url-decoded context will be decoded again, which create a security
issue. Hack expand to quote the text before expanding |
def get_rows_by_cols(self, matching_dict):
    """Return every row whose columns match all entries of the matching_dict.

    Parameters
    ----------
    matching_dict: :obj:'dict'
        Desired dictionary of col values.

    Returns
    -------
    :obj:`list`
        A list of rows that satisfy the matching_dict
    """
    matches = []
    for row_num in range(self.num_rows):
        # Table rows are 1-indexed.
        candidate = self._table[row_num + 1]
        if all(candidate[col] == want for col, want in matching_dict.items()):
            matches.append(candidate)
    return matches
Parameters
----------
matching_dict: :obj:'dict'
Desired dictionary of col values.
Returns
-------
:obj:`list`
A list of rows that satisfy the matching_dict |
def server_factory(global_conf, host, port, **options):
    """Server factory for paste.
    Options are:
    * proactor: class name to use from cogen.core.proactors
    (default: DefaultProactor - best available proactor for current platform)
    * proactor_resolution: float
    * sched_default_priority: int (see cogen.core.util.priority)
    * sched_default_timeout: float (default: 0 - no timeout)
    * server_name: str
    * request_queue_size: int
    * sockoper_timeout: float (default: 15 - operations timeout in 15 seconds),
    -1 (no timeout), 0 (use scheduler's default), >0 (seconds)
    * sendfile_timeout: float (default: 300) - same as sockoper_timeout,
    only applied to sendfile operations (which might need a much higher timeout
    value)
    * sockaccept_greedy: bool
    """
    port = int(port)
    # Swap paste's thread-local implementation for cogen's `local` so
    # request-local state works under the coroutine scheduler; skip silently
    # if paste's module layout differs.
    try:
        import paste.util.threadinglocal as pastelocal
        pastelocal.local = local
    except ImportError:
        pass
    def serve(app):
        # Deferred entry point: paste calls this with the assembled WSGI app.
        runner = Runner(host, port, app, options)
        runner.run()
    return serve | Server factory for paste.
Options are:
* proactor: class name to use from cogen.core.proactors
(default: DefaultProactor - best available proactor for current platform)
* proactor_resolution: float
* sched_default_priority: int (see cogen.core.util.priority)
* sched_default_timeout: float (default: 0 - no timeout)
* server_name: str
* request_queue_size: int
* sockoper_timeout: float (default: 15 - operations timeout in 15 seconds),
-1 (no timeout), 0 (use scheduler's default), >0 (seconds)
* sendfile_timeout: float (default: 300) - same as sockoper_timeout,
only applied to sendfile operations (wich might need a much higher timeout
value)
* sockaccept_greedy: bool |
def sample_annealed_importance_chain(
    num_steps,
    proposal_log_prob_fn,
    target_log_prob_fn,
    current_state,
    make_kernel_fn,
    parallel_iterations=10,
    name=None):
  """Runs annealed importance sampling (AIS) to estimate normalizing constants.
  This function uses an MCMC transition operator (e.g., Hamiltonian Monte Carlo)
  to sample from a series of distributions that slowly interpolates between
  an initial "proposal" distribution:
  `exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`
  and the target distribution:
  `exp(target_log_prob_fn(x) - target_log_normalizer)`,
  accumulating importance weights along the way. The product of these
  importance weights gives an unbiased estimate of the ratio of the
  normalizing constants of the initial distribution and the target
  distribution:
  `E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.
  Note: When running in graph mode, `proposal_log_prob_fn` and
  `target_log_prob_fn` are called exactly three times (although this may be
  reduced to two times in the future).
  Args:
    num_steps: Integer number of Markov chain updates to run. More
      iterations means more expense, but smoother annealing between q
      and p, which in turn means exponentially lower variance for the
      normalizing constant estimator.
    proposal_log_prob_fn: Python callable that returns the log density of the
      initial distribution.
    target_log_prob_fn: Python callable which takes an argument like
      `current_state` (or `*current_state` if it's a list) and returns its
      (possibly unnormalized) log-density under the target distribution.
    current_state: `Tensor` or Python `list` of `Tensor`s representing the
      current state(s) of the Markov chain(s). The first `r` dimensions index
      independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
    make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like
      object. Must take one argument representing the `TransitionKernel`'s
      `target_log_prob_fn`. The `target_log_prob_fn` argument represents the
      `TransitionKernel`'s target log distribution.  Note:
      `sample_annealed_importance_chain` creates a new `target_log_prob_fn`
      which is an interpolation between the supplied `target_log_prob_fn` and
      `proposal_log_prob_fn`; it is this interpolated function which is used as
      an argument to `make_kernel_fn`.
    parallel_iterations: The number of iterations allowed to run in parallel.
      It must be a positive integer. See `tf.while_loop` for more details.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "sample_annealed_importance_chain").
  Returns:
    next_state: `Tensor` or Python list of `Tensor`s representing the
      state(s) of the Markov chain(s) at the final iteration. Has same shape as
      input `current_state`.
    ais_weights: Tensor with the estimated weight(s). Has shape matching
      `target_log_prob_fn(current_state)`.
    kernel_results: `collections.namedtuple` of internal calculations used to
      advance the chain.
  #### Examples
  ##### Estimate the normalizing constant of a log-gamma distribution.
  ```python
  tfd = tfp.distributions
  # Run 100 AIS chains in parallel
  num_chains = 100
  dims = 20
  dtype = np.float32
  proposal = tfd.MultivariateNormalDiag(
     loc=tf.zeros([dims], dtype=dtype))
  target = tfd.TransformedDistribution(
    distribution=tfd.Gamma(concentration=dtype(2),
                           rate=dtype(3)),
    bijector=tfp.bijectors.Invert(tfp.bijectors.Exp()),
    event_shape=[dims])
  chains_state, ais_weights, kernels_results = (
      tfp.mcmc.sample_annealed_importance_chain(
          num_steps=1000,
          proposal_log_prob_fn=proposal.log_prob,
          target_log_prob_fn=target.log_prob,
          current_state=proposal.sample(num_chains),
          make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=tlp_fn,
            step_size=0.2,
            num_leapfrog_steps=2)))
  log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
                              - np.log(num_chains))
  log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)
  ```
  ##### Estimate marginal likelihood of a Bayesian regression model.
  ```python
  tfd = tfp.distributions
  def make_prior(dims, dtype):
    return tfd.MultivariateNormalDiag(
        loc=tf.zeros(dims, dtype))
  def make_likelihood(weights, x):
    return tfd.MultivariateNormalDiag(
        loc=tf.tensordot(weights, x, axes=[[0], [-1]]))
  # Run 100 AIS chains in parallel
  num_chains = 100
  dims = 10
  dtype = np.float32
  # Make training data.
  x = np.random.randn(num_chains, dims).astype(dtype)
  true_weights = np.random.randn(dims).astype(dtype)
  y = np.dot(x, true_weights) + np.random.randn(num_chains)
  # Setup model.
  prior = make_prior(dims, dtype)
  def target_log_prob_fn(weights):
    return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)
  proposal = tfd.MultivariateNormalDiag(
      loc=tf.zeros(dims, dtype))
  weight_samples, ais_weights, kernel_results = (
      tfp.mcmc.sample_annealed_importance_chain(
        num_steps=1000,
        proposal_log_prob_fn=proposal.log_prob,
        target_log_prob_fn=target_log_prob_fn,
        current_state=tf.zeros([num_chains, dims], dtype),
        make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
          target_log_prob_fn=tlp_fn,
          step_size=0.1,
          num_leapfrog_steps=2)))
  log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)
                             - np.log(num_chains))
  ```
  """
  with tf.compat.v1.name_scope(name, "sample_annealed_importance_chain",
                               [num_steps, current_state]):
    num_steps = tf.convert_to_tensor(
        value=num_steps, dtype=tf.int32, name="num_steps")
    if mcmc_util.is_list_like(current_state):
      current_state = [
          tf.convert_to_tensor(value=s, name="current_state")
          for s in current_state
      ]
    else:
      current_state = tf.convert_to_tensor(
          value=current_state, name="current_state")
    def _make_convex_combined_log_prob_fn(iter_):
      # Returns log q^(1-beta) * p^beta where beta anneals from 1/num_steps
      # to 1 over the chain.
      def _fn(*args):
        p = tf.identity(proposal_log_prob_fn(*args), name="proposal_log_prob")
        t = tf.identity(target_log_prob_fn(*args), name="target_log_prob")
        dtype = p.dtype.base_dtype
        beta = tf.cast(iter_ + 1, dtype) / tf.cast(num_steps, dtype)
        return tf.identity(beta * t + (1. - beta) * p,
                           name="convex_combined_log_prob")
      return _fn
    def _loop_body(iter_, ais_weights, current_state, kernel_results):
      """Closure which implements `tf.while_loop` body."""
      x = (current_state if mcmc_util.is_list_like(current_state)
           else [current_state])
      proposal_log_prob = proposal_log_prob_fn(*x)
      target_log_prob = target_log_prob_fn(*x)
      # Accumulate the importance weight increment for this annealing step.
      ais_weights += ((target_log_prob - proposal_log_prob) /
                      tf.cast(num_steps, ais_weights.dtype))
      kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_))
      next_state, inner_results = kernel.one_step(
          current_state, kernel_results.inner_results)
      kernel_results = AISResults(
          proposal_log_prob=proposal_log_prob,
          target_log_prob=target_log_prob,
          inner_results=inner_results,
      )
      return [iter_ + 1, ais_weights, next_state, kernel_results]
    def _bootstrap_results(init_state):
      """Creates first version of `previous_kernel_results`."""
      kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_=0))
      inner_results = kernel.bootstrap_results(init_state)
      convex_combined_log_prob = inner_results.accepted_results.target_log_prob
      dtype = convex_combined_log_prob.dtype.as_numpy_dtype
      shape = tf.shape(input=convex_combined_log_prob)
      # NaN placeholders: real log-probs are filled in by the first loop step.
      proposal_log_prob = tf.fill(shape, dtype(np.nan),
                                  name="bootstrap_proposal_log_prob")
      # NOTE(review): op name "target_target_log_prob" looks like a typo for
      # "bootstrap_target_log_prob"; kept as-is since it is a runtime name.
      target_log_prob = tf.fill(shape, dtype(np.nan),
                                name="target_target_log_prob")
      return AISResults(
          proposal_log_prob=proposal_log_prob,
          target_log_prob=target_log_prob,
          inner_results=inner_results,
      )
    previous_kernel_results = _bootstrap_results(current_state)
    inner_results = previous_kernel_results.inner_results
    # Broadcasting proposed/accepted shapes yields the per-chain weight shape.
    ais_weights = tf.zeros(
        shape=tf.broadcast_dynamic_shape(
            tf.shape(input=inner_results.proposed_results.target_log_prob),
            tf.shape(input=inner_results.accepted_results.target_log_prob)),
        dtype=inner_results.proposed_results.target_log_prob.dtype.base_dtype)
    [_, ais_weights, current_state, kernel_results] = tf.while_loop(
        cond=lambda iter_, *args: iter_ < num_steps,
        body=_loop_body,
        loop_vars=[
            np.int32(0),  # iter_
            ais_weights,
            current_state,
            previous_kernel_results,
        ],
        parallel_iterations=parallel_iterations)
    return [current_state, ais_weights, kernel_results] | Runs annealed importance sampling (AIS) to estimate normalizing constants.
This function uses an MCMC transition operator (e.g., Hamiltonian Monte Carlo)
to sample from a series of distributions that slowly interpolates between
an initial "proposal" distribution:
`exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`
and the target distribution:
`exp(target_log_prob_fn(x) - target_log_normalizer)`,
accumulating importance weights along the way. The product of these
importance weights gives an unbiased estimate of the ratio of the
normalizing constants of the initial distribution and the target
distribution:
`E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.
Note: When running in graph mode, `proposal_log_prob_fn` and
`target_log_prob_fn` are called exactly three times (although this may be
reduced to two times in the future).
Args:
num_steps: Integer number of Markov chain updates to run. More
iterations means more expense, but smoother annealing between q
and p, which in turn means exponentially lower variance for the
normalizing constant estimator.
proposal_log_prob_fn: Python callable that returns the log density of the
initial distribution.
target_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the target distribution.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like
object. Must take one argument representing the `TransitionKernel`'s
`target_log_prob_fn`. The `target_log_prob_fn` argument represents the
`TransitionKernel`'s target log distribution. Note:
`sample_annealed_importance_chain` creates a new `target_log_prob_fn`
which is an interpolation between the supplied `target_log_prob_fn` and
`proposal_log_prob_fn`; it is this interpolated function which is used as
an argument to `make_kernel_fn`.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer. See `tf.while_loop` for more details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "sample_annealed_importance_chain").
Returns:
next_state: `Tensor` or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at the final iteration. Has same shape as
input `current_state`.
ais_weights: Tensor with the estimated weight(s). Has shape matching
`target_log_prob_fn(current_state)`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
#### Examples
##### Estimate the normalizing constant of a log-gamma distribution.
```python
tfd = tfp.distributions
# Run 100 AIS chains in parallel
num_chains = 100
dims = 20
dtype = np.float32
proposal = tfd.MultivatiateNormalDiag(
loc=tf.zeros([dims], dtype=dtype))
target = tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=dtype(2),
rate=dtype(3)),
bijector=tfp.bijectors.Invert(tfp.bijectors.Exp()),
event_shape=[dims])
chains_state, ais_weights, kernels_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
target_log_prob_fn=target.log_prob,
current_state=proposal.sample(num_chains),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.2,
num_leapfrog_steps=2)))
log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)
```
##### Estimate marginal likelihood of a Bayesian regression model.
```python
tfd = tfp.distributions
def make_prior(dims, dtype):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
def make_likelihood(weights, x):
return tfd.MultivariateNormalDiag(
loc=tf.tensordot(weights, x, axes=[[0], [-1]]))
# Run 100 AIS chains in parallel
num_chains = 100
dims = 10
dtype = np.float32
# Make training data.
x = np.random.randn(num_chains, dims).astype(dtype)
true_weights = np.random.randn(dims).astype(dtype)
y = np.dot(x, true_weights) + np.random.randn(num_chains)
# Setup model.
prior = make_prior(dims, dtype)
def target_log_prob_fn(weights):
return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)
proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
weight_samples, ais_weights, kernel_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
target_log_prob_fn=target_log_prob_fn
current_state=tf.zeros([num_chains, dims], dtype),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.1,
num_leapfrog_steps=2)))
log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
``` |
def plot_grouped_gos(self, fout_img=None, exclude_hdrs=None, **kws_usr):
        """One Plot containing all user GOs (yellow or green) and header GO IDs(green or purple).

        :param fout_img: output image filename; may contain '{NS}' to split
            the plot per namespace. Defaults to "<group name>.png".
        :param exclude_hdrs: header GO IDs whose user GOs are excluded
        :param kws_usr: user plot keyword args (colors etc.)
        """
        # kws_plt -> go2color go2bordercolor
        kws_plt, kws_dag = self._get_kws_plt(self.grprobj.usrgos, **kws_usr)
        pltgosusr = self.grprobj.usrgos
        if exclude_hdrs is not None:
            pltgosusr = pltgosusr.difference(self.grprobj.get_usrgos_g_hdrgos(exclude_hdrs))
        if fout_img is None:
            fout_img = "{GRP_NAME}.png".format(GRP_NAME=self.grprobj.grpname)
        # Split one plot into potentially three (BP, MF, CC) if png filename contains '{NS}'
        if '{NS}' in fout_img:
            go2nt = self.grprobj.gosubdag.get_go2nt(pltgosusr)
            for namespace in ['BP', 'MF', 'CC']:
                pltgos_ns = [go for go in pltgosusr if go2nt[go].NS == namespace]
                # Namespaces with no GO IDs get no output file.
                if pltgos_ns:
                    png = fout_img.format(NS=namespace)
                    self._plot_grouped_gos(png, pltgos_ns, kws_plt, kws_dag)
        # Plot all user GO IDs into a single plot, regardless of their namespace
        else:
            self._plot_grouped_gos(fout_img, pltgosusr, kws_plt, kws_dag) | One Plot containing all user GOs (yellow or green) and header GO IDs(green or purple).
def password_hash(password, password_salt=None):
    """Hashes a specified password"""
    # Any falsy salt (None or "") falls back to the configured session salt.
    password_salt = password_salt or oz.settings["session_salt"]
    digest = hashlib.sha256((password_salt + password).encode("utf-8")).hexdigest()
    return "sha256!%s" % digest
def _plot_weights_heatmap(self, index=None, figsize=None, **kwargs):
        """Plot weights as a heatmap

        index = can be a particular index or a list of indicies
        **kwargs - additional arguments to concise.utils.plot.heatmap
        """
        # First weight array of the layer; indexed as W[:, :, filter]
        # (last axis selects the filter) -- TODO confirm axis meanings.
        W = self.get_weights()[0]
        if index is None:
            index = np.arange(W.shape[2])
        # Swap the first two axes so the vocabulary runs along heatmap rows.
        fig = heatmap(np.swapaxes(W[:, :, index], 0, 1), plot_name="filter: ",
                      vocab=self.VOCAB, figsize=figsize, **kwargs)
        # plt.show()
        return fig | Plot weights as a heatmap
index = can be a particular index or a list of indicies
**kwargs - additional arguments to concise.utils.plot.heatmap |
def ensure_compliance(self):
    """Ensure that all registered files comply to registered criteria.
    """
    for path in self.paths:
        if os.path.exists(path):
            if self.is_compliant(path):
                continue
            log('File %s is not in compliance.' % path, level=INFO)
        elif not self.always_comply:
            log("Non-existent path '%s' - skipping compliance check"
                % (path), level=INFO)
            continue
        # Reached for non-compliant files, and for missing ones when
        # always_comply is set.
        if self._take_action():
            log("Applying compliance criteria to '%s'" % (path), level=INFO)
            self.comply(path)
def enter_maintenance_mode(self):
        """
        Put the cluster in maintenance mode.
        @return: Reference to the completed command.
        @since: API v2
        """
        cmd = self._cmd('enterMaintenanceMode')
        # Refresh local cluster state from the server only on success.
        if cmd.success:
            self._update(get_cluster(self._get_resource_root(), self.name))
        return cmd | Put the cluster in maintenance mode.
@return: Reference to the completed command.
@since: API v2 |
def getProperty(self, prop, *args, **kwargs):
        """
        Get the value of a property. See the corresponding method for the
        required arguments.  For example, for the property _NET_WM_STATE, look
        for :meth:`getWmState`

        :raises KeyError: if no reader method is registered for `prop`
        """
        # __getAttrs (name-mangled class attribute) maps property names to
        # their unbound reader methods.
        f = self.__getAttrs.get(prop)
        if not f:
            raise KeyError('Unknown readable property: %s' % prop)
        return f(self, *args, **kwargs) | Get the value of a property. See the corresponding method for the
required arguments. For example, for the property _NET_WM_STATE, look
for :meth:`getWmState` |
def dispatch_event(self, event: "Event") -> None:
    """
    Dispatch the given event to every listener registered for its type.

    The dispatcher becomes the event's target (via `event.set_target(self)`)
    unless the event already has one - e.g. when it is being redispatched.

    Args:
        event (Event): The event to dispatch. Must not be `None`.

    Raises:
        TypeError: If the event is `None` or its type is incorrect.
    """
    if event.target is None:
        event.set_target(self)
    registered = self._registered_listeners.get(event.type)
    if registered is None:
        return
    for callback in registered:
        callback(event)
It is the duty of this method to set the target of the dispatched event by calling
`event.set_target(self)`.
Args:
event (Event): The event to dispatch. Must not be `None`.
Raises:
TypeError: If the event is `None` or its type is incorrect. |
def _resolve_dependencies(self, cur, dependencies):
"""
Function checks if dependant packages are installed in DB
"""
list_of_deps_ids = []
_list_of_deps_unresolved = []
_is_deps_resolved = True
for k, v in dependencies.items():
pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name)
cur.execute("SELECT _find_schema('{0}', '{1}')"
.format(k, v))
pgpm_v_ext = tuple(cur.fetchone()[0][1:-1].split(','))
try:
list_of_deps_ids.append(int(pgpm_v_ext[0]))
except:
pass
if not pgpm_v_ext[0]:
_is_deps_resolved = False
_list_of_deps_unresolved.append("{0}: {1}".format(k, v))
return _is_deps_resolved, list_of_deps_ids, _list_of_deps_unresolved | Function checks if dependant packages are installed in DB |
def getHosts(filename=None, hostlist=None):
"""Return a list of hosts depending on the environment"""
if filename:
return getHostsFromFile(filename)
elif hostlist:
return getHostsFromList(hostlist)
elif getEnv() == "SLURM":
return getHostsFromSLURM()
elif getEnv() == "PBS":
return getHostsFromPBS()
elif getEnv() == "SGE":
return getHostsFromSGE()
else:
return getDefaultHosts() | Return a list of hosts depending on the environment |
def __feed_arthur(self):
""" Feed Ocean with backend data collected from arthur redis queue"""
with self.ARTHUR_FEED_LOCK:
# This is a expensive operation so don't do it always
if (time.time() - self.ARTHUR_LAST_MEMORY_CHECK) > 5 * self.ARTHUR_LAST_MEMORY_CHECK_TIME:
self.ARTHUR_LAST_MEMORY_CHECK = time.time()
logger.debug("Measuring the memory used by the raw items dict ...")
try:
memory_size = self.measure_memory(self.arthur_items) / (1024 * 1024)
except RuntimeError as ex:
# During memory usage measure, other thread could change the dict
logger.warning("Can't get the memory used by the raw items dict: %s", ex)
memory_size = self.ARTHUR_LAST_MEMORY_SIZE
self.ARTHUR_LAST_MEMORY_CHECK_TIME = time.time() - self.ARTHUR_LAST_MEMORY_CHECK
logger.debug("Arthur items memory size: %0.2f MB (%is to check)",
memory_size, self.ARTHUR_LAST_MEMORY_CHECK_TIME)
self.ARTHUR_LAST_MEMORY_SIZE = memory_size
# Don't feed items from redis if the current python dict is
# larger than ARTHUR_MAX_MEMORY_SIZE
if self.ARTHUR_LAST_MEMORY_SIZE > self.ARTHUR_MAX_MEMORY_SIZE:
logger.debug("Items queue full. Not collecting items from redis queue.")
return
logger.info("Collecting items from redis queue")
db_url = self.config.get_conf()['es_collection']['redis_url']
conn = redis.StrictRedis.from_url(db_url)
logger.debug("Redis connection stablished with %s.", db_url)
# Get and remove queued items in an atomic transaction
pipe = conn.pipeline()
# pipe.lrange(Q_STORAGE_ITEMS, 0, -1)
pipe.lrange(Q_STORAGE_ITEMS, 0, self.ARTHUR_REDIS_ITEMS - 1)
pipe.ltrim(Q_STORAGE_ITEMS, self.ARTHUR_REDIS_ITEMS, -1)
items = pipe.execute()[0]
for item in items:
arthur_item = pickle.loads(item)
if arthur_item['tag'] not in self.arthur_items:
self.arthur_items[arthur_item['tag']] = []
self.arthur_items[arthur_item['tag']].append(arthur_item)
for tag in self.arthur_items:
if self.arthur_items[tag]:
logger.debug("Arthur items for %s: %i", tag, len(self.arthur_items[tag])) | Feed Ocean with backend data collected from arthur redis queue |
def title(self, category):
""" Return the total printed length of this category item.
"""
return sum(
[self.getWidth(category, x) for x in self.fields]) | Return the total printed length of this category item. |
def estimate_ride(api_client):
"""Use an UberRidesClient to fetch a ride estimate and print the results.
Parameters
api_client (UberRidesClient)
An authorized UberRidesClient with 'request' scope.
"""
try:
estimate = api_client.estimate_ride(
product_id=SURGE_PRODUCT_ID,
start_latitude=START_LAT,
start_longitude=START_LNG,
end_latitude=END_LAT,
end_longitude=END_LNG,
seat_count=2
)
except (ClientError, ServerError) as error:
fail_print(error)
else:
success_print(estimate.json) | Use an UberRidesClient to fetch a ride estimate and print the results.
Parameters
api_client (UberRidesClient)
An authorized UberRidesClient with 'request' scope. |
def _reset_em(self):
"""Resets self.em and the shared instances."""
self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=False)
self.em.start()
self._set_shared_instances() | Resets self.em and the shared instances. |
def get_bool_relative(strings: Sequence[str],
prefix1: str,
delta: int,
prefix2: str,
ignoreleadingcolon: bool = False) -> Optional[bool]:
"""
Fetches a boolean parameter via :func:`get_string_relative`.
"""
return get_bool_raw(get_string_relative(
strings, prefix1, delta, prefix2,
ignoreleadingcolon=ignoreleadingcolon)) | Fetches a boolean parameter via :func:`get_string_relative`. |
def hostapi_info(index=None):
"""Return a generator with information about each host API.
If index is given, only one dictionary for the given host API is
returned.
"""
if index is None:
return (hostapi_info(i) for i in range(_pa.Pa_GetHostApiCount()))
else:
info = _pa.Pa_GetHostApiInfo(index)
if not info:
raise RuntimeError("Invalid host API")
assert info.structVersion == 1
return {'name': ffi.string(info.name).decode(errors='ignore'),
'default_input_device': info.defaultInputDevice,
'default_output_device': info.defaultOutputDevice} | Return a generator with information about each host API.
If index is given, only one dictionary for the given host API is
returned. |
def _eval_call(self, node):
"""
Evaluate a function call
:param node: Node to eval
:return: Result of node
"""
try:
func = self.functions[node.func.id]
except KeyError:
raise NameError(node.func.id)
value = func(
*(self._eval(a) for a in node.args),
**dict(self._eval(k) for k in node.keywords)
)
if value is True:
return 1
elif value is False:
return 0
else:
return value | Evaluate a function call
:param node: Node to eval
:return: Result of node |
def _iiOfAny(instance, classes):
"""
Returns true, if `instance` is instance of any (_iiOfAny) of the `classes`.
This function doesn't use :func:`isinstance` check, it just compares the
class names.
This can be generally dangerous, but it is really useful when you are
comparing class serialized in one module and deserialized in another.
This causes, that module paths in class internals are different and
:func:`isinstance` and :func:`type` comparsions thus fails.
Use this function instead, if you want to check what type is your
deserialized message.
Args:
instance (object): class instance you want to know the type
classes (list): classes, or just the class you want to compare - func
automatically converts nonlist/nontuple parameters to
list
Returns:
bool: True if `instance` is instance of any of the `classes`.
"""
if type(classes) not in [list, tuple]:
classes = [classes]
return any(map(lambda x: type(instance).__name__ == x.__name__, classes)) | Returns true, if `instance` is instance of any (_iiOfAny) of the `classes`.
This function doesn't use :func:`isinstance` check, it just compares the
class names.
This can be generally dangerous, but it is really useful when you are
comparing class serialized in one module and deserialized in another.
This causes, that module paths in class internals are different and
:func:`isinstance` and :func:`type` comparsions thus fails.
Use this function instead, if you want to check what type is your
deserialized message.
Args:
instance (object): class instance you want to know the type
classes (list): classes, or just the class you want to compare - func
automatically converts nonlist/nontuple parameters to
list
Returns:
bool: True if `instance` is instance of any of the `classes`. |
def blame(self, rev='HEAD', committer=True, by='repository', ignore_globs=None, include_globs=None):
"""
Returns the blame from the current HEAD of the repository as a DataFrame. The DataFrame is grouped by committer
name, so it will be the sum of all contributions to the repository by each committer. As with the commit history
method, extensions and ignore_dirs parameters can be passed to exclude certain directories, or focus on certain
file extensions. The DataFrame will have the columns:
* committer
* loc
:param rev: (optional, default=HEAD) the specific revision to blame
:param committer: (optional, default=True) true if committer should be reported, false if author
:param by: (optional, default=repository) whether to group by repository or by file
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
blames = []
file_names = [x for x in self.repo.git.log(pretty='format:', name_only=True, diff_filter='A').split('\n') if
x.strip() != '']
for file in self.__check_extension({x: x for x in file_names}, ignore_globs=ignore_globs,
include_globs=include_globs).keys():
try:
blames.append(
[x + [str(file).replace(self.git_dir + '/', '')] for x in
self.repo.blame(rev, str(file).replace(self.git_dir + '/', ''))]
)
except GitCommandError:
pass
blames = [item for sublist in blames for item in sublist]
if committer:
if by == 'repository':
blames = DataFrame(
[[x[0].committer.name, len(x[1])] for x in blames],
columns=['committer', 'loc']
).groupby('committer').agg({'loc': np.sum})
elif by == 'file':
blames = DataFrame(
[[x[0].committer.name, len(x[1]), x[2]] for x in blames],
columns=['committer', 'loc', 'file']
).groupby(['committer', 'file']).agg({'loc': np.sum})
else:
if by == 'repository':
blames = DataFrame(
[[x[0].author.name, len(x[1])] for x in blames],
columns=['author', 'loc']
).groupby('author').agg({'loc': np.sum})
elif by == 'file':
blames = DataFrame(
[[x[0].author.name, len(x[1]), x[2]] for x in blames],
columns=['author', 'loc', 'file']
).groupby(['author', 'file']).agg({'loc': np.sum})
return blames | Returns the blame from the current HEAD of the repository as a DataFrame. The DataFrame is grouped by committer
name, so it will be the sum of all contributions to the repository by each committer. As with the commit history
method, extensions and ignore_dirs parameters can be passed to exclude certain directories, or focus on certain
file extensions. The DataFrame will have the columns:
* committer
* loc
:param rev: (optional, default=HEAD) the specific revision to blame
:param committer: (optional, default=True) true if committer should be reported, false if author
:param by: (optional, default=repository) whether to group by repository or by file
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame |
def build_gemini_query(self, query, extra_info):
"""Append sql to a gemini query
Args:
query(str): The gemini query
extra_info(str): The text that should be added
Return:
extended_query(str)
"""
if 'WHERE' in query:
return "{0} AND {1}".format(query, extra_info)
else:
return "{0} WHERE {1}".format(query, extra_info) | Append sql to a gemini query
Args:
query(str): The gemini query
extra_info(str): The text that should be added
Return:
extended_query(str) |
def webhoneypotbytype(date, return_format=None):
"""API data for `Webhoneypot: Attack By Type
<https://isc.sans.edu/webhoneypot/types.html>`_. We currently use a set
of regular expressions to determine the type of attack used to attack the
honeypot. Output is the top 30 attacks for the last month.
:param date: string or datetime.date() (required)
"""
uri = 'webhoneypotbytype'
try:
uri = '/'.join([uri, date.strftime("%Y-%m-%d")])
except AttributeError:
uri = '/'.join([uri, date])
return _get(uri, return_format) | API data for `Webhoneypot: Attack By Type
<https://isc.sans.edu/webhoneypot/types.html>`_. We currently use a set
of regular expressions to determine the type of attack used to attack the
honeypot. Output is the top 30 attacks for the last month.
:param date: string or datetime.date() (required) |
def T_sigma(self, sigma):
"""
Given a policy `sigma`, return the T_sigma operator.
Parameters
----------
sigma : array_like(int, ndim=1)
Policy vector, of length n.
Returns
-------
callable
The T_sigma operator.
"""
R_sigma, Q_sigma = self.RQ_sigma(sigma)
return lambda v: R_sigma + self.beta * Q_sigma.dot(v) | Given a policy `sigma`, return the T_sigma operator.
Parameters
----------
sigma : array_like(int, ndim=1)
Policy vector, of length n.
Returns
-------
callable
The T_sigma operator. |
def findnextmatch(self, startkey, find_string, flags, search_result=True):
""" Returns a tuple with the position of the next match of find_string
Returns None if string not found.
Parameters:
-----------
startkey: Start position of search
find_string:String to be searched for
flags: List of strings, out of
["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"]
search_result: Bool, defaults to True
\tIf True then the search includes the result string (slower)
"""
assert "UP" in flags or "DOWN" in flags
assert not ("UP" in flags and "DOWN" in flags)
if search_result:
def is_matching(key, find_string, flags):
code = self(key)
if self.string_match(code, find_string, flags) is not None:
return True
else:
res_str = unicode(self[key])
return self.string_match(res_str, find_string, flags) \
is not None
else:
def is_matching(code, find_string, flags):
code = self(key)
return self.string_match(code, find_string, flags) is not None
# List of keys in sgrid in search order
reverse = "UP" in flags
for key in self._sorted_keys(self.keys(), startkey, reverse=reverse):
try:
if is_matching(key, find_string, flags):
return key
except Exception:
# re errors are cryptical: sre_constants,...
pass | Returns a tuple with the position of the next match of find_string
Returns None if string not found.
Parameters:
-----------
startkey: Start position of search
find_string:String to be searched for
flags: List of strings, out of
["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"]
search_result: Bool, defaults to True
\tIf True then the search includes the result string (slower) |
def get_flow(self, name, options=None):
""" Get a primed and readytogo flow coordinator. """
config = self.project_config.get_flow(name)
callbacks = self.callback_class()
coordinator = FlowCoordinator(
self.project_config,
config,
name=name,
options=options,
skip=None,
callbacks=callbacks,
)
return coordinator | Get a primed and readytogo flow coordinator. |
def plot_time_freq(self, mindB=-100, maxdB=None, norm=True,
yaxis_label_position="right"):
"""Plotting method to plot both time and frequency domain results.
See :meth:`plot_frequencies` for the optional arguments.
.. plot::
:width: 80%
:include-source:
from spectrum.window import Window
w = Window(64, name='hamming')
w.plot_time_freq()
"""
from pylab import subplot, gca
subplot(1, 2, 1)
self.plot_window()
subplot(1, 2, 2)
self.plot_frequencies(mindB=mindB, maxdB=maxdB, norm=norm)
if yaxis_label_position=="left":
try: tight_layout()
except: pass
else:
ax = gca()
ax.yaxis.set_label_position("right") | Plotting method to plot both time and frequency domain results.
See :meth:`plot_frequencies` for the optional arguments.
.. plot::
:width: 80%
:include-source:
from spectrum.window import Window
w = Window(64, name='hamming')
w.plot_time_freq() |
def rsky_lhood(self,rsky,**kwargs):
"""
Evaluates Rsky likelihood at provided position(s)
:param rsky:
position
:param **kwargs:
Keyword arguments passed to :func:`BinaryPopulation.rsky_distribution`
"""
dist = self.rsky_distribution(**kwargs)
return dist(rsky) | Evaluates Rsky likelihood at provided position(s)
:param rsky:
position
:param **kwargs:
Keyword arguments passed to :func:`BinaryPopulation.rsky_distribution` |
def get_limits(self):
"""
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
"""
logger.debug("Gathering %s's limits from AWS", self.service_name)
if self.limits:
return self.limits
limits = {}
limits['Trails Per Region'] = AwsLimit(
'Trails Per Region',
self,
5,
self.warning_threshold,
self.critical_threshold,
limit_type=self.aws_type
)
limits['Event Selectors Per Trail'] = AwsLimit(
'Event Selectors Per Trail',
self,
5,
self.warning_threshold,
self.critical_threshold,
limit_type=self.aws_type,
limit_subtype='AWS::CloudTrail::EventSelector'
)
limits['Data Resources Per Trail'] = AwsLimit(
'Data Resources Per Trail',
self,
250,
self.warning_threshold,
self.critical_threshold,
limit_type=self.aws_type,
limit_subtype='AWS::CloudTrail::DataResource'
)
self.limits = limits
return limits | Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict |
def instr(str, substr):
"""
Locate the position of the first occurrence of substr column in the given string.
Returns null if either of the arguments are null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr)) | Locate the position of the first occurrence of substr column in the given string.
Returns null if either of the arguments are null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)] |
def home(self):
""" Homes the robot.
"""
self._log.debug("home")
self._location_cache = None
self._hw_manager.hardware.home() | Homes the robot. |
def read_dynamic_inasafe_field(inasafe_fields, dynamic_field, black_list=None):
"""Helper to read inasafe_fields using a dynamic field.
:param inasafe_fields: inasafe_fields keywords to use.
:type inasafe_fields: dict
:param dynamic_field: The dynamic field to use.
:type dynamic_field: safe.definitions.fields
:param black_list: A list of fields which are conflicting with the dynamic
field. Same field name pattern.
:return: A list of unique value used in this dynamic field.
:return: list
"""
pattern = dynamic_field['key']
pattern = pattern.replace('%s', '')
if black_list is None:
black_list = []
black_list = [field['key'] for field in black_list]
unique_exposure = []
for field_key, name_field in list(inasafe_fields.items()):
if field_key.endswith(pattern) and field_key not in black_list:
unique_exposure.append(field_key.replace(pattern, ''))
return unique_exposure | Helper to read inasafe_fields using a dynamic field.
:param inasafe_fields: inasafe_fields keywords to use.
:type inasafe_fields: dict
:param dynamic_field: The dynamic field to use.
:type dynamic_field: safe.definitions.fields
:param black_list: A list of fields which are conflicting with the dynamic
field. Same field name pattern.
:return: A list of unique value used in this dynamic field.
:return: list |
def init(self, left_end_needle, right_end_needle):
"""Initialize the StartRequest with start and stop needle.
:raises TypeError: if the arguments are not integers
:raises ValueError: if the values do not match the
:ref:`specification <m4-01>`
"""
if not isinstance(left_end_needle, int):
raise TypeError(_left_end_needle_error_message(left_end_needle))
if left_end_needle < 0 or left_end_needle > 198:
raise ValueError(_left_end_needle_error_message(left_end_needle))
if not isinstance(right_end_needle, int):
raise TypeError(_right_end_needle_error_message(right_end_needle))
if right_end_needle < 1 or right_end_needle > 199:
raise ValueError(_right_end_needle_error_message(right_end_needle))
self._left_end_needle = left_end_needle
self._right_end_needle = right_end_needle | Initialize the StartRequest with start and stop needle.
:raises TypeError: if the arguments are not integers
:raises ValueError: if the values do not match the
:ref:`specification <m4-01>` |
def wwpn_free_if_allocated(self, wwpn):
"""
Free a WWPN allocated with :meth:`wwpn_alloc`.
If the WWPN is not currently allocated or not in the pool
range, nothing happens.
Parameters:
WWPN (string): The WWPN as 16 hexadecimal digits.
"""
wwpn_int = int(wwpn[-4:], 16)
self._wwpn_pool.free_if_allocated(wwpn_int) | Free a WWPN allocated with :meth:`wwpn_alloc`.
If the WWPN is not currently allocated or not in the pool
range, nothing happens.
Parameters:
WWPN (string): The WWPN as 16 hexadecimal digits. |
def s(*members: T, meta=None) -> Set[T]:
"""Creates a new set from members."""
return Set(pset(members), meta=meta) | Creates a new set from members. |
def calc_mean_std(c0, c1=[], nonStdZero=False):
""" Calculates both the mean of the data. """
mi = calc_mean(c0, c1)
std = calc_std(c0, c1)
if (nonStdZero):
std[std == 0] = 1
return mi, std | Calculates both the mean of the data. |
def parse_otu_list(lines, precision=0.0049):
"""Parser for mothur *.list file
To ensure all distances are of type float, the parser returns a
distance of 0.0 for the unique groups. However, if some sequences
are very similar, mothur may return a grouping at zero distance.
What Mothur really means by this, however, is that the clustering
is at the level of Mothur's precision. In this case, the parser
returns the distance explicitly.
If you are parsing otu's with a non-default precision, you must
specify the precision here to ensure that the parsed distances are
in order.
Returns an iterator over (distance, otu_list)
"""
for line in lines:
if is_empty(line):
continue
tokens = line.strip().split('\t')
distance_str = tokens.pop(0)
if distance_str.lstrip().lower().startswith('u'):
distance = 0.0
elif distance_str == '0.0':
distance = float(precision)
else:
distance = float(distance_str)
num_otus = int(tokens.pop(0))
otu_list = [t.split(',') for t in tokens]
yield (distance, otu_list) | Parser for mothur *.list file
To ensure all distances are of type float, the parser returns a
distance of 0.0 for the unique groups. However, if some sequences
are very similar, mothur may return a grouping at zero distance.
What Mothur really means by this, however, is that the clustering
is at the level of Mothur's precision. In this case, the parser
returns the distance explicitly.
If you are parsing otu's with a non-default precision, you must
specify the precision here to ensure that the parsed distances are
in order.
Returns an iterator over (distance, otu_list) |
def _get_sghead(expnum):
"""
Use the data web service to retrieve the stephen's astrometric header.
:param expnum: CFHT exposure number you want the header for
:rtype : list of astropy.io.fits.Header objects.
"""
version = 'p'
key = "{}{}".format(expnum, version)
if key in sgheaders:
return sgheaders[key]
url = "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/data/pub/CFHTSG/{}{}.head".format(expnum, version)
logging.getLogger("requests").setLevel(logging.ERROR)
logging.debug("Attempting to retrieve {}".format(url))
resp = requests.get(url)
if resp.status_code != 200:
raise IOError(errno.ENOENT, "Could not get {}".format(url))
header_str_list = re.split('END \n', resp.content)
# # make the first entry in the list a Null
headers = [None]
for header_str in header_str_list:
headers.append(fits.Header.fromstring(header_str, sep='\n'))
logging.debug(headers[-1].get('EXTVER', -1))
sgheaders[key] = headers
return sgheaders[key] | Use the data web service to retrieve the stephen's astrometric header.
:param expnum: CFHT exposure number you want the header for
:rtype : list of astropy.io.fits.Header objects. |
def mutate_add_connection(self, config):
"""
Attempt to add a new connection, the only restriction being that the output
node cannot be one of the network input pins.
"""
possible_outputs = list(iterkeys(self.nodes))
out_node = choice(possible_outputs)
possible_inputs = possible_outputs + config.input_keys
in_node = choice(possible_inputs)
# Don't duplicate connections.
key = (in_node, out_node)
if key in self.connections:
# TODO: Should this be using mutation to/from rates? Hairy to configure...
if config.check_structural_mutation_surer():
self.connections[key].enabled = True
return
# Don't allow connections between two output nodes
if in_node in config.output_keys and out_node in config.output_keys:
return
# No need to check for connections between input nodes:
# they cannot be the output end of a connection (see above).
# For feed-forward networks, avoid creating cycles.
if config.feed_forward and creates_cycle(list(iterkeys(self.connections)), key):
return
cg = self.create_connection(config, in_node, out_node)
self.connections[cg.key] = cg | Attempt to add a new connection, the only restriction being that the output
node cannot be one of the network input pins. |
def is_bare(self):
"""
:data:`True` if the repository has no working tree, :data:`False` if it does.
The value of this property is computed by running the ``hg id`` command
to check whether the special global revision id ``000000000000`` is
reported.
"""
# Make sure the local repository exists.
self.create()
# Check the global revision id of the working tree.
try:
output = self.context.capture('hg', 'id', silent=True)
tokens = output.split()
return int(tokens[0]) == 0
except Exception:
return False | :data:`True` if the repository has no working tree, :data:`False` if it does.
The value of this property is computed by running the ``hg id`` command
to check whether the special global revision id ``000000000000`` is
reported. |
def set_time_function(self, function):
"""
Set time function to be used.
:param function: callable function
:return: Nothing
:raises: ValueError if function is not types.FunctionType.
"""
if isinstance(function, types.FunctionType):
self.get_time = function
else:
raise ValueError("Invalid value for DUT time function") | Set time function to be used.
:param function: callable function
:return: Nothing
:raises: ValueError if function is not types.FunctionType. |
def _match(names):
'''
Since pkg_delete requires the full "pkgname-version" string, this function
will attempt to match the package name with its version. Returns a list of
partial matches and package names that match the "pkgname-version" string
required by pkg_delete, and a list of errors encountered.
'''
pkgs = list_pkgs(versions_as_list=True)
errors = []
# Look for full matches
full_pkg_strings = []
out = __salt__['cmd.run_stdout'](['pkg_info'],
output_loglevel='trace',
python_shell=False)
for line in out.splitlines():
try:
full_pkg_strings.append(line.split()[0])
except IndexError:
continue
full_matches = [x for x in names if x in full_pkg_strings]
# Look for pkgname-only matches
matches = []
ambiguous = []
for name in set(names) - set(full_matches):
cver = pkgs.get(name)
if cver is not None:
if len(cver) == 1:
matches.append('{0}-{1}'.format(name, cver[0]))
else:
ambiguous.append(name)
errors.append(
'Ambiguous package \'{0}\'. Full name/version required. '
'Possible matches: {1}'.format(
name,
', '.join(['{0}-{1}'.format(name, x) for x in cver])
)
)
# Find packages that did not match anything
not_matched = \
set(names) - set(matches) - set(full_matches) - set(ambiguous)
for name in not_matched:
errors.append('Package \'{0}\' not found'.format(name))
return matches + full_matches, errors | Since pkg_delete requires the full "pkgname-version" string, this function
will attempt to match the package name with its version. Returns a list of
partial matches and package names that match the "pkgname-version" string
required by pkg_delete, and a list of errors encountered. |
def CaptureVariableInternal(self, value, depth, limits, can_enqueue=True):
"""Captures a single nameless object into Variable message.
TODO(vlif): safely evaluate iterable types.
TODO(vlif): safely call str(value)
Args:
value: data to capture
depth: nested depth of dictionaries and vectors so far.
limits: Per-object limits for capturing variable data.
can_enqueue: allows referencing the object in variables table.
Returns:
Formatted captured data as per Variable proto.
"""
if depth == limits.max_depth:
return {'varTableIndex': 0} # Buffer full.
if value is None:
self._total_size += 4
return {'value': 'None'}
if isinstance(value, _PRIMITIVE_TYPES):
r = _TrimString(repr(value), # Primitive type, always immutable.
min(limits.max_value_len,
self.max_size - self._total_size))
self._total_size += len(r)
return {'value': r, 'type': type(value).__name__}
if isinstance(value, _DATE_TYPES):
r = str(value) # Safe to call str().
self._total_size += len(r)
return {'value': r, 'type': 'datetime.'+ type(value).__name__}
if isinstance(value, dict):
# Do not use iteritems() here. If GC happens during iteration (which it
# often can for dictionaries containing large variables), you will get a
# RunTimeError exception.
items = [(repr(k), v) for (k, v) in value.items()]
return {'members':
self.CaptureVariablesList(items, depth + 1,
EMPTY_DICTIONARY, limits),
'type': 'dict'}
if isinstance(value, _VECTOR_TYPES):
fields = self.CaptureVariablesList(
(('[%d]' % i, x) for i, x in enumerate(value)),
depth + 1, EMPTY_COLLECTION, limits)
return {'members': fields, 'type': type(value).__name__}
if isinstance(value, types.FunctionType):
self._total_size += len(value.__name__)
# TODO(vlif): set value to func_name and type to 'function'
return {'value': 'function ' + value.__name__}
if isinstance(value, Exception):
fields = self.CaptureVariablesList(
(('[%d]' % i, x) for i, x in enumerate(value.args)),
depth + 1, EMPTY_COLLECTION, limits)
return {'members': fields, 'type': type(value).__name__}
if can_enqueue:
index = self._var_table_index.get(id(value))
if index is None:
index = len(self._var_table)
self._var_table_index[id(value)] = index
self._var_table.append(value)
self._total_size += 4 # number of characters to accommodate a number.
return {'varTableIndex': index}
for pretty_printer in CaptureCollector.pretty_printers:
pretty_value = pretty_printer(value)
if not pretty_value:
continue
fields, object_type = pretty_value
return {'members':
self.CaptureVariablesList(fields, depth + 1, OBJECT_HAS_NO_FIELDS,
limits),
'type': object_type}
if not hasattr(value, '__dict__'):
# TODO(vlif): keep "value" empty and populate the "type" field instead.
r = str(type(value))
self._total_size += len(r)
return {'value': r}
# Add an additional depth for the object itself
items = value.__dict__.items()
if six.PY3:
# Make a list of the iterator in Python 3, to avoid 'dict changed size
# during iteration' errors from GC happening in the middle.
# Only limits.max_list_items + 1 items are copied, anything past that will
# get ignored by CaptureVariablesList().
items = list(itertools.islice(items, limits.max_list_items + 1))
members = self.CaptureVariablesList(items, depth + 2,
OBJECT_HAS_NO_FIELDS, limits)
v = {'members': members}
type_string = DetermineType(value)
if type_string:
v['type'] = type_string
return v | Captures a single nameless object into Variable message.
TODO(vlif): safely evaluate iterable types.
TODO(vlif): safely call str(value)
Args:
value: data to capture
depth: nested depth of dictionaries and vectors so far.
limits: Per-object limits for capturing variable data.
can_enqueue: allows referencing the object in variables table.
Returns:
Formatted captured data as per Variable proto. |
def _new_stream(self, idx):
"""Activate a new stream, given the index into the stream pool.
BaseMux's _new_stream simply chooses a new stream and activates it.
For special behavior (ie Weighted streams), you must override this
in a child class.
Parameters
----------
idx : int, [0:n_streams - 1]
The stream index to replace
"""
# Get the stream index from the candidate pool
stream_index = self.stream_idxs_[idx]
# Activate the Streamer, and get the weights
self.streams_[idx] = self.streamers[stream_index].iterate()
# Reset the sample count to zero
self.stream_counts_[idx] = 0 | Activate a new stream, given the index into the stream pool.
BaseMux's _new_stream simply chooses a new stream and activates it.
For special behavior (ie Weighted streams), you must override this
in a child class.
Parameters
----------
idx : int, [0:n_streams - 1]
The stream index to replace |
def validate_reference(self, reference: ReferenceDefinitionType) -> Optional[Path]:
""" Converts reference to :class:`Path <pathlib.Path>`
:raise ValueError: If ``reference`` can't be converted to :class:`Path <pathlib.Path>`.
"""
if reference is not None:
if isinstance(reference, bytes):
reference = reference.decode("utf-8")
try:
return Path(reference)
except TypeError:
raise ValueError(f"Can't convert reference path {reference} to a pathlib.Path")
return None | Converts reference to :class:`Path <pathlib.Path>`
:raise ValueError: If ``reference`` can't be converted to :class:`Path <pathlib.Path>`. |
def attack_batch(self, imgs, labs):
    """
    Run the attack on a batch of instance and labels.

    Optimizes perturbations in tanh-space and binary-searches the constant
    trading off L2 distortion against the misclassification loss
    (Carlini & Wagner style L2 attack -- confirm against class setup).

    Args:
      imgs: batch of inputs; clipped to [clip_min, clip_max].
      labs: one-hot labels (attack targets if self.TARGETED, else true labels).

    Returns:
      Array of the best (lowest-L2) adversarial examples found; entries with
      no successful attack keep the (clipped) original input.
    """
    def compare(x, y):
        # x is either a logits/score vector or an already-reduced class index.
        if not isinstance(x, (float, int, np.int64)):
            x = np.copy(x)
            # Apply the confidence margin before taking the argmax.
            if self.TARGETED:
                x[y] -= self.CONFIDENCE
            else:
                x[y] += self.CONFIDENCE
            x = np.argmax(x)
        if self.TARGETED:
            return x == y
        else:
            return x != y
    batch_size = self.batch_size
    oimgs = np.clip(imgs, self.clip_min, self.clip_max)
    # re-scale instances to be within range [0, 1]
    imgs = (imgs - self.clip_min) / (self.clip_max - self.clip_min)
    imgs = np.clip(imgs, 0, 1)
    # now convert to [-1, 1]
    imgs = (imgs * 2) - 1
    # convert to tanh-space; the .999999 factor keeps arctanh finite at +/-1
    imgs = np.arctanh(imgs * .999999)
    # set the lower and upper bounds accordingly
    lower_bound = np.zeros(batch_size)
    CONST = np.ones(batch_size) * self.initial_const
    upper_bound = np.ones(batch_size) * 1e10
    # placeholders for the best l2, score, and instance attack found so far
    o_bestl2 = [1e10] * batch_size
    o_bestscore = [-1] * batch_size
    o_bestattack = np.copy(oimgs)
    for outer_step in range(self.BINARY_SEARCH_STEPS):
        # completely reset adam's internal state.
        self.sess.run(self.init)
        batch = imgs[:batch_size]
        batchlab = labs[:batch_size]
        bestl2 = [1e10] * batch_size
        bestscore = [-1] * batch_size
        _logger.debug(" Binary search step %s of %s",
                      outer_step, self.BINARY_SEARCH_STEPS)
        # The last iteration (if we run many steps) repeat the search once.
        if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
            CONST = upper_bound
        # set the variables so that we don't have to send them over again
        self.sess.run(
            self.setup, {
                self.assign_timg: batch,
                self.assign_tlab: batchlab,
                self.assign_const: CONST
            })
        prev = 1e6
        for iteration in range(self.MAX_ITERATIONS):
            # perform the attack
            _, l, l2s, scores, nimg = self.sess.run([
                self.train, self.loss, self.l2dist, self.output,
                self.newimg
            ])
            if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
                _logger.debug((" Iteration {} of {}: loss={:.3g} " +
                               "l2={:.3g} f={:.3g}").format(
                                   iteration, self.MAX_ITERATIONS, l,
                                   np.mean(l2s), np.mean(scores)))
            # check if we should abort search if we're getting nowhere.
            if self.ABORT_EARLY and \
               iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
                if l > prev * .9999:
                    msg = " Failed to make progress; stop early"
                    _logger.debug(msg)
                    break
                prev = l
            # adjust the best result found so far
            for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
                lab = np.argmax(batchlab[e])
                if l2 < bestl2[e] and compare(sc, lab):
                    bestl2[e] = l2
                    bestscore[e] = np.argmax(sc)
                if l2 < o_bestl2[e] and compare(sc, lab):
                    o_bestl2[e] = l2
                    o_bestscore[e] = np.argmax(sc)
                    o_bestattack[e] = ii
        # adjust the constant as needed
        for e in range(batch_size):
            if compare(bestscore[e], np.argmax(batchlab[e])) and \
               bestscore[e] != -1:
                # success, divide const by two
                upper_bound[e] = min(upper_bound[e], CONST[e])
                if upper_bound[e] < 1e9:
                    CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
            else:
                # failure, either multiply by 10 if no solution found yet
                # or do binary search with the known upper bound
                lower_bound[e] = max(lower_bound[e], CONST[e])
                if upper_bound[e] < 1e9:
                    CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                else:
                    CONST[e] *= 10
        _logger.debug(" Successfully generated adversarial examples " +
                      "on {} of {} instances.".format(
                          sum(upper_bound < 1e9), batch_size))
        o_bestl2 = np.array(o_bestl2)
        mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
        _logger.debug(" Mean successful distortion: {:.4g}".format(mean))
    # return the best solution found
    o_bestl2 = np.array(o_bestl2)
    return o_bestattack | Run the attack on a batch of instance and labels.
def write_classes(self, diagram):
    """Write a class diagram: emit one node per object, then the
    specialization, implementation and association edges between them."""
    # sorted to get predictable (hence testable) results
    for i, obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)):
        self.printer.emit_node(i, **self.get_values(obj))
        # Remember the node id so the relationship edges below can refer to it.
        obj.fig_id = i
    # inheritance links
    for rel in diagram.get_relationships("specialization"):
        self.printer.emit_edge(
            rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges
        )
    # implementation links
    for rel in diagram.get_relationships("implements"):
        self.printer.emit_edge(
            rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges
        )
    # generate associations
    for rel in diagram.get_relationships("association"):
        self.printer.emit_edge(
            rel.from_object.fig_id,
            rel.to_object.fig_id,
            label=rel.name,
            **self.association_edges
        ) | write a class diagram
def interpolation_bilinear(x, y, x1, x2, y1, y2, z11, z21, z22, z12):
    '''
    Bilinear interpolation of the value at point (x, y) within one grid cell.

    The points (x_i, y_i) and values z_ij are connected as follows:
    Starting from lower left going in mathematically positive direction, i.e. counter clockwise.
    Therefore: (x1,y1,z11), (x2,y1,z21), (x2,y2,z22), (x1,y2,z12).
    '''
    # Fractional position of (x, y) inside the cell along each axis.
    t = (x - x1) / (x2 - x1)
    s = (y - y1) / (y2 - y1)
    # Standard bilinear weights: each corner is weighted by the area of the
    # sub-rectangle opposite to it.
    v1 = (1.0 - t) * (1.0 - s) * z11
    v2 = t * (1.0 - s) * z21
    v3 = t * s * z22
    v4 = (1.0 - t) * s * z12
    ret = v1 + v2 + v3 + v4
    return ret | The points (x_i, y_i) and values z_ij are connected as follows:
Starting from lower left going in mathematically positive direction, i.e. counter clockwise.
Therefore: (x1,y1,z11), (x2,y1,z21), (x2,y2,z22), (x1,y2,z12). |
def rebind(self, column=None, brew='GnBu'):
    """Bind a new column to the data map
    Parameters
    ----------
    column: str, default None
        Pandas DataFrame column name
    brew: str, default 'GnBu'
        Color brewer abbreviation. See colors.py
    """
    self.data['table'] = Data.keypairs(
        self.raw_data, columns=[self.data_key, column])
    # Cap the upper end of the color domain at the 95th percentile so a few
    # outliers don't wash out the quantized color scale.
    domain = [Data.serialize(self.raw_data[column].min()),
              Data.serialize(self.raw_data[column].quantile(0.95))]
    scale = Scale(name='color', type='quantize', domain=domain,
                  range=brews[brew])
    self.scales['color'] = scale | Bind a new column to the data map
Parameters
----------
column: str, default None
Pandas DataFrame column name
brew: str, default None
Color brewer abbreviation. See colors.py |
def make_gaussian_prf_sources_image(shape, source_table):
    """
    Make an image containing 2D Gaussian sources.
    Parameters
    ----------
    shape : 2-tuple of int
        The shape of the output 2D image.
    source_table : `~astropy.table.Table`
        Table of parameters for the Gaussian sources.  Each row of the
        table corresponds to a Gaussian source whose parameters are
        defined by the column names.  With the exception of ``'flux'``,
        column names that do not match model parameters will be ignored
        (flux will be converted to amplitude).  If both ``'flux'`` and
        ``'amplitude'`` are present, then ``'flux'`` will be ignored.
        Model parameters not defined in the table will be set to the
        default value.
    Returns
    -------
    image : 2D `~numpy.ndarray`
        Image containing 2D Gaussian sources.
    See Also
    --------
    make_model_sources_image, make_random_gaussians_table
    Examples
    --------
    .. plot::
        :include-source:
        # make a table of Gaussian sources
        from astropy.table import Table
        table = Table()
        table['amplitude'] = [50, 70, 150, 210]
        table['x_0'] = [160, 25, 150, 90]
        table['y_0'] = [70, 40, 25, 60]
        table['sigma'] = [15.2, 5.1, 3., 8.1]
        # make an image of the sources without noise, with Gaussian
        # noise, and with Poisson noise
        from photutils.datasets import make_gaussian_prf_sources_image
        from photutils.datasets import make_noise_image
        shape = (100, 200)
        image1 = make_gaussian_prf_sources_image(shape, table)
        image2 = image1 + make_noise_image(shape, type='gaussian', mean=5.,
                                           stddev=5.)
        image3 = image1 + make_noise_image(shape, type='poisson', mean=5.)
        # plot the images
        import matplotlib.pyplot as plt
        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(8, 12))
        ax1.imshow(image1, origin='lower', interpolation='nearest')
        ax1.set_title('Original image')
        ax2.imshow(image2, origin='lower', interpolation='nearest')
        ax2.set_title('Original image with added Gaussian noise'
                      ' ($\\mu = 5, \\sigma = 5$)')
        ax3.imshow(image3, origin='lower', interpolation='nearest')
        ax3.set_title('Original image with added Poisson noise ($\\mu = 5$)')
    """
    model = IntegratedGaussianPRF(sigma=1)
    # Use the table's per-source sigma if given; otherwise the model default.
    if 'sigma' in source_table.colnames:
        sigma = source_table['sigma']
    else:
        sigma = model.sigma.value  # default
    colnames = source_table.colnames
    if 'flux' not in colnames and 'amplitude' in colnames:
        # The PRF model is parameterized by total flux; for a 2D Gaussian,
        # flux = amplitude * 2*pi*sigma**2.  Copy first to avoid mutating
        # the caller's table.
        source_table = source_table.copy()
        source_table['flux'] = (source_table['amplitude'] *
                                (2. * np.pi * sigma * sigma))
    return make_model_sources_image(shape, model, source_table,
                                    oversample=1) | Make an image containing 2D Gaussian sources.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
source_table : `~astropy.table.Table`
Table of parameters for the Gaussian sources. Each row of the
table corresponds to a Gaussian source whose parameters are
defined by the column names. With the exception of ``'flux'``,
column names that do not match model parameters will be ignored
(flux will be converted to amplitude). If both ``'flux'`` and
``'amplitude'`` are present, then ``'flux'`` will be ignored.
Model parameters not defined in the table will be set to the
default value.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing 2D Gaussian sources.
See Also
--------
make_model_sources_image, make_random_gaussians_table
Examples
--------
.. plot::
:include-source:
# make a table of Gaussian sources
from astropy.table import Table
table = Table()
table['amplitude'] = [50, 70, 150, 210]
table['x_0'] = [160, 25, 150, 90]
table['y_0'] = [70, 40, 25, 60]
table['sigma'] = [15.2, 5.1, 3., 8.1]
# make an image of the sources without noise, with Gaussian
# noise, and with Poisson noise
from photutils.datasets import make_gaussian_prf_sources_image
from photutils.datasets import make_noise_image
shape = (100, 200)
image1 = make_gaussian_prf_sources_image(shape, table)
image2 = image1 + make_noise_image(shape, type='gaussian', mean=5.,
stddev=5.)
image3 = image1 + make_noise_image(shape, type='poisson', mean=5.)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(8, 12))
ax1.imshow(image1, origin='lower', interpolation='nearest')
ax1.set_title('Original image')
ax2.imshow(image2, origin='lower', interpolation='nearest')
ax2.set_title('Original image with added Gaussian noise'
' ($\\mu = 5, \\sigma = 5$)')
ax3.imshow(image3, origin='lower', interpolation='nearest')
ax3.set_title('Original image with added Poisson noise ($\\mu = 5$)') |
def list_secrets(self, secure_data_path):
    """Return json secrets based on the secure_data_path, this will list keys in a folder.

    Raises (via throw_if_bad_response) when the Cerberus API returns an
    error status; otherwise returns the decoded JSON listing.
    """
    # Because of the addition of versionId and the way URLs are constructed, secure_data_path should
    # always end in a '/'.
    secure_data_path = self._add_slash(secure_data_path)
    secret_resp = get_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path + '?list=true',
                                 headers=self.HEADERS)
    throw_if_bad_response(secret_resp)
    return secret_resp.json() | Return json secrets based on the secure_data_path, this will list keys in a folder
def run_simulations(self, parameter_list, data_folder):
    """
    This function runs multiple simulations in parallel.

    It is a generator: results are yielded as each simulation finishes,
    in completion order (imap_unordered), not in input order.
    Side effect: stores ``data_folder`` on the instance.

    Args:
        parameter_list (list): list of parameter combinations to simulate.
        data_folder (str): folder in which to create output folders.
    """
    self.data_folder = data_folder
    with Pool(processes=MAX_PARALLEL_PROCESSES) as pool:
        for result in pool.imap_unordered(self.launch_simulation,
                                          parameter_list):
            yield result | This function runs multiple simulations in parallel.
Args:
parameter_list (list): list of parameter combinations to simulate.
data_folder (str): folder in which to create output folders. |
def walk(self):
    """Return view on configured steps slice.

    Precedence: an explicit snapshots slice wins over a timesteps slice;
    with neither configured, only the last snapshot is returned.

    Other Parameters:
        conf.core.snapshots: the slice of snapshots.
        conf.core.timesteps: the slice of timesteps.
    """
    if conf.core.snapshots is not None:
        return self.snaps[conf.core.snapshots]
    elif conf.core.timesteps is not None:
        return self.steps[conf.core.timesteps]
    return self.snaps[-1:] | Return view on configured steps slice.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.core.timesteps: the slice of timesteps. |
def get_nve_vni_switch_bindings(vni, switch_ip):
    """Return the nexus nve binding(s) per switch, or None when not found."""
    LOG.debug("get_nve_vni_switch_bindings() called")
    session = bc.get_reader_session()
    try:
        return (session.query(nexus_models_v2.NexusNVEBinding).
                filter_by(vni=vni, switch_ip=switch_ip).all())
    # NOTE(review): Query.all() returns [] rather than raising
    # NoResultFound (that is raised by .one()), so this handler looks
    # unreachable -- confirm before relying on the None return.
    except sa_exc.NoResultFound:
        return None | Return the nexus nve binding(s) per switch.
def MakeSuiteFromList(t, name=''):
    """Makes a suite from an unsorted sequence of values.
    Args:
        t: sequence of numbers
        name: string name for this suite
    Returns:
        Suite object
    """
    # NOTE(review): `name` is accepted but never used; presumably it should
    # be forwarded to MakeSuiteFromDict -- confirm against its signature.
    hist = MakeHistFromList(t)
    d = hist.GetDict()
    return MakeSuiteFromDict(d) | Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
name: string name for this suite
Returns:
Suite object |
def on_slice(self, node): # ():('lower', 'upper', 'step')
    """Simple slice: evaluate the lower/upper/step sub-nodes and build a
    ``slice`` object from the results."""
    return slice(self.run(node.lower),
                 self.run(node.upper),
                 self.run(node.step)) | Simple slice.
def exception_handler(exc, context):
    """
    Returns the response that should be used for any given exception.
    By default we handle the REST framework `APIException`, and also
    Django's built-in `Http404` and `PermissionDenied` exceptions.
    Any unhandled exceptions may return `None`, which will cause a 500 error
    to be raised.
    """
    if isinstance(exc, exceptions.APIException):
        # Propagate auth challenge / throttling headers when present.
        headers = {}
        if getattr(exc, 'auth_header', None):
            headers['WWW-Authenticate'] = exc.auth_header
        if getattr(exc, 'wait', None):
            headers['Retry-After'] = '%d' % exc.wait
        # Structured detail (validation errors) is passed through as-is;
        # scalar detail is wrapped under a 'message' key.
        if isinstance(exc.detail, (list, dict)):
            data = exc.detail
        else:
            data = {'message': exc.detail}
        set_rollback()
        return Response(data, status=exc.status_code, headers=headers)
    elif isinstance(exc, Http404):
        msg = _('Not found.')
        data = {'message': six.text_type(msg)}
        set_rollback()
        return Response(data, status=status.HTTP_404_NOT_FOUND)
    elif isinstance(exc, PermissionDenied):
        msg = _('Permission denied.')
        data = {'message': six.text_type(msg)}
        set_rollback()
        return Response(data, status=status.HTTP_403_FORBIDDEN)
    # Note: Unhandled exceptions will raise a 500 error.
    return None | Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised. |
def _is_compact_jws(self, jws):
    """
    Check if we've got a compact signed JWT
    :param jws: The message
    :return: True/False
    """
    try:
        jwt = JWSig().unpack(jws)
    except Exception as err:
        # A message that doesn't parse is simply "not a compact JWS".
        logger.warning('Could not parse JWS: {}'.format(err))
        return False
    if "alg" not in jwt.headers:
        return False
    # Normalize a null alg header to the literal "none" algorithm.
    if jwt.headers["alg"] is None:
        jwt.headers["alg"] = "none"
    if jwt.headers["alg"] not in SIGNER_ALGS:
        logger.debug("UnknownSignerAlg: %s" % jwt.headers["alg"])
        return False
    # Side effect: keep the parsed token around for later processing.
    self.jwt = jwt
    return True | Check if we've got a compact signed JWT
:param jws: The message
:return: True/False |
def serialize_parameters(self):
    """
    Get the parameter data in its serialized form.
    Data is serialized by each parameter's :meth:`Parameter.serialize`
    implementation.
    :return: serialized parameter data in the form: ``{<name>: <serial data>, ...}``
    :rtype: :class:`dict`
    """
    # Get parameter data: class_params holds the Parameter descriptors,
    # instance_params holds this instance's current values.
    class_params = self.class_params()
    instance_params = self.params()
    # Serialize each parameter
    serialized = {}
    for name in class_params.keys():
        param = class_params[name]
        value = instance_params[name]
        serialized[name] = param.serialize(value)
    return serialized | Get the parameter data in its serialized form.
Data is serialized by each parameter's :meth:`Parameter.serialize`
implementation.
:return: serialized parameter data in the form: ``{<name>: <serial data>, ...}``
:rtype: :class:`dict` |
def get_comments_are_open(instance):
    """
    Check if comments are open for the instance.
    Returns True when no moderator is registered for the model
    (no restrictions), False when the comments app is not installed.
    """
    if not IS_INSTALLED:
        return False
    try:
        # Get the moderator which is installed for this model.
        mod = moderator._registry[instance.__class__]
    except KeyError:
        # No moderator = no restrictions
        return True
    # Check the 'enable_field', 'auto_close_field' and 'close_after',
    # by reusing the basic Django policies.  The base-class method is called
    # explicitly with the registered moderator as `self`.
    return CommentModerator.allow(mod, None, instance, None) | Check if comments are open for the instance
def create_timeline(self, timeline, scope_identifier, hub_name, plan_id):
    """CreateTimeline.
    :param :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>` timeline:
    :param str scope_identifier: The project GUID to scope the request
    :param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server
    :param str plan_id:
    :rtype: :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>`
    """
    # Only include route values that were actually provided.
    route_values = {}
    if scope_identifier is not None:
        route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str')
    if hub_name is not None:
        route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str')
    if plan_id is not None:
        route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str')
    content = self._serialize.body(timeline, 'Timeline')
    response = self._send(http_method='POST',
                          location_id='83597576-cc2c-453c-bea6-2882ae6a1653',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Timeline', response) | CreateTimeline.
:param :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>` timeline:
:param str scope_identifier: The project GUID to scope the request
:param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server
:param str plan_id:
:rtype: :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>` |
def wait_until_done(self):
    """
    This method will not return until the job is either complete or has
    reached an error state. This queries the server periodically to check
    for an update in status.
    """
    wait = 1
    while True:
        # Sleep first: there is always at least a 1 s delay before the
        # initial status poll.
        time.sleep(wait)
        self.get_info()
        if self.info['status']['isFinished']:
            break
        # implements a crude exponential back off, capped at 60 s per poll
        wait = min(wait * 2, 60) | This method will not return until the job is either complete or has
reached an error state. This queries the server periodically to check
for an update in status. |
def get_config(self):
    """Save configurations of metric. Can be recreated
    from configs with metric.create(``**config``)
    """
    # Copy so the caller can mutate the returned dict without
    # affecting this metric's stored kwargs.
    config = self._kwargs.copy()
    config.update({
        'metric': self.__class__.__name__,
        'name': self.name,
        'output_names': self.output_names,
        'label_names': self.label_names})
    return config | Save configurations of metric. Can be recreated
from configs with metric.create(``**config``) |
def update_name(self, force=False, create_term=False, report_unchanged=True):
    """Generate the Root.Name term from DatasetName, Version, Origin, Time and Space.

    Returns a list of human-readable strings describing what was (or was
    not) changed, for reporting to the user.
    """
    updates = []
    self.ensure_identifier()
    name_term = self.find_first('Root.Name')
    if not name_term:
        # Optionally create a blank Root.Name term; otherwise give up.
        if create_term:
            name_term = self['Root'].new_term('Root.Name','')
        else:
            updates.append("No Root.Name, can't update name")
            return updates
    orig_name = name_term.value
    identifier = self.get_value('Root.Identifier')
    datasetname = self.get_value('Root.Dataset')
    if datasetname:
        # Normal case: derive the name from the identity components.
        name = self._generate_identity_name()
        if name != orig_name or force:
            name_term.value = name
            updates.append("Changed Name")
        else:
            if report_unchanged:
                updates.append("Name did not change")
    elif not orig_name:
        if not identifier:
            updates.append("Failed to find DatasetName term or Identity term. Giving up")
        else:
            updates.append("Setting the name to the identifier")
            name_term.value = identifier
    elif orig_name == identifier:
        if report_unchanged:
            updates.append("Name did not change")
    else:
        # There is no DatasetName, so we can't generate a name, and the Root.Name is not empty, so we should
        # not set it to the identity.
        updates.append("No Root.Dataset, so can't update the name")
    return updates | Generate the Root.Name term from DatasetName, Version, Origin, TIme and Space
def get_container_host_config_kwargs(self, action, container_name, kwargs=None):
    """
    Generates keyword arguments for the Docker client to set up the HostConfig or start a container.
    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``.
    :type container_name: unicode | str | NoneType
    :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    container_map = action.container_map
    container_config = action.config
    client_config = action.client_config
    config_id = action.config_id
    map_name = config_id.map_name
    policy = self._policy
    cname = policy.cname
    supports_volumes = client_config.features['volumes']
    c_kwargs = dict(
        links=[(cname(map_name, l_name), alias or policy.get_hostname(l_name))
               for l_name, alias in container_config.links],
        binds=get_host_binds(container_map, config_id.config_name, container_config, config_id.instance_name,
                             policy, supports_volumes),
        volumes_from=get_volumes_from(container_map, config_id.config_name, container_config,
                                      policy, not supports_volumes),
        port_bindings=get_port_bindings(container_config, client_config),
    )
    # A tuple network_mode names another configured container (config, instance);
    # a plain string (e.g. 'bridge', 'host') is passed through unchanged.
    network_mode = container_config.network_mode
    if isinstance(network_mode, tuple):
        c_kwargs['network_mode'] = 'container:{0}'.format(cname(map_name, *network_mode))
    elif isinstance(network_mode, string_types):
        c_kwargs['network_mode'] = network_mode
    if container_name:
        c_kwargs['container'] = container_name
    # Explicit host_config options and caller kwargs override the computed values.
    update_kwargs(c_kwargs, init_options(container_config.host_config), kwargs)
    return c_kwargs | Generates keyword arguments for the Docker client to set up the HostConfig or start a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``.
:type container_name: unicode | str | NoneType
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict |
def delete(self, path=None, method='DELETE', **options):
    """ Equals :meth:`route` with a ``DELETE`` method parameter. """
    # Thin convenience wrapper; all arguments are forwarded to route().
    return self.route(path, method, **options) | Equals :meth:`route` with a ``DELETE`` method parameter.
def error_name(self) :
    "the error name for a DBUS.MESSAGE_TYPE_ERROR message, or None if unset."
    result = dbus.dbus_message_get_error_name(self._dbobj)
    if result != None :
        # libdbus returns bytes; expose a str to callers.
        result = result.decode()
    #end if
    return \
        result | the error name for a DBUS.MESSAGE_TYPE_ERROR message.
def __create_file_name(self, message_no):
    """ Create the filename to save to: <output_prefix>_<message_no>.xml
    in the current working directory. """
    cwd = os.getcwd()
    filename = '{0}_{1}.xml'.format(self.output_prefix, message_no)
    return os.path.join(cwd, filename) | Create the filename to save to
def trim_wav_sox(in_path: Path, out_path: Path,
                 start_time: int, end_time: int) -> None:
    """ Crops the wav file at in_fn so that the audio between start_time and
    end_time is output to out_fn. Measured in milliseconds.
    No-op when the output file already exists.
    """
    if out_path.is_file():
        logger.info("Output path %s already exists, not trimming file", out_path)
        return
    start_time_secs = millisecs_to_secs(start_time)
    end_time_secs = millisecs_to_secs(end_time)
    # sox trim: the "=" prefix marks an absolute end position rather than
    # a duration relative to the start.
    args = [config.SOX_PATH, str(in_path), str(out_path),
            "trim", str(start_time_secs), "=" + str(end_time_secs)]
    logger.info("Cropping file %s, from start time %d (seconds) to end time %d (seconds), outputting to %s",
                in_path, start_time_secs, end_time_secs, out_path)
    # check=True raises CalledProcessError if sox fails.
    subprocess.run(args, check=True) | Crops the wav file at in_fn so that the audio between start_time and
end_time is output to out_fn. Measured in milliseconds. |
def accept_moderator_invite(self, subreddit):
    """Accept a moderator invite to the given subreddit.
    Callable upon an instance of Subreddit with no arguments.
    :returns: The json response from the server.
    """
    data = {'r': six.text_type(subreddit)}
    # Clear moderated subreddits and cache so the new modship is reflected.
    self.user._mod_subs = None  # pylint: disable=W0212
    self.evict(self.config['my_mod_subreddits'])
    return self.request_json(self.config['accept_mod_invite'], data=data) | Accept a moderator invite to the given subreddit.
Callable upon an instance of Subreddit with no arguments.
:returns: The json response from the server. |
def class_logit(layer, label):
    """Like channel, but for softmax layers.
    Args:
        layer: A layer name string.
        label: Either a string (refering to a label in model.labels) or an int
            label position.
    Returns:
        Objective maximizing a logit.
    """
    # The returned closure captures `layer` and `label`; it is evaluated
    # later against the graph accessor T.
    def inner(T):
        if isinstance(label, int):
            class_n = label
        else:
            # Resolve the label string to its index in the model's label list.
            class_n = T("labels").index(label)
        logits = T(layer)
        # Sum the selected logit over the batch dimension.
        logit = tf.reduce_sum(logits[:, class_n])
        return logit
    return inner | Like channel, but for softmax layers.
Args:
layer: A layer name string.
label: Either a string (refering to a label in model.labels) or an int
label position.
Returns:
Objective maximizing a logit. |
def equal_set(self, a, b):
"See if a and b have the same elements"
if len(a) != len(b):
return 0
if a == b:
return 1
return self.subset(a, b) and self.subset(b, a) | See if a and b have the same elements |
def _eb_env_tags(envs, session_factory, retry):
    """Augment ElasticBeanstalk Environments with their tags.

    Mutates each environment dict in place, adding a 'Tags' key.
    """
    client = local_session(session_factory).client('elasticbeanstalk')
    def process_tags(eb_env):
        try:
            eb_env['Tags'] = retry(
                client.list_tags_for_resource,
                ResourceArn=eb_env['EnvironmentArn'])['ResourceTags']
        except client.exceptions.ResourceNotFoundException:
            # NOTE(review): an environment deleted mid-listing yields None
            # here, so the returned list may contain None entries -- confirm
            # callers tolerate this.
            return
        return eb_env
    # Handle API rate-limiting, which is a problem for accounts with many
    # EB Environments
    return list(map(process_tags, envs)) | Augment ElasticBeanstalk Environments with their tags.
def _encode_ids(*args):
    """
    Do url-encode resource ids and join them with ';'.
    """
    # NOTE(review): Python 2 only -- relies on basestring/unicode/urllib.quote.
    ids = []
    for v in args:
        if isinstance(v, basestring):
            # Unicode strings are UTF-8 encoded before quoting.
            qv = v.encode('utf-8') if isinstance(v, unicode) else v
            ids.append(urllib.quote(qv))
        else:
            # Non-string ids (e.g. ints) are stringified first.
            qv = str(v)
            ids.append(urllib.quote(qv))
    return ';'.join(ids) | Do url-encode resource ids
def fft_propagate(fftfield, d, nm, res, method="helmholtz",
                  ret_fft=False):
    """Propagates a 1D or 2D Fourier transformed field
    Parameters
    ----------
    fftfield : 1-dimensional or 2-dimensional ndarray
        Fourier transform of 1D Electric field component
    d : float
        Distance to be propagated in pixels (negative for backwards)
    nm : float
        Refractive index of medium
    res : float
        Wavelength in pixels
    method : str
        Defines the method of propagation;
        one of
        - "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
        - "fresnel" : paraxial approximation `exp(idk²/kₘ)`
    ret_fft : bool
        Do not perform an inverse Fourier transform and return the field
        in Fourier space.
    Returns
    -------
    Electric field at `d`. If `ret_fft` is True, then the
    Fourier transform of the electric field will be returned (faster).
    """
    fshape = len(fftfield.shape)
    assert fshape in [1, 2], "Dimension of `fftfield` must be 1 or 2."
    if fshape == 1:
        # NOTE(review): despite the names, the _2d function handles 1D fields
        # (2D wave propagation) and _3d handles 2D fields -- confirm.
        func = fft_propagate_2d
    else:
        func = fft_propagate_3d
    # Forward exactly the arguments the selected function declares, pulled
    # from this function's locals by parameter name.
    names = func.__code__.co_varnames[:func.__code__.co_argcount]
    loc = locals()
    vardict = dict()
    for name in names:
        vardict[name] = loc[name]
    return func(**vardict) | Propagates a 1D or 2D Fourier transformed field
Parameters
----------
fftfield : 1-dimensional or 2-dimensional ndarray
Fourier transform of 1D Electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster). |
def bland_altman(x, y, interval=None, indep_conf=None, ax=None, c=None, **kwargs):
    """
    Draw a Bland-Altman plot of x and y data.
    https://en.wikipedia.org/wiki/Bland%E2%80%93Altman_plot
    Parameters
    ----------
    x, y : array-like
        x and y data to compare.
    interval : float
        Percentile band to draw on the residuals.
    indep_conf : float
        Independently determined confidence interval
        to draw on the plot
    ax : matplotlib.axesobject
        The axis on which to draw the plot
    **kwargs
        Passed to ax.scatter

    Returns
    -------
    (fig, ax) when no ``ax`` was supplied; otherwise None.
    """
    ret = False
    if ax is None:
        fig, ax = plt.subplots(1, 1)
        ret = True
    # NaN screening: drop points where either coordinate is NaN.
    ind = ~(np.isnan(x) | np.isnan(y))
    x = x[ind]
    y = y[ind]
    xy_mean = (x + y) / 2
    xy_resid = (y - x)
    ax.scatter(xy_mean, xy_resid, lw=0.5, edgecolor='k', alpha=0.6, c=c, s=15, **kwargs)
    # markup: zero line plus the median of the residuals
    ax.axhline(0, ls='dashed', c='k', alpha=0.6, zorder=-1)
    ax.axhline(np.median(xy_resid), ls='dashed', c=c, alpha=0.8)
    if interval is not None:
        # Convert the central interval (e.g. 0.95) into lower/upper percentiles.
        perc = 100 - interval * 100
        ints = [perc / 2, 100 - perc / 2]
        lims = np.percentile(xy_resid, ints)
        ax.axhspan(*lims, color=c, alpha=0.1, zorder=-3)
    if indep_conf is not None:
        ax.axhspan(-indep_conf, indep_conf, color=(0,0,0,0.1), zorder=-2)
    # labels
    ax.set_ylabel('y - x')
    ax.set_xlabel('mean (x, y)')
    if ret:
        return fig, ax | Draw a Bland-Altman plot of x and y data.
https://en.wikipedia.org/wiki/Bland%E2%80%93Altman_plot
Parameters
----------
x, y : array-like
x and y data to compare.
interval : float
Percentile band to draw on the residuals.
indep_conf : float
Independently determined confidence interval
to draw on the plot
ax : matplotlib.axesobject
The axis on which to draw the plot
**kwargs
Passed to ax.scatter |
def _log_board_ports(self, ports):
    """
    A board with no ports is allowed.
    In the logfile, ports must be sorted
    - ascending by tile identifier (primary)
    - alphabetical by edge direction (secondary)
    :param ports: list of catan.board.Port objects
    """
    # The sort key enforces the (tile_id, direction) ordering required above.
    ports = sorted(ports, key=lambda port: (port.tile_id, port.direction))
    self._logln('ports: {0}'.format(' '.join('{}({} {})'.format(p.type.value, p.tile_id, p.direction)
                                             for p in ports))) | A board with no ports is allowed.
In the logfile, ports must be sorted
- ascending by tile identifier (primary)
- alphabetical by edge direction (secondary)
:param ports: list of catan.board.Port objects |
def _get_binary_from_ipv4(self, ip_addr):
    """Converts IPv4 address to binary form (unsigned 32-bit int)."""
    # "!L" = network byte order, unsigned long (32 bits).
    return struct.unpack("!L", socket.inet_pton(socket.AF_INET,
                                                ip_addr))[0] | Converts IPv4 address to binary form.
def gru_state_tuples(num_nodes, name):
    """Convenience so that the names of the vars are defined in the same file.

    Returns a single-element list of (state name, dtype, size) tuples
    describing the GRU's recurrent state.
    """
    if not isinstance(num_nodes, tf.compat.integral_types):
        raise ValueError('num_nodes must be an integer: %s' % num_nodes)
    return [(STATE_NAME % name + '_0', tf.float32, num_nodes)] | Convenience so that the names of the vars are defined in the same file.
def register(self, src, trg, trg_mask=None, src_mask=None):
    """Pair-wise registration using thunder-registration.

    For details on the model estimation, refer to
    https://github.com/thunder-project/thunder-registration

    Takes two 2D single-channel images and estimates the 2D translation that
    best aligns the pair by maximising the correlation of the images' Fourier
    transforms. Once estimated, the translation can be applied to warp the
    (multi-channel) image and, possibly, to the ground-truth. Different
    interpolation schemes could be more suitable for images and ground-truth
    values (or masks).

    :param src: 2D single channel source moving image
    :param trg: 2D single channel target reference image
    :param src_mask: Mask of source image. Not used in this method.
    :param trg_mask: Mask of target image. Not used in this method.
    :return: Estimated 2D transformation matrix of shape 2x3
    """
    # Estimate the shift between the pair via cross-correlation.
    cross_corr = registration.CrossCorr()
    model = cross_corr.fit(src, reference=trg)
    # Negate the fitted shift to obtain the warp translation (row, col order).
    shift = model.toarray().tolist()[0]
    dy, dx = -shift[0], -shift[1]
    # Assemble the 2x3 affine matrix: identity rotation, estimated translation.
    warp_matrix = np.eye(2, 3)
    warp_matrix[0, 2] = dx
    warp_matrix[1, 2] = dy
    return warp_matrix
For more information on the model estimation, refer to https://github.com/thunder-project/thunder-registration
This function takes two 2D single channel images and estimates a 2D translation that best aligns the pair. The
estimation is done by maximising the correlation of the Fourier transforms of the images. Once, the translation
is estimated, it is applied to the (multi-channel) image to warp and, possibly, ot hte ground-truth. Different
interpolations schemes could be more suitable for images and ground-truth values (or masks).
:param src: 2D single channel source moving image
:param trg: 2D single channel target reference image
:param src_mask: Mask of source image. Not used in this method.
:param trg_mask: Mask of target image. Not used in this method.
:return: Estimated 2D transformation matrix of shape 2x3 |
def refresh_from_server(self):
    """Re-fetch this group's state from the server and update it in place."""
    # Pull the latest copy, then re-run __init__ to overwrite local state.
    latest = self.manager.get(id=self.id)
    self.__init__(self.manager, **latest.data)
def prepped_value(self):
    """
    Returns the value after being processed by the internationalized string
    preparation as specified by RFC 5280

    :return:
        A unicode string
    """
    # Lazily compute and memoize the prepared form on first access.
    prepared = self._prepped
    if prepared is None:
        prepared = self._ldap_string_prep(self['value'].native)
        self._prepped = prepared
    return prepared
preparation as specified by RFC 5280
:return:
A unicode string |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.