def parse_rule(cls, txt):
"""Parse a rule from a string.
See rezconfig.package_filter for an overview of valid strings.
Args:
txt (str): String to parse.
Returns:
`Rule` instance.
"""
types = {"glob": GlobRule,
"regex": RegexRule,
"range": RangeRule,
"before": TimestampRule,
"after": TimestampRule}
# parse form 'x(y)' into x, y
label, txt = Rule._parse_label(txt)
if label is None:
if '*' in txt:
label = "glob"
else:
label = "range"
elif label not in types:
raise ConfigurationError(
"'%s' is not a valid package filter type" % label)
rule_cls = types[label]
txt_ = "%s(%s)" % (label, txt)
try:
rule = rule_cls._parse(txt_)
except Exception as e:
raise ConfigurationError("Error parsing package filter '%s': %s: %s"
% (txt_, e.__class__.__name__, str(e)))
return rule
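A brief usage sketch of the label inference above; the import path and the exact rule strings are assumptions and may differ between rez versions:

from rez.package_filter import Rule  # assumed import path

rule_a = Rule.parse_rule("foo-*")              # '*' present, no label -> glob rule
rule_b = Rule.parse_rule("foo-1.2+")           # plain request, no label -> range rule
rule_c = Rule.parse_rule("after(1605000000)")  # explicit label -> timestamp rule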
def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock:
"""
Returns the requested block as specified by the block header.
"""
vm = self.get_vm(block_header)
return vm.block
def bls_parallel_pfind(
times, mags, errs,
magsarefluxes=False,
startp=0.1, # by default, search from 0.1 d to...
endp=100.0, # ... 100.0 d -- don't search full timebase
stepsize=1.0e-4,
mintransitduration=0.01, # minimum transit length in phase
maxtransitduration=0.4, # maximum transit length in phase
nphasebins=200,
autofreq=True, # figure out f0, nf, and df automatically
nbestpeaks=5,
periodepsilon=0.1, # min fractional difference between 'best' periods
sigclip=10.0,
verbose=True,
nworkers=None,
get_stats=True,
):
'''Runs the Box Least Squares Fitting Search for transit-shaped signals.
Based on eebls.f from Kovacs et al. 2002 and python-bls from Foreman-Mackey
et al. 2015. Breaks up the full frequency space into chunks and passes them
to parallel BLS workers.
NOTE: the combined BLS spectrum produced by this function is not identical
to that produced by running BLS in one shot for the entire frequency
space. There are differences on the order of 1.0e-3 or so in the respective
peak values, but peaks appear at the same frequencies for both methods. This
is likely due to different aliasing caused by smaller chunks of the
frequency space used by the parallel workers in this function. When in
doubt, confirm results for this parallel implementation by comparing to
those from the serial implementation above.
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series to search for transits.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
mintransitduration,maxtransitduration : float
The minimum and maximum transit durations (in units of phase) to consider
for the transit search.
nphasebins : int
The number of phase bins to use in the period search.
autofreq : bool
If this is True, the values of `stepsize` and `nphasebins` will be
ignored, and these, along with a frequency-grid, will be determined
based on the following relations::
nphasebins = int(ceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
stepsize = 0.25*mintransitduration/(times.max()-times.min())
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(ceil((maxfreq - minfreq)/stepsize))
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
nworkers : int or None
The number of parallel workers to launch for period-search. If None,
nworkers = NCPUS.
get_stats : bool
If True, runs :py:func:`.bls_stats_singleperiod` for each of the best
periods in the output and injects the output into the output dict so you
only have to run this function to get the periods and their stats.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'stats': list of stats dicts returned for each best period,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'blsresult': list of result dicts from eebls.f wrapper functions,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'nphasebins': the actual nphasebins used,
'mintransitduration': the input mintransitduration,
'maxtransitduration': the input maxtransitduration,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# if we're setting up everything automatically
if autofreq:
# figure out the best number of phasebins to use
nphasebins = int(npceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
# use heuristic to figure out best timestep
stepsize = 0.25*mintransitduration/(stimes.max()-stimes.min())
# now figure out the frequencies to use
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = True: using AUTOMATIC values for '
'freq stepsize: %s, nphasebins: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, nphasebins,
mintransitduration, maxtransitduration))
else:
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = False: using PROVIDED values for '
'freq stepsize: %s, nphasebins: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, nphasebins,
mintransitduration, maxtransitduration))
# check the minimum frequency
if minfreq < (1.0/(stimes.max() - stimes.min())):
minfreq = 2.0/(stimes.max() - stimes.min())
if verbose:
LOGWARNING('the requested max P = %.3f is larger than '
'the time base of the observations = %.3f, '
' will make minfreq = 2 x 1/timebase'
% (endp, stimes.max() - stimes.min()))
LOGINFO('new minfreq: %s, maxfreq: %s' %
(minfreq, maxfreq))
#############################
## NOW RUN BLS IN PARALLEL ##
#############################
# fix number of CPUs if needed
if not nworkers or nworkers > NCPUS:
nworkers = NCPUS
if verbose:
LOGINFO('using %s workers...' % nworkers)
# the frequencies array to be searched
frequencies = minfreq + nparange(nfreq)*stepsize
# break up the tasks into chunks
csrem = int(fmod(nfreq, nworkers))
csint = int(float(nfreq/nworkers))
chunk_minfreqs, chunk_nfreqs = [], []
for x in range(nworkers):
this_minfreqs = frequencies[x*csint]
# handle usual nfreqs
if x < (nworkers - 1):
this_nfreqs = frequencies[x*csint:x*csint+csint].size
else:
this_nfreqs = frequencies[x*csint:x*csint+csint+csrem].size
chunk_minfreqs.append(this_minfreqs)
chunk_nfreqs.append(this_nfreqs)
# populate the tasks list
tasks = [(stimes, smags,
chunk_minf, chunk_nf,
stepsize, nphasebins,
mintransitduration, maxtransitduration)
for (chunk_nf, chunk_minf)
in zip(chunk_nfreqs, chunk_minfreqs)]
if verbose:
for ind, task in enumerate(tasks):
LOGINFO('worker %s: minfreq = %.6f, nfreqs = %s' %
(ind+1, task[2], task[3]))
LOGINFO('running...')
# return tasks
# start the pool
pool = Pool(nworkers)
results = pool.map(_parallel_bls_worker, tasks)
pool.close()
pool.join()
del pool
# now concatenate the output lsp arrays
lsp = npconcatenate([x['power'] for x in results])
periods = 1.0/frequencies
# find the nbestpeaks for the periodogram: 1. sort the lsp array
# by highest value first 2. go down the values until we find
# five values that are separated by at least periodepsilon in
# period
# make sure to get only the finite peaks in the periodogram
# this is needed because BLS may produce infs for some peaks
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp)
except ValueError:
LOGERROR('no finite periodogram values '
'for this mag series, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'blsresult':None,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
sortedlspind = npargsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
nbestperiods, nbestlspvals, peakcount = (
[finperiods[bestperiodind]],
[finlsp[bestperiodind]],
1
)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for period, lspval in zip(sortedlspperiods, sortedlspvals):
if peakcount == nbestpeaks:
break
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entirely different
# peak in the periodogram
if (perioddiff > (periodepsilon*prevperiod) and
all(x > (periodepsilon*period) for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
peakcount = peakcount + 1
prevperiod = period
# generate the return dict
resultdict = {
'bestperiod':finperiods[bestperiodind],
'bestlspval':finlsp[bestperiodind],
'nbestpeaks':nbestpeaks,
'nbestlspvals':nbestlspvals,
'nbestperiods':nbestperiods,
'lspvals':lsp,
'frequencies':frequencies,
'periods':periods,
'blsresult':results,
'stepsize':stepsize,
'nfreq':nfreq,
'nphasebins':nphasebins,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}
}
# get stats if requested
if get_stats:
resultdict['stats'] = []
for bp in nbestperiods.copy():
if verbose:
LOGINFO("Getting stats for best period: %.6f" % bp)
this_pstats = bls_stats_singleperiod(
times, mags, errs, bp,
magsarefluxes=resultdict['kwargs']['magsarefluxes'],
sigclip=resultdict['kwargs']['sigclip'],
nphasebins=resultdict['nphasebins'],
mintransitduration=resultdict['mintransitduration'],
maxtransitduration=resultdict['maxtransitduration'],
verbose=verbose,
)
resultdict['stats'].append(this_pstats)
return resultdict
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'blsresult':None,
'stepsize':stepsize,
'nfreq':None,
'nphasebins':None,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'nphasebins':nphasebins,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
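A minimal call sketch on synthetic data; astrobase must be installed, and the import path below is an assumption that may differ between versions:

import numpy as np
from astrobase.periodbase.kbls import bls_parallel_pfind  # assumed module path

times = np.sort(np.random.uniform(0.0, 90.0, size=5000))
mags = 12.0 + 0.005 * np.random.randn(times.size)
mags[(times % 3.217) < 0.05] += 0.02   # toy box-shaped dimming every ~3.2 days
errs = np.full_like(mags, 0.005)

lspinfo = bls_parallel_pfind(times, mags, errs,
                             startp=1.0, endp=10.0,
                             nworkers=4, get_stats=False)
print(lspinfo['bestperiod'], lspinfo['nbestperiods'])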
def post(self, request, bot_id, format=None):
"""
Add a new environment variable
---
serializer: EnvironmentVarSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
return super(EnvironmentVarList, self).post(request, bot_id, format)
def crossing_times(ts, c=0.0, d=0.0):
"""For a single variable timeseries, find the times at which the
value crosses ``c`` from above or below. Can optionally set a non-zero
``d`` to impose the condition that the value must wander at least ``d``
units away from ``c`` between crossings.
If the timeseries begins (or ends) exactly at ``c``, then time zero
(or the ending time) is also included as a crossing event,
so that the boundaries of the first and last excursions are included.
If the actual crossing time falls between two time steps, linear
interpolation is used to estimate the crossing time.
Args:
ts: Timeseries (single variable)
c (float): Critical value at which to report crossings.
d (float): Optional min distance from c to be attained between crossings.
Returns:
array of float
"""
#TODO support multivariate time series
ts = ts.squeeze()
if ts.ndim != 1:
raise ValueError('Currently can only use on single variable timeseries')
# Translate to put the critical value at zero:
ts = ts - c
tsa = ts[0:-1]
tsb = ts[1:]
# Time indices where phase crosses or reaches zero from below or above
zc = np.nonzero((tsa < 0) & (tsb >= 0) | (tsa > 0) & (tsb <= 0))[0] + 1
# Estimate crossing time interpolated linearly within a single time step
va = ts[zc-1]
vb = ts[zc]
ct = (np.abs(vb)*ts.tspan[zc-1] +
np.abs(va)*ts.tspan[zc]) / np.abs(vb - va) # denominator always !=0
# Also include starting time if we started exactly at zero
if ts[0] == 0.0:
zc = np.r_[np.array([0]), zc]
ct = np.r_[np.array([ts.tspan[0]]), ct]
if d == 0.0 or ct.shape[0] == 0:
return ct
# Time indices where value crosses c+d or c-d:
dc = np.nonzero((tsa < d) & (tsb >= d) | (tsa > -d) & (tsb <= -d))[0] + 1
# Select those zero-crossings separated by at least one d-crossing
splice = np.searchsorted(dc, zc)
which_zc = np.r_[np.array([0]), np.nonzero(splice[0:-1] - splice[1:])[0] +1]
return ct[which_zc]
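To make the interpolation step above concrete, here is a tiny standalone check of the weighted-average crossing-time formula (plain Python, no Timeseries class needed):

# the value crosses zero between t=1.0 (va=-0.2) and t=1.5 (vb=0.3)
va, vb, ta, tb = -0.2, 0.3, 1.0, 1.5
ct = (abs(vb) * ta + abs(va) * tb) / abs(vb - va)
print(ct)  # 1.2, i.e. 40% of the way through the step, matching |va| / (|va| + |vb|)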
def isentropic_exponent(self):
r'''Gas-phase ideal-gas isentropic exponent of the chemical at its
current temperature, [dimensionless]. Does not include
pressure-compensation from an equation of state.
Examples
--------
>>> Chemical('hydrogen').isentropic_exponent
1.405237786321222
'''
Cp, Cv = self.Cpg, self.Cvg
if all((Cp, Cv)):
return isentropic_exponent(Cp, Cv)
return None
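A back-of-the-envelope check of the docstring value under the ideal-gas assumption; the Cp figure below is an approximate, assumed value for hydrogen near 298 K:

R = 8.314462618   # J/(mol*K)
Cpg = 28.836      # assumed ideal-gas Cp of hydrogen near 298 K, J/(mol*K)
Cvg = Cpg - R     # ideal gas: Cv = Cp - R
print(Cpg / Cvg)  # ~1.405, consistent with the example above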
def check_inasafe_fields(layer, keywords_only=False):
"""Helper to check inasafe_fields.
:param layer: The layer to check.
:type layer: QgsVectorLayer
:param keywords_only: If we should check from the keywords only. False by
default, we will check also from the layer.
:type keywords_only: bool
:return: Return True if the layer is valid.
:rtype: bool
:raises: Exception with a message if the layer is not correct.
"""
inasafe_fields = layer.keywords['inasafe_fields']
real_fields = [field.name() for field in layer.fields().toList()]
inasafe_fields_flat = []
for value in list(inasafe_fields.values()):
if isinstance(value, list):
inasafe_fields_flat.extend(value)
else:
inasafe_fields_flat.append(value)
difference = set(inasafe_fields_flat).difference(real_fields)
if len(difference):
message = tr(
'inasafe_fields has more fields than the layer %s itself : %s'
% (layer.keywords['layer_purpose'], difference))
raise InvalidLayerError(message)
if keywords_only:
# We don't check if it's valid from the layer. The layer may have more
# fields than inasafe_fields.
return True
difference = set(real_fields).difference(inasafe_fields_flat)
if len(difference):
message = tr(
'The layer %s has more fields than inasafe_fields : %s'
% (layer.title(), difference))
raise InvalidLayerError(message)
return True
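A plain-Python sketch of the two set-difference checks above, with made-up field names standing in for the QGIS layer and keywords:

inasafe_fields = {'female_ratio_field': 'ratio_female', 'size_field': 'size'}
real_fields = ['ratio_female', 'size', 'exposure_class']

inasafe_fields_flat = []
for value in inasafe_fields.values():
    if isinstance(value, list):
        inasafe_fields_flat.extend(value)
    else:
        inasafe_fields_flat.append(value)

print(set(inasafe_fields_flat).difference(real_fields))  # set(): all keyword fields exist in the layer
print(set(real_fields).difference(inasafe_fields_flat))  # {'exposure_class'}: layer has an extra field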
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
while True:
try:
return self._sslobj.do_handshake()
except SSLError:
ex = sys.exc_info()[1]
if ex.args[0] == SSL_ERROR_WANT_READ:
if self.timeout == 0.0:
raise
six.exc_clear()
self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout)
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
if self.timeout == 0.0:
raise
six.exc_clear()
self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout)
else:
raise
def check_path_traversal(path, user='root', skip_perm_errors=False):
'''
Walk from the root up to a directory and verify that the current
user has access to read each directory. This is used for making
sure a user can read all parent directories of the minion's key
before trying to go and generate a new key and raising an IOError
'''
for tpath in list_path_traversal(path):
if not os.access(tpath, os.R_OK):
msg = 'Could not access {0}.'.format(tpath)
if not os.path.exists(tpath):
msg += ' Path does not exist.'
else:
current_user = salt.utils.user.get_user()
# Make the error message more intelligent based on how
# the user invokes salt-call or whatever other script.
if user != current_user:
msg += ' Try running as user {0}.'.format(user)
else:
msg += ' Please give {0} read permissions.'.format(user)
# We don't need to bail on config file permission errors
# if the CLI
# process is run with the -a flag
if skip_perm_errors:
return
# Propagate this exception up so there isn't a sys.exit()
# in the middle of code that could be imported elsewhere.
raise SaltClientError(msg)
def hybrid_forward(self, F, scores, target_dists, finished, best_hyp_indices):
"""
Choose an extension of each hypothesis from its softmax distribution.
:param scores: Vocabulary scores for the next beam step. (batch_size * beam_size, target_vocabulary_size)
:param target_dists: The non-cumulative target distributions (ignored).
:param finished: The list of finished hypotheses.
:param best_hyp_indices: Best hypothesis indices constant.
:return: The row indices, column indices, and values of the sampled words.
"""
# Map the negative logprobs to probabilities so as to have a distribution
target_dists = F.exp(-target_dists)
# n == 0 means sample from the full vocabulary. Otherwise, we sample from the top n.
if self.n != 0:
# select the top n in each row, via a mask
masked_items = F.topk(target_dists, k=self.n, ret_typ='mask', axis=1, is_ascend=False)
# set unmasked items to 0
masked_items = F.where(masked_items, target_dists, masked_items)
# renormalize
target_dists = F.broadcast_div(masked_items, F.sum(masked_items, axis=1, keepdims=True))
# Sample from the target distributions over words, then get the corresponding values from the cumulative scores
best_word_indices = F.random.multinomial(target_dists, get_prob=False)
# Zeroes for finished hypotheses.
best_word_indices = F.where(finished, F.zeros_like(best_word_indices), best_word_indices)
values = F.pick(scores, best_word_indices, axis=1, keepdims=True)
best_hyp_indices = F.slice_like(best_hyp_indices, best_word_indices, axes=(0,))
return best_hyp_indices, best_word_indices, values
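A plain-NumPy sketch of the top-n truncation and renormalization step (this mirrors the masking logic above but is not the MXNet hybrid code itself):

import numpy as np

def topn_renormalize(dist, n):
    # keep only the n largest probabilities per row, zero out the rest, renormalize
    out = np.zeros_like(dist)
    idx = np.argsort(dist, axis=1)[:, -n:]
    np.put_along_axis(out, idx, np.take_along_axis(dist, idx, axis=1), axis=1)
    return out / out.sum(axis=1, keepdims=True)

dist = np.array([[0.5, 0.3, 0.15, 0.05]])
print(topn_renormalize(dist, 2))  # [[0.625 0.375 0.    0.   ]]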
def update_views(self):
"""Update stats views."""
# Call the father's method
super(Plugin, self).update_views()
# Add specifics informations
# Alert
for i in self.stats:
ifrealname = i['interface_name'].split(':')[0]
# Convert rate in bps ( to be able to compare to interface speed)
bps_rx = int(i['rx'] // i['time_since_update'] * 8)
bps_tx = int(i['tx'] // i['time_since_update'] * 8)
# Decorate the bitrate with the configuration file thresholds
alert_rx = self.get_alert(bps_rx, header=ifrealname + '_rx')
alert_tx = self.get_alert(bps_tx, header=ifrealname + '_tx')
# If nothing is defined in the configuration file...
# ... then use the interface speed (not available on all systems)
if alert_rx == 'DEFAULT' and 'speed' in i and i['speed'] != 0:
alert_rx = self.get_alert(current=bps_rx,
maximum=i['speed'],
header='rx')
if alert_tx == 'DEFAULT' and 'speed' in i and i['speed'] != 0:
alert_tx = self.get_alert(current=bps_tx,
maximum=i['speed'],
header='tx')
# then decorates
self.views[i[self.get_key()]]['rx']['decoration'] = alert_rx
self.views[i[self.get_key()]]['tx']['decoration'] = alert_tx
def overlay_gateway_attach_vlan_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
attach = ET.SubElement(overlay_gateway, "attach")
vlan = ET.SubElement(attach, "vlan")
vid_key = ET.SubElement(vlan, "vid")
vid_key.text = kwargs.pop('vid')
mac = ET.SubElement(vlan, "mac")
mac.text = kwargs.pop('mac')
callback = kwargs.pop('callback', self._callback)
return callback(config)
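A standalone sketch of the XML payload this builder produces, using only ElementTree and placeholder values (the gateway name, VLAN id and MAC below are made up):

import xml.etree.ElementTree as ET

config = ET.Element("config")
gw = ET.SubElement(config, "overlay-gateway",
                   xmlns="urn:brocade.com:mgmt:brocade-tunnels")
ET.SubElement(gw, "name").text = "gw1"
vlan = ET.SubElement(ET.SubElement(gw, "attach"), "vlan")
ET.SubElement(vlan, "vid").text = "100"
ET.SubElement(vlan, "mac").text = "0011.2233.4455"
print(ET.tostring(config, encoding="unicode"))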
def get_operation_root_type(
schema: GraphQLSchema,
operation: Union[OperationDefinitionNode, OperationTypeDefinitionNode],
) -> GraphQLObjectType:
"""Extract the root type of the operation from the schema."""
operation_type = operation.operation
if operation_type == OperationType.QUERY:
query_type = schema.query_type
if not query_type:
raise GraphQLError(
"Schema does not define the required query root type.", operation
)
return query_type
elif operation_type == OperationType.MUTATION:
mutation_type = schema.mutation_type
if not mutation_type:
raise GraphQLError("Schema is not configured for mutations.", operation)
return mutation_type
elif operation_type == OperationType.SUBSCRIPTION:
subscription_type = schema.subscription_type
if not subscription_type:
raise GraphQLError("Schema is not configured for subscriptions.", operation)
return subscription_type
else:
raise GraphQLError(
"Can only have query, mutation and subscription operations.", operation
)
def _make_A_and_part_of_b_adjacent(self, ref_crds):
"""
Make A and part of b. See docstring of this class
for answer to "What are A and b?"
"""
rot = self._rotate_rows(ref_crds)
A = 2*(rot - ref_crds)
partial_b = (rot**2 - ref_crds**2).sum(1)
return A, partial_b
def get(self, request, *args, **kwargs):
""" return profile of current user if authenticated otherwise 401 """
serializer = self.serializer_reader_class
if request.user.is_authenticated():
return Response(serializer(request.user, context=self.get_serializer_context()).data)
else:
return Response({'detail': _('Authentication credentials were not provided')}, status=401)
def register_classes():
"""Register these classes with the `LinkFactory` """
Gtlink_exphpsun.register_class()
Gtlink_suntemp.register_class()
Gtexphpsun_SG.register_class()
Gtsuntemp_SG.register_class()
SunMoonChain.register_class()
def select_paths(cls, dataset, selection):
"""
Allows selecting paths with usual NumPy slicing index.
"""
return [s[0] for s in np.array([{0: p} for p in dataset.data])[selection]]
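The dict-wrapping trick above can be illustrated with plain NumPy; wrapping each path in a one-element dict forces an object array, so any NumPy-style index (slice, fancy, boolean) can be applied to otherwise arbitrary path objects:

import numpy as np

paths = ['p0', 'p1', 'p2', 'p3']
wrapped = np.array([{0: p} for p in paths])   # object array of single-entry dicts
print([s[0] for s in wrapped[1:3]])           # ['p1', 'p2']
print([s[0] for s in wrapped[[0, 3]]])        # ['p0', 'p3']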
def one_storage_per_feeder(edisgo, storage_timeseries,
storage_nominal_power=None, **kwargs):
"""
Allocates the given storage capacity to multiple smaller storages.
For each feeder with load or voltage issues it is checked if integrating a
storage will reduce peaks in the feeder, starting with the feeder with
the highest theoretical grid expansion costs. A heuristic approach is used
to estimate storage sizing and siting while storage operation is carried
over from the given storage operation.
Parameters
-----------
edisgo : :class:`~.grid.network.EDisGo`
storage_timeseries : :pandas:`pandas.DataFrame<dataframe>`
Total active and reactive power time series that will be allocated to
the smaller storages in feeders with load or voltage issues. Columns of
the dataframe are 'p' containing active power time series in kW and 'q'
containing the reactive power time series in kvar. Index is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
storage_nominal_power : :obj:`float` or None
Nominal power in kW that will be allocated to the smaller storages in
feeders with load or voltage issues. If no nominal power is provided
the maximum active power given in `storage_timeseries` is used.
Default: None.
debug : :obj:`Boolean`, optional
If debug is True, a dataframe with storage size and path to storage of
all installed and possibly discarded storages is saved to a csv file
and a plot with all storage positions is created and saved, both to the
current working directory with filename `storage_results_{MVgrid_id}`.
Default: False.
check_costs_reduction : :obj:`Boolean` or :obj:`str`, optional
This parameter specifies when and whether it should be checked if a
storage reduced grid expansion costs or not. It can be used as a safety
check but can be quite time consuming. Possible options are:
* 'each_feeder'
Costs reduction is checked for each feeder. If the storage did not
reduce grid expansion costs it is discarded.
* 'once'
Costs reduction is checked after the total storage capacity is
allocated to the feeders. If the storages did not reduce grid
expansion costs they are all discarded.
* False
Costs reduction is never checked.
Default: False.
"""
def _feeder_ranking(grid_expansion_costs):
"""
Get feeder ranking from grid expansion costs DataFrame.
MV feeders are ranked descending by grid expansion costs that are
attributed to that feeder.
Parameters
----------
grid_expansion_costs : :pandas:`pandas.DataFrame<dataframe>`
grid_expansion_costs DataFrame from :class:`~.grid.network.Results`
of the copied edisgo object.
Returns
-------
:pandas:`pandas.Series<series>`
Series with ranked MV feeders (in the copied graph) of type
:class:`~.grid.components.Line`. Feeders are ranked by total grid
expansion costs of all measures conducted in the feeder. The
feeder with the highest costs is in the first row and the feeder
with the lowest costs in the last row.
"""
return grid_expansion_costs.groupby(
['mv_feeder'], sort=False).sum().reset_index().sort_values(
by=['total_costs'], ascending=False)['mv_feeder']
def _shortest_path(node):
if isinstance(node, LVStation):
return len(nx.shortest_path(
node.mv_grid.graph, node.mv_grid.station, node))
else:
return len(nx.shortest_path(
node.grid.graph, node.grid.station, node))
def _find_battery_node(edisgo, critical_lines_feeder,
critical_nodes_feeder):
"""
Evaluates where to install the storage.
Parameters
-----------
edisgo : :class:`~.grid.network.EDisGo`
The original edisgo object.
critical_lines_feeder : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines in MV feeder, their maximum
relative over-loading and the corresponding time step. See
:func:`edisgo.flex_opt.check_tech_constraints.mv_line_load` for
more information.
critical_nodes_feeder : :obj:`list`
List with all nodes in MV feeder with voltage issues.
Returns
-------
:obj:`~.grid.components.Component` or None
Node where the storage is installed, or None if the feeder has neither overloading nor voltage issues.
"""
# if there are overloaded lines in the MV feeder the battery storage
# will be installed at the node farthest away from the MV station
if not critical_lines_feeder.empty:
logger.debug("Storage positioning due to overload.")
# dictionary with nodes and their corresponding path length to
# MV station
path_length_dict = {}
for l in critical_lines_feeder.index:
nodes = l.grid.graph.nodes_from_line(l)
for node in nodes:
path_length_dict[node] = _shortest_path(node)
# return node farthest away
return [_ for _ in path_length_dict.keys()
if path_length_dict[_] == max(
path_length_dict.values())][0]
# if there are voltage issues in the MV grid the battery storage will
# be installed at the first node in path that exceeds 2/3 of the line
# length from station to critical node with highest voltage deviation
if critical_nodes_feeder:
logger.debug("Storage positioning due to voltage issues.")
node = critical_nodes_feeder[0]
# get path length from station to critical node
get_weight = lambda u, v, data: data['line'].length
path_length = dijkstra_shortest_path_length(
edisgo.network.mv_grid.graph,
edisgo.network.mv_grid.station,
get_weight, target=node)
# find first node in path that exceeds 2/3 of the line length
# from station to critical node farthest away from the station
path = nx.shortest_path(edisgo.network.mv_grid.graph,
edisgo.network.mv_grid.station,
node)
return next(j for j in path
if path_length[j] >= path_length[node] * 2 / 3)
return None
def _calc_storage_size(edisgo, feeder, max_storage_size):
"""
Calculates storage size that reduces residual load.
Parameters
-----------
edisgo : :class:`~.grid.network.EDisGo`
The original edisgo object.
feeder : :class:`~.grid.components.Line`
MV feeder the storage will be connected to. The line object is an
object from the copied graph.
Returns
-------
:obj:`float`
Storage size that reduced the residual load in the feeder.
"""
step_size = 200
sizes = [0] + list(np.arange(
p_storage_min, max_storage_size + 0.5 * step_size, step_size))
p_feeder = edisgo.network.results.pfa_p.loc[:, repr(feeder)]
q_feeder = edisgo.network.results.pfa_q.loc[:, repr(feeder)]
p_slack = edisgo.network.pypsa.generators_t.p.loc[
:, 'Generator_slack'] * 1e3
# get sign of p and q
l = edisgo.network.pypsa.lines.loc[repr(feeder), :]
mv_station_bus = 'bus0' if l.loc['bus0'] == 'Bus_{}'.format(
repr(edisgo.network.mv_grid.station)) else 'bus1'
if mv_station_bus == 'bus0':
diff = edisgo.network.pypsa.lines_t.p1.loc[:, repr(feeder)] - \
edisgo.network.pypsa.lines_t.p0.loc[:, repr(feeder)]
diff_q = edisgo.network.pypsa.lines_t.q1.loc[:, repr(feeder)] - \
edisgo.network.pypsa.lines_t.q0.loc[:, repr(feeder)]
else:
diff = edisgo.network.pypsa.lines_t.p0.loc[:, repr(feeder)] - \
edisgo.network.pypsa.lines_t.p1.loc[:, repr(feeder)]
diff_q = edisgo.network.pypsa.lines_t.q0.loc[:, repr(feeder)] - \
edisgo.network.pypsa.lines_t.q1.loc[:, repr(feeder)]
p_sign = pd.Series([-1 if _ < 0 else 1 for _ in diff],
index=p_feeder.index)
q_sign = pd.Series([-1 if _ < 0 else 1 for _ in diff_q],
index=p_feeder.index)
# get allowed load factors per case
lf = {'feedin_case': edisgo.network.config[
'grid_expansion_load_factors']['mv_feedin_case_line'],
'load_case': network.config[
'grid_expansion_load_factors']['mv_load_case_line']}
# calculate maximum apparent power for each storage size to find
# storage size that minimizes apparent power in the feeder
p_feeder = p_feeder.multiply(p_sign)
q_feeder = q_feeder.multiply(q_sign)
s_max = []
for size in sizes:
share = size / storage_nominal_power
p_storage = storage_timeseries.p * share
q_storage = storage_timeseries.q * share
p_total = p_feeder + p_storage
q_total = q_feeder + q_storage
p_hv_mv_station = p_slack - p_storage
lf_ts = p_hv_mv_station.apply(
lambda _: lf['feedin_case'] if _ < 0 else lf['load_case'])
s_max_ts = (p_total ** 2 + q_total ** 2).apply(
sqrt).divide(lf_ts)
s_max.append(max(s_max_ts))
return sizes[pd.Series(s_max).idxmin()]
def _critical_nodes_feeder(edisgo, feeder):
"""
Returns all nodes in MV feeder with voltage issues.
Parameters
-----------
edisgo : :class:`~.grid.network.EDisGo`
The original edisgo object.
feeder : :class:`~.grid.components.Line`
MV feeder the storage will be connected to. The line object is an
object from the copied graph.
Returns
-------
:obj:`list`
List with all nodes in MV feeder with voltage issues.
"""
# get all nodes with voltage issues in MV grid
critical_nodes = check_tech_constraints.mv_voltage_deviation(
edisgo.network, voltage_levels='mv')
if critical_nodes:
critical_nodes = critical_nodes[edisgo.network.mv_grid]
else:
return []
# filter nodes with voltage issues in feeder
critical_nodes_feeder = []
for n in critical_nodes.index:
if repr(n.mv_feeder) == repr(feeder):
critical_nodes_feeder.append(n)
return critical_nodes_feeder
def _critical_lines_feeder(edisgo, feeder):
"""
Returns all lines in MV feeder with overload issues.
Parameters
-----------
edisgo : :class:`~.grid.network.EDisGo`
The original edisgo object.
feeder : :class:`~.grid.components.Line`
MV feeder the storage will be connected to. The line object is an
object from the copied graph.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines in MV feeder, their maximum
relative over-loading and the corresponding time step. See
:func:`edisgo.flex_opt.check_tech_constraints.mv_line_load` for
more information.
"""
# get all overloaded MV lines
critical_lines = check_tech_constraints.mv_line_load(edisgo.network)
# filter overloaded lines in feeder
critical_lines_feeder = []
for l in critical_lines.index:
if repr(tools.get_mv_feeder_from_line(l)) == repr(feeder):
critical_lines_feeder.append(l)
return critical_lines.loc[critical_lines_feeder, :]
def _estimate_new_number_of_lines(critical_lines_feeder):
number_parallel_lines = 0
for crit_line in critical_lines_feeder.index:
number_parallel_lines += ceil(critical_lines_feeder.loc[
crit_line, 'max_rel_overload'] * crit_line.quantity) - \
crit_line.quantity
return number_parallel_lines
debug = kwargs.get('debug', False)
check_costs_reduction = kwargs.get('check_costs_reduction', False)
# global variables
# minimum and maximum storage power to be connected to the MV grid
p_storage_min = 300
p_storage_max = 4500
# remaining storage nominal power
if storage_nominal_power is None:
storage_nominal_power = max(abs(storage_timeseries.p))
p_storage_remaining = storage_nominal_power
if debug:
feeder_repr = []
storage_path = []
storage_repr = []
storage_size = []
# rank MV feeders by grid expansion costs
# conduct grid reinforcement on copied edisgo object on worst-case time
# steps
grid_expansion_results_init = edisgo.reinforce(
copy_graph=True, timesteps_pfa='snapshot_analysis')
# only analyse storage integration if there were any grid expansion needs
if grid_expansion_results_init.equipment_changes.empty:
logger.debug('No storage integration necessary since there are no '
'grid expansion needs.')
return
else:
equipment_changes_reinforcement_init = \
grid_expansion_results_init.equipment_changes.loc[
grid_expansion_results_init.equipment_changes.iteration_step >
0]
total_grid_expansion_costs = \
grid_expansion_results_init.grid_expansion_costs.total_costs.sum()
if equipment_changes_reinforcement_init.empty:
logger.debug('No storage integration necessary since there are no '
'grid expansion needs.')
return
else:
network = equipment_changes_reinforcement_init.index[
0].grid.network
# calculate grid expansion costs without costs for new generators
# to be used in feeder ranking
grid_expansion_costs_feeder_ranking = costs.grid_expansion_costs(
network, without_generator_import=True)
ranked_feeders = _feeder_ranking(grid_expansion_costs_feeder_ranking)
count = 1
storage_obj_list = []
total_grid_expansion_costs_new = 'not calculated'
for feeder in ranked_feeders.values:
logger.debug('Feeder: {}'.format(count))
count += 1
# first step: find node where storage will be installed
critical_nodes_feeder = _critical_nodes_feeder(edisgo, feeder)
critical_lines_feeder = _critical_lines_feeder(edisgo, feeder)
# get node the storage will be connected to (in original graph)
battery_node = _find_battery_node(edisgo, critical_lines_feeder,
critical_nodes_feeder)
if battery_node:
# add to output lists
if debug:
feeder_repr.append(repr(feeder))
storage_path.append(nx.shortest_path(
edisgo.network.mv_grid.graph,
edisgo.network.mv_grid.station,
battery_node))
# second step: calculate storage size
max_storage_size = min(p_storage_remaining, p_storage_max)
p_storage = _calc_storage_size(edisgo, feeder, max_storage_size)
# if p_storage is greater than or equal to the minimum storage
# power required, do storage integration
if p_storage >= p_storage_min:
# third step: integrate storage
share = p_storage / storage_nominal_power
edisgo.integrate_storage(
timeseries=storage_timeseries.p * share,
position=battery_node,
voltage_level='mv',
timeseries_reactive_power=storage_timeseries.q * share)
tools.assign_mv_feeder_to_nodes(edisgo.network.mv_grid)
# get new storage object
storage_obj = [_
for _ in
edisgo.network.mv_grid.graph.nodes_by_attribute(
'storage') if _ in
edisgo.network.mv_grid.graph.neighbors(
battery_node)][0]
storage_obj_list.append(storage_obj)
logger.debug(
'Storage with nominal power of {} kW connected to '
'node {} (path to HV/MV station {}).'.format(
p_storage, battery_node, nx.shortest_path(
battery_node.grid.graph, battery_node.grid.station,
battery_node)))
# fourth step: check if storage integration reduced grid
# reinforcement costs or number of issues
if check_costs_reduction == 'each_feeder':
# calculate new grid expansion costs
grid_expansion_results_new = edisgo.reinforce(
copy_graph=True, timesteps_pfa='snapshot_analysis')
total_grid_expansion_costs_new = \
grid_expansion_results_new.grid_expansion_costs.\
total_costs.sum()
costs_diff = total_grid_expansion_costs - \
total_grid_expansion_costs_new
if costs_diff > 0:
logger.debug(
'Storage integration in feeder {} reduced grid '
'expansion costs by {} kEuro.'.format(
feeder, costs_diff))
if debug:
storage_repr.append(repr(storage_obj))
storage_size.append(storage_obj.nominal_power)
total_grid_expansion_costs = \
total_grid_expansion_costs_new
else:
logger.debug(
'Storage integration in feeder {} did not reduce '
'grid expansion costs (costs increased by {} '
'kEuro).'.format(feeder, -costs_diff))
tools.disconnect_storage(edisgo.network, storage_obj)
p_storage = 0
if debug:
storage_repr.append(None)
storage_size.append(0)
edisgo.integrate_storage(
timeseries=storage_timeseries.p * 0,
position=battery_node,
voltage_level='mv',
timeseries_reactive_power=
storage_timeseries.q * 0)
tools.assign_mv_feeder_to_nodes(
edisgo.network.mv_grid)
else:
number_parallel_lines_before = \
_estimate_new_number_of_lines(critical_lines_feeder)
edisgo.analyze()
critical_lines_feeder_new = _critical_lines_feeder(
edisgo, feeder)
critical_nodes_feeder_new = _critical_nodes_feeder(
edisgo, feeder)
number_parallel_lines = _estimate_new_number_of_lines(
critical_lines_feeder_new)
# if there are critical lines check if number of parallel
# lines was reduced
if not critical_lines_feeder.empty:
diff_lines = number_parallel_lines_before - \
number_parallel_lines
# if it was not reduced check if there are critical
# nodes and if the number was reduced
if diff_lines <= 0:
# if there are no critical nodes remove storage
if not critical_nodes_feeder:
logger.debug(
'Storage integration in feeder {} did not '
'reduce number of critical lines (number '
'increased by {}), storage '
'is therefore removed.'.format(
feeder, -diff_lines))
tools.disconnect_storage(edisgo.network,
storage_obj)
p_storage = 0
if debug:
storage_repr.append(None)
storage_size.append(0)
edisgo.integrate_storage(
timeseries=storage_timeseries.p * 0,
position=battery_node,
voltage_level='mv',
timeseries_reactive_power=
storage_timeseries.q * 0)
tools.assign_mv_feeder_to_nodes(
edisgo.network.mv_grid)
else:
logger.debug(
'Critical nodes in feeder {} '
'before and after storage integration: '
'{} vs. {}'.format(
feeder, critical_nodes_feeder,
critical_nodes_feeder_new))
if debug:
storage_repr.append(repr(storage_obj))
storage_size.append(
storage_obj.nominal_power)
else:
logger.debug(
'Storage integration in feeder {} reduced '
'number of critical lines.'.format(feeder))
if debug:
storage_repr.append(repr(storage_obj))
storage_size.append(storage_obj.nominal_power)
# if there are no critical lines
else:
logger.debug(
'Critical nodes in feeder {} '
'before and after storage integration: '
'{} vs. {}'.format(
feeder, critical_nodes_feeder,
critical_nodes_feeder_new))
if debug:
storage_repr.append(repr(storage_obj))
storage_size.append(storage_obj.nominal_power)
# fifth step: if there is storage capacity left, rerun
# the past steps for the next feeder in the ranking
# list
p_storage_remaining = p_storage_remaining - p_storage
if not p_storage_remaining > p_storage_min:
break
else:
logger.debug('No storage integration in feeder {}.'.format(
feeder))
if debug:
storage_repr.append(None)
storage_size.append(0)
edisgo.integrate_storage(
timeseries=storage_timeseries.p * 0,
position=battery_node,
voltage_level='mv',
timeseries_reactive_power=storage_timeseries.q * 0)
tools.assign_mv_feeder_to_nodes(edisgo.network.mv_grid)
else:
logger.debug('No storage integration in feeder {} because there '
'are neither overloading nor voltage issues.'.format(
feeder))
if debug:
storage_repr.append(None)
storage_size.append(0)
feeder_repr.append(repr(feeder))
storage_path.append([])
if check_costs_reduction == 'once':
# check costs reduction and discard all storages if costs were not
# reduced
grid_expansion_results_new = edisgo.reinforce(
copy_graph=True, timesteps_pfa='snapshot_analysis')
total_grid_expansion_costs_new = \
grid_expansion_results_new.grid_expansion_costs. \
total_costs.sum()
costs_diff = total_grid_expansion_costs - \
total_grid_expansion_costs_new
if costs_diff > 0:
logger.info(
'Storage integration in grid {} reduced grid '
'expansion costs by {} kEuro.'.format(
edisgo.network.id, costs_diff))
else:
logger.info(
'Storage integration in grid {} did not reduce '
'grid expansion costs (costs increased by {} '
'kEuro).'.format(edisgo.network.id, -costs_diff))
for storage in storage_obj_list:
tools.disconnect_storage(edisgo.network, storage)
elif check_costs_reduction == 'each_feeder':
# if costs reduction was checked after each storage only give out
# total costs reduction
if total_grid_expansion_costs_new == 'not calculated':
costs_diff = 0
else:
total_grid_expansion_costs = grid_expansion_results_init.\
grid_expansion_costs.total_costs.sum()
costs_diff = total_grid_expansion_costs - \
total_grid_expansion_costs_new
logger.info(
'Storage integration in grid {} reduced grid '
'expansion costs by {} kEuro.'.format(
edisgo.network.id, costs_diff))
if debug:
plots.storage_size(edisgo.network.mv_grid, edisgo.network.pypsa,
filename='storage_results_{}.pdf'.format(
edisgo.network.id), lopf=False)
storages_df = pd.DataFrame({'path': storage_path,
'repr': storage_repr,
'p_nom': storage_size},
index=feeder_repr)
storages_df.to_csv('storage_results_{}.csv'.format(edisgo.network.id))
edisgo.network.results.storages_costs_reduction = pd.DataFrame(
{'grid_expansion_costs_initial': total_grid_expansion_costs,
'grid_expansion_costs_with_storages': total_grid_expansion_costs_new},
index=[edisgo.network.id])
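A standalone pandas sketch of the grouping and ranking done in _feeder_ranking above; the frame below is a toy stand-in for grid_expansion_costs with made-up values:

import pandas as pd

costs = pd.DataFrame({'mv_feeder': ['f1', 'f2', 'f1', 'f3'],
                      'total_costs': [10.0, 40.0, 25.0, 5.0]})
ranking = costs.groupby(['mv_feeder'], sort=False).sum().reset_index().sort_values(
    by=['total_costs'], ascending=False)['mv_feeder']
print(list(ranking))  # ['f2', 'f1', 'f3'] -> most expensive feeder first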
def append(self,text):
"""Add a text (or speech) to the document:
Example 1::
doc.append(folia.Text)
Example 2::
doc.append( folia.Text(doc, id='example.text') )
Example 3::
doc.append(folia.Speech)
"""
if text is Text:
text = Text(self, id=self.id + '.text.' + str(len(self.data)+1) )
elif text is Speech:
text = Speech(self, id=self.id + '.speech.' + str(len(self.data)+1) ) #pylint: disable=redefined-variable-type
else:
assert isinstance(text, Text) or isinstance(text, Speech)
self.data.append(text)
return text
def _sampleV_preoptimized(self,R,z,maxVT):
"""
NAME:
_sampleV_preoptimized
PURPOSE:
sample a radial, azimuthal, and vertical velocity at R,z;
R,z can be an array of positions maxVT is already optimized
INPUT:
R - Galactocentric distance (can be Quantity)
z - height (can be Quantity)
maxVT - an array of pre-optimized maximum vT at corresponding R,z
OUTPUT:
a numpy array containing the sampled velocity, (vR, vT, vz),
where each row correspond to the row of (R,z)
HISTORY:
2018-08-09 - Written - Samuel Wong (University of Toronto)
"""
length = numpy.size(R)
out= numpy.empty((length,3)) #Initialize output
#Determine the maximum of the velocity distribution
maxVR= numpy.zeros(length)
maxVz= numpy.zeros(length)
logmaxVD= self(R,maxVR,maxVT,z,maxVz,log=True,use_physical=False)
#Now rejection-sample
#Initialize boolean index of positions remaining to be sampled
remain_indx = numpy.full(length,True)
while numpy.any(remain_indx):
nmore= numpy.sum(remain_indx)
propvR= numpy.random.normal(size=nmore)*2.*self._sr
propvT= numpy.random.normal(size=nmore)*2.*self._sr+maxVT[remain_indx]
propvz= numpy.random.normal(size=nmore)*2.*self._sz
VDatprop= self(R[remain_indx],propvR,propvT,z[remain_indx],propvz,
log=True, use_physical=False)-logmaxVD[remain_indx]
VDatprop-= -0.5*(propvR**2./4./self._sr**2.+
propvz**2./4./self._sz**2.+
(propvT-maxVT[remain_indx])**2./4./self._sr**2.)
accept_indx= (VDatprop > numpy.log(numpy.random.random(size=nmore)))
vR_accept= propvR[accept_indx]
vT_accept= propvT[accept_indx]
vz_accept= propvz[accept_indx]
#Get the indexing of rows of output array that need to be updated
#with newly accepted velocity
to_change= numpy.copy(remain_indx)
to_change[remain_indx]= accept_indx
out[to_change]= numpy.stack((vR_accept,vT_accept,vz_accept), axis = 1)
#Removing accepted sampled from remain index
remain_indx[remain_indx]= ~accept_indx
return out
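The vectorized rejection-sampling bookkeeping above (remain_indx / accept_indx / to_change) can be shown with a toy one-dimensional target; this plain-NumPy sketch samples an approximately standard normal from a uniform proposal on [-5, 5]:

import numpy as np

n = 10000
out = np.empty(n)
remain_indx = np.full(n, True)
while np.any(remain_indx):
    nmore = remain_indx.sum()
    prop = np.random.uniform(-5.0, 5.0, size=nmore)
    accept_indx = np.random.random(nmore) < np.exp(-0.5 * prop**2)  # f(x)/max(f)
    to_change = remain_indx.copy()
    to_change[remain_indx] = accept_indx      # rows that receive a newly accepted sample
    out[to_change] = prop[accept_indx]
    remain_indx[remain_indx] = ~accept_indx   # drop accepted rows from the remaining set
print(out.mean(), out.std())                  # ~0 and ~1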
def get_extracted_events(fnames):
"""Get a full list of all extracted event IDs from a list of EKB files"""
event_list = []
for fn in fnames:
tp = trips.process_xml_file(fn)
ed = tp.extracted_events
for k, v in ed.items():
event_list += v
return event_list
def convert_to_type(self, value, typename, **kwargs):
"""
Convert value to type 'typename'
If the conversion routine takes various kwargs to
modify the conversion process, \\**kwargs is passed
through to the underlying conversion function
"""
try:
if isinstance(value, bytearray):
return self.convert_from_binary(value, typename, **kwargs)
typeobj = self.get_type(typename)
conv = typeobj.convert(value, **kwargs)
return conv
except (ValueError, TypeError) as exc:
raise ValidationError("Could not convert value", type=typename, value=value, error_message=str(exc)) | Convert value to type 'typename'
If the conversion routine takes various kwargs to
modify the conversion process, \\**kwargs is passed
through to the underlying conversion function |
def parse_has_combinator(self, sel, m, has_selector, selectors, rel_type, index):
"""Parse combinator tokens."""
combinator = m.group('relation').strip()
if not combinator:
combinator = WS_COMBINATOR
if combinator == COMMA_COMBINATOR:
if not has_selector:
# If we've not captured any selector parts, the comma is either at the beginning of the pattern
# or following another comma, both of which are unexpected. Commas must split selectors.
raise SelectorSyntaxError(
"The combinator '{}' at postion {}, must have a selector before it".format(combinator, index),
self.pattern,
index
)
sel.rel_type = rel_type
selectors[-1].relations.append(sel)
rel_type = ":" + WS_COMBINATOR
selectors.append(_Selector())
else:
if has_selector:
# End the current selector and associate the leading combinator with this selector.
sel.rel_type = rel_type
selectors[-1].relations.append(sel)
elif rel_type[1:] != WS_COMBINATOR:
# It's impossible to have two whitespace combinators after each other as the patterns
# will gobble up trailing whitespace. It is also impossible to have a whitespace
# combinator after any other kind for the same reason. But we could have
# multiple non-whitespace combinators. So if the current combinator is not a whitespace,
# then we've hit the multiple combinator case, so we should fail.
raise SelectorSyntaxError(
'Multiple combinators found at position {}'.format(index),
self.pattern,
index
)
# Set the leading combinator for the next selector.
rel_type = ':' + combinator
sel = _Selector()
has_selector = False
return has_selector, sel, rel_type | Parse combinator tokens. |
def get_default_for(prop, value):
""" Ensures complex property types have the correct default values """
prop = prop.strip('_') # Handle alternate props (leading underscores)
val = reduce_value(value) # Filtering of value happens here
if prop in _COMPLEX_LISTS:
return wrap_value(val)
elif prop in _COMPLEX_STRUCTS:
return val or {}
else:
return u'' if val is None else val | Ensures complex property types have the correct default values |
def handle_xmlrpc(self, request_text):
"""Handle a single XML-RPC request"""
response = self._marshaled_dispatch(request_text)
sys.stdout.write('Content-Type: text/xml\n')
sys.stdout.write('Content-Length: %d\n' % len(response))
sys.stdout.write('\n')
sys.stdout.write(response) | Handle a single XML-RPC request |
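A minimal end-to-end sketch of this CGI-style dispatch using the stdlib equivalent, xmlrpc.server.CGIXMLRPCRequestHandler (which implements the same method); the registered "add" function and the request payload are illustrative, not part of the code above.

from xmlrpc.server import CGIXMLRPCRequestHandler

handler = CGIXMLRPCRequestHandler()
handler.register_function(lambda a, b: a + b, name="add")

request_text = (
    "<?xml version='1.0'?>"
    "<methodCall><methodName>add</methodName>"
    "<params><param><value><int>2</int></value></param>"
    "<param><value><int>3</int></value></param></params></methodCall>"
)
# Writes the Content-Type/Content-Length headers and the XML response to stdout.
handler.handle_xmlrpc(request_text)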
def _generic_filter_execs(self, module):
"""Filters the specified dict of executables to include only those that are not referenced
in a derived type or an interface.
:arg module: the module whose executables should be filtered.
"""
interface_execs = []
for ikey in module.interfaces:
interface_execs.extend(module.interfaces[ikey].procedures)
return {k: module.executables[k] for k in module.executables if
("{}.{}".format(module.name, k) not in interface_execs and
not module.executables[k].is_type_target)} | Filters the specified dict of executables to include only those that are not referenced
in a derived type or an interface.
:arg module: the module whose executables should be filtered. |
def compute_node_positions(self):
"""
Extracts the node positions based on the specified longitude and
latitude keyword arguments.
"""
xs = []
ys = []
self.locs = dict()
for node in self.nodes:
x = self.graph.node[node][self.node_lon]
y = self.graph.node[node][self.node_lat]
xs.append(x)
ys.append(y)
self.locs[node] = (x, y)
self.node_coords = {"x": xs, "y": ys} | Extracts the node positions based on the specified longitude and
latitude keyword arguments. |
def output(self):
"""
Returns the next available output token.
:return: the next token, None if none available
:rtype: Token
"""
if (self._output is None) or (len(self._output) == 0):
result = None
else:
result = self._output.pop(0)
return result | Returns the next available output token.
:return: the next token, None if none available
:rtype: Token |
def fmt_type(data_type, inside_namespace=None):
"""
Returns a TypeScript type annotation for a data type.
May contain a union of enumerated subtypes.
inside_namespace should be set to the namespace that the type reference
occurs in, or None if this parameter is not relevant.
"""
if is_struct_type(data_type) and data_type.has_enumerated_subtypes():
possible_types = []
possible_subtypes = data_type.get_all_subtypes_with_tags()
for _, subtype in possible_subtypes:
possible_types.append(fmt_polymorphic_type_reference(subtype, inside_namespace))
if data_type.is_catch_all():
possible_types.append(fmt_polymorphic_type_reference(data_type, inside_namespace))
return fmt_union(possible_types)
else:
return fmt_type_name(data_type, inside_namespace) | Returns a TypeScript type annotation for a data type.
May contain a union of enumerated subtypes.
inside_namespace should be set to the namespace that the type reference
occurs in, or None if this parameter is not relevant. |
async def replace_local_did_metadata(self, loc_did: str, metadata: dict) -> DIDInfo:
"""
Replace the metadata associated with a local DID.
Raise WalletState if wallet is closed, AbsentRecord for no such local DID.
:param loc_did: local DID of interest
:param metadata: new metadata to store
:return: DIDInfo for local DID after write
"""
LOGGER.debug('Wallet.replace_local_did_metadata >>> loc_did: %s, metadata: %s', loc_did, metadata)
old = await self.get_local_did(loc_did) # raises exceptions if applicable
now = int(time())
loc_did_metadata = {**(metadata or {}), 'since': (old.metadata or {}).get('since', now), 'modified': now}
try:
await did.set_did_metadata(self.handle, loc_did, json.dumps(loc_did_metadata))
except IndyError as x_indy:
LOGGER.debug('Wallet.replace_local_did_metadata <!< indy-sdk raised error %s', x_indy.error_code)
raise
rv = await self.get_local_did(loc_did)
LOGGER.debug('Wallet.replace_local_did_metadata <<< %s', rv)
return rv | Replace the metadata associated with a local DID.
Raise WalletState if wallet is closed, AbsentRecord for no such local DID.
:param loc_did: local DID of interest
:param metadata: new metadata to store
:return: DIDInfo for local DID after write |
def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the
visualization since it has both the X and y data required for the
viz and the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
# Convert from pandas data types
if is_dataframe(X):
# Get column names before reverting to an np.ndarray
if self.features_ is None:
self.features_ = np.array(X.columns)
X = X.values
if is_series(y):
y = y.values
# Assign integer labels to the feature columns from the input
if self.features_ is None:
self.features_ = np.arange(0, X.shape[1])
# Ensure that all classes are represented in the color mapping (before sample)
# NOTE: np.unique also specifies the ordering of the classes
if self.classes_ is None:
self.classes_ = [str(label) for label in np.unique(y)]
# Create the color mapping for each class
# TODO: Allow both colormap, listed colors, and palette definition
# TODO: Make this an independent function or property for override!
color_values = resolve_colors(
n_colors=len(self.classes_), colormap=self.colormap, colors=self.color
)
self._colors = dict(zip(self.classes_, color_values))
# Ticks for each feature specified
self._increments = np.arange(len(self.features_))
# Subsample instances
X, y = self._subsample(X, y)
# Normalize instances
if self.normalize is not None:
X = self.NORMALIZERS[self.normalize].fit_transform(X)
# the super method calls draw and returns self
return super(ParallelCoordinates, self).fit(X, y, **kwargs) | The fit method is the primary drawing input for the
visualization since it has both the X and y data required for the
viz and the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer |
def onCMapSave(self, event=None, col='int'):
"""save color table image"""
file_choices = 'PNG (*.png)|*.png'
ofile = 'Colormap.png'
dlg = wx.FileDialog(self, message='Save Colormap as...',
defaultDir=os.getcwd(),
defaultFile=ofile,
wildcard=file_choices,
style=wx.FD_SAVE|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
self.cmap_panels[0].cmap_canvas.print_figure(dlg.GetPath(), dpi=600) | save color table image |
def register_button_handler(self, button_handler, buttons):
"""
Register a handler function which will be called when a button is pressed
:param button_handler:
A function which will be called when any of the specified buttons are pressed. The
function is called with the Button that was pressed as the sole argument.
:param [Button] buttons:
A list of one or more buttons which should trigger the handler when pressed. Buttons
are specified as :class:`approxeng.input.Button` instances, in general controller implementations will
expose these as constants such as SixAxis.BUTTON_CIRCLE. A single Button can be specified if only one button
binding is required.
:return:
A no-arg function which can be used to remove this registration
"""
if not isinstance(buttons, list):
buttons = [buttons]
for button in buttons:
state = self.buttons.get(button)
if state is not None:
state.button_handlers.append(button_handler)
def remove():
for button_to_remove in buttons:
state_to_remove = self.buttons.get(button_to_remove)
if state_to_remove is not None:
state_to_remove.button_handlers.remove(button_handler)
return remove | Register a handler function which will be called when a button is pressed
:param button_handler:
A function which will be called when any of the specified buttons are pressed. The
function is called with the Button that was pressed as the sole argument.
:param [Button] buttons:
A list of one or more buttons which should trigger the handler when pressed. Buttons
are specified as :class:`approxeng.input.Button` instances, in general controller implementations will
expose these as constants such as SixAxis.BUTTON_CIRCLE. A single Button can be specified if only one button
binding is required.
:return:
A no-arg function which can be used to remove this registration |
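A stripped-down sketch of the register/remove-closure pattern used above; the _ButtonState class and the button names are stand-ins for illustration, not the approxeng.input types.

class _ButtonState:
    def __init__(self):
        self.button_handlers = []

buttons = {"circle": _ButtonState(), "cross": _ButtonState()}

def register(handler, names):
    if not isinstance(names, list):
        names = [names]
    for name in names:
        buttons[name].button_handlers.append(handler)
    def remove():
        # De-register the handler from every button it was attached to.
        for name in names:
            buttons[name].button_handlers.remove(handler)
    return remove

deregister = register(lambda button: print("pressed", button), ["circle", "cross"])
deregister()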
def invalidate_object(self, address, state = 'stale'):
"""Force cache item state change (to 'worse' state only).
:Parameters:
- `state`: the new state requested.
:Types:
- `state`: `str`"""
self._lock.acquire()
try:
item = self.get_item(address)
if item and item.state_value<_state_values[state]:
item.state=state
item.update_state()
self._items_list.sort()
finally:
self._lock.release() | Force cache item state change (to 'worse' state only).
:Parameters:
- `state`: the new state requested.
:Types:
- `state`: `str` |
def print_unfinished_line(self):
"""The unfinished line stayed long enough in the buffer to be printed"""
if self.state is STATE_RUNNING:
if not callbacks.process(self.read_buffer):
self.print_lines(self.read_buffer)
self.read_buffer = b'' | The unfinished line stayed long enough in the buffer to be printed |
def _flush(self, finish=False):
"""Internal API to flush.
Buffer is flushed to GCS only when the total amount of buffered data is at
least self._blocksize, or to flush the final (incomplete) block of
the file with finish=True.
"""
while ((finish and self._buffered >= 0) or
(not finish and self._buffered >= self._blocksize)):
tmp_buffer = []
tmp_buffer_len = 0
excess = 0
while self._buffer:
buf = self._buffer.popleft()
size = len(buf)
self._buffered -= size
tmp_buffer.append(buf)
tmp_buffer_len += size
if tmp_buffer_len >= self._maxrequestsize:
excess = tmp_buffer_len - self._maxrequestsize
break
if not finish and (
tmp_buffer_len % self._blocksize + self._buffered <
self._blocksize):
excess = tmp_buffer_len % self._blocksize
break
if excess:
over = tmp_buffer.pop()
size = len(over)
assert size >= excess
tmp_buffer_len -= size
head, tail = over[:-excess], over[-excess:]
self._buffer.appendleft(tail)
self._buffered += len(tail)
if head:
tmp_buffer.append(head)
tmp_buffer_len += len(head)
data = ''.join(tmp_buffer)
file_len = '*'
if finish and not self._buffered:
file_len = self._written + len(data)
self._send_data(data, self._written, file_len)
self._written += len(data)
if file_len != '*':
break | Internal API to flush.
Buffer is flushed to GCS only when the total amount of buffered data is at
least self._blocksize, or to flush the final (incomplete) block of
the file with finish=True. |
def start_stream_subscriber(self):
""" Starts the stream consumer's main loop.
Called when the stream consumer has been set up with the correct callbacks.
"""
if not self._stream_process_started: # pragma: no cover
if sys.platform.startswith("win"): # if we're on windows we can't expect multiprocessing to work
self._stream_process_started = True
self._stream()
self._stream_process_started = True
self._stream_process.start() | Starts the stream consumer's main loop.
Called when the stream consumer has been set up with the correct callbacks. |
def layers_intersect(layer_a, layer_b):
"""Check if extents of two layers intersect.
:param layer_a: One of the two layers to test overlapping
:type layer_a: QgsMapLayer
:param layer_b: The second of the two layers to test overlapping
:type layer_b: QgsMapLayer
:returns: true if the layers intersect, false if they are disjoint
:rtype: boolean
"""
extent_a = layer_a.extent()
extent_b = layer_b.extent()
if layer_a.crs() != layer_b.crs():
coord_transform = QgsCoordinateTransform(
layer_a.crs(), layer_b.crs(), QgsProject.instance())
extent_b = (coord_transform.transform(
extent_b, QgsCoordinateTransform.ReverseTransform))
return extent_a.intersects(extent_b) | Check if extents of two layers intersect.
:param layer_a: One of the two layers to test overlapping
:type layer_a: QgsMapLayer
:param layer_b: The second of the two layers to test overlapping
:type layer_b: QgsMapLayer
:returns: true if the layers intersect, false if they are disjoint
:rtype: boolean |
def stream(self, report, callback=None):
"""Stream a report asynchronously.
If no one is listening for the report, the report may be dropped,
otherwise it will be queued for sending
Args:
report (IOTileReport): The report that should be streamed
callback (callable): Optional callback to get notified when
this report is actually sent.
"""
if self._push_channel is None:
return
self._push_channel.stream(report, callback=callback) | Stream a report asynchronously.
If no one is listening for the report, the report may be dropped,
otherwise it will be queued for sending
Args:
report (IOTileReport): The report that should be streamed
callback (callable): Optional callback to get notified when
this report is actually sent. |
def get_bit_values(number, size=32):
"""
Get bit values as a list for a given number
>>> get_bit_values(1) == [0]*31 + [1]
True
>>> get_bit_values(0xDEADBEEF)
[1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, \
1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1]
You may override the default word size of 32-bits to match your actual
application.
>>> get_bit_values(0x3, 2)
[1, 1]
>>> get_bit_values(0x3, 4)
[0, 0, 1, 1]
"""
number += 2**size
return list(map(int, bin(number)[-size:])) | Get bit values as a list for a given number
>>> get_bit_values(1) == [0]*31 + [1]
True
>>> get_bit_values(0xDEADBEEF)
[1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, \
1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1]
You may override the default word size of 32-bits to match your actual
application.
>>> get_bit_values(0x3, 2)
[1, 1]
>>> get_bit_values(0x3, 4)
[0, 0, 1, 1] |
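The `number += 2**size` step is what guarantees `bin()` yields at least `size` digits before the slice; a small self-contained check of that trick:

def _bits(number, size=32):
    # Adding 2**size forces bin() to emit a full size-digit suffix to slice from.
    number += 2 ** size
    return list(map(int, bin(number)[-size:]))

assert _bits(0x3, 4) == [0, 0, 1, 1]
assert _bits(0xDEADBEEF)[:8] == [1, 1, 0, 1, 1, 1, 1, 0]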
def iterator(cls, path=None, objtype=None, query=None, page_size=1000, **kwargs):
""""
Linear time, constant memory, iterator for a mongo collection.
@param path: the path of the database to query, in the form
"database.colletion"; pass None to use the value of the
PATH property of the object or, if that is none, the
PATH property of OBJTYPE
@param objtype: the object type to use for these DatabaseObjects;
pass None to use the OBJTYPE property of the class
@param query: a dictionary specifying key-value pairs that the result
must match. If query is None, use kwargs in its place
@param page_size: the number of items to fetch per page of iteration
@param **kwargs: used as query parameters if query is None
"""
if not objtype:
objtype = cls.OBJTYPE
if not path:
path = cls.PATH
db = objtype.db(path)
if not query:
query = kwargs
results = list(db.find(query).sort(ID_KEY, ASCENDING).limit(page_size))
while results:
page = [objtype(path=path, _new_object=result) for result in results]
for obj in page:
yield obj
query[ID_KEY] = {GT: results[-1][ID_KEY]}
results = list(db.find(query).sort(ID_KEY, ASCENDING).limit(page_size)) | Linear time, constant memory, iterator for a mongo collection.
@param path: the path of the database to query, in the form
"database.colletion"; pass None to use the value of the
PATH property of the object or, if that is none, the
PATH property of OBJTYPE
@param objtype: the object type to use for these DatabaseObjects;
pass None to use the OBJTYPE property of the class
@param query: a dictionary specifying key-value pairs that the result
must match. If query is None, use kwargs in its place
@param page_size: the number of items to fetch per page of iteration
@param **kwargs: used as query parameters if query is None |
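The same keyset-pagination idea expressed directly against pymongo, for comparison; the connection URI and the "mydb.items" collection are placeholders.

from pymongo import MongoClient, ASCENDING

collection = MongoClient("mongodb://localhost:27017")["mydb"]["items"]

def iterate(query=None, page_size=1000):
    query = dict(query or {})
    results = list(collection.find(query).sort("_id", ASCENDING).limit(page_size))
    while results:
        for doc in results:
            yield doc
        # Resume strictly after the last _id seen, keeping memory constant.
        query["_id"] = {"$gt": results[-1]["_id"]}
        results = list(collection.find(query).sort("_id", ASCENDING).limit(page_size))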
def read(fname):
"""Quick way to read a file content."""
content = None
with open(os.path.join(here, fname)) as f:
content = f.read()
return content | Quick way to read a file content. |
def extend(*args):
"""shallow dictionary merge
Args:
a: dict to extend
b: dict to apply to a
Returns:
new instance of the same type as _a_, with _a_ and _b_ merged.
"""
if not args:
return {}
first = args[0]
rest = args[1:]
out = type(first)(first)
for each in rest:
out.update(each)
return out | shallow dictionary merge
Args:
a: dict to extend
b: dict to apply to a
Returns:
new instance of the same type as _a_, with _a_ and _b_ merged. |
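A quick usage check, assuming the extend function above is in scope; note that the result keeps the type of the first argument.

from collections import OrderedDict

assert extend({"a": 1}, {"b": 2}, {"a": 3}) == {"a": 3, "b": 2}
assert extend() == {}
assert type(extend(OrderedDict(a=1), {"b": 2})) is OrderedDict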
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s does not have a base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret | List virtual machines
.. code-block:: bash
salt-cloud -Q |
def result(self):
"""Get the result(s) for this hook call (DEPRECATED in favor of ``get_result()``)."""
msg = "Use get_result() which forces correct exception handling"
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return self._result | Get the result(s) for this hook call (DEPRECATED in favor of ``get_result()``). |
def createlabel(self, project_id, name, color):
"""
Creates a new label for the given repository with the given name and color.
:param project_id: The ID of a project
:param name: The name of the label
:param color: Color of the label given in 6-digit hex notation with leading '#' sign (e.g. #FFAABB)
:return:
"""
data = {'name': name, 'color': color}
request = requests.post(
'{0}/{1}/labels'.format(self.projects_url, project_id), data=data,
verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout)
if request.status_code == 201:
return request.json()
else:
return False | Creates a new label for the given repository with the given name and color.
:param project_id: The ID of a project
:param name: The name of the label
:param color: Color of the label given in 6-digit hex notation with leading '#' sign (e.g. #FFAABB)
:return: |
def model_fn(features, labels, mode, params):
"""The model_fn argument for creating an Estimator."""
tf.logging.info("features = %s labels = %s mode = %s params=%s" %
(features, labels, mode, params))
global_step = tf.train.get_global_step()
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
logits, loss = mnist_model(features, labels, mesh)
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
mesh_size = mesh_shape.size
mesh_devices = [""] * mesh_size
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
mesh_shape, layout_rules, mesh_devices)
if mode == tf.estimator.ModeKeys.TRAIN:
var_grads = mtf.gradients(
[loss], [v.outputs[0] for v in graph.trainable_variables])
optimizer = mtf.optimize.AdafactorOptimizer()
update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
restore_hook = mtf.MtfRestoreHook(lowering)
tf_logits = lowering.export_to_tf_tensor(logits)
if mode != tf.estimator.ModeKeys.PREDICT:
tf_loss = lowering.export_to_tf_tensor(loss)
tf.summary.scalar("loss", tf_loss)
if mode == tf.estimator.ModeKeys.TRAIN:
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
train_op = tf.group(tf_update_ops)
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False, save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
FLAGS.model_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
accuracy = tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(tf_logits, axis=1))
# Name tensors to be logged with LoggingTensorHook.
tf.identity(tf_loss, "cross_entropy")
tf.identity(accuracy[1], name="train_accuracy")
# Save accuracy scalar to Tensorboard output.
tf.summary.scalar("train_accuracy", accuracy[1])
# restore_hook must come before saver_hook
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op,
training_chief_hooks=[restore_hook, saver_hook])
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"classes": tf.argmax(tf_logits, axis=1),
"probabilities": tf.nn.softmax(tf_logits),
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
prediction_hooks=[restore_hook],
export_outputs={
"classify": tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=tf_loss,
evaluation_hooks=[restore_hook],
eval_metric_ops={
"accuracy":
tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(tf_logits, axis=1)),
}) | The model_fn argument for creating an Estimator. |
async def mark_fixed(self, *, comment: str = None):
"""Mark fixes.
:param comment: Reason machine is fixed.
:type comment: `str`
"""
params = {
"system_id": self.system_id
}
if comment:
params["comment"] = comment
self._data = await self._handler.mark_fixed(**params)
return self | Mark the machine as fixed.
:param comment: Reason machine is fixed.
:type comment: `str` |
async def insert(query):
"""Perform INSERT query asynchronously. Returns last insert ID.
This function is called by object.create for single objects only.
"""
assert isinstance(query, peewee.Insert),\
("Error, trying to run insert coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
try:
if query._returning:
row = await cursor.fetchone()
result = row[0]
else:
database = _query_db(query)
last_id = await database.last_insert_id_async(cursor)
result = last_id
finally:
await cursor.release()
return result | Perform INSERT query asynchronously. Returns last insert ID.
This function is called by object.create for single objects only. |
def error(cls, name, message, *args):
"""
Convenience function to log a message at the ERROR level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
:param args: The arguments that are merged into msg using the string formatting operator.
:..note: The native logger's `kwargs` are not used in this function.
"""
cls.getLogger(name).error(message, *args) | Convenience function to log a message at the ERROR level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
:param args: The arguments that are merged into msg using the string formatting operator.
:..note: The native logger's `kwargs` are not used in this function. |
def _has_converged(centroids, old_centroids):
"""
Stop if centroids stop updating
Parameters
-----------
centroids: array-like, shape=(K, n_samples)
old_centroids: array-like, shape=(K, n_samples)
------------
returns
True: bool
"""
return (set([tuple(a) for a in centroids]) == set([tuple(a) for a in old_centroids])) | Stop if centroids stop updating
Parameters
-----------
centroids: array-like, shape=(K, n_samples)
old_centroids: array-like, shape=(K, n_samples)
------------
returns
True: bool |
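Because the comparison is over sets of tuples, the check is insensitive to centroid ordering; a small illustration, assuming the _has_converged function above is in scope:

import numpy

old = numpy.array([[0.0, 1.0], [2.0, 3.0]])
new = numpy.array([[2.0, 3.0], [0.0, 1.0]])   # same centroids, different order
assert _has_converged(new, old)
assert not _has_converged(new + 0.1, old)     # any coordinate change breaks equality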
def complete_json_get(self, cmd_param_text, full_cmd, *rest):
""" TODO: prefetch & parse znodes & suggest keys """
complete_keys = partial(complete_values, ["key1", "key2", "#{key1.key2}"])
completers = [self._complete_path, complete_keys, complete_labeled_boolean("recursive")]
return complete(completers, cmd_param_text, full_cmd, *rest) | TODO: prefetch & parse znodes & suggest keys |
def flush(self, using=None, **kwargs):
"""
Performs a flush operation on the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.flush`` unchanged.
"""
return self._get_connection(using).indices.flush(index=self._name, **kwargs) | Performs a flush operation on the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.flush`` unchanged. |
def dicom_to_nifti(dicom_input, output_file=None):
"""
This is the main dicom to nifti conversion function for GE images.
As input, GE images are required. It will then determine the type of images and do the correct conversion
Examples: See unit test
:param output_file: the filepath to the output nifti file
:param dicom_input: list with dicom objects
"""
assert common.is_ge(dicom_input)
logger.info('Reading and sorting dicom files')
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if _is_4d(grouped_dicoms):
logger.info('Found sequence type: 4D')
return _4d_to_nifti(grouped_dicoms, output_file)
logger.info('Assuming anatomical data')
return convert_generic.dicom_to_nifti(dicom_input, output_file) | This is the main dicom to nifti conversion function for GE images.
As input, GE images are required. It will then determine the type of images and do the correct conversion
Examples: See unit test
:param output_file: the filepath to the output nifti file
:param dicom_input: list with dicom objects |
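A hedged usage sketch; the slice paths are placeholders, and the inputs must already be GE DICOM objects (the assert common.is_ge(...) guard rejects anything else).

import pydicom

dicom_objects = [pydicom.dcmread(path) for path in ["slice_001.dcm", "slice_002.dcm"]]
result = dicom_to_nifti(dicom_objects, output_file="scan.nii.gz")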
def parse_int_list(s):
"""
Parse a comma-separated list of strings.
The list may additionally contain ranges such as "1-5",
which will be expanded into "1,2,3,4,5".
"""
result = []
for item in s.split(','):
item = item.strip().split('-')
if len(item) == 1:
result.append(int(item[0]))
elif len(item) == 2:
start, end = item
result.extend(range(int(start), int(end)+1))
else:
raise ValueError("invalid range: '{0}'".format(s))
return result | Parse a comma-separated list of strings.
The list may additionally contain ranges such as "1-5",
which will be expanded into "1,2,3,4,5". |
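A few usage checks, assuming the parse_int_list function above is in scope:

assert parse_int_list("1-3, 7, 10-12") == [1, 2, 3, 7, 10, 11, 12]
assert parse_int_list("5") == [5]
try:
    parse_int_list("1-2-3")
except ValueError as exc:
    print(exc)  # invalid range: '1-2-3'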
def getservers(self, vhost = None):
'''
Return current servers
:param vhost: return only servers of vhost if specified. '' to return only default servers.
None for all servers.
'''
if vhost is not None:
return [s for s in self.connections if s.protocol.vhost == vhost]
else:
return list(self.connections) | Return current servers
:param vhost: return only servers of vhost if specified. '' to return only default servers.
None for all servers. |
def serialize(cls, installation_context):
"""
:type installation_context: context.InstallationContext
:rtype: dict
"""
return {
cls._FIELD_TOKEN: installation_context.token,
cls._FIELD_PUBLIC_KEY_CLIENT: security.public_key_to_string(
installation_context.private_key_client.publickey()
),
cls._FIELD_PRIVATE_KEY_CLIENT: security.private_key_to_string(
installation_context.private_key_client
),
cls._FIELD_PUBLIC_KEY_SERVER: security.public_key_to_string(
installation_context.public_key_server
),
} | :type installation_context: context.InstallationContext
:rtype: dict |
def get_users(self, capacity=None):
# type: (Optional[str]) -> List[User]
"""Returns the organization's users.
Args:
capacity (Optional[str]): Filter by capacity eg. member, admin. Defaults to None.
Returns:
List[User]: Organization's users.
"""
users = list()
usersdicts = self.data.get('users')
if usersdicts is not None:
for userdata in usersdicts:
if capacity is not None and userdata['capacity'] != capacity:
continue
id = userdata.get('id')
if id is None:
id = userdata['name']
user = hdx.data.user.User.read_from_hdx(id, configuration=self.configuration)
user['capacity'] = userdata['capacity']
users.append(user)
return users | Returns the organization's users.
Args:
capacity (Optional[str]): Filter by capacity eg. member, admin. Defaults to None.
Returns:
List[User]: Organization's users. |
def tostring(self, inject):
"""
Convert an element to a single string and allow the passed inject method to place content before any
element.
"""
return inject(self, '\n'.join(f'{division.tostring(inject)}' for division in self.divisions)) | Convert an element to a single string and allow the passed inject method to place content before any
element. |
def mac_address(self, mac_address):
"""
Sets the MAC address for this QEMU VM.
:param mac_address: MAC address
"""
if not mac_address:
# use the node UUID to generate a random MAC address
self._mac_address = "52:%s:%s:%s:%s:00" % (self.project.id[-4:-2], self.project.id[-2:], self.id[-4:-2], self.id[-2:])
else:
self._mac_address = mac_address
log.info('QEMU VM "{name}" [{id}]: MAC address changed to {mac_addr}'.format(name=self._name,
id=self._id,
mac_addr=self._mac_address)) | Sets the MAC address for this QEMU VM.
:param mac_address: MAC address |
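An illustration of the MAC derivation used when no address is supplied, with throwaway UUIDs standing in for the project and node IDs:

import uuid

project_id = str(uuid.uuid4())
node_id = str(uuid.uuid4())
mac = "52:%s:%s:%s:%s:00" % (project_id[-4:-2], project_id[-2:], node_id[-4:-2], node_id[-2:])
print(mac)  # e.g. 52:3f:9a:6b:1c:00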
def get_symbol_train(network, data_shape, **kwargs):
"""Wrapper for get symbol for train
Parameters
----------
network : str
name for the base network symbol
data_shape : int
input shape
kwargs : dict
see symbol_builder.get_symbol_train for more details
"""
if network.startswith('legacy'):
logging.warn('Using legacy model.')
return symbol_builder.import_module(network).get_symbol_train(**kwargs)
config = get_config(network, data_shape, **kwargs).copy()
config.update(kwargs)
return symbol_builder.get_symbol_train(**config) | Wrapper for get symbol for train
Parameters
----------
network : str
name for the base network symbol
data_shape : int
input shape
kwargs : dict
see symbol_builder.get_symbol_train for more details |
def _allocate_subnets(self, conf):
"""
Allocate all the subnets needed by the given configuration spec
Args:
conf (dict): Configuration spec from which to get the nets definitions
Returns:
tuple(list, dict): allocated subnets and modified conf
"""
allocated_subnets = []
try:
for net_spec in conf.get('nets', {}).itervalues():
if net_spec['type'] != 'nat':
continue
gateway = net_spec.get('gw')
if gateway:
allocated_subnet = self._subnet_store.acquire(
self.paths.uuid(), gateway
)
else:
allocated_subnet = self._subnet_store.acquire(
self.paths.uuid()
)
net_spec['gw'] = str(allocated_subnet.iter_hosts().next())
allocated_subnets.append(allocated_subnet)
except:
self._subnet_store.release(allocated_subnets)
raise
return allocated_subnets, conf | Allocate all the subnets needed by the given configuration spec
Args:
conf (dict): Configuration spec from which to get the nets definitions
Returns:
tuple(list, dict): allocated subnets and modified conf |
def help(self, *args):
"""
Can be overridden (and for example _Menu does).
"""
if args:
self.messages.error(
self.messages.command_does_not_accept_arguments)
else:
print(self.helpfull) | Can be overridden (and for example _Menu does). |
def pci_lookup_name2(
access: (IN, ctypes.POINTER(pci_access)),
buf: (IN, ctypes.c_char_p),
size: (IN, ctypes.c_int),
flags: (IN, ctypes.c_int),
arg1: (IN, ctypes.c_int),
arg2: (IN, ctypes.c_int),
) -> ctypes.c_char_p:
"""
Conversion of PCI ID's to names (according to the pci.ids file).
char *pci_lookup_name(
struct pci_access *a, char *buf, int size, int flags, ...
) PCI_ABI;
This is a variant of pci_lookup_name() that gets called with two arguments.
It is required because ctypes doesn't support variadic functions.
"""
pass | Conversion of PCI ID's to names (according to the pci.ids file).
char *pci_lookup_name(
struct pci_access *a, char *buf, int size, int flags, ...
) PCI_ABI;
This is a variant of pci_lookup_name() that gets called with two arguments.
It is required because ctypes doesn't support variadic functions.
def update_hints(obj, data):
"""Update the hints on the root-node of a workflow. Usually, schedule
hints are fixed per function. Sometimes a user may want to set hints
manually on a specific promised object. :func:`update_hints` uses the
`update` method on the hints dictionary with `data` as its argument.
:param obj: a :py:class:`PromisedObject`.
:param data: a :py:class:`dict` containing additional hints.
The hints are modified, in place, on the node. All workflows that contain
the node are affected."""
root = obj._workflow.root
obj._workflow.nodes[root].hints.update(data) | Update the hints on the root-node of a workflow. Usually, schedule
hints are fixed per function. Sometimes a user may want to set hints
manually on a specific promised object. :func:`update_hints` uses the
`update` method on the hints dictionary with `data` as its argument.
:param obj: a :py:class:`PromisedObject`.
:param data: a :py:class:`dict` containing additional hints.
The hints are modified, in place, on the node. All workflows that contain
the node are affected. |
def get_recent_trades(self, pair="SWTH_NEO"):
"""
Function to fetch a list of the 20 most recently filled trades for the parameters requested.
Execution of this function is as follows::
get_recent_trades(pair="SWTH_NEO")
The expected return result for this function is as follows::
[{
'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
'fill_amount': 100000000,
'take_amount': 100000000,
'event_time': '2018-08-04T15:00:12.634Z',
'is_buy': True
}, {
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'fill_amount': 47833882,
'take_amount': 97950000000,
'event_time': '2018-08-03T02:44:47.706Z',
'is_buy': True
}, ...., {
'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
'fill_amount': 1001117,
'take_amount': 2050000000,
'event_time': '2018-08-03T02:32:50.703Z',
'is_buy': True
}]
:param pair: The trading pair that will be used to request filled trades.
:type pair: str
:return: List of 20 dictionaries consisting of filled orders for the trade pair.
"""
api_params = {
"pair": pair
}
return self.request.get(path='/trades/recent', params=api_params) | Function to fetch a list of the 20 most recently filled trades for the parameters requested.
Execution of this function is as follows::
get_recent_trades(pair="SWTH_NEO")
The expected return result for this function is as follows::
[{
'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
'fill_amount': 100000000,
'take_amount': 100000000,
'event_time': '2018-08-04T15:00:12.634Z',
'is_buy': True
}, {
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'fill_amount': 47833882,
'take_amount': 97950000000,
'event_time': '2018-08-03T02:44:47.706Z',
'is_buy': True
}, ...., {
'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
'fill_amount': 1001117,
'take_amount': 2050000000,
'event_time': '2018-08-03T02:32:50.703Z',
'is_buy': True
}]
:param pair: The trading pair that will be used to request filled trades.
:type pair: str
:return: List of 20 dictionaries consisting of filled orders for the trade pair. |
def integer_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs):
"""Prompt the user to enter an integer.
Parameters
----------
prompt: string
Prompt to display to the user.
message: string, optional
Message to display under the entry line.
min, max: integer, optional
Minimum and maximum values to allow. If None, no limit is imposed.
Returns
-------
integer, or None if the dialog is cancelled.
"""
# Sanity check.
if (min is not None) and (max is not None) and not (max > min):
raise ValueError("Maximum limit has to be more than the minimum limit.")
def integer_validator(text):
error = None
# Attempt to convert to integer.
try:
value = int(text)
except ValueError:
return None, "Please enter an integer value."
# Check it's within the limits.
if (min is not None) and (value < min):
return None, "The minimum allowable value is {0:d}.".format(min)
if (max is not None) and (value > max):
return None, "The maximum allowable value is {0:d}.".format(max)
return value, None
return self.generic_entry(prompt, integer_validator, message, rofi_args, **kwargs) | Prompt the user to enter an integer.
Parameters
----------
prompt: string
Prompt to display to the user.
message: string, optional
Message to display under the entry line.
min, max: integer, optional
Minimum and maximum values to allow. If None, no limit is imposed.
Returns
-------
integer, or None if the dialog is cancelled. |
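The validation logic is independent of the rofi dialog itself; a standalone sketch of the same validator, with illustrative limits of 0..100:

def make_integer_validator(min=None, max=None):
    def validate(text):
        try:
            value = int(text)
        except ValueError:
            return None, "Please enter an integer value."
        if min is not None and value < min:
            return None, "The minimum allowable value is {0:d}.".format(min)
        if max is not None and value > max:
            return None, "The maximum allowable value is {0:d}.".format(max)
        return value, None
    return validate

validate = make_integer_validator(0, 100)
assert validate("42") == (42, None)
assert validate("abc") == (None, "Please enter an integer value.")
assert validate("999")[1] == "The maximum allowable value is 100."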
def gopro_set_response_send(self, cmd_id, status, force_mavlink1=False):
'''
Response from a GOPRO_COMMAND set request
cmd_id : Command ID (uint8_t)
status : Status (uint8_t)
'''
return self.send(self.gopro_set_response_encode(cmd_id, status), force_mavlink1=force_mavlink1) | Response from a GOPRO_COMMAND set request
cmd_id : Command ID (uint8_t)
status : Status (uint8_t) |
def ExpandUsersVariablePath(cls, path, path_separator, user_accounts):
"""Expands a path with a users variable, e.g. %%users.homedir%%.
Args:
path (str): path with users variable.
path_separator (str): path segment separator.
user_accounts (list[UserAccountArtifact]): user accounts.
Returns:
list[str]: paths for which the users variables have been expanded.
"""
path_segments = path.split(path_separator)
return cls._ExpandUsersVariablePathSegments(
path_segments, path_separator, user_accounts) | Expands a path with a users variable, e.g. %%users.homedir%%.
Args:
path (str): path with users variable.
path_separator (str): path segment separator.
user_accounts (list[UserAccountArtifact]): user accounts.
Returns:
list[str]: paths for which the users variables have been expanded. |
def set_response_headers(self, response: HttpResponse) -> HttpResponse:
"""
Appends default headers to every response returned by the API
:param response HttpResponse
:rtype: HttpResponse
"""
public_name = os.environ.get('SERVER_PUBLIC_NAME')
response_headers = {
'access-control-allow-headers': self.app.allowed_headers,
'access-control-allow-methods': self.app.allowed_methods,
'access-control-allow-origin': self.app.allowed_origins,
'access-control-allow-credentials': True,
'www-authenticate': "Bearer",
'server-public-name': public_name if public_name else "No one",
'user-info': "Rinzler Framework rulez!"
}
response_headers.update(self.app.default_headers)
for key in response_headers:
response[key] = response_headers[key]
status = response.status_code
if status != 404:
self.app.log.info("< {0}".format(status))
return response | Appends default headers to every response returned by the API
:param response HttpResponse
:rtype: HttpResponse |
def usergroup_get(name=None, usrgrpids=None, userids=None, **kwargs):
'''
.. versionadded:: 2016.3.0
Retrieve user groups according to the given parameters
.. note::
This function accepts all usergroup_get properties: keyword argument
names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/get
:param name: names of the user groups
:param usrgrpids: return only user groups with the given IDs
:param userids: return only user groups that contain the given users
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: Array with convenient user groups details, False if no user group found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usergroup_get Guests
'''
conn_args = _login(**kwargs)
zabbix_version = apiinfo_version(**kwargs)
ret = {}
try:
if conn_args:
method = 'usergroup.get'
# Versions above 2.4 allow retrieving user group permissions
if _LooseVersion(zabbix_version) > _LooseVersion("2.5"):
params = {"selectRights": "extend", "output": "extend", "filter": {}}
else:
params = {"output": "extend", "filter": {}}
if not name and not usrgrpids and not userids:
return False
if name:
params['filter'].setdefault('name', name)
if usrgrpids:
params.setdefault('usrgrpids', usrgrpids)
if userids:
params.setdefault('userids', userids)
params = _params_extend(params, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return False if not ret['result'] else ret['result']
else:
raise KeyError
except KeyError:
return ret | .. versionadded:: 2016.3.0
Retrieve user groups according to the given parameters
.. note::
This function accepts all usergroup_get properties: keyword argument
names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/get
:param name: names of the user groups
:param usrgrpids: return only user groups with the given IDs
:param userids: return only user groups that contain the given users
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: Array with convenient user groups details, False if no user group found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usergroup_get Guests |
def language_id(self):
"""
Get or set the language id of this |Font| instance. The language id
is a member of the :ref:`MsoLanguageId` enumeration. Assigning |None|
removes any language setting, the same behavior as assigning
`MSO_LANGUAGE_ID.NONE`.
"""
lang = self._rPr.lang
if lang is None:
return MSO_LANGUAGE_ID.NONE
return self._rPr.lang | Get or set the language id of this |Font| instance. The language id
is a member of the :ref:`MsoLanguageId` enumeration. Assigning |None|
removes any language setting, the same behavior as assigning
`MSO_LANGUAGE_ID.NONE`. |
def queue_call(self, delay, callback, *args, **kwds):
"""Schedule a function call at a specific time in the future."""
if delay is None:
self.current.append((callback, args, kwds))
return
if delay < 1e9:
when = delay + self.clock.now()
else:
# Times over a billion seconds are assumed to be absolute.
when = delay
self.insort_event_right((when, callback, args, kwds)) | Schedule a function call at a specific time in the future. |
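A self-contained check of the relative-versus-absolute convention above: delays below 1e9 seconds are treated as offsets from the current clock, anything larger as an absolute timestamp. The helper name is illustrative only.

import time

def resolve_when(delay, now=None):
    now = time.time() if now is None else now
    return delay + now if delay < 1e9 else delay

assert resolve_when(30, now=1_000) == 1_030                       # relative offset
assert resolve_when(2_000_000_000, now=1_000) == 2_000_000_000    # absolute time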
def encrypt(self, key, data, mode, padding):
# this can be disabled by _disable_encryption, so pylint: disable=method-hidden
"""Encrypt data using the supplied values.
:param bytes key: Loaded encryption key
:param bytes data: Data to encrypt
:param JavaMode mode: Encryption mode to use
:param JavaPadding padding: Padding mode to use
:returns: IV prepended to encrypted data
:rtype: bytes
"""
try:
block_size = self.cipher.block_size
iv_len = block_size // 8
iv = os.urandom(iv_len)
encryptor = Cipher(self.cipher(key), mode.build(iv), backend=default_backend()).encryptor()
padder = padding.build(block_size).padder()
padded_data = padder.update(data) + padder.finalize()
return iv + encryptor.update(padded_data) + encryptor.finalize()
except Exception:
error_message = "Encryption failed"
_LOGGER.exception(error_message)
raise EncryptionError(error_message) | Encrypt data using the supplied values.
:param bytes key: Loaded encryption key
:param bytes data: Data to encrypt
:param JavaMode mode: Encryption mode to use
:param JavaPadding padding: Padding mode to use
:returns: IV prepended to encrypted data
:rtype: bytes |
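A self-contained AES-CBC plus PKCS7 sketch with the cryptography package, mirroring the IV-prepended layout above but without the JavaMode/JavaPadding wrappers; the key size and message are illustrative.

import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def encrypt_cbc(key: bytes, data: bytes) -> bytes:
    iv = os.urandom(16)                              # AES block size is 128 bits
    padder = padding.PKCS7(128).padder()
    padded = padder.update(data) + padder.finalize()
    encryptor = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()).encryptor()
    return iv + encryptor.update(padded) + encryptor.finalize()

ciphertext = encrypt_cbc(os.urandom(32), b"hello world")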
def ensure_obj(f):
"""A decorator that ensures context.obj exists. If it doesn't,
a new dict() is created and stored as obj.
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
if ctx.obj is None:
ctx.obj = {}
return ctx.invoke(f, *args, **kwargs)
return update_wrapper(new_func, f) | A decorator that ensures context.obj exists. If it doesn't,
a new dict() is created and stored as obj. |
def queue_setup(filename, mode, batch_size, num_readers, min_examples):
""" Sets up the queue runners for data input """
filename_queue = tf.train.string_input_producer([filename], shuffle=True, capacity=16)
if mode == "train":
examples_queue = tf.RandomShuffleQueue(capacity=min_examples + 3 * batch_size,
min_after_dequeue=min_examples, dtypes=[tf.string])
else:
examples_queue = tf.FIFOQueue(capacity=min_examples + 3 * batch_size, dtypes=[tf.string])
enqueue_ops = list()
for _ in range(num_readers):
reader = tf.TFRecordReader()
_, value = reader.read(filename_queue)
enqueue_ops.append(examples_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
example_serialized = examples_queue.dequeue()
return example_serialized | Sets up the queue runners for data input |
def resolve_path(path, expected=None, multi_projects=False, allow_empty_string=True):
'''
:param path: A path to a data object to attempt to resolve
:type path: string
:param expected: one of the following: "folder", "entity", or None
to indicate whether the expected path is a folder, a data
object, or either
:type expected: string or None
:returns: A tuple of 3 values: container_ID, folderpath, entity_name
:rtype: string, string, string
:raises: exc:`ResolutionError` if the project cannot be resolved by
name or the path is malformed
:param allow_empty_string: If false, a ResolutionError will be
raised if *path* is an empty string. Use this when resolving
the empty string could result in unexpected behavior.
:type allow_empty_string: boolean
Attempts to resolve *path* to a project or container ID, a folder
path, and a data object or folder name. This method will NOT
raise an exception if the specified folder or object does not
exist. This method is primarily for parsing purposes.
Returns one of the following:
(project, folder, maybe_name)
where
project is a container ID (non-null)
folder is a folder path
maybe_name is a string if the path could represent a folder or an object, or
maybe_name is None if the path could only represent a folder
OR
(maybe_project, None, object_id)
where
maybe_project is a container ID or None
object_id is a dataobject, app, or execution (specified by ID, not name)
OR
(job_id, None, output_name)
where
job_id and output_name are both non-null
'''
# TODO: callers that intend to obtain a data object probably won't be happy
# with an app or execution ID. Callers should probably have to specify
# whether they are okay with getting an execution ID or not.
# TODO: callers that are looking for a place to write data, rather than
# read it, probably won't be happy with receiving an object ID, or a
# JBOR. Callers should probably specify whether they are looking for an
# "LHS" expression or not.
if '_DX_FUSE' in os.environ:
from xattr import xattr
path = xattr(path)['project'] + ":" + xattr(path)['id']
if path == '' and not allow_empty_string:
raise ResolutionError('Cannot parse ""; expected the path to be a non-empty string')
path = _maybe_convert_stringified_dxlink(path)
# Easy case: ":"
if path == ':':
if dxpy.WORKSPACE_ID is None:
raise ResolutionError("Cannot resolve \":\": expected a project name or ID "
"to the left of the colon, or for a current project to be set")
return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), '/', None
# Second easy case: empty string
if path == '':
if dxpy.WORKSPACE_ID is None:
raise ResolutionError('Expected a project name or ID to the left of a colon, '
'or for a current project to be set')
return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), dxpy.config.get('DX_CLI_WD', '/'), None
# Third easy case: hash ID
if is_container_id(path):
return ([path] if multi_projects else path), '/', None
elif is_hashid(path):
return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), None, path
# using a numerical sentinel value to indicate that it hasn't been
# set in case dxpy.WORKSPACE_ID is actually None
project = 0
folderpath = None
entity_name = None
wd = dxpy.config.get('DX_CLI_WD', u'/')
# Test for multiple colons
last_colon = get_last_pos_of_char(':', path)
if last_colon >= 0:
last_last_colon = get_last_pos_of_char(':', path[:last_colon])
if last_last_colon >= 0:
raise ResolutionError('Cannot parse "' + path + '" as a path; at most one unescaped colon can be present')
substrings = split_unescaped(':', path)
if len(substrings) == 2:
# One of the following:
# 1) job-id:fieldname
# 2) project-name-or-id:folderpath/to/possible/entity
if is_job_id(substrings[0]):
return ([substrings[0]] if multi_projects else substrings[0]), None, substrings[1]
if multi_projects:
project_ids = resolve_container_id_or_name(substrings[0], is_error=True, multi=True)
else:
project = resolve_container_id_or_name(substrings[0], is_error=True)
wd = '/'
elif get_last_pos_of_char(':', path) >= 0:
# :folderpath/to/possible/entity OR project-name-or-id:
# Colon is either at the beginning or at the end
wd = '/'
if path.startswith(':'):
if dxpy.WORKSPACE_ID is None:
raise ResolutionError('Cannot resolve "%s": expected a project name or ID to the left of the '
'colon, or for a current project to be set' % (path,))
project = dxpy.WORKSPACE_ID
else:
# One nonempty string to the left of a colon
project = resolve_container_id_or_name(substrings[0], is_error=True)
folderpath = '/'
else:
# One nonempty string, no colon present, do NOT interpret as
# project
project = dxpy.WORKSPACE_ID
if project is None:
raise ResolutionError('Cannot resolve "%s": expected the path to be qualified with a project name or ID, '
'and a colon; or for a current project to be set' % (path,))
# Determine folderpath and entity_name if necessary
if folderpath is None:
folderpath = substrings[-1]
folderpath, entity_name = clean_folder_path(('' if folderpath.startswith('/') else wd + '/') + folderpath, expected)
if multi_projects:
return (project_ids if project == 0 else [project]), folderpath, entity_name
else:
return project, folderpath, entity_name | :param path: A path to a data object to attempt to resolve
:type path: string
:param expected: one of the following: "folder", "entity", or None
to indicate whether the expected path is a folder, a data
object, or either
:type expected: string or None
:returns: A tuple of 3 values: container_ID, folderpath, entity_name
:rtype: string, string, string
:raises: exc:`ResolutionError` if the project cannot be resolved by
name or the path is malformed
:param allow_empty_string: If false, a ResolutionError will be
raised if *path* is an empty string. Use this when resolving
the empty string could result in unexpected behavior.
:type allow_empty_string: boolean
Attempts to resolve *path* to a project or container ID, a folder
path, and a data object or folder name. This method will NOT
raise an exception if the specified folder or object does not
exist. This method is primarily for parsing purposes.
Returns one of the following:
(project, folder, maybe_name)
where
project is a container ID (non-null)
folder is a folder path
maybe_name is a string if the path could represent a folder or an object, or
maybe_name is None if the path could only represent a folder
OR
(maybe_project, None, object_id)
where
maybe_project is a container ID or None
object_id is a dataobject, app, or execution (specified by ID, not name)
OR
(job_id, None, output_name)
where
job_id and output_name are both non-null |
def as_json(self):
# type: () -> dict
"""Represent effect as JSON dict."""
self._config['applyCss'] = self.applyCss
self._json['config'] = self._config
return self._json | Represent effect as JSON dict. |
def update_network(cx_str, network_id, ndex_cred=None):
"""Update an existing CX network on NDEx with new CX content.
Parameters
----------
cx_str : str
String containing the CX content.
network_id : str
UUID of the network on NDEx.
ndex_cred : dict
A dictionary with the following entries:
'user': NDEx user name
'password': NDEx password
"""
server = 'http://public.ndexbio.org'
username, password = get_default_ndex_cred(ndex_cred)
nd = ndex2.client.Ndex2(server, username, password)
try:
logger.info('Getting network summary...')
summary = nd.get_network_summary(network_id)
except Exception as e:
logger.error('Could not get NDEx network summary.')
logger.error(e)
return
# Update network content
try:
logger.info('Updating network...')
cx_stream = io.BytesIO(cx_str.encode('utf-8'))
nd.update_cx_network(cx_stream, network_id)
except Exception as e:
logger.error('Could not update NDEx network.')
logger.error(e)
return
# Update network profile
ver_str = summary.get('version')
new_ver = _increment_ndex_ver(ver_str)
profile = {'name': summary.get('name'),
'description': summary.get('description'),
'version': new_ver,
}
logger.info('Updating NDEx network (%s) profile to %s',
network_id, profile)
profile_retries = 5
for _ in range(profile_retries):
try:
time.sleep(5)
nd.update_network_profile(network_id, profile)
break
except Exception as e:
logger.error('Could not update NDEx network profile.')
logger.error(e)
set_style(network_id, ndex_cred) | Update an existing CX network on NDEx with new CX content.
Parameters
----------
cx_str : str
String containing the CX content.
network_id : str
UUID of the network on NDEx.
ndex_cred : dict
A dictionary with the following entries:
'user': NDEx user name
'password': NDEx password |
def container(self, name, length, type, *parameters):
"""Define a container with given length.
This is a convenience method creating a `Struct` with `length` containing fields defined in `type`.
"""
self.new_struct('Container', name, 'length=%s' % length)
BuiltIn().run_keyword(type, *parameters)
self.end_struct() | Define a container with given length.
This is a convenience method creating a `Struct` with `length` containing fields defined in `type`. |
def create_book(self, *args, **kwargs):
"""Pass through to provider BookAdminSession.create_book"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.create_bin
return Book(
self._provider_manager,
self._get_provider_session('book_admin_session').create_book(*args, **kwargs),
self._runtime,
self._proxy) | Pass through to provider BookAdminSession.create_book |
def items(self):
"""ItemList of children."""
if self.scraper.current_item is not self:
self._move_here()
if self._items is None:
self._items = ItemList()
self._items.scraper = self.scraper
self._items.collection = self
for i in self.scraper._fetch_itemslist(self):
i.parent = self
if i.type == TYPE_DATASET and i.dialect is None:
i.dialect = self.scraper.dialect
self._items.append(i)
return self._items | ItemList of children. |
def getImageDescriptor(self, im, xy=None):
""" getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
whether additional colors comes in play that require a redefined
palette. Still a maximum of 256 color per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in January 2011 to implement subrectangles.
"""
# Default use full image and place at upper left
if xy is None:
xy = (0, 0)
# Image separator,
bb = b'\x2C'
# Image position and size
bb += intToBin(xy[0]) # Left position
bb += intToBin(xy[1]) # Top position
bb += intToBin(im.size[0]) # image width
bb += intToBin(im.size[1]) # image height
# packed field: local color table flag1, interlace0, sorted table0,
# reserved00, lct size111=7=2^(7+1)=256.
bb += b'\x87'
# LZW minimum size code now comes later, beginning of [image data] blocks
return bb | getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise the global color table applies to all frames, irrespective of
whether additional colors come into play that require a redefined
palette. Still a maximum of 256 colors per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in January 2011 to implement subrectangles. |
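As a rough check of the byte layout described above, here is a standalone sketch that reproduces the descriptor for a 16x16 frame at the upper-left corner, assuming `intToBin` packs a 16-bit little-endian integer (the GIF convention):

import struct

def int_to_bin(i):
    # GIF stores 16-bit values little-endian; assumed to match intToBin above.
    return struct.pack('<H', i)

descriptor = (b'\x2C'            # image separator
              + int_to_bin(0)    # left position
              + int_to_bin(0)    # top position
              + int_to_bin(16)   # image width
              + int_to_bin(16)   # image height
              + b'\x87')         # packed field: LCT flag=1, size 111 -> 2^(7+1)=256
assert descriptor == b'\x2C\x00\x00\x00\x00\x10\x00\x10\x00\x87'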
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as file_handler:
return file_handler.read() | Build a file path from *paths* and return the contents. |
def setup_environ(manage_file, settings=None, more_pythonic=False):
"""Sets up a Django app within a manage.py file.
Keyword Arguments
**settings**
An imported settings module. Without this, playdoh tries to import
these modules (in order): DJANGO_SETTINGS_MODULE, settings
**more_pythonic**
When True, does not do any path hackery besides adding the vendor dirs.
This requires a newer Playdoh layout without top level apps, lib, etc.
"""
# sys is global to avoid undefined local
global sys, current_settings, execute_from_command_line, ROOT
ROOT = os.path.dirname(os.path.abspath(manage_file))
# Adjust the python path and put local packages in front.
prev_sys_path = list(sys.path)
# Make root application importable without the need for
# python setup.py install|develop
sys.path.append(ROOT)
if not more_pythonic:
warnings.warn("You're using an old-style Playdoh layout with a top "
"level __init__.py and apps directories. This is error "
"prone and fights the Zen of Python. "
"See http://playdoh.readthedocs.org/en/latest/"
"getting-started/upgrading.html")
# Give precedence to your app's parent dir, which contains __init__.py
sys.path.append(os.path.abspath(os.path.join(ROOT, os.pardir)))
site.addsitedir(path('apps'))
site.addsitedir(path('lib'))
# Local (project) vendor library
site.addsitedir(path('vendor-local'))
site.addsitedir(path('vendor-local/lib/python'))
# Global (upstream) vendor library
site.addsitedir(path('vendor'))
site.addsitedir(path('vendor/lib/python'))
# Move the new items to the front of sys.path. (via virtualenv)
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
from django.core.management import execute_from_command_line # noqa
if not settings:
if 'DJANGO_SETTINGS_MODULE' in os.environ:
settings = import_mod_by_name(os.environ['DJANGO_SETTINGS_MODULE'])
elif os.path.isfile(os.path.join(ROOT, 'settings_local.py')):
import settings_local as settings
warnings.warn("Using settings_local.py is deprecated. See "
"http://playdoh.readthedocs.org/en/latest/upgrading.html",
DeprecationWarning)
else:
import settings
current_settings = settings
validate_settings(settings) | Sets up a Django app within a manage.py file.
Keyword Arguments
**settings**
An imported settings module. Without this, playdoh tries to import
these modules (in order): DJANGO_SETTINGS_MODULE, settings
**more_pythonic**
When True, does not do any path hackery besides adding the vendor dirs.
This requires a newer Playdoh layout without top level apps, lib, etc. |
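A sketch of how a project's manage.py might call this; the `funfactory.manage` import path and the settings module name are assumptions, not confirmed by the snippet above.

#!/usr/bin/env python
import os
import sys

# Assumed import path for the module that defines setup_environ().
from funfactory import manage

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')
manage.setup_environ(__file__, more_pythonic=True)

if __name__ == '__main__':
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)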
def _add_decision_criteria(self, criteria_dict):
"""
Adds Decision Criteria to the ProbModelXML.
Parameters
----------
criteria_dict: dict
Dictionary containing Decision Criteria data.
For example: {'effectiveness': {}, 'cost': {}}
Examples
-------
>>> writer = ProbModelXMLWriter(model)
>>> writer._add_decision_criteria(criteria_dict)
"""
decision_tag = etree.SubElement(self.xml, 'DecisionCriteria', attrib={})
for criteria in sorted(criteria_dict):
criteria_tag = etree.SubElement(decision_tag, 'Criterion', attrib={'name': criteria})
self._add_additional_properties(criteria_tag, criteria_dict[criteria]) | Adds Decision Criteria to the ProbModelXML.
Parameters
----------
criteria_dict: dict
Dictionary containing Decision Criteria data.
For example: {'effectiveness': {}, 'cost': {}}
Examples
-------
>>> writer = ProbModelXMLWriter(model)
>>> writer._add_decision_criteria(criteria_dict) |
def sign(self, data, b64=True):
"""Sign data with the private key and return the signed data.
The signed data will be Base64 encoded if b64 is True.
"""
padder = padding.PKCS1v15()
signer = self.private_key.signer(padder, None)
if not isinstance(data, six.binary_type):
data = data.encode('utf_8')
signer.update(data)
signed = signer.finalize()
if b64:
signed = base64.b64encode(signed)
return signed | Sign data with the private key and return the signed data.
The signed data will be Base64 encoded if b64 is True. |
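The signer()/finalize() API used above comes from older releases of the `cryptography` package; below is a rough sketch of the same idea with the current one-shot sign() call. The SHA-256 digest is an assumption (the original passes None as the algorithm), so treat this as illustrative rather than a drop-in replacement.

import base64
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

# Throwaway key for illustration; recent cryptography releases need no explicit backend.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

data = 'payload to sign'.encode('utf_8')
signature = private_key.sign(data, padding.PKCS1v15(), hashes.SHA256())
print(base64.b64encode(signature))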
def unregister(self, model_or_iterable):
"""
Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotModerated("The model '%s' is not currently being moderated" % model._meta.module_name)
del self._registry[model] | Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation. |
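An illustrative interaction with the registry; `moderator`, `Post` and `Page` are hypothetical stand-ins for whatever registry instance and registered models a project actually has.

# Hypothetical names: `moderator` is the registry instance, Post/Page are registered models.
moderator.unregister([Post, Page])    # stop moderating several models at once
try:
    moderator.unregister(Post)        # already unregistered, so this raises
except NotModerated as exc:
    print(exc)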
def kernel_output(self, user_name, kernel_slug, **kwargs): # noqa: E501
"""Download the latest output from a kernel # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kernel_output(user_name, kernel_slug, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_name: Kernel owner (required)
:param str kernel_slug: Kernel name (required)
:return: Result
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.kernel_output_with_http_info(user_name, kernel_slug, **kwargs) # noqa: E501
else:
(data) = self.kernel_output_with_http_info(user_name, kernel_slug, **kwargs) # noqa: E501
return data | Download the latest output from a kernel # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kernel_output(user_name, kernel_slug, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_name: Kernel owner (required)
:param str kernel_slug: Kernel name (required)
:return: Result
If the method is called asynchronously,
returns the request thread. |
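A usage sketch mirroring the doctest-style lines in the docstring; `api` is assumed to be an instance of the client class that defines this method, and the owner/slug values are placeholders.

# Synchronous call (placeholder owner and kernel name):
result = api.kernel_output('some-user', 'some-kernel-slug')

# Asynchronous call, as described in the docstring:
thread = api.kernel_output('some-user', 'some-kernel-slug', async_req=True)
result = thread.get()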
def global_request_interceptor(self):
# type: () -> Callable
"""Decorator that can be used to add global request
interceptors easily to the builder.
The returned wrapper function can be applied as a decorator on
any function that processes the input. The function should
follow the signature of the process function in
:py:class:`ask_sdk_runtime.dispatch_components.request_components.AbstractRequestInterceptor`
class.
:return: Wrapper function that can be decorated on an
interceptor process function.
"""
def wrapper(process_func):
if not callable(process_func):
raise SkillBuilderException(
"Global Request Interceptor process_func input parameter "
"should be callable")
class_attributes = {
"process": lambda self, handler_input: process_func(
handler_input)
}
request_interceptor = type(
"RequestInterceptor{}".format(
process_func.__name__.title().replace("_", "")),
(AbstractRequestInterceptor,), class_attributes)
self.add_global_request_interceptor(
request_interceptor=request_interceptor())
return wrapper | Decorator that can be used to add global request
interceptors easily to the builder.
The returned wrapper function can be applied as a decorator on
any function that processes the input. The function should
follow the signature of the process function in
:py:class:`ask_sdk_runtime.dispatch_components.request_components.AbstractRequestInterceptor`
class.
:return: Wrapper function that can be decorated on an
interceptor process function. |
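A small sketch of the decorator in use; `sb` is assumed to be an instance of the builder class defining this method. Note the decorator factory is called with parentheses, and the decorated function is registered as an interceptor rather than meant to be called directly afterwards.

@sb.global_request_interceptor()
def log_request(handler_input):
    # Runs before every request handler is invoked.
    print("Request received: {}".format(handler_input.request_envelope.request))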
def encompasses(self, span):
"""
Returns true if the given span fits inside this one
"""
if isinstance(span, list):
return [sp for sp in span if self._encompasses(sp)]
return self._encompasses(span) | Returns true if the given span fits inside this one |
def emit(self, record):
"""
Handler instance that records to the database.
The try/except prevents a circular import.
Reference:
http://stackoverflow.com/questions/4379042/django-circular-model-import-issue
"""
try:
from .models import Model, Application, ModelObject
from django.contrib.contenttypes.models import ContentType
if not hasattr(record, 'action'):
from .settings import AUTOMATED_LOGGING
signal = True
for excluded in AUTOMATED_LOGGING['exclude']:
if record.module.startswith(excluded):
signal = False
break
if 'unspecified' not in AUTOMATED_LOGGING['modules']:
signal = False
if signal:
from .models import Unspecified
entry = Unspecified()
if hasattr(record, 'message'):
entry.message = record.message
entry.level = record.levelno
entry.file = record.pathname
entry.line = record.lineno
entry.save()
if self.maxage:
current = timezone.now()
Unspecified.objects.filter(created_at__lte=current - self.maxage)\
.delete()
return
if record.action == 'model':
if ('al_evt' in record.data['instance'].__dict__.keys() and
record.data['instance'].al_evt):
entry = record.data['instance'].al_evt
else:
entry = Model()
entry.user = record.data['user']
entry.save()
name = record.data['instance']._meta.app_label
entry.application = Application.objects.get_or_create(name=name)[0]
if hasattr(record, 'message'):
entry.message = record.message
if record.data['status'] == 'add':
status = 1
elif record.data['status'] == 'change':
status = 2
elif record.data['status'] == 'delete':
status = 3
else:
status = 0
from automated_logging.settings import AUTOMATED_LOGGING
if not AUTOMATED_LOGGING['save_na']:
entry.delete()
return None
entry.action = status
entry.information = ModelObject()
entry.information.value = repr(record.data['instance'])[:255]
ct = ContentType.objects.get_for_model(record.data['instance'])
try:
# check if the ContentType actually exists.
ContentType.objects.get(pk=ct.pk)
ct_exists = True
except ContentType.DoesNotExist:
ct_exists = False
if ct_exists:
entry.information.type = ct
entry.information.save()
record.data['instance'].al_evt = entry
if ('al_chl' in record.data['instance'].__dict__.keys() and
record.data['instance'].al_chl and
not entry.modification):
entry.modification = record.data['instance'].al_chl
entry.save()
if self.maxage:
current = timezone.now()
Model.objects.filter(created_at__lte=current - self.maxage).delete()
elif record.action == 'request':
from .models import Request
if record.data['uri'] is not None:
entry = Request()
entry.application = record.data['application']
entry.uri = record.data['uri'][:255]
entry.user = record.data['user']
entry.status = record.data['status']
entry.method = record.data['method']
entry.save()
if self.maxage:
current = timezone.now()
Request.objects.filter(created_at__lte=current - self.maxage).delete()
except Exception as e:
print(e) | Handler instance that records to the database.
The try/except prevents a circular import.
Reference:
http://stackoverflow.com/questions/4379042/django-circular-model-import-issue |
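For context, a hypothetical Django LOGGING snippet that would route records to the handler class owning this emit(); the dotted path to the handler is an assumption and would need to match the real package layout.

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        # Assumed dotted path to the handler class that defines emit() above.
        'database': {
            'level': 'INFO',
            'class': 'automated_logging.handlers.DatabaseHandler',
        },
    },
    'loggers': {
        'automated_logging': {
            'level': 'INFO',
            'handlers': ['database'],
        },
    },
}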
def strip_required_prefix(string, prefix):
"""
>>> strip_required_prefix('abcdef', 'abc')
'def'
>>> strip_required_prefix('abcdef', '123')
Traceback (most recent call last):
...
AssertionError: String starts with 'abc', not '123'
"""
if string.startswith(prefix):
return string[len(prefix):]
raise AssertionError('String starts with %r, not %r' % (string[:len(prefix)], prefix)) | >>> strip_required_prefix('abcdef', 'abc')
'def'
>>> strip_required_prefix('abcdef', '123')
Traceback (most recent call last):
...
AssertionError: String starts with 'abc', not '123' |
def handle(self):
"""
Handles a TCP client
"""
_logger.info(
"RemoteConsole client connected: [%s]:%d",
self.client_address[0],
self.client_address[1],
)
# Prepare the session
session = beans.ShellSession(
beans.IOHandler(self.rfile, self.wfile),
{"remote_client_ip": self.client_address[0]},
)
# Print the banner
def get_ps1():
"""
Gets the prompt string from the session of the shell service
:return: The prompt string
"""
try:
return session.get("PS1")
except KeyError:
return self._shell.get_ps1()
self.send(self._shell.get_banner())
self.send(get_ps1())
try:
while self._active.get_value():
# Wait for data
rlist = select([self.connection], [], [], .5)[0]
if not rlist:
# Nothing to do (poll timed out)
continue
data = self.rfile.readline()
if not data:
# End of stream (client gone)
break
# Strip the line
line = data.strip()
if not line:
# Empty line
continue
# Execute it
try:
self._shell.handle_line(line, session)
except KeyboardInterrupt:
# Stop there on interruption
self.send("\nInterruption received.")
return
except IOError as ex:
# I/O errors are fatal
_logger.exception(
"Error communicating with a client: %s", ex
)
break
except Exception as ex:
# Other exceptions are not important
import traceback
self.send("\nError during last command: {0}\n".format(ex))
self.send(traceback.format_exc())
# Print the prompt
self.send(get_ps1())
finally:
_logger.info(
"RemoteConsole client gone: [%s]:%d",
self.client_address[0],
self.client_address[1],
)
try:
# Be polite
self.send("\nSession closed. Good bye.\n")
self.finish()
except IOError as ex:
_logger.warning("Error cleaning up connection: %s", ex) | Handles a TCP client |
def _remove_exts(self,string):
"""
Strip any image extension from the string before hashing, remembering it as the output format.
"""
# If the user hasn't disabled it, we will detect image extensions, such as .png, .jpg, etc.
# We'll remove them from the string before hashing.
# This ensures that /Bear.png and /Bear.bmp will send back the same image, in different formats.
if string.lower().endswith(('.png','.gif','.jpg','.bmp','.jpeg','.ppm','.datauri')):
format = string[string.rfind('.') +1 :len(string)]
if format.lower() == 'jpg':
format = 'jpeg'
self.format = format
string = string[0:string.rfind('.')]
return string | Strip any image extension from the string before hashing, remembering it as the output format. |
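A standalone sketch of the same extension handling (the real method also stores the detected format on `self.format`):

def split_ext(string):
    # Mirrors the extension handling above as a plain function, for illustration.
    exts = ('.png', '.gif', '.jpg', '.bmp', '.jpeg', '.ppm', '.datauri')
    fmt = None
    if string.lower().endswith(exts):
        fmt = string[string.rfind('.') + 1:]
        if fmt.lower() == 'jpg':
            fmt = 'jpeg'
        string = string[:string.rfind('.')]
    return string, fmt

print(split_ext('Bear.png'))   # ('Bear', 'png')
print(split_ext('Bear.JPG'))   # ('Bear', 'jpeg')
print(split_ext('Bear'))       # ('Bear', None)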
def pdf_link(self, link_f, y, Y_metadata=None):
"""
Likelihood function given link(f)
.. math::
p(y_{i}|\\lambda(f_{i})) = \\frac{\\lambda(f_{i})^{y_{i}}}{y_{i}!}e^{-\\lambda(f_{i})}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
return np.exp(self.logpdf_link(link_f, y, Y_metadata)) | Likelihood function given link(f)
.. math::
p(y_{i}|\\lambda(f_{i})) = \\frac{\\lambda(f_{i})^{y_{i}}}{y_{i}!}e^{-\\lambda(f_{i})}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float |
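A quick standalone check of the Poisson likelihood the docstring describes (this sidesteps the class and just evaluates the pmf via its log form; scipy's gammaln supplies the log-factorial term):

import numpy as np
from scipy.special import gammaln

def poisson_pdf(link_f, y):
    # p(y | lam) = lam**y * exp(-lam) / y!, evaluated in log space for stability.
    return np.exp(y * np.log(link_f) - link_f - gammaln(y + 1))

link_f = np.array([1.5, 2.0, 0.5])   # lambda(f_i)
y = np.array([1.0, 3.0, 0.0])
print(poisson_pdf(link_f, y))        # first entry: 1.5 * exp(-1.5) ≈ 0.335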
def toggle_logo_path(self):
"""Set state of logo path line edit and button."""
is_checked = self.custom_organisation_logo_check_box.isChecked()
if is_checked:
# Use previous org logo path
path = setting(
key='organisation_logo_path',
default=supporters_logo_path(),
expected_type=str,
qsettings=self.settings)
else:
# Set organisation path line edit to default one
path = supporters_logo_path()
self.organisation_logo_path_line_edit.setText(path)
self.organisation_logo_path_line_edit.setEnabled(is_checked)
self.open_organisation_logo_path_button.setEnabled(is_checked) | Set state of logo path line edit and button. |
def update_looks(self):
""" Updates the looks of the gate depending on state. """
if self.state == 'active':
style = {'color': 'red', 'linestyle': 'solid', 'fill': False}
else:
style = {'color': 'black', 'fill': False}
self.poly.update(style) | Updates the looks of the gate depending on state. |
def time2pbspro(timeval, unit="s"):
"""
Convert a number representing a time value in the given unit (Default: seconds)
to a string following the PbsPro convention: "hours:minutes:seconds".
>>> assert time2pbspro(2, unit="d") == '48:0:0'
"""
h, m, s = 3600, 60, 1
timeval = Time(timeval, unit).to("s")
hours, minutes = divmod(timeval, h)
minutes, secs = divmod(minutes, m)
return "%d:%d:%d" % (hours, minutes, secs) | Convert a number representing a time value in the given unit (Default: seconds)
to a string following the PbsPro convention: "hours:minutes:seconds".
>>> assert time2pbspro(2, unit="d") == '48:0:0' |
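Working the doctest above through by hand (2 days = 172800 seconds):

hours, rem = divmod(172800, 3600)            # (48, 0)
minutes, secs = divmod(rem, 60)              # (0, 0)
print("%d:%d:%d" % (hours, minutes, secs))   # -> 48:0:0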
def _on_sphinx_thread_error_msg(self, error_msg):
""" Display error message on Sphinx rich text failure"""
self._sphinx_thread.wait()
self.plain_text_action.setChecked(True)
sphinx_ver = programs.get_module_version('sphinx')
QMessageBox.critical(self,
_('Help'),
_("The following error occured when calling "
"<b>Sphinx %s</b>. <br>Incompatible Sphinx "
"version or doc string decoding failed."
"<br><br>Error message:<br>%s"
) % (sphinx_ver, error_msg)) | Display error message on Sphinx rich text failure |