| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
try:
return legacy_decrypt(*args, **kwargs)
except (NotYetValid, Expired) as e:
# These should be raised immediately:
# the token has been decrypted successfully to get to here, so
# falling back to `spec_compliant_decrypt` will not help things.
raise e
except (Error, ValueError) as e:
return spec_compliant_decrypt(*args, **kwargs)
|
def decrypt(*args, **kwargs)
|
Decrypts legacy or spec-compliant JOSE token.
First attempts to decrypt the token in a legacy mode
(https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-19).
If it is not a valid legacy token then attempts to decrypt it in a
spec-compliant way (http://tools.ietf.org/html/rfc7519)
| 9.584394
| 7.345369
| 1.304821
|
(hash_fn, _), mod = JWA[alg]
header = dict((add_header or {}).items() + [(HEADER_ALG, alg)])
header, payload = map(b64encode_url, map(json_encode, (header, claims)))
sig = b64encode_url(hash_fn(_jws_hash_str(header, payload), jwk['k'],
mod=mod))
return JWS(header, payload, sig)
|
def sign(claims, jwk, add_header=None, alg='HS256')
|
Signs the given claims and produces a :class:`~jose.JWS`
:param claims: A `dict` representing the claims for this
:class:`~jose.JWS`.
:param jwk: A `dict` representing the JWK to be used for signing of the
:class:`~jose.JWS`. This parameter is algorithm-specific.
:param add_header: Additional items to be added to the header.
Additional headers *will* be authenticated.
:param alg: The algorithm to use to produce the signature.
:rtype: :class:`~jose.JWS`
| 6.221216
| 7.604618
| 0.818084
|
header, payload, sig = map(b64decode_url, jws)
header = json_decode(header)
if alg != header[HEADER_ALG]:
raise Error('Invalid algorithm')
(_, verify_fn), mod = JWA[header[HEADER_ALG]]
if not verify_fn(_jws_hash_str(jws.header, jws.payload),
jwk['k'], sig, mod=mod):
raise Error('Mismatched signatures')
claims = json_decode(b64decode_url(jws.payload))
_validate(claims, validate_claims, expiry_seconds)
return JWT(header, claims)
|
def verify(jws, jwk, alg, validate_claims=True, expiry_seconds=None)
|
Verifies the given :class:`~jose.JWS`
:param jws: The :class:`~jose.JWS` to be verified.
:param jwk: A `dict` representing the JWK to use for verification. This
parameter is algorithm-specific.
:param alg: The algorithm to verify the signature with.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if the algorithm does not match or the signature does not verify
| 5.475872
| 6.144145
| 0.891234
|
istr = encode_safe(istr)
try:
return urlsafe_b64decode(istr + '=' * (4 - (len(istr) % 4)))
except TypeError as e:
raise Error('Unable to decode base64: %s' % (e))
|
def b64decode_url(istr)
|
JWT tokens may arrive with the usual trailing padding '='
symbols stripped. Compensate by padding to the nearest 4 bytes.
| 3.124312
| 3.276864
| 0.953446
|
if not validate_claims:
return
now = time()
# TODO: implement support for clock skew
# The exp (expiration time) claim identifies the expiration time on or
# after which the JWT MUST NOT be accepted for processing. The
# processing of the exp claim requires that the current date/time MUST
# be before the expiration date/time listed in the exp claim.
try:
expiration_time = claims[CLAIM_EXPIRATION_TIME]
except KeyError:
pass
else:
_check_expiration_time(now, expiration_time)
# The iat (issued at) claim identifies the time at which the JWT was
# issued. This claim can be used to determine the age of the JWT.
# If expiry_seconds is provided, and the iat claim is present,
# determine the age of the token and check if it has expired.
try:
issued_at = claims[CLAIM_ISSUED_AT]
except KeyError:
pass
else:
if expiry_seconds is not None:
_check_expiration_time(now, issued_at + expiry_seconds)
# The nbf (not before) claim identifies the time before which the JWT
# MUST NOT be accepted for processing. The processing of the nbf claim
# requires that the current date/time MUST be after or equal to the
# not-before date/time listed in the nbf claim.
try:
not_before = claims[CLAIM_NOT_BEFORE]
except KeyError:
pass
else:
_check_not_before(now, not_before)
|
def _validate(claims, validate_claims, expiry_seconds)
|
Validate expiry related claims.
If validate_claims is False, do nothing.
Otherwise, validate the exp and nbf claims if they are present, and
validate the iat claim if expiry_seconds is provided.
| 2.000046
| 1.926648
| 1.038097
|
return global_registry().gauge(key, gauge=gauge, default=default, **dims)
|
def gauge(key, gauge=None, default=float("nan"), **dims)
|
Adds gauge with dimensions to the global pyformance registry
| 4.755735
| 3.447481
| 1.379481
|
def counter_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
counter("%s_calls" %
pyformance.registry.get_qualname(fn), **dims).inc()
return fn(*args, **kwargs)
return fn_wrapper
return counter_wrapper
|
def count_calls_with_dims(**dims)
|
Decorator to track the number of times a function is called,
with dimensions.
| 4.08225
| 3.978266
| 1.026138
|
def meter_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
meter("%s_calls" %
pyformance.registry.get_qualname(fn), **dims).mark()
return fn(*args, **kwargs)
return fn_wrapper
return meter_wrapper
|
def meter_calls_with_dims(**dims)
|
Decorator to track the rate at which a function is called
with dimensions.
| 3.906639
| 3.768207
| 1.036737
|
@functools.wraps(fn)
def wrapper(*args, **kwargs):
_histogram = histogram(
"%s_calls" % pyformance.registry.get_qualname(fn))
rtn = fn(*args, **kwargs)
if type(rtn) in (int, float):
_histogram.add(rtn)
return rtn
return wrapper
|
def hist_calls(fn)
|
Decorator to check the distribution of return values of a function.
| 4.0793
| 3.87478
| 1.052782
|
def hist_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
_histogram = histogram(
"%s_calls" % pyformance.registry.get_qualname(fn), **dims)
rtn = fn(*args, **kwargs)
if type(rtn) in (int, float):
_histogram.add(rtn)
return rtn
return fn_wrapper
return hist_wrapper
|
def hist_calls_with_dims(**dims)
|
Decorator to check the distribution of return values of a
function with dimensions.
| 3.731494
| 3.549853
| 1.051169
|
def time_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
_timer = timer("%s_calls" %
pyformance.registry.get_qualname(fn), **dims)
with _timer.time(fn=pyformance.registry.get_qualname(fn)):
return fn(*args, **kwargs)
return fn_wrapper
return time_wrapper
|
def time_calls_with_dims(**dims)
|
Decorator to time the execution of the function with dimensions.
| 4.049574
| 3.791468
| 1.068075
|
return super(MetricsRegistry, self).add(
self.metadata.register(key, **dims), metric)
|
def add(self, key, metric, **dims)
|
Adds custom metric instances, which are not created with their
constructors' default arguments, to the registry with dimensions
| 10.306337
| 9.417229
| 1.094413
|
return super(MetricsRegistry, self).counter(
self.metadata.register(key, **dims))
|
def counter(self, key, **dims)
|
Adds counter with dimensions to the registry
| 13.512028
| 10.281936
| 1.314152
|
return super(MetricsRegistry, self).histogram(
self.metadata.register(key, **dims))
|
def histogram(self, key, **dims)
|
Adds histogram with dimensions to the registry
| 14.685633
| 10.788639
| 1.361213
|
return super(MetricsRegistry, self).gauge(
self.metadata.register(key, **dims), gauge=gauge, default=default)
|
def gauge(self, key, gauge=None, default=float("nan"), **dims)
|
Adds gauge with dimensions to the registry
| 7.212727
| 6.434132
| 1.12101
|
return super(MetricsRegistry, self).meter(
self.metadata.register(key, **dims))
|
def meter(self, key, **dims)
|
Adds meter with dimensions to the registry
| 13.783063
| 10.141212
| 1.359114
|
return super(MetricsRegistry, self).timer(
self.metadata.register(key, **dims))
|
def timer(self, key, **dims)
|
Adds timer with dimensions to the registry
| 13.874782
| 10.571656
| 1.312451
|
return super(RegexRegistry, self).timer(self._get_key(key), **dims)
|
def timer(self, key, **dims)
|
Adds timer with dimensions to the registry
| 8.599865
| 6.737528
| 1.276412
|
return super(RegexRegistry, self).histogram(self._get_key(key), **dims)
|
def histogram(self, key, **dims)
|
Adds histogram with dimensions to the registry
| 9.164052
| 6.980687
| 1.312772
|
return super(RegexRegistry, self).counter(self._get_key(key), **dims)
|
def counter(self, key, **dims)
|
Adds counter with dimensions to the registry
| 8.906584
| 6.798339
| 1.310112
|
return super(RegexRegistry, self).gauge(
self._get_key(key), gauge=gauge, default=default, **dims)
|
def gauge(self, key, gauge=None, default=float("nan"), **dims)
|
Adds gauge with dimensions to the registry
| 5.311579
| 4.527774
| 1.173111
|
return super(RegexRegistry, self).meter(self._get_key(key), **dims)
|
def meter(self, key, **dims)
|
Adds meter with dimensions to the registry
| 8.883645
| 6.786922
| 1.308936
|
with self._lock:
for dimension in dimension_names:
if dimension in self._extra_dimensions:
del self._extra_dimensions[dimension]
|
def remove_dimensions(self, dimension_names)
|
Removes extra dimensions added by the add_dimensions() function.
Ignores dimension names that don't exist.
Args:
dimension_names (list): List of dimension names to remove.
| 3.293815
| 3.419626
| 0.963209
|
if not gauges and not cumulative_counters and not counters:
return
data = {
'cumulative_counter': cumulative_counters,
'gauge': gauges,
'counter': counters,
}
_logger.debug('Sending datapoints to SignalFx: %s', data)
for metric_type, datapoints in data.items():
if not datapoints:
continue
if not isinstance(datapoints, list):
raise TypeError('Datapoints not of type list %s' % datapoints)
for datapoint in datapoints:
self._add_extra_dimensions(datapoint)
self._add_to_queue(metric_type, datapoint)
# Ensure the sending thread is running.
self._start_thread()
|
def send(self, cumulative_counters=None, gauges=None, counters=None)
|
Send the given metrics to SignalFx.
Args:
cumulative_counters (list): a list of dictionaries representing the
cumulative counters to report.
gauges (list): a list of dictionaries representing the gauges to
report.
counters (list): a list of dictionaries representing the counters
to report.
| 3.183112
| 2.996759
| 1.062185
|
if category and category not in SUPPORTED_EVENT_CATEGORIES:
raise ValueError('Event category is not one of the supported ' +
'types: {' +
', '.join(SUPPORTED_EVENT_CATEGORIES) + '}')
data = {
'eventType': event_type,
'category': category,
'dimensions': dimensions or {},
'properties': properties or {},
'timestamp': int(timestamp) if timestamp else None,
}
_logger.debug('Sending event to SignalFx: %s', data)
self._add_extra_dimensions(data)
return self._send_event(event_data=data, url='{0}/{1}'.format(
self._endpoint, self._INGEST_ENDPOINT_EVENT_SUFFIX),
session=self._session)
|
def send_event(self, event_type, category=None, dimensions=None,
properties=None, timestamp=None)
|
Send an event to SignalFx.
Args:
event_type (string): the event type (name of the event time
series).
category (string): the category of the event.
dimensions (dict): a map of event dimensions.
properties (dict): a map of extra properties on that event.
timestamp (float): timestamp at which the event occurred.
| 3.578663
| 3.733905
| 0.958424
|
with self._lock:
if not self._thread_running:
return
self._thread_running = False
self._queue.put(_BaseSignalFxIngestClient._QUEUE_STOP)
self._send_thread.join()
_logger.debug(msg)
|
def stop(self, msg='Thread stopped')
|
Stop send thread and flush points for a safe exit.
| 6.794167
| 5.963403
| 1.13931
|
# bool inherits int, so bool instance check must be executed prior to
# checking for integer types
if isinstance(value, bool) and _bool is True:
pbuf_obj.value.boolValue = value
elif isinstance(value, six.integer_types) and \
not isinstance(value, bool) and _integer is True:
if value < INTEGER_MIN or value > INTEGER_MAX:
raise ValueError(
('{}: {} exceeds signed 64 bit integer range '
'as defined by ProtocolBuffers ({} to {})')
.format(error_prefix, str(value),
str(INTEGER_MIN), str(INTEGER_MAX)))
pbuf_obj.value.intValue = value
elif isinstance(value, float) and _float is True:
pbuf_obj.value.doubleValue = value
elif isinstance(value, six.string_types) and _string is True:
pbuf_obj.value.strValue = value
else:
raise ValueError(
'{}: {} is of invalid type {}'
.format(error_prefix, str(value), str(type(value))))
|
def _assign_value_by_type(self, pbuf_obj, value, _bool=True, _float=True,
_integer=True, _string=True, error_prefix='')
|
Assigns the supplied value to the appropriate protobuf value type
| 2.646549
| 2.689675
| 0.983966
|
self._assign_value_by_type(pbuf_dp, value, _bool=False,
error_prefix='Invalid value')
|
def _assign_value(self, pbuf_dp, value)
|
Assigns a value to the protobuf obj
| 10.672882
| 9.943721
| 1.073329
|
iterator = iter(self._stream)
while self._state < Computation.STATE_COMPLETED:
try:
message = next(iterator)
except StopIteration:
if self._state < Computation.STATE_COMPLETED:
self._stream = self._execute()
iterator = iter(self._stream)
continue
if isinstance(message, messages.StreamStartMessage):
self._state = Computation.STATE_STREAM_STARTED
continue
if isinstance(message, messages.JobStartMessage):
self._state = Computation.STATE_COMPUTATION_STARTED
self._id = message.handle
yield message
continue
if isinstance(message, messages.JobProgressMessage):
yield message
continue
if isinstance(message, messages.ChannelAbortMessage):
self._state = Computation.STATE_ABORTED
raise errors.ComputationAborted(message.abort_info)
if isinstance(message, messages.EndOfChannelMessage):
self._state = Computation.STATE_COMPLETED
continue
# Intercept metadata messages to accumulate received metadata...
if isinstance(message, messages.MetadataMessage):
self._metadata[message.tsid] = message.properties
yield message
continue
# ...as well as expired-tsid messages to clean it up.
if isinstance(message, messages.ExpiredTsIdMessage):
if message.tsid in self._metadata:
del self._metadata[message.tsid]
yield message
continue
if isinstance(message, messages.InfoMessage):
self._process_info_message(message.message)
self._batch_count_detected = True
if self._current_batch_message:
yield self._get_batch_to_yield()
continue
# Accumulate data messages and release them when we have received
# all batches for the same logical timestamp.
if isinstance(message, messages.DataMessage):
self._state = Computation.STATE_DATA_RECEIVED
if not self._batch_count_detected:
self._expected_batches += 1
if not self._current_batch_message:
self._current_batch_message = message
self._current_batch_count = 1
elif (message.logical_timestamp_ms ==
self._current_batch_message.logical_timestamp_ms):
self._current_batch_message.add_data(message.data)
self._current_batch_count += 1
else:
self._batch_count_detected = True
if (self._batch_count_detected and
self._current_batch_count == self._expected_batches):
yield self._get_batch_to_yield()
continue
if isinstance(message, messages.EventMessage):
yield message
continue
if isinstance(message, messages.ErrorMessage):
raise errors.ComputationFailed(message.errors)
# Yield last batch, even if potentially incomplete.
if self._current_batch_message:
yield self._get_batch_to_yield()
|
def stream(self)
|
Iterate over the messages from the computation's output.
Control and metadata messages are intercepted and interpreted to
enhance this Computation object's knowledge of the computation's
context. Data and event messages are yielded back to the caller as a
generator.
| 2.74416
| 2.60647
| 1.052826
|
# Extract the output resolution from the appropriate message, if
# it's present.
if message['messageCode'] == 'JOB_RUNNING_RESOLUTION':
self._resolution = message['contents']['resolutionMs']
elif message['messageCode'] == 'FETCH_NUM_TIMESERIES':
self._num_input_timeseries += int(message['numInputTimeSeries'])
|
def _process_info_message(self, message)
|
Process an information message received from the computation.
| 10.522623
| 10.411176
| 1.010705
|
params = self._get_params(start=start, stop=stop,
resolution=resolution,
maxDelay=max_delay,
persistent=persistent,
immediate=immediate,
disableAllMetricPublishes=disable_all_metric_publishes)
def exec_fn(since=None):
if since:
params['start'] = since
return self._transport.execute(program, params)
c = computation.Computation(exec_fn)
self._computations.add(c)
return c
|
def execute(self, program, start=None, stop=None, resolution=None,
max_delay=None, persistent=False, immediate=False,
disable_all_metric_publishes=None)
|
Execute the given SignalFlow program and stream the output back.
| 3.03948
| 3.020508
| 1.006281
|
params = self._get_params(start=start, stop=stop,
resolution=resolution,
maxDelay=max_delay)
def exec_fn(since=None):
if since:
params['start'] = since
return self._transport.preflight(program, params)
c = computation.Computation(exec_fn)
self._computations.add(c)
return c
|
def preflight(self, program, start, stop, resolution=None,
max_delay=None)
|
Preflight the given SignalFlow program and stream the output
back.
| 4.563303
| 4.916777
| 0.928109
|
params = self._get_params(start=start, stop=stop,
resolution=resolution,
maxDelay=max_delay)
self._transport.start(program, params)
|
def start(self, program, start=None, stop=None, resolution=None,
max_delay=None)
|
Start executing the given SignalFlow program without being attached
to the output of the computation.
| 3.762666
| 4.0155
| 0.937036
|
params = self._get_params(filters=filters, resolution=resolution)
c = computation.Computation(
lambda since: self._transport.attach(handle, params))
self._computations.add(c)
return c
|
def attach(self, handle, filters=None, resolution=None)
|
Attach to an existing SignalFlow computation.
| 6.469855
| 5.62562
| 1.15007
|
params = self._get_params(reason=reason)
self._transport.stop(handle, params)
|
def stop(self, handle, reason=None)
|
Stop a SignalFlow computation.
| 6.387542
| 6.306804
| 1.012802
|
_logger.debug('Performing an elasticsearch for %(qry)s at %(pt)s',
{'qry': query, 'pt': metadata_endpoint})
url_to_get = '{0}?query={1}'.format(self._u(metadata_endpoint), query)
if order_by is not None:
url_to_get += '&orderBy=' + order_by
# for offset and limit, use API defaults (by leaving them out of url)
if offset is not None:
url_to_get += '&offset=' + str(offset)
if limit is not None:
url_to_get += '&limit=' + str(limit)
timeout = timeout or self._timeout
resp = self._get(url_to_get, session=self._session, timeout=timeout)
resp.raise_for_status()
return resp.json()
|
def _search_metrics_and_metadata(self, metadata_endpoint, query,
order_by=None, offset=None,
limit=None, timeout=None)
|
generic function for elasticsearch queries; can search metrics,
dimensions, or metric time series by changing metadata_endpoint
Args:
metadata_endpoint (string): API endpoint suffix (e.g. 'v2/metric')
query (string): elasticsearch string query
order_by (optional[string]): property by which to order results
offset (optional[int]): number of results to skip for pagination
(default=0)
limit (optional[int]): how many results to return (default=50)
timeout (optional[int]): how long to wait for response (in seconds)
Returns:
dictionary of query result
| 3.392095
| 3.374108
| 1.005331
|
timeout = timeout or self._timeout
resp = self._get(self._u(object_endpoint, object_name),
session=self._session, timeout=timeout)
resp.raise_for_status()
return resp.json()
|
def _get_object_by_name(self, object_endpoint, object_name, timeout=None)
|
generic function to get an object (metadata, tag, ...) by name from SignalFx.
Args:
object_endpoint (string): API endpoint suffix (e.g. 'v2/tag')
object_name (string): name of the object (e.g. 'jvm.cpu.load')
Returns:
dictionary of response
| 3.723722
| 5.069815
| 0.734489
|
return self._search_metrics_and_metadata(
self._METRIC_ENDPOINT_SUFFIX, *args, **kwargs)
|
def search_metrics(self, *args, **kwargs)
|
Args:
query (string): elasticsearch string query
order_by (optional[string]): property by which to order results
offset (optional[int]): number of results to skip for pagination
(default=0)
limit (optional[int]): how many results to return (default=50)
timeout (optional[int]): how long to wait for response (in seconds)
Returns:
result of query search on metrics
| 10.608509
| 16.581841
| 0.639767
|
return self._get_object_by_name(self._METRIC_ENDPOINT_SUFFIX,
metric_name,
**kwargs)
|
def get_metric_by_name(self, metric_name, **kwargs)
|
get a metric by name
Args:
metric_name (string): name of metric
Returns:
dictionary of response
| 6.381822
| 8.299677
| 0.768924
|
data = {'type': metric_type.upper(),
'description': description or '',
'customProperties': custom_properties or {},
'tags': tags or []}
resp = self._put(self._u(self._METRIC_ENDPOINT_SUFFIX,
str(metric_name)),
data=data, **kwargs)
resp.raise_for_status()
return resp.json()
|
def update_metric_by_name(self, metric_name, metric_type, description=None,
custom_properties=None, tags=None, **kwargs)
|
Create or update a metric object
Args:
metric_name (string): name of metric
metric_type (string): metric type, must be one of 'gauge', 'counter',
'cumulative_counter'
description (optional[string]): a description
custom_properties (optional[dict]): dictionary of custom properties
tags (optional[list of strings]): list of tags associated with
metric
| 3.235869
| 3.995282
| 0.809923
|
return self._search_metrics_and_metadata(
self._DIMENSION_ENDPOINT_SUFFIX, *args, **kwargs)
|
def search_dimensions(self, *args, **kwargs)
|
Args:
query (string): elasticsearch string query
order_by (optional[string]): property by which to order results
offset (optional[int]): number of results to skip for pagination
(default=0)
limit (optional[int]): how many results to return (default=50)
timeout (optional[int]): how long to wait for response (in seconds)
Returns:
result of query search on dimensions
| 12.109378
| 15.388577
| 0.786907
|
return self._get_object_by_name(self._DIMENSION_ENDPOINT_SUFFIX,
'{0}/{1}'.format(key, value),
**kwargs)
|
def get_dimension(self, key, value, **kwargs)
|
get a dimension by key and value
Args:
key (string): key of the dimension
value (string): value of the dimension
Returns:
dictionary of response
| 7.136742
| 8.348245
| 0.854879
|
data = {'description': description or '',
'customProperties': custom_properties or {},
'tags': tags or [],
'key': key,
'value': value}
resp = self._put(self._u(self._DIMENSION_ENDPOINT_SUFFIX, key, value),
data=data, **kwargs)
resp.raise_for_status()
return resp.json()
|
def update_dimension(self, key, value, description=None,
custom_properties=None, tags=None, **kwargs)
|
update a dimension
Args:
key (string): key of the dimension
value (string): value of the dimension
description (optional[string]): a description
custom_properties (optional[dict]): dictionary of custom properties
tags (optional[list of strings]): list of tags associated with the
dimension
| 3.1607
| 3.656062
| 0.864509
|
return self._search_metrics_and_metadata(self._MTS_ENDPOINT_SUFFIX,
*args, **kwargs)
|
def search_metric_time_series(self, *args, **kwargs)
|
Args:
query (string): elasticsearch string query
order_by (optional[string]): property by which to order results
offset (optional[int]): number of results to skip for pagination
(default=0)
limit (optional[int]): how many results to return (default=50)
timeout (optional[int]): how long to wait for response (in seconds)
Returns:
result of query search on metric time series
| 12.743593
| 18.221977
| 0.699353
|
return self._get_object_by_name(self._MTS_ENDPOINT_SUFFIX,
mts_id,
**kwargs)
|
def get_metric_time_series(self, mts_id, **kwargs)
|
get a metric time series by id
| 6.011555
| 5.845264
| 1.028449
|
return self._search_metrics_and_metadata(self._TAG_ENDPOINT_SUFFIX,
*args, **kwargs)
|
def search_tags(self, *args, **kwargs)
|
Args:
query (string): elasticsearch string query
order_by (optional[string]): property by which to order results
offset (optional[int]): number of results to skip for pagination
(default=0)
limit (optional[int]): how many results to return (default=50)
timeout (optional[int]): how long to wait for response (in seconds)
Returns:
result of query search on tags
| 13.552693
| 20.079788
| 0.674942
|
return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX,
tag_name,
**kwargs)
|
def get_tag(self, tag_name, **kwargs)
|
get a tag by name
Args:
tag_name (string): name of tag to get
Returns:
dictionary of the response
| 6.474005
| 8.801364
| 0.735568
|
data = {'description': description or '',
'customProperties': custom_properties or {}}
resp = self._put(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name),
data=data, **kwargs)
resp.raise_for_status()
return resp.json()
|
def update_tag(self, tag_name, description=None,
custom_properties=None, **kwargs)
|
update a tag by name
Args:
tag_name (string): name of tag to update
description (optional[string]): a description
custom_properties (optional[dict]): dictionary of custom properties
| 3.709884
| 4.433055
| 0.836869
|
resp = self._delete(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name),
**kwargs)
resp.raise_for_status()
# successful delete returns 204, which has no associated json
return resp
|
def delete_tag(self, tag_name, **kwargs)
|
delete a tag by name
Args:
tag_name (string): name of tag to delete
| 8.712335
| 9.301356
| 0.936674
|
resp = self._get(self._u(self._ORGANIZATION_ENDPOINT_SUFFIX),
**kwargs)
resp.raise_for_status()
return resp.json()
|
def get_organization(self, **kwargs)
|
Get the organization to which the user belongs
Returns:
dictionary of the response
| 5.424343
| 6.009565
| 0.902618
|
resp = self._get_object_by_name(self._CHART_ENDPOINT_SUFFIX, id,
**kwargs)
return resp
|
def get_chart(self, id, **kwargs)
|
Retrieve a (v2) chart by id.
| 7.338021
| 7.003904
| 1.047704
|
resp = self._get_object_by_name(self._DASHBOARD_ENDPOINT_SUFFIX, id,
**kwargs)
return resp
|
def get_dashboard(self, id, **kwargs)
|
Retrieve a (v2) dashboard by id.
| 7.637624
| 7.695833
| 0.992436
|
resp = self._get_object_by_name(self._DETECTOR_ENDPOINT_SUFFIX, id,
**kwargs)
return resp
|
def get_detector(self, id, **kwargs)
|
Retrieve a (v2) detector by id.
| 7.016281
| 6.688263
| 1.049044
|
detectors = []
offset = 0
while True:
resp = self._get(
self._u(self._DETECTOR_ENDPOINT_SUFFIX),
params={
'offset': offset,
'limit': batch_size,
'name': name,
'tags': tags or [],
},
**kwargs)
resp.raise_for_status()
data = resp.json()
detectors += data['results']
if len(detectors) == data['count']:
break
offset = len(detectors)
return detectors
|
def get_detectors(self, name=None, tags=None, batch_size=100, **kwargs)
|
Retrieve all (v2) detectors matching the given name; all (v2)
detectors otherwise.
Note that this method will loop through the paging of the results and
accumulate all detectors that match the query. This may be expensive.
| 2.527268
| 2.516818
| 1.004152
|
resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX, 'validate'),
data=detector)
resp.raise_for_status()
|
def validate_detector(self, detector)
|
Validate a detector.
Validates the given detector; throws a 400 Bad Request HTTP error if
the detector is invalid; otherwise doesn't return or throw anything.
Args:
detector (object): the detector model object. Will be serialized as
JSON.
| 9.00491
| 10.304794
| 0.873856
|
resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX),
data=detector)
resp.raise_for_status()
return resp.json()
|
def create_detector(self, detector)
|
Creates a new detector.
Args:
detector (object): the detector model object. Will be serialized as
JSON.
Returns:
dictionary of the response (created detector model).
| 6.066045
| 5.855909
| 1.035884
|
resp = self._put(self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id),
data=detector)
resp.raise_for_status()
return resp.json()
|
def update_detector(self, detector_id, detector)
|
Update an existing detector.
Args:
detector_id (string): the ID of the detector.
detector (object): the detector model object. Will be serialized as
JSON.
Returns:
dictionary of the response (updated detector model).
| 5.141138
| 5.24892
| 0.979466
|
resp = self._delete(self._u(self._DETECTOR_ENDPOINT_SUFFIX,
detector_id),
**kwargs)
resp.raise_for_status()
# successful delete returns 204, which has no response json
return resp
|
def delete_detector(self, detector_id, **kwargs)
|
Remove a detector.
Args:
detector_id (string): the ID of the detector.
| 7.378069
| 8.561993
| 0.861723
|
resp = self._get(
self._u(self._DETECTOR_ENDPOINT_SUFFIX, id, 'incidents'),
None,
**kwargs
)
resp.raise_for_status()
return resp.json()
|
def get_detector_incidents(self, id, **kwargs)
|
Gets all incidents for a detector
| 4.906148
| 5.195816
| 0.94425
|
resp = self._get_object_by_name(self._INCIDENT_ENDPOINT_SUFFIX, id,
**kwargs)
return resp
|
def get_incident(self, id, **kwargs)
|
Retrieve a (v2) incident by id.
| 7.239147
| 7.202245
| 1.005124
|
resp = self._get(
self._u(self._INCIDENT_ENDPOINT_SUFFIX),
params={
'offset': offset,
'limit': limit,
'include_resolved': str(include_resolved).lower(),
},
**kwargs)
resp.raise_for_status()
return resp.json()
|
def get_incidents(self, offset=0, limit=None, include_resolved=False, **kwargs)
|
Retrieve all (v2) incidents.
| 3.1248
| 3.183892
| 0.98144
|
resp = self._put(
self._u(self._INCIDENT_ENDPOINT_SUFFIX, id, 'clear'),
None,
**kwargs
)
resp.raise_for_status()
return resp
|
def clear_incident(self, id, **kwargs)
|
Clear an incident.
| 5.563849
| 5.622322
| 0.9896
|
data = {}
metadata = {}
c = client.execute(program, start=start, stop=stop, resolution=resolution)
for msg in c.stream():
if isinstance(msg, messages.DataMessage):
if msg.logical_timestamp_ms in data:
data[msg.logical_timestamp_ms].update(msg.data)
else:
data[msg.logical_timestamp_ms] = msg.data
elif isinstance(msg, messages.MetadataMessage):
metadata[msg.tsid] = msg.properties
df = pandas.DataFrame.from_dict(data, orient='index')
df.metadata = metadata
return df
|
def get_data_frame(client, program, start, stop, resolution=None)
|
Executes the given program across the given time range (expressed in
millisecond timestamps since Epoch), and returns a Pandas DataFrame
containing the results, indexed by output timestamp.
If the program contains multiple publish() calls, their outputs are merged
into the returned DataFrame.
| 2.757176
| 2.782228
| 0.990996
|
r = requests.post('{0}/v2/session'.format(self._api_endpoint),
json={'email': email, 'password': password})
r.raise_for_status()
return r.json()['accessToken']
|
def login(self, email, password)
|
Authenticate a user with SignalFx to acquire a session token.
Note that data ingest can only be done with an organization or team API
access token, not with a user token obtained via this method.
Args:
email (string): the email login
password (string): the password
Returns a new, immediately-usable session token for the logged in user.
| 3.44459
| 3.212638
| 1.0722
|
from . import rest
return rest.SignalFxRestClient(
token=token,
endpoint=endpoint or self._api_endpoint,
timeout=timeout or self._timeout)
|
def rest(self, token, endpoint=None, timeout=None)
|
Obtain a metadata REST API client.
| 6.04498
| 4.80009
| 1.259347
|
from . import ingest
if ingest.sf_pbuf:
client = ingest.ProtoBufSignalFxIngestClient
else:
_logger.warn('Protocol Buffers not installed properly; '
'falling back to JSON.')
client = ingest.JsonSignalFxIngestClient
compress = compress if compress is not None else self._compress
return client(
token=token,
endpoint=endpoint or self._ingest_endpoint,
timeout=timeout or self._timeout,
compress=compress)
|
def ingest(self, token, endpoint=None, timeout=None, compress=None)
|
Obtain a datapoint and event ingest client.
| 5.539603
| 5.351142
| 1.035219
|
from . import signalflow
compress = compress if compress is not None else self._compress
return signalflow.SignalFlowClient(
token=token,
endpoint=endpoint or self._stream_endpoint,
timeout=timeout or self._timeout,
compress=compress)
|
def signalflow(self, token, endpoint=None, timeout=None, compress=None)
|
Obtain a SignalFlow API client.
| 3.6368
| 3.09353
| 1.175615
|
dimensions = dict((k, str(v)) for k, v in kwargs.items())
composite_key = self._composite_name(key, dimensions)
self._metadata[composite_key] = {
'metric': key,
'dimensions': dimensions
}
return composite_key
|
def register(self, key, **kwargs)
|
Registers metadata for a metric and returns a composite key
| 4.124486
| 3.04748
| 1.353409
|
request = {
'type': 'authenticate',
'token': self._token,
'userAgent': '{} ws4py/{}'.format(version.user_agent,
ws4py.__version__),
}
self.send(json.dumps(request))
|
def opened(self)
|
Handler called when the WebSocket connection is opened. The first
thing to do then is to authenticate ourselves.
| 6.426891
| 4.931684
| 1.303184
|
if code != 1000:
self._error = errors.SignalFlowException(code, reason)
_logger.info('Lost WebSocket connection with %s (%s: %s).',
self, code, reason)
for c in self._channels.values():
c.offer(WebSocketComputationChannel.END_SENTINEL)
self._channels.clear()
with self._connection_cv:
self._connected = False
self._connection_cv.notify()
|
def closed(self, code, reason=None)
|
Handler called when the WebSocket is closed. Status code 1000
denotes a normal close; all others are errors.
| 6.622135
| 6.54623
| 1.011595
|
try:
resp = requests.get(AWS_ID_URL, timeout=timeout).json()
except requests.exceptions.ConnectTimeout:
_logger.warning('Connection timeout when determining AWS unique '
'ID. Not using AWS unique ID.')
return None
else:
aws_id = "{0}_{1}_{2}".format(resp['instanceId'], resp['region'],
resp['accountId'])
_logger.debug('Using AWS unique ID %s.', aws_id)
return aws_id
|
def get_aws_unique_id(timeout=DEFAULT_AWS_TIMEOUT)
|
Determine the current AWS unique ID
Args:
timeout (int): How long to wait for a response from AWS metadata IP
| 3.171212
| 3.294455
| 0.962591
|
pwm = self._pca.pwm_regs[self._index]
if pwm[0] == 0x1000:
return 0xffff
return pwm[1] << 4
|
def duty_cycle(self)
|
16 bit value that dictates how much of one cycle is high (1) versus low (0). 0xffff will
always be high, 0 will always be low and 0x7fff will be half high and then half low.
| 8.740579
| 8.076617
| 1.082208
|
output = mkl_fft.fft(a, n, axis)
if _unitary(norm):
output *= 1 / sqrt(output.shape[axis])
return output
|
def fft(a, n=None, axis=-1, norm=None)
|
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
| 6.133804
| 15.758137
| 0.389247
|
unitary = _unitary(norm)
output = mkl_fft.ifft(a, n, axis)
if unitary:
output *= sqrt(output.shape[axis])
return output
|
def ifft(a, n=None, axis=-1, norm=None)
|
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
...
>>> plt.legend(('real', 'imaginary'))
...
>>> plt.show()
| 6.521132
| 21.109983
| 0.308912
|
unitary = _unitary(norm)
if unitary and n is None:
a = asarray(a)
n = a.shape[axis]
output = mkl_fft.rfft_numpy(a, n=n, axis=axis)
if unitary:
output *= 1 / sqrt(n)
return output
|
def rfft(a, n=None, axis=-1, norm=None)
|
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
| 4.062356
| 6.786771
| 0.59857
|
output = mkl_fft.irfft_numpy(a, n=n, axis=axis)
if _unitary(norm):
output *= sqrt(output.shape[axis])
return output
|
def irfft(a, n=None, axis=-1, norm=None)
|
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
| 6.412196
| 15.524574
| 0.413035
|
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
|
def hfft(a, n=None, axis=-1, norm=None)
|
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
| 4.985022
| 8.20077
| 0.607872
|
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
|
def ihfft(a, n=None, axis=-1, norm=None)
|
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
| 4.934489
| 8.785034
| 0.561693
|
return fftn(a, s=s, axes=axes, norm=norm)
|
def fft2(a, s=None, axes=(-2, -1), norm=None)
|
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
| 3.410544
| 9.879125
| 0.345227
|
return ifftn(a, s=s, axes=axes, norm=norm)
|
def ifft2(a, s=None, axes=(-2, -1), norm=None)
|
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
| 2.797055
| 9.479079
| 0.295077
|
unitary = _unitary(norm)
if unitary:
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
output = mkl_fft.rfftn_numpy(a, s, axes)
if unitary:
n_tot = prod(asarray(s, dtype=output.dtype))
output *= 1 / sqrt(n_tot)
return output
|
def rfftn(a, s=None, axes=None, norm=None)
|
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
| 5.463334
| 9.796214
| 0.557698
|
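To illustrate the output shape and the ``norm="ortho"`` scaling applied by the wrapper above (the `1 / sqrt(n_tot)` step), a small sketch with an arbitrary real array:

import numpy as np

a = np.random.RandomState(1).standard_normal((6, 8))

# Only the last transformed axis is halved: (6, 8) -> (6, 8//2 + 1).
out = np.fft.rfftn(a)
assert out.shape == (6, 5)

# "ortho" divides the unnormalized result by sqrt(prod(s)), matching the
# `output *= 1 / sqrt(n_tot)` step in the wrapper.
assert np.allclose(np.fft.rfftn(a, norm="ortho"), out / np.sqrt(6 * 8))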
return rfftn(a, s, axes, norm)
|
def rfft2(a, s=None, axes=(-2, -1), norm=None)
|
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
| 4.971882
| 8.366845
| 0.594236
|
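Since `rfft2` is `rfftn` restricted to the last two axes, the equivalence can be shown directly; the array size below is arbitrary.

import numpy as np

a = np.random.RandomState(2).standard_normal((3, 4, 6))

out = np.fft.rfft2(a)                     # default axes=(-2, -1)
assert out.shape == (3, 4, 6 // 2 + 1)    # only the last axis is halved
assert np.allclose(out, np.fft.rfftn(a, axes=(-2, -1)))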
output = mkl_fft.irfftn_numpy(a, s, axes)
if _unitary(norm):
output *= sqrt(_tot_size(output, axes))
return output
|
def irfftn(a, s=None, axes=None, norm=None)
|
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
| 9.318543
| 21.364082
| 0.436178
|
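The need to pass the original shape for odd-length axes, mentioned in the Returns section above, can be demonstrated with a short sketch:

import numpy as np

a = np.random.RandomState(3).standard_normal((5, 7))    # odd last axis
spec = np.fft.rfftn(a)                                   # shape (5, 7//2 + 1) == (5, 4)

# Without `s`, the last axis is reconstructed as 2*(m-1) == 6, losing a sample.
assert np.fft.irfftn(spec).shape == (5, 6)

# Passing the original shape recovers the input to numerical accuracy.
assert np.allclose(np.fft.irfftn(spec, s=a.shape), a)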
return irfftn(a, s, axes, norm)
|
def irfft2(a, s=None, axes=(-2, -1), norm=None)
|
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
| 5.238524
| 8.349931
| 0.627373
|
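As with `irfftn`, supplying the original shape makes the 2-D round trip exact; a minimal check:

import numpy as np

a = np.random.RandomState(4).standard_normal((4, 7))
assert np.allclose(np.fft.irfft2(np.fft.rfft2(a), s=a.shape), a)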
client = obj['client']
if delete:
client.delete_blackout(delete)
else:
if not environment:
raise click.UsageError('Missing option "--environment" / "-E".')
try:
blackout = client.create_blackout(
environment=environment,
service=service,
resource=resource,
event=event,
group=group,
tags=tags,
customer=customer,
start=start,
duration=duration,
text=text
)
except Exception as e:
click.echo('ERROR: {}'.format(e))
sys.exit(1)
click.echo(blackout.id)
|
def cli(obj, environment, service, resource, event, group, tags, customer, start, duration, text, delete)
|
Suppress alerts for specified duration based on alert attributes.
| 2.194373
| 2.25763
| 0.971981
|
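The command is a thin wrapper over the alertaclient `Client` methods it calls. A hedged sketch of using those same calls directly follows; the endpoint, key and attribute values are placeholders, not part of the original command.

from alertaclient.api import Client

client = Client(endpoint='http://localhost:8080', key='demo-key')  # placeholder credentials

# Same call the command makes; only `environment` is enforced as required.
blackout = client.create_blackout(
    environment='Production',
    service=['Web'],
    duration=3600,        # seconds
    text='planned maintenance'
)
print(blackout.id)

# Equivalent of passing --delete <id>.
client.delete_blackout(blackout.id)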
client = obj['client']
if delete:
client.delete_key(delete)
else:
try:
expires = datetime.utcnow() + timedelta(seconds=duration) if duration else None
key = client.create_key(username, scopes, expires, text, customer)
except Exception as e:
click.echo('ERROR: {}'.format(e))
sys.exit(1)
click.echo(key.key)
|
def cli(obj, username, scopes, duration, text, customer, delete)
|
Create or delete an API key.
| 2.523763
| 2.393107
| 1.054597
|
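The only non-trivial step is turning a relative duration into an absolute UTC expiry; the expression from the command body is reproduced here as a runnable snippet with an example value.

from datetime import datetime, timedelta

duration = 86400   # e.g. a key valid for one day
expires = datetime.utcnow() + timedelta(seconds=duration) if duration else None
print(expires.isoformat())   # absolute UTC timestamp passed to create_key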
client = obj['client']
if delete:
client.delete_customer(delete)
else:
if not customer:
raise click.UsageError('Missing option "--customer".')
if not match:
raise click.UsageError('Missing option "--org" / "--group" / "--domain" / "--role".')
try:
customer = client.create_customer(customer, match)
except Exception as e:
click.echo('ERROR: {}'.format(e))
sys.exit(1)
click.echo(customer.id)
|
def cli(obj, customer, match, delete)
|
Add group/org/domain/role-to-customer or delete lookup entry.
| 2.897911
| 2.624212
| 1.104298
|
client = obj['client']
if delete:
client.delete_user(delete)
elif id:
if not any([name, email, password, status, roles, text, email_verified]):
click.echo('Nothing to update.')
sys.exit(1)
try:
r = client.update_user(
id, name=name, email=email, password=password, status=status,
roles=roles, attributes=None, text=text, email_verified=email_verified
)
except Exception as e:
click.echo('ERROR: {}'.format(e))
sys.exit(1)
if r['status'] == 'ok':
click.echo('Updated.')
else:
click.echo(r['message'])
else:
if not email:
raise click.UsageError('Need "--email" to create user.')
if not password:
password = click.prompt('Password', hide_input=True)
try:
user = client.create_user(
name=name, email=email, password=password, status=status,
roles=roles, attributes=None, text=text, email_verified=email_verified
)
except Exception as e:
click.echo('ERROR: {}'.format(e))
sys.exit(1)
click.echo(user.id)
|
def cli(obj, id, name, email, password, status, roles, text, email_verified, delete)
|
Create user or update user details, including password reset.
| 1.859588
| 1.857247
| 1.001261
|
client = obj['client']
click.echo('alerta {}'.format(client.mgmt_status()['version']))
click.echo('alerta client {}'.format(client_version))
click.echo('requests {}'.format(requests_version))
click.echo('click {}'.format(click.__version__))
ctx.exit()
|
def cli(ctx, obj)
|
Show Alerta server and client versions.
| 5.588152
| 4.254789
| 1.313379
|
client = obj['client']
userinfo = client.userinfo()
if show_userinfo:
for k, v in userinfo.items():
if isinstance(v, list):
v = ', '.join(v)
click.echo('{:20}: {}'.format(k, v))
else:
click.echo(userinfo['preferred_username'])
|
def cli(obj, show_userinfo)
|
Display logged in user or full userinfo.
| 2.513712
| 2.4514
| 1.025419
|
for k, v in obj.items():
if isinstance(v, list):
v = ', '.join(v)
click.echo('{:20}: {}'.format(k, v))
|
def cli(obj)
|
Display client config downloaded from API server.
| 2.717649
| 2.716561
| 1.000401
|
client = obj['client']
query = [('roles', r) for r in roles]
if obj['output'] == 'json':
r = client.http.get('/users', query)
click.echo(json.dumps(r['users'], sort_keys=True, indent=4, ensure_ascii=False))
else:
timezone = obj['timezone']
headers = {'id': 'ID', 'name': 'USER', 'email': 'EMAIL', 'roles': 'ROLES', 'status': 'STATUS', 'text': 'TEXT',
'createTime': 'CREATED', 'updateTime': 'LAST UPDATED', 'lastLogin': 'LAST LOGIN', 'email_verified': 'VERIFIED'}
click.echo(
tabulate([u.tabular(timezone) for u in client.get_users(query)], headers=headers, tablefmt=obj['output'])
)
|
def cli(obj, roles)
|
List users.
| 3.446305
| 3.286133
| 1.048742
|
client = obj['client']
if ids:
total = len(ids)
else:
if not (query or filters):
click.confirm('Deleting all alerts. Do you want to continue?', abort=True)
if query:
query = [('q', query)]
else:
query = build_query(filters)
total, _, _ = client.get_count(query)
ids = [a.id for a in client.get_alerts(query)]
with click.progressbar(ids, label='Deleting {} alerts'.format(total)) as bar:
for id in bar:
client.delete_alert(id)
|
def cli(obj, ids, query, filters)
|
Delete alerts.
| 3.051168
| 2.926767
| 1.042505
|
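Several of the bulk commands here share the same shape: resolve a list of alert ids, then apply one client call per id inside `click.progressbar`. A runnable sketch with the client call stubbed out:

import click

def apply_to_alerts(ids, action, label):
    # `action` stands in for client.delete_alert, client.untag_alert, etc.
    with click.progressbar(ids, label='{} {} alerts'.format(label, len(ids))) as bar:
        for id in bar:
            action(id)

apply_to_alerts(['a1', 'b2', 'c3'], lambda id: None, 'Deleting')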
client = obj['client']
status = client.mgmt_status()
now = datetime.fromtimestamp(int(status['time']) / 1000.0)
uptime = datetime(1, 1, 1) + timedelta(seconds=int(status['uptime']) / 1000.0)
click.echo('{} up {} days {:02d}:{:02d}'.format(
now.strftime('%H:%M'),
uptime.day - 1, uptime.hour, uptime.minute
))
|
def cli(obj)
|
Display API server uptime in days, hours.
| 3.081118
| 2.792054
| 1.103531
|
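The uptime formatting relies on adding the millisecond uptime to the reference date 0001-01-01 and reading back days, hours and minutes; a worked example with a value chosen for illustration:

from datetime import datetime, timedelta

uptime_ms = 93784000   # 1 day, 2 hours, 3 minutes, 4 seconds
uptime = datetime(1, 1, 1) + timedelta(seconds=uptime_ms / 1000.0)
print('{} days {:02d}:{:02d}'.format(uptime.day - 1, uptime.hour, uptime.minute))
# -> 1 days 02:03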
client = obj['client']
if ids:
total = len(ids)
else:
if query:
query = [('q', query)]
else:
query = build_query(filters)
total, _, _ = client.get_count(query)
ids = [a.id for a in client.get_alerts(query)]
with click.progressbar(ids, label='Untagging {} alerts'.format(total)) as bar:
for id in bar:
client.untag_alert(id, tags)
|
def cli(obj, ids, query, filters, tags)
|
Remove tags from alerts.
| 3.369136
| 3.193235
| 1.055085
|
if details:
display = 'details'
else:
display = 'compact'
from_date = None
auto_refresh = True
while auto_refresh:
try:
auto_refresh, from_date = ctx.invoke(query_cmd, ids=ids, query=query,
filters=filters, display=display, from_date=from_date)
time.sleep(interval)
except (KeyboardInterrupt, SystemExit) as e:
sys.exit(e)
|
def cli(ctx, ids, query, filters, details, interval)
|
Watch for new alerts.
| 3.44035
| 3.537182
| 0.972625
|
client = obj['client']
metrics = client.mgmt_status()['metrics']
headers = {'title': 'METRIC', 'type': 'TYPE', 'name': 'NAME', 'value': 'VALUE', 'average': 'AVERAGE'}
click.echo(tabulate([{
'title': m['title'],
'type': m['type'],
'name': '{}.{}'.format(m['group'], m['name']),
'value': m.get('value', None) or m.get('count', 0),
'average': int(m['totalTime']) * 1.0 / int(m['count']) if m['type'] == 'timer' else None
} for m in metrics], headers=headers, tablefmt=obj['output']))
|
def cli(obj)
|
Display API server switch status and usage metrics.
| 3.597582
| 3.429985
| 1.048862
|
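For timer metrics the AVERAGE column is total time divided by the number of observations, as in the expression above; a worked example with made-up numbers:

m = {'type': 'timer', 'totalTime': 1500, 'count': 60}
average = int(m['totalTime']) * 1.0 / int(m['count']) if m['type'] == 'timer' else None
print(average)   # 25.0, in whatever time unit the server reports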
client = obj['client']
if ids:
total = len(ids)
else:
if query:
query = [('q', query)]
else:
query = build_query(filters)
total, _, _ = client.get_count(query)
ids = [a.id for a in client.get_alerts(query)]
with click.progressbar(ids, label='Updating {} alerts'.format(total)) as bar:
for id in bar:
client.update_attributes(id, dict(a.split('=') for a in attributes))
|
def cli(obj, ids, query, filters, attributes)
|
Update alert attributes.
| 3.769238
| 3.421448
| 1.10165
|
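Attribute values arrive as key=value strings and are folded into a dict exactly as in the command body; note that a value containing '=' would need `split('=', 1)` instead.

attributes = ['region=eu-west-1', 'owner=platform-team']
print(dict(a.split('=') for a in attributes))
# {'region': 'eu-west-1', 'owner': 'platform-team'}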
client = obj['client']
query = [('scopes', s) for s in scopes]
if obj['output'] == 'json':
r = client.http.get('/perms', query)
click.echo(json.dumps(r['permissions'], sort_keys=True, indent=4, ensure_ascii=False))
else:
headers = {'id': 'ID', 'scopes': 'SCOPES', 'match': 'ROLE'}
click.echo(tabulate([p.tabular() for p in client.get_perms(query)], headers=headers, tablefmt=obj['output']))
|
def cli(obj, scopes)
|
List permissions.
| 4.00827
| 3.714034
| 1.079223
|
client = obj['client']
provider = obj['provider']
client_id = obj['client_id']
try:
if provider == 'azure':
token = azure.login(client, obj['azure_tenant'], client_id)['token']
elif provider == 'github':
token = github.login(client, obj['github_url'], client_id)['token']
elif provider == 'gitlab':
token = gitlab.login(client, obj['gitlab_url'], client_id)['token']
elif provider == 'google':
if not username:
username = click.prompt('Email')
token = google.login(client, username, client_id)['token']
elif provider == 'openid':
token = oidc.login(client, obj['oidc_auth_url'], client_id)['token']
elif provider == 'basic':
if not username:
username = click.prompt('Email')
password = click.prompt('Password', hide_input=True)
token = client.login(username, password)['token']
else:
click.echo('ERROR: unknown provider {provider}'.format(provider=provider))
sys.exit(1)
except Exception as e:
raise AuthError(e)
jwt = Jwt()
preferred_username = jwt.parse(token)['preferred_username']
if preferred_username:
save_token(client.endpoint, preferred_username, token)
click.echo('Logged in as {}'.format(preferred_username))
else:
click.echo('Failed to login.')
sys.exit(1)
|
def cli(obj, username)
|
Authenticate using Azure, GitHub, GitLab, Google OAuth2, OpenID or
Basic Auth username/password instead of using an API key.
| 2.271255
| 2.161259
| 1.050895
|
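After authenticating, the command reads `preferred_username` from the returned JWT. A JWT payload is base64url-encoded JSON, so a rough, unverified stand-in for what `Jwt().parse` extracts might look like the hypothetical helper below; it performs no signature check and is for illustration only.

import base64
import json

def parse_claims(token):
    # Decode only the payload segment; restore the stripped base64 padding.
    payload = token.split('.')[1]
    payload += '=' * (-len(payload) % 4)
    return json.loads(base64.urlsafe_b64decode(payload))

# claims = parse_claims(token)
# claims.get('preferred_username')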