| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
|
def get_items_by_ids(self, item_ids, item_type=None)
|
Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
| 3.33202
| 3.821178
| 0.871988
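The type filter in `get_items_by_ids` is a plain list comprehension over `item_type`. A minimal, self-contained sketch of that pattern, using a stand-in dataclass (the real `Item` wraps a Hacker News API response, so this class is an assumption for illustration only):

```python
# Stand-in for the library's Item class, for illustration only.
from dataclasses import dataclass

@dataclass
class Item:
    item_id: int
    item_type: str

items = [Item(1, 'story'), Item(2, 'comment'), Item(3, 'story')]
# The same filtering step the method applies when item_type is given:
stories = [i for i in items if i.item_type == 'story']
print([i.item_id for i in stories])  # -> [1, 3]
```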
|
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
|
def get_user(self, user_id, expand=False)
|
Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
| 3.913018
| 3.44861
| 1.134665
|
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
|
def get_users_by_ids(self, user_ids)
|
Given a list of user ids, return all the User objects
| 5.734159
| 5.541481
| 1.03477
|
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
|
def top_stories(self, raw=False, limit=None)
|
Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
| 2.952041
| 4.544875
| 0.649532
|
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
|
def new_stories(self, raw=False, limit=None)
|
Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
| 3.197838
| 4.409503
| 0.725215
|
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
|
def ask_stories(self, raw=False, limit=None)
|
Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
| 3.338027
| 5.124004
| 0.651449
|
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
|
def show_stories(self, raw=False, limit=None)
|
Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
| 3.400489
| 4.411388
| 0.770843
|
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
|
def job_stories(self, raw=False, limit=None)
|
Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
| 3.000584
| 4.271363
| 0.702489
|
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
|
def updates(self)
|
Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
| 3.953593
| 3.610363
| 1.095068
|
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
|
def get_max_item(self, expand=False)
|
The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
| 4.122583
| 4.063109
| 1.014638
|
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
def get_last(self, num=10)
|
Returns the last `num` HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` of `Item` objects for the latest HN stories.
| 5.083527
| 4.54809
| 1.117728
|
self.dct = wrapper.Dictionary.load(path)
return self
|
def load(self, path)
|
Loads DAWG from a file.
| 22.423473
| 21.405012
| 1.047581
|
return self._similar_keys("", key, self.dct.ROOT, replaces)
|
def similar_keys(self, key, replaces)
|
Returns all variants of ``key`` in this DAWG according to
``replaces``.
``replaces`` is an object obtained from
``DAWG.compile_replaces(mapping)`` where mapping is a dict
that maps single-char unicode strings to other single-char
unicode strings.
This may be useful e.g. for handling single-character umlauts.
| 31.499485
| 35.557346
| 0.885878
|
'''
Returns a list with keys of this DAWG that are prefixes of the ``key``.
'''
res = []
index = self.dct.ROOT
if not isinstance(key, bytes):
key = key.encode('utf8')
pos = 1
for ch in key:
index = self.dct.follow_char(int_from_byte(ch), index)
if not index:
break
if self._has_value(index):
res.append(key[:pos].decode('utf8'))
pos += 1
return res
|
def prefixes(self, key)
|
Returns a list with keys of this DAWG that are prefixes of the ``key``.
| 5.311822
| 3.843552
| 1.382008
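A pure-Python reference for the `prefixes()` semantics above, using a plain set of keys in place of the automaton (an assumption for illustration; the real method walks the DAWG byte-by-byte instead of testing each prefix):

```python
# Return every stored key that is a prefix of the query, shortest first.
def prefixes_reference(keys, query):
    return [query[:i] for i in range(1, len(query) + 1) if query[:i] in keys]

keys = {'f', 'foo', 'foobar', 'bar'}
print(prefixes_reference(keys, 'foobarbaz'))  # -> ['f', 'foo', 'foobar']
```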
|
self.dct = wrapper.Dictionary()
self.guide = wrapper.Guide()
with open(path, 'rb') as f:
self.dct.read(f)
self.guide.read(f)
return self
|
def load(self, path)
|
Loads DAWG from a file.
| 4.510033
| 4.294731
| 1.050132
|
if not isinstance(key, bytes):
key = key.encode('utf8')
return self.b_get_value(key) or default
|
def get(self, key, default=None)
|
Returns a list of payloads (as byte objects) for a given key
or ``default`` if the key is not found.
| 5.878466
| 5.505333
| 1.067776
|
return self._similar_items("", key, self.dct.ROOT, replaces)
|
def similar_items(self, key, replaces)
|
Returns a list of (key, value) tuples for all variants of ``key``
in this DAWG according to ``replaces``.
``replaces`` is an object obtained from
``DAWG.compile_replaces(mapping)`` where mapping is a dict
that maps single-char unicode strings to other single-char
unicode strings.
| 31.400381
| 41.940853
| 0.748682
|
return self._similar_item_values(0, key, self.dct.ROOT, replaces)
|
def similar_item_values(self, key, replaces)
|
Returns a list of values for all variants of the ``key``
in this DAWG according to ``replaces``.
``replaces`` is an object obtained from
``DAWG.compile_replaces(mapping)`` where mapping is a dict
that maps single-char unicode strings to other single-char
unicode strings.
| 14.258682
| 17.181906
| 0.829866
|
"Gets a value from a given index."
offset = units.offset(self._units[index])
value_index = (index ^ offset) & units.PRECISION_MASK
return units.value(self._units[value_index])
|
def value(self, index)
|
Gets a value from a given index.
| 8.029502
| 7.750914
| 1.035943
|
"Reads a dictionary from an input stream."
base_size = struct.unpack(str("=I"), fp.read(4))[0]
self._units.fromfile(fp, base_size)
|
def read(self, fp)
|
Reads a dictionary from an input stream.
| 8.230103
| 7.70448
| 1.068223
|
"Exact matching."
index = self.follow_bytes(key, self.ROOT)
if index is None:
return False
return self.has_value(index)
|
def contains(self, key)
|
Exact matching.
| 11.572659
| 8.984918
| 1.28801
|
"Exact matching (returns value)"
index = self.follow_bytes(key, self.ROOT)
if index is None:
return -1
if not self.has_value(index):
return -1
return self.value(index)
|
def find(self, key)
|
Exact matching (returns value)
| 8.196682
| 5.518026
| 1.485437
|
"Follows a transition"
offset = units.offset(self._units[index])
next_index = (index ^ offset ^ label) & units.PRECISION_MASK
if units.label(self._units[next_index]) != label:
return None
return next_index
|
def follow_char(self, label, index)
|
Follows a transition
| 9.50117
| 8.441636
| 1.125513
|
"Follows transitions."
for ch in s:
index = self.follow_char(int_from_byte(ch), index)
if index is None:
return None
return index
|
def follow_bytes(self, s, index)
|
Follows transitions.
| 8.453337
| 6.009402
| 1.406685
|
"Gets the next key"
if not self._index_stack:
return False
index = self._index_stack[-1]
if self._last_index != self._dic.ROOT:
child_label = self._guide.child(index) # UCharType
if child_label:
# Follows a transition to the first child.
index = self._follow(child_label, index)
if index is None:
return False
else:
while True:
sibling_label = self._guide.sibling(index)
# Moves to the previous node.
if len(self.key) > 0:
self.key.pop()
#self.key[-1] = 0
self._index_stack.pop()
if not self._index_stack:
return False
index = self._index_stack[-1]
if sibling_label:
# Follows a transition to the next sibling.
index = self._follow(sibling_label, index)
if index is None:
return False
break
return self._find_terminal(index)
|
def next(self)
|
Gets the next key
| 4.416802
| 4.482716
| 0.985296
|
if not check_response or not check_response.checkErrors:
return _IS_OK
# only check the first error for now, as per ESP
theError = check_response.checkErrors[0]
error_tuple = _CHECK_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)
if error_tuple[1].find(u'{') == -1: # no replacements needed:
return error_tuple
updated_msg = error_tuple[1].format(project_id=project_id, detail=theError.detail or u'')
return error_tuple[0], updated_msg, error_tuple[2]
|
def convert_response(check_response, project_id)
|
Computes an http status code and message from a `CheckResponse`
The return value is a tuple (code, message, api_key_is_bad) where
code: is the http status code
message: is the message to return
api_key_is_bad: indicates that a given api_key is bad
Args:
check_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckResponse`):
the response from calling an api
Returns:
tuple(code, message, bool)
| 5.984479
| 5.646531
| 1.059851
|
if not isinstance(check_request, sc_messages.CheckRequest):
raise ValueError(u'Invalid request')
op = check_request.operation
if op is None or op.operationName is None or op.consumerId is None:
logging.error(u'Bad %s: not initialized => not signed', check_request)
raise ValueError(u'check request must be initialized with an operation')
md5 = hashlib.md5()
md5.update(op.operationName.encode('utf-8'))
md5.update(b'\x00')
md5.update(op.consumerId.encode('utf-8'))
if op.labels:
signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
for value_set in op.metricValueSets:
md5.update(b'\x00')
md5.update(value_set.metricName.encode('utf-8'))
for mv in value_set.metricValues:
metric_value.update_hash(md5, mv)
md5.update(b'\x00')
if op.quotaProperties:
# N.B: this differs from the cxx implementation, which serializes the
# protobuf. This should be OK as the exact hash used does not need to
# match across implementations.
md5.update(repr(op.quotaProperties).encode('utf-8'))
md5.update(b'\x00')
return md5.digest()
|
def sign(check_request)
|
Obtains a signature for an operation in a `CheckRequest`
Args:
op (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an
operation used in a `CheckRequest`
Returns:
string: a secure hash generated from the operation
| 4.009116
| 3.669513
| 1.092547
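The `b'\x00'` updates in `sign` act as field separators so that adjacent fields cannot collide. A self-contained sketch of that scheme with just two illustrative fields (the full routine also hashes labels, metric value sets, and quota properties):

```python
import hashlib

def sign_fields(operation_name, consumer_id):
    md5 = hashlib.md5()
    md5.update(operation_name.encode('utf-8'))
    md5.update(b'\x00')  # separator: ('ab', 'c') must not equal ('a', 'bc')
    md5.update(consumer_id.encode('utf-8'))
    return md5.digest()

assert sign_fields('ab', 'c') != sign_fields('a', 'bc')
```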
|
if not self.service_name:
raise ValueError(u'the service name must be set')
if not self.operation_id:
raise ValueError(u'the operation id must be set')
if not self.operation_name:
raise ValueError(u'the operation name must be set')
op = super(Info, self).as_operation(timer=timer)
labels = {}
if self.android_cert_fingerprint:
labels[_KNOWN_LABELS.SCC_ANDROID_CERT_FINGERPRINT.label_name] = self.android_cert_fingerprint
if self.android_package_name:
labels[_KNOWN_LABELS.SCC_ANDROID_PACKAGE_NAME.label_name] = self.android_package_name
if self.client_ip:
labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip
if self.ios_bundle_id:
labels[_KNOWN_LABELS.SCC_IOS_BUNDLE_ID.label_name] = self.ios_bundle_id
if self.referer:
labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer
# Forcibly add system label reporting here, as the base service
# config does not specify it as a label.
labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = SERVICE_AGENT
labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT
op.labels = encoding.PyValueToMessage(
sc_messages.Operation.LabelsValue, labels)
check_request = sc_messages.CheckRequest(operation=op)
return sc_messages.ServicecontrolServicesCheckRequest(
serviceName=self.service_name,
checkRequest=check_request)
|
def as_check_request(self, timer=datetime.utcnow)
|
Makes a `ServicecontrolServicesCheckRequest` from this instance
Returns:
a ``ServicecontrolServicesCheckRequest``
Raises:
ValueError: if the fields in this instance are insufficient to
create a valid ``ServicecontrolServicesCheckRequest``
| 2.843292
| 2.592455
| 1.096757
|
if self._cache is None:
return []
with self._cache as c:
flushed_items = list(c.out_deque)
c.out_deque.clear()
cached_reqs = [item.extract_request() for item in flushed_items]
cached_reqs = [req for req in cached_reqs if req is not None]
return cached_reqs
|
def flush(self)
|
Flushes this instance's cache.
The driver of this instance should call this method every
`flush_interval`.
Returns:
list['CheckRequest']: corresponding to CheckRequests that were
pending
| 4.471954
| 4.293978
| 1.041448
|
if self._cache is not None:
with self._cache as c:
c.clear()
c.out_deque.clear()
|
def clear(self)
|
Clears this instance's cache.
| 9.211993
| 7.651115
| 1.204007
|
if self._cache is None:
return
signature = sign(req.checkRequest)
with self._cache as c:
now = self._timer()
quota_scale = 0 # WIP
item = c.get(signature)
if item is None:
c[signature] = CachedItem(
resp, self.service_name, now, quota_scale)
else:
# Update the cached item to reflect that it is updated
item.last_check_time = now
item.response = resp
item.quota_scale = quota_scale
item.is_flushing = False
c[signature] = item
|
def add_response(self, req, resp)
|
Adds the response from sending `req` to this instance's cache.
Args:
req (`ServicecontrolServicesCheckRequest`): the request
resp (CheckResponse): the response from sending the request
| 6.173326
| 5.625138
| 1.097453
|
if self._cache is None:
return None # no cache, send request now
if not isinstance(req, sc_messages.ServicecontrolServicesCheckRequest):
raise ValueError(u'Invalid request')
if req.serviceName != self.service_name:
_logger.error(u'bad check(): service_name %s does not match ours %s',
req.serviceName, self.service_name)
raise ValueError(u'Service name mismatch')
check_request = req.checkRequest
if check_request is None:
_logger.error(u'bad check(): no check_request in %s', req)
raise ValueError(u'Expected operation not set')
op = check_request.operation
if op is None:
_logger.error(u'bad check(): no operation in %s', req)
raise ValueError(u'Expected operation not set')
if op.importance != sc_messages.Operation.ImportanceValueValuesEnum.LOW:
return None # op is important, send request now
signature = sign(check_request)
with self._cache as cache:
_logger.debug(u'checking the cache for %r\n%s', signature, cache)
item = cache.get(signature)
if item is None:
return None # signal to caller to send req
else:
return self._handle_cached_response(req, item)
|
def check(self, req)
|
Determine if ``req`` is in this instance's cache.
Determine if there are cache hits for the request in this aggregator
instance.
Not in the cache
If req is not in the cache, it returns ``None`` to indicate that the
caller should send the request.
Cache Hit; response has errors
When a cached CheckResponse has errors, it's assumed that ``req`` would
fail as well, so the cached CheckResponse is returned. However, the
first CheckRequest after the flush interval has elapsed should be sent
to the server to refresh the CheckResponse, though until it's received,
subsequent CheckRequests should fail with the cached CheckResponse.
Cache behaviour - response passed
If the cached CheckResponse has no errors, it's assumed that ``req``
will succeed as well, so the CheckResponse is returned, with the quota
info updated to the same as requested. The requested tokens are
aggregated until flushed.
Args:
req (``ServicecontrolServicesCheckRequest``): to be sent to
the service control service
Raises:
ValueError: if the ``req`` service_name is not the same as
this instance's
Returns:
``CheckResponse``: if an applicable response cached by this
instance is available for use, or None if there is no applicable
response
| 3.797728
| 3.250549
| 1.168334
|
a_is_text = isinstance(a, basestring)
b_is_text = isinstance(b, basestring)
if type(a) != type(b) and not (a_is_text and b_is_text):
_logger.error(u'Cannot compare %s to %s, types differ %s!=%s',
a, b, type(a), type(b))
raise ValueError(u'cannot compare inputs of differing types')
if a_is_text:
a = from_rfc3339(a, with_nanos=True)
b = from_rfc3339(b, with_nanos=True)
if a < b:
return -1
elif a > b:
return 1
else:
return 0
|
def compare(a, b)
|
Compares two timestamps.
``a`` and ``b`` must be the same type. In addition to normal
representations of timestamps that order naturally, they can be
rfc3339-formatted strings.
Args:
a (string|object): a timestamp
b (string|object): another timestamp
Returns:
int: -1 if a < b, 0 if a == b or 1 if a > b
Raises:
ValueError: if a or b are not the same type
ValueError: if a or b strings but not in valid rfc3339 format
| 2.41423
| 2.373975
| 1.016957
|
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp - _EPOCH_START
if not isinstance(timestamp, datetime.timedelta):
_logger.error(u'Could not convert %s to a rfc3339 time,', timestamp)
raise ValueError(u'Invalid timestamp type')
return strict_rfc3339.timestamp_to_rfc3339_utcoffset(
timestamp.total_seconds())
|
def to_rfc3339(timestamp)
|
Converts ``timestamp`` to an RFC 3339 date string format.
``timestamp`` can be either a ``datetime.datetime`` or a
``datetime.timedelta``. Instances of the latter are assumed to be deltas
from the beginning of the unix epoch, 1st of January, 1970.
The returned string is always Z-normalized. Examples of the return format:
'1972-01-01T10:00:20.021Z'
Args:
timestamp (datetime|timedelta): represents the timestamp to convert
Returns:
string: timestamp converted to a rfc3339 compliant string as above
Raises:
ValueError: if timestamp is not a datetime.datetime or datetime.timedelta
| 4.299332
| 4.34935
| 0.9885
|
timestamp = strict_rfc3339.rfc3339_to_timestamp(rfc3339_text)
result = datetime.datetime.utcfromtimestamp(timestamp)
if with_nanos:
return (result, int((timestamp - int(timestamp)) * 1e9))
else:
return result
|
def from_rfc3339(rfc3339_text, with_nanos=False)
|
Parses an RFC 3339 date string into a datetime.datetime.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
- By default, the result is a datetime.datetime
- If with_nanos is true, the result is a 2-tuple, (datetime.datetime,
nanos), where the second field represents the possible nanosecond
resolution component of the second field.
Args:
rfc3339_text (string): An rfc3339 formatted date string
with_nanos (bool): Determines if nanoseconds should be parsed from the
string
Raises:
ValueError: if ``rfc3339_text`` is invalid
Returns:
:class:`datetime.datetime`: when with_nanos is False
tuple(:class:`datetime.datetime`, int): when with_nanos is True
| 2.184905
| 2.681624
| 0.814769
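When `with_nanos` is true, the fractional part of the epoch timestamp becomes an integer nanosecond field. A sketch of that split with a literal timestamp (float rounding means the nanos value is approximate):

```python
import datetime

timestamp = 63.021  # epoch seconds, as rfc3339_to_timestamp would return
result = datetime.datetime.utcfromtimestamp(timestamp)
nanos = int((timestamp - int(timestamp)) * 1e9)
print(result, nanos)  # nanos ~ 21000000, subject to float rounding
```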
|
now = timer()
op = sc_messages.Operation(
endTime=timestamp.to_rfc3339(now),
startTime=timestamp.to_rfc3339(now),
importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW)
if self.operation_id:
op.operationId = self.operation_id
if self.operation_name:
op.operationName = self.operation_name
if self.api_key and self.api_key_valid:
op.consumerId = u'api_key:' + self.api_key
elif self.consumer_project_id:
op.consumerId = u'project:' + self.consumer_project_id
return op
|
def as_operation(self, timer=datetime.utcnow)
|
Makes an ``Operation`` from this instance.
Returns:
an ``Operation``
| 2.913354
| 3.069722
| 0.949061
|
result = encoding.CopyProtoMessage(self._op)
names = sorted(self._metric_values_by_name_then_sign.keys())
for name in names:
mvs = self._metric_values_by_name_then_sign[name]
result.metricValueSets.append(
sc_messages.MetricValueSet(
metricName=name, metricValues=mvs.values()))
return result
|
def as_operation(self)
|
Obtains a single `Operation` representing this instance's contents.
Returns:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`
| 5.913153
| 5.510218
| 1.073125
|
self._op.logEntries.extend(other_op.logEntries)
self._merge_timestamps(other_op)
self._merge_metric_values(other_op)
|
def add(self, other_op)
|
Combines `other_op` with the operation held by this aggregator.
N.B. It merges the operation's log entries and metric values, but makes
the assumption that the operation is consistent. It's the caller's
responsibility to ensure consistency
Args:
other_op (
class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):
an operation to merge into this one
| 6.172532
| 3.286728
| 1.878017
|
if num_finite_buckets <= 0:
raise ValueError(_BAD_NUM_FINITE_BUCKETS)
if growth_factor <= 1.0:
raise ValueError(_BAD_FLOAT_ARG % (u'growth factor', 1.0))
if scale <= 0.0:
raise ValueError(_BAD_FLOAT_ARG % (u'scale', 0.0))
return sc_messages.Distribution(
bucketCounts=[0] * (num_finite_buckets + 2),
exponentialBuckets=sc_messages.ExponentialBuckets(
numFiniteBuckets=num_finite_buckets,
growthFactor=growth_factor,
scale=scale))
|
def create_exponential(num_finite_buckets, growth_factor, scale)
|
Creates a new instance of distribution with exponential buckets
Args:
num_finite_buckets (int): initializes number of finite buckets
growth_factor (float): initializes the growth factor
scale (float): initializes the scale
Return:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`
Raises:
ValueError: if the args are invalid for creating an instance
| 2.7224
| 2.365363
| 1.150944
|
if num_finite_buckets <= 0:
raise ValueError(_BAD_NUM_FINITE_BUCKETS)
if width <= 0.0:
raise ValueError(_BAD_FLOAT_ARG % (u'width', 0.0))
return sc_messages.Distribution(
bucketCounts=[0] * (num_finite_buckets + 2),
linearBuckets=sc_messages.LinearBuckets(
numFiniteBuckets=num_finite_buckets,
width=width,
offset=offset))
|
def create_linear(num_finite_buckets, width, offset)
|
Creates a new instance of distribution with linear buckets.
Args:
num_finite_buckets (int): initializes number of finite buckets
width (float): initializes the width of each bucket
offset (float): initializes the offset
Return:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`
Raises:
ValueError: if the args are invalid for creating an instance
| 3.642112
| 3.033941
| 1.200456
|
safe_bounds = sorted(float(x) for x in bounds)
if len(safe_bounds) != len(set(safe_bounds)):
raise ValueError(u'Detected two elements of bounds that are the same')
return sc_messages.Distribution(
bucketCounts=[0] * (len(safe_bounds) + 1),
explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds))
|
def create_explicit(bounds)
|
Creates a new instance of distribution with explicit buckets.
bounds is an iterable of ordered floats that define the explicit buckets
Args:
bounds (iterable[float]): initializes the bounds
Return:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`
Raises:
ValueError: if the args are invalid for creating an instance
| 5.674819
| 3.964565
| 1.431385
|
dist_type, _ = _detect_bucket_option(dist)
if dist_type == u'exponentialBuckets':
_update_general_statistics(a_float, dist)
_update_exponential_bucket_count(a_float, dist)
elif dist_type == u'linearBuckets':
_update_general_statistics(a_float, dist)
_update_linear_bucket_count(a_float, dist)
elif dist_type == u'explicitBuckets':
_update_general_statistics(a_float, dist)
_update_explicit_bucket_count(a_float, dist)
else:
_logger.error(u'Could not determine bucket option type for %s', dist)
raise ValueError(u'Unknown bucket option type')
|
def add_sample(a_float, dist)
|
Adds `a_float` to `dist`, updating its existing buckets.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
Raises:
ValueError: if `dist` does not have known bucket options defined
ValueError: if there are not enough bucket count fields in `dist`
| 2.754155
| 2.445215
| 1.126345
|
if not _buckets_nearly_equal(prior, latest):
_logger.error(u'Bucket options do not match. From %s To: %s',
prior,
latest)
raise ValueError(u'Bucket options do not match')
if len(prior.bucketCounts) != len(latest.bucketCounts):
_logger.error(u'Bucket count sizes do not match. From %s To: %s',
prior,
latest)
raise ValueError(u'Bucket count sizes do not match')
if prior.count <= 0:
return
old_count = latest.count
old_mean = latest.mean
old_summed_variance = latest.sumOfSquaredDeviation
bucket_counts = latest.bucketCounts
# Update the latest
latest.count += prior.count
latest.maximum = max(prior.maximum, latest.maximum)
latest.minimum = min(prior.minimum, latest.minimum)
latest.mean = ((old_count * old_mean + prior.count * prior.mean) /
latest.count)
latest.sumOfSquaredDeviation = (
old_summed_variance + prior.sumOfSquaredDeviation +
old_count * (latest.mean - old_mean) ** 2 +
prior.count * (latest.mean - prior.mean) ** 2)
for i, (x, y) in enumerate(zip(prior.bucketCounts, bucket_counts)):
bucket_counts[i] = x + y
|
def merge(prior, latest)
|
Merge `prior` into `latest`.
N.B. this mutates `latest`. It ensures that the statistics and histogram are
updated to correctly include the original values from both instances.
Args:
prior (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
an instance
latest (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
an instance to be updated
Raises:
ValueError: if the bucket options of `prior` and `latest` do not match
ValueError: if the bucket counts of `prior` and `latest` do not match
| 2.698155
| 2.406461
| 1.121213
|
a_type, a_buckets = _detect_bucket_option(a_dist)
b_type, b_buckets = _detect_bucket_option(b_dist)
if a_type != b_type:
return False
elif a_type == u'linearBuckets':
return _linear_buckets_nearly_equal(a_buckets, b_buckets)
elif a_type == u'exponentialBuckets':
return _exponential_buckets_nearly_equal(a_buckets, b_buckets)
elif a_type == u'explicitBuckets':
return _explicit_buckets_nearly_equal(a_buckets, b_buckets)
else:
return False
|
def _buckets_nearly_equal(a_dist, b_dist)
|
Determines whether two `Distributions` are nearly equal.
Args:
a_dist (:class:`Distribution`): an instance
b_dist (:class:`Distribution`): another instance
Return:
boolean: `True` if the two instances are approximately equal, otherwise
False
| 1.909178
| 2.11302
| 0.903531
|
if not dist.count:
dist.count = 1
dist.maximum = a_float
dist.minimum = a_float
dist.mean = a_float
dist.sumOfSquaredDeviation = 0
else:
old_count = dist.count
old_mean = dist.mean
new_mean = ((old_count * old_mean) + a_float) / (old_count + 1)
delta_sum_squares = (a_float - old_mean) * (a_float - new_mean)
dist.count += 1
dist.mean = new_mean
dist.maximum = max(a_float, dist.maximum)
dist.minimum = min(a_float, dist.minimum)
dist.sumOfSquaredDeviation += delta_sum_squares
|
def _update_general_statistics(a_float, dist)
|
Adds a_float to distribution, updating the statistics fields.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
| 1.76693
| 1.735378
| 1.018181
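The update above is a Welford-style recurrence: each new value adjusts the running mean and adds `(x - old_mean) * (x - new_mean)` to the sum of squared deviations. A self-contained check that it matches a direct computation:

```python
values = [4.0, 7.0, 13.0, 16.0]
count, mean, ssd = 0, 0.0, 0.0
for x in values:
    new_mean = ((count * mean) + x) / (count + 1)  # same formula as above
    ssd += (x - mean) * (x - new_mean)
    count += 1
    mean = new_mean

direct_mean = sum(values) / len(values)
direct_ssd = sum((x - direct_mean) ** 2 for x in values)
print(mean, direct_mean)  # both 10.0
print(ssd, direct_ssd)    # both 90.0
```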
|
buckets = dist.exponentialBuckets
if buckets is None:
raise ValueError(_BAD_UNSET_BUCKETS % (u'exponential buckets'))
bucket_counts = dist.bucketCounts
num_finite_buckets = buckets.numFiniteBuckets
if len(bucket_counts) < num_finite_buckets + 2:
raise ValueError(_BAD_LOW_BUCKET_COUNT)
scale = buckets.scale
factor = buckets.growthFactor
if (a_float <= scale):
index = 0
else:
index = 1 + int((math.log(a_float / scale) / math.log(factor)))
index = min(index, num_finite_buckets + 1)
bucket_counts[index] += 1
_logger.debug(u'scale:%f, factor:%f, sample:%f, index:%d',
scale, factor, a_float, index)
|
def _update_exponential_bucket_count(a_float, dist)
|
Adds `a_float` to `dist`, updating its exponential buckets.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
Raises:
ValueError: if `dist` does not already have exponential buckets defined
ValueError: if there are not enough bucket count fields in `dist`
| 3.526914
| 3.600309
| 0.979614
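A sketch of the index computation above in isolation: bucket 0 is the underflow bucket, `num_finite_buckets + 1` the overflow bucket, and with `scale=1, growth_factor=2` the finite buckets cover (1, 2], (2, 4], (4, 8]:

```python
import math

def exp_index(x, scale, factor, num_finite_buckets):
    # Same branch structure as the update above.
    if x <= scale:
        return 0
    return min(1 + int(math.log(x / scale) / math.log(factor)),
               num_finite_buckets + 1)

print([exp_index(x, 1.0, 2.0, 3) for x in (0.5, 1.5, 3.0, 6.0, 100.0)])
# -> [0, 1, 2, 3, 4]
```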
|
buckets = dist.linearBuckets
if buckets is None:
raise ValueError(_BAD_UNSET_BUCKETS % (u'linear buckets'))
bucket_counts = dist.bucketCounts
num_finite_buckets = buckets.numFiniteBuckets
if len(bucket_counts) < num_finite_buckets + 2:
raise ValueError(_BAD_LOW_BUCKET_COUNT)
width = buckets.width
lower = buckets.offset
upper = lower + (num_finite_buckets * width)
if a_float < lower:
index = 0
elif a_float >= upper:
index = num_finite_buckets + 1
else:
index = 1 + int(((a_float - lower) / width))
bucket_counts[index] += 1
_logger.debug(u'upper:%f, lower:%f, width:%f, sample:%f, index:%d',
upper, lower, width, a_float, index)
|
def _update_linear_bucket_count(a_float, dist)
|
Adds `a_float` to `dist`, updating its linear buckets.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
Raises:
ValueError: if `dist` does not already have linear buckets defined
ValueError: if there are not enough bucket count fields in `dist`
| 3.151316
| 3.228153
| 0.976198
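The same idea for the linear buckets: with `offset=0, width=10` and three finite buckets, the ranges are [0, 10), [10, 20), [20, 30), plus an underflow and an overflow bucket:

```python
def linear_index(x, offset, width, num_finite_buckets):
    # Same branch structure as the update above.
    upper = offset + num_finite_buckets * width
    if x < offset:
        return 0
    if x >= upper:
        return num_finite_buckets + 1
    return 1 + int((x - offset) / width)

print([linear_index(x, 0.0, 10.0, 3) for x in (-1.0, 5.0, 15.0, 25.0, 35.0)])
# -> [0, 1, 2, 3, 4]
```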
|
buckets = dist.explicitBuckets
if buckets is None:
raise ValueError(_BAD_UNSET_BUCKETS % (u'explicit buckets'))
bucket_counts = dist.bucketCounts
bounds = buckets.bounds
if len(bucket_counts) < len(bounds) + 1:
raise ValueError(_BAD_LOW_BUCKET_COUNT)
bucket_counts[bisect.bisect(bounds, a_float)] += 1
|
def _update_explicit_bucket_count(a_float, dist)
|
Adds `a_float` to `dist`, updating its explicit buckets.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
Raises:
ValueError: if `dist` does not already have explicit buckets defined
ValueError: if there are not enough bucket count fields in `dist`
| 4.8174
| 4.897379
| 0.983669
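The explicit-bucket update reduces to a single `bisect` call: for bounds `[1.0, 5.0]` there are three counters, and `bisect.bisect` picks the right one directly:

```python
import bisect

bounds = [1.0, 5.0]  # counters: x < 1.0, 1.0 <= x < 5.0, x >= 5.0
print([bisect.bisect(bounds, x) for x in (0.5, 1.0, 3.0, 5.0, 9.0)])
# -> [0, 1, 1, 2, 2]
```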
|
if kwargs is _sentinel:
kwargs = {}
event = Event(time, priority, action, argument, kwargs)
with self._lock:
heapq.heappush(self._queue, event)
return event
|
def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel)
|
Enter a new event in the queue at an absolute time.
Returns an ID for the event which can be used to remove it,
if necessary.
| 2.500317
| 2.795483
| 0.894413
|
time = self.timefunc() + delay
return self.enterabs(time, priority, action, argument, kwargs)
|
def enter(self, delay, priority, action, argument=(), kwargs=_sentinel)
|
A variant that specifies the time as a relative time.
This is actually the more commonly used interface.
| 7.001029
| 5.546553
| 1.262231
|
with self._lock:
self._queue.remove(event)
heapq.heapify(self._queue)
|
def cancel(self, event)
|
Remove an event from the queue.
This must be presented the ID as returned by enter().
If the event is not in the queue, this raises ValueError.
| 5.811367
| 4.664476
| 1.245878
|
# localize variable access to minimize overhead
# and to improve thread safety
lock = self._lock
q = self._queue
delayfunc = self.delayfunc
timefunc = self.timefunc
pop = heapq.heappop
while True:
with lock:
if not q:
break
time, priority, action, argument, kwargs = q[0]
now = timefunc()
if time > now:
delay = True
else:
delay = False
pop(q)
if delay:
if not blocking:
return time - now
delayfunc(time - now)
else:
action(*argument, **kwargs)
delayfunc(0)
|
def run(self, blocking=True)
|
Execute events until the queue is empty.
If blocking is False executes the scheduled events due to
expire soonest (if any) and then return the deadline of the
next scheduled call in the scheduler.
When there is a positive delay until the first event, the
delay function is called and the event is left in the queue;
otherwise, the event is removed from the queue and executed
(its action function is called, passing it the argument). If
the delay function returns prematurely, it is simply
restarted.
It is legal for both the delay function and the action
function to modify the queue or to raise an exception;
exceptions are not caught but the scheduler's state remains
well-defined so run() may be called again.
A questionable hack is added to allow other threads to run:
just after an event is executed, a delay of 0 is executed, to
avoid monopolizing the CPU when other threads are also
runnable.
| 4.574835
| 4.260461
| 1.073789
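These scheduler rows track the interface of Python's standard `sched` module closely; a minimal usage sketch with the stdlib version (not the variant above, which adds locking and a `kwargs` sentinel):

```python
import sched
import time

s = sched.scheduler(time.time, time.sleep)
s.enter(0.1, 1, print, argument=('first',))   # delay, priority, action
s.enter(0.2, 1, print, argument=('second',))
s.run()  # blocks until both events have fired, in time order
```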
|
if not isinstance(money, sc_messages.Money):
raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))
currency = money.currencyCode
if not currency or len(currency) != 3:
raise ValueError(_MSG_3_LETTERS_LONG)
units = money.units
nanos = money.nanos
if ((units > 0) and (nanos < 0)) or ((units < 0) and (nanos > 0)):
raise ValueError(_MSG_UNITS_NANOS_MISMATCH)
if abs(nanos) > MAX_NANOS:
raise ValueError(_MSG_NANOS_OOB)
|
def check_valid(money)
|
Determine if an instance of `Money` is valid.
Args:
money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the
instance to test
Raises:
ValueError: if the money instance is invalid
| 3.444168
| 3.367918
| 1.02264
|
for m in (a, b):
if not isinstance(m, sc_messages.Money):
raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))
if a.currencyCode != b.currencyCode:
raise ValueError(u'Money values need the same currency to be summed')
nano_carry, nanos_sum = _sum_nanos(a, b)
units_sum_no_carry = a.units + b.units
units_sum = units_sum_no_carry + nano_carry
# Adjust when units_sum and nanos_sum have different signs
if units_sum > 0 and nanos_sum < 0:
units_sum -= 1
nanos_sum += _BILLION
elif units_sum < 0 and nanos_sum > 0:
units_sum += 1
nanos_sum -= _BILLION
# Return the result, detecting overflow if it occurs
sign_a = _sign_of(a)
sign_b = _sign_of(b)
if sign_a > 0 and sign_b > 0 and units_sum >= _INT64_MAX:
if not allow_overflow:
raise OverflowError(u'Money addition positive overflow')
else:
return sc_messages.Money(units=_INT64_MAX,
nanos=MAX_NANOS,
currencyCode=a.currencyCode)
elif (sign_a < 0 and sign_b < 0 and
(units_sum_no_carry <= -_INT64_MAX or units_sum <= -_INT64_MAX)):
if not allow_overflow:
raise OverflowError(u'Money addition negative overflow')
else:
return sc_messages.Money(units=_INT64_MIN,
nanos=-MAX_NANOS,
currencyCode=a.currencyCode)
else:
return sc_messages.Money(units=units_sum,
nanos=nanos_sum,
currencyCode=a.currencyCode)
|
def add(a, b, allow_overflow=False)
|
Adds two instances of `Money`.
Args:
a (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): one money
value
b (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): another
money value
allow_overflow: determines if the addition is allowed to overflow
Return:
`Money`: an instance of Money
Raises:
ValueError: if the inputs do not have the same currency code
OverflowError: if the sum overflows and allow_overflow is not `True`
| 2.47276
| 2.348853
| 1.052752
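The sign adjustment in `add` keeps `units` and `nanos` consistent by moving one unit (one billion nanos) across when the two parts disagree in sign. A self-contained sketch of just that normalization step:

```python
_BILLION = 10 ** 9

def normalize(units, nanos):
    # Same adjustment as in add() above.
    if units > 0 and nanos < 0:
        units -= 1
        nanos += _BILLION
    elif units < 0 and nanos > 0:
        units += 1
        nanos -= _BILLION
    return units, nanos

# 3 units and -0.4e9 nanos is really 2.6 units:
print(normalize(3, -400000000))  # -> (2, 600000000)
```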
|
units = money.units
nanos = money.nanos
if units:
if units > 0:
return 1
elif units < 0:
return -1
if nanos:
if nanos > 0:
return 1
elif nanos < 0:
return -1
return 0
|
def _sign_of(money)
|
Determines the amount sign of a money instance
Args:
money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the
instance to test
Return:
int: 1, 0 or -1
| 2.143447
| 2.157937
| 0.993286
|
current_time = time.time()
expiration = jwt_claims[u"exp"]
if not isinstance(expiration, INT_TYPES):
raise suppliers.UnauthenticatedException(u'Malformed claim: "exp" must be an integer')
if current_time >= expiration:
raise suppliers.UnauthenticatedException(u"The auth token has already expired")
if u"nbf" not in jwt_claims:
return
not_before_time = jwt_claims[u"nbf"]
if not isinstance(not_before_time, INT_TYPES):
raise suppliers.UnauthenticatedException(u'Malformed claim: "nbf" must be an integer')
if current_time < not_before_time:
raise suppliers.UnauthenticatedException(u'Current time is less than the "nbf" time')
|
def _check_jwt_claims(jwt_claims)
|
Checks whether the JWT claims should be accepted.
Specifically, this method checks the "exp" claim and the "nbf" claim (if
present), and raises UnauthenticatedException if 1) the current time is
before the time identified by the "nbf" claim, or 2) the current time is
equal to or after the time identified by the "exp" claim.
Args:
jwt_claims: the JWT claims whose expiration is to be checked.
Raises:
UnauthenticatedException: When the "exp" claim is malformed or the JWT has
already expired.
| 2.57835
| 2.488885
| 1.035946
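A self-contained sketch of the "exp"/"nbf" checks above, operating on a plain dict of claims; `ValueError` stands in for the library's `UnauthenticatedException`:

```python
import time

def check_claims(claims, now=None):
    now = time.time() if now is None else now
    if now >= claims['exp']:
        raise ValueError('token expired')
    if 'nbf' in claims and now < claims['nbf']:
        raise ValueError('token not yet valid')

check_claims({'exp': 2000, 'nbf': 1000}, now=1500)  # accepted, no exception
```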
|
for claim_name in [u"aud", u"exp", u"iss", u"sub"]:
if claim_name not in jwt_claims:
raise suppliers.UnauthenticatedException(u'Missing "%s" claim' % claim_name)
|
def _verify_required_claims_exist(jwt_claims)
|
Verifies that the required claims exist.
Args:
jwt_claims: the JWT claims to be verified.
Raises:
UnauthenticatedException: if some claim doesn't exist.
| 4.314309
| 4.681463
| 0.921573
|
try:
jwt_claims = self.get_jwt_claims(auth_token)
except Exception as error:
raise suppliers.UnauthenticatedException(u"Cannot decode the auth token",
error)
_check_jwt_claims(jwt_claims)
user_info = UserInfo(jwt_claims)
issuer = user_info.issuer
if issuer not in self._issuers_to_provider_ids:
raise suppliers.UnauthenticatedException(u"Unknown issuer: " + issuer)
provider_id = self._issuers_to_provider_ids[issuer]
if not auth_info.is_provider_allowed(provider_id):
raise suppliers.UnauthenticatedException(u"The requested method does not "
u"allow provider id: " + provider_id)
# Check the audiences decoded from the auth token. The auth token is
# allowed when 1) an audience is equal to the service name, or 2) at least
# one audience is allowed in the method configuration.
audiences = user_info.audiences
has_service_name = service_name in audiences
allowed_audiences = auth_info.get_allowed_audiences(provider_id)
intersected_audiences = set(allowed_audiences).intersection(audiences)
if not has_service_name and not intersected_audiences:
raise suppliers.UnauthenticatedException(u"Audiences not allowed")
return user_info
|
def authenticate(self, auth_token, auth_info, service_name)
|
Authenticates the current auth token.
Args:
auth_token: the auth token.
auth_info: the auth configurations of the API method being called.
service_name: the name of this service.
Returns:
A constructed UserInfo object representing the identity of the caller.
Raises:
UnauthenticatedException: When
* the issuer is not allowed;
* the audiences are not allowed;
* the auth token has already expired.
| 3.083824
| 2.905311
| 1.061444
|
def _decode_and_verify():
jwt_claims = jwt.JWT().unpack(auth_token).payload()
_verify_required_claims_exist(jwt_claims)
issuer = jwt_claims[u"iss"]
keys = self._jwks_supplier.supply(issuer)
try:
return jws.JWS().verify_compact(auth_token, keys)
except (jwkest.BadSignature, jws.NoSuitableSigningKeys,
jws.SignerAlgError) as exception:
raise suppliers.UnauthenticatedException(u"Signature verification failed",
exception)
return self._cache.get_or_create(auth_token, _decode_and_verify)
|
def get_jwt_claims(self, auth_token)
|
Decodes the auth_token into JWT claims represented as a JSON object.
This method first tries to look up the cache and returns the result
immediately in case of a cache hit. When cache misses, the method tries to
decode the given auth token, verify its signature, and check the existence
of required JWT claims. When successful, the decoded JWT claims are loaded
into the cache and then returned.
Args:
auth_token: the auth token to be decoded.
Returns:
The decoded JWT claims.
Raises:
UnauthenticatedException: When the signature verification fails, or when
required claims are missing.
| 5.607223
| 5.106103
| 1.098141
|
if options is None: # no options, don't create cache
return None
if not isinstance(options, (CheckOptions, QuotaOptions, ReportOptions)):
_logger.error(u'make_cache(): bad options %s', options)
raise ValueError(u'Invalid options')
if (options.num_entries <= 0):
_logger.debug(u"did not create cache, options was %s", options)
return None
_logger.debug(u"creating a cache from %s", options)
if (options.flush_interval > ZERO_INTERVAL):
# options always has a flush_interval, but may have an expiration
# field. If the expiration is present, use that instead of the
# flush_interval for the ttl
ttl = getattr(options, u'expiration', options.flush_interval)
cache_cls = DequeOutTTLCache if use_deque else cachetools.TTLCache
return LockedObject(
cache_cls(
options.num_entries,
ttl=ttl.total_seconds(),
timer=to_cache_timer(timer)
))
cache_cls = DequeOutLRUCache if use_deque else cachetools.LRUCache
return LockedObject(cache_cls(options.num_entries))
|
def create(options, timer=None, use_deque=True)
|
Create a cache specified by ``options``
``options`` is an instance of either
:class:`endpoints_management.control.caches.CheckOptions` or
:class:`endpoints_management.control.caches.ReportOptions`
The returned cache is wrapped in a :class:`LockedObject`, requiring it to
be accessed in a with statement that gives synchronized access
Example:
>>> options = CheckOptions()
>>> synced_cache = create(options)
>>> with synced_cache as cache: # acquire the lock
... cache['a_key'] = 'a_value'
Args:
options (object): an instance of either of the options classes
Returns:
:class:`cachetools.Cache`: the cache implementation specified by options
or None: if options is ``None`` or if options.num_entries <= 0
Raises:
ValueError: if options is not a supported type
| 5.06389
| 4.181276
| 1.211087
|
if datetime_func is None:
datetime_func = datetime.utcnow
def _timer():
return (datetime_func() - datetime(1970, 1, 1)).total_seconds()
return _timer
|
def to_cache_timer(datetime_func)
|
Converts a datetime_func to a timestamp_func.
Args:
datetime_func (callable[[datetime]]): a func that returns the current
time
Returns:
time_func (callable[[timestamp]]): a func that returns the timestamp
from the epoch
| 3.313181
| 4.274142
| 0.775169
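A sketch of what `to_cache_timer` builds: wrapping a datetime-returning callable into one that returns seconds since the unix epoch, which is the form the cache `timer` argument above consumes:

```python
from datetime import datetime

def to_epoch_seconds(datetime_func=datetime.utcnow):
    # Mirrors the closure above: delta from the epoch, in seconds.
    return lambda: (datetime_func() - datetime(1970, 1, 1)).total_seconds()

timer = to_epoch_seconds()
print(timer())  # e.g. 1700000000.123, depending on the current time
```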
|
self.expire()
expired = {k: v for (k, v) in self._tracking.items() if self.get(k) is None}
for k, v in expired.items():
del self._tracking[k]
self._out_deque.append(v)
return self._out_deque
|
def out_deque(self)
|
The :class:`collections.deque` to which expired items are added.
| 3.690129
| 3.134109
| 1.177409
|
build.packages.install("wheel")
build.packages.install("twine")
build.executables.run([
"python", "setup.py",
"sdist", "bdist_wheel", "--universal", "upload",
])
build.executables.run([
"twine", "upload", "dist/*"
])
|
def distribute(build)
|
distribute the uranium package
| 2.988497
| 3.135026
| 0.953261
|
global _THREAD_CLASS # pylint: disable=global-statement
try:
from google.appengine.api.background_thread import background_thread
_THREAD_CLASS = background_thread.BackgroundThread
except ImportError:
_logger.error(
u'Could not install appengine background threads!'
u' Please install the python AppEngine SDK and use this from there')
|
def use_gae_thread()
|
Makes ``Client``s started after this use the appengine thread class.
| 6.519082
| 5.576786
| 1.168967
|
with self._lock:
if self._running:
return
self._stopped = False
self._running = True
self._start_idle_timer()
_logger.debug(u'starting thread of type %s to run the scheduler',
_THREAD_CLASS)
self._thread = create_thread(target=self._schedule_flushes)
try:
self._thread.start()
except Exception: # pylint: disable=broad-except
_logger.warn(
u'no scheduler thread, scheduler.run() will be invoked by report(...)',
exc_info=True)
self._thread = None
self._initialize_flushing()
|
def start(self)
|
Starts processing.
Calling this method
- starts the thread that regularly flushes all enabled caches.
- enables the other methods on the instance to be called successfully
| 5.843136
| 6.067248
| 0.963062
|
with self._lock:
if self._stopped:
_logger.debug(u'%s is already stopped', self)
return
self._flush_all_reports()
self._stopped = True
if self._run_scheduler_directly:
self._cleanup_if_stopped()
if self._scheduler and self._scheduler.empty():
# if there are events scheduled, then _running will subsequently
# be set False by the scheduler thread. This handles the
case where there are no events, e.g. because all aggregation
# was disabled
self._running = False
self._scheduler = None
|
def stop(self)
|
Halts processing
This will lead to the reports being flushed, the caches being cleared
and a stop to the current processing thread.
| 8.310855
| 7.629501
| 1.089305
|
self.start()
res = self._check_aggregator.check(check_req)
if res:
_logger.debug(u'using cached check response for %s: %s',
check_req, res)
return res
# Application code should not fail because check requests don't
# complete; they should fail open, so simply log the error and
# return None to indicate that no response was obtained
try:
transport = self._create_transport()
resp = transport.services.Check(check_req)
self._check_aggregator.add_response(check_req, resp)
return resp
except exceptions.Error: # only sink apitools errors
_logger.error(u'direct send of check request failed %s',
check_req, exc_info=True)
return None
|
def check(self, check_req)
|
Process a check_request.
The req is first passed to the check_aggregator. If there is a valid
cached response, that is returned, otherwise a response is obtained from
the transport.
Args:
check_req (``ServicecontrolServicesCheckRequest``): to be sent to
the service control service
Returns:
``CheckResponse``: either the cached response if one is applicable
or a response from making a transport request, or None
if the request to the transport fails
| 8.476797
| 6.568201
| 1.290581
|
self.start()
# no thread running, run the scheduler to ensure any pending
# flush tasks are executed.
if self._run_scheduler_directly:
self._scheduler.run(blocking=False)
if not self._report_aggregator.report(report_req):
_logger.debug(u'need to send a report request directly')
try:
transport = self._create_transport()
transport.services.Report(report_req)
except exceptions.Error: # only sink apitools errors
_logger.error(u'direct send for report request failed',
exc_info=True)
|
def report(self, report_req)
|
Processes a report request.
It will aggregate it with prior report_requests to be sent later,
or it will send it immediately if that's appropriate.
| 9.845939
| 9.102946
| 1.081621
|
if labels is not None:
kw[u'labels'] = encoding.PyValueToMessage(MetricValue.LabelsValue,
labels)
return MetricValue(**kw)
|
def create(labels=None, **kw)
|
Constructs a new metric value.
This acts as an alternative to the MetricValue constructor that
simplifies the specification of labels. Rather than having to create
a MetricValue.Labels instance, all that's necessary is to specify the
required strings.
Args:
labels (dict([string, [string]]):
**kw: any other valid keyword args valid in the MetricValue constructor
Returns
:class:`MetricValue`: the created instance
| 10.958944
| 8.826053
| 1.241659
|
prior_type, _ = _detect_value(prior)
latest_type, _ = _detect_value(latest)
if prior_type != latest_type:
_logger.warn(u'Metric values are not compatible: %s, %s',
prior, latest)
raise ValueError(u'Incompatible delta metric values')
if prior_type is None:
_logger.warn(u'Bad metric values, types not known for : %s, %s',
prior, latest)
raise ValueError(u'Unsupported delta metric types')
if metric_kind == MetricKind.DELTA:
return _merge_delta_metric(prior, latest)
else:
return _merge_cumulative_or_gauge_metrics(prior, latest)
|
def merge(metric_kind, prior, latest)
|
Merges `prior` and `latest`
Args:
metric_kind (:class:`MetricKind`): indicates the kind of metrics
being merged
prior (:class:`MetricValue`): a prior instance of the metric
latest (:class:`MetricValue`): the latest instance of the metric
| 3.407441
| 3.503196
| 0.972666
|
if mv.labels:
signing.add_dict_to_hash(a_hash, encoding.MessageToPyValue(mv.labels))
money_value = mv.get_assigned_value(u'moneyValue')
if money_value is not None:
a_hash.update(b'\x00')
a_hash.update(money_value.currencyCode.encode('utf-8'))
|
def update_hash(a_hash, mv)
|
Adds ``mv`` to ``a_hash``
Args:
a_hash (`Hash`): the secure hash, e.g created by hashlib.md5
mv (:class:`MetricValue`): the instance to add to the hash
| 6.577706
| 6.196712
| 1.061483
|
md5 = hashlib.md5()
update_hash(md5, mv)
return md5.digest()
|
def sign(mv)
|
Obtains a signature for a `MetricValue`
Args:
mv (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricValue`): a
MetricValue that's part of an operation
Returns:
string: a unique signature for that operation
| 5.91347
| 7.871776
| 0.751224
|
issuer_uri_config = self._issuer_uri_configs.get(issuer)
if not issuer_uri_config:
# The issuer is unknown.
return
jwks_uri = issuer_uri_config.jwks_uri
if jwks_uri:
# When jwks_uri is set, return it directly.
return jwks_uri
# When jwksUri is empty, we try to retrieve it through the OpenID
# discovery.
open_id_valid = issuer_uri_config.open_id_valid
if open_id_valid:
discovered_jwks_uri = _discover_jwks_uri(issuer)
self._issuer_uri_configs[issuer] = IssuerUriConfig(False,
discovered_jwks_uri)
return discovered_jwks_uri
|
def supply(self, issuer)
|
Supplies the `jwks_uri` for the given issuer.
Args:
issuer: the issuer.
Returns:
The `jwks_uri` that is either statically configured or retrieved via
OpenId discovery. None is returned when the issuer is unknown or the
OpenId discovery fails.
| 3.1966
| 2.743029
| 1.165354
|
def _retrieve_jwks():
jwks_uri = self._key_uri_supplier.supply(issuer)
if not jwks_uri:
raise UnauthenticatedException(u"Cannot find the `jwks_uri` for issuer "
u"%s: either the issuer is unknown or "
u"the OpenID discovery failed" % issuer)
try:
response = requests.get(jwks_uri)
json_response = response.json()
except Exception as exception:
message = u"Cannot retrieve valid verification keys from the `jwks_uri`"
raise UnauthenticatedException(message, exception)
if u"keys" in json_response:
# De-serialize the JSON as a JWKS object.
jwks_keys = jwk.KEYS()
jwks_keys.load_jwks(response.text)
return jwks_keys._keys
else:
# The JSON is a dictionary mapping from key id to X.509 certificates.
# Thus we extract the public key from the X.509 certificates and
# construct a JWKS object.
return _extract_x509_certificates(json_response)
return self._jwks_cache.get_or_create(issuer, _retrieve_jwks)
|
def supply(self, issuer)
|
Supplies the `Json Web Key Set` for the given issuer.
Args:
issuer: the issuer.
Returns:
The successfully retrieved Json Web Key Set. None is returned if the
issuer is unknown or the retrieval process fails.
Raises:
UnauthenticatedException: When this method cannot supply JWKS for the
given issuer (e.g. unknown issuer, HTTP request error).
| 4.351367
| 3.939902
| 1.104435
|
desc_value_type = desc.valueType or ValueType.STRING # default not parsed
return (self.label_name == desc.key and
self.value_type == desc_value_type)
|
def matches(self, desc)
|
Determines if a given label descriptor matches this enum instance
Args:
desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
the instance to test
Return:
`True` if desc is supported, otherwise `False`
| 10.678733
| 13.656669
| 0.781943
|
if self.update_label_func:
self.update_label_func(self.label_name, info, labels)
|
def do_labels_update(self, info, labels)
|
Updates a dictionary of labels using the assigned update_label_func
Args:
info (:class:`endpoints_management.control.report_request.Info`): the
info instance to update
labels (dict[string[string]]): the labels dictionary
Return:
`None`: the ``labels`` dict is updated in place
| 4.676391
| 5.334557
| 0.876622
|
for l in cls:
if l.matches(desc):
return True
return False
|
def is_supported(cls, desc)
|
Determines if the given label descriptor is supported.
Args:
desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
the label descriptor to test
Return:
`True` if desc is supported, otherwise `False`
| 6.059226
| 6.542096
| 0.92619
|
resource_descs = service.monitoredResources
labels_dict = {}
logs = set()
if service.logging:
logs = _add_logging_destinations(
service.logging.producerDestinations,
resource_descs,
service.logs,
labels_dict,
label_is_supported
)
metrics_dict = {}
monitoring = service.monitoring
if monitoring:
for destinations in (monitoring.consumerDestinations,
monitoring.producerDestinations):
_add_monitoring_destinations(destinations,
resource_descs,
service.metrics,
metrics_dict,
metric_is_supported,
labels_dict,
label_is_supported)
return logs, metrics_dict.keys(), labels_dict.keys()
|
def extract_report_spec(
service,
label_is_supported=label_descriptor.KnownLabels.is_supported,
metric_is_supported=metric_descriptor.KnownMetrics.is_supported)
|
Obtains the used logs, metrics and labels from a service.
label_is_supported and metric_is_supported are filter functions used to
determine if label_descriptors or metric_descriptors found in the service
are supported.
Args:
service (:class:`endpoints_management.gen.servicecontrol_v1_messages.Service`):
a service instance
label_is_supported (:func): determines if a given label is supported
metric_is_supported (:func): determines if a given metric is supported
Return:
tuple: (
logs (set[string]), # the logs to report to
metrics (list[string]), # the metrics to use
labels (list[string]) # the labels to add
)
| 3.419569
| 3.054293
| 1.119595
|
service = self._service
if not service.authentication:
return {}
auth_infos = {}
for auth_rule in service.authentication.rules:
selector = auth_rule.selector
provider_ids_to_audiences = {}
for requirement in auth_rule.requirements:
provider_id = requirement.providerId
if provider_id and requirement.audiences:
audiences = requirement.audiences.split(u",")
provider_ids_to_audiences[provider_id] = audiences
auth_infos[selector] = AuthInfo(provider_ids_to_audiences)
return auth_infos
|
def _extract_auth_config(self)
|
Obtains the authentication configurations.
| 3.074576
| 2.786576
| 1.103353
|
service = self._service
all_urls = set()
urls_with_options = set()
if not service.http:
return
for rule in service.http.rules:
http_method, url = _detect_pattern_option(rule)
if not url or not http_method or not rule.selector:
_logger.error(u'invalid HTTP binding encountered')
continue
# Obtain the method info
method_info = self._get_or_create_method_info(rule.selector)
if rule.body:
method_info.body_field_path = rule.body
if not self._register(http_method, url, method_info):
continue # detected an invalid url
all_urls.add(url)
if http_method == self._OPTIONS:
urls_with_options.add(url)
self._add_cors_options_selectors(all_urls - urls_with_options)
self._update_usage()
self._update_system_parameters()
|
def _extract_methods(self)
|
Obtains the methods used in the service.
| 5.368587
| 5.101057
| 1.052446
|
res = left * right
return "{left}*{right}={res}".format(left=left, right=right, res=res)
|
async def get_blueprint_params(request, left: int, right: int) -> str
|
API Description: Multiply, left * right. This will show in the swagger page (localhost:8000/api/v1/).
| 5.196681
| 3.83588
| 1.354756
|
if not service_name:
service_name = _get_env_var_or_raise(_SERVICE_NAME_ENV_KEY)
if not service_version:
service_version = _get_service_version(_SERVICE_VERSION_ENV_KEY,
service_name)
_logger.debug(u'Contacting Service Management API for service %s version %s',
service_name, service_version)
response = _make_service_config_request(service_name, service_version)
_logger.debug(u'obtained service json from the management api:\n%s', response.data)
service = encoding.JsonToMessage(messages.Service, response.data)
_validate_service_config(service, service_name, service_version)
return service
|
def fetch_service_config(service_name=None, service_version=None)
|
Fetches the service config from Google Service Management API.
Args:
service_name: the service name. When this argument is unspecified, this
method uses the value of the "SERVICE_NAME" environment variable as the
service name, and raises ValueError if the environment variable is unset.
service_version: the service version. When this argument is unspecified,
this method uses the value of the "SERVICE_VERSION" environment variable
as the service version, and raises ValueError if the environment variable
is unset.
Returns: the fetched service config, decoded into a `Service` message.
Raises:
ValueError: when the service name/version is neither provided as an
argument nor set as an environment variable; or when the fetched service
config fails validation.
Exception: when the Google Service Management API returns non-200 response.
| 3.259498
| 3.061342
| 1.064728
|
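A minimal sketch of the environment-variable fallback that fetch_service_config depends on; the variable name matches the docstring, but _get_env_var_or_raise is reimplemented here as an assumption about its behaviour:

import os

def _get_env_var_or_raise(key):
    # mirror the documented contract: unset variable -> ValueError
    if key not in os.environ:
        raise ValueError(u'%s is not set in the environment' % (key,))
    return os.environ[key]

os.environ[u'SERVICE_NAME'] = u'my-service'
print(_get_env_var_or_raise(u'SERVICE_NAME'))  # my-service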
return ConfigFetchWrapper(application, project_id, control_client, loader)
|
def add_all(application, project_id, control_client,
loader=service.Loaders.FROM_SERVICE_MANAGEMENT)
|
Adds all endpoints middleware to a wsgi application.
Sets up application to use all default endpoints middleware.
Example:
>>> application = MyWsgiApp() # an existing WSGI application
>>>
>>> # the name of the controlled service
>>> service_name = 'my-service-name'
>>>
>>> # A GCP project with service control enabled
>>> project_id = 'my-project-id'
>>>
>>> # wrap the app for service control
>>> from endpoints_management.control import wsgi
>>> control_client = client.Loaders.DEFAULT.load(service_name)
>>> control_client.start()
>>> wrapped_app = add_all(application, project_id, control_client)
>>>
>>> # now use wrapped_app in place of app
Args:
application: the wrapped wsgi application
project_id: the project_id that provides service control support
control_client: the service control client instance
loader (:class:`endpoints_management.control.service.Loader`): loads the service
instance that configures this instance's behaviour
| 8.772241
| 26.542553
| 0.330497
|
if not isinstance(a_service, sm_messages.Service):
raise ValueError(u"service is None or not an instance of Service")
authentication = a_service.authentication
if not authentication:
_logger.info(u"authentication is not configured in service, "
u"authentication checks will be disabled")
return
issuers_to_provider_ids = {}
issuer_uri_configs = {}
for provider in authentication.providers:
issuer = provider.issuer
jwks_uri = provider.jwksUri
# Enable OpenID discovery if jwks_uri is unset
open_id = jwks_uri is None
issuer_uri_configs[issuer] = suppliers.IssuerUriConfig(open_id, jwks_uri)
issuers_to_provider_ids[issuer] = provider.id
key_uri_supplier = suppliers.KeyUriSupplier(issuer_uri_configs)
jwks_supplier = suppliers.JwksSupplier(key_uri_supplier)
authenticator = tokens.Authenticator(issuers_to_provider_ids, jwks_supplier)
return authenticator
|
def _create_authenticator(a_service)
|
Create an instance of :class:`google.auth.tokens.Authenticator`.
Args:
a_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`): a
service instance
| 3.618599
| 3.434093
| 1.053728
|
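The jwks_uri / OpenID-discovery decision in _create_authenticator boils down to a None check; a hedged standalone sketch (provider_config is hypothetical):

def provider_config(issuer, jwks_uri=None):
    # no jwks_uri configured -> fall back to OpenID discovery
    return {u'issuer': issuer,
            u'jwks_uri': jwks_uri,
            u'use_openid_discovery': jwks_uri is None}

print(provider_config(u'https://accounts.example.com'))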
md5 = hashlib.md5()
md5.update(op.consumerId.encode('utf-8'))
md5.update(b'\x00')
md5.update(op.operationName.encode('utf-8'))
if op.labels:
signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
return md5.digest()
|
def _sign_operation(op)
|
Obtains a signature for an operation in a ReportRequest.
Args:
op (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an
operation used in a `ReportRequest`
Returns:
string: a unique signature for that operation
| 3.948114
| 3.41882
| 1.154818
|
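The signing scheme is easy to demonstrate standalone: fields are hashed with b'\x00' separators so adjacent fields cannot be confused. sign_fields below is a hypothetical simplification of _sign_operation:

import hashlib

def sign_fields(*fields):
    md5 = hashlib.md5()
    for i, field in enumerate(fields):
        if i:
            md5.update(b'\x00')  # separator prevents ('ab','c') == ('a','bc')
        md5.update(field.encode('utf-8'))
    return md5.hexdigest()

assert sign_fields(u'ab', u'c') != sign_fields(u'a', u'bc')
print(sign_fields(u'project:my-project', u'ListShelves'))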
if not metric_names:
metric_names = ()
if not label_names:
label_names = ()
known_labels = []
known_metrics = []
# pylint: disable=no-member
# pylint is not aware of the __members__ attributes
for l in label_descriptor.KnownLabels.__members__.values():
if l.update_label_func and l.label_name in label_names:
known_labels.append(l)
for m in metric_descriptor.KnownMetrics.__members__.values():
if m.update_op_func and m.metric_name in metric_names:
known_metrics.append(m)
return cls(logs=logs, metrics=known_metrics, labels=known_labels)
|
def from_known_inputs(cls, logs=None, metric_names=None, label_names=None)
|
An alternate constructor that assumes known metrics and labels.
This differs from the default constructor in that the metrics and labels
are iterables of names of 'known' metrics and labels respectively. The
names are used to obtain the metrics and labels from
:class:`endpoints_management.control.metric_descriptor.KnownMetrics` and
:class:`endpoints_management.control.label_descriptor.KnownLabels` respectively.
Names that don't correspond to a known metric or label are ignored; as
are metrics or labels that don't yet have a way of updating the
`ReportRequest` operation.
Args:
logs (iterable[string]): the names of the logs to be included in the
    `ReportRequest`
metric_names (iterable[string]): the names of known metrics to be
    added to the `ReportRequest`
label_names (iterable[string]): the names of known labels to be added
    to the `ReportRequest`
| 2.906454
| 2.700958
| 1.076083
|
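The name-based lookup in from_known_inputs walks an enum's __members__; a generic sketch with an illustrative enum (not one of the library's):

import enum

class Color(enum.Enum):
    RED = u'red'
    GREEN = u'green'

def known_by_name(wanted_names):
    # keep only members whose value was explicitly requested
    return [m for m in Color.__members__.values() if m.value in wanted_names]

print(known_by_name({u'red'}))  # [<Color.RED: 'red'>]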
# initialize the struct with fields that are always present
d = {
u'http_response_code': self.response_code,
u'timestamp': time.mktime(now.timetuple())
}
# compute the severity
severity = _SEVERITY.INFO
if self.response_code >= 400:
severity = _SEVERITY.ERROR
d[u'error_cause'] = self.error_cause.name
# add 'optional' fields to the struct
if self.request_size > 0:
d[u'request_size'] = self.request_size
if self.response_size > 0:
d[u'response_size'] = self.response_size
if self.method:
d[u'http_method'] = self.method
if self.request_time:
d[u'request_latency_in_ms'] = self.request_time.total_seconds() * 1000
# add 'copyable' fields to the struct
for key in self.COPYABLE_LOG_FIELDS:
value = getattr(self, key, None)
if value:
d[key] = value
return sc_messages.LogEntry(
name=name,
timestamp=timestamp.to_rfc3339(now),
severity=severity,
structPayload=_struct_payload_from(d))
|
def _as_log_entry(self, name, now)
|
Makes a `LogEntry` from this instance for the given log name.
Args:
    name (string): the name of the log this entry is written to
    now (:class:`datetime.DateTime`): the current time
Return:
    a ``LogEntry`` generated from this instance with the given name
    and timestamp
Raises:
    ValueError: if the fields in this instance are insufficient to
    create a valid ``LogEntry``
| 2.992644
| 3.033021
| 0.986688
|
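The payload assembly in _as_log_entry follows a common shape: required fields first, severity derived from the status code, optional fields only when set. A minimal hypothetical stand-in:

import time

def build_payload(response_code, request_size=0, method=None):
    d = {u'http_response_code': response_code,
         u'timestamp': time.time()}
    # 4xx/5xx responses are logged at ERROR severity
    d[u'severity'] = u'ERROR' if response_code >= 400 else u'INFO'
    if request_size > 0:
        d[u'request_size'] = request_size
    if method:
        d[u'http_method'] = method
    return d

print(build_payload(404, method=u'GET'))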
if not self.service_name:
raise ValueError(u'the service name must be set')
op = super(Info, self).as_operation(timer=timer)
# Populate metrics and labels if they can be associated with a
# method/operation
if op.operationId and op.operationName:
labels = {}
for known_label in rules.labels:
known_label.do_labels_update(self, labels)
# Forcibly add system label reporting here, as the base service
# config does not specify it as a label.
labels[_KNOWN_LABELS.SCC_PLATFORM.label_name] = (
self.platform.friendly_string())
labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = (
SERVICE_AGENT)
labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT
if labels:
op.labels = encoding.PyValueToMessage(
sc_messages.Operation.LabelsValue,
labels)
for known_metric in rules.metrics:
known_metric.do_operation_update(self, op)
# Populate the log entries
now = timer()
op.logEntries = [self._as_log_entry(l, now) for l in rules.logs]
return sc_messages.ServicecontrolServicesReportRequest(
serviceName=self.service_name,
reportRequest=sc_messages.ReportRequest(operations=[op]))
|
def as_report_request(self, rules, timer=datetime.utcnow)
|
Makes a `ServicecontrolServicesReportRequest` from this instance
Args:
rules (:class:`ReportingRules`): determines what labels, metrics and
logs to include in the report request.
timer: a function that determines the current time
Return:
a ``ServicecontrolServicesReportRequest`` generated from this instance
governed by the provided ``rules``
Raises:
ValueError: if the fields in this instance cannot be used to create
a valid ``ServicecontrolServicesReportRequest``
| 5.58925
| 4.853089
| 1.151689
|
if self._cache is None:
return _NO_RESULTS
with self._cache as c:
flushed_ops = [x.as_operation() for x in c.out_deque]
c.out_deque.clear()
reqs = []
max_ops = self.MAX_OPERATION_COUNT
for x in range(0, len(flushed_ops), max_ops):
report_request = sc_messages.ReportRequest(
operations=flushed_ops[x:x + max_ops])
reqs.append(
sc_messages.ServicecontrolServicesReportRequest(
serviceName=self.service_name,
reportRequest=report_request))
return reqs
|
def flush(self)
|
Flushes this instance's cache.
The driver of this instance should call this method every
`flush_interval`.
Returns:
list[``ServicecontrolServicesReportRequest``]: corresponding to the
pending cached operations
| 5.420157
| 3.833242
| 1.413988
|
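The batching step in flush() slices the flushed operations so no single request exceeds MAX_OPERATION_COUNT; chunk is a hypothetical extraction of that loop:

def chunk(items, max_per_request):
    # one sublist per outgoing request
    return [items[i:i + max_per_request]
            for i in range(0, len(items), max_per_request)]

print(chunk(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]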
if self._cache is None:
    return _NO_RESULTS
with self._cache as k:
    res = [x.as_operation() for x in k.values()]
    k.clear()
    k.out_deque.clear()
    return res
|
def clear(self)
|
Clears the cache.
| 8.545139
| 7.727908
| 1.105751
|
if self._cache is None:
return None # no cache, send request now
if not isinstance(req, sc_messages.ServicecontrolServicesReportRequest):
raise ValueError(u'Invalid request')
if req.serviceName != self.service_name:
_logger.error(u'bad report(): service_name %s does not match ours %s',
req.serviceName, self.service_name)
raise ValueError(u'Service name mismatch')
report_req = req.reportRequest
if report_req is None:
_logger.error(u'bad report(): no report_request in %s', req)
raise ValueError(u'Expected report_request not set')
if _has_high_important_operation(report_req) or self._cache is None:
return None
ops_by_signature = _key_by_signature(report_req.operations,
_sign_operation)
# Concurrency:
#
# This holds a lock on the cache while updating it. No i/o operations
# are performed, so any waiting threads see minimal delays
with self._cache as cache:
for key, op in ops_by_signature.items():
agg = cache.get(key)
if agg is None:
cache[key] = operation.Aggregator(op, self._kinds)
else:
agg.add(op)
return self.CACHED_OK
|
def report(self, req)
|
Adds a report request to the cache.
Returns ``None`` if it could not be aggregated, and callers need to
send the request to the server, otherwise it returns ``CACHED_OK``.
Args:
req (:class:`sc_messages.ReportRequest`): the request
to be aggregated
Returns:
    ``None`` if the request was not cached, otherwise ``CACHED_OK``
| 5.677157
| 5.299903
| 1.071181
|
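The aggregate-or-insert step in report() can be sketched generically: items are keyed by signature, the first occurrence seeds an aggregate, and later ones merge into it (a simple count stands in for the real operation merge):

def aggregate_by_signature(items, sign):
    cache = {}
    for item in items:
        key = sign(item)
        cache[key] = cache.get(key, 0) + 1  # real code merges Operations here
    return cache

ops = [u'GET /books', u'GET /books', u'POST /books']
print(aggregate_by_signature(ops, lambda op: op))  # {'GET /books': 2, 'POST /books': 1}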
if not allocate_quota_response or not allocate_quota_response.allocateErrors:
return _IS_OK
# only convert the first error for now, as per ESP
theError = allocate_quota_response.allocateErrors[0]
error_tuple = _QUOTA_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)
if error_tuple[1].find(u'{') == -1:  # no replacements needed
return error_tuple
updated_msg = error_tuple[1].format(project_id=project_id, detail=theError.description or u'')
return error_tuple[0], updated_msg
|
def convert_response(allocate_quota_response, project_id)
|
Computes an HTTP status code and message from an `AllocateQuotaResponse`.
The return value is a tuple (code, message) where
code: is the http status code
message: is the message to return
Args:
allocate_quota_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaResponse`):
the response from calling an API
Returns:
tuple(code, message)
| 6.574203
| 6.706091
| 0.980333
|
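convert_response is a lookup-then-format table; the sketch below invents the table entries and only mirrors the mechanism:

_ERRORS = {
    u'RESOURCE_EXHAUSTED': (429, u'Quota exceeded for project {project_id}'),
}
_UNKNOWN = (500, u'Unknown error')

def convert(code, project_id):
    status, msg = _ERRORS.get(code, _UNKNOWN)
    if u'{' in msg:  # only format messages that contain placeholders
        msg = msg.format(project_id=project_id)
    return status, msg

print(convert(u'RESOURCE_EXHAUSTED', u'my-project'))  # (429, 'Quota exceeded ...')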
if not isinstance(allocate_quota_request, sc_messages.AllocateQuotaRequest):
raise ValueError(u'Invalid request')
op = allocate_quota_request.allocateOperation
if op is None or op.methodName is None or op.consumerId is None:
logging.error(u'Bad %s: not initialized => not signed', allocate_quota_request)
raise ValueError(u'allocate_quota request must be initialized with an operation')
md5 = hashlib.md5()
md5.update(op.methodName.encode('utf-8'))
md5.update(b'\x00')
md5.update(op.consumerId.encode('utf-8'))
if op.labels:
signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
for value_set in op.quotaMetrics:
md5.update(b'\x00')
md5.update(value_set.metricName.encode('utf-8'))
for mv in value_set.metricValues:
metric_value.update_hash(md5, mv)
md5.update(b'\x00')
return md5.digest()
|
def sign(allocate_quota_request)
|
Obtains a signature for the operation in an `AllocateQuotaRequest`
Args:
    allocate_quota_request (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaRequest`):
        the request containing the operation to sign
Returns:
string: a secure hash generated from the operation
| 3.573026
| 3.384763
| 1.055621
|
if not self.service_name:
raise ValueError(u'the service name must be set')
if not self.operation_id:
raise ValueError(u'the operation id must be set')
if not self.operation_name:
raise ValueError(u'the operation name must be set')
op = super(Info, self).as_operation(timer=timer)
labels = {}
if self.client_ip:
labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip
if self.referer:
labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer
qop = sc_messages.QuotaOperation(
operationId=op.operationId,
methodName=op.operationName,
consumerId=op.consumerId,
quotaMode=sc_messages.QuotaOperation.QuotaModeValueValuesEnum.BEST_EFFORT,
)
qop.labels = encoding.PyValueToMessage(
sc_messages.QuotaOperation.LabelsValue, labels)
quota_info = self.quota_info if self.quota_info else {}
qop.quotaMetrics = [
sc_messages.MetricValueSet(
metricName=name, metricValues=[sc_messages.MetricValue(int64Value=cost)])
for name, cost in quota_info.items()
]
allocate_quota_request = sc_messages.AllocateQuotaRequest(allocateOperation=qop)
if self.config_id:
allocate_quota_request.serviceConfigId = self.config_id
return sc_messages.ServicecontrolServicesAllocateQuotaRequest(
serviceName=self.service_name,
allocateQuotaRequest=allocate_quota_request)
|
def as_allocate_quota_request(self, timer=datetime.utcnow)
|
Makes a `ServicecontrolServicesAllocateQuotaRequest` from this instance
Returns:
a ``ServicecontrolServicesAllocateQuotaRequest``
Raises:
ValueError: if the fields in this instance are insufficient to
create a valid ``ServicecontrolServicesAllocateQuotaRequest``
| 3.098443
| 2.854879
| 1.085315
|
if self._cache is None:
return []
with self._cache as c, self._out as out:
c.expire()
now = self._timer()
for item in c.values():
if (not self._in_flush_all) and (not self._should_expire(item)):
if (not item.is_in_flight) and item._op_aggregator is not None:
item.is_in_flight = True
item.last_refresh_timestamp = now
out.append(item.extract_request()) # pylint: disable=no-member
flushed_items = list(out)
out.clear() # pylint: disable=no-member
for req in flushed_items:
assert isinstance(req, sc_messages.ServicecontrolServicesAllocateQuotaRequest)
return flushed_items
|
def flush(self)
|
Flushes this instance's cache.
The driver of this instance should call this method every
`flush_interval`.
Returns:
list['ServicecontrolServicesAllocateQuotaRequest']: corresponding
to AllocateQuotaRequests that were pending
| 6.798751
| 5.254384
| 1.29392
|
if self._cache is not None:
    with self._cache as c, self._out as out:
        self._in_flush_all = True
        c.clear()
        out.clear()  # pylint: disable=no-member
        self._in_flush_all = False
|
def clear(self)
|
Clears this instance's cache.
| 5.537055
| 5.027883
| 1.10127
|
if self._cache is None:
return
signature = sign(req.allocateQuotaRequest)
with self._cache as c:
now = self._timer()
item = c.get(signature)
if item is None:
c[signature] = CachedItem(
req, resp, self.service_name, now)
else:
# Refresh the cached item with the newly received response
item.last_check_time = now
item.response = resp
item.is_in_flight = False
c[signature] = item
|
def add_response(self, req, resp)
|
Adds the response received for `req` to this instance's cache.
Args:
req (`ServicecontrolServicesAllocateQuotaRequest`): the request
resp (AllocateQuotaResponse): the response from sending the request
| 5.666236
| 5.099594
| 1.111115
|
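add_response's insert-or-refresh behaviour, sketched with a plain dict and a hypothetical CacheEntry class:

class CacheEntry(object):
    def __init__(self, response, now):
        self.response = response
        self.last_check_time = now
        self.is_in_flight = False

def add_response(cache, signature, response, now):
    entry = cache.get(signature)
    if entry is None:
        cache[signature] = CacheEntry(response, now)  # first sighting
    else:
        entry.last_check_time = now  # refresh the existing entry
        entry.response = response
        entry.is_in_flight = False

cache = {}
add_response(cache, b'sig', u'ok', 1.0)
add_response(cache, b'sig', u'ok-again', 2.0)
print(cache[b'sig'].response, cache[b'sig'].last_check_time)  # ok-again 2.0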
transmute_func = TransmuteFunction(
fn,
args_not_from_request=["request"]
)
handler = create_handler(transmute_func, context=context)
get_swagger_spec(app_or_blueprint).add_func(transmute_func, context)
for p in transmute_func.paths:
sanic_path = _convert_to_sanic_path(p)
app_or_blueprint.add_route(handler, sanic_path, methods=list(transmute_func.methods))
|
def add_route(app_or_blueprint, fn, context=default_context)
|
Adds a transmute route to the application.
| 4.563713
| 4.29898
| 1.06158
|
return (self.metric_name == desc.name and
self.kind == desc.metricKind and
self.value_type == desc.valueType)
|
def matches(self, desc)
|
Determines if a given metric descriptor matches this enum instance
Args:
desc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the
instance to test
Return:
`True` if desc is supported, otherwise `False`
| 6.020209
| 5.317451
| 1.132161
|
self.update_op_func(self.metric_name, info, an_op)
|
def do_operation_update(self, info, an_op)
|
Updates an operation using the assigned update_op_func
Args:
    info (:class:`endpoints_management.control.report_request.Info`): the
        info instance providing the update values
    an_op (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):
        the operation to update
| 10.419102
| 6.842448
| 1.522716
|