Dataset schema — each record carries six fields:

_id: string (2–7 chars)
title: string (1–88 chars)
partition: string (3 classes)
text: string (75–19.8k chars)
language: string (1 class)
meta_information: dict
q8600
DAWG.similar_keys
train
def similar_keys(self, key, replaces):
    """
    Returns all variants of ``key`` in this DAWG according to
    ``replaces``.

    ``replaces`` is an object obtained from
    ``DAWG.compile_replaces(mapping)`` where mapping is a dict that maps
    single-char unicode strings to other single-char unicode strings.

    This may be useful e.g. for handling single-character umlauts.
    """
    return self._similar_keys("", key, self.dct.ROOT, replaces)
python
{ "resource": "" }
q8601
DAWG.prefixes
train
def prefixes(self, key):
    '''
    Returns a list with keys of this DAWG that are prefixes of the ``key``.
    '''
    res = []
    index = self.dct.ROOT
    if not isinstance(key, bytes):
        key = key.encode('utf8')
    pos = 1
    for ch in key:
        index = self.dct.follow_char(int_from_byte(ch), index)
        if not index:
            break
        if self._has_value(index):
            res.append(key[:pos].decode('utf8'))
        pos += 1
    return res
python
{ "resource": "" }
q8602
BytesDAWG.similar_item_values
train
def similar_item_values(self, key, replaces):
    """
    Returns a list of values for all variants of the ``key`` in this
    DAWG according to ``replaces``.

    ``replaces`` is an object obtained from
    ``DAWG.compile_replaces(mapping)`` where mapping is a dict that maps
    single-char unicode strings to other single-char unicode strings.
    """
    return self._similar_item_values(0, key, self.dct.ROOT, replaces)
python
{ "resource": "" }
q8603
Dictionary.value
train
def value(self, index):
    "Gets a value from a given index."
    offset = units.offset(self._units[index])
    value_index = (index ^ offset) & units.PRECISION_MASK
    return units.value(self._units[value_index])
python
{ "resource": "" }
q8604
Dictionary.read
train
def read(self, fp):
    "Reads a dictionary from an input stream."
    base_size = struct.unpack(str("=I"), fp.read(4))[0]
    self._units.fromfile(fp, base_size)
python
{ "resource": "" }
q8605
Dictionary.contains
train
def contains(self, key):
    "Exact matching."
    index = self.follow_bytes(key, self.ROOT)
    if index is None:
        return False
    return self.has_value(index)
python
{ "resource": "" }
q8606
Dictionary.follow_char
train
def follow_char(self, label, index):
    "Follows a transition."
    offset = units.offset(self._units[index])
    next_index = (index ^ offset ^ label) & units.PRECISION_MASK
    if units.label(self._units[next_index]) != label:
        return None
    return next_index
python
{ "resource": "" }
q8607
Dictionary.follow_bytes
train
def follow_bytes(self, s, index):
    "Follows transitions."
    for ch in s:
        index = self.follow_char(int_from_byte(ch), index)
        if index is None:
            return None
    return index
python
{ "resource": "" }
q8608
Completer.next
train
def next(self):
    "Gets the next key."
    if not self._index_stack:
        return False

    index = self._index_stack[-1]

    if self._last_index != self._dic.ROOT:

        child_label = self._guide.child(index)  # UCharType

        if child_label:
            # Follows a transition to the first child.
            index = self._follow(child_label, index)
            if index is None:
                return False
        else:
            while True:
                sibling_label = self._guide.sibling(index)
                # Moves to the previous node.
                if len(self.key) > 0:
                    self.key.pop()
                    #self.key[-1] = 0

                self._index_stack.pop()
                if not self._index_stack:
                    return False

                index = self._index_stack[-1]
                if sibling_label:
                    # Follows a transition to the next sibling.
                    index = self._follow(sibling_label, index)
                    if index is None:
                        return False
                    break

    return self._find_terminal(index)
python
{ "resource": "" }
q8609
convert_response
train
def convert_response(check_response, project_id):
    """Computes an http status code and message from a `CheckResponse`

    The return value is a tuple (code, message, api_key_is_bad) where

    code: is the http status code
    message: is the message to return
    api_key_is_bad: indicates that a given api_key is bad

    Args:
      check_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckResponse`):
        the response from calling an api

    Returns:
      tuple(code, message, bool)
    """
    if not check_response or not check_response.checkErrors:
        return _IS_OK

    # only check the first error for now, as per ESP
    theError = check_response.checkErrors[0]
    error_tuple = _CHECK_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)
    if error_tuple[1].find(u'{') == -1:  # no replacements needed:
        return error_tuple

    updated_msg = error_tuple[1].format(project_id=project_id,
                                        detail=theError.detail or u'')
    return error_tuple[0], updated_msg, error_tuple[2]
python
{ "resource": "" }
q8610
sign
train
def sign(check_request):
    """Obtains a signature for an operation in a `CheckRequest`

    Args:
      check_request (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckRequest`):
        the request whose operation is to be signed

    Returns:
      string: a secure hash generated from the operation
    """
    if not isinstance(check_request, sc_messages.CheckRequest):
        raise ValueError(u'Invalid request')
    op = check_request.operation
    if op is None or op.operationName is None or op.consumerId is None:
        logging.error(u'Bad %s: not initialized => not signed', check_request)
        raise ValueError(u'check request must be initialized with an operation')
    md5 = hashlib.md5()
    md5.update(op.operationName.encode('utf-8'))
    md5.update(b'\x00')
    md5.update(op.consumerId.encode('utf-8'))
    if op.labels:
        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
    for value_set in op.metricValueSets:
        md5.update(b'\x00')
        md5.update(value_set.metricName.encode('utf-8'))
        for mv in value_set.metricValues:
            metric_value.update_hash(md5, mv)
    md5.update(b'\x00')
    if op.quotaProperties:
        # N.B: this differs from the cxx implementation, which serializes the
        # protobuf. This should be OK as the exact hash used does not need to
        # match across implementations.
        md5.update(repr(op.quotaProperties).encode('utf-8'))
    md5.update(b'\x00')
    return md5.digest()
python
{ "resource": "" }
q8611
Info.as_check_request
train
def as_check_request(self, timer=datetime.utcnow):
    """Makes a `ServicecontrolServicesCheckRequest` from this instance

    Returns:
      a ``ServicecontrolServicesCheckRequest``

    Raises:
      ValueError: if the fields in this instance are insufficient to
        create a valid ``ServicecontrolServicesCheckRequest``
    """
    if not self.service_name:
        raise ValueError(u'the service name must be set')
    if not self.operation_id:
        raise ValueError(u'the operation id must be set')
    if not self.operation_name:
        raise ValueError(u'the operation name must be set')
    op = super(Info, self).as_operation(timer=timer)
    labels = {}
    if self.android_cert_fingerprint:
        labels[_KNOWN_LABELS.SCC_ANDROID_CERT_FINGERPRINT.label_name] = self.android_cert_fingerprint
    if self.android_package_name:
        labels[_KNOWN_LABELS.SCC_ANDROID_PACKAGE_NAME.label_name] = self.android_package_name
    if self.client_ip:
        labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip
    if self.ios_bundle_id:
        labels[_KNOWN_LABELS.SCC_IOS_BUNDLE_ID.label_name] = self.ios_bundle_id
    if self.referer:
        labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer

    # Forcibly add system label reporting here, as the base service
    # config does not specify it as a label.
    labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = SERVICE_AGENT
    labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT

    op.labels = encoding.PyValueToMessage(
        sc_messages.Operation.LabelsValue, labels)
    check_request = sc_messages.CheckRequest(operation=op)
    return sc_messages.ServicecontrolServicesCheckRequest(
        serviceName=self.service_name,
        checkRequest=check_request)
python
{ "resource": "" }
q8612
Aggregator.check
train
def check(self, req):
    """Determine if ``req`` is in this instance's cache.

    Determine if there are cache hits for the request in this aggregator
    instance.

    Not in the cache

    If ``req`` is not in the cache, it returns ``None`` to indicate that
    the caller should send the request.

    Cache hit; response has errors

    When a cached CheckResponse has errors, it's assumed that ``req``
    would fail as well, so the cached CheckResponse is returned.  However,
    the first CheckRequest after the flush interval has elapsed should be
    sent to the server to refresh the CheckResponse; until it's received,
    subsequent CheckRequests should fail with the cached CheckResponse.

    Cache hit; response passed

    If the cached CheckResponse has no errors, it's assumed that ``req``
    will succeed as well, so the CheckResponse is returned, with the quota
    info updated to the same as requested.  The requested tokens are
    aggregated until flushed.

    Args:
      req (``ServicecontrolServicesCheckRequest``): to be sent to
        the service control service

    Raises:
      ValueError: if the ``req`` service_name is not the same as
        this instance's

    Returns:
      ``CheckResponse``: if an applicable cached response is available
        for use, or None if there is no applicable response
    """
    if self._cache is None:
        return None  # no cache, send request now

    if not isinstance(req, sc_messages.ServicecontrolServicesCheckRequest):
        raise ValueError(u'Invalid request')

    if req.serviceName != self.service_name:
        _logger.error(u'bad check(): service_name %s does not match ours %s',
                      req.serviceName, self.service_name)
        raise ValueError(u'Service name mismatch')

    check_request = req.checkRequest
    if check_request is None:
        _logger.error(u'bad check(): no check_request in %s', req)
        raise ValueError(u'Expected operation not set')

    op = check_request.operation
    if op is None:
        _logger.error(u'bad check(): no operation in %s', req)
        raise ValueError(u'Expected operation not set')

    if op.importance != sc_messages.Operation.ImportanceValueValuesEnum.LOW:
        return None  # op is important, send request now

    signature = sign(check_request)
    with self._cache as cache:
        _logger.debug(u'checking the cache for %r\n%s', signature, cache)
        item = cache.get(signature)
        if item is None:
            return None  # signal to caller to send req
        else:
            return self._handle_cached_response(req, item)
python
{ "resource": "" }
q8613
compare
train
def compare(a, b):
    """Compares two timestamps.

    ``a`` and ``b`` must be the same type; in addition to normal
    representations of timestamps that order naturally, they can be
    rfc3339-formatted strings.

    Args:
      a (string|object): a timestamp
      b (string|object): another timestamp

    Returns:
      int: -1 if a < b, 0 if a == b, or 1 if a > b

    Raises:
      ValueError: if a and b are not the same type
      ValueError: if a or b are strings but not in valid rfc3339 format
    """
    a_is_text = isinstance(a, basestring)
    b_is_text = isinstance(b, basestring)
    if type(a) != type(b) and not (a_is_text and b_is_text):
        _logger.error(u'Cannot compare %s to %s, types differ %s!=%s',
                      a, b, type(a), type(b))
        raise ValueError(u'cannot compare inputs of differing types')
    if a_is_text:
        a = from_rfc3339(a, with_nanos=True)
        b = from_rfc3339(b, with_nanos=True)
    if a < b:
        return -1
    elif a > b:
        return 1
    else:
        return 0
python
{ "resource": "" }
q8614
to_rfc3339
train
def to_rfc3339(timestamp):
    """Converts ``timestamp`` to an RFC 3339 date string format.

    ``timestamp`` can be either a ``datetime.datetime`` or a
    ``datetime.timedelta``.  Instances of the latter are assumed to be a
    delta from the beginning of the unix epoch, 1st of January, 1970

    The returned string is always Z-normalized.  Examples of the return
    format: '1972-01-01T10:00:20.021Z'

    Args:
      timestamp (datetime|timedelta): represents the timestamp to convert

    Returns:
      string: timestamp converted to a rfc3339 compliant string as above

    Raises:
      ValueError: if timestamp is not a datetime.datetime or
        datetime.timedelta
    """
    if isinstance(timestamp, datetime.datetime):
        timestamp = timestamp - _EPOCH_START
    if not isinstance(timestamp, datetime.timedelta):
        _logger.error(u'Could not convert %s to a rfc3339 time,', timestamp)
        raise ValueError(u'Invalid timestamp type')
    return strict_rfc3339.timestamp_to_rfc3339_utcoffset(
        timestamp.total_seconds())
python
{ "resource": "" }
q8615
from_rfc3339
train
def from_rfc3339(rfc3339_text, with_nanos=False):
    """Parses an RFC 3339 date string into a datetime.datetime.

    Example of accepted format: '1972-01-01T10:00:20.021-05:00'

    - By default, the result is a datetime.datetime
    - If with_nanos is true, the result is a 2-tuple,
      (datetime.datetime, nanos), where the second field represents the
      possible nanosecond-resolution component of the seconds

    Args:
      rfc3339_text (string): An rfc3339 formatted date string
      with_nanos (bool): Determines if nanoseconds should be parsed from
        the string

    Raises:
      ValueError: if ``rfc3339_text`` is invalid

    Returns:
      :class:`datetime.datetime`: when with_nanos is False
      tuple(:class:`datetime.datetime`, int): when with_nanos is True
    """
    timestamp = strict_rfc3339.rfc3339_to_timestamp(rfc3339_text)
    result = datetime.datetime.utcfromtimestamp(timestamp)
    if with_nanos:
        return (result, int((timestamp - int(timestamp)) * 1e9))
    else:
        return result
python
{ "resource": "" }
q8616
Info.as_operation
train
def as_operation(self, timer=datetime.utcnow):
    """Makes an ``Operation`` from this instance.

    Returns:
      an ``Operation``
    """
    now = timer()
    op = sc_messages.Operation(
        endTime=timestamp.to_rfc3339(now),
        startTime=timestamp.to_rfc3339(now),
        importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW)
    if self.operation_id:
        op.operationId = self.operation_id
    if self.operation_name:
        op.operationName = self.operation_name
    if self.api_key and self.api_key_valid:
        op.consumerId = u'api_key:' + self.api_key
    elif self.consumer_project_id:
        op.consumerId = u'project:' + self.consumer_project_id
    return op
python
{ "resource": "" }
q8617
Aggregator.as_operation
train
def as_operation(self):
    """Obtains a single `Operation` representing this instance's contents.

    Returns:
      :class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`
    """
    result = encoding.CopyProtoMessage(self._op)
    names = sorted(self._metric_values_by_name_then_sign.keys())
    for name in names:
        mvs = self._metric_values_by_name_then_sign[name]
        result.metricValueSets.append(
            sc_messages.MetricValueSet(
                metricName=name,
                metricValues=mvs.values()))
    return result
python
{ "resource": "" }
q8618
Aggregator.add
train
def add(self, other_op):
    """Combines `other_op` with the operation held by this aggregator.

    N.B. It merges the operations' log entries and metric values, but
    assumes the operations are otherwise consistent.  It's the caller's
    responsibility to ensure consistency.

    Args:
      other_op (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):
        an operation to merge into this one
    """
    self._op.logEntries.extend(other_op.logEntries)
    self._merge_timestamps(other_op)
    self._merge_metric_values(other_op)
python
{ "resource": "" }
q8619
create_exponential
train
def create_exponential(num_finite_buckets, growth_factor, scale):
    """Creates a new instance of distribution with exponential buckets

    Args:
      num_finite_buckets (int): initializes number of finite buckets
      growth_factor (float): initializes the growth factor
      scale (float): initializes the scale

    Return:
      :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`

    Raises:
      ValueError: if the args are invalid for creating an instance
    """
    if num_finite_buckets <= 0:
        raise ValueError(_BAD_NUM_FINITE_BUCKETS)
    if growth_factor <= 1.0:
        raise ValueError(_BAD_FLOAT_ARG % (u'growth factor', 1.0))
    if scale <= 0.0:
        raise ValueError(_BAD_FLOAT_ARG % (u'scale', 0.0))
    return sc_messages.Distribution(
        bucketCounts=[0] * (num_finite_buckets + 2),
        exponentialBuckets=sc_messages.ExponentialBuckets(
            numFiniteBuckets=num_finite_buckets,
            growthFactor=growth_factor,
            scale=scale))
python
{ "resource": "" }
q8620
create_linear
train
def create_linear(num_finite_buckets, width, offset):
    """Creates a new instance of distribution with linear buckets.

    Args:
      num_finite_buckets (int): initializes number of finite buckets
      width (float): initializes the width of each bucket
      offset (float): initializes the offset

    Return:
      :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`

    Raises:
      ValueError: if the args are invalid for creating an instance
    """
    if num_finite_buckets <= 0:
        raise ValueError(_BAD_NUM_FINITE_BUCKETS)
    if width <= 0.0:
        raise ValueError(_BAD_FLOAT_ARG % (u'width', 0.0))
    return sc_messages.Distribution(
        bucketCounts=[0] * (num_finite_buckets + 2),
        linearBuckets=sc_messages.LinearBuckets(
            numFiniteBuckets=num_finite_buckets,
            width=width,
            offset=offset))
python
{ "resource": "" }
q8621
create_explicit
train
def create_explicit(bounds):
    """Creates a new instance of distribution with explicit buckets.

    bounds is an iterable of ordered floats that define the explicit buckets

    Args:
      bounds (iterable[float]): initializes the bounds

    Return:
      :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`

    Raises:
      ValueError: if the args are invalid for creating an instance
    """
    safe_bounds = sorted(float(x) for x in bounds)
    if len(safe_bounds) != len(set(safe_bounds)):
        raise ValueError(u'Detected two elements of bounds that are the same')
    return sc_messages.Distribution(
        bucketCounts=[0] * (len(safe_bounds) + 1),
        explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds))
python
{ "resource": "" }
q8622
add_sample
train
def add_sample(a_float, dist):
    """Adds `a_float` to `dist`, updating its existing buckets.

    Args:
      a_float (float): a new value
      dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
        the Distribution being updated

    Raises:
      ValueError: if `dist` does not have known bucket options defined
      ValueError: if there are not enough bucket count fields in `dist`
    """
    dist_type, _ = _detect_bucket_option(dist)
    if dist_type == u'exponentialBuckets':
        _update_general_statistics(a_float, dist)
        _update_exponential_bucket_count(a_float, dist)
    elif dist_type == u'linearBuckets':
        _update_general_statistics(a_float, dist)
        _update_linear_bucket_count(a_float, dist)
    elif dist_type == u'explicitBuckets':
        _update_general_statistics(a_float, dist)
        _update_explicit_bucket_count(a_float, dist)
    else:
        _logger.error(u'Could not determine bucket option type for %s', dist)
        raise ValueError(u'Unknown bucket option type')
python
{ "resource": "" }
q8623
merge
train
def merge(prior, latest):
    """Merges `prior` into `latest`.

    N.B., this mutates `latest`.  It ensures that the statistics and
    histogram are updated to correctly include the original values from
    both instances.

    Args:
      prior (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
        an instance
      latest (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
        an instance to be updated

    Raises:
      ValueError: if the bucket options of `prior` and `latest` do not match
      ValueError: if the bucket counts of `prior` and `latest` do not match
    """
    if not _buckets_nearly_equal(prior, latest):
        _logger.error(u'Bucket options do not match. From %s To: %s',
                      prior, latest)
        raise ValueError(u'Bucket options do not match')
    if len(prior.bucketCounts) != len(latest.bucketCounts):
        _logger.error(u'Bucket count sizes do not match. From %s To: %s',
                      prior, latest)
        raise ValueError(u'Bucket count sizes do not match')
    if prior.count <= 0:
        return
    old_count = latest.count
    old_mean = latest.mean
    old_summed_variance = latest.sumOfSquaredDeviation
    bucket_counts = latest.bucketCounts

    # Update the latest
    latest.count += prior.count
    latest.maximum = max(prior.maximum, latest.maximum)
    latest.minimum = min(prior.minimum, latest.minimum)
    latest.mean = ((old_count * old_mean + prior.count * prior.mean) /
                   latest.count)
    latest.sumOfSquaredDeviation = (
        old_summed_variance + prior.sumOfSquaredDeviation +
        old_count * (latest.mean - old_mean) ** 2 +
        prior.count * (latest.mean - prior.mean) ** 2)
    for i, (x, y) in enumerate(zip(prior.bucketCounts, bucket_counts)):
        bucket_counts[i] = x + y
python
{ "resource": "" }
q8624
_buckets_nearly_equal
train
def _buckets_nearly_equal(a_dist, b_dist):
    """Determines whether two `Distributions` are nearly equal.

    Args:
      a_dist (:class:`Distribution`): an instance
      b_dist (:class:`Distribution`): another instance

    Return:
      boolean: `True` if the two instances are approximately equal,
        otherwise False
    """
    a_type, a_buckets = _detect_bucket_option(a_dist)
    b_type, b_buckets = _detect_bucket_option(b_dist)
    if a_type != b_type:
        return False
    elif a_type == u'linearBuckets':
        return _linear_buckets_nearly_equal(a_buckets, b_buckets)
    elif a_type == u'exponentialBuckets':
        return _exponential_buckets_nearly_equal(a_buckets, b_buckets)
    elif a_type == u'explicitBuckets':
        return _explicit_buckets_nearly_equal(a_buckets, b_buckets)
    else:
        return False
python
{ "resource": "" }
q8625
_update_general_statistics
train
def _update_general_statistics(a_float, dist):
    """Adds a_float to distribution, updating the statistics fields.

    Args:
      a_float (float): a new value
      dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
        the Distribution being updated
    """
    if not dist.count:
        dist.count = 1
        dist.maximum = a_float
        dist.minimum = a_float
        dist.mean = a_float
        dist.sumOfSquaredDeviation = 0
    else:
        old_count = dist.count
        old_mean = dist.mean
        new_mean = ((old_count * old_mean) + a_float) / (old_count + 1)
        delta_sum_squares = (a_float - old_mean) * (a_float - new_mean)
        dist.count += 1
        dist.mean = new_mean
        dist.maximum = max(a_float, dist.maximum)
        dist.minimum = min(a_float, dist.minimum)
        dist.sumOfSquaredDeviation += delta_sum_squares
python
{ "resource": "" }
q8626
_update_exponential_bucket_count
train
def _update_exponential_bucket_count(a_float, dist):
    """Adds `a_float` to `dist`, updating its exponential buckets.

    Args:
      a_float (float): a new value
      dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
        the Distribution being updated

    Raises:
      ValueError: if `dist` does not already have exponential buckets defined
      ValueError: if there are not enough bucket count fields in `dist`
    """
    buckets = dist.exponentialBuckets
    if buckets is None:
        raise ValueError(_BAD_UNSET_BUCKETS % (u'exponential buckets'))
    bucket_counts = dist.bucketCounts
    num_finite_buckets = buckets.numFiniteBuckets
    if len(bucket_counts) < num_finite_buckets + 2:
        raise ValueError(_BAD_LOW_BUCKET_COUNT)
    scale = buckets.scale
    factor = buckets.growthFactor
    if a_float <= scale:
        index = 0
    else:
        index = 1 + int(math.log(a_float / scale) / math.log(factor))
        index = min(index, num_finite_buckets + 1)
    bucket_counts[index] += 1
    _logger.debug(u'scale:%f, factor:%f, sample:%f, index:%d',
                  scale, factor, a_float, index)
python
{ "resource": "" }
q8627
_update_linear_bucket_count
train
def _update_linear_bucket_count(a_float, dist):
    """Adds `a_float` to `dist`, updating its linear buckets.

    Args:
      a_float (float): a new value
      dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
        the Distribution being updated

    Raises:
      ValueError: if `dist` does not already have linear buckets defined
      ValueError: if there are not enough bucket count fields in `dist`
    """
    buckets = dist.linearBuckets
    if buckets is None:
        raise ValueError(_BAD_UNSET_BUCKETS % (u'linear buckets'))
    bucket_counts = dist.bucketCounts
    num_finite_buckets = buckets.numFiniteBuckets
    if len(bucket_counts) < num_finite_buckets + 2:
        raise ValueError(_BAD_LOW_BUCKET_COUNT)
    width = buckets.width
    lower = buckets.offset
    upper = lower + (num_finite_buckets * width)
    if a_float < lower:
        index = 0
    elif a_float >= upper:
        index = num_finite_buckets + 1
    else:
        index = 1 + int((a_float - lower) / width)
    bucket_counts[index] += 1
    _logger.debug(u'upper:%f, lower:%f, width:%f, sample:%f, index:%d',
                  upper, lower, width, a_float, index)
python
{ "resource": "" }
q8628
_update_explicit_bucket_count
train
def _update_explicit_bucket_count(a_float, dist):
    """Adds `a_float` to `dist`, updating its explicit buckets.

    Args:
      a_float (float): a new value
      dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
        the Distribution being updated

    Raises:
      ValueError: if `dist` does not already have explicit buckets defined
      ValueError: if there are not enough bucket count fields in `dist`
    """
    buckets = dist.explicitBuckets
    if buckets is None:
        raise ValueError(_BAD_UNSET_BUCKETS % (u'explicit buckets'))
    bucket_counts = dist.bucketCounts
    bounds = buckets.bounds
    if len(bucket_counts) < len(bounds) + 1:
        raise ValueError(_BAD_LOW_BUCKET_COUNT)
    bucket_counts[bisect.bisect(bounds, a_float)] += 1
python
{ "resource": "" }
q8629
scheduler.enterabs
train
def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel):
    """Enter a new event in the queue at an absolute time.

    Returns an ID for the event which can be used to remove it,
    if necessary.
    """
    if kwargs is _sentinel:
        kwargs = {}
    event = Event(time, priority, action, argument, kwargs)
    with self._lock:
        heapq.heappush(self._queue, event)
    return event
python
{ "resource": "" }
q8630
scheduler.enter
train
def enter(self, delay, priority, action, argument=(), kwargs=_sentinel):
    """A variant that specifies the time as a relative time.

    This is actually the more commonly used interface.
    """
    time = self.timefunc() + delay
    return self.enterabs(time, priority, action, argument, kwargs)
python
{ "resource": "" }
q8631
scheduler.cancel
train
def cancel(self, event):
    """Remove an event from the queue.

    This must be presented the ID as returned by enter().
    If the event is not in the queue, this raises ValueError.
    """
    with self._lock:
        self._queue.remove(event)
        heapq.heapify(self._queue)
python
{ "resource": "" }
q8632
check_valid
train
def check_valid(money):
    """Determine if an instance of `Money` is valid.

    Args:
      money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`):
        the instance to test

    Raises:
      ValueError: if the money instance is invalid
    """
    if not isinstance(money, sc_messages.Money):
        raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))
    currency = money.currencyCode
    if not currency or len(currency) != 3:
        raise ValueError(_MSG_3_LETTERS_LONG)
    units = money.units
    nanos = money.nanos
    if ((units > 0) and (nanos < 0)) or ((units < 0) and (nanos > 0)):
        raise ValueError(_MSG_UNITS_NANOS_MISMATCH)
    if abs(nanos) > MAX_NANOS:
        raise ValueError(_MSG_NANOS_OOB)
python
{ "resource": "" }
q8633
add
train
def add(a, b, allow_overflow=False):
    """Adds two instances of `Money`.

    Args:
      a (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`):
        one money value
      b (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`):
        another money value
      allow_overflow: determines if the addition is allowed to overflow

    Return:
      `Money`: an instance of Money

    Raises:
      ValueError: if the inputs do not have the same currency code
      OverflowError: if the sum overflows and allow_overflow is not `True`
    """
    for m in (a, b):
        if not isinstance(m, sc_messages.Money):
            raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))
    if a.currencyCode != b.currencyCode:
        raise ValueError(u'Money values need the same currency to be summed')
    nano_carry, nanos_sum = _sum_nanos(a, b)
    units_sum_no_carry = a.units + b.units
    units_sum = units_sum_no_carry + nano_carry

    # Adjust when units_sum and nanos_sum have different signs
    if units_sum > 0 and nanos_sum < 0:
        units_sum -= 1
        nanos_sum += _BILLION
    elif units_sum < 0 and nanos_sum > 0:
        units_sum += 1
        nanos_sum -= _BILLION

    # Return the result, detecting overflow if it occurs
    sign_a = _sign_of(a)
    sign_b = _sign_of(b)
    if sign_a > 0 and sign_b > 0 and units_sum >= _INT64_MAX:
        if not allow_overflow:
            raise OverflowError(u'Money addition positive overflow')
        else:
            return sc_messages.Money(units=_INT64_MAX,
                                     nanos=MAX_NANOS,
                                     currencyCode=a.currencyCode)
    elif (sign_a < 0 and sign_b < 0 and
          (units_sum_no_carry <= -_INT64_MAX or units_sum <= -_INT64_MAX)):
        if not allow_overflow:
            raise OverflowError(u'Money addition negative overflow')
        else:
            return sc_messages.Money(units=_INT64_MIN,
                                     nanos=-MAX_NANOS,
                                     currencyCode=a.currencyCode)
    else:
        return sc_messages.Money(units=units_sum,
                                 nanos=nanos_sum,
                                 currencyCode=a.currencyCode)
python
{ "resource": "" }
q8634
_sign_of
train
def _sign_of(money):
    """Determines the amount sign of a money instance

    Args:
      money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`):
        the instance to test

    Return:
      int: 1, 0 or -1
    """
    units = money.units
    nanos = money.nanos
    if units:
        if units > 0:
            return 1
        elif units < 0:
            return -1
    if nanos:
        if nanos > 0:
            return 1
        elif nanos < 0:
            return -1
    return 0
python
{ "resource": "" }
q8635
_check_jwt_claims
train
def _check_jwt_claims(jwt_claims):
    """Checks whether the JWT claims should be accepted.

    Specifically, this method checks the "exp" claim and the "nbf" claim
    (if present), and raises UnauthenticatedException if 1) the current
    time is before the time identified by the "nbf" claim, or 2) the
    current time is equal to or after the time identified by the "exp"
    claim.

    Args:
      jwt_claims: the JWT claims whose expiration is to be checked.

    Raises:
      UnauthenticatedException: When the "exp" claim is malformed or the
        JWT has already expired.
    """
    current_time = time.time()

    expiration = jwt_claims[u"exp"]
    if not isinstance(expiration, INT_TYPES):
        raise suppliers.UnauthenticatedException(
            u'Malformed claim: "exp" must be an integer')
    if current_time >= expiration:
        raise suppliers.UnauthenticatedException(
            u"The auth token has already expired")

    if u"nbf" not in jwt_claims:
        return

    not_before_time = jwt_claims[u"nbf"]
    if not isinstance(not_before_time, INT_TYPES):
        raise suppliers.UnauthenticatedException(
            u'Malformed claim: "nbf" must be an integer')
    if current_time < not_before_time:
        raise suppliers.UnauthenticatedException(
            u'Current time is less than the "nbf" time')
python
{ "resource": "" }
q8636
_verify_required_claims_exist
train
def _verify_required_claims_exist(jwt_claims):
    """Verifies that the required claims exist.

    Args:
      jwt_claims: the JWT claims to be verified.

    Raises:
      UnauthenticatedException: if some claim doesn't exist.
    """
    for claim_name in [u"aud", u"exp", u"iss", u"sub"]:
        if claim_name not in jwt_claims:
            raise suppliers.UnauthenticatedException(u'Missing "%s" claim' % claim_name)
python
{ "resource": "" }
q8637
Authenticator.authenticate
train
def authenticate(self, auth_token, auth_info, service_name):
    """Authenticates the current auth token.

    Args:
      auth_token: the auth token.
      auth_info: the auth configurations of the API method being called.
      service_name: the name of this service.

    Returns:
      A constructed UserInfo object representing the identity of the caller.

    Raises:
      UnauthenticatedException: When
        * the issuer is not allowed;
        * the audiences are not allowed;
        * the auth token has already expired.
    """
    try:
        jwt_claims = self.get_jwt_claims(auth_token)
    except Exception as error:
        raise suppliers.UnauthenticatedException(u"Cannot decode the auth token",
                                                 error)
    _check_jwt_claims(jwt_claims)

    user_info = UserInfo(jwt_claims)

    issuer = user_info.issuer
    if issuer not in self._issuers_to_provider_ids:
        raise suppliers.UnauthenticatedException(u"Unknown issuer: " + issuer)
    provider_id = self._issuers_to_provider_ids[issuer]

    if not auth_info.is_provider_allowed(provider_id):
        raise suppliers.UnauthenticatedException(u"The requested method does not "
                                                 u"allow provider id: " + provider_id)

    # Check the audiences decoded from the auth token.  The auth token is
    # allowed when 1) an audience is equal to the service name, or 2) at
    # least one audience is allowed in the method configuration.
    audiences = user_info.audiences
    has_service_name = service_name in audiences
    allowed_audiences = auth_info.get_allowed_audiences(provider_id)
    intersected_audiences = set(allowed_audiences).intersection(audiences)
    if not has_service_name and not intersected_audiences:
        raise suppliers.UnauthenticatedException(u"Audiences not allowed")

    return user_info
python
{ "resource": "" }
q8638
Authenticator.get_jwt_claims
train
def get_jwt_claims(self, auth_token):
    """Decodes the auth_token into JWT claims represented as a JSON object.

    This method first tries to look up the cache and returns the result
    immediately on a cache hit.  On a cache miss, the method tries to
    decode the given auth token, verify its signature, and check the
    existence of required JWT claims.  When successful, the decoded JWT
    claims are loaded into the cache and then returned.

    Args:
      auth_token: the auth token to be decoded.

    Returns:
      The decoded JWT claims.

    Raises:
      UnauthenticatedException: When the signature verification fails, or
        when required claims are missing.
    """
    def _decode_and_verify():
        jwt_claims = jwt.JWT().unpack(auth_token).payload()
        _verify_required_claims_exist(jwt_claims)
        issuer = jwt_claims[u"iss"]
        keys = self._jwks_supplier.supply(issuer)
        try:
            return jws.JWS().verify_compact(auth_token, keys)
        except (jwkest.BadSignature, jws.NoSuitableSigningKeys,
                jws.SignerAlgError) as exception:
            raise suppliers.UnauthenticatedException(u"Signature verification failed",
                                                     exception)

    return self._cache.get_or_create(auth_token, _decode_and_verify)
python
{ "resource": "" }
q8639
create
train
def create(options, timer=None, use_deque=True):
    """Create a cache specified by ``options``

    ``options`` is an instance of either
    :class:`endpoints_management.control.caches.CheckOptions` or
    :class:`endpoints_management.control.caches.ReportOptions`

    The returned cache is wrapped in a :class:`LockedObject`, requiring it
    to be accessed in a with statement that gives synchronized access

    Example:
      >>> options = CheckOptions()
      >>> synced_cache = create(options)
      >>> with synced_cache as cache:  # acquire the lock
      ...    cache['a_key'] = 'a_value'

    Args:
      options (object): an instance of either of the options classes

    Returns:
      :class:`cachetools.Cache`: the cache implementation specified by
        options, or None if options is ``None`` or options.num_entries is
        not positive

    Raises:
      ValueError: if options is not a supported type
    """
    if options is None:  # no options, don't create cache
        return None

    if not isinstance(options, (CheckOptions, QuotaOptions, ReportOptions)):
        _logger.error(u'make_cache(): bad options %s', options)
        raise ValueError(u'Invalid options')

    if options.num_entries <= 0:
        _logger.debug(u"did not create cache, options was %s", options)
        return None

    _logger.debug(u"creating a cache from %s", options)
    if options.flush_interval > ZERO_INTERVAL:
        # options always has a flush_interval, but may have an expiration
        # field.  If the expiration is present, use that instead of the
        # flush_interval for the ttl
        ttl = getattr(options, u'expiration', options.flush_interval)
        cache_cls = DequeOutTTLCache if use_deque else cachetools.TTLCache
        return LockedObject(
            cache_cls(
                options.num_entries,
                ttl=ttl.total_seconds(),
                timer=to_cache_timer(timer)
            ))

    cache_cls = DequeOutLRUCache if use_deque else cachetools.LRUCache
    return LockedObject(cache_cls(options.num_entries))
python
{ "resource": "" }
q8640
to_cache_timer
train
def to_cache_timer(datetime_func):
    """Converts a datetime_func to a timestamp_func.

    Args:
      datetime_func (callable[[], datetime]): a func that returns the
        current time

    Returns:
      time_func (callable[[], float]): a func that returns the seconds
        since the epoch
    """
    if datetime_func is None:
        datetime_func = datetime.utcnow

    def _timer():
        """Return the timestamp since the epoch."""
        return (datetime_func() - datetime(1970, 1, 1)).total_seconds()

    return _timer
python
{ "resource": "" }
q8641
distribute
train
def distribute(build):
    """ distribute the uranium package """
    build.packages.install("wheel")
    build.packages.install("twine")
    build.executables.run([
        "python", "setup.py",
        "sdist", "bdist_wheel", "--universal", "upload",
    ])
    build.executables.run([
        "twine", "upload", "dist/*"
    ])
python
{ "resource": "" }
q8642
use_gae_thread
train
def use_gae_thread():
    """Makes ``Client``s started after this use the appengine thread class."""
    global _THREAD_CLASS  # pylint: disable=global-statement
    try:
        from google.appengine.api.background_thread import background_thread
        _THREAD_CLASS = background_thread.BackgroundThread
    except ImportError:
        _logger.error(
            u'Could not install appengine background threads!'
            u' Please install the python AppEngine SDK and use this from there')
python
{ "resource": "" }
q8643
Client.start
train
def start(self):
    """Starts processing.

    Calling this method

    - starts the thread that regularly flushes all enabled caches.
    - enables the other methods on the instance to be called successfully
    """
    with self._lock:
        if self._running:
            return

        self._stopped = False
        self._running = True
        self._start_idle_timer()
        _logger.debug(u'starting thread of type %s to run the scheduler',
                      _THREAD_CLASS)
        self._thread = create_thread(target=self._schedule_flushes)
        try:
            self._thread.start()
        except Exception:  # pylint: disable=broad-except
            _logger.warn(
                u'no scheduler thread, scheduler.run() will be invoked by report(...)',
                exc_info=True)
            self._thread = None
        self._initialize_flushing()
python
{ "resource": "" }
q8644
Client.check
train
def check(self, check_req):
    """Process a check request.

    The req is first passed to the check_aggregator.  If there is a valid
    cached response, that is returned, otherwise a response is obtained
    from the transport.

    Args:
      check_req (``ServicecontrolServicesCheckRequest``): to be sent to
        the service control service

    Returns:
      ``CheckResponse``: either the cached response if one is applicable
        or a response from making a transport request, or None if the
        request to the transport fails
    """
    self.start()

    res = self._check_aggregator.check(check_req)
    if res:
        _logger.debug(u'using cached check response for %s: %s',
                      check_req, res)
        return res

    # Application code should not fail because check requests don't
    # complete.  They should fail open, so here simply log the error and
    # return None to indicate that no response was obtained.
    try:
        transport = self._create_transport()
        resp = transport.services.Check(check_req)
        self._check_aggregator.add_response(check_req, resp)
        return resp
    except exceptions.Error:  # only sink apitools errors
        _logger.error(u'direct send of check request failed %s',
                      check_req, exc_info=True)
        return None
python
{ "resource": "" }
q8645
Client.report
train
def report(self, report_req):
    """Processes a report request.

    It will aggregate it with prior report_requests to be sent later, or
    it will send it immediately if that's appropriate.
    """
    self.start()

    # no thread running, run the scheduler to ensure any pending
    # flush tasks are executed.
    if self._run_scheduler_directly:
        self._scheduler.run(blocking=False)

    if not self._report_aggregator.report(report_req):
        _logger.debug(u'need to send a report request directly')
        try:
            transport = self._create_transport()
            transport.services.Report(report_req)
        except exceptions.Error:  # only sink apitools errors
            _logger.error(u'direct send for report request failed',
                          exc_info=True)
python
{ "resource": "" }
q8646
create
train
def create(labels=None, **kw):
    """Constructs a new metric value.

    This is an alternative to the MetricValue constructor that simplifies
    the specification of labels.  Rather than having to create a
    MetricValue.LabelsValue instance, the labels can be passed as a plain
    dict of strings.

    Args:
      labels (dict[string, string]): the labels to set on the metric value
      **kw: any other keyword args valid in the MetricValue constructor

    Returns:
      :class:`MetricValue`: the created instance
    """
    if labels is not None:
        kw[u'labels'] = encoding.PyValueToMessage(MetricValue.LabelsValue, labels)
    return MetricValue(**kw)
python
{ "resource": "" }
q8647
merge
train
def merge(metric_kind, prior, latest):
    """Merges `prior` and `latest`

    Args:
      metric_kind (:class:`MetricKind`): indicates the kind of metrics
        being merged
      prior (:class:`MetricValue`): a prior instance of the metric
      latest (:class:`MetricValue`): the latest instance of the metric
    """
    prior_type, _ = _detect_value(prior)
    latest_type, _ = _detect_value(latest)
    if prior_type != latest_type:
        _logger.warn(u'Metric values are not compatible: %s, %s',
                     prior, latest)
        raise ValueError(u'Incompatible delta metric values')
    if prior_type is None:
        _logger.warn(u'Bad metric values, types not known for : %s, %s',
                     prior, latest)
        raise ValueError(u'Unsupported delta metric types')

    if metric_kind == MetricKind.DELTA:
        return _merge_delta_metric(prior, latest)
    else:
        return _merge_cumulative_or_gauge_metrics(prior, latest)
python
{ "resource": "" }
q8648
update_hash
train
def update_hash(a_hash, mv):
    """Adds ``mv`` to ``a_hash``

    Args:
      a_hash (`Hash`): the secure hash, e.g created by hashlib.md5
      mv (:class:`MetricValue`): the instance to add to the hash
    """
    if mv.labels:
        signing.add_dict_to_hash(a_hash, encoding.MessageToPyValue(mv.labels))
    money_value = mv.get_assigned_value(u'moneyValue')
    if money_value is not None:
        a_hash.update(b'\x00')
        a_hash.update(money_value.currencyCode.encode('utf-8'))
python
{ "resource": "" }
q8649
sign
train
def sign(mv):
    """Obtains a signature for a `MetricValue`

    Args:
      mv (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricValue`):
        a MetricValue that's part of an operation

    Returns:
      string: a unique signature for that operation
    """
    md5 = hashlib.md5()
    update_hash(md5, mv)
    return md5.digest()
python
{ "resource": "" }
q8650
KeyUriSupplier.supply
train
def supply(self, issuer):
    """Supplies the `jwks_uri` for the given issuer.

    Args:
      issuer: the issuer.

    Returns:
      The `jwks_uri` that is either statically configured or retrieved via
      OpenId discovery.  None is returned when the issuer is unknown or
      the OpenId discovery fails.
    """
    issuer_uri_config = self._issuer_uri_configs.get(issuer)

    if not issuer_uri_config:
        # The issuer is unknown.
        return

    jwks_uri = issuer_uri_config.jwks_uri
    if jwks_uri:
        # When jwks_uri is set, return it directly.
        return jwks_uri

    # When jwksUri is empty, we try to retrieve it through the OpenID
    # discovery.
    open_id_valid = issuer_uri_config.open_id_valid
    if open_id_valid:
        discovered_jwks_uri = _discover_jwks_uri(issuer)
        self._issuer_uri_configs[issuer] = IssuerUriConfig(False,
                                                           discovered_jwks_uri)
        return discovered_jwks_uri
python
{ "resource": "" }
q8651
JwksSupplier.supply
train
def supply(self, issuer):
    """Supplies the `Json Web Key Set` for the given issuer.

    Args:
      issuer: the issuer.

    Returns:
      The successfully retrieved Json Web Key Set.  None is returned if
      the issuer is unknown or the retrieval process fails.

    Raises:
      UnauthenticatedException: When this method cannot supply JWKS for
        the given issuer (e.g. unknown issuer, HTTP request error).
    """
    def _retrieve_jwks():
        """Retrieve the JWKS from the given jwks_uri when cache misses."""
        jwks_uri = self._key_uri_supplier.supply(issuer)

        if not jwks_uri:
            raise UnauthenticatedException(u"Cannot find the `jwks_uri` for issuer "
                                           u"%s: either the issuer is unknown or "
                                           u"the OpenID discovery failed" % issuer)

        try:
            response = requests.get(jwks_uri)
            json_response = response.json()
        except Exception as exception:
            message = u"Cannot retrieve valid verification keys from the `jwks_uri`"
            raise UnauthenticatedException(message, exception)

        if u"keys" in json_response:
            # De-serialize the JSON as a JWKS object.
            jwks_keys = jwk.KEYS()
            jwks_keys.load_jwks(response.text)
            return jwks_keys._keys
        else:
            # The JSON is a dictionary mapping from key id to X.509
            # certificates.  Thus we extract the public key from the X.509
            # certificates and construct a JWKS object.
            return _extract_x509_certificates(json_response)

    return self._jwks_cache.get_or_create(issuer, _retrieve_jwks)
python
{ "resource": "" }
q8652
KnownLabels.matches
train
def matches(self, desc):
    """Determines if a given label descriptor matches this enum instance

    Args:
      desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
        the instance to test

    Return:
      `True` if desc is supported, otherwise `False`
    """
    desc_value_type = desc.valueType or ValueType.STRING  # default not parsed
    return (self.label_name == desc.key and
            self.value_type == desc_value_type)
python
{ "resource": "" }
q8653
KnownLabels.do_labels_update
train
def do_labels_update(self, info, labels):
    """Updates a dictionary of labels using the assigned update_op_func

    Args:
      info (:class:`endpoints_management.control.report_request.Info`): the
        info instance to update
      labels (dict[string, string]): the labels dictionary

    Return:
      `None`; ``labels`` is updated in place
    """
    if self.update_label_func:
        self.update_label_func(self.label_name, info, labels)
python
{ "resource": "" }
q8654
KnownLabels.is_supported
train
def is_supported(cls, desc):
    """Determines if the given label descriptor is supported.

    Args:
      desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
        the label descriptor to test

    Return:
      `True` if desc is supported, otherwise `False`
    """
    for l in cls:
        if l.matches(desc):
            return True
    return False
python
{ "resource": "" }
q8655
extract_report_spec
train
def extract_report_spec(
        service,
        label_is_supported=label_descriptor.KnownLabels.is_supported,
        metric_is_supported=metric_descriptor.KnownMetrics.is_supported):
    """Obtains the used logs, metrics and labels from a service.

    label_is_supported and metric_is_supported are filter functions used
    to determine if label_descriptors or metric_descriptors found in the
    service are supported.

    Args:
      service (:class:`endpoints_management.gen.servicecontrol_v1_messages.Service`):
        a service instance
      label_is_supported (:func): determines if a given label is supported
      metric_is_supported (:func): determines if a given metric is supported

    Return:
      tuple: (
        logs (set[string]),      # the logs to report to
        metrics (list[string]),  # the metrics to use
        labels (list[string])    # the labels to add
      )
    """
    resource_descs = service.monitoredResources
    labels_dict = {}
    logs = set()
    if service.logging:
        logs = _add_logging_destinations(
            service.logging.producerDestinations,
            resource_descs,
            service.logs,
            labels_dict,
            label_is_supported
        )
    metrics_dict = {}
    monitoring = service.monitoring
    if monitoring:
        for destinations in (monitoring.consumerDestinations,
                             monitoring.producerDestinations):
            _add_monitoring_destinations(destinations,
                                         resource_descs,
                                         service.metrics,
                                         metrics_dict,
                                         metric_is_supported,
                                         labels_dict,
                                         label_is_supported)
    return logs, metrics_dict.keys(), labels_dict.keys()
python
{ "resource": "" }
q8656
MethodRegistry._extract_auth_config
train
def _extract_auth_config(self):
    """Obtains the authentication configurations."""
    service = self._service
    if not service.authentication:
        return {}
    auth_infos = {}
    for auth_rule in service.authentication.rules:
        selector = auth_rule.selector
        provider_ids_to_audiences = {}
        for requirement in auth_rule.requirements:
            provider_id = requirement.providerId
            if provider_id and requirement.audiences:
                audiences = requirement.audiences.split(u",")
                provider_ids_to_audiences[provider_id] = audiences
        auth_infos[selector] = AuthInfo(provider_ids_to_audiences)
    return auth_infos
python
{ "resource": "" }
q8657
MethodRegistry._extract_methods
train
def _extract_methods(self):
    """Obtains the methods used in the service."""
    service = self._service
    all_urls = set()
    urls_with_options = set()
    if not service.http:
        return

    for rule in service.http.rules:
        http_method, url = _detect_pattern_option(rule)
        if not url or not http_method or not rule.selector:
            _logger.error(u'invalid HTTP binding encountered')
            continue

        # Obtain the method info
        method_info = self._get_or_create_method_info(rule.selector)
        if rule.body:
            method_info.body_field_path = rule.body
        if not self._register(http_method, url, method_info):
            continue  # detected an invalid url
        all_urls.add(url)

        if http_method == self._OPTIONS:
            urls_with_options.add(url)

    self._add_cors_options_selectors(all_urls - urls_with_options)
    self._update_usage()
    self._update_system_parameters()
python
{ "resource": "" }
q8658
fetch_service_config
train
def fetch_service_config(service_name=None, service_version=None):
    """Fetches the service config from Google Service Management API.

    Args:
      service_name: the service name.  When this argument is unspecified,
        this method uses the value of the "SERVICE_NAME" environment
        variable as the service name, and raises ValueError if the
        environment variable is unset.
      service_version: the service version.  When this argument is
        unspecified, this method uses the value of the "SERVICE_VERSION"
        environment variable as the service version, and raises ValueError
        if the environment variable is unset.

    Returns:
      the fetched service config JSON object.

    Raises:
      ValueError: when the service name/version is neither provided as an
        argument nor set as an environment variable; or when the fetched
        service config fails validation.
      Exception: when the Google Service Management API returns a non-200
        response.
    """
    if not service_name:
        service_name = _get_env_var_or_raise(_SERVICE_NAME_ENV_KEY)
    if not service_version:
        service_version = _get_service_version(_SERVICE_VERSION_ENV_KEY,
                                               service_name)

    _logger.debug(u'Contacting Service Management API for service %s version %s',
                  service_name, service_version)
    response = _make_service_config_request(service_name, service_version)
    _logger.debug(u'obtained service json from the management api:\n%s',
                  response.data)
    service = encoding.JsonToMessage(messages.Service, response.data)
    _validate_service_config(service, service_name, service_version)
    return service
python
{ "resource": "" }
q8659
add_all
train
def add_all(application, project_id, control_client,
            loader=service.Loaders.FROM_SERVICE_MANAGEMENT):
    """Adds all endpoints middleware to a wsgi application.

    Sets up application to use all default endpoints middleware.

    Example:

      >>> application = MyWsgiApp()  # an existing WSGI application
      >>>
      >>> # the name of the controlled service
      >>> service_name = 'my-service-name'
      >>>
      >>> # A GCP project with service control enabled
      >>> project_id = 'my-project-id'
      >>>
      >>> # wrap the app for service control
      >>> from endpoints_management.control import wsgi
      >>> control_client = client.Loaders.DEFAULT.load(service_name)
      >>> control_client.start()
      >>> wrapped_app = add_all(application, project_id, control_client)
      >>>
      >>> # now use wrapped_app in place of app

    Args:
      application: the wrapped wsgi application
      project_id: the project_id that's providing service control support
      control_client: the service control client instance
      loader (:class:`endpoints_management.control.service.Loader`): loads
        the service instance that configures this instance's behaviour
    """
    return ConfigFetchWrapper(application, project_id, control_client, loader)
python
{ "resource": "" }
q8660
_sign_operation
train
def _sign_operation(op):
    """Obtains a signature for an operation in a ReportRequest.

    Args:
      op (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):
        an operation used in a `ReportRequest`

    Returns:
      string: a unique signature for that operation
    """
    md5 = hashlib.md5()
    md5.update(op.consumerId.encode('utf-8'))
    md5.update(b'\x00')
    md5.update(op.operationName.encode('utf-8'))
    if op.labels:
        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
    return md5.digest()
python
{ "resource": "" }
q8661
ReportingRules.from_known_inputs
train
def from_known_inputs(cls, logs=None, metric_names=None, label_names=None):
    """An alternate constructor that assumes known metrics and labels.

    This differs from the default constructor in that the metrics and
    labels are iterables of names of 'known' metrics and labels
    respectively.  The names are used to obtain the metrics and labels
    from :class:`endpoints_management.control.metric_descriptor.KnownMetrics`
    and :class:`endpoints_management.control.label_descriptor.KnownLabels`
    respectively.

    Names that don't correspond to a known metric or label are ignored, as
    are metrics or labels that don't yet have a way of updating the
    `ReportRequest` operation.

    Args:
      logs (iterable[string]): the names of logs to be included in the
        `ReportRequest`
      metric_names (iterable[string]): the names of known metrics to be
        added to the `ReportRequest`
      label_names (iterable[string]): the names of known labels to be
        added to the `ReportRequest`
    """
    if not metric_names:
        metric_names = ()
    if not label_names:
        label_names = ()
    known_labels = []
    known_metrics = []
    # pylint: disable=no-member
    # pylint is not aware of the __members__ attributes
    for l in label_descriptor.KnownLabels.__members__.values():
        if l.update_label_func and l.label_name in label_names:
            known_labels.append(l)
    for m in metric_descriptor.KnownMetrics.__members__.values():
        if m.update_op_func and m.metric_name in metric_names:
            known_metrics.append(m)
    return cls(logs=logs, metrics=known_metrics, labels=known_labels)
python
{ "resource": "" }
q8662
Info._as_log_entry
train
def _as_log_entry(self, name, now):
    """Makes a `LogEntry` from this instance for the given log name.

    Args:
      name (string): the name of the log the entry is generated for
      now (:class:`datetime.DateTime`): the current time

    Return:
      a ``LogEntry`` generated from this instance with the given name
      and timestamp

    Raises:
      ValueError: if the fields in this instance are insufficient to
        create a valid ``ServicecontrolServicesReportRequest``
    """
    # initialize the struct with fields that are always present
    d = {
        u'http_response_code': self.response_code,
        u'timestamp': time.mktime(now.timetuple())
    }

    # compute the severity
    severity = _SEVERITY.INFO
    if self.response_code >= 400:
        severity = _SEVERITY.ERROR
        d[u'error_cause'] = self.error_cause.name

    # add 'optional' fields to the struct
    if self.request_size > 0:
        d[u'request_size'] = self.request_size
    if self.response_size > 0:
        d[u'response_size'] = self.response_size
    if self.method:
        d[u'http_method'] = self.method
    if self.request_time:
        d[u'request_latency_in_ms'] = self.request_time.total_seconds() * 1000

    # add 'copyable' fields to the struct
    for key in self.COPYABLE_LOG_FIELDS:
        value = getattr(self, key, None)
        if value:
            d[key] = value

    return sc_messages.LogEntry(
        name=name,
        timestamp=timestamp.to_rfc3339(now),
        severity=severity,
        structPayload=_struct_payload_from(d))
python
{ "resource": "" }
q8663
Info.as_report_request
train
def as_report_request(self, rules, timer=datetime.utcnow):
    """Makes a `ServicecontrolServicesReportRequest` from this instance

    Args:
      rules (:class:`ReportingRules`): determines what labels, metrics and
        logs to include in the report request.
      timer: a function that determines the current time

    Return:
      a ``ServicecontrolServicesReportRequest`` generated from this
      instance governed by the provided ``rules``

    Raises:
      ValueError: if the fields in this instance cannot be used to create
        a valid ``ServicecontrolServicesReportRequest``
    """
    if not self.service_name:
        raise ValueError(u'the service name must be set')
    op = super(Info, self).as_operation(timer=timer)

    # Populate metrics and labels if they can be associated with a
    # method/operation
    if op.operationId and op.operationName:
        labels = {}
        for known_label in rules.labels:
            known_label.do_labels_update(self, labels)

        # Forcibly add system label reporting here, as the base service
        # config does not specify it as a label.
        labels[_KNOWN_LABELS.SCC_PLATFORM.label_name] = (
            self.platform.friendly_string())
        labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = (
            SERVICE_AGENT)
        labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT

        if labels:
            op.labels = encoding.PyValueToMessage(
                sc_messages.Operation.LabelsValue, labels)
        for known_metric in rules.metrics:
            known_metric.do_operation_update(self, op)

    # Populate the log entries
    now = timer()
    op.logEntries = [self._as_log_entry(l, now) for l in rules.logs]

    return sc_messages.ServicecontrolServicesReportRequest(
        serviceName=self.service_name,
        reportRequest=sc_messages.ReportRequest(operations=[op]))
python
{ "resource": "" }
q8664
Aggregator.clear
train
def clear(self):
    """Clears the cache."""
    if self._cache is None:
        return _NO_RESULTS
    with self._cache as k:
        res = [x.as_operation() for x in k.values()]
        k.clear()
        k.out_deque.clear()
    return res
python
{ "resource": "" }
q8665
Aggregator.report
train
def report(self, req):
    """Adds a report request to the cache.

    Returns ``None`` if it could not be aggregated, and callers need to
    send the request to the server; otherwise it returns ``CACHED_OK``.

    Args:
      req (:class:`sc_messages.ReportRequest`): the request to be
        aggregated

    Result:
      ``None`` if the request was not cached, otherwise ``CACHED_OK``
    """
    if self._cache is None:
        return None  # no cache, send request now

    if not isinstance(req, sc_messages.ServicecontrolServicesReportRequest):
        raise ValueError(u'Invalid request')

    if req.serviceName != self.service_name:
        _logger.error(u'bad report(): service_name %s does not match ours %s',
                      req.serviceName, self.service_name)
        raise ValueError(u'Service name mismatch')

    report_req = req.reportRequest
    if report_req is None:
        _logger.error(u'bad report(): no report_request in %s', req)
        raise ValueError(u'Expected report_request not set')

    if _has_high_important_operation(report_req) or self._cache is None:
        return None
    ops_by_signature = _key_by_signature(report_req.operations,
                                         _sign_operation)

    # Concurrency:
    #
    # This holds a lock on the cache while updating it.  No i/o operations
    # are performed, so any waiting threads see minimal delays
    with self._cache as cache:
        for key, op in ops_by_signature.items():
            agg = cache.get(key)
            if agg is None:
                cache[key] = operation.Aggregator(op, self._kinds)
            else:
                agg.add(op)

    return self.CACHED_OK
python
{ "resource": "" }
q8666
convert_response
train
def convert_response(allocate_quota_response, project_id):
    """Computes an http status code and message from an `AllocateQuotaResponse`

    The return value is a tuple (code, message) where

    code: is the http status code
    message: is the message to return

    Args:
      allocate_quota_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaResponse`):
        the response from calling an api

    Returns:
      tuple(code, message)
    """
    if not allocate_quota_response or not allocate_quota_response.allocateErrors:
        return _IS_OK

    # only convert the first error for now, as per ESP
    theError = allocate_quota_response.allocateErrors[0]
    error_tuple = _QUOTA_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)
    if error_tuple[1].find(u'{') == -1:  # no replacements needed:
        return error_tuple

    updated_msg = error_tuple[1].format(project_id=project_id,
                                        detail=theError.description or u'')
    return error_tuple[0], updated_msg
python
{ "resource": "" }
q8667
sign
train
def sign(allocate_quota_request):
    """Obtains a signature for an operation in a `AllocateQuotaRequest`

    Args:
       allocate_quota_request (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaRequest`):
         the request whose operation is to be signed

    Returns:
       string: a secure hash generated from the operation

    """
    if not isinstance(allocate_quota_request, sc_messages.AllocateQuotaRequest):
        raise ValueError(u'Invalid request')
    op = allocate_quota_request.allocateOperation
    if op is None or op.methodName is None or op.consumerId is None:
        logging.error(u'Bad %s: not initialized => not signed',
                      allocate_quota_request)
        raise ValueError(u'allocate_quota request must be initialized '
                         u'with an operation')
    md5 = hashlib.md5()
    md5.update(op.methodName.encode('utf-8'))
    md5.update(b'\x00')
    md5.update(op.consumerId.encode('utf-8'))
    if op.labels:
        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
    for value_set in op.quotaMetrics:
        md5.update(b'\x00')
        md5.update(value_set.metricName.encode('utf-8'))
        for mv in value_set.metricValues:
            metric_value.update_hash(md5, mv)
    md5.update(b'\x00')
    return md5.digest()
python
{ "resource": "" }
q8668
Info.as_allocate_quota_request
train
def as_allocate_quota_request(self, timer=datetime.utcnow):
        """Makes a `ServicecontrolServicesAllocateQuotaRequest` from this instance

        Returns:
          a ``ServicecontrolServicesAllocateQuotaRequest``

        Raises:
          ValueError: if the fields in this instance are insufficient to
            create a valid ``ServicecontrolServicesAllocateQuotaRequest``

        """
        if not self.service_name:
            raise ValueError(u'the service name must be set')
        if not self.operation_id:
            raise ValueError(u'the operation id must be set')
        if not self.operation_name:
            raise ValueError(u'the operation name must be set')
        op = super(Info, self).as_operation(timer=timer)

        labels = {}
        if self.client_ip:
            labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip

        if self.referer:
            labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer

        qop = sc_messages.QuotaOperation(
            operationId=op.operationId,
            methodName=op.operationName,
            consumerId=op.consumerId,
            quotaMode=sc_messages.QuotaOperation.QuotaModeValueValuesEnum.BEST_EFFORT,
        )

        qop.labels = encoding.PyValueToMessage(
            sc_messages.QuotaOperation.LabelsValue, labels)

        quota_info = self.quota_info if self.quota_info else {}
        qop.quotaMetrics = [
            sc_messages.MetricValueSet(
                metricName=name,
                metricValues=[sc_messages.MetricValue(int64Value=cost)])
            for name, cost in quota_info.items()
        ]

        allocate_quota_request = sc_messages.AllocateQuotaRequest(
            allocateOperation=qop)
        if self.config_id:
            allocate_quota_request.serviceConfigId = self.config_id
        return sc_messages.ServicecontrolServicesAllocateQuotaRequest(
            serviceName=self.service_name,
            allocateQuotaRequest=allocate_quota_request)
python
{ "resource": "" }
q8669
add_route
train
def add_route(app_or_blueprint, fn, context=default_context):
    """ adds a transmute route to the application or blueprint """
    transmute_func = TransmuteFunction(
        fn, args_not_from_request=["request"]
    )
    handler = create_handler(transmute_func, context=context)
    get_swagger_spec(app_or_blueprint).add_func(transmute_func, context)
    for p in transmute_func.paths:
        sanic_path = _convert_to_sanic_path(p)
        app_or_blueprint.add_route(handler, sanic_path,
                                   methods=list(transmute_func.methods))
python
{ "resource": "" }
q8670
KnownMetrics.matches
train
def matches(self, desc):
        """Determines if a given metric descriptor matches this enum instance

        Args:
          desc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`):
            the instance to test

        Return:
          `True` if desc is supported, otherwise `False`

        """
        return (self.metric_name == desc.name and
                self.kind == desc.metricKind and
                self.value_type == desc.valueType)
python
{ "resource": "" }
q8671
KnownMetrics.do_operation_update
train
def do_operation_update(self, info, an_op):
        """Updates an operation using the assigned update_op_func

        Args:
          info (:class:`endpoints_management.control.report_request.Info`):
            the info instance to read values from
          an_op (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):
            the operation to update

        """
        self.update_op_func(self.metric_name, info, an_op)
python
{ "resource": "" }
q8672
KnownMetrics.is_supported
train
def is_supported(cls, desc):
        """Determines if the given metric descriptor is supported.

        Args:
          desc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`):
            the metric descriptor to test

        Return:
          `True` if desc is supported, otherwise `False`

        """
        for m in cls:
            if m.matches(desc):
                return True
        return False
python
{ "resource": "" }
q8673
add_dict_to_hash
train
def add_dict_to_hash(a_hash, a_dict):
    """Adds `a_dict` to `a_hash`

    Args:
       a_hash (`Hash`): the secure hash, e.g. created by hashlib.md5
       a_dict (dict[string, [string]]): the dictionary to add to the hash

    """
    if a_dict is None:
        return
    for k, v in a_dict.items():
        a_hash.update(b'\x00' + k.encode('utf-8') +
                      b'\x00' + v.encode('utf-8'))
python
{ "resource": "" }
q8674
create_swagger_json_handler
train
def create_swagger_json_handler(app, **kwargs):
    """
    Create a handler that returns the swagger definition
    for an application.

    This method assumes the application is using the
    TransmuteUrlDispatcher as the router.
    """
    spec = get_swagger_spec(app)
    _add_blueprint_specs(app, spec)
    spec_dict = spec.swagger_definition(**kwargs)
    encoded_spec = json.dumps(spec_dict).encode("UTF-8")

    async def swagger(request):
        return HTTPResponse(
            body_bytes=encoded_spec,
            headers={
                "Access-Control-Allow-Origin": "*"
            },
            content_type="application/json",
        )

    return swagger
python
{ "resource": "" }
q8675
FQDN.absolute
train
def absolute(self):
        """
        The FQDN as a string in absolute form
        """
        if not self.is_valid:
            raise ValueError('invalid FQDN `{0}`'.format(self.fqdn))
        if self.is_valid_absolute:
            return self.fqdn
        return '{0}.'.format(self.fqdn)
python
{ "resource": "" }
q8676
FQDN.relative
train
def relative(self):
        """
        The FQDN as a string in relative form
        """
        if not self.is_valid:
            raise ValueError('invalid FQDN `{0}`'.format(self.fqdn))
        if self.is_valid_absolute:
            return self.fqdn[:-1]
        return self.fqdn
python
{ "resource": "" }
q8677
PipedGzipReader._raise_if_error
train
def _raise_if_error(self):
        """
        Raise IOError if process is not running anymore and the exit code is
        nonzero.
        """
        retcode = self.process.poll()
        if retcode is not None and retcode != 0:
            message = self._stderr.read().strip()
            raise IOError(message)
python
{ "resource": "" }
q8678
MAB.run
train
def run(self, trials=100, strategy=None, parameters=None):
        '''
        Run MAB test with T trials.

        Parameters
        ----------
        trials : int
            Number of trials to run.
        strategy : str
            Name of selected strategy.
        parameters : dict
            Parameters for selected strategy.

        Available strategies:
            - Epsilon-greedy ("eps_greedy")
            - Softmax ("softmax")
            - Upper confidence bound ("ucb")

        Returns
        -------
        None
        '''
        if trials < 1:
            raise Exception('MAB.run: Number of trials cannot be less than 1!')
        if not strategy:
            strategy = 'eps_greedy'
        else:
            if strategy not in self.strategies:
                raise Exception('MAB.run: Strategy name invalid. Choose from:'
                                ' {}'.format(', '.join(self.strategies)))

        # Run strategy
        for n in range(trials):
            self._run(strategy, parameters)
python
{ "resource": "" }
q8679
MAB._run
train
def _run(self, strategy, parameters=None):
        '''
        Run single trial of MAB strategy.

        Parameters
        ----------
        strategy : function
        parameters : dict

        Returns
        -------
        None
        '''
        choice = self.run_strategy(strategy, parameters)
        self.choices.append(choice)
        payout = self.bandits.pull(choice)
        if payout is None:
            print('Trials exhausted. No more values for bandit', choice)
            return None
        else:
            self.wins[choice] += payout
            self.pulls[choice] += 1
python
{ "resource": "" }
q8680
MAB.bayesian
train
def bayesian(self, params=None):
        '''
        Run the Bayesian Bandit algorithm which utilizes a beta distribution
        for exploration and exploitation.

        Parameters
        ----------
        params : None
            For API consistency, this function can take a parameters
            argument, but it is ignored.

        Returns
        -------
        int
            Index of chosen bandit
        '''
        p_success_arms = [
            np.random.beta(self.wins[i] + 1, self.pulls[i] - self.wins[i] + 1)
            for i in range(len(self.wins))
        ]
        return np.array(p_success_arms).argmax()
python
{ "resource": "" }
q8681
MAB.softmax
train
def softmax(self, params):
        '''
        Run the softmax selection strategy.

        Parameters
        ----------
        params : dict
            Should contain the temperature parameter 'tau'.

        Returns
        -------
        int
            Index of chosen bandit
        '''
        default_tau = 0.1
        if params and type(params) == dict:
            tau = params.get('tau')
            try:
                float(tau)
            except (TypeError, ValueError):
                print('slots: softmax: Setting tau to default')
                tau = default_tau
        else:
            tau = default_tau

        # Handle cold start. Not all bandits tested yet.
        if True in (self.pulls < 3):
            return np.random.choice(range(len(self.pulls)))
        else:
            payouts = self.wins / (self.pulls + 0.1)
            norm = sum(np.exp(payouts / tau))
            ps = np.exp(payouts / tau) / norm

            # Randomly choose index based on CMF
            cmf = [sum(ps[:i + 1]) for i in range(len(ps))]

            rand = np.random.rand()

            found = False
            found_i = None
            i = 0
            while not found:
                if rand < cmf[i]:
                    found_i = i
                    found = True
                else:
                    i += 1

            return found_i
python
{ "resource": "" }
q8682
MAB.ucb
train
def ucb(self, params=None):
        '''
        Run the upper confidence bound MAB selection strategy.

        This is the UCB1 algorithm described in
        https://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf

        Parameters
        ----------
        params : None
            For API consistency, this function can take a parameters
            argument, but it is ignored.

        Returns
        -------
        int
            Index of chosen bandit
        '''
        # UCB = j_max(payout_j + sqrt(2ln(n_tot)/n_j))

        # Handle cold start. Not all bandits tested yet.
        if True in (self.pulls < 3):
            return np.random.choice(range(len(self.pulls)))
        else:
            n_tot = sum(self.pulls)
            payouts = self.wins / (self.pulls + 0.1)
            ucbs = payouts + np.sqrt(2 * np.log(n_tot) / self.pulls)
            return np.argmax(ucbs)
python
{ "resource": "" }
q8683
MAB.best
train
def best(self):
        '''
        Return current 'best' choice of bandit.

        Returns
        -------
        int
            Index of bandit
        '''
        if len(self.choices) < 1:
            print('slots: No trials run so far.')
            return None
        else:
            return np.argmax(self.wins / (self.pulls + 0.1))
python
{ "resource": "" }
q8684
MAB.est_payouts
train
def est_payouts(self):
        '''
        Calculate current estimate of average payout for each bandit.

        Returns
        -------
        array of floats or None
        '''
        if len(self.choices) < 1:
            print('slots: No trials run so far.')
            return None
        else:
            return self.wins / (self.pulls + 0.1)
python
{ "resource": "" }
q8685
MAB.regret
train
def regret(self):
        '''
        Calculate expected regret, where expected regret is the maximum
        optimal reward minus the sum of collected rewards, i.e.

        expected regret = T*max_k(mean_k) - sum_(t=1-->T) (reward_t)

        Note that the value returned here is normalized by the total number
        of pulls, i.e. it is the expected regret per trial.

        Returns
        -------
        float
        '''
        return (sum(self.pulls) * np.max(np.nan_to_num(self.wins / self.pulls)) -
                sum(self.wins)) / sum(self.pulls)
python
{ "resource": "" }
q8686
MAB.crit_met
train
def crit_met(self):
        '''
        Determine if stopping criterion has been met.

        Returns
        -------
        bool
        '''
        if True in (self.pulls < 3):
            return False
        else:
            return self.criteria[self.criterion](self.stop_value)
python
{ "resource": "" }
q8687
MAB.regret_met
train
def regret_met(self, threshold=None):
        '''
        Determine if regret criterion has been met.

        Parameters
        ----------
        threshold : float

        Returns
        -------
        bool
        '''
        if not threshold:
            return self.regret() <= self.stop_value
        return self.regret() <= threshold
python
{ "resource": "" }
q8688
MAB.online_trial
train
def online_trial(self, bandit=None, payout=None, strategy='eps_greedy',
                     parameters=None):
        '''
        Update the bandits with the results of the previous live, online
        trial, then run the selection algorithm. If the stopping criterion
        is met, return the best arm estimate. Otherwise return the next
        arm to try.

        Parameters
        ----------
        bandit : int
            Bandit index
        payout : float
            Payout value
        strategy : string
            Name of update strategy
        parameters : dict
            Parameters for update strategy function

        Returns
        -------
        dict
            Format: {'new_trial': boolean, 'choice': int, 'best': int}
        '''
        if bandit is not None and payout is not None:
            self.update(bandit=bandit, payout=payout)
        else:
            raise Exception('slots.online_trial: bandit and/or payout value'
                            ' missing.')

        if self.crit_met():
            return {'new_trial': False, 'choice': self.best(),
                    'best': self.best()}
        else:
            return {'new_trial': True,
                    'choice': self.run_strategy(strategy, parameters),
                    'best': self.best()}
python
{ "resource": "" }
q8689
MAB.update
train
def update(self, bandit, payout):
        '''
        Update bandit trials and payouts for given bandit.

        Parameters
        ----------
        bandit : int
            Bandit index
        payout : float

        Returns
        -------
        None
        '''
        self.choices.append(bandit)
        self.pulls[bandit] += 1
        self.wins[bandit] += payout
        self.bandits.payouts[bandit] += payout
python
{ "resource": "" }
q8690
Bandits.pull
train
def pull(self, i):
        '''
        Return the payout from a single pull of the bandit i's arm.

        Parameters
        ----------
        i : int
            Index of bandit.

        Returns
        -------
        float or None
        '''
        if self.live:
            if len(self.payouts[i]) > 0:
                return self.payouts[i].pop()
            else:
                return None
        else:
            if np.random.rand() < self.probs[i]:
                return self.payouts[i]
            else:
                return 0.0
python
{ "resource": "" }
q8691
click_table_printer
train
def click_table_printer(headers, _filter, data):
    """Generate space separated output for click commands."""
    _filter = [h.lower() for h in _filter] + [h.upper() for h in _filter]
    headers = [h for h in headers if not _filter or h in _filter]

    # Maximum header width
    header_widths = [len(h) for h in headers]

    for row in data:
        for idx in range(len(headers)):
            # If a row contains an element which is wider, update the
            # maximum width
            if header_widths[idx] < len(str(row[idx])):
                header_widths[idx] = len(str(row[idx]))

    # Prepare the format string with the maximum widths
    formatted_output_parts = ['{{:<{0}}}'.format(hw)
                              for hw in header_widths]
    formatted_output = ' '.join(formatted_output_parts)

    # Print the table with the headers capitalized
    click.echo(formatted_output.format(*[h.upper() for h in headers]))
    for row in data:
        click.echo(formatted_output.format(*row))
python
{ "resource": "" }
q8692
calculate_hash_of_dir
train
def calculate_hash_of_dir(directory, file_list=None):
    """Calculate hash of directory."""
    md5_hash = md5()
    if not os.path.exists(directory):
        return -1

    try:
        for subdir, dirs, files in os.walk(directory):
            for _file in files:
                file_path = os.path.join(subdir, _file)
                if file_list is not None and file_path not in file_list:
                    continue
                try:
                    _file_object = open(file_path, 'rb')
                except Exception:
                    # You can't open the file for some reason. We return -1
                    # since we cannot ensure that a file that can not be
                    # read will not change from one execution to another.
                    return -1
                while 1:
                    # Read file in little chunks
                    buf = _file_object.read(4096)
                    if not buf:
                        break
                    md5_hash.update(md5(buf).hexdigest().encode())
                _file_object.close()
    except Exception:
        return -1
    return md5_hash.hexdigest()
python
{ "resource": "" }
q8693
calculate_job_input_hash
train
def calculate_job_input_hash(job_spec, workflow_json):
    """Calculate md5 hash of job specification and workflow json."""
    if 'workflow_workspace' in job_spec:
        del job_spec['workflow_workspace']
    job_md5_buffer = md5()
    job_md5_buffer.update(json.dumps(job_spec).encode('utf-8'))
    job_md5_buffer.update(json.dumps(workflow_json).encode('utf-8'))
    return job_md5_buffer.hexdigest()
python
{ "resource": "" }
q8694
calculate_file_access_time
train
def calculate_file_access_time(workflow_workspace):
    """Calculate access times of files in workspace."""
    access_times = {}
    for subdir, dirs, files in os.walk(workflow_workspace):
        for file in files:
            file_path = os.path.join(subdir, file)
            access_times[file_path] = os.stat(file_path).st_atime
    return access_times
python
{ "resource": "" }
q8695
copy_openapi_specs
train
def copy_openapi_specs(output_path, component):
    """Copy generated and validated openapi specs to reana-commons module."""
    if component == 'reana-server':
        file = 'reana_server.json'
    elif component == 'reana-workflow-controller':
        file = 'reana_workflow_controller.json'
    elif component == 'reana-job-controller':
        file = 'reana_job_controller.json'
    if os.environ.get('REANA_SRCDIR'):
        reana_srcdir = os.environ.get('REANA_SRCDIR')
    else:
        reana_srcdir = os.path.join('..')
    try:
        reana_commons_specs_path = os.path.join(
            reana_srcdir,
            'reana-commons',
            'reana_commons',
            'openapi_specifications')
        if os.path.exists(reana_commons_specs_path):
            if os.path.isfile(output_path):
                shutil.copy(output_path,
                            os.path.join(reana_commons_specs_path, file))
                # copy openapi specs file as well to docs
                shutil.copy(output_path,
                            os.path.join('docs', 'openapi.json'))
    except Exception as e:
        click.echo('Something went wrong, could not copy openapi '
                   'specifications to reana-commons \n{0}'.format(e))
python
{ "resource": "" }
q8696
get_workflow_status_change_verb
train
def get_workflow_status_change_verb(status):
    """Give the correct verb conjugation depending on status tense.

    :param status: String which represents the status the workflow changed
        to.
    """
    verb = ''
    if status.endswith('ing'):
        verb = 'is'
    elif status.endswith('ed'):
        verb = 'has been'
    else:
        raise ValueError('Unrecognised status {}'.format(status))
    return verb
python
{ "resource": "" }
q8697
build_progress_message
train
def build_progress_message(total=None,
                           running=None,
                           finished=None,
                           failed=None,
                           cached=None):
    """Build the progress message with correct formatting."""
    progress_message = {}
    if total:
        progress_message['total'] = total
    if running:
        progress_message['running'] = running
    if finished:
        progress_message['finished'] = finished
    if failed:
        progress_message['failed'] = failed
    if cached:
        progress_message['cached'] = cached
    return progress_message
python
{ "resource": "" }
q8698
build_caching_info_message
train
def build_caching_info_message(job_spec,
                               job_id,
                               workflow_workspace,
                               workflow_json,
                               result_path):
    """Build the caching info message with correct formatting."""
    caching_info_message = {
        "job_spec": job_spec,
        "job_id": job_id,
        "workflow_workspace": workflow_workspace,
        "workflow_json": workflow_json,
        "result_path": result_path
    }
    return caching_info_message
python
{ "resource": "" }
q8699
get_workspace_disk_usage
train
def get_workspace_disk_usage(workspace, summarize=False):
    """Retrieve disk usage information of a workspace."""
    command = ['du', '-h']
    if summarize:
        command.append('-s')
    else:
        command.append('-a')
    command.append(workspace)
    disk_usage_info = subprocess.check_output(command).decode().split()
    # create pairs of (size, filename)
    filesize_pairs = list(zip(disk_usage_info[::2], disk_usage_info[1::2]))
    filesizes = []
    for filesize_pair in filesize_pairs:
        size, name = filesize_pair
        # trim workspace path in every file name
        filesizes.append({'name': name[len(workspace):], 'size': size})
    return filesizes
python
{ "resource": "" }