| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q5300
|
_coerce_topic
|
train
|
def _coerce_topic(topic):
"""
Ensure that the topic name is a text string of a valid length.
:param topic: Kafka topic name. Valid characters are in the set ``[a-zA-Z0-9._-]``.
:raises ValueError: when the topic name is empty or exceeds 249 characters
:raises TypeError: when the topic is not :class:`unicode` or :class:`str`
"""
if not isinstance(topic, string_types):
raise TypeError('topic={!r} must be text'.format(topic))
if not isinstance(topic, text_type):
topic = topic.decode('ascii')
if len(topic) < 1:
raise ValueError('invalid empty topic name')
if len(topic) > 249:
raise ValueError('topic={!r} name is too long: {} > 249'.format(
topic, len(topic)))
return topic
|
python
|
{
"resource": ""
}
|
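A minimal behaviour sketch of the coercion above (assuming the six-style string_types/text_type aliases used in the function; the calls and outcomes are illustrative, not from the source):

_coerce_topic(u'purchases')   # returns u'purchases'
_coerce_topic(b'purchases')   # on Python 2 the bytes are ASCII-decoded to text; on Python 3 this raises TypeError
_coerce_topic('')             # raises ValueError: invalid empty topic name
_coerce_topic('x' * 250)      # raises ValueError: name is too long (250 > 249)
_coerce_topic(42)             # raises TypeError: must be text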
q5301
|
_coerce_consumer_group
|
train
|
def _coerce_consumer_group(consumer_group):
"""
Ensure that the consumer group is a text string.
:param consumer_group: :class:`bytes` or :class:`str` instance
:raises TypeError: when `consumer_group` is not :class:`bytes`
or :class:`str`
"""
if not isinstance(consumer_group, string_types):
raise TypeError('consumer_group={!r} must be text'.format(consumer_group))
if not isinstance(consumer_group, text_type):
consumer_group = consumer_group.decode('utf-8')
return consumer_group
|
python
|
{
"resource": ""
}
|
q5302
|
_coerce_client_id
|
train
|
def _coerce_client_id(client_id):
"""
Ensure the provided client ID is a byte string. If a text string is
provided, it is encoded as UTF-8 bytes.
:param client_id: :class:`bytes` or :class:`str` instance
"""
if isinstance(client_id, type(u'')):
client_id = client_id.encode('utf-8')
if not isinstance(client_id, bytes):
raise TypeError('{!r} is not a valid client ID (must be'
' str or bytes)'.format(client_id))
return client_id
|
python
|
{
"resource": ""
}
|
q5303
|
write_short_ascii
|
train
|
def write_short_ascii(s):
"""
Encode a Kafka short string which represents text.
:param str s:
Text string (`str` on Python 3, `str` or `unicode` on Python 2) or
``None``. The string will be ASCII-encoded.
:returns: length-prefixed `bytes`
:raises:
`struct.error` for strings longer than 32767 characters
"""
if s is None:
return _NULL_SHORT_STRING
if not isinstance(s, string_types):
raise TypeError('{!r} is not text'.format(s))
return write_short_bytes(s.encode('ascii'))
|
python
|
{
"resource": ""
}
|
q5304
|
write_short_bytes
|
train
|
def write_short_bytes(b):
"""
Encode a Kafka short string which contains arbitrary bytes. A short string
is limited to 32767 bytes in length by the signed 16-bit length prefix.
A length prefix of -1 indicates ``null``, represented as ``None`` in
Python.
:param bytes b:
No more than 32767 bytes, or ``None`` for the null encoding.
:return: length-prefixed `bytes`
:raises:
`struct.error` for strings longer than 32767 characters
"""
if b is None:
return _NULL_SHORT_STRING
if not isinstance(b, bytes):
raise TypeError('{!r} is not bytes'.format(b))
elif len(b) > 32767:
raise struct.error(len(b))
else:
return struct.pack('>h', len(b)) + b
|
python
|
{
"resource": ""
}
|
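A minimal sketch of the framing the two short-string helpers above produce: a big-endian signed 16-bit length prefix followed by the payload, with a -1 length standing in for null (the _NULL_SHORT_STRING value is assumed to be that -1 prefix):

import struct

struct.pack('>h', 3) + b'abc'   # b'\x00\x03abc' -- what write_short_bytes(b'abc') returns
struct.pack('>h', -1)           # b'\xff\xff'    -- the assumed null marker used for None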
q5305
|
CrontabReader.parse_cron_line
|
train
|
def parse_cron_line(self, line):
"""Parses crontab line and returns only starting time string
Args:
line: crontab line
Returns:
Time part of cron line
"""
stripped = line.strip()
if stripped and stripped.startswith('#') is False:
rexres = self.rex.search(stripped)
if rexres:
return ' '.join(rexres.group(1).split())
return None
|
python
|
{
"resource": ""
}
|
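The regular expression self.rex is not included in this row; a hypothetical pattern that captures the five leading time fields of a crontab entry, consistent with the behaviour described above, might look like this (the pattern and sample line are illustrative assumptions):

import re

# Capture the five schedule fields, then require at least one more non-space (the command).
rex = re.compile(r'^\s*((?:\S+\s+){4}\S+)\s+\S')

m = rex.search('*/5  *  *  *  1-5  /usr/bin/backup.sh')
' '.join(m.group(1).split())   # '*/5 * * * 1-5'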
q5306
|
_KafkaBrokerClient.updateMetadata
|
train
|
def updateMetadata(self, new):
"""
Update the metadata stored for this broker.
Future connections made to the broker will use the host and port
defined in the new metadata. Any existing connection is not dropped,
however.
:param new:
:class:`afkak.common.BrokerMetadata` with the same node ID as the
current metadata.
"""
if self.node_id != new.node_id:
raise ValueError("Broker metadata {!r} doesn't match node_id={}".format(new, self.node_id))
self.node_id = new.node_id
self.host = new.host
self.port = new.port
|
python
|
{
"resource": ""
}
|
q5307
|
_KafkaBrokerClient.makeRequest
|
train
|
def makeRequest(self, requestId, request, expectResponse=True):
"""
Send a request to our broker via our self.proto KafkaProtocol object.
Return a deferred which will fire when the reply matching the requestId
comes back from the server, or, if expectResponse is False, then
return None instead.
If we are not currently connected, then we buffer the request to send
when the connection comes back up.
"""
if requestId in self.requests:
# Id is duplicate to 'in-flight' request. Reject it, as we
# won't be able to properly deliver the response(s)
# Note that this won't protect against a client calling us
# twice with the same ID, but first with expectResponse=False
# But that's pathological, and the only defense is to track
# all requestIds sent regardless of whether we expect to see
# a response, which is effectively a memory leak...
raise DuplicateRequestError(
'Reuse of requestId:{}'.format(requestId))
# If we've been told to shutdown (close() called) then fail request
if self._dDown:
return fail(ClientError('makeRequest() called after close()'))
# Ok, we are going to save/send it, create a _Request object to track
canceller = partial(
self.cancelRequest, requestId,
CancelledError(message="Request correlationId={} was cancelled".format(requestId)))
tReq = _Request(requestId, request, expectResponse, canceller)
# add it to our requests dict
self.requests[requestId] = tReq
# Add an errback to the tReq.d to remove it from our requests dict
# if something goes wrong...
tReq.d.addErrback(self._handleRequestFailure, requestId)
# Do we have a connection over which to send the request?
if self.proto:
# Send the request
self._sendRequest(tReq)
# Have we not even started trying to connect yet? Do so now
elif not self.connector:
self._connect()
return tReq.d
|
python
|
{
"resource": ""
}
|
q5308
|
_KafkaBrokerClient.disconnect
|
train
|
def disconnect(self):
"""
Disconnect from the Kafka broker.
This is used to implement disconnection on timeout as a workaround for
Kafka connections occasionally getting stuck on the server side under
load. Requests are not cancelled, so they will be retried.
"""
if self.proto:
log.debug('%r Disconnecting from %r', self, self.proto.transport.getPeer())
self.proto.transport.loseConnection()
|
python
|
{
"resource": ""
}
|
q5309
|
_KafkaBrokerClient.close
|
train
|
def close(self):
"""Permanently dispose of the broker client.
This terminates any outstanding connection and cancels any pending
requests.
"""
log.debug('%r: close() proto=%r connector=%r', self, self.proto, self.connector)
assert self._dDown is None
self._dDown = Deferred()
if self.proto is not None:
self.proto.transport.loseConnection()
elif self.connector is not None:
def connectingFailed(reason):
"""
Handle the failure resulting from cancellation.
:reason: a `Failure`, most likely a cancellation error (but
that's not guaranteed).
:returns: `None` to handle the failure
"""
log.debug('%r: connection attempt has been cancelled: %r', self, reason)
self._dDown.callback(None)
self.connector.addErrback(connectingFailed)
self.connector.cancel()
else:
# Fake a cleanly closing connection
self._dDown.callback(None)
try:
raise CancelledError(message="Broker client for node_id={} {}:{} was closed".format(
self.node_id, self.host, self.port))
except Exception:
reason = Failure()
# Cancel any requests
for correlation_id in list(self.requests.keys()): # must copy, may del
self.cancelRequest(correlation_id, reason)
return self._dDown
|
python
|
{
"resource": ""
}
|
q5310
|
_KafkaBrokerClient._connectionLost
|
train
|
def _connectionLost(self, reason):
"""Called when the protocol connection is lost
- Log the disconnection.
- Mark any outstanding requests as unsent so they will be sent when
a new connection is made.
- If closing the broker client, mark completion of that process.
:param reason:
Failure that indicates the reason for disconnection.
"""
log.info('%r: Connection closed: %r', self, reason)
# Reset our proto so we don't try to send to a down connection
self.proto = None
# Mark any in-flight requests as unsent.
for tReq in self.requests.values():
tReq.sent = False
if self._dDown:
self._dDown.callback(None)
elif self.requests:
self._connect()
|
python
|
{
"resource": ""
}
|
q5311
|
_KafkaBrokerClient.handleResponse
|
train
|
def handleResponse(self, response):
"""Handle the response string received by KafkaProtocol.
Ok, we've received the response from the broker. Find the requestId
in the message, lookup & fire the deferred with the response.
"""
requestId = KafkaCodec.get_response_correlation_id(response)
# Protect against responses coming back we didn't expect
tReq = self.requests.pop(requestId, None)
if tReq is None:
# This could happen if we've sent it, are waiting on the response
# when it's cancelled, causing us to remove it from self.requests
log.warning('Unexpected response with correlationId=%d: %r',
requestId, reprlib.repr(response))
else:
tReq.d.callback(response)
|
python
|
{
"resource": ""
}
|
q5312
|
_KafkaBrokerClient._sendRequest
|
train
|
def _sendRequest(self, tReq):
"""Send a single request over our protocol to the Kafka broker."""
try:
tReq.sent = True
self.proto.sendString(tReq.data)
except Exception as e:
log.exception('%r: Failed to send request %r', self, tReq)
del self.requests[tReq.id]
tReq.d.errback(e)
else:
if not tReq.expect:
# Once we've sent a request for which we don't expect a reply,
# we're done, remove it from requests, and fire the deferred
# with 'None', since there is no reply to be expected
del self.requests[tReq.id]
tReq.d.callback(None)
|
python
|
{
"resource": ""
}
|
q5313
|
_KafkaBrokerClient._sendQueued
|
train
|
def _sendQueued(self):
"""Connection just came up, send the unsent requests."""
for tReq in list(self.requests.values()): # must copy, may del
if not tReq.sent:
self._sendRequest(tReq)
|
python
|
{
"resource": ""
}
|
q5314
|
_KafkaBrokerClient._connect
|
train
|
def _connect(self):
"""Connect to the Kafka Broker
This routine will repeatedly try to connect to the broker (with backoff
according to the retry policy) until it succeeds.
"""
def tryConnect():
self.connector = d = maybeDeferred(connect)
d.addCallback(cbConnect)
d.addErrback(ebConnect)
def connect():
endpoint = self._endpointFactory(self._reactor, self.host, self.port)
log.debug('%r: connecting with %s', self, endpoint)
return endpoint.connect(self)
def cbConnect(proto):
log.debug('%r: connected to %r', self, proto.transport.getPeer())
self._failures = 0
self.connector = None
self.proto = proto
if self._dDown:
proto.transport.loseConnection()
else:
self._sendQueued()
def ebConnect(fail):
if self._dDown:
log.debug('%r: breaking connect loop due to %r after close()', self, fail)
return fail
self._failures += 1
delay = self._retryPolicy(self._failures)
log.debug('%r: failure %d to connect -> %s; retry in %.2f seconds.',
self, self._failures, fail.value, delay)
self.connector = d = deferLater(self._reactor, delay, lambda: None)
d.addCallback(cbDelayed)
def cbDelayed(result):
tryConnect()
self._failures = 0
tryConnect()
|
python
|
{
"resource": ""
}
|
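The retry policy referenced above is simply a callable that maps the consecutive failure count to a delay in seconds before the next connection attempt. A minimal exponential-backoff sketch of such a policy (names and constants are illustrative, not afkak's defaults):

def backoff_policy(initial=0.5, factor=2.0, maximum=30.0):
    # Returns a callable usable as a retry policy: delay grows with each failure, capped at `maximum`.
    def policy(failures):
        return min(initial * (factor ** (failures - 1)), maximum)
    return policy

policy = backoff_policy()
[policy(n) for n in (1, 2, 3, 4)]   # [0.5, 1.0, 2.0, 4.0]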
q5315
|
group_envs
|
train
|
def group_envs(envlist):
"""Group Tox environments for Travis CI builds
Separate by Python version so that they can go in different Travis jobs:
>>> group_envs(['py37-int-snappy', 'py36-int'])
[('py36', 'int', ['py36-int']), ('py37', 'int', ['py37-int-snappy'])]
Group unit tests and linting together:
>>> group_envs(['py27-unit', 'py27-lint'])
[('py27', 'unit', ['py27-unit', 'py27-lint'])]
"""
groups = {}
for env in envlist:
envpy, category = env.split('-')[0:2]
if category == 'lint':
category = 'unit'
try:
groups[envpy, category].append(env)
except KeyError:
groups[envpy, category] = [env]
return sorted((envpy, category, envs) for (envpy, category), envs in groups.items())
|
python
|
{
"resource": ""
}
|
q5316
|
create_gzip_message
|
train
|
def create_gzip_message(message_set):
"""
Construct a gzip-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances
"""
encoded_message_set = KafkaCodec._encode_message_set(message_set)
gzipped = gzip_encode(encoded_message_set)
return Message(0, CODEC_GZIP, None, gzipped)
|
python
|
{
"resource": ""
}
|
q5317
|
create_snappy_message
|
train
|
def create_snappy_message(message_set):
"""
Construct a Snappy-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances
"""
encoded_message_set = KafkaCodec._encode_message_set(message_set)
snapped = snappy_encode(encoded_message_set)
return Message(0, CODEC_SNAPPY, None, snapped)
|
python
|
{
"resource": ""
}
|
q5318
|
create_message_set
|
train
|
def create_message_set(requests, codec=CODEC_NONE):
"""
Create a message set from a list of requests.
Each request can have a list of messages and its own key. If codec is
:data:`CODEC_NONE`, return a list of raw Kafka messages. Otherwise, return
a list containing a single codec-encoded message.
:param codec:
The encoding for the message set, one of the constants:
- `afkak.CODEC_NONE`
- `afkak.CODEC_GZIP`
- `afkak.CODEC_SNAPPY`
:raises: :exc:`UnsupportedCodecError` for an unsupported codec
"""
msglist = []
for req in requests:
msglist.extend([create_message(m, key=req.key) for m in req.messages])
if codec == CODEC_NONE:
return msglist
elif codec == CODEC_GZIP:
return [create_gzip_message(msglist)]
elif codec == CODEC_SNAPPY:
return [create_snappy_message(msglist)]
else:
raise UnsupportedCodecError("Codec 0x%02x unsupported" % codec)
|
python
|
{
"resource": ""
}
|
q5319
|
KafkaCodec.decode_consumermetadata_response
|
train
|
def decode_consumermetadata_response(cls, data):
"""
Decode bytes to a ConsumerMetadataResponse
:param bytes data: bytes to decode
"""
(correlation_id, error_code, node_id), cur = \
relative_unpack('>ihi', data, 0)
host, cur = read_short_ascii(data, cur)
(port,), cur = relative_unpack('>i', data, cur)
return ConsumerMetadataResponse(
error_code, node_id, nativeString(host), port)
|
python
|
{
"resource": ""
}
|
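For illustration, a standalone decode of the same layout using struct directly (int32 correlation id, int16 error code, int32 node id, a 16-bit length-prefixed ASCII host, int32 port), mirroring the relative_unpack/read_short_ascii calls above; the sample bytes are fabricated:

import struct

def decode_consumer_metadata_sketch(data):
    # '>ihi' = correlation_id (int32), error_code (int16), node_id (int32)
    correlation_id, error_code, node_id = struct.unpack_from('>ihi', data, 0)
    cur = struct.calcsize('>ihi')
    (host_len,) = struct.unpack_from('>h', data, cur)
    cur += 2
    host = data[cur:cur + host_len].decode('ascii')
    cur += host_len
    (port,) = struct.unpack_from('>i', data, cur)
    return error_code, node_id, host, port

sample = struct.pack('>ihi', 7, 0, 1) + struct.pack('>h', 9) + b'broker-01' + struct.pack('>i', 9092)
decode_consumer_metadata_sketch(sample)   # (0, 1, 'broker-01', 9092)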
q5320
|
KafkaCodec.encode_offset_fetch_request
|
train
|
def encode_offset_fetch_request(cls, client_id, correlation_id,
group, payloads):
"""
Encode some OffsetFetchRequest structs
:param bytes client_id: string
:param int correlation_id: int
:param bytes group: string, the consumer group you are fetching offsets for
:param list payloads: list of :class:`OffsetFetchRequest`
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(
client_id, correlation_id, KafkaCodec.OFFSET_FETCH_KEY,
api_version=1)
message += write_short_ascii(group)
message += struct.pack('>i', len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_ascii(topic)
message += struct.pack('>i', len(topic_payloads))
for partition in topic_payloads:
message += struct.pack('>i', partition)
return message
|
python
|
{
"resource": ""
}
|
q5321
|
KafkaBootstrapProtocol.stringReceived
|
train
|
def stringReceived(self, response):
"""
Handle a response from the broker.
"""
correlation_id = response[0:4]
try:
d = self._pending.pop(correlation_id)
except KeyError:
self._log.warn((
"Response has unknown correlation ID {correlation_id!r}."
" Dropping connection to {peer}."
), correlation_id=correlation_id, peer=self.transport.getPeer())
self.transport.loseConnection()
else:
d.callback(response)
|
python
|
{
"resource": ""
}
|
q5322
|
KafkaBootstrapProtocol.connectionLost
|
train
|
def connectionLost(self, reason=connectionDone):
"""
Mark the protocol as failed and fail all pending operations.
"""
self._failed = reason
pending, self._pending = self._pending, None
for d in pending.values():
d.errback(reason)
|
python
|
{
"resource": ""
}
|
q5323
|
KafkaBootstrapProtocol.request
|
train
|
def request(self, request):
"""
Send a request to the Kafka broker.
:param bytes request:
The bytes of a Kafka `RequestMessage`_ structure. It must have
a unique (to this connection) correlation ID.
:returns:
`Deferred` which will:
- Succeed with the bytes of a Kafka `ResponseMessage`_
- Fail when the connection terminates
.. _RequestMessage: https://kafka.apache.org/protocol.html#protocol_messages
"""
if self._failed is not None:
return fail(self._failed)
correlation_id = request[4:8]
assert correlation_id not in self._pending
d = Deferred()
self.sendString(request)
self._pending[correlation_id] = d
return d
|
python
|
{
"resource": ""
}
|
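The slice request[4:8] works because a Kafka request header starts with api_key (int16), api_version (int16), and then the int32 correlation ID. A minimal sketch of that layout (field values are illustrative):

import struct

api_key, api_version, correlation_id = 3, 0, 42   # e.g. a Metadata request
header = struct.pack('>hhi', api_key, api_version, correlation_id)
header[4:8] == struct.pack('>i', correlation_id)   # True -- the bytes the code above keys on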
q5324
|
Producer.stop
|
train
|
def stop(self):
"""
Terminate any outstanding requests.
:returns: :class:`Deferred` which fires when fully stopped.
"""
self.stopping = True
# Cancel any outstanding request to our client
if self._batch_send_d:
self._batch_send_d.cancel()
# Do we have to worry about our looping call?
if self.batch_every_t is not None:
# Stop our looping call, and wait for the deferred to be called
if self._sendLooper is not None:
self._sendLooper.stop()
# Make sure requests that weren't cancelled above are cancelled now
self._cancel_outstanding()
return self._sendLooperD or succeed(None)
|
python
|
{
"resource": ""
}
|
q5325
|
Producer._next_partition
|
train
|
def _next_partition(self, topic, key=None):
"""get the next partition to which to publish
Check with our client for the latest partitions for the topic, then
ask our partitioner for the next partition to which we should publish
for the given key. If needed, create a new partitioner for the topic.
"""
# check if the client has metadata for the topic
while self.client.metadata_error_for_topic(topic):
# client doesn't have good metadata for topic. ask to fetch...
# check if we have request attempts left
if self._req_attempts >= self._max_attempts:
# No, no attempts left, so raise the error
_check_error(self.client.metadata_error_for_topic(topic))
yield self.client.load_metadata_for_topics(topic)
if not self.client.metadata_error_for_topic(topic):
break
self._req_attempts += 1
d = Deferred()
self.client.reactor.callLater(
self._retry_interval, d.callback, True)
self._retry_interval *= self.RETRY_INTERVAL_FACTOR
yield d
# Ok, should be safe to get the partitions now...
partitions = self.client.topic_partitions[topic]
# Do we have a partitioner for this topic already?
if topic not in self.partitioners:
# No, create a new partitioner for topic, partitions
self.partitioners[topic] = \
self.partitioner_class(topic, partitions)
# Lookup the next partition
partition = self.partitioners[topic].partition(key, partitions)
returnValue(partition)
|
python
|
{
"resource": ""
}
|
q5326
|
Producer._send_requests
|
train
|
def _send_requests(self, parts_results, requests):
"""Send the requests
We've determined the partition for each message group in the batch, or
got errors for them.
"""
# We use these dictionaries to be able to combine all the messages
# destined to the same topic/partition into one request
# the messages & deferreds, both by topic+partition
reqsByTopicPart = defaultdict(list)
payloadsByTopicPart = defaultdict(list)
deferredsByTopicPart = defaultdict(list)
# We now have a list of (succeeded/failed, partition/None) tuples
# for the partition lookups we did on each message group, zipped with
# the requests
for (success, part_or_failure), req in zip(parts_results, requests):
if req.deferred.called:
# Submitter cancelled the request while we were waiting for
# the topic/partition, skip it
continue
if not success:
# We failed to get a partition for this request, errback to the
# caller with the failure. Maybe this should retry? However,
# since this failure is likely to affect an entire Topic, there
# should be no issues with ordering of messages within a
# partition of a topic getting out of order. Let the caller
# retry the particular request if they like, or they could
# cancel all their outstanding requests.
req.deferred.errback(part_or_failure)
continue
# Ok, we now have a partition for this request, we can add the
# request for this topic/partition to reqsByTopicPart, and the
# caller's deferred to deferredsByTopicPart
topicPart = TopicAndPartition(req.topic, part_or_failure)
reqsByTopicPart[topicPart].append(req)
deferredsByTopicPart[topicPart].append(req.deferred)
# Build list of payloads grouped by topic/partition
# That is, we bundle all the messages destined for a given
# topic/partition, even if they were submitted by different
# requests into a single 'payload', and then we submit all the
# payloads as a list to the client for sending to the various
# brokers. The finest granularity of success/failure is at the
# payload (topic/partition) level.
payloads = []
for (topic, partition), reqs in reqsByTopicPart.items():
msgSet = create_message_set(reqs, self.codec)
req = ProduceRequest(topic, partition, msgSet)
topicPart = TopicAndPartition(topic, partition)
payloads.append(req)
payloadsByTopicPart[topicPart] = req
# Make sure we have some payloads to send
if not payloads:
return
# send the request
d = self.client.send_produce_request(
payloads, acks=self.req_acks, timeout=self.ack_timeout,
fail_on_error=False)
self._req_attempts += 1
# add our handlers
d.addBoth(self._handle_send_response, payloadsByTopicPart,
deferredsByTopicPart)
return d
|
python
|
{
"resource": ""
}
|
q5327
|
Producer._complete_batch_send
|
train
|
def _complete_batch_send(self, resp):
"""Complete the processing of our batch send operation
Clear the deferred tracking our current batch processing
and reset our retry count and retry interval
Return None to eat any errors coming up the deferred chain
"""
self._batch_send_d = None
self._req_attempts = 0
self._retry_interval = self._init_retry_interval
if isinstance(resp, Failure) and not resp.check(tid_CancelledError,
CancelledError):
log.error("Failure detected in _complete_batch_send: %r\n%r",
resp, resp.getTraceback())
return
|
python
|
{
"resource": ""
}
|
q5328
|
Producer._send_batch
|
train
|
def _send_batch(self):
"""
Send the waiting messages, if there are any, and we can...
This is called by our LoopingCall every send_every_t interval, and
from send_messages every time we have enough messages to send.
This is also called from :py:meth:`send_messages` via
:py:meth:`_check_send_batch` if there are enough messages/bytes
to require a send.
Note, the send will be delayed (triggered by completion or failure of
previous) if we are currently trying to complete the last batch send.
"""
# We can be triggered by the LoopingCall, and have nothing to send...
# Or, we've got SendRequest(s) to send, but are still processing the
# previous batch...
if (not self._batch_reqs) or self._batch_send_d:
return
# Save a local copy, and clear the global list & metrics
requests, self._batch_reqs = self._batch_reqs, []
self._waitingByteCount = 0
self._waitingMsgCount = 0
# Iterate over them, fetching the partition for each message batch
d_list = []
for req in requests:
# For each request, we get the topic & key and use that to lookup
# the next partition on which we should produce
d_list.append(self._next_partition(req.topic, req.key))
d = self._batch_send_d = Deferred()
# Since DeferredList doesn't propagate cancel() calls to deferreds it
# might be waiting on for a result, we need to use this structure,
# rather than just using the DeferredList directly
d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True))
d.addCallback(self._send_requests, requests)
# Once we finish fully processing the current batch, clear the
# _batch_send_d and check if any more requests piled up when we
# were busy.
d.addBoth(self._complete_batch_send)
d.addBoth(self._check_send_batch)
# Fire off the callback to start processing...
d.callback(None)
|
python
|
{
"resource": ""
}
|
q5329
|
Producer._handle_send_response
|
train
|
def _handle_send_response(self, result, payloadsByTopicPart,
deferredsByTopicPart):
"""Handle the response from our client to our send_produce_request
This is a bit complex. Failures can happen in a few ways:
1. The client sent an empty list, False, None or some similar thing
as the result, but we were expecting real responses.
2. The client had a failure before it even tried sending any requests
to any brokers.
a. Kafka error: See if we can retry the whole request
b. Non-kafka: Figure it's a programming error, fail all deferreds
3. The client sent all the requests (it's all or none) to the brokers
but one or more requests failed (timed out before receiving a
response, or the brokerclient threw some sort of exception on send).
In this case, the client throws FailedPayloadsError, and attaches
the responses (NOTE: some can have errors!), and the payloads
where the send itself failed to the exception.
4. The client sent all the requests, all responses were received, but
the Kafka broker indicated an error with servicing the request on
some of the responses.
"""
def _deliver_result(d_list, result=None):
"""Possibly callback each deferred in a list with single result"""
for d in d_list:
if not isinstance(d, Deferred):
# nested list...
_deliver_result(d, result)
else:
# We check d.called since the request could have been
# cancelled while we waited for the response
if not d.called:
d.callback(result)
def _do_retry(payloads):
# We use 'fail_on_error=False' because we want our client to
# process every response that comes back from the brokers so
# we can determine which requests were successful, and which
# failed for retry
d = self.client.send_produce_request(
payloads, acks=self.req_acks, timeout=self.ack_timeout,
fail_on_error=False)
self._req_attempts += 1
# add our handlers
d.addBoth(self._handle_send_response, payloadsByTopicPart,
deferredsByTopicPart)
return d
def _cancel_retry(failure, dc):
# Cancel the retry callLater and pass-thru the failure
dc.cancel()
# cancel all the top-level deferreds associated with the request
_deliver_result(deferredsByTopicPart.values(), failure)
return failure
def _check_retry_payloads(failed_payloads_with_errs):
"""Check our retry count and retry after a delay or errback
If we have more retries to try, create a deferred that will fire
with the result of delayed retry. If not, errback the remaining
deferreds with failure
Params:
failed_payloads_with_errs - list of (payload, failure) tuples
"""
# Do we have retries left?
if self._req_attempts >= self._max_attempts:
# No, no retries left, fail each failed_payload with its
# associated failure
for p, f in failed_payloads_with_errs:
t_and_p = TopicAndPartition(p.topic, p.partition)
_deliver_result(deferredsByTopicPart[t_and_p], f)
return
# Retries remain! Schedule one...
d = Deferred()
dc = self.client.reactor.callLater(
self._retry_interval, d.callback, [p for p, f in
failed_payloads])
self._retry_interval *= self.RETRY_INTERVAL_FACTOR
# Cancel the callLater when request is cancelled before it fires
d.addErrback(_cancel_retry, dc)
# Reset the topic metadata for all topics which had failed_requests
# where the failures were of the kind UnknownTopicOrPartitionError
# or NotLeaderForPartitionError, since those indicate our client's
# metadata is out of date.
reset_topics = set()
for payload, e in failed_payloads:
if (isinstance(e, NotLeaderForPartitionError) or
isinstance(e, UnknownTopicOrPartitionError)):
reset_topics.add(payload.topic)
if reset_topics:
self.client.reset_topic_metadata(*reset_topics)
d.addCallback(_do_retry)
return d
# The payloads we need to retry, if we still can..
failed_payloads = []
# In the case we are sending requests without requiring acks, the
# brokerclient will immediately callback() the deferred upon send with
# None. In that case, we just iterate over all the deferreds in
# deferredsByTopicPart and callback them with None
# If we are expecting responses/acks, and we get an empty result, we
# callback with a Failure of NoResponseError
if not result:
# Success, but no results, is that what we're expecting?
if self.req_acks == PRODUCER_ACK_NOT_REQUIRED:
result = None
else:
# We got no result, but we were expecting one? Fail everything!
result = Failure(NoResponseError())
_deliver_result(deferredsByTopicPart.values(), result)
return
elif isinstance(result, Failure):
# Failure! Was it total, or partial?
if not result.check(FailedPayloadsError):
# Total failure of some sort!
# The client was unable to send the request at all. If it's
# a KafkaError (probably Leader/Partition unavailable), retry
if result.check(KafkaError):
# Yep, a Kafka error. Set failed_payloads, and we'll retry
# them all below. Set failure for errback to callers if we
# are all out of retries
failure, result = result, [] # no successful results, retry
failed_payloads = [(p, failure) for p in
payloadsByTopicPart.values()]
else:
# Was the request cancelled?
if not result.check(tid_CancelledError):
# Uh Oh, programming error? Log it!
log.error("Unexpected failure: %r in "
"_handle_send_response", result)
# Cancelled, or programming error, we fail the requests
_deliver_result(deferredsByTopicPart.values(), result)
return
else:
# FailedPayloadsError: This means that some/all of the
# requests to a/some brokerclients failed to send.
# Pull the successful responses and the failed_payloads off
# the exception and handle them below. Preserve the
# FailedPayloadsError as 'failure'
failure = result
result = failure.value.args[0]
failed_payloads = failure.value.args[1]
# Do we have results? Iterate over them and if the response indicates
# success, then callback the associated deferred. If the response
# indicates an error, then setup that request for retry.
# NOTE: In this case, each failed_payload gets its own error...
for res in result:
t_and_p = TopicAndPartition(res.topic, res.partition)
t_and_p_err = _check_error(res, raiseException=False)
if not t_and_p_err:
# Success for this topic/partition
d_list = deferredsByTopicPart[t_and_p]
_deliver_result(d_list, res)
else:
p = payloadsByTopicPart[t_and_p]
failed_payloads.append((p, t_and_p_err))
# Were there any failed requests to possibly retry?
if failed_payloads:
return _check_retry_payloads(failed_payloads)
return
|
python
|
{
"resource": ""
}
|
q5330
|
Producer._cancel_outstanding
|
train
|
def _cancel_outstanding(self):
"""Cancel all of our outstanding requests"""
for d in list(self._outstanding):
d.addErrback(lambda _: None) # Eat any uncaught errors
d.cancel()
|
python
|
{
"resource": ""
}
|
q5331
|
KafkaClient.reset_consumer_group_metadata
|
train
|
def reset_consumer_group_metadata(self, *groups):
"""Reset cache of what broker manages the offset for specified groups
Remove the cache of what Kafka broker should be contacted when
fetching or updating the committed offsets for a given consumer
group or groups.
NOTE: Does not cancel any outstanding requests for updates to the
consumer group metadata for the specified groups.
"""
groups = tuple(_coerce_consumer_group(g) for g in groups)
for group in groups:
if group in self.consumer_group_to_brokers:
del self.consumer_group_to_brokers[group]
|
python
|
{
"resource": ""
}
|
q5332
|
KafkaClient.reset_all_metadata
|
train
|
def reset_all_metadata(self):
"""Clear all cached metadata
Metadata will be re-fetched as required to satisfy requests.
"""
self.topics_to_brokers.clear()
self.topic_partitions.clear()
self.topic_errors.clear()
self.consumer_group_to_brokers.clear()
|
python
|
{
"resource": ""
}
|
q5333
|
KafkaClient.topic_fully_replicated
|
train
|
def topic_fully_replicated(self, topic):
"""
Determine if the given topic is fully replicated according to the
currently known cluster metadata.
.. note::
This relies on cached cluster metadata. You may call
:meth:`load_metadata_for_topics()` first to refresh this cache.
:param str topic: Topic name
:returns:
A boolean indicating that:
1. The number of partitions in the topic is non-zero.
2. For each partition, all replicas are in the in-sync replica
(ISR) set.
:rtype: :class:`bool`
"""
topic = _coerce_topic(topic)
if topic not in self.topic_partitions:
return False
if not self.topic_partitions[topic]:
# Don't consider an empty partition list 'fully replicated'
return False
return all(
self.partition_fully_replicated(TopicAndPartition(topic, p))
for p in self.topic_partitions[topic]
)
|
python
|
{
"resource": ""
}
|
q5334
|
KafkaClient.close
|
train
|
def close(self):
"""Permanently dispose of the client
- Immediately mark the client as closed, causing current operations to
fail with :exc:`~afkak.common.CancelledError` and future operations to
fail with :exc:`~afkak.common.ClientError`.
- Clear cached metadata.
- Close any connections to Kafka brokers.
:returns:
deferred that fires when all resources have been released
"""
# If we're already waiting on an/some outstanding disconnects
# make sure we continue to wait for them...
log.debug("%r: close", self)
self._closing = True
# Close down any clients we have
brokerclients, self.clients = self.clients, None
self._close_brokerclients(brokerclients.values())
# clean up other outstanding operations
self.reset_all_metadata()
return self.close_dlist or defer.succeed(None)
|
python
|
{
"resource": ""
}
|
q5335
|
KafkaClient.load_metadata_for_topics
|
train
|
def load_metadata_for_topics(self, *topics):
"""Discover topic metadata and brokers
Afkak internally calls this method whenever metadata is required.
:param str topics:
Topic names to look up. The resulting metadata includes the list of
topic partitions, brokers owning those partitions, and which
partitions are in sync.
Fetching metadata for a topic may trigger auto-creation if that is
enabled on the Kafka broker.
When no topic name is given metadata for *all* topics is fetched.
This is an expensive operation, but it does not trigger topic
creation.
:returns:
:class:`Deferred` for the completion of the metadata fetch.
This will fire with ``True`` on success, ``None`` on
cancellation, or fail with an exception on error.
On success, topic metadata is available from the attributes of
:class:`KafkaClient`: :data:`~KafkaClient.topic_partitions`,
:data:`~KafkaClient.topics_to_brokers`, etc.
"""
topics = tuple(_coerce_topic(t) for t in topics)
log.debug("%r: load_metadata_for_topics(%s)", self, ', '.join(repr(t) for t in topics))
fetch_all_metadata = not topics
# create the request
requestId = self._next_id()
request = KafkaCodec.encode_metadata_request(self._clientIdBytes,
requestId, topics)
# Callbacks for the request deferred...
def _handleMetadataResponse(response):
# Decode the response
brokers, topics = KafkaCodec.decode_metadata_response(response)
log.debug("%r: got metadata brokers=%r topics=%r", self, brokers, topics)
# If we fetched the metadata for all topics, then store away the
# received metadata for diagnostics.
if fetch_all_metadata:
self._brokers = brokers
self._topics = topics
# Iff we were fetching for all topics, and we got at least one
# broker back, then remove brokers when we update our brokers
ok_to_remove = (fetch_all_metadata and len(brokers))
# Take the metadata we got back, update our self.clients, and
# if needed disconnect or connect from/to old/new brokers
self._update_brokers(brokers.values(), remove=ok_to_remove)
# Now loop through all the topics/partitions in the response
# and setup our cache/data-structures
for topic, topic_metadata in topics.items():
_, topic_error, partitions = topic_metadata
self.reset_topic_metadata(topic)
self.topic_errors[topic] = topic_error
if not partitions:
log.warning('No partitions for %s, Err:%d',
topic, topic_error)
continue
self.topic_partitions[topic] = []
for partition, meta in partitions.items():
self.topic_partitions[topic].append(partition)
topic_part = TopicAndPartition(topic, partition)
self.partition_meta[topic_part] = meta
if meta.leader == -1:
log.warning('No leader for topic %s partition %s',
topic, partition)
self.topics_to_brokers[topic_part] = None
else:
self.topics_to_brokers[
topic_part] = brokers[meta.leader]
self.topic_partitions[topic] = sorted(
self.topic_partitions[topic])
return True
def _handleMetadataErr(err):
# This should maybe do more cleanup?
if err.check(t_CancelledError, CancelledError):
# Eat the error
# XXX Shouldn't this return False? The success branch
# returns True.
return None
log.error("Failed to retrieve metadata:%s", err)
raise KafkaUnavailableError(
"Unable to load metadata from configured "
"hosts: {!r}".format(err))
# Send the request, add the handlers
d = self._send_broker_unaware_request(requestId, request)
d.addCallbacks(_handleMetadataResponse, _handleMetadataErr)
return d
|
python
|
{
"resource": ""
}
|
q5336
|
KafkaClient.load_consumer_metadata_for_group
|
train
|
def load_consumer_metadata_for_group(self, group):
"""
Determine broker for the consumer metadata for the specified group
Returns a deferred which callbacks with True if the group's coordinator
could be determined, or errbacks with
ConsumerCoordinatorNotAvailableError if not.
Parameters
----------
group:
group name as `str`
"""
group = _coerce_consumer_group(group)
log.debug("%r: load_consumer_metadata_for_group(%r)", self, group)
# If we are already loading the metadata for this group, then
# just return the outstanding deferred
if group in self.coordinator_fetches:
d = defer.Deferred()
self.coordinator_fetches[group][1].append(d)
return d
# No outstanding request, create a new one
requestId = self._next_id()
request = KafkaCodec.encode_consumermetadata_request(
self._clientIdBytes, requestId, group)
# Callbacks for the request deferred...
def _handleConsumerMetadataResponse(response_bytes):
# Decode the response (returns ConsumerMetadataResponse)
response = KafkaCodec.decode_consumermetadata_response(response_bytes)
log.debug("%r: load_consumer_metadata_for_group(%r) -> %r", self, group, response)
if response.error:
raise BrokerResponseError.errnos.get(response.error, UnknownError)(response)
bm = BrokerMetadata(response.node_id, response.host, response.port)
self.consumer_group_to_brokers[group] = bm
self._update_brokers([bm])
return True
def _handleConsumerMetadataErr(err):
log.error("Failed to retrieve consumer metadata for group %r", group,
exc_info=(err.type, err.value, err.getTracebackObject()))
# Clear any stored value for the group's coordinator
self.reset_consumer_group_metadata(group)
# FIXME: This exception should chain from err.
raise ConsumerCoordinatorNotAvailableError(
"Coordinator for group {!r} not available".format(group),
)
def _propagate(result):
[_, ds] = self.coordinator_fetches.pop(group, None)
for d in ds:
d.callback(result)
# Send the request, add the handlers
request_d = self._send_broker_unaware_request(requestId, request)
d = defer.Deferred()
# Save the deferred under the fetches for this group
self.coordinator_fetches[group] = (request_d, [d])
request_d.addCallback(_handleConsumerMetadataResponse)
request_d.addErrback(_handleConsumerMetadataErr)
request_d.addBoth(_propagate)
return d
|
python
|
{
"resource": ""
}
|
q5337
|
KafkaClient.send_offset_commit_request
|
train
|
def send_offset_commit_request(self, group, payloads=None,
fail_on_error=True, callback=None,
group_generation_id=-1,
consumer_id=''):
"""Send a list of OffsetCommitRequests to the Kafka broker for the
given consumer group.
Args:
group (str): The consumer group to which to commit the offsets
payloads ([OffsetCommitRequest]): List of topic, partition, offsets
to commit.
fail_on_error (bool): Whether to raise an exception if a response
from the Kafka broker indicates an error
callback (callable): a function to call with each of the responses
before returning the returned value to the caller.
group_generation_id (int): Must currently always be -1
consumer_id (str): Must currently always be empty string
Returns:
[OffsetCommitResponse]: List of OffsetCommitResponse objects.
Will raise KafkaError for failed requests if fail_on_error is True
"""
group = _coerce_consumer_group(group)
encoder = partial(KafkaCodec.encode_offset_commit_request,
group=group, group_generation_id=group_generation_id,
consumer_id=consumer_id)
decoder = KafkaCodec.decode_offset_commit_response
resps = yield self._send_broker_aware_request(
payloads, encoder, decoder, consumer_group=group)
returnValue(self._handle_responses(
resps, fail_on_error, callback, group))
|
python
|
{
"resource": ""
}
|
q5338
|
KafkaClient._get_brokerclient
|
train
|
def _get_brokerclient(self, node_id):
"""
Get a broker client.
:param int node_id: Broker node ID
:raises KeyError: for an unknown node ID
:returns: :class:`_KafkaBrokerClient`
"""
if self._closing:
raise ClientError("Cannot get broker client for node_id={}: {} has been closed".format(node_id, self))
if node_id not in self.clients:
broker_metadata = self._brokers[node_id]
log.debug("%r: creating client for %s", self, broker_metadata)
self.clients[node_id] = _KafkaBrokerClient(
self.reactor, self._endpoint_factory,
broker_metadata, self.clientId, self._retry_policy,
)
return self.clients[node_id]
|
python
|
{
"resource": ""
}
|
q5339
|
KafkaClient._close_brokerclients
|
train
|
def _close_brokerclients(self, clients):
"""
Close the given broker clients.
:param clients: Iterable of `_KafkaBrokerClient`
"""
def _log_close_failure(failure, brokerclient):
log.debug(
'BrokerClient: %s close result: %s: %s', brokerclient,
failure.type.__name__, failure.getErrorMessage())
def _clean_close_dlist(result, close_dlist):
# If there aren't any other outstanding closings going on, then
# close_dlist == self.close_dlist, and we can reset it.
if close_dlist == self.close_dlist:
self.close_dlist = None
if not self.close_dlist:
dList = []
else:
log.debug("%r: _close_brokerclients has nested deferredlist: %r",
self, self.close_dlist)
dList = [self.close_dlist]
for brokerClient in clients:
log.debug("Calling close on: %r", brokerClient)
d = brokerClient.close().addErrback(_log_close_failure, brokerClient)
dList.append(d)
self.close_dlist = DeferredList(dList)
self.close_dlist.addBoth(_clean_close_dlist, self.close_dlist)
|
python
|
{
"resource": ""
}
|
q5340
|
KafkaClient._update_brokers
|
train
|
def _update_brokers(self, brokers, remove=False):
"""
Update `self._brokers` and `self.clients`
Update our self.clients based on brokers in received metadata
Take the received dict of brokers and reconcile it with our current
list of brokers (self.clients). If there is a new one, bring up a new
connection to it, and if remove is True, and any in our current list
aren't in the metadata returned, disconnect from it.
:param brokers: Iterable of `BrokerMetadata`. A client will be created
for every broker given if it doesn't yet exist.
:param bool remove:
Is this metadata for *all* brokers? If so, clients for brokers
which are no longer found in the metadata will be closed.
"""
log.debug("%r: _update_brokers(%r, remove=%r)",
self, brokers, remove)
brokers_by_id = {bm.node_id: bm for bm in brokers}
self._brokers.update(brokers_by_id)
# Update the metadata of broker clients that already exist.
for node_id, broker_meta in brokers_by_id.items():
if node_id not in self.clients:
continue
self.clients[node_id].updateMetadata(broker_meta)
# Remove any clients for brokers which no longer exist.
if remove:
to_close = [
self.clients.pop(node_id)
for node_id in set(self.clients) - set(brokers_by_id)
]
if to_close:
self._close_brokerclients(to_close)
|
python
|
{
"resource": ""
}
|
q5341
|
KafkaClient._make_request_to_broker
|
train
|
def _make_request_to_broker(self, broker, requestId, request, **kwArgs):
"""Send a request to the specified broker."""
def _timeout_request(broker, requestId):
"""The time we allotted for the request expired, cancel it."""
try:
# FIXME: This should be done by calling .cancel() on the Deferred
# returned by the broker client.
broker.cancelRequest(requestId, reason=RequestTimedOutError(
'Request: {} cancelled due to timeout'.format(requestId)))
except KeyError: # pragma: no cover This should never happen...
log.exception('ERROR: Failed to find key for timed-out '
'request. Broker: %r Req: %d',
broker, requestId)
raise
if self._disconnect_on_timeout:
broker.disconnect()
def _alert_blocked_reactor(timeout, start):
"""Complain if this timer didn't fire before the timeout elapsed"""
now = self.reactor.seconds()
if now >= (start + timeout):
log.warning('Reactor was starved for %r seconds', now - start)
def _cancel_timeout(result, dc):
"""Request completed/cancelled, cancel the timeout delayedCall."""
if dc.active():
dc.cancel()
return result
# Make the request to the specified broker
log.debug('_mrtb: sending request: %d to broker: %r',
requestId, broker)
d = broker.makeRequest(requestId, request, **kwArgs)
# Set a delayedCall to fire if we don't get a reply in time
dc = self.reactor.callLater(
self.timeout, _timeout_request, broker, requestId)
# Set a delayedCall to complain if the reactor has been blocked
rc = self.reactor.callLater(
(self.timeout * 0.9), _alert_blocked_reactor, self.timeout,
self.reactor.seconds())
# Setup a callback on the request deferred to cancel both callLater
d.addBoth(_cancel_timeout, dc)
d.addBoth(_cancel_timeout, rc)
return d
|
python
|
{
"resource": ""
}
|
q5342
|
KafkaClient._send_bootstrap_request
|
train
|
def _send_bootstrap_request(self, request):
"""Make a request using an ephemeral broker connection
This routine is used to make broker-unaware requests to get the initial
cluster metadata. It cycles through the configured hosts, trying to
connect and send the request to each in turn. This temporary connection
is closed once a response is received.
Note that most Kafka APIs require requests be sent to a specific
broker. This method will only function for broker-agnostic requests
like:
* `Metadata <https://kafka.apache.org/protocol.html#The_Messages_Metadata>`_
* `FindCoordinator <https://kafka.apache.org/protocol.html#The_Messages_FindCoordinator>`_
:param bytes request:
The bytes of a Kafka `RequestMessage`_ structure. It must have
a unique (to this connection) correlation ID.
:returns: API response message for *request*
:rtype: Deferred[bytes]
:raises:
- `KafkaUnavailableError` when making the request of all known hosts
has failed.
- `twisted.internet.defer.TimeoutError` when connecting or making
a request exceeds the timeout.
"""
hostports = list(self._bootstrap_hosts)
random.shuffle(hostports)
for host, port in hostports:
ep = self._endpoint_factory(self.reactor, host, port)
try:
protocol = yield ep.connect(_bootstrapFactory)
except Exception as e:
log.debug("%s: bootstrap connect to %s:%s -> %s", self, host, port, e)
continue
try:
response = yield protocol.request(request).addTimeout(self.timeout, self.reactor)
except Exception:
log.debug("%s: bootstrap request to %s:%s failed", self, host, port, exc_info=True)
else:
returnValue(response)
finally:
protocol.transport.loseConnection()
raise KafkaUnavailableError("Failed to bootstrap from hosts {}".format(hostports))
|
python
|
{
"resource": ""
}
|
q5343
|
ExpressionDescriptor.get_description
|
train
|
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
|
python
|
{
"resource": ""
}
|
q5344
|
ExpressionDescriptor.get_full_description
|
train
|
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formatting fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
|
python
|
{
"resource": ""
}
|
q5345
|
ExpressionDescriptor.get_time_of_day_description
|
train
|
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (i.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
|
python
|
{
"resource": ""
}
|
q5346
|
ExpressionDescriptor.get_seconds_description
|
train
|
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
|
python
|
{
"resource": ""
}
|
q5347
|
ExpressionDescriptor.get_minutes_description
|
train
|
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
|
python
|
{
"resource": ""
}
|
q5348
|
ExpressionDescriptor.get_hours_description
|
train
|
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
|
python
|
{
"resource": ""
}
|
q5349
|
ExpressionDescriptor.get_day_of_week_description
|
train
|
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent a contradiction like "on day 1 of the month, every day"
# we will not specify a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
|
python
|
{
"resource": ""
}
|
q5350
|
ExpressionDescriptor.get_month_description
|
train
|
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
|
python
|
{
"resource": ""
}
|
q5351
|
ExpressionDescriptor.get_day_of_month_description
|
train
|
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
|
python
|
{
"resource": ""
}
|
q5352
|
ExpressionDescriptor.get_year_description
|
train
|
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
|
python
|
{
"resource": ""
}
|
q5353
|
ExpressionDescriptor.number_to_day
|
train
|
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
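# Editorial illustration (not part of the original snippet): CRON numbers the
# days 0-6 starting at Sunday, while calendar.day_name starts at Monday, so the
# lookup table above rotates the week by one position.
import calendar
cron_day_names = [calendar.day_name[(n - 1) % 7] for n in range(7)]
assert cron_day_names[0] == calendar.day_name[6]  # CRON 0 -> Sunday
assert cron_day_names[1] == calendar.day_name[0]  # CRON 1 -> Monday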
|
python
|
{
"resource": ""
}
|
q5354
|
Consumer.shutdown
|
train
|
def shutdown(self):
"""Gracefully shutdown the consumer
Consumer will complete any outstanding processing, commit its current
offsets (if so configured) and stop.
Returns a deferred which fires with a tuple of
(last processed offset, last committed offset) if the consumer was able to
commit successfully, errbacks with the commit failure if one occurred,
or fails with RestopError if the consumer is not running.
"""
def _handle_shutdown_commit_success(result):
"""Handle the result of the commit attempted by shutdown"""
self._shutdown_d, d = None, self._shutdown_d
self.stop()
self._shuttingdown = False # Shutdown complete
d.callback((self._last_processed_offset,
self._last_committed_offset))
def _handle_shutdown_commit_failure(failure):
"""Handle failure of commit() attempted by shutdown"""
if failure.check(OperationInProgress):
failure.value.deferred.addCallback(_commit_and_stop)
return
self._shutdown_d, d = None, self._shutdown_d
self.stop()
self._shuttingdown = False # Shutdown complete
d.errback(failure)
def _commit_and_stop(result):
"""Commit the current offsets (if needed) and stop the consumer"""
if not self.consumer_group: # No consumer group, no committing
return _handle_shutdown_commit_success(None)
# Need to commit prior to stopping
self.commit().addCallbacks(_handle_shutdown_commit_success,
_handle_shutdown_commit_failure)
# If we're not running, return a failure
if self._start_d is None:
return fail(Failure(
RestopError("Shutdown called on non-running consumer")))
# If we're called multiple times, return a failure
if self._shutdown_d:
return fail(Failure(
RestopError("Shutdown called more than once.")))
# Set our _shuttingdown flag, so our _process_message routine will stop
# feeding new messages to the processor, and fetches won't be retried
self._shuttingdown = True
# Keep track of state for debugging
self._state = '[shutting down]'
# Create a deferred to track the shutdown
self._shutdown_d = d = Deferred()
# Are we waiting for the processor to complete? If so, when it's done,
# commit our offsets and stop.
if self._processor_d:
self._processor_d.addCallback(_commit_and_stop)
else:
# No need to wait for the processor, we can commit and stop now
_commit_and_stop(None)
# return the deferred
return d
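# Hedged usage sketch (editorial addition; `consumer` is assumed to be a started
# afkak Consumer and `log` a module logger): request a graceful shutdown and
# report the offsets once the final commit has completed.
d = consumer.shutdown()
d.addCallback(lambda offsets: log.info(
    "shutdown complete: processed=%r committed=%r", offsets[0], offsets[1]))
d.addErrback(lambda failure: log.error("shutdown failed: %r", failure))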
|
python
|
{
"resource": ""
}
|
q5355
|
Consumer.stop
|
train
|
def stop(self):
"""
Stop the consumer and return offset of last processed message. This
cancels all outstanding operations. Also, if the deferred returned
by `start` hasn't been called, it is called with a tuple consisting
of the last processed offset and the last committed offset.
:raises: :exc:`RestopError` if the :class:`Consumer` is not running.
"""
if self._start_d is None:
raise RestopError("Stop called on non-running consumer")
self._stopping = True
# Keep track of state for debugging
self._state = '[stopping]'
# Are we waiting for a request to come back?
if self._request_d:
self._request_d.cancel()
# Are we working our way through a block of messages?
if self._msg_block_d:
# Need to add a cancel handler...
_msg_block_d, self._msg_block_d = self._msg_block_d, None
_msg_block_d.addErrback(lambda fail: fail.trap(CancelledError))
_msg_block_d.cancel()
# Are we waiting for the processor to complete?
if self._processor_d:
self._processor_d.cancel()
# Are we waiting to retry a request?
if self._retry_call:
self._retry_call.cancel()
# Are we waiting on a commit request?
if self._commit_ds:
while self._commit_ds:
d = self._commit_ds.pop()
d.cancel()
if self._commit_req:
self._commit_req.cancel()
# Are we waiting to retry a commit?
if self._commit_call:
self._commit_call.cancel()
# Do we have an auto-commit looping call?
if self._commit_looper is not None:
self._commit_looper.stop()
# Done stopping
self._stopping = False
# Keep track of state for debugging
self._state = '[stopped]'
# Clear and possibly callback our start() Deferred
self._start_d, d = None, self._start_d
if not d.called:
d.callback((self._last_processed_offset,
self._last_committed_offset))
# Return the offset of the message we last processed
return self._last_processed_offset
|
python
|
{
"resource": ""
}
|
q5356
|
Consumer.commit
|
train
|
def commit(self):
"""
Commit the offset of the message we last processed if it is different
from what we believe is the last offset committed to Kafka.
.. note::
It is possible to commit a smaller offset than Kafka has stored.
This is by design, so we can reprocess a Kafka message stream if
desired.
On error, will retry according to :attr:`request_retry_max_attempts`
(by default, forever).
If called while a commit operation is in progress, and new messages
have been processed since the last request was sent then the commit
will fail with :exc:`OperationInProgress`. The
:exc:`OperationInProgress` exception wraps
a :class:`~twisted.internet.defer.Deferred` which fires when the
outstanding commit operation completes.
:returns:
A :class:`~twisted.internet.defer.Deferred` which resolves with the
committed offset when the operation has completed. It will resolve
immediately if the current offset and the last committed offset do
not differ.
"""
# Can't commit without a consumer_group
if not self.consumer_group:
return fail(Failure(InvalidConsumerGroupError(
"Bad Group_id:{0!r}".format(self.consumer_group))))
# short circuit if we are 'up to date', or haven't processed anything
if ((self._last_processed_offset is None) or
(self._last_processed_offset == self._last_committed_offset)):
return succeed(self._last_committed_offset)
# If we're currently processing a commit we return a failure
# with a deferred we'll fire when the in-progress one completes
if self._commit_ds:
d = Deferred()
self._commit_ds.append(d)
return fail(OperationInProgress(d))
# Ok, we have processed messages since our last commit attempt, and
# we're not currently waiting on a commit request to complete:
# Start a new one
d = Deferred()
self._commit_ds.append(d)
# Send the request
self._send_commit_request()
# Reset the commit_looper here, rather than on success to give
# more stability to the commit interval.
if self._commit_looper is not None:
self._commit_looper.reset()
# return the deferred
return d
|
python
|
{
"resource": ""
}
|
q5357
|
Consumer._auto_commit
|
train
|
def _auto_commit(self, by_count=False):
"""Check if we should start a new commit operation and commit"""
# Check if we are even supposed to do any auto-committing
if (self._stopping or self._shuttingdown or (not self._start_d) or
(self._last_processed_offset is None) or
(not self.consumer_group) or
(by_count and not self.auto_commit_every_n)):
return
# If we're auto_committing because the timer expired, or by count and
# we don't have a record of our last_committed_offset, or we've
# processed enough messages since our last commit, then try to commit
if (not by_count or self._last_committed_offset is None or
(self._last_processed_offset - self._last_committed_offset
) >= self.auto_commit_every_n):
if not self._commit_ds:
commit_d = self.commit()
commit_d.addErrback(self._handle_auto_commit_error)
else:
# We're waiting on the last commit to complete, so add a
# callback to be called when the current request completes
d = Deferred()
d.addCallback(self._retry_auto_commit, by_count)
self._commit_ds.append(d)
|
python
|
{
"resource": ""
}
|
q5358
|
Consumer._handle_offset_response
|
train
|
def _handle_offset_response(self, response):
"""
Handle responses to both OffsetRequest and OffsetFetchRequest, since
they are similar enough.
:param response:
A tuple of a single OffsetFetchResponse or OffsetResponse
"""
# Got a response, clear our outstanding request deferred
self._request_d = None
# Successful request, reset our retry delay, count, etc
self.retry_delay = self.retry_init_delay
self._fetch_attempt_count = 1
response = response[0]
if hasattr(response, 'offsets'):
# It's a response to an OffsetRequest
self._fetch_offset = response.offsets[0]
else:
# It's a response to an OffsetFetchRequest
# Make sure we got a valid offset back. Kafka uses -1 to indicate
# no committed offset was retrieved
if response.offset == OFFSET_NOT_COMMITTED:
self._fetch_offset = OFFSET_EARLIEST
else:
self._fetch_offset = response.offset + 1
self._last_committed_offset = response.offset
self._do_fetch()
|
python
|
{
"resource": ""
}
|
q5359
|
Consumer._handle_offset_error
|
train
|
def _handle_offset_error(self, failure):
"""
Retry the offset fetch request if appropriate.
Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we
log a warning. This should perhaps be extended to abort sooner on
certain errors.
"""
# outstanding request got errback'd, clear it
self._request_d = None
if self._stopping and failure.check(CancelledError):
# Not really an error
return
# Do we need to abort?
if (self.request_retry_max_attempts != 0 and
self._fetch_attempt_count >= self.request_retry_max_attempts):
log.debug(
"%r: Exhausted attempts: %d fetching offset from kafka: %r",
self, self.request_retry_max_attempts, failure)
self._start_d.errback(failure)
return
# Decide how to log this failure... If we have retried so many times
# we're at the retry_max_delay, then we log at warning every other time
# debug otherwise
if (self.retry_delay < self.retry_max_delay or
0 == (self._fetch_attempt_count % 2)):
log.debug("%r: Failure fetching offset from kafka: %r", self,
failure)
else:
# We've retried until we hit the max delay, log at warn
log.warning("%r: Still failing fetching offset from kafka: %r",
self, failure)
self._retry_fetch()
|
python
|
{
"resource": ""
}
|
q5360
|
Consumer._send_commit_request
|
train
|
def _send_commit_request(self, retry_delay=None, attempt=None):
"""Send a commit request with our last_processed_offset"""
# If there's a _commit_call, and it's not active, clear it, it probably
# just called us...
if self._commit_call and not self._commit_call.active():
self._commit_call = None
# Make sure we only have one outstanding commit request at a time
if self._commit_req is not None:
raise OperationInProgress(self._commit_req)
# Handle defaults
if retry_delay is None:
retry_delay = self.retry_init_delay
if attempt is None:
attempt = 1
# Create new OffsetCommitRequest with the latest processed offset
commit_offset = self._last_processed_offset
commit_request = OffsetCommitRequest(
self.topic, self.partition, commit_offset,
TIMESTAMP_INVALID, self.commit_metadata)
log.debug("Committing off=%d grp=%s tpc=%s part=%s req=%r",
self._last_processed_offset, self.consumer_group,
self.topic, self.partition, commit_request)
# Send the request, add our callbacks
self._commit_req = d = self.client.send_offset_commit_request(
self.consumer_group, [commit_request])
d.addBoth(self._clear_commit_req)
d.addCallbacks(
self._update_committed_offset, self._handle_commit_error,
callbackArgs=(commit_offset,),
errbackArgs=(retry_delay, attempt))
|
python
|
{
"resource": ""
}
|
q5361
|
Consumer._handle_commit_error
|
train
|
def _handle_commit_error(self, failure, retry_delay, attempt):
""" Retry the commit request, depending on failure type
Depending on the type of the failure, we retry the commit request
with the latest processed offset, or callback/errback self._commit_ds
"""
# Check if we are stopping and the request was cancelled
if self._stopping and failure.check(CancelledError):
# Not really an error
return self._deliver_commit_result(self._last_committed_offset)
# Check that the failure type is a Kafka error...this could maybe be
# a tighter check to determine whether a retry will succeed...
if not failure.check(KafkaError):
log.error("Unhandleable failure during commit attempt: %r\n\t%r",
failure, failure.getBriefTraceback())
return self._deliver_commit_result(failure)
# Do we need to abort?
if (self.request_retry_max_attempts != 0 and
attempt >= self.request_retry_max_attempts):
log.debug("%r: Exhausted attempts: %d to commit offset: %r",
self, self.request_retry_max_attempts, failure)
return self._deliver_commit_result(failure)
# Check the retry_delay to see if we should log at the higher level
# Using attempts % 2 gets us 1 warn/minute with default timings
if retry_delay < self.retry_max_delay or 0 == (attempt % 2):
log.debug("%r: Failure committing offset to kafka: %r", self,
failure)
else:
# We've retried until we hit the max delay, log alternately at warn
log.warning("%r: Still failing committing offset to kafka: %r",
self, failure)
# Schedule a delayed call to retry the commit
retry_delay = min(retry_delay * REQUEST_RETRY_FACTOR,
self.retry_max_delay)
self._commit_call = self.client.reactor.callLater(
retry_delay, self._send_commit_request, retry_delay, attempt + 1)
|
python
|
{
"resource": ""
}
|
q5362
|
Consumer._handle_processor_error
|
train
|
def _handle_processor_error(self, failure):
"""Handle a failure in the processing of a block of messages
This method is called when the processor func fails while processing
a block of messages. Since we can't know how best to handle a
processor failure, we just :func:`errback` our :func:`start` method's
deferred to let our user know about the failure.
"""
# Check if we're stopping/stopped and the errback of the processor
# deferred is just the cancelling we initiated. If so, we skip
# notifying via the _start_d deferred, as it will be 'callback'd at the
# end of stop()
if not (self._stopping and failure.check(CancelledError)):
if self._start_d: # Make sure we're not already stopped
self._start_d.errback(failure)
|
python
|
{
"resource": ""
}
|
q5363
|
Consumer._handle_fetch_error
|
train
|
def _handle_fetch_error(self, failure):
"""A fetch request resulted in an error. Retry after our current delay
When a fetch error occurs, we check to see if the Consumer is being
stopped, and if so just return, trapping the CancelledError. If not, we
check if the Consumer has a non-zero setting for
:attr:`request_retry_max_attempts`; if so, and we have reached that limit, we
errback() the Consumer's start() deferred with the failure. If not, we
determine whether to log at debug or warning (we log at warning every
other retry after backing off to the max retry delay, resulting in a
warning message approximately once per minute with the default timings).
We then wait our current :attr:`retry_delay`, and retry the fetch. We
also increase our retry_delay by Apery's constant (1.20205) and note
the failed fetch by incrementing :attr:`_fetch_attempt_count`.
NOTE: this may retry forever.
TODO: Possibly make this differentiate based on the failure
"""
# The _request_d deferred has fired, clear it.
self._request_d = None
if failure.check(OffsetOutOfRangeError):
if self.auto_offset_reset is None:
self._start_d.errback(failure)
return
self._fetch_offset = self.auto_offset_reset
if self._stopping and failure.check(CancelledError):
# Not really an error
return
# Do we need to abort?
if (self.request_retry_max_attempts != 0 and
self._fetch_attempt_count >= self.request_retry_max_attempts):
log.debug(
"%r: Exhausted attempts: %d fetching messages from kafka: %r",
self, self.request_retry_max_attempts, failure)
self._start_d.errback(failure)
return
# Decide how to log this failure... If we have retried so many times
# we're at the retry_max_delay, then we log at warning every other time
# debug otherwise
if (self.retry_delay < self.retry_max_delay or
0 == (self._fetch_attempt_count % 2)):
log.debug("%r: Failure fetching messages from kafka: %r", self,
failure)
else:
# We've retried until we hit the max delay, log at warn
log.warning("%r: Still failing fetching messages from kafka: %r",
self, failure)
self._retry_fetch()
|
python
|
{
"resource": ""
}
|
q5364
|
Consumer._handle_fetch_response
|
train
|
def _handle_fetch_response(self, responses):
"""The callback handling the successful response from the fetch request
Delivers the message list to the processor, handles per-message errors
(ConsumerFetchSizeTooSmall), triggers another fetch request
If the processor is still processing the last batch of messages, we
defer this processing until it's done. Otherwise, we start another
fetch request and submit the messages to the processor
"""
# Successful fetch, reset our retry delay
self.retry_delay = self.retry_init_delay
self._fetch_attempt_count = 1
# Check to see if we are still processing the last block we fetched...
if self._msg_block_d:
# We are still working through the last block of messages...
# We have to wait until it's done, then process this response
self._msg_block_d.addCallback(
lambda _: self._handle_fetch_response(responses))
return
# No ongoing processing, great, let's get some started.
# Request no longer outstanding, clear the deferred tracker so we
# can refetch
self._request_d = None
messages = []
try:
for resp in responses: # We should really only ever get one...
if resp.partition != self.partition:
log.warning(
"%r: Got response with partition: %r not our own: %r",
self, resp.partition, self.partition)
continue
# resp.messages is a KafkaCodec._decode_message_set_iter
# Note that 'message' here is really an OffsetAndMessage
for message in resp.messages:
# Check for messages included which are from prior to our
# desired offset: can happen due to compressed message sets
if message.offset < self._fetch_offset:
log.debug(
'Skipping message at offset: %d, because its '
'offset is less than our fetch offset: %d.',
message.offset, self._fetch_offset)
continue
# Create a 'SourcedMessage' and add it to the messages list
messages.append(
SourcedMessage(
message=message.message,
offset=message.offset, topic=self.topic,
partition=self.partition))
# Update our notion of from where to fetch.
self._fetch_offset = message.offset + 1
except ConsumerFetchSizeTooSmall:
# A message was too large for us to receive, given our current
# buffer size. Grow it until it works, or we hit our max
# Grow by 16x up to 1MB (could result in 16MB buf), then by 2x
factor = 2
if self.buffer_size <= 2**20:
factor = 16
if self.max_buffer_size is None:
# No limit, increase until we succeed or fail to alloc RAM
self.buffer_size *= factor
elif (self.max_buffer_size is not None and
self.buffer_size < self.max_buffer_size):
# Limited, but currently below it.
self.buffer_size = min(
self.buffer_size * factor, self.max_buffer_size)
else:
# We failed, and are already at our max. Nothing we can do but
# create a Failure and errback() our start() deferred
log.error("Max fetch size %d too small", self.max_buffer_size)
failure = Failure(
ConsumerFetchSizeTooSmall(
"Max buffer size:%d too small for message",
self.max_buffer_size))
self._start_d.errback(failure)
return
log.debug(
"Next message larger than fetch size, increasing "
"to %d (~2x) and retrying", self.buffer_size)
finally:
# If we were able to extract any messages, deliver them to the
# processor now.
if messages:
self._msg_block_d = Deferred()
self._process_messages(messages)
# start another fetch, if needed, but use callLater to avoid recursion
self._retry_fetch(0)
|
python
|
{
"resource": ""
}
|
q5365
|
Consumer._process_messages
|
train
|
def _process_messages(self, messages):
"""Send messages to the `processor` callback to be processed
In the case we have a commit policy, we send messages to the processor
in blocks no bigger than auto_commit_every_n (if set). Otherwise, we
send the entire message block to be processed.
"""
# Have we been told to shutdown?
if self._shuttingdown:
return
# Do we have any messages to process?
if not messages:
# No, we're done with this block. If we had another fetch result
# waiting, this callback will trigger the processing thereof.
if self._msg_block_d:
_msg_block_d, self._msg_block_d = self._msg_block_d, None
_msg_block_d.callback(True)
return
# Yes, we've got some messages to process.
# Default to processing the entire block...
proc_block_size = sys.maxsize
# Unless our auto commit_policy restricts us to process less
if self.auto_commit_every_n:
proc_block_size = self.auto_commit_every_n
# Divide messages into two lists: one to process now, and remainder
msgs_to_proc = messages[:proc_block_size]
msgs_remainder = messages[proc_block_size:]
# Call our processor callable and handle the possibility it returned
# a deferred...
last_offset = msgs_to_proc[-1].offset
self._processor_d = d = maybeDeferred(self.processor, self, msgs_to_proc)
log.debug('self.processor return: %r, last_offset: %r', d, last_offset)
# Once the processor completes, clear our _processor_d
d.addBoth(self._clear_processor_deferred)
# Record the offset of the last processed message and check autocommit
d.addCallback(self._update_processed_offset, last_offset)
# If we were stopped, cancel the processor deferred. Note, we have to
# do this here, in addition to in stop() because the processor func
# itself could have called stop(), and then when it returned, we re-set
# self._processor_d to the return of maybeDeferred().
if self._stopping or self._start_d is None:
d.cancel()
else:
# Setup to process the rest of our messages
d.addCallback(lambda _: self._process_messages(msgs_remainder))
# Add an error handler
d.addErrback(self._handle_processor_error)
|
python
|
{
"resource": ""
}
|
q5366
|
Consumer._do_fetch
|
train
|
def _do_fetch(self):
"""Send a fetch request if there isn't a request outstanding
Sends a fetch request to the Kafka cluster to get messages at the
current offset. When the response comes back, if there are messages,
it delivers them to the :attr:`processor` callback and initiates
another fetch request. If there is a recoverable error, the fetch is
retried after :attr:`retry_delay`.
In the case of an unrecoverable error, :func:`errback` is called on the
:class:`Deferred` returned by :meth:`start()`.
"""
# Check for outstanding request.
if self._request_d:
log.debug("_do_fetch: Outstanding request: %r", self._request_d)
return
# Cleanup our _retry_call, if we have one
if self._retry_call is not None:
if self._retry_call.active():
self._retry_call.cancel()
self._retry_call = None
# Do we know our offset yet, or do we need to figure it out?
if (self._fetch_offset == OFFSET_EARLIEST or
self._fetch_offset == OFFSET_LATEST):
# We need to fetch the offset for our topic/partition
offset_request = OffsetRequest(
self.topic, self.partition, self._fetch_offset, 1)
self._request_d = self.client.send_offset_request([offset_request])
self._request_d.addCallbacks(
self._handle_offset_response, self._handle_offset_error)
elif self._fetch_offset == OFFSET_COMMITTED:
# We need to fetch the committed offset for our topic/partition
# Note we use the same callbacks, as the responses are "close
# enough" for our needs here
if not self.consumer_group:
# consumer_group must be set for OFFSET_COMMITTED
failure = Failure(
InvalidConsumerGroupError("Bad Group_id:{0!r}".format(
self.consumer_group)))
self._start_d.errback(failure)
request = OffsetFetchRequest(self.topic, self.partition)
self._request_d = self.client.send_offset_fetch_request(
self.consumer_group, [request])
self._request_d.addCallbacks(
self._handle_offset_response, self._handle_offset_error)
else:
# Create fetch request payload for our partition
request = FetchRequest(
self.topic, self.partition, self._fetch_offset,
self.buffer_size)
# Send request and add handlers for the response
self._request_d = self.client.send_fetch_request(
[request], max_wait_time=self.fetch_max_wait_time,
min_bytes=self.fetch_min_bytes)
# We need a temp for this because if the response is already
# available, _handle_fetch_response() will clear self._request_d
d = self._request_d
d.addCallback(self._handle_fetch_response)
d.addErrback(self._handle_fetch_error)
|
python
|
{
"resource": ""
}
|
q5367
|
HashedPartitioner.partition
|
train
|
def partition(self, key, partitions):
"""
Select a partition based on the hash of the key.
:param key: Partition key
:type key: text string or UTF-8 `bytes` or `bytearray`
:param list partitions:
An indexed sequence of partition identifiers.
:returns:
One of the given partition identifiers. The result will be the same
each time the same key and partition list is passed.
"""
return partitions[(self._hash(key) & 0x7FFFFFFF) % len(partitions)]
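# Editorial sketch of the same selection rule with a stand-in hash function:
# the real partitioner uses its configured self._hash, here zlib.crc32 merely
# illustrates that masking to 31 bits and taking the modulus is deterministic
# for a given key and partition list.
import zlib
def _pick_partition(key, partitions):
    return partitions[(zlib.crc32(key) & 0x7FFFFFFF) % len(partitions)]
assert _pick_partition(b"user-42", [0, 1, 2, 3]) == _pick_partition(b"user-42", [0, 1, 2, 3])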
|
python
|
{
"resource": ""
}
|
q5368
|
snappy_encode
|
train
|
def snappy_encode(payload, xerial_compatible=False,
xerial_blocksize=32 * 1024):
"""
Compress the given data with the Snappy algorithm.
:param bytes payload: Data to compress.
:param bool xerial_compatible:
If set then the stream is broken into length-prefixed blocks in
a fashion compatible with the xerial snappy library.
The format winds up being::
+-------------+------------+--------------+------------+--------------+
| Header | Block1_len | Block1 data | BlockN len | BlockN data |
|-------------+------------+--------------+------------+--------------|
| 16 bytes | BE int32 | snappy bytes | BE int32 | snappy bytes |
+-------------+------------+--------------+------------+--------------+
:param int xerial_blocksize:
Number of bytes per chunk to independently Snappy encode. 32k is the
default in the xerial library.
:returns: Compressed bytes.
:rtype: :class:`bytes`
"""
if not has_snappy(): # FIXME This should be static, not checked every call.
raise NotImplementedError("Snappy codec is not available")
if xerial_compatible:
def _chunker():
for i in range(0, len(payload), xerial_blocksize):
yield payload[i:i+xerial_blocksize]
out = BytesIO()
out.write(_XERIAL_HEADER)
for chunk in _chunker():
block = snappy.compress(chunk)
out.write(struct.pack('!i', len(block)))
out.write(block)
out.seek(0)
return out.read()
else:
return snappy.compress(payload)
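# Hedged usage sketch (editorial addition; requires the python-snappy package):
# encode a payload with xerial-compatible framing. The result starts with the
# 16-byte header (_XERIAL_HEADER, defined elsewhere in this module) followed by
# big-endian length-prefixed snappy blocks.
payload = b"kafka " * 10000
blob = snappy_encode(payload, xerial_compatible=True, xerial_blocksize=32 * 1024)
assert blob.startswith(_XERIAL_HEADER)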
|
python
|
{
"resource": ""
}
|
q5369
|
VideoFile._get_video_info
|
train
|
def _get_video_info(self):
"""
Returns basic information about the video as a dictionary.
"""
if not hasattr(self, '_info_cache'):
encoding_backend = get_backend()
try:
path = os.path.abspath(self.path)
except AttributeError:
path = os.path.abspath(self.name)
self._info_cache = encoding_backend.get_media_info(path)
return self._info_cache
|
python
|
{
"resource": ""
}
|
q5370
|
FFmpegBackend.encode
|
train
|
def encode(self, source_path, target_path, params): # NOQA: C901
"""
Encodes a video to a specified file. All encoder specific options
are passed in using `params`.
"""
total_time = self.get_media_info(source_path)['duration']
cmds = [self.ffmpeg_path, '-i', source_path]
cmds.extend(self.params)
cmds.extend(params)
cmds.extend([target_path])
process = self._spawn(cmds)
buf = output = ''
# update progress
while True:
# any more data?
out = process.stderr.read(10)
if not out:
break
out = out.decode(console_encoding)
output += out
buf += out
try:
line, buf = buf.split('\r', 1)
except ValueError:
continue
try:
time_str = RE_TIMECODE.findall(line)[0]
except IndexError:
continue
# convert progress to percent
time = 0
for part in time_str.split(':'):
time = 60 * time + float(part)
percent = time / total_time * 100
logger.debug('yield {}%'.format(percent))
yield percent
if os.path.getsize(target_path) == 0:
raise exceptions.FFmpegError("File size of generated file is 0")
# wait for process to exit
self._check_returncode(process)
logger.debug(output)
if not output:
raise exceptions.FFmpegError("No output from FFmpeg.")
yield 100
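# Hedged usage sketch (editorial addition; the paths and codec flags are
# illustrative): encode() is a generator, so progress can be consumed while the
# transcode runs.
backend = get_backend()
for progress in backend.encode('/tmp/in.mp4', '/tmp/out.webm', ['-codec:v', 'libvpx']):
    print('encoding... {:.0f}%'.format(progress))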
|
python
|
{
"resource": ""
}
|
q5371
|
FFmpegBackend.get_media_info
|
train
|
def get_media_info(self, video_path):
"""
Returns information about the given video as dict.
"""
cmds = [self.ffprobe_path, '-i', video_path]
cmds.extend(['-print_format', 'json'])
cmds.extend(['-show_format', '-show_streams'])
process = self._spawn(cmds)
stdout, __ = self._check_returncode(process)
media_info = self._parse_media_info(stdout)
return {
'duration': float(media_info['format']['duration']),
'width': int(media_info['video'][0]['width']),
'height': int(media_info['video'][0]['height']),
}
|
python
|
{
"resource": ""
}
|
q5372
|
FFmpegBackend.get_thumbnail
|
train
|
def get_thumbnail(self, video_path, at_time=0.5):
"""
Extracts an image of a video and returns its path.
If the requested thumbnail is not within the duration of the video
an `InvalidTimeError` is raised.
"""
filename = os.path.basename(video_path)
filename, __ = os.path.splitext(filename)
_, image_path = tempfile.mkstemp(suffix='_{}.jpg'.format(filename))
video_duration = self.get_media_info(video_path)['duration']
if at_time > video_duration:
raise exceptions.InvalidTimeError()
thumbnail_time = at_time
cmds = [self.ffmpeg_path, '-i', video_path, '-vframes', '1']
cmds.extend(['-ss', str(thumbnail_time), '-y', image_path])
process = self._spawn(cmds)
self._check_returncode(process)
if not os.path.getsize(image_path):
# we somehow failed to generate thumbnail
os.unlink(image_path)
raise exceptions.InvalidTimeError()
return image_path
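# Hedged usage sketch (editorial addition; the path is illustrative and
# get_backend() is the backend factory used elsewhere in this project): probe a
# video and grab a thumbnail from its midpoint.
backend = get_backend()
info = backend.get_media_info('/tmp/example.mp4')  # {'duration': ..., 'width': ..., 'height': ...}
thumbnail_path = backend.get_thumbnail('/tmp/example.mp4', at_time=info['duration'] / 2)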
|
python
|
{
"resource": ""
}
|
q5373
|
convert_all_videos
|
train
|
def convert_all_videos(app_label, model_name, object_pk):
"""
Automatically converts all videos of a given instance.
"""
# get instance
Model = apps.get_model(app_label=app_label, model_name=model_name)
instance = Model.objects.get(pk=object_pk)
# search for `VideoFields`
fields = instance._meta.fields
for field in fields:
if isinstance(field, VideoField):
if not getattr(instance, field.name):
# ignore empty fields
continue
# trigger conversion
fieldfile = getattr(instance, field.name)
convert_video(fieldfile)
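# Hedged usage sketch (editorial addition; the app label, model name and pk are
# purely illustrative): trigger conversion of every VideoField on one object,
# e.g. from a post_save handler or a background task.
convert_all_videos('library', 'Video', 42)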
|
python
|
{
"resource": ""
}
|
q5374
|
convert_video
|
train
|
def convert_video(fieldfile, force=False):
"""
Converts a given video file into all defined formats.
"""
instance = fieldfile.instance
field = fieldfile.field
filename = os.path.basename(fieldfile.path)
source_path = fieldfile.path
encoding_backend = get_backend()
for options in settings.VIDEO_ENCODING_FORMATS[encoding_backend.name]:
video_format, created = Format.objects.get_or_create(
object_id=instance.pk,
content_type=ContentType.objects.get_for_model(instance),
field_name=field.name, format=options['name'])
# do not reencode if not requested
if video_format.file and not force:
continue
else:
# set progress to 0
video_format.reset_progress()
# TODO do not upscale videos
_, target_path = tempfile.mkstemp(
suffix='_{name}.{extension}'.format(**options))
try:
encoding = encoding_backend.encode(
source_path, target_path, options['params'])
while encoding:
try:
progress = next(encoding)
except StopIteration:
break
video_format.update_progress(progress)
except VideoEncodingError:
# TODO handle with more care
video_format.delete()
os.remove(target_path)
continue
# save encoded file
video_format.file.save(
'{filename}_{name}.{extension}'.format(filename=filename,
**options),
File(open(target_path, mode='rb')))
video_format.update_progress(100) # now we are ready
# remove temporary file
os.remove(target_path)
|
python
|
{
"resource": ""
}
|
q5375
|
Firefly.distance_to
|
train
|
def distance_to(self, other):
"""Return Euclidian distance between self and other Firefly"""
return np.linalg.norm(self.position-other.position)
|
python
|
{
"resource": ""
}
|
q5376
|
Firefly.compute_intensity
|
train
|
def compute_intensity(self, _cost_func):
"""Evaluate cost function and compute intensity at this position"""
self.evaluate(_cost_func)
self.intensity = 1 / self.time
|
python
|
{
"resource": ""
}
|
q5377
|
Firefly.move_towards
|
train
|
def move_towards(self, other, beta, alpha):
"""Move firefly towards another given beta and alpha values"""
self.position += beta * (other.position - self.position)
self.position += alpha * (np.random.uniform(-0.5, 0.5, len(self.position)))
self.position = np.minimum(self.position, [b[1] for b in self.bounds])
self.position = np.maximum(self.position, [b[0] for b in self.bounds])
|
python
|
{
"resource": ""
}
|
q5378
|
DeviceInterface.benchmark
|
train
|
def benchmark(self, func, gpu_args, instance, times, verbose):
"""benchmark the kernel instance"""
logging.debug('benchmark ' + instance.name)
logging.debug('thread block dimensions x,y,z=%d,%d,%d', *instance.threads)
logging.debug('grid dimensions x,y,z=%d,%d,%d', *instance.grid)
time = None
try:
time = self.dev.benchmark(func, gpu_args, instance.threads, instance.grid, times)
except Exception as e:
#some launches may fail because too many registers are required
#to run the kernel given the current thread block size
#the desired behavior is to simply skip over this configuration
#and proceed to try the next one
skippable_exceptions = ["too many resources requested for launch", "OUT_OF_RESOURCES", "INVALID_WORK_GROUP_SIZE"]
if any([skip_str in str(e) for skip_str in skippable_exceptions]):
logging.debug('benchmark fails due to runtime failure too many resources required')
if verbose:
print("skipping config", instance.name, "reason: too many resources requested for launch")
else:
logging.debug('benchmark encountered runtime failure: ' + str(e))
print("Error while benchmarking:", instance.name)
raise e
return time
|
python
|
{
"resource": ""
}
|
q5379
|
DeviceInterface.check_kernel_output
|
train
|
def check_kernel_output(self, func, gpu_args, instance, answer, atol, verify, verbose):
"""runs the kernel once and checks the result against answer"""
logging.debug('check_kernel_output')
#if not using custom verify function, check if the length is the same
if not verify and len(instance.arguments) != len(answer):
raise TypeError("The length of argument list and provided results do not match.")
#zero GPU memory for output arguments
for i, arg in enumerate(instance.arguments):
if verify or answer[i] is not None:
if isinstance(arg, numpy.ndarray):
self.dev.memcpy_htod(gpu_args[i], arg)
#run the kernel
check = self.run_kernel(func, gpu_args, instance)
if not check:
return True #runtime failure occurred that should be ignored, skip correctness check
#retrieve gpu results to host memory
result_host = []
for i, arg in enumerate(instance.arguments):
if verify or answer[i] is not None:
if isinstance(arg, numpy.ndarray):
result_host.append(numpy.zeros_like(arg))
self.dev.memcpy_dtoh(result_host[-1], gpu_args[i])
else:
result_host.append(None)
#if the user has specified a custom verify function, then call it, else use default based on numpy allclose
if verify:
try:
return verify(answer, result_host, atol=atol)
except TypeError:
return verify(answer, result_host)
else:
return _default_verify_function(instance, answer, result_host, atol, verbose)
|
python
|
{
"resource": ""
}
|
q5380
|
DeviceInterface.compile_and_benchmark
|
train
|
def compile_and_benchmark(self, gpu_args, params, kernel_options, tuning_options):
""" Compile and benchmark a kernel instance based on kernel strings and parameters """
instance_string = util.get_instance_string(params)
logging.debug('compile_and_benchmark ' + instance_string)
mem_usage = round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024.0, 1)
logging.debug('Memory usage : %2.2f MB', mem_usage)
verbose = tuning_options.verbose
instance = self.create_kernel_instance(kernel_options, params, verbose)
if instance is None:
return None
try:
#compile the kernel
func = self.compile_kernel(instance, verbose)
if func is None:
return None
#add constant memory arguments to compiled module
if kernel_options.cmem_args is not None:
self.dev.copy_constant_memory_args(kernel_options.cmem_args)
#add texture memory arguments to compiled module
if kernel_options.texmem_args is not None:
self.dev.copy_texture_memory_args(kernel_options.texmem_args)
#test kernel for correctness and benchmark
if tuning_options.answer is not None:
self.check_kernel_output(func, gpu_args, instance, tuning_options.answer, tuning_options.atol, tuning_options.verify, verbose)
#benchmark
time = self.benchmark(func, gpu_args, instance, tuning_options.times, verbose)
except Exception as e:
#dump kernel_string to temp file
temp_filename = util.get_temp_filename(suffix=".c")
util.write_file(temp_filename, instance.kernel_string)
print("Error while compiling or benchmarking, see source files: " + temp_filename + " ".join(instance.temp_files.values()))
raise e
#clean up any temporary files, if no error occurred
for v in instance.temp_files.values():
util.delete_temp_file(v)
return time
|
python
|
{
"resource": ""
}
|
q5381
|
DeviceInterface.compile_kernel
|
train
|
def compile_kernel(self, instance, verbose):
"""compile the kernel for this specific instance"""
logging.debug('compile_kernel ' + instance.name)
#compile kernel_string into device func
func = None
try:
func = self.dev.compile(instance.name, instance.kernel_string)
except Exception as e:
#compiles may fail because certain kernel configurations use too
#much shared memory for example, the desired behavior is to simply
#skip over this configuration and try the next one
if "uses too much shared data" in str(e):
logging.debug('compile_kernel failed due to kernel using too much shared memory')
if verbose:
print("skipping config", instance.name, "reason: too much shared memory used")
else:
logging.debug('compile_kernel failed due to error: ' + str(e))
print("Error while compiling:", instance.name)
raise e
return func
|
python
|
{
"resource": ""
}
|
q5382
|
DeviceInterface.copy_constant_memory_args
|
train
|
def copy_constant_memory_args(self, cmem_args):
"""adds constant memory arguments to the most recently compiled module, if using CUDA"""
if self.lang == "CUDA":
self.dev.copy_constant_memory_args(cmem_args)
else:
raise Exception("Error cannot copy constant memory arguments when language is not CUDA")
|
python
|
{
"resource": ""
}
|
q5383
|
DeviceInterface.copy_texture_memory_args
|
train
|
def copy_texture_memory_args(self, texmem_args):
"""adds texture memory arguments to the most recently compiled module, if using CUDA"""
if self.lang == "CUDA":
self.dev.copy_texture_memory_args(texmem_args)
else:
raise Exception("Error cannot copy texture memory arguments when language is not CUDA")
|
python
|
{
"resource": ""
}
|
q5384
|
DeviceInterface.create_kernel_instance
|
train
|
def create_kernel_instance(self, kernel_options, params, verbose):
"""create kernel instance from kernel source, parameters, problem size, grid divisors, and so on"""
instance_string = util.get_instance_string(params)
grid_div = (kernel_options.grid_div_x, kernel_options.grid_div_y, kernel_options.grid_div_z)
#insert default block_size_names if needed
if not kernel_options.block_size_names:
kernel_options.block_size_names = util.default_block_size_names
#setup thread block and grid dimensions
threads, grid = util.setup_block_and_grid(kernel_options.problem_size, grid_div, params, kernel_options.block_size_names)
if numpy.prod(threads) > self.dev.max_threads:
if verbose:
print("skipping config", instance_string, "reason: too many threads per block")
return None
#obtain the kernel_string and prepare additional files, if any
temp_files = dict()
kernel_source = kernel_options.kernel_string
if not isinstance(kernel_source, list):
kernel_source = [kernel_source]
name, kernel_string, temp_files = util.prepare_list_of_files(kernel_options.kernel_name, kernel_source, params, grid, threads, kernel_options.block_size_names)
#collect everything we know about this instance and return it
return KernelInstance(name, kernel_string, temp_files, threads, grid, params, kernel_options.arguments)
|
python
|
{
"resource": ""
}
|
q5385
|
DeviceInterface.run_kernel
|
train
|
def run_kernel(self, func, gpu_args, instance):
""" Run a compiled kernel instance on a device """
logging.debug('run_kernel %s', instance.name)
logging.debug('thread block dims (%d, %d, %d)', *instance.threads)
logging.debug('grid dims (%d, %d, %d)', *instance.grid)
try:
self.dev.run_kernel(func, gpu_args, instance.threads, instance.grid)
except Exception as e:
if "too many resources requested for launch" in str(e) or "OUT_OF_RESOURCES" in str(e):
logging.debug('ignoring runtime failure due to too many resources required')
return False
else:
logging.debug('encountered unexpected runtime failure: ' + str(e))
raise e
return True
|
python
|
{
"resource": ""
}
|
q5386
|
NoodlesRunner.run
|
train
|
def run(self, parameter_space, kernel_options, tuning_options):
""" Tune all instances in parameter_space using a multiple threads
:param parameter_space: The parameter space as an iterable.
:type parameter_space: iterable
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
workflow = self._parameter_sweep(parameter_space, kernel_options, self.device_options,
tuning_options)
if tuning_options.verbose:
with NCDisplay(_error_filter) as display:
answer = run_parallel_with_display(workflow, self.max_threads, display)
else:
answer = run_parallel(workflow, self.max_threads)
if answer is None:
print("Tuning did not return any results, did an error occur?")
return None
# Filter out None times
result = []
for chunk in answer:
result += [d for d in chunk if d['time']]
return result, {}
|
python
|
{
"resource": ""
}
|
q5387
|
NoodlesRunner._parameter_sweep
|
train
|
def _parameter_sweep(self, parameter_space, kernel_options, device_options, tuning_options):
"""Build a Noodles workflow by sweeping the parameter space"""
results = []
#randomize parameter space to do pseudo load balancing
parameter_space = list(parameter_space)
random.shuffle(parameter_space)
#split parameter space into chunks
work_per_thread = int(numpy.ceil(len(parameter_space) / float(self.max_threads)))
chunks = _chunk_list(parameter_space, work_per_thread)
for chunk in chunks:
chunked_result = self._run_chunk(chunk, kernel_options, device_options, tuning_options)
results.append(lift(chunked_result))
return gather(*results)
|
python
|
{
"resource": ""
}
|
q5388
|
NoodlesRunner._run_chunk
|
train
|
def _run_chunk(self, chunk, kernel_options, device_options, tuning_options):
"""Benchmark a single kernel instance in the parameter space"""
#detect language and create high-level device interface
self.dev = DeviceInterface(kernel_options.kernel_string, iterations=tuning_options.iterations, **device_options)
#move data to the GPU
gpu_args = self.dev.ready_argument_list(kernel_options.arguments)
results = []
for element in chunk:
params = dict(OrderedDict(zip(tuning_options.tune_params.keys(), element)))
try:
time = self.dev.compile_and_benchmark(gpu_args, params, kernel_options, tuning_options)
params['time'] = time
results.append(params)
except Exception:
params['time'] = None
results.append(params)
return results
|
python
|
{
"resource": ""
}
|
q5389
|
tune
|
train
|
def tune(runner, kernel_options, device_options, tuning_options):
""" Tune a random sample of sample_fraction fraction in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
tune_params = tuning_options.tune_params
#compute cartesian product of all tunable parameters
parameter_space = itertools.product(*tune_params.values())
#check for search space restrictions
if tuning_options.restrictions is not None:
parameter_space = filter(lambda p: util.check_restrictions(tuning_options.restrictions, p,
tune_params.keys(),
tuning_options.verbose),
parameter_space)
#reduce parameter space to a random sample using sample_fraction
parameter_space = numpy.array(list(parameter_space))
size = len(parameter_space)
fraction = int(numpy.ceil(size * float(tuning_options.sample_fraction)))
sample_indices = numpy.random.choice(range(size), size=fraction, replace=False)
parameter_space = parameter_space[sample_indices]
#call the runner
results, env = runner.run(parameter_space, kernel_options, tuning_options)
return results, env
|
python
|
{
"resource": ""
}
|
q5390
|
check_argument_type
|
train
|
def check_argument_type(dtype, kernel_argument, i):
"""check if the numpy.dtype matches the type used in the code"""
types_map = {"uint8": ["uchar", "unsigned char", "uint8_t"],
"int8": ["char", "int8_t"],
"uint16": ["ushort", "unsigned short", "uint16_t"],
"int16": ["short", "int16_t"],
"uint32": ["uint", "unsigned int", "uint32_t"],
"int32": ["int", "int32_t"], #discrepancy between OpenCL and C here, long may be 32bits in C
"uint64": ["ulong", "unsigned long", "uint64_t"],
"int64": ["long", "int64_t"],
"float16": ["half"],
"float32": ["float"],
"float64": ["double"]}
if dtype in types_map:
return any([substr in kernel_argument for substr in types_map[dtype]])
else:
return False
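# Editorial usage sketch: the numpy dtype name is matched against substrings of
# the C/OpenCL parameter declaration.
assert check_argument_type("float32", "const float *data", 0) is True
assert check_argument_type("int32", "double value", 1) is False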
|
python
|
{
"resource": ""
}
|
q5391
|
check_argument_list
|
train
|
def check_argument_list(kernel_name, kernel_string, args):
""" raise an exception if a kernel arguments do not match host arguments """
kernel_arguments = list()
collected_errors = list()
for iterator in re.finditer(kernel_name + "[ \n\t]*" + "\(", kernel_string):
kernel_start = iterator.end()
kernel_end = kernel_string.find(")", kernel_start)
if kernel_start != 0:
kernel_arguments.append(kernel_string[kernel_start:kernel_end].split(","))
for arguments_set, arguments in enumerate(kernel_arguments):
collected_errors.append(list())
if len(arguments) != len(args):
collected_errors[arguments_set].append("Kernel and host argument lists do not match in size.")
continue
for (i, arg) in enumerate(args):
kernel_argument = arguments[i]
if not isinstance(arg, (numpy.ndarray, numpy.generic)):
raise TypeError("Argument at position " + str(i) + " of type: " + str(type(arg)) + " should be of type numpy.ndarray or numpy scalar")
correct = True
if isinstance(arg, numpy.ndarray) and not "*" in kernel_argument:
correct = False #array is passed to non-pointer kernel argument
if correct and check_argument_type(str(arg.dtype), kernel_argument, i):
continue
collected_errors[arguments_set].append("Argument at position " + str(i) + " of dtype: " + str(arg.dtype) +
" does not match " + kernel_argument + ".")
if not collected_errors[arguments_set]:
# We assume that if there is a possible list of arguments that matches with the provided one
# it is the right one
return
for errors in collected_errors:
warnings.warn(errors[0], UserWarning)
|
python
|
{
"resource": ""
}
|
q5392
|
check_tune_params_list
|
train
|
def check_tune_params_list(tune_params):
""" raise an exception if a tune parameter has a forbidden name """
forbidden_names = ("grid_size_x", "grid_size_y", "grid_size_z")
forbidden_name_substr = ("time", "times")
for name, param in tune_params.items():
if name in forbidden_names:
raise ValueError("Tune parameter " + name + " with value " + str(param) + " has a forbidden name!")
for forbidden_substr in forbidden_name_substr:
if forbidden_substr in name:
raise ValueError("Tune parameter " + name + " with value " + str(param) + " has a forbidden name: not allowed to use " + forbidden_substr + " in tune parameter names!")
|
python
|
{
"resource": ""
}
|
q5393
|
check_restrictions
|
train
|
def check_restrictions(restrictions, element, keys, verbose):
""" check whether a specific instance meets the search space restrictions """
params = OrderedDict(zip(keys, element))
for restrict in restrictions:
if not eval(replace_param_occurrences(restrict, params)):
if verbose:
print("skipping config", get_instance_string(params), "reason: config fails restriction")
return False
return True
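# Editorial usage sketch (relies on replace_param_occurrences from this module):
# each restriction is a Python expression over the tunable parameter names.
restrictions = ["block_size_x * block_size_y <= 1024"]
assert check_restrictions(restrictions, (32, 16), ["block_size_x", "block_size_y"], False) is True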
|
python
|
{
"resource": ""
}
|
q5394
|
detect_language
|
train
|
def detect_language(lang, kernel_source):
"""attempt to detect language from the kernel_string if not specified"""
if lang is None:
if callable(kernel_source):
raise TypeError("Please specify language when using a code generator function")
kernel_string = get_kernel_string(kernel_source)
if "__global__" in kernel_string:
lang = "CUDA"
elif "__kernel" in kernel_string:
lang = "OpenCL"
else:
lang = "C"
return lang
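# Editorial usage sketch: the marker keywords decide the language when none is
# given explicitly (the code strings here are not mistaken for filenames).
assert detect_language(None, "__global__ void add(float *c) {}") == "CUDA"
assert detect_language(None, "__kernel void add(__global float *c) {}") == "OpenCL"
assert detect_language("OpenCL", "anything") == "OpenCL"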
|
python
|
{
"resource": ""
}
|
q5395
|
get_config_string
|
train
|
def get_config_string(params, units=None):
""" return a compact string representation of a dictionary """
compact_str_items = []
# first make a list of compact strings for each parameter
for k, v in params.items():
unit = ""
if isinstance(units, dict): #check if not None not enough, units could be mocked which causes errors
unit = units.get(k, "")
compact_str_items.append(k + "=" + str(v) + unit)
# and finally join them
compact_str = ", ".join(compact_str_items)
return compact_str
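# Editorial usage sketch: values are joined as key=value pairs, with optional
# per-key units appended.
params = OrderedDict([("block_size_x", 128), ("time", 0.42)])
print(get_config_string(params, units={"time": "ms"}))  # block_size_x=128, time=0.42ms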
|
python
|
{
"resource": ""
}
|
q5396
|
get_grid_dimensions
|
train
|
def get_grid_dimensions(current_problem_size, params, grid_div, block_size_names):
"""compute grid dims based on problem sizes and listed grid divisors"""
def get_dimension_divisor(divisor_list, default, params):
if divisor_list is None:
if default in params:
divisor_list = [default]
else:
return 1
return numpy.prod([int(eval(replace_param_occurrences(s, params))) for s in divisor_list])
divisors = [get_dimension_divisor(d, block_size_names[i], params) for i, d in enumerate(grid_div)]
return tuple(int(numpy.ceil(float(current_problem_size[i]) / float(d))) for i, d in enumerate(divisors))
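# Editorial usage sketch (relies on replace_param_occurrences from this module):
# divide a 1024x1024 problem by the block dimensions named in grid_div; the z
# dimension has no divisor and defaults to 1.
params = {"block_size_x": 32, "block_size_y": 8}
grid = get_grid_dimensions([1024, 1024, 1], params,
                           (["block_size_x"], ["block_size_y"], None),
                           ["block_size_x", "block_size_y", "block_size_z"])
assert grid == (32, 128, 1)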
|
python
|
{
"resource": ""
}
|
q5397
|
get_kernel_string
|
train
|
def get_kernel_string(kernel_source, params=None):
""" retrieve the kernel source and return as a string
This function processes the passed kernel_source argument, which could be
a function, a string with a filename, or just a string with code already.
If kernel_source is a function, the function is called with instance
parameters in 'params' as the only argument.
If kernel_source looks like filename, the file is read in, but if
the file does not exist, it is assumed that the string is not a filename
after all.
:param kernel_source: One of the sources for the kernel, could be a
function that generates the kernel code, a string containing a filename
that points to the kernel source, or just a string that contains the code.
:type kernel_source: string or callable
:param params: Dictionary containing the tunable parameters for this specific
kernel instance, only needed when kernel_source is a generator.
:type params: dict
:returns: A string containing the kernel code.
:rtype: string
"""
#logging.debug('get_kernel_string called with %s', str(kernel_source))
logging.debug('get_kernel_string called')
kernel_string = None
if callable(kernel_source):
kernel_string = kernel_source(params)
elif isinstance(kernel_source, str):
if looks_like_a_filename(kernel_source):
kernel_string = read_file(kernel_source) or kernel_source
else:
kernel_string = kernel_source
else:
raise TypeError("Error kernel_source is not a string nor a callable function")
return kernel_string
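# Editorial usage sketch: a code-generator callable and a literal code string
# both yield the same plain string (the literal is not mistaken for a filename).
code = "__global__ void vec_add(float *c, float *a, float *b) {}"
assert get_kernel_string(code) == code
assert get_kernel_string(lambda params: code, params={"block_size_x": 128}) == code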
|
python
|
{
"resource": ""
}
|
q5398
|
get_problem_size
|
train
|
def get_problem_size(problem_size, params):
"""compute current problem size"""
if isinstance(problem_size, (str, int, numpy.integer)):
problem_size = (problem_size, )
current_problem_size = [1, 1, 1]
for i, s in enumerate(problem_size):
if isinstance(s, str):
current_problem_size[i] = int(eval(replace_param_occurrences(s, params)))
elif isinstance(s, (int, numpy.integer)):
current_problem_size[i] = s
else:
raise TypeError("Error: problem_size should only contain strings or integers")
return current_problem_size
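# Editorial usage sketch (relies on replace_param_occurrences from this module):
# string entries are evaluated against the tunable parameters, integers pass
# through, and missing dimensions default to 1.
assert get_problem_size(("n/2", 1024), {"n": 4096}) == [2048, 1024, 1]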
|
python
|
{
"resource": ""
}
|
q5399
|
get_temp_filename
|
train
|
def get_temp_filename(suffix=None):
""" return a string in the form of temp_X, where X is a large integer """
file = tempfile.mkstemp(suffix=suffix or "", prefix="temp_", dir=os.getcwd()) # or "" for Python 2 compatibility
os.close(file[0])
return file[1]
|
python
|
{
"resource": ""
}
|