sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def collect_hosts(hosts, randomize=True):
    """Parse a comma-separated set of host:port pairs into (host, port) tuples.

    Arguments:
        hosts: either a comma-separated string of "host[:port]" entries, or
            an iterable of such entries.
        randomize (bool): shuffle the resulting list when True (default).

    Returns:
        list of (host, port) tuples; entries with no explicit port get
        DEFAULT_KAFKA_PORT.
    """
    if isinstance(hosts, six.string_types):
        hosts = hosts.strip().split(',')

    parsed = []
    for entry in hosts:
        parts = entry.split(':')
        if len(parts) > 1:
            host, port = parts[0], int(parts[1])
        else:
            host, port = parts[0], DEFAULT_KAFKA_PORT
        parsed.append((host.strip(), port))

    if randomize:
        shuffle(parsed)
    return parsed
def send(self, request_id, payload):
    """Send an encoded request payload to Kafka.

    Arguments:
        request_id (int): can be any int (used only for debug logging)
        payload: an encoded kafka packet (see KafkaProtocol)

    Raises a connection error (via _raise_connection_error) if the
    payload cannot be delivered.
    """
    # Lazy %-style logging args: formatting is skipped entirely when
    # the debug level is disabled.
    log.debug("About to send %d bytes to Kafka, request %d",
              len(payload), request_id)

    # Make sure we have a connection
    if not self._sock:
        self.reinit()

    try:
        self._sock.sendall(payload)
    except socket.error:
        log.exception('Unable to send payload to Kafka')
        self._raise_connection_error()
def recv(self, request_id):
    """Get a response packet from Kafka.

    Arguments:
        request_id: can be any int (only used for debug logging)

    Returns:
        str: Encoded kafka packet response from server
    """
    # Lazy %-style logging args avoid formatting when debug is disabled
    log.debug("Reading response %d from Kafka", request_id)

    # Make sure we have a connection
    if not self._sock:
        self.reinit()

    # Read the size off of the header (4-byte big-endian int)
    resp = self._read_bytes(4)
    (size,) = struct.unpack('>i', resp)

    # Read the remainder of the response
    resp = self._read_bytes(size)
    return resp
def copy(self):
    """Return an inactive (disconnected) copy of this connection object.

    Suitable for passing to a background thread.  The returned copy is
    not connected; the caller must invoke reinit() before using it.
    """
    clone = copy.deepcopy(self)
    # Python 3's threading.local does not deep-copy custom attributes of
    # the threadlocal subclass, so copy them over explicitly.
    clone.host = copy.copy(self.host)
    clone.port = copy.copy(self.port)
    clone.timeout = copy.copy(self.timeout)
    clone._sock = None
    return clone
def close(self):
    """Shutdown and close the connection socket."""
    # Lazy %-style logging args avoid formatting when debug is disabled
    log.debug("Closing socket connection for %s:%d", self.host, self.port)
    if self._sock:
        # Call shutdown to be a good TCP client,
        # but expect an error if the socket has already been
        # closed by the server.
        try:
            self._sock.shutdown(socket.SHUT_RDWR)
        except socket.error:
            pass

        # Closing the socket should always succeed
        self._sock.close()
        self._sock = None
    else:
        log.debug("No socket found to close!")
def reinit(self):
    """Re-initialize the socket connection.

    Closes the current socket (if open) and starts a fresh connection.
    Raises a connection error (via _raise_connection_error) on failure.
    """
    # Lazy %-style logging args avoid formatting when debug is disabled
    log.debug("Reinitializing socket connection for %s:%d",
              self.host, self.port)

    if self._sock:
        self.close()

    try:
        self._sock = socket.create_connection((self.host, self.port),
                                              self.timeout)
    except socket.error:
        log.exception('Unable to connect to kafka broker at %s:%d',
                      self.host, self.port)
        self._raise_connection_error()
def configure(self, **configs):
    """Configure the consumer instance.

    Configuration settings can be passed to constructor,
    otherwise defaults will be used:

    Keyword Arguments:
        bootstrap_servers (list): List of initial broker nodes the consumer
            should contact to bootstrap initial cluster metadata. This does
            not have to be the full node list. It just needs to have at
            least one broker that will respond to a Metadata API Request.
        client_id (str): a unique name for this client. Defaults to
            'kafka.consumer.kafka'.
        group_id (str): the name of the consumer group to join,
            Offsets are fetched / committed to this group name.
        fetch_message_max_bytes (int, optional): Maximum bytes for each
            topic/partition fetch request. Defaults to 1024*1024.
        fetch_min_bytes (int, optional): Minimum amount of data the server
            should return for a fetch request, otherwise wait up to
            fetch_wait_max_ms for more data to accumulate. Defaults to 1.
        fetch_wait_max_ms (int, optional): Maximum time for the server to
            block waiting for fetch_min_bytes messages to accumulate.
            Defaults to 100.
        refresh_leader_backoff_ms (int, optional): Milliseconds to backoff
            when refreshing metadata on errors (subject to random jitter).
            Defaults to 200.
        socket_timeout_ms (int, optional): TCP socket timeout in
            milliseconds. Defaults to 30*1000.
        auto_offset_reset (str, optional): A policy for resetting offsets on
            OffsetOutOfRange errors. 'smallest' will move to the oldest
            available message, 'largest' will move to the most recent. Any
            other value will raise the exception. Defaults to 'largest'.
        deserializer_class (callable, optional): Any callable that takes a
            raw message value and returns a deserialized value. Defaults to
            lambda msg: msg.
        auto_commit_enable (bool, optional): Enabling auto-commit will cause
            the KafkaConsumer to periodically commit offsets without an
            explicit call to commit(). Defaults to False.
        auto_commit_interval_ms (int, optional): If auto_commit_enabled,
            the milliseconds between automatic offset commits. Defaults to
            60 * 1000.
        auto_commit_interval_messages (int, optional): If
            auto_commit_enabled, a number of messages consumed between
            automatic offset commits. Defaults to None (disabled).
        consumer_timeout_ms (int, optional): number of milliseconds to throw
            a timeout exception to the consumer if no message is available
            for consumption. Defaults to -1 (don't throw exception).

    Configuration parameters are described in more detail at
    http://kafka.apache.org/documentation.html#highlevelconsumerapi
    """
    configs = self._deprecate_configs(**configs)
    self._config = {}
    for key in self.DEFAULT_CONFIG:
        self._config[key] = configs.pop(key, self.DEFAULT_CONFIG[key])

    if configs:
        raise KafkaConfigurationError('Unknown configuration key(s): ' +
                                      str(list(configs.keys())))

    # Auto-commit requires a consumer group for offset storage.
    # (Previously this flag was tested twice in a row; the checks are
    # merged into a single branch.)
    if self._config['auto_commit_enable']:
        if not self._config['group_id']:
            raise KafkaConfigurationError(
                'KafkaConsumer configured to auto-commit '
                'without required consumer group (group_id)'
            )
        logger.info("Configuring consumer to auto-commit offsets")
        self._reset_auto_commit()

    if not self._config['bootstrap_servers']:
        raise KafkaConfigurationError(
            'bootstrap_servers required to configure KafkaConsumer'
        )

    self._client = KafkaClient(
        self._config['bootstrap_servers'],
        client_id=self._config['client_id'],
        timeout=(self._config['socket_timeout_ms'] / 1000.0)
    )
def set_topic_partitions(self, *topics):
    """
    Set the topic/partitions to consume.

    Optionally specify offsets to start from.

    Accepts types:

    * str (utf-8): topic name (will consume all available partitions)
    * tuple: (topic, partition)
    * dict:
        - { topic: partition }
        - { topic: [partition list] }
        - { topic: (partition tuple,) }

    Optionally, offsets can be specified directly:

    * tuple: (topic, partition, offset)
    * dict: { (topic, partition): offset, ... }

    Example:

    .. code:: python

        kafka = KafkaConsumer()

        # Consume topic1-all; topic2-partition2; topic3-partition0
        kafka.set_topic_partitions("topic1", ("topic2", 2), {"topic3": 0})

        # Consume topic1-0 starting at offset 12, and topic2-1 at offset 45
        # using tuples --
        kafka.set_topic_partitions(("topic1", 0, 12), ("topic2", 1, 45))

        # using dict --
        kafka.set_topic_partitions({ ("topic1", 0): 12, ("topic2", 1): 45 })
    """
    self._topics = []
    self._client.load_metadata_for_topics()

    # Setup offsets
    self._offsets = OffsetsStruct(fetch=dict(),
                                  commit=dict(),
                                  highwater=dict(),
                                  task_done=dict())

    # Handle different topic types
    for arg in topics:

        # Topic name str -- all partitions
        if isinstance(arg, (six.string_types, six.binary_type)):
            topic = kafka_bytestring(arg)
            for partition in self._client.get_partition_ids_for_topic(topic):
                self._consume_topic_partition(topic, partition)

        # (topic, partition [, offset]) tuple
        elif isinstance(arg, tuple):
            topic = kafka_bytestring(arg[0])
            partition = arg[1]
            self._consume_topic_partition(topic, partition)
            if len(arg) == 3:
                offset = arg[2]
                self._offsets.fetch[(topic, partition)] = offset

        # { topic: partitions, ... } dict
        elif isinstance(arg, dict):
            for key, value in six.iteritems(arg):

                # key can be string (a topic)
                if isinstance(key, (six.string_types, six.binary_type)):
                    topic = kafka_bytestring(key)

                    # topic: partition
                    if isinstance(value, int):
                        self._consume_topic_partition(topic, value)

                    # topic: [ partition1, partition2, ... ]
                    elif isinstance(value, (list, tuple)):
                        for partition in value:
                            self._consume_topic_partition(topic, partition)
                    else:
                        # Fixed message: it is the dict *value* (the
                        # partition spec), not the key, that failed
                        # validation here.
                        raise KafkaConfigurationError(
                            'Unknown topic type '
                            '(dict value must be int or list/tuple of ints)'
                        )

                # (topic, partition): offset
                elif isinstance(key, tuple):
                    topic = kafka_bytestring(key[0])
                    partition = key[1]
                    self._consume_topic_partition(topic, partition)
                    self._offsets.fetch[(topic, partition)] = value

        else:
            raise KafkaConfigurationError('Unknown topic type (%s)' % type(arg))

    # If we have a consumer group, try to fetch stored offsets
    if self._config['group_id']:
        self._get_commit_offsets()

    # Update missing fetch/commit offsets
    for topic_partition in self._topics:

        # Commit offsets default is None
        if topic_partition not in self._offsets.commit:
            self._offsets.commit[topic_partition] = None

        # Skip if we already have a fetch offset from user args
        if topic_partition not in self._offsets.fetch:

            # Fetch offsets default is (1) commit
            if self._offsets.commit[topic_partition] is not None:
                self._offsets.fetch[topic_partition] = self._offsets.commit[topic_partition]

            # or (2) auto reset
            else:
                self._offsets.fetch[topic_partition] = \
                    self._reset_partition_offset(topic_partition)

    # highwater marks (received from server on fetch response)
    # and task_done (set locally by user)
    # should always get initialized to None
    self._reset_highwater_offsets()
    self._reset_task_done_offsets()

    # Reset message iterator in case we were in the middle of one
    self._reset_message_iterator()
def next(self):
    """Return the next available message.

    Blocks indefinitely unless consumer_timeout_ms > 0.

    Returns:
        a single KafkaMessage from the message iterator

    Raises:
        ConsumerTimeout after consumer_timeout_ms with no message

    Note:
        This is also the method called internally during iteration.
    """
    self._set_consumer_timeout_start()
    while True:
        try:
            return six.next(self._get_message_iterator())
        except StopIteration:
            # Current batch is exhausted -- start a fresh iterator,
            # then fall through to the timeout check below.
            self._reset_message_iterator()

        self._check_consumer_timeout()
def fetch_messages(self):
    """Send FetchRequests for all topic/partitions set for consumption.

    Returns:
        Generator that yields KafkaMessage structs
        after deserializing with the configured `deserializer_class`

    Note:
        Refreshes metadata on errors, and resets fetch offset on
        OffsetOutOfRange, per the configured `auto_offset_reset` policy

    See Also:
        Key KafkaConsumer configuration parameters:
        * `fetch_message_max_bytes`
        * `fetch_wait_max_ms`
        * `fetch_min_bytes`
        * `deserializer_class`
        * `auto_offset_reset`
    """
    max_bytes = self._config['fetch_message_max_bytes']
    max_wait_time = self._config['fetch_wait_max_ms']
    min_bytes = self._config['fetch_min_bytes']

    if not self._topics:
        raise KafkaConfigurationError('No topics or partitions configured')

    if not self._offsets.fetch:
        raise KafkaConfigurationError(
            'No fetch offsets found when calling fetch_messages'
        )

    fetches = [FetchRequest(topic, partition,
                            self._offsets.fetch[(topic, partition)],
                            max_bytes)
               for (topic, partition) in self._topics]

    # send_fetch_request will batch topic/partition requests by leader
    responses = self._client.send_fetch_request(
        fetches,
        max_wait_time=max_wait_time,
        min_bytes=min_bytes,
        fail_on_error=False
    )

    for resp in responses:

        if isinstance(resp, FailedPayloadsError):
            logger.warning('FailedPayloadsError attempting to fetch data')
            self._refresh_metadata_on_error()
            continue

        topic = kafka_bytestring(resp.topic)
        partition = resp.partition
        try:
            check_error(resp)
        except OffsetOutOfRangeError:
            logger.warning('OffsetOutOfRange: topic %s, partition %d, '
                           'offset %d (Highwatermark: %d)',
                           topic, partition,
                           self._offsets.fetch[(topic, partition)],
                           resp.highwaterMark)
            # Reset offset per the auto_offset_reset policy
            self._offsets.fetch[(topic, partition)] = (
                self._reset_partition_offset((topic, partition))
            )
            continue

        except NotLeaderForPartitionError:
            logger.warning("NotLeaderForPartitionError for %s - %d. "
                           "Metadata may be out of date",
                           topic, partition)
            self._refresh_metadata_on_error()
            continue

        except RequestTimedOutError:
            logger.warning("RequestTimedOutError for %s - %d",
                           topic, partition)
            continue

        # Track server highwater mark
        self._offsets.highwater[(topic, partition)] = resp.highwaterMark

        # Yield each message.
        # Kafka-python could raise an exception during iteration
        # that we are not catching -- user will need to address.
        for (offset, message) in resp.messages:
            # deserializer_class could raise an exception here
            val = self._config['deserializer_class'](message.value)
            msg = KafkaMessage(topic, partition, offset, message.key, val)

            # In some cases the server will return earlier messages
            # than we requested. Skip them per kafka spec.
            if offset < self._offsets.fetch[(topic, partition)]:
                logger.debug('message offset less than fetched offset '
                             'skipping: %s', msg)
                continue

            # Only increment fetch offset
            # if we safely got the message and deserialized
            self._offsets.fetch[(topic, partition)] = offset + 1

            # Then yield to user
            yield msg
def get_partition_offsets(self, topic, partition, request_time_ms, max_num_offsets):
    """Request available fetch offsets for a single topic/partition.

    Keyword Arguments:
        topic (str): topic for offset request
        partition (int): partition for offset request
        request_time_ms (int): Used to ask for all messages before a
            certain time (ms). There are two special values.
            Specify -1 to receive the latest offset (i.e. the offset of
            the next coming message) and -2 to receive the earliest
            available offset. Note that because offsets are pulled in
            descending order, asking for the earliest offset will always
            return you a single element.
        max_num_offsets (int): Maximum offsets to include in the
            OffsetResponse

    Returns:
        a list of offsets in the OffsetResponse submitted for the
        provided topic / partition. See:
        https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetAPI
    """
    request = OffsetRequest(topic, partition, request_time_ms, max_num_offsets)
    (resp,) = self._client.send_offset_request([request])
    check_error(resp)

    # Sanity checks -- the broker should echo back exactly what we asked for
    assert resp.topic == topic
    assert resp.partition == partition

    return resp.offsets
def offsets(self, group=None):
    """Get internal consumer offset values.

    Keyword Arguments:
        group: one of "fetch", "commit", "task_done" or "highwater".
            When omitted, all four groups are returned.

    Returns:
        A copy of the internal offsets struct (or of one group of it)
    """
    if group:
        return dict(deepcopy(getattr(self._offsets, group)))
    return {name: self.offsets(name)
            for name in ('fetch', 'commit', 'task_done', 'highwater')}
def task_done(self, message):
    """Mark a fetched message as consumed.

    Offsets for messages marked as "task_done" will be stored back
    to the kafka cluster for this consumer group on commit().

    Arguments:
        message (KafkaMessage): the message to mark as complete

    Returns:
        True, unless the topic-partition for this message has not
        been configured for the consumer. In normal operation, this
        should not happen. But see github issue 364.
    """
    topic_partition = (message.topic, message.partition)
    if topic_partition not in self._topics:
        logger.warning('Unrecognized topic/partition in task_done message: '
                       '{0}:{1}'.format(*topic_partition))
        return False

    offset = message.offset

    # Warn on non-contiguous offsets
    last_done = self._offsets.task_done[topic_partition]
    if last_done is not None and offset != (last_done + 1):
        logger.warning('Marking task_done on a non-continuous offset: %d != %d + 1',
                       offset, last_done)

    # Warn on smaller offsets than previous commit
    # ("commit" offsets are actually the offset of the next message to fetch)
    last_commit = self._offsets.commit[topic_partition]
    if last_commit is not None and ((offset + 1) <= last_commit):
        logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d',
                       offset, last_commit)

    self._offsets.task_done[topic_partition] = offset

    # Check for auto-commit
    if self._does_auto_commit_messages():
        self._incr_auto_commit_message_count()

    if self._should_auto_commit():
        self.commit()

    return True
def commit(self):
    """Store consumed message offsets (marked via task_done())
    to the kafka cluster for this consumer_group.

    Returns:
        True on success, or False if no offsets were found for commit

    Note:
        this functionality requires server version >=0.8.1.1
        https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommit/FetchAPI
    """
    if not self._config['group_id']:
        logger.warning('Cannot commit without a group_id!')
        raise KafkaConfigurationError(
            'Attempted to commit offsets '
            'without a configured consumer group (group_id)'
        )

    # The protocol supports per-commit metadata, currently unused
    metadata = b''

    commits = []
    for topic_partition, task_done_offset in six.iteritems(self._offsets.task_done):

        # Nothing marked done for this partition
        if task_done_offset is None:
            continue

        # Commit the *next* offset to fetch (task_done + 1), which is
        # consistent with the Java client's convention.
        commit_offset = task_done_offset + 1

        # Skip if unchanged from the previously committed offset
        if commit_offset == self._offsets.commit[topic_partition]:
            continue

        commits.append(
            OffsetCommitRequest(topic_partition[0], topic_partition[1],
                                commit_offset, metadata)
        )

    if not commits:
        logger.info('No new offsets found to commit in group %s',
                    self._config['group_id'])
        return False

    logger.info('committing consumer offsets to group %s',
                self._config['group_id'])
    resps = self._client.send_offset_commit_request(
        kafka_bytestring(self._config['group_id']), commits,
        fail_on_error=False
    )

    for r in resps:
        check_error(r)
        topic_partition = (r.topic, r.partition)
        task_done = self._offsets.task_done[topic_partition]
        self._offsets.commit[topic_partition] = task_done + 1

    if self._config['auto_commit_enable']:
        self._reset_auto_commit()

    return True
def as_json(data, **kwargs):
    """Serialize data as a JSON string.

    :param dict data: data to convert to json
    :param kwargs kwargs: keyword arguments passed through to json.dumps
    :return: json string
    :rtype: str
    """
    # setdefault applies our defaults only when the caller did not
    # specify their own values
    kwargs.setdefault('sort_keys', False)
    kwargs.setdefault('ensure_ascii', False)
    return json.dumps(data, **kwargs)
def read_body(payload, content_type=JSON_CONTENT_TYPE):
    """Read an HTTP payload according to the given content_type.

    Capable of reading from a payload stream; the read data is then
    processed according to content_type.

    Note:
        Content-Type is validated: if read_body is not capable of
        reading data in the requested type, it raises
        UnsupportedContentTypeException.

        If the read data was empty, None is returned to indicate that.
        (The previous docstring claimed a False boolean was returned,
        but the code returns None.)

    Note:
        There is no transformation if content type is equal to
        'text/plain'. What has been read is returned.

    :param stream payload: payload to read; must expose a read method
    :param str content_type: payload content type, defaults to
        application/json
    :return: read data, type depends on content_type, or None if empty
    :exception: :py:class:`.UnreadableContentError` - on any failure
        while reading data
    """
    if content_type not in _READABLE_CONTENT_TYPES:
        msg = ('Cannot read %s, not in %s' %
               (content_type, _READABLE_CONTENT_TYPES))
        raise exceptions.UnsupportedContentTypeException(msg)

    # Keep the try body minimal: only the read() can legitimately fail
    try:
        content = payload.read()
    except Exception as ex:
        raise exceptions.UnreadableContentError(str(ex))

    if not content:
        return None
    return _READABLE_CONTENT_TYPES[content_type](content)
def __process_idle_events(self):
    """Drain the cross-thread event queue, on the GUI thread.

    This should never be called directly; it is invoked via an event
    and must always run on the GUI thread.
    """
    while True:
        try:
            # Renamed the local from `callable` to `func` to avoid
            # shadowing the builtin of the same name.
            func, args = self.queue.get(block=False)
        except queue.Empty:
            break
        func(*args)
def timer_fired(self):
    """Polling loop for events originating from other threads."""
    self.__process_idle_events()

    # Grab the simulation lock, gather all of the wpilib objects,
    # and display them on the screen.
    self.update_widgets()

    # Re-arm the timer -- otherwise timer_fired never runs again
    delay_ms = 100
    self.root.after(delay_ms, self.timer_fired)
def snappy_encode(payload, xerial_compatible=False, xerial_blocksize=32 * 1024):
    """Encode the given data with snappy.

    If xerial_compatible is set, the stream is encoded in a fashion
    compatible with the xerial snappy library.

    The block size (xerial_blocksize) controls how frequently the blocking
    occurs; 32k is the default in the xerial library.

    The format winds up being::

        +-------------+------------+--------------+------------+--------------+
        |   Header    | Block1 len | Block1 data  | Blockn len | Blockn data  |
        |-------------+------------+--------------+------------+--------------|
        |  16 bytes   |   BE int32 | snappy bytes |   BE int32 | snappy bytes |
        +-------------+------------+--------------+------------+--------------+

    It is important to note that the blocksize is the amount of
    uncompressed data presented to snappy at each block, whereas the
    blocklen is the number of bytes that will be present in the stream;
    that is, the length will always be <= blocksize.

    Raises:
        NotImplementedError: if the snappy codec is not available
    """
    if not has_snappy():
        raise NotImplementedError("Snappy codec is not available")

    if not xerial_compatible:
        return snappy.compress(payload)

    out = BytesIO()
    header = b''.join([struct.pack('!' + fmt, dat) for fmt, dat
                       in zip(_XERIAL_V1_FORMAT, _XERIAL_V1_HEADER)])
    out.write(header)

    for i in xrange(0, len(payload), xerial_blocksize):
        block = snappy.compress(payload[i:i + xerial_blocksize])
        # Each block is prefixed with its compressed length (BE int32)
        out.write(struct.pack('!i', len(block)))
        out.write(block)

    # getvalue() returns the full buffer without the seek(0)/read() dance
    return out.getvalue()
def relpath(path):
    """Path helper: return *path* resolved relative to this file."""
    here = os.path.abspath(os.path.dirname(__file__))
    return os.path.normpath(os.path.join(here, path))
def draw_pathfinder_trajectory(
    self,
    trajectory,
    color="#ff0000",
    offset=None,
    scale=(1, 1),
    show_dt=False,
    dt_offset=0.0,
    **kwargs
):
    """Special helper function for drawing trajectories generated by
    robotpy-pathfinder.

    :param trajectory: A list of pathfinder segment objects
    :param offset: If specified, should be x/y tuple to add to the path
                   relative to the robot coordinates
    :param scale: Multiply all points by this (x,y) tuple
    :param show_dt: draw text every N seconds along path, or False
    :param dt_offset: add this to each dt shown
    :param kwargs: Keyword options to pass to tkinter.create_line
    """
    # pathfinder x/y coordinates are switched
    points = [(segment.x, -segment.y) for segment in trajectory]

    self.draw_line(
        points,
        color=color,
        robot_coordinates=offset if offset else True,
        relative_to_first=True,
        arrow=True,
        scale=scale,
    )

    if show_dt:
        dt = trajectory[0].dt

        def _deferred_text():
            # Deferred so this runs during drawing, saving effort up front
            px_per_ft = UserRenderer._global_ui.field.px_per_ft
            line = self._elements[-1]
            for idx in range(0, len(points), int(show_dt / dt)):
                label = "t=%.2f" % (dt_offset + idx * dt,)
                el = TextElement(
                    label, line.pts[idx], 0, "#000000", int(px_per_ft * 0.5)
                )
                UserRenderer._global_ui.field.add_moving_element(el)
                self._elements.append(el)

        self._run(_deferred_text)
def draw_line(
    self,
    line_pts,
    color="#ff0000",
    robot_coordinates=False,
    relative_to_first=False,
    arrow=True,
    scale=(1, 1),
    **kwargs
):
    """
    Draw a polyline on the simulation field.

    :param line_pts: A list of (x,y) pairs to draw. (x,y) are in field units
                     which are measured in feet
    :param color: The color of the line, expressed as a 6-digit hex color
    :param robot_coordinates: If True, the pts will be adjusted such that
                              the first point starts at the center
                              of the robot and that x and y coordinates
                              are rotated according to the robot's
                              current heading. If a tuple, then the pts
                              are adjusted relative to the robot center
                              AND the x,y in the tuple
    :param relative_to_first: If True, the points will be adjusted such
                              that the first point is considered to be
                              (0,0)
    :param arrow: If True, draw the line with an arrow at the end
    :param scale: Multiply all points by this (x,y) tuple
    :param kwargs: Keyword options to pass to tkinter.create_line
    """

    def _defer():  # called later because the field might not exist yet
        # convert from field units (feet) to pixels
        px_per_ft = UserRenderer._global_ui.field.px_per_ft
        if arrow:
            kwargs["arrow"] = "last"
        sx, sy = scale
        line = DrawableLine(
            [(x * px_per_ft * sx, y * px_per_ft * sy) for x, y in line_pts],
            color,
            kwargs,
        )
        # if relative to first, create object, then move relative to the first
        if relative_to_first:
            line.move((-line.pts[0][0], -line.pts[0][1]))
        if robot_coordinates:
            self._do_robot_coordinates(line, robot_coordinates)
        line.update_coordinates()
        UserRenderer._global_ui.field.add_moving_element(line)
        self._elements.append(line)

    self._run(_defer)
def draw_text(
    self,
    text,
    pt,
    color="#000000",
    fontSize=10,
    robot_coordinates=False,
    scale=(1, 1),
    **kwargs
):
    """
    Draw text on the simulation field.

    :param text: Text to render
    :param pt: A tuple of (x,y) in field units (which are measured in feet)
    :param color: The color of the text, expressed as a 6-digit hex color
    :param fontSize: Size of the rendered text
    :param robot_coordinates: If True, the pt will be adjusted such that
                              the point starts at the center of the
                              robot and that x and y coordinates are
                              rotated according to the robot's current
                              heading. If a tuple, then the pt
                              is adjusted relative to the robot center
                              AND the x,y in the tuple
    :param scale: Multiply all points by this (x,y) tuple
    :param kwargs: Keyword options to pass to tkinter.create_text
    """
    x, y = pt

    def _defer():  # called later because the field might not exist yet
        # convert from field units (feet) to pixels
        px_per_ft = UserRenderer._global_ui.field.px_per_ft
        sx, sy = scale
        pt = ((x * px_per_ft * sx), (y * px_per_ft * sy))
        el = TextElement(text, pt, 0, color, fontSize, **kwargs)
        if robot_coordinates:
            self._do_robot_coordinates(el, robot_coordinates)
        el.update_coordinates()
        UserRenderer._global_ui.field.add_moving_element(el)
        self._elements.append(el)

    self._run(_defer)
def murmur2(key):
    """Pure-python Murmur2 implementation.

    Based on the java client, see org.apache.kafka.common.utils.Utils.murmur2

    Args:
        key: if not a bytes type, encoded using default encoding

    Returns: MurmurHash2 of key bytearray
    """
    # Normalize the key to an indexable byte buffer.
    if isinstance(key, bytearray) or (six.PY3 and isinstance(key, bytes)):
        buf = key
    else:
        buf = bytearray(str(key).encode())

    n = len(buf)
    MASK = 0xffffffff
    # 'm' and 'r' are mixing constants generated offline.
    # They're not really 'magic', they just happen to work well.
    m = 0x5bd1e995
    r = 24
    # Initialize the hash to a (fixed) seed mixed with the length.
    h = (0x9747b28c ^ n) & MASK

    # Mix 4 bytes at a time into the hash.
    for block in range(n // 4):
        base = block * 4
        k = (buf[base]
             | (buf[base + 1] << 8)
             | (buf[base + 2] << 16)
             | (buf[base + 3] << 24)) & MASK
        k = (k * m) & MASK
        k ^= (k % 0x100000000) >> r  # k ^= k >>> r
        k = (k * m) & MASK
        h = (h * m) & MASK
        h ^= k

    # Fold in the trailing 1-3 bytes, if any.
    tail = n & ~3
    rem = n % 4
    if rem >= 3:
        h ^= (buf[tail + 2] & 0xff) << 16
    if rem >= 2:
        h ^= (buf[tail + 1] & 0xff) << 8
    if rem >= 1:
        h ^= buf[tail] & 0xff
    h &= MASK

    # Final avalanche.
    h = (h * m) & MASK
    h ^= (h % 0x100000000) >> 13  # h >>> 13
    h = (h * m) & MASK
    h ^= (h % 0x100000000) >> 15  # h >>> 15
    return h & MASK
def init(policy_file=None, rules=None, default_rule=None, use_conf=True):
    """Init an Enforcer class.

    :param policy_file: Custom policy file to use, if none is specified,
                        `CONF.policy_file` will be used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation.
    :param default_rule: Default rule to use, CONF.default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from config file.
    """
    global _ENFORCER
    global saved_file_rules

    # Create the module-wide enforcer only once; later calls reuse it and
    # ignore the constructor-related arguments.
    if not _ENFORCER:
        _ENFORCER = policy.Enforcer(CONF,
                                    policy_file=policy_file,
                                    rules=rules,
                                    default_rule=default_rule,
                                    use_conf=use_conf
                                    )
        register_rules(_ENFORCER)
        _ENFORCER.load_rules()

    # Only the rules which are loaded from file may be changed
    current_file_rules = _ENFORCER.file_rules
    current_file_rules = _serialize_rules(current_file_rules)

    # Warn about deprecated user-based rules whenever the on-file rules
    # differ from what was seen last time.
    if saved_file_rules != current_file_rules:
        _warning_for_deprecated_user_based_rules(current_file_rules)
        saved_file_rules = copy.deepcopy(current_file_rules)
def _serialize_rules(rules):
"""Serialize all the Rule object as string.
New string is used to compare the rules list.
"""
result = [(rule_name, str(rule)) for rule_name, rule in rules.items()]
return sorted(result, key=lambda rule: rule[0]) | Serialize all the Rule object as string.
New string is used to compare the rules list. | entailment |
def _warning_for_deprecated_user_based_rules(rules):
    """Warn when user based policy enforcement is used by a rule that
    doesn't support it.

    :param rules: serialized (rule_name, rule_text) pairs, as produced by
                  _serialize_rules
    """
    for rule in rules:
        # We will skip the warning for the resources which support user based
        # policy enforcement.
        if [resource for resource in USER_BASED_RESOURCES
            if resource in rule[0]]:
            continue
        # Warn if the rule text references the user_id attribute.
        if 'user_id' in KEY_EXPR.findall(rule[1]):
            LOG.warning(_LW("The user_id attribute isn't supported in the "
                            "rule '%s'. All the user_id based policy "
                            "enforcement will be removed in the "
                            "future."), rule[0])
def authorize(context, action, target, do_raise=True):
    """Verify that the action is valid on the target in this context.

    :param context: monasca project context
    :param action: String representing the action to be checked. This
                   should be colon separated for clarity.
    :param target: Dictionary representing the object of the action for
                   object creation. This should be a dictionary representing
                   the location of the object e.g.
                   ``{'project_id': 'context.project_id'}``
    :param do_raise: if True (the default), raises PolicyNotAuthorized,
                     if False returns False
    :type context: object
    :type action: str
    :type target: dict
    :type do_raise: bool
    :return: returns a non-False value (not necessarily True) if authorized,
             and the False if not authorized and do_raise if False
    :raises oslo_policy.policy.PolicyNotAuthorized: if verification fails
    """
    # Make sure the module-wide enforcer exists before using it.
    init()
    credentials = context.to_policy_values()
    try:
        # NOTE(review): the extra ``action`` kwarg is presumably forwarded to
        # the PolicyNotAuthorized exception by oslo.policy — confirm.
        result = _ENFORCER.authorize(action, target, credentials,
                                     do_raise=do_raise, action=action)
        return result
    except policy.PolicyNotRegistered:
        LOG.exception('Policy not registered')
        raise
    except Exception:
        LOG.debug('Policy check for %(action)s failed with credentials '
                  '%(credentials)s',
                  {'action': action, 'credentials': credentials})
        raise
def check_is_admin(context):
    """Check if roles contains 'admin' role according to policy settings."""
    init()
    credentials = context.to_policy_values()
    # The 'admin_required' rule is evaluated against the credentials
    # themselves as the target.
    target = credentials
    return _ENFORCER.authorize('admin_required', target, credentials)
def set_rules(rules, overwrite=True, use_conf=False):  # pragma: no cover
    """Set rules based on the provided dict of rules.

    Note:
        Used in tests only.

    :param rules: New rules to use. It should be an instance of dict
    :param overwrite: Whether to overwrite current rules or update them
                      with the new rules.
    :param use_conf: Whether to reload rules from config file.
    """
    # init() is deliberately invoked with use_conf=False so the enforcer is
    # built without reading the config file; the use_conf argument only
    # affects the set_rules call below.
    init(use_conf=False)
    _ENFORCER.set_rules(rules, overwrite, use_conf)
def verify_deprecated_policy(old_policy, new_policy, default_rule, context):
    """Check the rule of the deprecated policy action.

    If the current rule of the deprecated policy action is set to a non-default
    value, then a warning message is logged stating that the new policy
    action should be used to dictate permissions as the old policy action is
    being deprecated.

    :param old_policy: policy action that is being deprecated
    :param new_policy: policy action that is replacing old_policy
    :param default_rule: the old_policy action default rule value
    :param context: the monasca context
    :return: the authorize() result for old_policy when its rule was
             customized, otherwise False
    """
    if _ENFORCER:
        current_rule = str(_ENFORCER.rules[old_policy])
    else:
        current_rule = None

    # A customized (non-default) rule means the deployer still relies on the
    # deprecated action: warn and enforce it.
    if current_rule != default_rule:
        LOG.warning("Start using the new action '{0}'. The existing "
                    "action '{1}' is being deprecated and will be "
                    "removed in future release.".format(new_policy,
                                                        old_policy))
        target = {'project_id': context.project_id,
                  'user_id': context.user_id}
        return authorize(context=context, action=old_policy, target=target)
    else:
        return False
def build_sample_ace_problem_wang04(N=100):
    """Build the sample regression problem from Wang 2004.

    Returns five uniform predictor columns and a noisy log response.
    """
    predictors = [numpy.random.uniform(-1, 1, size=N) for _ in range(5)]
    noise = numpy.random.standard_normal(N)
    response = numpy.log(
        4.0
        + numpy.sin(4 * predictors[0])
        + numpy.abs(predictors[1])
        + predictors[2] ** 2
        + predictors[3] ** 3
        + predictors[4]
        + 0.1 * noise
    )
    return predictors, response
def run_wang04():
    """Run the Wang 2004 sample problem and return the solved ACESolver."""
    x, y = build_sample_ace_problem_wang04(N=200)
    ace_solver = ace.ACESolver()
    ace_solver.specify_data_set(x, y)
    ace_solver.solve()
    # Plotting is best-effort: skip silently when the plotting backend
    # (presumably matplotlib — confirm) is not installed.
    try:
        ace.plot_transforms(ace_solver, 'ace_transforms_wang04.png')
        ace.plot_input(ace_solver, 'ace_input_wang04.png')
    except ImportError:
        pass
    return ace_solver
def add_robot(self, controller):
    """Add a robot controller.

    :param controller: controller object; must expose on_mode_change()
    """
    # connect to the controller
    # -> this is to support module robots
    controller.on_mode_change(self._on_robot_mode_change)
    self.robots.append(controller)
def set_joystick(self, x, y, n):
    """
    Receives joystick values from the SnakeBoard.

    :param x: joystick x coordinate
    :param y: joystick y coordinate
    :param n: index of the robot to forward the values to
    """
    self.robots[n].set_joystick(x, y)
async def disconnect(self):
    """
    Disconnect coroutine. Safe to call while already disconnected;
    concurrent calls are serialized by the disconnect lock.
    """
    async with self._disconnect_lock:
        if self._state == ConnectionState.DISCONNECTED:
            return
        self._set_state(ConnectionState.DISCONNECTING)
        logger.info('%s Disconnecting...', self.fingerprint)

        # Cancel background tasks first so they don't interfere with teardown.
        if self._reconnect_task:
            self._reconnect_task.cancel()
            self._reconnect_task = None

        if self._ping_task and not self._ping_task.done():
            self._ping_task.cancel()
            self._ping_task = None

        # Swap in the mock db so new requests fail fast during teardown.
        self._db = _DbMock()

        if self._transport:
            # Wait for the transport to actually close — the waiter is
            # presumably resolved by the protocol's connection-lost
            # callback (confirm against the protocol implementation).
            self._disconnect_waiter = _create_future(self._loop)
            self._transport.close()
            self._transport = None
            self._protocol = None
            await self._disconnect_waiter
            self._disconnect_waiter = None
            self._set_state(ConnectionState.DISCONNECTED)
        else:
            self._transport = None
            self._protocol = None
            self._disconnect_waiter = None
            self._set_state(ConnectionState.DISCONNECTED)
def close(self):
    """
    Same as disconnect, but not a coroutine, i.e. it does not wait
    for disconnect to finish.
    """
    if self._state == ConnectionState.DISCONNECTED:
        return
    self._set_state(ConnectionState.DISCONNECTING)
    logger.info('%s Disconnecting...', self.fingerprint)

    # Cancel background tasks before tearing down the transport.
    if self._reconnect_task and not self._reconnect_task.done():
        self._reconnect_task.cancel()
        self._reconnect_task = None

    if self._ping_task and not self._ping_task.done():
        self._ping_task.cancel()
        self._ping_task = None

    if self._transport:
        self._transport.close()
    self._transport = None
    self._protocol = None
    self._disconnect_waiter = None
    # Swap in the mock db so any further requests fail fast.
    self._db = _DbMock()
    self._set_state(ConnectionState.DISCONNECTED)
def call(self, func_name, args=None, *,
         timeout=-1.0, push_subscribe=False) -> _MethodRet:
    """
    Call request coroutine. It is a call with a new behaviour
    (return result of a Tarantool procedure is not wrapped into
    an extra tuple). If you're connecting to Tarantool with
    version < 1.7, then this call method acts like a call16 method

    Examples:

    .. code-block:: pycon

        # tarantool function:
        # function f(...)
        #     return ...
        # end

        >>> await conn.call('f')
        <Response sync=3 rowcount=0 data=[]>
        >>> await conn.call('f', [20, 42])
        <Response sync=3 rowcount=2 data=[20, 42]>

    :param func_name: function name to call
    :param args: arguments to pass to the function (list object)
    :param timeout: Request timeout
    :param push_subscribe: Subscribe to push notifications
    :returns: :class:`asynctnt.Response` instance
    """
    # Thin delegate: the active db object builds and sends the request.
    return self._db.call(func_name, args,
                         timeout=timeout, push_subscribe=push_subscribe)
def eval(self, expression, args=None, *,
         timeout=-1.0, push_subscribe=False) -> _MethodRet:
    """
    Eval request coroutine.

    Examples:

    .. code-block:: pycon

        >>> await conn.eval('return 42')
        <Response sync=3 rowcount=1 data=[42]>

        >>> await conn.eval('return box.info.version')
        <Response sync=3 rowcount=1 data=['2.1.1-7-gd381a45b6']>

    :param expression: expression to execute
    :param args: arguments to pass to the function, that will
                 execute your expression (list object)
    :param timeout: Request timeout
    :param push_subscribe: Subscribe to push messages
    :returns: :class:`asynctnt.Response` instance
    """
    # Thin delegate: the active db object builds and sends the request.
    return self._db.eval(expression, args,
                         timeout=timeout, push_subscribe=push_subscribe)
def select(self, space, key=None, **kwargs) -> _MethodRet:
    """
    Select request coroutine.

    Examples:

    .. code-block:: pycon

        >>> await conn.select('tester')
        <Response sync=3 rowcount=2 data=[
            <TarantoolTuple id=1 name='one'>,
            <TarantoolTuple id=2 name='two'>
        ]>

        >>> res = await conn.select('_space', ['tester'], index='name')
        >>> res.data
        [<TarantoolTuple id=512
                         owner=1
                         name='tester'
                         engine='memtx'
                         field_count=0
                         flags={}
                         format=[
                            {'name': 'id', 'type': 'unsigned'},
                            {'name': 'name', 'type': 'string'}
                         ]>]

    :param space: space id or space name.
    :param key: key to select
    :param offset: offset to use
    :param limit: limit to use
    :param index: index id or name
    :param iterator: one of the following

            * iterator id (int number),
            * :class:`asynctnt.Iterator` object
            * string with an iterator name
    :param timeout: Request timeout
    :returns: :class:`asynctnt.Response` instance
    """
    # Thin delegate: offset/limit/index/iterator/timeout are forwarded as
    # keyword arguments to the active db object.
    return self._db.select(space, key, **kwargs)
def insert(self, space, t, *, replace=False, timeout=-1) -> _MethodRet:
    """
    Insert request coroutine.

    Examples:

    .. code-block:: pycon

        # Basic usage
        >>> await conn.insert('tester', [0, 'hello'])
        <Response sync=3 rowcount=1 data=[
            <TarantoolTuple id=0 name='hello'>
        ]>

        # Using dict as an argument tuple
        >>> await conn.insert('tester', {
        ...     'id': 0,
        ...     'text': 'hell0'
        ... })
        <Response sync=3 rowcount=1 data=[
            <TarantoolTuple id=0 name='hello'>
        ]>

    :param space: space id or space name.
    :param t: tuple to insert (list object)
    :param replace: performs replace request instead of insert
    :param timeout: Request timeout
    :returns: :class:`asynctnt.Response` instance
    """
    # Thin delegate: the active db object builds and sends the request.
    return self._db.insert(space, t,
                           replace=replace,
                           timeout=timeout)
def replace(self, space, t, *, timeout=-1.0) -> _MethodRet:
    """
    Replace request coroutine. Same as insert, but replace.

    :param space: space id or space name.
    :param t: tuple to insert (list object)
    :param timeout: Request timeout
    :returns: :class:`asynctnt.Response` instance
    """
    # Thin delegate: the active db object builds and sends the request.
    return self._db.replace(space, t, timeout=timeout)
def delete(self, space, key, **kwargs) -> _MethodRet:
    """
    Delete request coroutine.

    Examples:

    .. code-block:: pycon

        # Assuming tuple [0, 'hello'] is in space tester
        >>> await conn.delete('tester', [0])
        <Response sync=3 rowcount=1 data=[
            <TarantoolTuple id=0 name='hello'>
        ]>

    :param space: space id or space name.
    :param key: key to delete
    :param index: index id or name
    :param timeout: Request timeout
    :returns: :class:`asynctnt.Response` instance
    """
    # Thin delegate: index/timeout are forwarded as keyword arguments.
    return self._db.delete(space, key, **kwargs)
def update(self, space, key, operations, **kwargs) -> _MethodRet:
    """
    Update request coroutine.

    Examples:

    .. code-block:: pycon

        # Assuming tuple [0, 'hello'] is in space tester
        >>> await conn.update('tester', [0], [ ['=', 1, 'hi!'] ])
        <Response sync=3 rowcount=1 data=[
            <TarantoolTuple id=0 name='hi!'>
        ]>

        # you can use fields names as well
        >>> res = await conn.update('tester', [0],
        ...                         [ ['=', 'text', 'hola'] ])
        <Response sync=3 rowcount=1 data=[
            <TarantoolTuple id=0 name='hola'>
        ]>

    :param space: space id or space name.
    :param key: key to update
    :param operations:
            Operations list of the following format:
            [ [op_type, field_no, ...], ... ]. Please refer to
            https://tarantool.org/doc/book/box/box_space.html?highlight=update#lua-function.space_object.update
            You can use field numbers as well as their names in space
            format as a field_no (if only fetch_schema is True).
            If field is unknown then TarantoolSchemaError is raised.
    :param index: index id or name
    :param timeout: Request timeout
    :returns: :class:`asynctnt.Response` instance
    """
    # Thin delegate: index/timeout are forwarded as keyword arguments.
    return self._db.update(space, key, operations, **kwargs)
def upsert(self, space, t, operations, **kwargs) -> _MethodRet:
    """
    Upsert request coroutine. Performs either insert or update
    (depending on whether the tuple exists or not).

    Examples:

    .. code-block:: pycon

        # upsert does not return anything
        >>> await conn.upsert('tester', [0, 'hello'],
        ...                   [ ['=', 1, 'hi!'] ])
        <Response sync=3 rowcount=0 data=[]>

    :param space: space id or space name.
    :param t: tuple to insert if it's not in space
    :param operations:
            Operations list to use for update if tuple is already in
            space. It has the same format as in update requests:
            [ [op_type, field_no, ...], ... ]. Please refer to
            https://tarantool.org/doc/book/box/box_space.html?highlight=update#lua-function.space_object.update
            You can use field numbers as well as their names in space
            format as a field_no (if only fetch_schema is True).
            If field is unknown then TarantoolSchemaError is raised.
    :param timeout: Request timeout
    :returns: :class:`asynctnt.Response` instance
    """
    # Thin delegate: the active db object builds and sends the request.
    return self._db.upsert(space, t, operations, **kwargs)
def sql(self, query, args=None, *,
        parse_metadata=True, timeout=-1.0) -> _MethodRet:
    """
    Executes an SQL statement (only for Tarantool > 2).

    Examples:

    .. code-block:: pycon

        >>> await conn.sql("select 1 as a, 2 as b")
        <Response sync=3 rowcount=1 data=[<TarantoolTuple A=1 B=2>]>

        >>> await conn.sql("select * from sql_space")
        <Response sync=3 rowcount=2 data=[
            <TarantoolTuple ID=1 NAME='James Bond'>,
            <TarantoolTuple ID=2 NAME='Ethan Hunt'>
        ]>

        >>> await conn.sql("select * from sql_space",
        ...                parse_metadata=False)
        <Response sync=3 rowcount=2 data=[
            <TarantoolTuple 0=1 1='James Bond'>,
            <TarantoolTuple 0=2 1='Ethan Hunt'>
        ]>

    :param query: SQL query
    :param args: Query arguments
    :param parse_metadata: Set to False to disable response's metadata
                           parsing for better performance
    :param timeout: Request timeout
    :returns: :class:`asynctnt.Response` instance
    """
    # Thin delegate: the active db object builds and sends the request.
    return self._db.sql(query, args,
                        parse_metadata=parse_metadata,
                        timeout=timeout)
def compute(self):
    """Run the full SuperSmoother pipeline, end to end."""
    self._compute_primary_smooths()
    self._smooth_the_residuals()
    self._select_best_smooth_at_each_point()
    self._enhance_bass()
    self._smooth_best_span_estimates()
    self._apply_best_spans_to_primaries()
    self._smooth_interpolated_smooth()
    # Residuals are not tracked for the final combined smooth, hence zeros.
    self._store_unsorted_results(self.smooth_result, numpy.zeros(len(self.smooth_result)))
def _compute_primary_smooths(self):
    """Compute fixed-span smooths using each of the default spans."""
    self._primary_smooths.extend(
        smoother.perform_smooth(self.x, self.y, span)
        for span in DEFAULT_SPANS
    )
def _smooth_the_residuals(self):
    """
    Apply the MID_SPAN to the residuals of the primary smooths.

    "For stability reasons, it turns out to be a little better to smooth
    |r_{i}(J)| against xi" - [1]
    """
    for primary_smooth in self._primary_smooths:
        # Smooth each primary smooth's cross-validated residuals over x.
        smooth = smoother.perform_smooth(self.x,
                                         primary_smooth.cross_validated_residual,
                                         MID_SPAN)
        self._residual_smooths.append(smooth.smooth_result)
def _select_best_smooth_at_each_point(self):
    """
    Solve Eq (10): pick the span with the smallest smoothed residual at
    each observation.
    """
    # zip(*...) transposes so each iteration sees one residual per span.
    for point_residuals in zip(*self._residual_smooths):
        winner = min(range(len(point_residuals)),
                     key=point_residuals.__getitem__)
        self._best_span_at_each_point.append(DEFAULT_SPANS[winner])
def _enhance_bass(self):
    """Update best span choices with bass enhancement as requested by user (Eq. 11)."""
    if not self._bass_enhancement:
        # like in supsmu, skip if alpha=0
        return
    bass_span = DEFAULT_SPANS[BASS_INDEX]
    enhanced_spans = []
    for i, best_span_here in enumerate(self._best_span_at_each_point):
        best_smooth_index = DEFAULT_SPANS.index(best_span_here)
        best_span = DEFAULT_SPANS[best_smooth_index]
        best_span_residual = self._residual_smooths[best_smooth_index][i]
        bass_span_residual = self._residual_smooths[BASS_INDEX][i]
        # Only blend toward the bass span when the chosen span's residual is
        # strictly positive and smaller than the bass span's residual.
        if 0 < best_span_residual < bass_span_residual:
            ri = best_span_residual / bass_span_residual
            # Eq. 11: blending weight from the residual ratio and alpha.
            bass_factor = ri ** (10.0 - self._bass_enhancement)
            enhanced_spans.append(best_span + (bass_span - best_span) * bass_factor)
        else:
            enhanced_spans.append(best_span)
    self._best_span_at_each_point = enhanced_spans
def _smooth_best_span_estimates(self):
    """Smooth the per-observation best-span estimates with the MID_SPAN."""
    self._smoothed_best_spans = smoother.perform_smooth(
        self.x, self._best_span_at_each_point, MID_SPAN)
def _apply_best_spans_to_primaries(self):
    """
    Interpolate between primary smooths at the chosen spans.

    For each observation, interpolate the primary-smooth values at the
    (smoothed) best span to obtain the final smoothed value there.
    """
    self.smooth_result = []
    for index, span in enumerate(self._smoothed_best_spans.smooth_result):
        values_here = [primary.smooth_result[index]
                       for primary in self._primary_smooths]
        # pylint: disable=no-member
        self.smooth_result.append(
            numpy.interp(span, DEFAULT_SPANS, values_here))
def _smooth_interpolated_smooth(self):
    """
    Apply a final tweeter-span smooth to the interpolated values.

    Breiman's supsmu.f does this final pass even though the publication
    does not discuss it explicitly; it is required to match the FORTRAN
    implementation exactly.
    """
    final_pass = smoother.perform_smooth(
        self.x, self.smooth_result, TWEETER_SPAN)
    self.smooth_result = final_pass.smooth_result
def _mp_consume(client, group, topic, queue, size, events, **consumer_options):
    """
    A child process worker which consumes messages based on the
    notifications given by the controller process.

    Arguments:
        client: kafka client instance; reinit() is called so each child
            process gets its own socket connection.
        group: consumer group name.
        topic: topic to consume from.
        queue (multiprocessing.Queue): queue onto which consumed messages
            are pushed for the controller process.
        size: shared Value holding the number of messages the controller
            currently wants.
        events: namespace of `start`, `pause` and `exit` events used by the
            controller to drive this worker.
        consumer_options: extra keyword arguments passed to SimpleConsumer.

    NOTE: Ideally, this should have been a method inside the Consumer
    class. However, multiprocessing module has issues in windows. The
    functionality breaks unless this function is kept outside of a class
    """
    # The `queue` parameter shadows the stdlib queue module, and a Queue
    # *instance* has no `Full` attribute -- `except queue.Full` raised
    # AttributeError whenever the queue filled up. Import the exception
    # explicitly instead.
    try:
        from queue import Full  # Python 3
    except ImportError:
        from Queue import Full  # Python 2

    # Initial interval for retries in seconds.
    interval = 1
    while not events.exit.is_set():
        try:
            # Make the child processes open separate socket connections
            client.reinit()

            # We will start consumers without auto-commit. Auto-commit will be
            # done by the master controller process.
            consumer = SimpleConsumer(client, group, topic,
                                      auto_commit=False,
                                      auto_commit_every_n=None,
                                      auto_commit_every_t=None,
                                      **consumer_options)

            # Ensure that the consumer provides the partition information
            consumer.provide_partition_info()

            while True:
                # Wait till the controller indicates us to start consumption
                events.start.wait()

                # If we are asked to quit, do so
                if events.exit.is_set():
                    break

                # Consume messages and add them to the queue. If the controller
                # indicates a specific number of messages, follow that advice
                count = 0

                message = consumer.get_message()
                if message:
                    while True:
                        try:
                            queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)
                            break
                        except Full:
                            if events.exit.is_set():
                                break

                    count += 1

                    # We have reached the required size. The controller might have
                    # more than what he needs. Wait for a while.
                    # Without this logic, it is possible that we run into a big
                    # loop consuming all available messages before the controller
                    # can reset the 'start' event
                    if count == size.value:
                        events.pause.wait()

                else:
                    # In case we did not receive any message, give up the CPU for
                    # a while before we try again
                    time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)

            consumer.stop()

        except KafkaError as e:
            # Retry with exponential backoff, capped at MAX_BACKOFF_SECONDS
            log.error(
                "Problem communicating with Kafka (%s), retrying in %d seconds..." % (e, interval))
            time.sleep(interval)
            interval = interval * 2 if interval * 2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS
def move(self, v):
    """Translate the object by vector *v* (a tuple of x/y coordinates).

    The vector is rotated by the object's current angle first, so the
    movement happens in the object's own frame of reference.
    """
    dx, dy = v
    cos_a = math.cos(self.angle)
    sin_a = math.sin(self.angle)
    dx, dy = dx * cos_a - dy * sin_a, dx * sin_a + dy * cos_a

    def translate(point):
        return point[0] + dx, point[1] + dy

    # TODO: detect other objects in the way, stop movement appropriately
    self.pts = [translate(point) for point in self.pts]
    self.center = translate(self.center)
def rotate(self, angle):
    """Rotate the object about its center by *angle* radians."""
    self.angle = (self.angle + angle) % (math.pi * 2.0)
    # precompute the rotation coefficients once
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    cx, cy = self.center

    def spin(point):
        # rotate the point around the object's center
        dx = point[0] - cx
        dy = point[1] - cy
        return (dx * cos_a - dy * sin_a) + cx, (dx * sin_a + dy * cos_a) + cy

    self.pts = [spin(point) for point in self.pts]
def delivery_report(err, msg):
    """
    Callback called once for each produced message to indicate the final
    delivery result. Triggered by poll() or flush().
    :param confluent_kafka.KafkaError err: Information about any error
    that occurred whilst producing the message.
    :param confluent_kafka.Message msg: Information about the message
    produced.
    :returns: None
    :raises confluent_kafka.KafkaException: if delivery failed.
    """
    if err is not None:
        # log with traceback, then surface the failure to the caller of
        # poll()/flush()
        log.exception(u'Message delivery failed: {}'.format(err))
        raise confluent_kafka.KafkaException(err)
    else:
        log.debug(u'Message delivered to {} [{}]: {}'.format(
            msg.topic(), msg.partition(), msg.value()))
delivery result. Triggered by poll() or flush().
:param confluent_kafka.KafkaError err: Information about any error
that occurred whilst producing the message.
:param confluent_kafka.Message msg: Information about the message
produced.
:returns: None
:raises confluent_kafka.KafkaException | entailment |
def publish(self, topic, messages, key=None, timeout=2):
    """
    Publish messages to the topic.
    :param str topic: Topic to produce messages to.
    :param list(str) messages: List of message payloads (a single payload
        is also accepted and wrapped in a list).
    :param str key: Message key.
    :param float timeout: Maximum time to block in seconds.
    :returns: Number of messages still in queue.
    :rtype int
    """
    # Accept a single message as well as a list of messages.
    if not isinstance(messages, list):
        messages = [messages]
    try:
        for m in messages:
            # Ensure bytes on the wire; the producer requires encoded payloads.
            m = encodeutils.safe_encode(m, incoming='utf-8')
            self._producer.produce(topic, m, key,
                                   callback=KafkaProducer.delivery_report)
            # Serve delivery callbacks from previous produce() calls.
            self._producer.poll(0)
        # Wait (up to timeout) for outstanding deliveries; returns the
        # number of messages still queued.
        return self._producer.flush(timeout)
    except (BufferError, confluent_kafka.KafkaException,
            NotImplementedError):
        log.exception(u'Error publishing to {} topic.'.format(topic))
        raise
:param str topic: Topic to produce messages to.
:param list(str) messages: List of message payloads.
:param str key: Message key.
:param float timeout: Maximum time to block in seconds.
:returns: Number of messages still in queue.
:rtype int | entailment |
def increment_time_by(self, secs):
    """This is called when wpilib.Timer.delay() occurs"""
    # mark all watchers as having slept
    self.slept = [True] * 3
    was_paused = False

    with self.lock:
        self._increment_tm(secs)

        # While paused by the operator, wait on the condition instead of
        # sleeping for real.
        while self.paused and secs > 0:
            if self.pause_secs is not None:
                # if pause_secs is set, this means it was a step operation,
                # so we adjust the wait accordingly
                if secs > self.pause_secs:
                    secs -= self.pause_secs
                else:
                    secs = 0
                self.pause_secs = None
            was_paused = True
            # NOTE(review): self.lock appears to be a threading.Condition
            # (wait() releases it until notified) -- confirm.
            self.lock.wait()

            # if the operator tried to do another step, this will update
            # the paused flag so we don't escape the loop
            self._increment_tm(secs)

    # only sleep in real time when we were never paused
    if not was_paused:
        time.sleep(secs)
def _send_upstream(queue, client, codec, batch_time, batch_size,
                   req_acks, ack_timeout, retry_options, stop_event,
                   log_messages_on_error=ASYNC_LOG_MESSAGES_ON_ERROR,
                   stop_timeout=ASYNC_STOP_TIMEOUT_SECS,
                   codec_compresslevel=None):
    """Private method to manage producing messages asynchronously

    Listens on the queue for a specified number of messages or until
    a specified timeout and then sends messages to the brokers in grouped
    requests (one per broker).

    Messages placed on the queue should be tuples that conform to this format:
        ((topic, partition), message, key)

    Currently does not mark messages with task_done. Do not attempt to join()!

    Arguments:
        queue (threading.Queue): the queue from which to get messages
        client (KafkaClient): instance to use for communicating with brokers
        codec (kafka.protocol.ALL_CODECS): compression codec to use
        batch_time (int): interval in seconds to send message batches
        batch_size (int): count of messages that will trigger an immediate send
        req_acks: required acks to use with ProduceRequests. see server protocol
        ack_timeout: timeout to wait for required acks. see server protocol
        retry_options (RetryOptions): settings for retry limits, backoff etc
        stop_event (threading.Event): event to monitor for shutdown signal.
            when this event is 'set', the producer will stop sending messages.
        log_messages_on_error (bool, optional): log stringified message-contents
            on any produce error, otherwise only log a hash() of the contents,
            defaults to True.
        stop_timeout (int or float, optional): number of seconds to continue
            retrying messages after stop_event is set, defaults to 30.
    """
    request_tries = {}

    # Connect (with backoff retries) before entering the main loop.
    while not stop_event.is_set():
        try:
            client.reinit()
        except Exception as e:
            log.warn(
                'Async producer failed to connect to brokers; backoff for %s(ms) before retrying',
                retry_options.backoff_ms)
            time.sleep(float(retry_options.backoff_ms) / 1000)
        else:
            break

    stop_at = None
    while not (stop_event.is_set() and queue.empty() and not request_tries):

        # Handle stop_timeout
        if stop_event.is_set():
            if not stop_at:
                stop_at = stop_timeout + time.time()
            if time.time() > stop_at:
                log.debug('Async producer stopping due to stop_timeout')
                break

        timeout = batch_time
        count = batch_size
        send_at = time.time() + timeout
        msgset = defaultdict(list)

        # Merging messages will require a bit more work to manage correctly
        # for now, dont look for new batches if we have old ones to retry
        if request_tries:
            count = 0
            log.debug('Skipping new batch collection to handle retries')
        else:
            log.debug('Batching size: %s, timeout: %s', count, timeout)

        # Keep fetching till we gather enough messages or a
        # timeout is reached
        while count > 0 and timeout >= 0:
            try:
                topic_partition, msg, key = queue.get(timeout=timeout)
            except Empty:
                break

            # Check if the controller has requested us to stop
            if topic_partition == STOP_ASYNC_PRODUCER:
                stop_event.set()
                break

            # Adjust the timeout to match the remaining period
            count -= 1
            timeout = send_at - time.time()
            msgset[topic_partition].append((msg, key))

        # Send collected requests upstream
        for topic_partition, msg_key_pairs in msgset.items():
            # NOTE(review): `key` here is whatever the *last* queue item left
            # behind; it is passed as the wrapper-message key for compressed
            # sets -- confirm this is intended.
            messages = create_message_set(msg_key_pairs, codec, key,
                                          codec_compresslevel)
            req = ProduceRequest(topic_partition.topic,
                                 topic_partition.partition,
                                 tuple(messages))
            request_tries[req] = 0

        if not request_tries:
            continue

        reqs_to_retry, error_cls = [], None
        retry_state = {
            'do_backoff': False,
            'do_refresh': False
        }

        def _handle_error(error_cls, request):
            # Record the request for retry and remember whether we must
            # back off and/or refresh metadata before retrying.
            if (issubclass(error_cls, RETRY_ERROR_TYPES) or
                    (retry_options.retry_on_timeouts and
                     issubclass(error_cls, RequestTimedOutError))):
                reqs_to_retry.append(request)
            if issubclass(error_cls, RETRY_BACKOFF_ERROR_TYPES):
                retry_state['do_backoff'] |= True
            if issubclass(error_cls, RETRY_REFRESH_ERROR_TYPES):
                retry_state['do_refresh'] |= True

        requests = list(request_tries.keys())
        log.debug('Sending: %s', requests)
        responses = client.send_produce_request(requests,
                                                acks=req_acks,
                                                timeout=ack_timeout,
                                                fail_on_error=False)
        log.debug('Received: %s', responses)
        for i, response in enumerate(responses):
            error_cls = None
            if isinstance(response, FailedPayloadsError):
                error_cls = response.__class__
                orig_req = response.payload
            elif isinstance(response, ProduceResponse) and response.error:
                error_cls = kafka_errors.get(response.error, UnknownError)
                orig_req = requests[i]

            if error_cls:
                _handle_error(error_cls, orig_req)
                log.error('%s sending ProduceRequest (#%d of %d) '
                          'to %s:%d with msgs %s',
                          error_cls.__name__, (i + 1), len(requests),
                          orig_req.topic, orig_req.partition,
                          orig_req.messages if log_messages_on_error else hash(orig_req.messages))

        if not reqs_to_retry:
            request_tries = {}
            continue

        # doing backoff before next retry
        if retry_state['do_backoff'] and retry_options.backoff_ms:
            log.warn('Async producer backoff for %s(ms) before retrying', retry_options.backoff_ms)
            time.sleep(float(retry_options.backoff_ms) / 1000)

        # refresh topic metadata before next retry
        if retry_state['do_refresh']:
            log.warn('Async producer forcing metadata refresh metadata before retrying')
            try:
                client.load_metadata_for_topics()
            except Exception as e:
                # BUG FIX: exceptions have no `.message` attribute on
                # Python 3; log the exception object itself.
                log.error("Async producer couldn't reload topic metadata. Error: `%s`", e)

        # Apply retry limit, dropping messages that are over
        request_tries = dict(
            (key, count + 1)
            for (key, count) in request_tries.items()
            if key in reqs_to_retry and (retry_options.limit is None or
                                         (count < retry_options.limit))
        )

        # Log messages we are going to retry
        for orig_req in request_tries.keys():
            log.info('Retrying ProduceRequest to %s:%d with msgs %s',
                     orig_req.topic, orig_req.partition,
                     orig_req.messages if log_messages_on_error else hash(orig_req.messages))

    if request_tries or not queue.empty():
        log.error('Stopped producer with {0} unsent messages'
                  .format(len(request_tries) + queue.qsize()))
def get_cache_key(page):
    """
    Create the cache key for the current page and language
    """
    try:
        # newer page objects keep the site on page.node -- fall back to the
        # page itself for older objects (presumably older django-cms; confirm)
        site_id = page.node.site_id
    except AttributeError:
        site_id = page.site_id
    return _get_cache_key('page_sitemap', page, 'default', site_id)
def out_name(stem, timestep=None):
    """Return StagPy out file name.

    Args:
        stem (str): short description of file content.
        timestep (int): timestep if relevant.
    Returns:
        str: the output file name.
    Other Parameters:
        conf.core.outname (str): the generic name stem, defaults to
            ``'stagpy'``.
    """
    suffix = stem if timestep is None else (stem + INT_FMT).format(timestep)
    return '{}_{}'.format(conf.core.outname, suffix)
def saveplot(fig, *name_args, close=True, **name_kwargs):
    """Save matplotlib figure.

    You need to provide :data:`stem` as a positional or keyword argument
    (see :func:`out_name`).

    Args:
        fig (:class:`matplotlib.figure.Figure`): matplotlib figure.
        close (bool): whether to close the figure.
        name_args: positional arguments passed on to :func:`out_name`.
        name_kwargs: keyword arguments passed on to :func:`out_name`.
    """
    stem = out_name(*name_args, **name_kwargs)
    fig.savefig('{}.{}'.format(stem, conf.plot.format),
                format=conf.plot.format, bbox_inches='tight')
    if close:
        plt.close(fig)
def baredoc(obj):
    """Return the first line of the docstring of an object.

    Trailing periods and spaces as well as leading spaces are removed from
    the output.

    Args:
        obj: any Python object.
    Returns:
        str: the first line of the docstring of obj, or '' when there is
            no docstring.
    """
    doc = getdoc(obj)
    if doc:
        first_line = doc.splitlines()[0]
        return first_line.rstrip(' .').lstrip()
    return ''
def fmttime(tin):
    """Return LaTeX expression with time in scientific notation.

    Args:
        tin (float): the time.
    Returns:
        str: the LaTeX expression.
    """
    mantissa, exponent = '{:.2e}'.format(tin).split('e')
    return r'$t={} \times 10^{{{}}}$'.format(mantissa, int(exponent))
def list_of_vars(arg_plot):
    """Construct list of variables per plot.

    Args:
        arg_plot (str): string with variable names separated with
            ``-`` (figures), ``.`` (subplots) and ``,`` (same subplot).
    Returns:
        three nested lists of str
        - variables on the same subplot;
        - subplots on the same figure;
        - figures.
    """
    # NOTE: the docstring previously claimed ``_`` separated figures, but
    # the code splits on ``-``; the documentation is now consistent.
    lovs = [[[var for var in svars.split(',') if var]
             for svars in pvars.split('.') if svars]
            for pvars in arg_plot.split('-') if pvars]
    # drop subplots and figures left empty after filtering
    lovs = [[slov for slov in lov if slov] for lov in lovs if lov]
    return [lov for lov in lovs if lov]
def set_of_vars(lovs):
    """Build set of variables from list.

    Args:
        lovs: nested lists of variables such as the one produced by
            :func:`list_of_vars`.
    Returns:
        set of str: flattened set of all the variables present in the
            nested lists.
    """
    return {var for pvars in lovs for svars in pvars for var in svars}
def get_rbounds(step):
    """Radial or vertical position of boundaries.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
    Returns:
        tuple of floats: radial or vertical positions of boundaries of the
            domain.
    """
    if step.geom is not None:
        rcmb = step.geom.rcmb
    else:
        # fall back on the run parameters when no geometry is available
        geom_par = step.sdat.par['geometry']
        rcmb = geom_par['r_cmb']
        if geom_par['shape'].lower() == 'cartesian':
            rcmb = 0
    rcmb = max(rcmb, 0)
    return rcmb, rcmb + 1
def fnames(self, names):
    """Replace file names while keeping the list length constant."""
    replacement = list(names[:len(self._fnames)])
    # keep the tail of the previous names when fewer are provided
    self._fnames = replacement + self._fnames[len(replacement):]
def _plot_time_list(sdat, lovs, tseries, metas, times=None):
    """Plot requested time series, one figure per variable group."""
    if times is None:
        times = {}
    for vfig in lovs:
        # one row of axes per subplot, sharing the time axis
        fig, axes = plt.subplots(nrows=len(vfig), sharex=True,
                                 figsize=(12, 2 * len(vfig)))
        axes = [axes] if len(vfig) == 1 else axes
        fname = ['time']
        for iplt, vplt in enumerate(vfig):
            ylabel = None
            for ivar, tvar in enumerate(vplt):
                fname.append(tvar)
                # extra variables carry their own time vector
                time = times[tvar] if tvar in times else tseries['t']
                axes[iplt].plot(time, tseries[tvar],
                                conf.time.style,
                                label=metas[tvar].description)
                lbl = metas[tvar].kind
                if ylabel is None:
                    ylabel = lbl
                elif ylabel != lbl:
                    # mixed kinds on this subplot: no common label
                    ylabel = ''
            if ivar == 0:
                # single variable: use its full description as label
                ylabel = metas[tvar].description
            if ylabel:
                _, unit = sdat.scale(1, metas[tvar].dim)
                if unit:
                    ylabel += ' ({})'.format(unit)
                axes[iplt].set_ylabel(ylabel)
            if vplt[0][:3] == 'eta':  # list of log variables
                axes[iplt].set_yscale('log')
                axes[iplt].set_ylim(bottom=conf.plot.vmin, top=conf.plot.vmax)
            if ivar:
                axes[iplt].legend()
            axes[iplt].tick_params()
        _, unit = sdat.scale(1, 's')
        if unit:
            unit = ' ({})'.format(unit)
        axes[-1].set_xlabel('Time' + unit)
        axes[-1].set_xlim((tseries['t'].iloc[0], tseries['t'].iloc[-1]))
        axes[-1].tick_params()
        misc.saveplot(fig, '_'.join(fname))
def get_time_series(sdat, var, tstart, tend):
    """Extract or compute and rescale a time series.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        var (str): time series name, a key of :data:`stagpy.phyvars.TIME`
            or :data:`stagpy.phyvars.TIME_EXTRA`.
        tstart (float): starting time of desired series. Set to None to start
            at the beginning of available data.
        tend (float): ending time of desired series. Set to None to stop at the
            end of available data.
    Returns:
        tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Vart`:
        series, time, meta

        series is the requested time series, time the time at which it
        is evaluated (set to None if it is the one of time series output
        by StagYY), and meta is a :class:`stagpy.phyvars.Vart` instance
        holding metadata of the requested variable.
    Raises:
        UnknownTimeVarError: if var is neither output by StagYY nor a
            computable extra variable.
    """
    tseries = sdat.tseries_between(tstart, tend)
    if var in tseries.columns:
        # variable directly output by StagYY
        series = tseries[var]
        time = None
        if var in phyvars.TIME:
            meta = phyvars.TIME[var]
        else:
            # output by StagYY but unknown to stagpy: dummy metadata
            meta = phyvars.Vart(var, None, '1')
    elif var in phyvars.TIME_EXTRA:
        # computed variable: description holds the computing function
        meta = phyvars.TIME_EXTRA[var]
        series, time = meta.description(sdat, tstart, tend)
        meta = phyvars.Vart(misc.baredoc(meta.description),
                            meta.kind, meta.dim)
    else:
        raise UnknownTimeVarError(var)
    series, _ = sdat.scale(series, meta.dim)
    if time is not None:
        time, _ = sdat.scale(time, 's')
    return series, time, meta
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
var (str): time series name, a key of :data:`stagpy.phyvars.TIME`
or :data:`stagpy.phyvars.TIME_EXTRA`.
tstart (float): starting time of desired series. Set to None to start
at the beginning of available data.
tend (float): ending time of desired series. Set to None to stop at the
end of available data.
Returns:
tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Vart`:
series, time, meta
series is the requested time series, time the time at which it
is evaluated (set to None if it is the one of time series output
by StagYY), and meta is a :class:`stagpy.phyvars.Vart` instance
holding metadata of the requested variable. | entailment |
def plot_time_series(sdat, lovs):
    """Plot requested time series.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        lovs (nested list of str): nested list of series names such as
            the one produced by :func:`stagpy.misc.list_of_vars`.
    Other Parameters:
        conf.time.tstart: the starting time.
        conf.time.tend: the ending time.
    """
    tseries = {}
    times = {}
    metas = {}
    for tvar in misc.set_of_vars(lovs):
        series, time, meta = get_time_series(
            sdat, tvar, conf.time.tstart, conf.time.tend)
        tseries[tvar] = series
        metas[tvar] = meta
        if time is not None:
            times[tvar] = time
    # the reference time vector is always needed for plotting
    tseries['t'] = get_time_series(
        sdat, 't', conf.time.tstart, conf.time.tend)[0]
    _plot_time_list(sdat, lovs, tseries, metas, times)
def compstat(sdat, tstart=None, tend=None):
    """Compute statistics from series output by StagYY.

    Create a file 'statistics.dat' containing the mean and standard deviation
    of each series on the requested time span.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        tstart (float): starting time. Set to None to start at the beginning of
            available data.
        tend (float): ending time. Set to None to stop at the end of available
            data.
    """
    data = sdat.tseries_between(tstart, tend)
    time = data['t'].values
    delta_time = time[-1] - time[0]
    data = data.iloc[:, 1:].values  # assume t is first column
    # time-weighted mean and RMS deviation via trapezoidal integration
    mean = np.trapz(data, x=time, axis=0) / delta_time
    rms = np.sqrt(np.trapz((data - mean)**2, x=time, axis=0) / delta_time)
    with open(misc.out_name('statistics.dat'), 'w') as out_file:
        # first line: means, second line: RMS deviations
        mean.tofile(out_file, sep=' ', format="%10.5e")
        out_file.write('\n')
        rms.tofile(out_file, sep=' ', format="%10.5e")
        out_file.write('\n')
Create a file 'statistics.dat' containing the mean and standard deviation
of each series on the requested time span.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): starting time. Set to None to start at the beginning of
available data.
tend (float): ending time. Set to None to stop at the end of available
data. | entailment |
def cmd():
    """Implementation of time subcommand.

    Other Parameters:
        conf.time
        conf.core
    """
    sdat = StagyyData(conf.core.path)
    if sdat.tseries is None:
        # nothing to do without time series data
        return

    if conf.time.fraction is not None:
        if not 0 < conf.time.fraction <= 1:
            raise InvalidTimeFractionError(conf.time.fraction)
        conf.time.tend = None
        t_0 = sdat.tseries.iloc[0].loc['t']
        t_f = sdat.tseries.iloc[-1].loc['t']
        # keep only the last `fraction` of the run:
        # fraction=1 -> tstart=t_0 (whole run), fraction->0 -> tstart->t_f
        conf.time.tstart = (t_0 * conf.time.fraction +
                            t_f * (1 - conf.time.fraction))

    lovs = misc.list_of_vars(conf.time.plot)
    if lovs:
        plot_time_series(sdat, lovs)

    if conf.time.compstat:
        compstat(sdat, conf.time.tstart, conf.time.tend)
Other Parameters:
conf.time
conf.core | entailment |
def info_cmd():
    """Print basic information about StagYY run."""
    sdat = stagyydata.StagyyData(conf.core.path)
    lsnap = sdat.snaps.last
    lstep = sdat.steps.last
    print('StagYY run in {}'.format(sdat.path))
    # build the grid-dimension string according to the geometry
    if lsnap.geom.threed:
        dimension = '{} x {} x {}'.format(lsnap.geom.nxtot,
                                          lsnap.geom.nytot,
                                          lsnap.geom.nztot)
    elif lsnap.geom.twod_xz:
        dimension = '{} x {}'.format(lsnap.geom.nxtot,
                                     lsnap.geom.nztot)
    else:
        # 2D y-z plane
        dimension = '{} x {}'.format(lsnap.geom.nytot,
                                     lsnap.geom.nztot)
    if lsnap.geom.cartesian:
        print('Cartesian', dimension)
    elif lsnap.geom.cylindrical:
        print('Cylindrical', dimension)
    else:
        print('Spherical', dimension)
    print('Last timestep:',
          '  istep: {}'.format(lstep.istep),
          '  time:  {}'.format(lstep.timeinfo['t']),
          '  <T>:   {}'.format(lstep.timeinfo['Tmean']),
          sep='\n')
    print('Last snapshot (istep {}):'.format(lsnap.istep),
          '  isnap: {}'.format(lsnap.isnap),
          '  time:  {}'.format(lsnap.timeinfo['t']),
          '  output fields: {}'.format(','.join(lsnap.fields)),
          sep='\n')
def _pretty_print(key_val, sep=': ', min_col_width=39, text_width=None):
    """Print a iterable of key/values

    Args:
        key_val (list of (str, str)): the pairs of section names and text.
        sep (str): separator between section names and text.
        min_col_width (int): minimal acceptable column width
        text_width (int): text width to use. If set to None, will try to infer
            the size of the terminal.
    """
    if text_width is None:
        text_width = get_terminal_size().columns
    if text_width < min_col_width:
        min_col_width = text_width
    # number of columns that fit, then the actual width of each column
    ncols = (text_width + 1) // (min_col_width + 1)
    colw = (text_width + 1) // ncols - 1
    ncols = min(ncols, len(key_val))
    wrapper = TextWrapper(width=colw)
    lines = []
    for key, val in key_val:
        if len(key) + len(sep) >= colw // 2:
            # key too long: indent continuation lines minimally
            wrapper.subsequent_indent = ' '
        else:
            # align continuation lines under the value
            wrapper.subsequent_indent = ' ' * (len(key) + len(sep))
        lines.extend(wrapper.wrap('{}{}{}'.format(key, sep, val)))
    # split the wrapped lines into balanced column chunks, avoiding a
    # column break in the middle of a wrapped entry (continuation lines
    # start with a space)
    chunks = []
    for rem_col in range(ncols, 1, -1):
        isep = ceil(len(lines) / rem_col)
        while isep < len(lines) and lines[isep][0] == ' ':
            isep += 1
        chunks.append(lines[:isep])
        lines = lines[isep:]
    chunks.append(lines)
    lines = zip_longest(*chunks, fillvalue='')
    # pad all columns but the last, separated with '|'
    fmt = '|'.join(['{{:{}}}'.format(colw)] * (ncols - 1))
    fmt += '|{}' if ncols > 1 else '{}'
    print(*(fmt.format(*line) for line in lines), sep='\n')
Args:
key_val (list of (str, str)): the pairs of section names and text.
sep (str): separator between section names and text.
min_col_width (int): minimal acceptable column width
text_width (int): text width to use. If set to None, will try to infer
the size of the terminal. | entailment |
def _layout(dict_vars, dict_vars_extra):
    """Pretty-print the (var, description) pairs of two phyvars dicts."""
    entries = [(name, meta.description)
               for name, meta in dict_vars.items()]
    # extra variables store a computing function: use its docstring
    entries.extend((name, baredoc(meta.description))
                   for name, meta in dict_vars_extra.items())
    _pretty_print(entries, min_col_width=26)
def var_cmd():
    """Print a list of available variables.

    See :mod:`stagpy.phyvars` where the lists of variables organized by command
    are defined.
    """
    # (conf.var option name, section title, variables, extra variables,
    #  whether a blank line follows the section) -- replaces five
    # copy-pasted blocks with one data-driven loop.
    sections = (
        ('field', 'field:', phyvars.FIELD, phyvars.FIELD_EXTRA, True),
        ('sfield', 'surface field:', phyvars.SFIELD, {}, True),
        ('rprof', 'rprof:', phyvars.RPROF, phyvars.RPROF_EXTRA, True),
        ('time', 'time:', phyvars.TIME, phyvars.TIME_EXTRA, True),
        ('plates', 'plates:', phyvars.PLATES, {}, False),
    )
    # print every section when no specific one was requested
    print_all = not any(val for _, val in conf.var.opt_vals_())
    for opt, title, dvars, dvars_extra, blank_after in sections:
        if print_all or getattr(conf.var, opt):
            print(title)
            _layout(dvars, dvars_extra)
            if blank_after:
                print()
def report_parsing_problems(parsing_out):
    """Output message about potential parsing problems."""
    _, empty, faulty = parsing_out
    if CONFIG_FILE in empty or CONFIG_FILE in faulty:
        # global config file could not be parsed (empty or invalid)
        print('Unable to read global config file', CONFIG_FILE,
              file=sys.stderr)
        print('Please run stagpy config --create',
              end='\n\n', file=sys.stderr)
    if CONFIG_LOCAL in faulty:
        print('Unable to read local config file', CONFIG_LOCAL,
              file=sys.stderr)
        print('Please run stagpy config --create_local',
              end='\n\n', file=sys.stderr)
def config_pp(subs):
    """Pretty print of configuration options.

    Args:
        subs (iterable of str): iterable with the list of conf sections to
            print.
    """
    print('(c|f): available only as CLI argument/in the config file',
          end='\n\n')
    for sub in subs:
        hlp_lst = []
        for opt, meta in conf[sub].defaults_():
            if meta.cmd_arg ^ meta.conf_arg:
                # option available in exactly one of CLI/config file:
                # tag it accordingly
                opt += ' (c)' if meta.cmd_arg else ' (f)'
            hlp_lst.append((opt, meta.help))
        if hlp_lst:
            print('{}:'.format(sub))
            _pretty_print(hlp_lst, sep=' -- ',
                          text_width=min(get_terminal_size().columns, 100))
            print()
Args:
subs (iterable of str): iterable with the list of conf sections to
print. | entailment |
def config_cmd():
    """Configuration handling.

    Other Parameters:
        conf.config
    """
    any_action_requested = (conf.common.config or conf.config.create or
                            conf.config.create_local or conf.config.update or
                            conf.config.edit)
    if not any_action_requested:
        # no explicit action: just pretty-print the current configuration
        config_pp(conf.sections_())
    loam.tools.config_cmd_handler(conf)
def _plot_rprof_list(sdat, lovs, rprofs, metas, stepstr, rads=None):
    """Plot requested radial profiles and save one figure per group.

    Args:
        sdat: a StagyyData instance, used to rescale plotted quantities.
        lovs (nested list of str): profile variable names, grouped first
            by figure and then by subplot.
        rprofs (dict): profile arrays indexed by variable name; must also
            hold 'r' (radial positions) and 'bounds' (rcmb, rsurf).
        metas (dict): :class:`stagpy.phyvars.Varr` metadata per variable.
        stepstr (str): step description appended to the output file name.
        rads (dict): per-variable radial positions overriding rprofs['r'],
            for profiles not evaluated at the default positions.
    """
    if rads is None:
        rads = {}
    for vfig in lovs:
        # one figure per group, one subplot per sub-group, shared radius axis
        fig, axes = plt.subplots(ncols=len(vfig), sharey=True)
        axes = [axes] if len(vfig) == 1 else axes
        fname = 'rprof_'
        for iplt, vplt in enumerate(vfig):
            xlabel = None
            for ivar, rvar in enumerate(vplt):
                fname += rvar + '_'
                rad = rads[rvar] if rvar in rads else rprofs['r']
                if conf.rprof.depth:
                    rad = rprofs['bounds'][1] - rad
                axes[iplt].plot(rprofs[rvar], rad,
                                conf.rprof.style,
                                label=metas[rvar].description)
                if conf.rprof.depth:
                    axes[iplt].invert_yaxis()
                # use the shared kind as x label, or nothing if kinds differ
                if xlabel is None:
                    xlabel = metas[rvar].kind
                elif xlabel != metas[rvar].kind:
                    xlabel = ''
            # a single variable on the subplot: use its full description
            if ivar == 0:
                xlabel = metas[rvar].description
            if xlabel:
                _, unit = sdat.scale(1, metas[rvar].dim)
                if unit:
                    xlabel += ' ({})'.format(unit)
                axes[iplt].set_xlabel(xlabel)
            if vplt[0][:3] == 'eta':  # list of log variables
                axes[iplt].set_xscale('log')
                axes[iplt].set_xlim(left=conf.plot.vmin, right=conf.plot.vmax)
            if ivar:
                axes[iplt].legend()
        ylabel = 'Depth' if conf.rprof.depth else 'Radius'
        _, unit = sdat.scale(1, 'm')
        if unit:
            ylabel += ' ({})'.format(unit)
        axes[0].set_ylabel(ylabel)
        misc.saveplot(fig, fname + stepstr)
def get_rprof(step, var):
    """Extract or compute and rescale requested radial profile.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): radial profile name, a key of :data:`stagpy.phyvars.RPROF`
            or :data:`stagpy.phyvars.RPROF_EXTRA`.

    Returns:
        tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Varr`:
        rprof, rad, meta
            rprof is the requested profile, rad the radial position at which
            it is evaluated (set to None if it is the position of profiles
            output by StagYY), and meta is a :class:`stagpy.phyvars.Varr`
            instance holding metadata of the requested variable.

    Raises:
        UnknownRprofVarError: if var matches no known profile.
    """
    if var in step.rprof.columns:
        # profile directly output by StagYY
        rprof = step.rprof[var]
        rad = None
        if var in phyvars.RPROF:
            meta = phyvars.RPROF[var]
        else:
            # column without registered metadata: treat as dimensionless
            meta = phyvars.Varr(var, None, '1')
    elif var in phyvars.RPROF_EXTRA:
        # computed profile: meta.description holds the computing function
        meta = phyvars.RPROF_EXTRA[var]
        rprof, rad = meta.description(step)
        meta = phyvars.Varr(misc.baredoc(meta.description),
                            meta.kind, meta.dim)
    else:
        raise UnknownRprofVarError(var)
    rprof, _ = step.sdat.scale(rprof, meta.dim)
    if rad is not None:
        rad, _ = step.sdat.scale(rad, 'm')
    return rprof, rad, meta
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): radial profile name, a key of :data:`stagpy.phyvars.RPROF`
or :data:`stagpy.phyvars.RPROF_EXTRA`.
Returns:
tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Varr`:
rprof, rad, meta
rprof is the requested profile, rad the radial position at which it
is evaluated (set to None if it is the position of profiles output
by StagYY), and meta is a :class:`stagpy.phyvars.Varr` instance
holding metadata of the requested variable. | entailment |
def plot_grid(step):
    """Plot cell positions and thicknesses of the radial grid.

    The figure is saved as grid_N.pdf, N being the step index.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
    """
    radius = get_rprof(step, 'r')[0]
    thickness = get_rprof(step, 'dr')[0]
    _, unit = step.sdat.scale(1, 'm')
    suffix = ' ({})'.format(unit) if unit else unit
    fig, (ax_rad, ax_thick) = plt.subplots(2, sharex=True)
    ax_rad.plot(radius, '-ko')
    ax_rad.set_ylabel('$r$' + suffix)
    ax_thick.plot(thickness, '-ko')
    ax_thick.set_ylabel('$dr$' + suffix)
    ax_thick.set_xlim([-0.5, len(radius) - 0.5])
    ax_thick.set_xlabel('Cell number')
    misc.saveplot(fig, 'grid', step.istep)
The figure is call grid_N.pdf where N is replace by the step index.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance. | entailment |
def plot_average(sdat, lovs):
    """Plot time averaged profiles.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        lovs (nested list of str): nested list of profile names such as
            the one produced by :func:`stagpy.misc.list_of_vars`.

    Other Parameters:
        conf.core.snapshots: the slice of snapshots.
        conf.core.timesteps: the slice of timesteps.
    """
    steps_iter = iter(sdat.walk.filter(rprof=True))
    try:
        step = next(steps_iter)
    except StopIteration:
        return  # nothing to average over
    sovs = misc.set_of_vars(lovs)
    # BUG FIX: ilast must be initialized here, otherwise it is unbound
    # (NameError) when the walk yields a single profile step.
    istart = ilast = step.istep
    nprofs = 1
    rprof_averaged = {}
    rads = {}
    metas = {}
    # assume constant z spacing for the moment
    for rvar in sovs:
        rprof_averaged[rvar], rad, metas[rvar] = get_rprof(step, rvar)
        if rad is not None:
            rads[rvar] = rad
    # accumulate remaining steps, then divide by the number of profiles
    for step in steps_iter:
        nprofs += 1
        for rvar in sovs:
            rprof_averaged[rvar] += get_rprof(step, rvar)[0]
        ilast = step.istep
    for rvar in sovs:
        rprof_averaged[rvar] /= nprofs
    rcmb, rsurf = misc.get_rbounds(step)
    rprof_averaged['bounds'] = (step.sdat.scale(rcmb, 'm')[0],
                                step.sdat.scale(rsurf, 'm')[0])
    rprof_averaged['r'] = get_rprof(step, 'r')[0] + rprof_averaged['bounds'][0]
    stepstr = '{}_{}'.format(istart, ilast)
    _plot_rprof_list(sdat, lovs, rprof_averaged, metas, stepstr, rads)
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps. | entailment |
def plot_every_step(sdat, lovs):
    """Plot radial profiles at each time step.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        lovs (nested list of str): nested list of profile names such as
            the one produced by :func:`stagpy.misc.list_of_vars`.

    Other Parameters:
        conf.core.snapshots: the slice of snapshots.
        conf.core.timesteps: the slice of timesteps.
    """
    sovs = misc.set_of_vars(lovs)
    for step in sdat.walk.filter(rprof=True):
        rprofs = {}
        rads = {}
        metas = {}
        for rvar in sovs:
            rprof, rad, meta = get_rprof(step, rvar)
            rprofs[rvar] = rprof
            metas[rvar] = meta
            if rad is not None:
                rads[rvar] = rad
        # scale domain bounds once (the earlier unscaled assignment was
        # dead code, immediately overwritten, and has been removed)
        rcmb, rsurf = misc.get_rbounds(step)
        rprofs['bounds'] = (step.sdat.scale(rcmb, 'm')[0],
                            step.sdat.scale(rsurf, 'm')[0])
        rprofs['r'] = get_rprof(step, 'r')[0] + rprofs['bounds'][0]
        stepstr = str(step.istep)
        _plot_rprof_list(sdat, lovs, rprofs, metas, stepstr, rads)
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps. | entailment |
def cmd():
    """Implementation of rprof subcommand.

    Other Parameters:
        conf.rprof
        conf.core
    """
    sdat = StagyyData(conf.core.path)
    if sdat.rprof is None:
        # no radial profile data available for this run
        return
    if conf.rprof.grid:
        for step in sdat.walk.filter(rprof=True):
            plot_grid(step)
    lovs = misc.list_of_vars(conf.rprof.plot)
    if lovs:
        plotter = plot_average if conf.rprof.average else plot_every_step
        plotter(sdat, lovs)
Other Parameters:
conf.rprof
conf.core | entailment |
def main():
    """StagPy command line entry point.

    Parses CLI arguments and dispatches to the relevant subcommand,
    reporting StagPy errors in a user-friendly way unless DEBUG is set.
    """
    if not DEBUG:
        signal.signal(signal.SIGINT, sigint_handler)
        warnings.simplefilter('ignore')
    args = importlib.import_module('stagpy.args')
    error = importlib.import_module('stagpy.error')
    try:
        args.parse_args()()
    except error.StagpyError as err:
        if DEBUG:
            raise
        print('Oops! StagPy encountered the following problem while '
              'processing your request.',
              'Please check the path to your simulation and the command line '
              'arguments.', '',
              '{}: {}'.format(err.__class__.__name__, err),
              sep='\n', file=sys.stderr)
        # BUG FIX: exit with a non-zero status so shells and scripts can
        # detect the failure (bare sys.exit() reports success).
        sys.exit(1)
def _enrich_with_par(par_nml, par_file):
    """Merge the namelist read from par_file into par_nml, in place."""
    extra = f90nml.read(str(par_file))
    for section, content in extra.items():
        par_nml.setdefault(section, {})
        for name, value in content.items():
            try:
                content[name] = value.strip()
            except AttributeError:
                pass  # non-string values are kept as-is
        par_nml[section].update(content)
def readpar(par_file, root):
    """Read StagYY par file.

    The namelist is populated in chronological order with:

    - :data:`PAR_DEFAULT`, an internal dictionary defining defaults;
    - :data:`PAR_DFLT_FILE`, the global configuration par file;
    - ``par_name_defaultparameters`` if it is defined in ``par_file``;
    - ``par_file`` itself;
    - ``parameters.dat`` if it can be found in the StagYY output directories.

    Args:
        par_file (:class:`pathlib.Path`): path of par file.
        root (:class:`pathlib.Path`): path on which other paths are rooted.
            This is usually par.parent.

    Returns:
        :class:`f90nml.namelist.Namelist`: case insensitive dict of dict of
        values with first key being the namelist and second key the
        variables' name.

    Raises:
        NoParFileError: if par_file or the default par file it references
            does not exist.
    """
    par_nml = deepcopy(PAR_DEFAULT)
    if PAR_DFLT_FILE.is_file():
        _enrich_with_par(par_nml, PAR_DFLT_FILE)
    else:
        # first run: create the global default par file from internal defaults
        PAR_DFLT_FILE.parent.mkdir(exist_ok=True)
        f90nml.write(par_nml, str(PAR_DFLT_FILE))
    if not par_file.is_file():
        raise NoParFileError(par_file)
    par_main = f90nml.read(str(par_file))
    if 'default_parameters_parfile' in par_main:
        # the par file may point to another par file holding its defaults
        par_dflt = par_main['default_parameters_parfile'].get(
            'par_name_defaultparameters', 'par_defaults')
        par_dflt = root / par_dflt
        if not par_dflt.is_file():
            raise NoParFileError(par_dflt)
        _enrich_with_par(par_nml, par_dflt)
    _enrich_with_par(par_nml, par_file)
    # NOTE(review): '_parameters.dat' is joined as a path component under the
    # output stem directory here — confirm this is not meant to be a filename
    # suffix concatenation (stem + '_parameters.dat').
    par_out = root / par_nml['ioin']['output_file_stem'] / '_parameters.dat'
    if par_out.is_file():
        _enrich_with_par(par_nml, par_out)
    par_out = root / par_nml['ioin']['hdf5_output_folder'] / 'parameters.dat'
    if par_out.is_file():
        _enrich_with_par(par_nml, par_out)
    return par_nml
The namelist is populated in chronological order with:
- :data:`PAR_DEFAULT`, an internal dictionary defining defaults;
- :data:`PAR_DFLT_FILE`, the global configuration par file;
- ``par_name_defaultparameters`` if it is defined in ``par_file``;
- ``par_file`` itself;
- ``parameters.dat`` if it can be found in the StagYY output directories.
Args:
par_file (:class:`pathlib.Path`): path of par file.
root (:class:`pathlib.Path`): path on which other paths are rooted.
This is usually par.parent.
Returns:
:class:`f90nml.namelist.Namelist`: case insensitive dict of dict of
values with first key being the namelist and second key the variables'
name. | entailment |
def get_meshes_fld(step, var):
    """Return scalar field along with coordinates meshes.

    Only works properly in 2D geometry.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): scalar field name.

    Returns:
        tuple of :class:`numpy.array`: xmesh, ymesh, fld
            2D arrays containing respectively the x position, y position, and
            the value of the requested field.
    """
    fld = step.fields[var]
    if step.geom.twod_xz:
        # vertical cross section at constant y
        xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :]
        fld = fld[:, 0, :, 0]
    elif step.geom.cartesian and step.geom.twod_yz:
        # vertical cross section at constant x
        xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :]
        fld = fld[0, :, :, 0]
    else:  # spherical yz
        # annulus geometry: use Cartesian positions of the mesh for plotting
        xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :]
        fld = fld[0, :, :, 0]
    return xmesh, ymesh, fld
Only works properly in 2D geometry.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): scalar field name.
Returns:
tuple of :class:`numpy.array`: xmesh, ymesh, fld
2D arrays containing respectively the x position, y position, and
the value of the requested field. | entailment |
def get_meshes_vec(step, var):
    """Return vector field components along with coordinates meshes.

    Only works properly in 2D geometry.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): vector field name.

    Returns:
        tuple of :class:`numpy.array`: xmesh, ymesh, fldx, fldy
            2D arrays containing respectively the x position, y position, x
            component and y component of the requested vector field.
    """
    if step.geom.twod_xz:
        # vertical cross section at constant y: keep (v1, v3) components
        xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :]
        vec1 = step.fields[var + '1'][:, 0, :, 0]
        vec2 = step.fields[var + '3'][:, 0, :, 0]
    elif step.geom.cartesian and step.geom.twod_yz:
        # vertical cross section at constant x: keep (v2, v3) components
        xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :]
        vec1 = step.fields[var + '2'][0, :, :, 0]
        vec2 = step.fields[var + '3'][0, :, :, 0]
    else:  # spherical yz
        # project the (phi, r) components onto Cartesian axes for plotting
        xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :]
        pmesh = step.geom.p_mesh[0, :, :]
        vec_phi = step.fields[var + '2'][0, :, :, 0]
        vec_r = step.fields[var + '3'][0, :, :, 0]
        vec1 = vec_r * np.cos(pmesh) - vec_phi * np.sin(pmesh)
        vec2 = vec_phi * np.cos(pmesh) + vec_r * np.sin(pmesh)
    return xmesh, ymesh, vec1, vec2
Only works properly in 2D geometry.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): vector field name.
Returns:
tuple of :class:`numpy.array`: xmesh, ymesh, fldx, fldy
2D arrays containing respectively the x position, y position, x
component and y component of the requested vector field. | entailment |
def set_of_vars(arg_plot):
    """Build the set of needed field variables.

    Each element is a tuple; the first item is a scalar field name and the
    second item is either:

    - a scalar field, isocontours are added to the plot.
    - a vector field (e.g. 'v' for the (v1,v2,v3) vector), arrows are added
      to the plot.
    - the empty string when nothing extra is requested.

    Args:
        arg_plot (str): string with variable names separated with
            ``,`` (figures), and ``+`` (same plot).

    Returns:
        set of tuple of str: set of needed field variables.
    """
    sovs = set()
    for entry in arg_plot.split(','):
        main_var, _, companion = entry.partition('+')
        # keep at most the first '+'-separated companion variable
        companion = companion.split('+')[0] if companion else ''
        sovs.add((main_var, companion))
    sovs.discard(('', ''))
    return sovs
Each var is a tuple, first component is a scalar field, second component is
either:
- a scalar field, isocontours are added to the plot.
- a vector field (e.g. 'v' for the (v1,v2,v3) vector), arrows are added to
the plot.
Args:
arg_plot (str): string with variable names separated with
``,`` (figures), and ``+`` (same plot).
Returns:
set of str: set of needed field variables. | entailment |
def plot_scalar(step, var, field=None, axis=None, set_cbar=True, **extra):
    """Plot scalar field.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): the scalar field name.
        field (:class:`numpy.array`): if not None, it is plotted instead of
            step.fields[var]. This is useful to plot a masked or rescaled
            array. Note that if conf.scaling.dimensional is True, this
            field will be scaled accordingly.
        axis (:class:`matplotlib.axes.Axes`): the axis objet where the field
            should be plotted. If set to None, a new figure with one subplot
            is created.
        set_cbar (bool): whether to add a colorbar to the plot.
        extra (dict): options that will be passed on to
            :func:`matplotlib.axes.Axes.pcolormesh`.

    Returns:
        fig, axis, surf, cbar
            handles to various :mod:`matplotlib` objects, respectively the
            figure, the axis, the surface returned by
            :func:`~matplotlib.axes.Axes.pcolormesh`, and the colorbar
            returned by :func:`matplotlib.pyplot.colorbar`.

    Raises:
        NotAvailableError: if the step geometry is 3D.
    """
    if var in phyvars.FIELD:
        meta = phyvars.FIELD[var]
    else:
        # computed field: meta.description holds the computing function
        meta = phyvars.FIELD_EXTRA[var]
        meta = phyvars.Varf(misc.baredoc(meta.description), meta.dim)
    if step.geom.threed:
        raise NotAvailableError('plot_scalar only implemented for 2D fields')
    xmesh, ymesh, fld = get_meshes_fld(step, var)
    xmin, xmax = xmesh.min(), xmesh.max()
    ymin, ymax = ymesh.min(), ymesh.max()
    if field is not None:
        # caller-provided array overrides the field read from the step
        fld = field
    if conf.field.perturbation:
        # remove the horizontal mean to show the perturbation only
        fld = fld - np.mean(fld, axis=0)
    if conf.field.shift:
        fld = np.roll(fld, conf.field.shift, axis=0)
    fld, unit = step.sdat.scale(fld, meta.dim)
    if axis is None:
        fig, axis = plt.subplots(ncols=1)
    else:
        fig = axis.get_figure()
    if step.sdat.par['magma_oceans_in']['magma_oceans_mode']:
        # magma oceans: draw core and surface disks behind the field
        rcmb = step.sdat.par['geometry']['r_cmb']
        xmax = rcmb + 1
        ymax = xmax
        xmin = -xmax
        ymin = -ymax
        rsurf = xmax if step.timeinfo['thick_tmo'] > 0 \
            else step.geom.r_mesh[0, 0, -3]
        cmb = mpat.Circle((0, 0), rcmb, color='dimgray', zorder=0)
        psurf = mpat.Circle((0, 0), rsurf, color='indianred', zorder=0)
        axis.add_patch(psurf)
        axis.add_patch(cmb)
    # caller-supplied keyword arguments take precedence over these defaults
    extra_opts = dict(
        cmap=conf.field.cmap.get(var),
        vmin=conf.plot.vmin,
        vmax=conf.plot.vmax,
        norm=mpl.colors.LogNorm() if var == 'eta' else None,
        rasterized=conf.plot.raster,
        shading='gouraud' if conf.field.interpolate else 'flat',
    )
    extra_opts.update(extra)
    surf = axis.pcolormesh(xmesh, ymesh, fld, **extra_opts)
    cbar = None
    if set_cbar:
        cbar = plt.colorbar(surf, shrink=conf.field.shrinkcb)
        cbar.set_label(meta.description +
                       (' pert.' if conf.field.perturbation else '') +
                       (' ({})'.format(unit) if unit else ''))
    if step.geom.spherical or conf.plot.ratio is None:
        plt.axis('equal')
        plt.axis('off')
    else:
        axis.set_aspect(conf.plot.ratio / axis.get_data_ratio())
        axis.set_adjustable('box')
        axis.set_xlim(xmin, xmax)
        axis.set_ylim(ymin, ymax)
    return fig, axis, surf, cbar
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): the scalar field name.
field (:class:`numpy.array`): if not None, it is plotted instead of
step.fields[var]. This is useful to plot a masked or rescaled
array. Note that if conf.scaling.dimensional is True, this
field will be scaled accordingly.
axis (:class:`matplotlib.axes.Axes`): the axis objet where the field
should be plotted. If set to None, a new figure with one subplot
is created.
set_cbar (bool): whether to add a colorbar to the plot.
extra (dict): options that will be passed on to
:func:`matplotlib.axes.Axes.pcolormesh`.
Returns:
fig, axis, surf, cbar
handles to various :mod:`matplotlib` objects, respectively the
figure, the axis, the surface returned by
:func:`~matplotlib.axes.Axes.pcolormesh`, and the colorbar returned
by :func:`matplotlib.pyplot.colorbar`. | entailment |
def plot_iso(axis, step, var):
    """Overlay isocontours of a scalar field on an existing axis.

    Args:
        axis (:class:`matplotlib.axes.Axes`): the axis handler of an
            existing matplotlib figure where the isocontours should
            be plotted.
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): the scalar field name.
    """
    xmesh, ymesh, fld = get_meshes_fld(step, var)
    shift = conf.field.shift
    if shift:
        # apply the same horizontal shift as the underlying field plot
        fld = np.roll(fld, shift, axis=0)
    axis.contour(xmesh, ymesh, fld, linewidths=1)
Args:
axis (:class:`matplotlib.axes.Axes`): the axis handler of an
existing matplotlib figure where the isocontours should
be plotted.
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): the scalar field name. | entailment |
def plot_vec(axis, step, var):
    """Plot vector field.

    Args:
        axis (:class:`matplotlib.axes.Axes`): the axis handler of an
            existing matplotlib figure where the vector field should
            be plotted.
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): the vector field name.
    """
    xmesh, ymesh, vec1, vec2 = get_meshes_vec(step, var)
    # subsample arrows to roughly 10 per direction
    dipz = step.geom.nztot // 10
    if conf.field.shift:
        vec1 = np.roll(vec1, conf.field.shift, axis=0)
        vec2 = np.roll(vec2, conf.field.shift, axis=0)
    if step.geom.spherical or conf.plot.ratio is None:
        dipx = dipz
    else:
        # account for the requested aspect ratio in the horizontal stride
        dipx = step.geom.nytot if step.geom.twod_yz else step.geom.nxtot
        dipx = int(dipx // 10 * conf.plot.ratio) + 1
    axis.quiver(xmesh[::dipx, ::dipz], ymesh[::dipx, ::dipz],
                vec1[::dipx, ::dipz], vec2[::dipx, ::dipz],
                linewidths=1)
Args:
axis (:class:`matplotlib.axes.Axes`): the axis handler of an
existing matplotlib figure where the vector field should
be plotted.
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): the vector field name. | entailment |
def cmd():
    """Implementation of field subcommand.

    Other Parameters:
        conf.field
        conf.core
    """
    sdat = StagyyData(conf.core.path)
    sovs = set_of_vars(conf.field.plot)
    # first pass: compute global min/max of each field over all snaps so
    # that every plot shares the same color scale
    minmax = {}
    if conf.plot.cminmax:
        conf.plot.vmin = None
        conf.plot.vmax = None
        for step in sdat.walk.filter(snap=True):
            for var, _ in sovs:
                if var in step.fields:
                    if var in phyvars.FIELD:
                        dim = phyvars.FIELD[var].dim
                    else:
                        dim = phyvars.FIELD_EXTRA[var].dim
                    field, _ = sdat.scale(step.fields[var], dim)
                    if var in minmax:
                        minmax[var] = (min(minmax[var][0], np.nanmin(field)),
                                       max(minmax[var][1], np.nanmax(field)))
                    else:
                        minmax[var] = np.nanmin(field), np.nanmax(field)
    # second pass: produce the plots; var is a (scalar, companion) tuple
    for step in sdat.walk.filter(snap=True):
        for var in sovs:
            if var[0] not in step.fields:
                print("'{}' field on snap {} not found".format(var[0],
                                                               step.isnap))
                continue
            opts = {}
            if var[0] in minmax:
                opts = dict(vmin=minmax[var[0]][0], vmax=minmax[var[0]][1])
            fig, axis, _, _ = plot_scalar(step, var[0], **opts)
            # companion variable: isocontours if scalar, arrows if vector
            if valid_field_var(var[1]):
                plot_iso(axis, step, var[1])
            elif var[1]:
                plot_vec(axis, step, var[1])
            oname = '{}_{}'.format(*var) if var[1] else var[0]
            misc.saveplot(fig, oname, step.isnap)
Other Parameters:
conf.field
conf.core | entailment |
def _sub(cmd, *sections):
    """Build a Subcmd instance from a command function or module.

    Args:
        cmd: either the command function itself, or an object (module)
            exposing it as a ``cmd`` attribute.
        sections: configuration sections attached to the subcommand.
    """
    if isfunction(cmd):
        cmd_func = cmd
    else:
        cmd_func = cmd.cmd
    return Subcmd(baredoc(cmd), *sections, func=cmd_func)
def _steps_to_slices():
    """Parse timesteps and snapshots arguments and set the relevant slices.

    Only one of conf.core.timesteps and conf.core.snapshots is kept, the
    other being reset to None; the kept option is turned from its string
    form ('start:stop:step') into a :class:`slice`.
    """
    if not (conf.core.timesteps or conf.core.snapshots):
        # default to the last snap
        conf.core.timesteps = None
        conf.core.snapshots = slice(-1, None, None)
        return
    elif conf.core.snapshots:
        # snapshots take precedence over timesteps
        # if both are defined
        conf.core.timesteps = None
        steps = conf.core.snapshots
    else:
        conf.core.snapshots = None
        steps = conf.core.timesteps
    steps = steps.split(':')
    steps[0] = int(steps[0]) if steps[0] else None
    if len(steps) == 1:
        # a single index n means the slice n:n+1
        steps.append(steps[0] + 1)
    steps[1] = int(steps[1]) if steps[1] else None
    if len(steps) != 3:
        # no explicit stride: default to 1
        steps = steps[0:2] + [1]
    steps[2] = int(steps[2]) if steps[2] else None
    steps = slice(*steps)
    if conf.core.snapshots is not None:
        conf.core.snapshots = steps
    else:
        conf.core.timesteps = steps
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.