| sentence1 (string, 52 to 3.87M chars) | sentence2 (string, 1 to 47.2k chars) | label (1 class: entailment) |
|---|---|---|
def _get_next_page(self, soup, current_page):
"""
Get the relative url of the next page (The "More" link at
the bottom of the page)
"""
# Get the table with all the comments:
if current_page == 1:
table = soup.findChildren('table')[3]
elif current_page > 1:
table = soup.findChildren('table')[2]
# the last row of the table contains the relative url of the next page
anchor = table.findChildren(['tr'])[-1].find('a')
        if anchor and anchor.text == u'More':
            href = anchor.get('href')
            # str.lstrip() strips a set of characters, not a prefix,
            # so remove the BASE_URL prefix explicitly
            if href.startswith(BASE_URL):
                href = href[len(BASE_URL):]
            return href
        return None
|
Get the relative url of the next page (The "More" link at
the bottom of the page)
|
entailment
|
def _build_comments(self, soup):
"""
For the story, builds and returns a list of Comment objects.
"""
comments = []
current_page = 1
while True:
# Get the table holding all comments:
if current_page == 1:
table = soup.findChildren('table')[3]
elif current_page > 1:
table = soup.findChildren('table')[2]
            # get all rows (each comment spans two rows)
rows = table.findChildren(['tr'])
            # the last row is the "More" link, the second to last is spacing
            rows = rows[:-2]
# now we have unique comments only
rows = [row for i, row in enumerate(rows) if (i % 2 == 0)]
if len(rows) > 1:
for row in rows:
# skip an empty td
if not row.findChildren('td'):
continue
# Builds a flat list of comments
# level of comment, starting with 0
level = int(row.findChildren('td')[1].find('img').get(
'width')) // 40
spans = row.findChildren('td')[3].findAll('span')
# span[0] = submitter details
# [<a href="user?id=jonknee">jonknee</a>, u' 1 hour ago | ', <a href="item?id=6910978">link</a>]
# span[1] = actual comment
if str(spans[0]) != '<span class="comhead"></span>':
# user who submitted the comment
user = spans[0].contents[0].string
# relative time of comment
time_ago = spans[0].contents[1].string.strip(
).rstrip(' |')
                        # the comment id comes from the "link" anchor,
                        # whose href may be relative or absolute
                        href = spans[0].contents[2].get('href')
                        match = re.match(r'item\?id=(.*)', href)
                        if match is None:
                            match = re.match(
                                r'%s/item\?id=(.*)' % BASE_URL, href)
                        comment_id = int(match.groups()[0])
# text representation of comment (unformatted)
body = spans[1].text
if body[-2:] == '--':
body = body[:-5]
# html of comment, may not be valid
try:
pat = re.compile(
r'<span class="comment"><font color=".*">(.*)</font></span>')
body_html = re.match(pat, str(spans[1]).replace(
'\n', '')).groups()[0]
except AttributeError:
pat = re.compile(
r'<span class="comment"><font color=".*">(.*)</font></p><p><font size="1">')
body_html = re.match(pat, str(spans[1]).replace(
'\n', '')).groups()[0]
else:
# comment deleted
user = ''
time_ago = ''
comment_id = -1
body = '[deleted]'
body_html = '[deleted]'
comment = Comment(comment_id, level, user, time_ago,
body, body_html)
comments.append(comment)
# Move on to the next page of comments, or exit the loop if there
# is no next page.
next_page_url = self._get_next_page(soup, current_page)
if not next_page_url:
break
soup = get_soup(page=next_page_url)
current_page += 1
previous_comment = None
# for comment in comments:
# if comment.level == 0:
# previous_comment = comment
# else:
# level_difference = comment.level - previous_comment.level
# previous_comment.body_html += '\n' + '\t' * level_difference \
# + comment.body_html
# previous_comment.body += '\n' + '\t' * level_difference + \
# comment.body
return comments
|
For the story, builds and returns a list of Comment objects.
|
entailment
|
def fromid(self, item_id):
"""
Initializes an instance of Story for given item_id.
It is assumed that the story referenced by item_id is valid
and does not raise any HTTP errors.
item_id is an int.
"""
if not item_id:
raise Exception('Need an item_id for a story')
# get details about a particular story
soup = get_item_soup(item_id)
        # this post has not been scraped, so we explicitly get all info
story_id = item_id
rank = -1
# to extract meta information about the post
info_table = soup.findChildren('table')[2]
# [0] = title, domain, [1] = points, user, time, comments
info_rows = info_table.findChildren('tr')
# title, domain
title_row = info_rows[0].findChildren('td')[1]
title = title_row.find('a').text
try:
domain = title_row.find('span').string[2:-2]
# domain found
is_self = False
link = title_row.find('a').get('href')
except AttributeError:
# self post
domain = BASE_URL
is_self = True
link = '%s/item?id=%s' % (BASE_URL, item_id)
# points, user, time, comments
meta_row = info_rows[1].findChildren('td')[1].contents
# [<span id="score_7024626">789 points</span>, u' by ', <a href="user?id=endianswap">endianswap</a>,
# u' 8 hours ago | ', <a href="item?id=7024626">238 comments</a>]
points = int(re.match(r'^(\d+)\spoint.*', meta_row[0].text).groups()[0])
submitter = meta_row[2].text
submitter_profile = '%s/%s' % (BASE_URL, meta_row[2].get('href'))
published_time = ' '.join(meta_row[3].strip().split()[:3])
comments_link = '%s/item?id=%s' % (BASE_URL, item_id)
try:
num_comments = int(re.match(r'(\d+)\s.*', meta_row[
4].text).groups()[0])
except AttributeError:
num_comments = 0
story = Story(rank, story_id, title, link, domain, points, submitter,
published_time, submitter_profile, num_comments,
comments_link, is_self)
return story
|
Initializes an instance of Story for given item_id.
It is assumed that the story referenced by item_id is valid
and does not raise any HTTP errors.
item_id is an int.
|
entailment
|
def compare_name_component(list1, list2, settings, use_ratio=False):
"""
Compare a list of names from a name component based on settings
"""
if not list1[0] or not list2[0]:
not_required = not settings['required']
return not_required * 100 if use_ratio else not_required
if len(list1) != len(list2):
return False
compare_func = _ratio_compare if use_ratio else _normal_compare
return compare_func(list1, list2, settings)
|
Compare a list of names from a name component based on settings
|
entailment
|
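For illustration, a minimal usage sketch of compare_name_component; only the branches shown above are exercised, and the settings dict is hypothetical (the _ratio_compare/_normal_compare helpers come from the surrounding library):

settings = {'required': False}
# A missing component falls back to the inverted 'required' setting,
# scaled to a percentage when use_ratio is requested.
assert compare_name_component([''], ['alice'], settings) is True
assert compare_name_component([''], ['alice'], settings, use_ratio=True) == 100
# Components with different numbers of names never match.
assert compare_name_component(['mary', 'jane'], ['mary'], settings) is False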
def equate_initial(name1, name2):
"""
Evaluates whether names match, or one name is the initial of the other
"""
if len(name1) == 0 or len(name2) == 0:
return False
if len(name1) == 1 or len(name2) == 1:
return name1[0] == name2[0]
return name1 == name2
|
Evaluates whether names match, or one name is the initial of the other
|
entailment
|
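A few hedged examples of equate_initial's behavior (the names are hypothetical):

assert equate_initial('j', 'john')           # an initial matches the full name
assert equate_initial('john', 'john')        # identical names match
assert not equate_initial('jane', 'john')    # two full names must be equal
assert not equate_initial('', 'john')        # empty names never match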
def equate_prefix(name1, name2):
"""
Evaluates whether names match, or one name prefixes another
"""
if len(name1) == 0 or len(name2) == 0:
return False
return name1.startswith(name2) or name2.startswith(name1)
|
Evaluates whether names match, or one name prefixes another
|
entailment
|
def equate_nickname(name1, name2):
"""
Evaluates whether names match based on common nickname patterns
This is not currently used in any name comparison
"""
# Convert '-ie' and '-y' to the root name
nickname_regex = r'(.)\1(y|ie)$'
root_regex = r'\1'
name1 = re.sub(nickname_regex, root_regex, name1)
name2 = re.sub(nickname_regex, root_regex, name2)
if equate_prefix(name1, name2):
return True
return False
|
Evaluates whether names match based on common nickname patterns
This is not currently used in any name comparison
|
entailment
|
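A short sketch of the nickname handling; note the regex only rewrites doubled-letter '-y'/'-ie' endings, so substitutions such as 'bill' for 'william' are not recognized:

assert equate_nickname('robbie', 'robert')   # 'robbie' -> 'rob', a prefix of 'robert'
assert equate_nickname('tommy', 'tom')       # 'tommy' -> 'tom'
assert not equate_nickname('bill', 'william')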
def make_ascii(word):
"""
Converts unicode-specific characters to their equivalent ascii
"""
if sys.version_info < (3, 0, 0):
word = unicode(word)
else:
word = str(word)
normalized = unicodedata.normalize('NFKD', word)
return normalized.encode('ascii', 'ignore').decode('utf-8')
|
Converts unicode-specific characters to their equivalent ascii
|
entailment
|
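A quick sketch of make_ascii on accented input; characters without an ASCII decomposition are dropped by the 'ignore' error handler:

assert make_ascii(u'café') == 'cafe'
assert make_ascii(u'naïve') == 'naive'
assert make_ascii(u'æon') == 'on'   # U+00E6 has no NFKD decomposition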
def seq_ratio(word1, word2):
"""
Returns sequence match ratio for two words
"""
raw_ratio = SequenceMatcher(None, word1, word2).ratio()
return int(round(100 * raw_ratio))
|
Returns sequence match ratio for two words
|
entailment
|
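seq_ratio scales difflib's SequenceMatcher ratio to an integer percentage, e.g.:

assert seq_ratio('smith', 'smith') == 100
# matching blocks 'sm' and 'th' give 2 * 4 / (5 + 5) = 0.8
assert seq_ratio('smith', 'smyth') == 80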
def deep_update_dict(default, options):
"""
Updates the values in a nested dict, while unspecified values will remain
unchanged
"""
for key in options.keys():
default_setting = default.get(key)
new_setting = options.get(key)
        if isinstance(default_setting, dict) and \
                isinstance(new_setting, dict):
            deep_update_dict(default_setting, new_setting)
else:
default[key] = new_setting
|
Updates the values in a nested dict, while unspecified values will remain
unchanged
|
entailment
|
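A minimal usage sketch of deep_update_dict with hypothetical settings dicts:

default = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
deep_update_dict(default, {'db': {'port': 6543}})
# Nested keys are merged in place; unspecified keys keep their defaults.
assert default == {'db': {'host': 'localhost', 'port': 6543},
                   'debug': False}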
def template_to_base_path(template, google_songs):
"""Get base output path for a list of songs for download."""
if template == os.getcwd() or template == '%suggested%':
base_path = os.getcwd()
else:
template = os.path.abspath(template)
song_paths = [template_to_filepath(template, song) for song in google_songs]
base_path = os.path.dirname(os.path.commonprefix(song_paths))
return base_path
|
Get base output path for a list of songs for download.
|
entailment
|
def random(cls, length, bit_prob=.5):
"""Create a bit string of the given length, with the probability of
each bit being set equal to bit_prob, which defaults to .5.
Usage:
# Create a random BitString of length 10 with mostly zeros.
bits = BitString.random(10, bit_prob=.1)
Arguments:
length: An int, indicating the desired length of the result.
bit_prob: A float in the range [0, 1]. This is the probability
of any given bit in the result having a value of 1; default
is .5, giving 0 and 1 equal probabilities of appearance for
each bit's value.
Return:
A randomly generated BitString instance of the requested
length.
"""
assert isinstance(length, int) and length >= 0
assert isinstance(bit_prob, (int, float)) and 0 <= bit_prob <= 1
bits = 0
for _ in range(length):
bits <<= 1
bits += (random.random() < bit_prob)
return cls(bits, length)
|
Create a bit string of the given length, with the probability of
each bit being set equal to bit_prob, which defaults to .5.
Usage:
# Create a random BitString of length 10 with mostly zeros.
bits = BitString.random(10, bit_prob=.1)
Arguments:
length: An int, indicating the desired length of the result.
bit_prob: A float in the range [0, 1]. This is the probability
of any given bit in the result having a value of 1; default
is .5, giving 0 and 1 equal probabilities of appearance for
each bit's value.
Return:
A randomly generated BitString instance of the requested
length.
|
entailment
|
def crossover_template(cls, length, points=2):
"""Create a crossover template with the given number of points. The
crossover template can be used as a mask to crossover two
bitstrings of the same length.
Usage:
assert len(parent1) == len(parent2)
template = BitString.crossover_template(len(parent1))
inv_template = ~template
child1 = (parent1 & template) | (parent2 & inv_template)
child2 = (parent1 & inv_template) | (parent2 & template)
Arguments:
length: An int, indicating the desired length of the result.
points: An int, the number of crossover points.
Return:
A BitString instance of the requested length which can be used
as a crossover template.
"""
assert isinstance(length, int) and length >= 0
assert isinstance(points, int) and points >= 0
# Select the crossover points.
points = random.sample(range(length + 1), points)
# Prep the points for the loop.
points.sort()
points.append(length)
# Fill the bits in with alternating ranges of 0 and 1 according to
# the selected crossover points.
previous = 0
include_range = bool(random.randrange(2))
bits = 0
for point in points:
if point > previous:
bits <<= point - previous
if include_range:
bits += (1 << (point - previous)) - 1
include_range = not include_range
previous = point
return cls(bits, length)
|
Create a crossover template with the given number of points. The
crossover template can be used as a mask to crossover two
bitstrings of the same length.
Usage:
assert len(parent1) == len(parent2)
template = BitString.crossover_template(len(parent1))
inv_template = ~template
child1 = (parent1 & template) | (parent2 & inv_template)
child2 = (parent1 & inv_template) | (parent2 & template)
Arguments:
length: An int, indicating the desired length of the result.
points: An int, the number of crossover points.
Return:
A BitString instance of the requested length which can be used
as a crossover template.
|
entailment
|
def count(self):
"""Returns the number of bits set to True in the bit string.
Usage:
assert BitString('00110').count() == 2
Arguments: None
Return:
An int, the number of bits with value 1.
"""
result = 0
bits = self._bits
while bits:
result += bits % 2
bits >>= 1
return result
|
Returns the number of bits set to True in the bit string.
Usage:
assert BitString('00110').count() == 2
Arguments: None
Return:
An int, the number of bits with value 1.
|
entailment
|
def drain_events(self, allowed_methods=None, timeout=None):
"""Wait for an event on any channel."""
return self.wait_multi(self.channels.values(), timeout=timeout)
|
Wait for an event on any channel.
|
entailment
|
def wait_multi(self, channels, allowed_methods=None, timeout=None):
"""Wait for an event on a channel."""
chanmap = dict((chan.channel_id, chan) for chan in channels)
chanid, method_sig, args, content = self._wait_multiple(
chanmap.keys(), allowed_methods, timeout=timeout)
channel = chanmap[chanid]
if content \
and channel.auto_decode \
and hasattr(content, 'content_encoding'):
try:
content.body = content.body.decode(content.content_encoding)
except Exception:
pass
amqp_method = channel._METHOD_MAP.get(method_sig, None)
if amqp_method is None:
raise Exception('Unknown AMQP method (%d, %d)' % method_sig)
if content is None:
return amqp_method(channel, args)
else:
return amqp_method(channel, args, content)
|
Wait for an event on a channel.
|
entailment
|
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.connection
if not conninfo.hostname:
raise KeyError("Missing hostname for AMQP connection.")
if conninfo.userid is None:
raise KeyError("Missing user id for AMQP connection.")
if conninfo.password is None:
raise KeyError("Missing password for AMQP connection.")
if not conninfo.port:
conninfo.port = self.default_port
return Connection(host=conninfo.host,
userid=conninfo.userid,
password=conninfo.password,
virtual_host=conninfo.virtual_host,
insist=conninfo.insist,
ssl=conninfo.ssl,
connect_timeout=conninfo.connect_timeout)
|
Establish connection to the AMQP broker.
|
entailment
|
def queue_exists(self, queue):
"""Check if a queue has been declared.
        :rtype: bool
"""
try:
self.channel.queue_declare(queue=queue, passive=True)
except AMQPChannelException, e:
if e.amqp_reply_code == 404:
return False
raise e
else:
return True
|
Check if a queue has been declared.
:rtype: bool
|
entailment
|
def queue_delete(self, queue, if_unused=False, if_empty=False):
"""Delete queue by name."""
return self.channel.queue_delete(queue, if_unused, if_empty)
|
Delete queue by name.
|
entailment
|
def queue_declare(self, queue, durable, exclusive, auto_delete,
warn_if_exists=False, arguments=None):
"""Declare a named queue."""
if warn_if_exists and self.queue_exists(queue):
warnings.warn(QueueAlreadyExistsWarning(
QueueAlreadyExistsWarning.__doc__))
return self.channel.queue_declare(queue=queue,
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete,
arguments=arguments)
|
Declare a named queue.
|
entailment
|
def exchange_declare(self, exchange, type, durable, auto_delete):
"""Declare an named exchange."""
return self.channel.exchange_declare(exchange=exchange,
type=type,
durable=durable,
auto_delete=auto_delete)
|
Declare a named exchange.
|
entailment
|
def queue_bind(self, queue, exchange, routing_key, arguments=None):
"""Bind queue to an exchange using a routing key."""
return self.channel.queue_bind(queue=queue,
exchange=exchange,
routing_key=routing_key,
arguments=arguments)
|
Bind queue to an exchange using a routing key.
|
entailment
|
def get(self, queue, no_ack=False):
"""Receive a message from a declared queue by name.
:returns: A :class:`Message` object if a message was received,
``None`` otherwise. If ``None`` was returned, it probably means
        there were no messages waiting on the queue.
"""
raw_message = self.channel.basic_get(queue, no_ack=no_ack)
if not raw_message:
return None
return self.message_to_python(raw_message)
|
Receive a message from a declared queue by name.
:returns: A :class:`Message` object if a message was received,
``None`` otherwise. If ``None`` was returned, it probably means
there were no messages waiting on the queue.
|
entailment
|
def consume(self, limit=None):
"""Returns an iterator that waits for one message at a time."""
for total_message_count in count():
            if limit and total_message_count >= limit:
                return
            if not self.channel.is_open:
                return
self.channel.wait()
yield True
|
Returns an iterator that waits for one message at a time.
|
entailment
|
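A hedged usage sketch for consume; the backend instance and a previously registered consumer callback are assumed to exist:

# Block for up to ten deliveries; each iteration waits on the channel
# and the registered callback handles the message.
for _ in backend.consume(limit=10):
    pass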
def cancel(self, consumer_tag):
"""Cancel a channel by consumer tag."""
if not self.channel.connection:
return
self.channel.basic_cancel(consumer_tag)
|
Cancel a channel by consumer tag.
|
entailment
|
def close(self):
"""Close the channel if open."""
if self._channel and self._channel.is_open:
self._channel.close()
self._channel_ref = None
|
Close the channel if open.
|
entailment
|
def qos(self, prefetch_size, prefetch_count, apply_global=False):
"""Request specific Quality of Service."""
self.channel.basic_qos(prefetch_size, prefetch_count,
apply_global)
|
Request specific Quality of Service.
|
entailment
|
def channel(self):
"""If no channel exists, a new one is requested."""
if not self._channel:
self._channel_ref = weakref.ref(self.connection.get_channel())
return self._channel
|
If no channel exists, a new one is requested.
|
entailment
|
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.connection
if not conninfo.hostname:
raise KeyError("Missing hostname for AMQP connection.")
if conninfo.userid is None:
raise KeyError("Missing user id for AMQP connection.")
if conninfo.password is None:
raise KeyError("Missing password for AMQP connection.")
if not conninfo.port:
conninfo.port = self.default_port
conn = amqp.Connection(host=conninfo.hostname,
port=conninfo.port,
userid=conninfo.userid,
password=conninfo.password,
virtual_host=conninfo.virtual_host)
return conn
|
Establish connection to the AMQP broker.
|
entailment
|
def declare_consumer(self, queue, no_ack, callback, consumer_tag,
nowait=False):
"""Declare a consumer."""
return self.channel.basic_consume(queue=queue,
no_ack=no_ack,
callback=callback,
consumer_tag=consumer_tag)
|
Declare a consumer.
|
entailment
|
def consume(self, limit=None):
"""Returns an iterator that waits for one message at a time."""
for total_message_count in count():
            if limit and total_message_count >= limit:
                return
            if not self.channel.is_open:
                return
self.channel.conn.drain_events()
yield True
|
Returns an iterator that waits for one message at a time.
|
entailment
|
def cancel(self, consumer_tag):
"""Cancel a channel by consumer tag."""
if not self.channel.conn:
return
self.channel.basic_cancel(consumer_tag)
|
Cancel a channel by consumer tag.
|
entailment
|
def prepare_message(self, message_data, delivery_mode, priority=None,
content_type=None, content_encoding=None):
"""Encapsulate data into a AMQP message."""
return amqp.Message(message_data, properties={
"delivery_mode": delivery_mode,
"priority": priority,
"content_type": content_type,
"content_encoding": content_encoding})
|
Encapsulate data into an AMQP message.
|
entailment
|
def raw_encode(data):
"""Special case serializer."""
content_type = 'application/data'
payload = data
if isinstance(payload, unicode):
content_encoding = 'utf-8'
payload = payload.encode(content_encoding)
else:
content_encoding = 'binary'
return content_type, content_encoding, payload
|
Special case serializer.
|
entailment
|
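For illustration, the two branches of raw_encode (Python 2 semantics, since the function tests for unicode):

content_type, content_encoding, payload = raw_encode(u'héllo')
assert (content_type, content_encoding) == ('application/data', 'utf-8')
content_type, content_encoding, payload = raw_encode('already-encoded bytes')
assert content_encoding == 'binary'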
def register_json():
"""Register a encoder/decoder for JSON serialization."""
from anyjson import serialize as json_serialize
from anyjson import deserialize as json_deserialize
registry.register('json', json_serialize, json_deserialize,
content_type='application/json',
content_encoding='utf-8')
|
Register an encoder/decoder for JSON serialization.
|
entailment
|
def register_yaml():
"""Register a encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types
to be serialized. Useful if you need to send data such as dates"""
try:
import yaml
registry.register('yaml', yaml.safe_dump, yaml.safe_load,
content_type='application/x-yaml',
content_encoding='utf-8')
except ImportError:
def not_available(*args, **kwargs):
"""In case a client receives a yaml message, but yaml
isn't installed."""
raise SerializerNotInstalled(
"No decoder installed for YAML. Install the PyYAML library")
registry.register('yaml', None, not_available, 'application/x-yaml')
|
Register an encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types
to be serialized. Useful if you need to send data such as dates
|
entailment
|
def register_pickle():
"""The fastest serialization method, but restricts
you to python clients."""
import cPickle
registry.register('pickle', cPickle.dumps, cPickle.loads,
content_type='application/x-python-serialize',
content_encoding='binary')
|
The fastest serialization method, but restricts
you to python clients.
|
entailment
|
def register_msgpack():
"""See http://msgpack.sourceforge.net/"""
try:
import msgpack
registry.register('msgpack', msgpack.packs, msgpack.unpacks,
content_type='application/x-msgpack',
content_encoding='binary')
except ImportError:
def not_available(*args, **kwargs):
"""In case a client receives a msgpack message, but yaml
isn't installed."""
raise SerializerNotInstalled(
"No decoder installed for msgpack. "
"Install the msgpack library")
registry.register('msgpack', None, not_available,
'application/x-msgpack')
|
See http://msgpack.sourceforge.net/
|
entailment
|
def register(self, name, encoder, decoder, content_type,
content_encoding='utf-8'):
"""Register a new encoder/decoder.
:param name: A convenience name for the serialization method.
:param encoder: A method that will be passed a python data structure
and should return a string representing the serialized data.
If ``None``, then only a decoder will be registered. Encoding
will not be possible.
:param decoder: A method that will be passed a string representing
serialized data and should return a python data structure.
If ``None``, then only an encoder will be registered.
Decoding will not be possible.
:param content_type: The mime-type describing the serialized
structure.
:param content_encoding: The content encoding (character set) that
the :param:`decoder` method will be returning. Will usually be
``utf-8``, ``us-ascii``, or ``binary``.
"""
if encoder:
self._encoders[name] = (content_type, content_encoding, encoder)
if decoder:
self._decoders[content_type] = decoder
|
Register a new encoder/decoder.
:param name: A convenience name for the serialization method.
:param encoder: A method that will be passed a python data structure
and should return a string representing the serialized data.
If ``None``, then only a decoder will be registered. Encoding
will not be possible.
:param decoder: A method that will be passed a string representing
serialized data and should return a python data structure.
If ``None``, then only an encoder will be registered.
Decoding will not be possible.
:param content_type: The mime-type describing the serialized
structure.
:param content_encoding: The content encoding (character set) that
the :param:`decoder` method will be returning. Will usually be
``utf-8``, ``us-ascii``, or ``binary``.
|
entailment
|
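A sketch of registering a custom, purely hypothetical serializer through this method:

def reverse_encode(data):
    # toy encoder: serialize by reversing the string form of the data
    return str(data)[::-1]

def reverse_decode(data):
    return data[::-1]

registry.register('reverse', reverse_encode, reverse_decode,
                  content_type='application/x-reversed',
                  content_encoding='utf-8')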
def _set_default_serializer(self, name):
"""
Set the default serialization method used by this library.
:param name: The name of the registered serialization method.
For example, ``json`` (default), ``pickle``, ``yaml``,
or any custom methods registered using :meth:`register`.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
try:
(self._default_content_type, self._default_content_encoding,
self._default_encode) = self._encoders[name]
except KeyError:
raise SerializerNotInstalled(
"No encoder installed for %s" % name)
|
Set the default serialization method used by this library.
:param name: The name of the registered serialization method.
For example, ``json`` (default), ``pickle``, ``yaml``,
or any custom methods registered using :meth:`register`.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
|
entailment
|
def encode(self, data, serializer=None):
"""
Serialize a data structure into a string suitable for sending
as an AMQP message body.
:param data: The message data to send. Can be a list,
dictionary or a string.
:keyword serializer: An optional string representing
the serialization method you want the data marshalled
into. (For example, ``json``, ``raw``, or ``pickle``).
If ``None`` (default), then `JSON`_ will be used, unless
``data`` is a ``str`` or ``unicode`` object. In this
latter case, no serialization occurs as it would be
unnecessary.
Note that if ``serializer`` is specified, then that
serialization method will be used even if a ``str``
or ``unicode`` object is passed in.
:returns: A three-item tuple containing the content type
(e.g., ``application/json``), content encoding, (e.g.,
``utf-8``) and a string containing the serialized
data.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
if serializer == "raw":
return raw_encode(data)
if serializer and not self._encoders.get(serializer):
raise SerializerNotInstalled(
"No encoder installed for %s" % serializer)
# If a raw string was sent, assume binary encoding
# (it's likely either ASCII or a raw binary file, but 'binary'
        # charset will encompass both, even if not ideal.)
if not serializer and isinstance(data, str):
# In Python 3+, this would be "bytes"; allow binary data to be
# sent as a message without getting encoder errors
return "application/data", "binary", data
# For unicode objects, force it into a string
if not serializer and isinstance(data, unicode):
payload = data.encode("utf-8")
return "text/plain", "utf-8", payload
if serializer:
content_type, content_encoding, encoder = \
self._encoders[serializer]
else:
encoder = self._default_encode
content_type = self._default_content_type
content_encoding = self._default_content_encoding
payload = encoder(data)
return content_type, content_encoding, payload
|
Serialize a data structure into a string suitable for sending
as an AMQP message body.
:param data: The message data to send. Can be a list,
dictionary or a string.
:keyword serializer: An optional string representing
the serialization method you want the data marshalled
into. (For example, ``json``, ``raw``, or ``pickle``).
If ``None`` (default), then `JSON`_ will be used, unless
``data`` is a ``str`` or ``unicode`` object. In this
latter case, no serialization occurs as it would be
unnecessary.
Note that if ``serializer`` is specified, then that
serialization method will be used even if a ``str``
or ``unicode`` object is passed in.
:returns: A three-item tuple containing the content type
(e.g., ``application/json``), content encoding, (e.g.,
``utf-8``) and a string containing the serialized
data.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
|
entailment
|
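Assuming the json serializer has been registered (see register_json above), encode round-trips with the decode method defined below:

content_type, content_encoding, payload = registry.encode(
    {'hello': 'world'}, serializer='json')
assert content_type == 'application/json'
assert registry.decode(payload, content_type, content_encoding) == \
    {'hello': 'world'}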
def decode(self, data, content_type, content_encoding):
"""Deserialize a data stream as serialized using ``encode``
based on :param:`content_type`.
:param data: The message data to deserialize.
:param content_type: The content-type of the data.
(e.g., ``application/json``).
:param content_encoding: The content-encoding of the data.
(e.g., ``utf-8``, ``binary``, or ``us-ascii``).
:returns: The unserialized data.
"""
content_type = content_type or 'application/data'
content_encoding = (content_encoding or 'utf-8').lower()
# Don't decode 8-bit strings or unicode objects
if content_encoding not in ('binary', 'ascii-8bit') and \
not isinstance(data, unicode):
data = codecs.decode(data, content_encoding)
try:
decoder = self._decoders[content_type]
except KeyError:
return data
return decoder(data)
|
Deserialize a data stream as serialized using ``encode``
based on :param:`content_type`.
:param data: The message data to deserialize.
:param content_type: The content-type of the data.
(e.g., ``application/json``).
:param content_encoding: The content-encoding of the data.
(e.g., ``utf-8``, ``binary``, or ``us-ascii``).
:returns: The unserialized data.
|
entailment
|
def ack(self):
"""Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.acknowledged:
raise self.MessageStateError(
"Message already acknowledged with state: %s" % self._state)
self.backend.ack(self._frame)
self._state = "ACK"
|
Acknowledge this message as being processed.
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
|
entailment
|
def consume(self, limit=None):
"""Returns an iterator that waits for one message at a time."""
for total_message_count in count():
if limit and total_message_count >= limit:
                return
self.drain_events()
yield True
|
Returns an iterator that waits for one message at a time.
|
entailment
|
def random(cls, length, bit_prob=.5):
"""Create a bit string of the given length, with the probability of
each bit being set equal to bit_prob, which defaults to .5.
Usage:
# Create a random BitString of length 10 with mostly zeros.
bits = BitString.random(10, bit_prob=.1)
Arguments:
length: An int, indicating the desired length of the result.
bit_prob: A float in the range [0, 1]. This is the probability
of any given bit in the result having a value of 1; default
is .5, giving 0 and 1 equal probabilities of appearance for
each bit's value.
Return:
A randomly generated BitString instance of the requested
length.
"""
assert isinstance(length, int) and length >= 0
assert isinstance(bit_prob, (int, float)) and 0 <= bit_prob <= 1
bits = numpy.random.choice(
[False, True],
size=(length,),
p=[1-bit_prob, bit_prob]
)
bits.flags.writeable = False
return cls(bits)
|
Create a bit string of the given length, with the probability of
each bit being set equal to bit_prob, which defaults to .5.
Usage:
# Create a random BitString of length 10 with mostly zeros.
bits = BitString.random(10, bit_prob=.1)
Arguments:
length: An int, indicating the desired length of the result.
bit_prob: A float in the range [0, 1]. This is the probability
of any given bit in the result having a value of 1; default
is .5, giving 0 and 1 equal probabilities of appearance for
each bit's value.
Return:
A randomly generated BitString instance of the requested
length.
|
entailment
|
def crossover_template(cls, length, points=2):
"""Create a crossover template with the given number of points. The
crossover template can be used as a mask to crossover two
bitstrings of the same length.
Usage:
assert len(parent1) == len(parent2)
template = BitString.crossover_template(len(parent1))
inv_template = ~template
child1 = (parent1 & template) | (parent2 & inv_template)
child2 = (parent1 & inv_template) | (parent2 & template)
Arguments:
length: An int, indicating the desired length of the result.
points: An int, the number of crossover points.
Return:
A BitString instance of the requested length which can be used
as a crossover template.
"""
assert isinstance(length, int) and length >= 0
assert isinstance(points, int) and points >= 0
# Select the crossover points.
points = random.sample(range(length + 1), points)
# Prep the points for the loop.
points.sort()
points.append(length)
# Fill the bits in with alternating ranges of 0 and 1 according to
# the selected crossover points.
previous = 0
include_range = bool(random.randrange(2))
pieces = []
for point in points:
if point > previous:
fill = (numpy.ones if include_range else numpy.zeros)
pieces.append(fill(point - previous, dtype=bool))
include_range = not include_range
previous = point
bits = numpy.concatenate(pieces)
bits.flags.writeable = False
return cls(bits)
|
Create a crossover template with the given number of points. The
crossover template can be used as a mask to crossover two
bitstrings of the same length.
Usage:
assert len(parent1) == len(parent2)
template = BitString.crossover_template(len(parent1))
inv_template = ~template
child1 = (parent1 & template) | (parent2 & inv_template)
child2 = (parent1 & inv_template) | (parent2 & template)
Arguments:
length: An int, indicating the desired length of the result.
points: An int, the number of crossover points.
Return:
A BitString instance of the requested length which can be used
as a crossover template.
|
entailment
|
def cover(cls, bits, wildcard_probability):
"""Create a new bit condition that matches the provided bit string,
with the indicated per-index wildcard probability.
Usage:
condition = BitCondition.cover(bitstring, .33)
assert condition(bitstring)
Arguments:
bits: A BitString which the resulting condition must match.
wildcard_probability: A float in the range [0, 1] which
indicates the likelihood of any given bit position containing
a wildcard.
Return:
A randomly generated BitCondition which matches the given bits.
"""
if not isinstance(bits, BitString):
bits = BitString(bits)
mask = BitString([
random.random() > wildcard_probability
for _ in range(len(bits))
])
return cls(bits, mask)
|
Create a new bit condition that matches the provided bit string,
with the indicated per-index wildcard probability.
Usage:
condition = BitCondition.cover(bitstring, .33)
assert condition(bitstring)
Arguments:
bits: A BitString which the resulting condition must match.
wildcard_probability: A float in the range [0, 1] which
indicates the likelihood of any given bit position containing
a wildcard.
Return:
A randomly generated BitCondition which matches the given bits.
|
entailment
|
def crossover_with(self, other, points=2):
"""Perform 2-point crossover on this bit condition and another of
the same length, returning the two resulting children.
Usage:
offspring1, offspring2 = condition1.crossover_with(condition2)
Arguments:
other: A second BitCondition of the same length as this one.
points: An int, the number of crossover points of the
crossover operation.
Return:
A tuple (condition1, condition2) of BitConditions, where the
value at each position of this BitCondition and the other is
preserved in one or the other of the two resulting conditions.
"""
assert isinstance(other, BitCondition)
assert len(self) == len(other)
template = BitString.crossover_template(len(self), points)
inv_template = ~template
bits1 = (self._bits & template) | (other._bits & inv_template)
mask1 = (self._mask & template) | (other._mask & inv_template)
bits2 = (self._bits & inv_template) | (other._bits & template)
mask2 = (self._mask & inv_template) | (other._mask & template)
# Convert the modified sequences back into BitConditions
return type(self)(bits1, mask1), type(self)(bits2, mask2)
|
Perform 2-point crossover on this bit condition and another of
the same length, returning the two resulting children.
Usage:
offspring1, offspring2 = condition1.crossover_with(condition2)
Arguments:
other: A second BitCondition of the same length as this one.
points: An int, the number of crossover points of the
crossover operation.
Return:
A tuple (condition1, condition2) of BitConditions, where the
value at each position of this BitCondition and the other is
preserved in one or the other of the two resulting conditions.
|
entailment
|
def get_backend_cls(self):
"""Get the currently used backend class."""
backend_cls = self.backend_cls
if not backend_cls or isinstance(backend_cls, basestring):
backend_cls = get_backend_cls(backend_cls)
return backend_cls
|
Get the currently used backend class.
|
entailment
|
def ensure_connection(self, errback=None, max_retries=None,
interval_start=2, interval_step=2, interval_max=30):
"""Ensure we have a connection to the server.
        If not, retry establishing the connection with the settings
specified.
:keyword errback: Optional callback called each time the connection
can't be established. Arguments provided are the exception
raised and the interval that will be slept ``(exc, interval)``.
:keyword max_retries: Maximum number of times to retry.
If this limit is exceeded the connection error will be re-raised.
:keyword interval_start: The number of seconds we start sleeping for.
:keyword interval_step: How many seconds added to the interval
for each retry.
:keyword interval_max: Maximum number of seconds to sleep between
each retry.
"""
retry_over_time(self.connect, self.connection_errors, (), {},
errback, max_retries,
interval_start, interval_step, interval_max)
return self
|
Ensure we have a connection to the server.
If not, retry establishing the connection with the settings
specified.
:keyword errback: Optional callback called each time the connection
can't be established. Arguments provided are the exception
raised and the interval that will be slept ``(exc, interval)``.
:keyword max_retries: Maximum number of times to retry.
If this limit is exceeded the connection error will be re-raised.
:keyword interval_start: The number of seconds we start sleeping for.
:keyword interval_step: How many seconds added to the interval
for each retry.
:keyword interval_max: Maximum number of seconds to sleep between
each retry.
|
entailment
|
def close(self):
"""Close the currently open connection."""
try:
if self._connection:
backend = self.create_backend()
backend.close_connection(self._connection)
except socket.error:
pass
self._closed = True
|
Close the currently open connection.
|
entailment
|
def info(self):
"""Get connection info."""
backend_cls = self.backend_cls or "amqplib"
port = self.port or self.create_backend().default_port
return {"hostname": self.hostname,
"userid": self.userid,
"password": self.password,
"virtual_host": self.virtual_host,
"port": port,
"insist": self.insist,
"ssl": self.ssl,
"transport_cls": backend_cls,
"backend_cls": backend_cls,
"connect_timeout": self.connect_timeout}
|
Get connection info.
|
entailment
|
def decode(self):
"""Deserialize the message body, returning the original
python structure sent by the publisher."""
return serialization.decode(self.body, self.content_type,
self.content_encoding)
|
Deserialize the message body, returning the original
python structure sent by the publisher.
|
entailment
|
def payload(self):
"""The decoded message."""
if not self._decoded_cache:
self._decoded_cache = self.decode()
return self._decoded_cache
|
The decoded message.
|
entailment
|
def ack(self):
"""Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.acknowledged:
raise self.MessageStateError(
"Message already acknowledged with state: %s" % self._state)
self.backend.ack(self.delivery_tag)
self._state = "ACK"
|
Acknowledge this message as being processed.
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
|
entailment
|
def reject(self):
"""Reject this message.
The message will be discarded by the server.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.acknowledged:
raise self.MessageStateError(
"Message already acknowledged with state: %s" % self._state)
self.backend.reject(self.delivery_tag)
self._state = "REJECTED"
|
Reject this message.
The message will be discarded by the server.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
|
entailment
|
def requeue(self):
"""Reject this message and put it back on the queue.
You must not use this method as a means of selecting messages
to process.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.acknowledged:
raise self.MessageStateError(
"Message already acknowledged with state: %s" % self._state)
self.backend.requeue(self.delivery_tag)
self._state = "REQUEUED"
|
Reject this message and put it back on the queue.
You must not use this method as a means of selecting messages
to process.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
|
entailment
|
def gen_unique_id():
"""Generate a unique id, having - hopefully - a very small chance of
    collision.
For now this is provided by :func:`uuid.uuid4`.
"""
# Workaround for http://bugs.python.org/issue4607
if ctypes and _uuid_generate_random:
buffer = ctypes.create_string_buffer(16)
_uuid_generate_random(buffer)
return str(UUID(bytes=buffer.raw))
return str(uuid4())
|
Generate a unique id, having - hopefully - a very small chance of
collision.
For now this is provided by :func:`uuid.uuid4`.
|
entailment
|
def retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
max_retries=None, interval_start=2, interval_step=2, interval_max=30):
"""Retry the function over and over until max retries is exceeded.
    For each retry we sleep for a while before we try again; this interval
is increased for every retry until the max seconds is reached.
:param fun: The function to try
:param catch: Exceptions to catch, can be either tuple or a single
exception class.
:keyword args: Positional arguments passed on to the function.
:keyword kwargs: Keyword arguments passed on to the function.
:keyword errback: Callback for when an exception in ``catch`` is raised.
The callback must take two arguments: ``exc`` and ``interval``, where
``exc`` is the exception instance, and ``interval`` is the time in
        seconds to sleep next.
:keyword max_retries: Maximum number of retries before we give up.
If this is not set, we will retry forever.
:keyword interval_start: How long (in seconds) we start sleeping between
retries.
:keyword interval_step: By how much the interval is increased for each
retry.
:keyword interval_max: Maximum number of seconds to sleep between retries.
"""
retries = 0
interval_range = xrange(interval_start,
interval_max + interval_start,
interval_step)
for retries, interval in enumerate(repeatlast(interval_range)):
try:
retval = fun(*args, **kwargs)
except catch, exc:
if max_retries and retries > max_retries:
raise
if errback:
errback(exc, interval)
sleep(interval)
else:
return retval
|
Retry the function over and over until max retries is exceeded.
For each retry we sleep for a while before we try again; this interval
is increased for every retry until the max seconds is reached.
:param fun: The function to try
:param catch: Exceptions to catch, can be either tuple or a single
exception class.
:keyword args: Positional arguments passed on to the function.
:keyword kwargs: Keyword arguments passed on to the function.
:keyword errback: Callback for when an exception in ``catch`` is raised.
The callback must take two arguments: ``exc`` and ``interval``, where
``exc`` is the exception instance, and ``interval`` is the time in
seconds to sleep next.
:keyword max_retries: Maximum number of retries before we give up.
If this is not set, we will retry forever.
:keyword interval_start: How long (in seconds) we start sleeping between
retries.
:keyword interval_step: By how much the interval is increased for each
retry.
:keyword interval_max: Maximum number of seconds to sleep between retries.
|
entailment
|
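A hedged usage sketch for retry_over_time; the flaky establish function and the error handler are hypothetical:

import socket

attempts = []

def establish():
    attempts.append(1)
    if len(attempts) < 3:
        raise socket.error('broker unreachable')
    return 'connected'

def on_failure(exc, interval):
    print('Failed (%s); retrying in %s seconds' % (exc, interval))

# Sleeps 2 then 4 seconds before the third attempt succeeds; the last
# exception is re-raised once max_retries is exceeded.
result = retry_over_time(establish, socket.error, errback=on_failure,
                         max_retries=5)
assert result == 'connected'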
def get(self, *args, **kwargs):
"""Get the next waiting message from the queue.
:returns: A :class:`Message` instance, or ``None`` if there is
no messages waiting.
"""
if not mqueue.qsize():
return None
message_data, content_type, content_encoding = mqueue.get()
return self.Message(backend=self, body=message_data,
content_type=content_type,
content_encoding=content_encoding)
|
Get the next waiting message from the queue.
:returns: A :class:`Message` instance, or ``None`` if there is
no messages waiting.
|
entailment
|
def queue_purge(self, queue, **kwargs):
"""Discard all messages in the queue."""
qsize = mqueue.qsize()
mqueue.queue.clear()
return qsize
|
Discard all messages in the queue.
|
entailment
|
def prepare_message(self, message_data, delivery_mode,
content_type, content_encoding, **kwargs):
"""Prepare message for sending."""
return (message_data, content_type, content_encoding)
|
Prepare message for sending.
|
entailment
|
def get_future_expectation(self, match_set):
"""Return a numerical value representing the expected future payoff
of the previously selected action, given only the current match
set. The match_set argument is a MatchSet instance representing the
current match set.
Usage:
match_set = model.match(situation)
expectation = model.algorithm.get_future_expectation(match_set)
payoff = previous_reward + discount_factor * expectation
previous_match_set.payoff = payoff
Arguments:
match_set: A MatchSet instance.
Return:
A float, the estimate of the expected near-future payoff for
the situation for which match_set was generated, based on the
contents of match_set.
"""
assert isinstance(match_set, MatchSet)
assert match_set.algorithm is self
return self.discount_factor * (
self.idealization_factor * match_set.best_prediction +
(1 - self.idealization_factor) * match_set.prediction
)
|
Return a numerical value representing the expected future payoff
of the previously selected action, given only the current match
set. The match_set argument is a MatchSet instance representing the
current match set.
Usage:
match_set = model.match(situation)
expectation = model.algorithm.get_future_expectation(match_set)
payoff = previous_reward + discount_factor * expectation
previous_match_set.payoff = payoff
Arguments:
match_set: A MatchSet instance.
Return:
A float, the estimate of the expected near-future payoff for
the situation for which match_set was generated, based on the
contents of match_set.
|
entailment
|
def covering_is_required(self, match_set):
"""Return a Boolean indicating whether covering is required for the
current match set. The match_set argument is a MatchSet instance
representing the current match set before covering is applied.
Usage:
match_set = model.match(situation)
if model.algorithm.covering_is_required(match_set):
new_rule = model.algorithm.cover(match_set)
assert new_rule.condition(situation)
model.add(new_rule)
match_set = model.match(situation)
Arguments:
match_set: A MatchSet instance.
Return:
A bool indicating whether match_set contains too few matching
classifier rules and therefore needs to be augmented with a
new one.
"""
assert isinstance(match_set, MatchSet)
assert match_set.algorithm is self
if self.minimum_actions is None:
return len(match_set) < len(match_set.model.possible_actions)
else:
return len(match_set) < self.minimum_actions
|
Return a Boolean indicating whether covering is required for the
current match set. The match_set argument is a MatchSet instance
representing the current match set before covering is applied.
Usage:
match_set = model.match(situation)
if model.algorithm.covering_is_required(match_set):
new_rule = model.algorithm.cover(match_set)
assert new_rule.condition(situation)
model.add(new_rule)
match_set = model.match(situation)
Arguments:
match_set: A MatchSet instance.
Return:
A bool indicating whether match_set contains too few matching
classifier rules and therefore needs to be augmented with a
new one.
|
entailment
|
def cover(self, match_set):
"""Return a new classifier rule that can be added to the match set,
with a condition that matches the situation of the match set and an
action selected to avoid duplication of the actions already
contained therein. The match_set argument is a MatchSet instance
representing the match set to which the returned rule may be added.
Usage:
match_set = model.match(situation)
if model.algorithm.covering_is_required(match_set):
new_rule = model.algorithm.cover(match_set)
assert new_rule.condition(situation)
model.add(new_rule)
match_set = model.match(situation)
Arguments:
match_set: A MatchSet instance.
Return:
A new ClassifierRule instance, appropriate for the addition to
match_set and to the classifier set from which match_set was
drawn.
"""
assert isinstance(match_set, MatchSet)
assert match_set.model.algorithm is self
# Create a new condition that matches the situation.
condition = bitstrings.BitCondition.cover(
match_set.situation,
self.wildcard_probability
)
# Pick a random action that (preferably) isn't already suggested by
# some other rule for this situation.
action_candidates = (
frozenset(match_set.model.possible_actions) -
frozenset(match_set)
)
if not action_candidates:
action_candidates = match_set.model.possible_actions
action = random.choice(list(action_candidates))
# Create the new rule.
return XCSClassifierRule(
condition,
action,
self,
match_set.time_stamp
)
|
Return a new classifier rule that can be added to the match set,
with a condition that matches the situation of the match set and an
action selected to avoid duplication of the actions already
contained therein. The match_set argument is a MatchSet instance
representing the match set to which the returned rule may be added.
Usage:
match_set = model.match(situation)
if model.algorithm.covering_is_required(match_set):
new_rule = model.algorithm.cover(match_set)
assert new_rule.condition(situation)
model.add(new_rule)
match_set = model.match(situation)
Arguments:
match_set: A MatchSet instance.
Return:
A new ClassifierRule instance, appropriate for the addition to
match_set and to the classifier set from which match_set was
drawn.
|
entailment
|
def distribute_payoff(self, match_set):
"""Distribute the payoff received in response to the selected
action of the given match set among the rules in the action set
which deserve credit for recommending the action. The match_set
argument is the MatchSet instance which suggested the selected
action and earned the payoff.
Usage:
match_set = model.match(situation)
match_set.select_action()
match_set.payoff = reward
model.algorithm.distribute_payoff(match_set)
Arguments:
match_set: A MatchSet instance for which the accumulated payoff
needs to be distributed among its classifier rules.
Return: None
"""
assert isinstance(match_set, MatchSet)
assert match_set.algorithm is self
assert match_set.selected_action is not None
payoff = float(match_set.payoff)
action_set = match_set[match_set.selected_action]
action_set_size = sum(rule.numerosity for rule in action_set)
# Update the average reward, error, and action set size of each
# rule participating in the action set.
for rule in action_set:
rule.experience += 1
update_rate = max(self.learning_rate, 1 / rule.experience)
rule.average_reward += (
(payoff - rule.average_reward) *
update_rate
)
rule.error += (
(abs(payoff - rule.average_reward) - rule.error) *
update_rate
)
rule.action_set_size += (
(action_set_size - rule.action_set_size) *
update_rate
)
# Update the fitness of the rules.
self._update_fitness(action_set)
# If the parameters so indicate, perform action set subsumption.
if self.do_action_set_subsumption:
self._action_set_subsumption(action_set)
|
Distribute the payoff received in response to the selected
action of the given match set among the rules in the action set
which deserve credit for recommending the action. The match_set
argument is the MatchSet instance which suggested the selected
action and earned the payoff.
Usage:
match_set = model.match(situation)
match_set.select_action()
match_set.payoff = reward
model.algorithm.distribute_payoff(match_set)
Arguments:
match_set: A MatchSet instance for which the accumulated payoff
needs to be distributed among its classifier rules.
Return: None
|
entailment
|
def update(self, match_set):
"""Update the classifier set from which the match set was drawn,
e.g. by applying a genetic algorithm. The match_set argument is the
MatchSet instance whose classifier set should be updated.
Usage:
match_set = model.match(situation)
match_set.select_action()
match_set.payoff = reward
model.algorithm.distribute_payoff(match_set)
model.algorithm.update(match_set)
Arguments:
match_set: A MatchSet instance for which the classifier set
from which it was drawn needs to be updated based on the
match set's payoff distribution.
Return: None
"""
assert isinstance(match_set, MatchSet)
assert match_set.model.algorithm is self
assert match_set.selected_action is not None
# Increment the iteration counter.
match_set.model.update_time_stamp()
action_set = match_set[match_set.selected_action]
# If the average number of iterations since the last update for
# each rule in the action set is too small, return early instead of
# applying the GA.
average_time_passed = (
match_set.model.time_stamp -
self._get_average_time_stamp(action_set)
)
if average_time_passed <= self.ga_threshold:
return
# Update the time step for each rule to indicate that they were
# updated by the GA.
self._set_timestamps(action_set)
# Select two parents from the action set, with probability
# proportionate to their fitness.
parent1 = self._select_parent(action_set)
parent2 = self._select_parent(action_set)
# With the probability specified in the parameters, apply the
# crossover operator to the parents. Otherwise, just take the
# parents unchanged.
if random.random() < self.crossover_probability:
condition1, condition2 = parent1.condition.crossover_with(
parent2.condition
)
else:
condition1, condition2 = parent1.condition, parent2.condition
# Apply the mutation operator to each child, randomly flipping
# their mask bits with a small probability.
condition1 = self._mutate(condition1, action_set.situation)
condition2 = self._mutate(condition2, action_set.situation)
# If the newly generated children are already present in the
# population (or if they should be subsumed due to GA subsumption)
# then simply increment the numerosities of the existing rules in
# the population.
new_children = []
for condition in condition1, condition2:
# If the parameters specify that GA subsumption should be
# performed, look for an accurate parent that can subsume the
# new child.
if self.do_ga_subsumption:
subsumed = False
for parent in parent1, parent2:
should_subsume = (
(parent.experience >
self.subsumption_threshold) and
parent.error < self.error_threshold and
parent.condition(condition)
)
if should_subsume:
if parent in action_set.model:
parent.numerosity += 1
self.prune(action_set.model)
else:
# Sometimes the parent is removed from a
# previous subsumption
parent.numerosity = 1
action_set.model.add(parent)
subsumed = True
break
if subsumed:
continue
# Provided the child has not already been subsumed and it is
# present in the population, just increment its numerosity.
# Otherwise, if the child has neither been subsumed nor does it
# already exist, remember it so we can add it to the classifier
# set in just a moment.
child = XCSClassifierRule(
condition,
action_set.action,
self,
action_set.model.time_stamp
)
if child in action_set.model:
action_set.model.add(child)
else:
new_children.append(child)
# If there were any children which weren't subsumed and weren't
# already present in the classifier set, add them.
if new_children:
average_reward = .5 * (
parent1.average_reward +
parent2.average_reward
)
error = .5 * (parent1.error + parent2.error)
# .1 * (average fitness of parents)
fitness = .05 * (
parent1.fitness +
parent2.fitness
)
for child in new_children:
child.average_reward = average_reward
child.error = error
child.fitness = fitness
action_set.model.add(child)
|
Update the classifier set from which the match set was drawn,
e.g. by applying a genetic algorithm. The match_set argument is the
MatchSet instance whose classifier set should be updated.
Usage:
match_set = model.match(situation)
match_set.select_action()
match_set.payoff = reward
model.algorithm.distribute_payoff(match_set)
model.algorithm.update(match_set)
Arguments:
match_set: A MatchSet instance for which the classifier set
from which it was drawn needs to be updated based on the
match set's payoff distribution.
Return: None
|
entailment
|
def prune(self, model):
"""Reduce the classifier set's population size, if necessary, by
        removing lower-quality rules. Return a list containing any rules
whose numerosities dropped to zero as a result of this call. (The
list may be empty, if no rule's numerosity dropped to 0.) The
model argument is a ClassifierSet instance which utilizes this
algorithm.
Usage:
deleted_rules = model.algorithm.prune(model)
Arguments:
model: A ClassifierSet instance whose population may need to
be reduced in size.
Return:
A possibly empty list of ClassifierRule instances which were
removed entirely from the classifier set because their
numerosities dropped to 0.
"""
assert isinstance(model, ClassifierSet)
assert model.algorithm is self
# Determine the (virtual) population size.
total_numerosity = sum(rule.numerosity for rule in model)
# If the population size is already small enough, just return early
if total_numerosity <= self.max_population_size:
return [] # No rule's numerosity dropped to zero.
# Determine the average fitness of the rules in the population.
total_fitness = sum(rule.fitness for rule in model)
average_fitness = total_fitness / total_numerosity
# Determine the probability of deletion, as a function of both
# accuracy and niche sparsity.
total_votes = 0
deletion_votes = {}
for rule in model:
vote = rule.action_set_size * rule.numerosity
sufficient_experience = (
rule.experience > self.deletion_threshold
)
low_fitness = (
rule.fitness / rule.numerosity <
self.fitness_threshold * average_fitness
)
if sufficient_experience and low_fitness:
vote *= average_fitness / (rule.fitness /
rule.numerosity)
deletion_votes[rule] = vote
total_votes += vote
# Choose a rule to delete based on the probabilities just computed.
selector = random.uniform(0, total_votes)
for rule, vote in deletion_votes.items():
selector -= vote
if selector <= 0:
assert rule in model
if model.discard(rule):
return [rule]
else:
return []
assert False
|
Reduce the classifier set's population size, if necessary, by
removing lower-quality rules. Return a list containing any rules
whose numerosities dropped to zero as a result of this call. (The
list may be empty, if no rule's numerosity dropped to 0.) The
model argument is a ClassifierSet instance which utilizes this
algorithm.
Usage:
deleted_rules = model.algorithm.prune(model)
Arguments:
model: A ClassifierSet instance whose population may need to
be reduced in size.
Return:
A possibly empty list of ClassifierRule instances which were
removed entirely from the classifier set because their
numerosities dropped to 0.
|
entailment
|
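The deletion step in prune() is a roulette-wheel draw over per-rule votes, where a vote grows with action-set size, numerosity, and (for experienced, low-fitness rules) inverse relative fitness. A self-contained sketch of that selection mechanism, with hypothetical rule names and vote values:
import random

def roulette_select(votes):
    # Pick one key with probability proportional to its vote,
    # mirroring the selector loop in prune().
    total = sum(votes.values())
    selector = random.uniform(0, total)
    for key, vote in votes.items():
        selector -= vote
        if selector <= 0:
            return key
    # Floating-point round-off can leave a tiny remainder; fall
    # back on an arbitrary key rather than failing.
    return next(iter(votes))

print(roulette_select({"rule_a": 1.0, "rule_b": 3.0, "rule_c": 0.5}))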
def _update_fitness(self, action_set):
"""Update the fitness values of the rules belonging to this action
set."""
# Compute the accuracy of each rule. Accuracy is inversely
# proportional to error. Below a certain error threshold, accuracy
# becomes constant. Accuracy values range over (0, 1].
total_accuracy = 0
accuracies = {}
for rule in action_set:
if rule.error < self.error_threshold:
accuracy = 1
else:
accuracy = (
self.accuracy_coefficient *
(rule.error / self.error_threshold) **
-self.accuracy_power
)
accuracies[rule] = accuracy
total_accuracy += accuracy * rule.numerosity
        # On rare occasions the total accuracy is zero; substituting 1
        # avoids a division by zero below.
total_accuracy = total_accuracy or 1
# Use the relative accuracies of the rules to update their fitness
for rule in action_set:
accuracy = accuracies[rule]
rule.fitness += (
self.learning_rate *
(accuracy * rule.numerosity / total_accuracy -
rule.fitness)
)
|
Update the fitness values of the rules belonging to this action
set.
|
entailment
|
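The accuracy used in _update_fitness() is 1 below the error threshold and decays as a power law above it. A small numeric sketch of that curve; the parameter values here are illustrative assumptions, not necessarily this algorithm's configured defaults:
def accuracy(error, error_threshold=0.01, coefficient=0.1, power=5):
    # Rules below the error threshold count as fully accurate.
    if error < error_threshold:
        return 1.0
    # Above it, accuracy falls off as a power law in relative error.
    return coefficient * (error / error_threshold) ** -power

for err in (0.005, 0.01, 0.02, 0.04):
    print(err, accuracy(err))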
def _action_set_subsumption(self, action_set):
"""Perform action set subsumption."""
# Select a condition with maximum bit count among those having
# sufficient experience and sufficiently low error.
selected_rule = None
selected_bit_count = None
for rule in action_set:
if not (rule.experience > self.subsumption_threshold and
rule.error < self.error_threshold):
continue
bit_count = rule.condition.count()
if (selected_rule is None or
bit_count > selected_bit_count or
(bit_count == selected_bit_count and
random.randrange(2))):
selected_rule = rule
selected_bit_count = bit_count
# If no rule was found satisfying the requirements, return
# early.
if selected_rule is None:
return
# Subsume each rule which the selected rule generalizes. When a
# rule is subsumed, all instances of the subsumed rule are replaced
# with instances of the more general one in the population.
to_remove = []
for rule in action_set:
if (selected_rule is not rule and
selected_rule.condition(rule.condition)):
selected_rule.numerosity += rule.numerosity
action_set.model.discard(rule, rule.numerosity)
to_remove.append(rule)
for rule in to_remove:
action_set.remove(rule)
|
Perform action set subsumption.
|
entailment
|
def _get_average_time_stamp(action_set):
"""Return the average time stamp for the rules in this action
set."""
# This is the average value of the iteration counter upon the most
# recent update of each rule in this action set.
total_time_stamps = sum(rule.time_stamp * rule.numerosity
for rule in action_set)
total_numerosity = sum(rule.numerosity for rule in action_set)
return total_time_stamps / (total_numerosity or 1)
|
Return the average time stamp for the rules in this action
set.
|
entailment
|
def _select_parent(action_set):
"""Select a rule from this action set, with probability
proportionate to its fitness, to act as a parent for a new rule in
the classifier set. Return the selected rule."""
total_fitness = sum(rule.fitness for rule in action_set)
selector = random.uniform(0, total_fitness)
for rule in action_set:
selector -= rule.fitness
if selector <= 0:
return rule
# If for some reason a case slips through the above loop, perhaps
# due to floating point error, we fall back on uniform selection.
return random.choice(list(action_set))
|
Select a rule from this action set, with probability
proportionate to its fitness, to act as a parent for a new rule in
the classifier set. Return the selected rule.
|
entailment
|
def _mutate(self, condition, situation):
"""Create a new condition from the given one by probabilistically
applying point-wise mutations. Bits that were originally wildcarded
in the parent condition acquire their values from the provided
situation, to ensure the child condition continues to match it."""
# Go through each position in the condition, randomly flipping
# whether the position is a value (0 or 1) or a wildcard (#). We do
# this in a new list because the original condition's mask is
# immutable.
mutation_points = bitstrings.BitString.random(
len(condition.mask),
self.mutation_probability
)
mask = condition.mask ^ mutation_points
# The bits that aren't wildcards always have the same value as the
# situation, which ensures that the mutated condition still matches
# the situation.
if isinstance(situation, bitstrings.BitCondition):
mask &= situation.mask
return bitstrings.BitCondition(situation.bits, mask)
return bitstrings.BitCondition(situation, mask)
|
Create a new condition from the given one by probabilistically
applying point-wise mutations. Bits that were originally wildcarded
in the parent condition acquire their values from the provided
situation, to ensure the child condition continues to match it.
|
entailment
|
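_mutate() flips wildcard status by XOR-ing the condition's mask with a random bit mask, then re-anchors the non-wildcard bits to the situation. A plain-integer sketch of the XOR step; treating a set mask bit as "not a wildcard" is an assumption made for illustration:
import random

def mutate_mask(mask, length, p=0.05):
    # Flip each mask bit independently with probability p, via XOR.
    flips = 0
    for i in range(length):
        if random.random() < p:
            flips |= 1 << i
    return mask ^ flips

print(bin(mutate_mask(0b1111, 4, p=0.5)))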
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.connection
if not conninfo.port:
conninfo.port = self.default_port
credentials = pika.PlainCredentials(conninfo.userid,
conninfo.password)
return self._connection_cls(pika.ConnectionParameters(
conninfo.hostname,
port=conninfo.port,
virtual_host=conninfo.virtual_host,
credentials=credentials))
|
Establish connection to the AMQP broker.
|
entailment
|
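A hedged sketch of opening an equivalent connection directly with pika; the host, port and credentials are placeholders, and BlockingConnection merely stands in for whatever _connection_cls the backend is configured with:
import pika

credentials = pika.PlainCredentials("guest", "guest")
params = pika.ConnectionParameters("localhost",
                                   port=5672,
                                   virtual_host="/",
                                   credentials=credentials)
connection = pika.BlockingConnection(params)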
def queue_purge(self, queue, **kwargs):
"""Discard all messages in the queue. This will delete the messages
        and result in an empty queue."""
return self.channel.queue_purge(queue=queue).message_count
|
Discard all messages in the queue. This will delete the messages
and result in an empty queue.
|
entailment
|
def queue_declare(self, queue, durable, exclusive, auto_delete,
warn_if_exists=False, arguments=None):
"""Declare a named queue."""
return self.channel.queue_declare(queue=queue,
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete,
arguments=arguments)
|
Declare a named queue.
|
entailment
|
def declare_consumer(self, queue, no_ack, callback, consumer_tag,
nowait=False):
"""Declare a consumer."""
@functools.wraps(callback)
def _callback_decode(channel, method, header, body):
return callback((channel, method, header, body))
return self.channel.basic_consume(_callback_decode,
queue=queue,
no_ack=no_ack,
consumer_tag=consumer_tag)
|
Declare a consumer.
|
entailment
|
def close(self):
"""Close the channel if open."""
if self._channel and not self._channel.handler.channel_close:
self._channel.close()
self._channel_ref = None
|
Close the channel if open.
|
entailment
|
def prepare_message(self, message_data, delivery_mode, priority=None,
content_type=None, content_encoding=None):
"""Encapsulate data into a AMQP message."""
properties = pika.BasicProperties(priority=priority,
content_type=content_type,
content_encoding=content_encoding,
delivery_mode=delivery_mode)
return message_data, properties
|
Encapsulate data into an AMQP message.
|
entailment
|
def publish(self, message, exchange, routing_key, mandatory=None,
immediate=None, headers=None):
"""Publish a message to a named exchange."""
body, properties = message
if headers:
properties.headers = headers
ret = self.channel.basic_publish(body=body,
properties=properties,
exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
if mandatory or immediate:
self.close()
|
Publish a message to a named exchange.
|
entailment
|
def _generate_consumer_tag(self):
"""Generate a unique consumer tag.
        :rtype: str
"""
return "%s.%s%s" % (
self.__class__.__module__,
self.__class__.__name__,
self._next_consumer_tag())
|
Generate a unique consumer tag.
:rtype: str
|
entailment
|
def declare(self):
"""Declares the queue, the exchange and binds the queue to
the exchange."""
arguments = None
routing_key = self.routing_key
if self.exchange_type == "headers":
arguments, routing_key = routing_key, ""
if self.queue:
self.backend.queue_declare(queue=self.queue, durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete,
arguments=self.queue_arguments,
warn_if_exists=self.warn_if_exists)
if self.exchange:
self.backend.exchange_declare(exchange=self.exchange,
type=self.exchange_type,
durable=self.durable,
auto_delete=self.auto_delete)
if self.queue:
self.backend.queue_bind(queue=self.queue,
exchange=self.exchange,
routing_key=routing_key,
arguments=arguments)
self._closed = False
return self
|
Declare the queue and the exchange, then bind the queue to
the exchange.
|
entailment
|
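A sketch of driving declare() through carrot's documented Consumer API; the connection object and the queue/exchange/key names are placeholders:
from carrot.messaging import Consumer

consumer = Consumer(connection=connection, queue="feed",
                    exchange="feed", routing_key="importer")
consumer.declare()  # declares the queue and exchange, then binds them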
def _receive_callback(self, raw_message):
"""Internal method used when a message is received in consume mode."""
message = self.backend.message_to_python(raw_message)
if self.auto_ack and not message.acknowledged:
message.ack()
self.receive(message.payload, message)
|
Internal method used when a message is received in consume mode.
|
entailment
|
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
"""Receive the next message waiting on the queue.
:returns: A :class:`carrot.backends.base.BaseMessage` instance,
            or ``None`` if there are no messages to be received.
:keyword enable_callbacks: Enable callbacks. The message will be
processed with all registered callbacks. Default is disabled.
:keyword auto_ack: Override the default :attr:`auto_ack` setting.
:keyword no_ack: Override the default :attr:`no_ack` setting.
"""
no_ack = no_ack or self.no_ack
auto_ack = auto_ack or self.auto_ack
message = self.backend.get(self.queue, no_ack=no_ack)
if message:
if auto_ack and not message.acknowledged:
message.ack()
if enable_callbacks:
self.receive(message.payload, message)
return message
|
Receive the next message waiting on the queue.
:returns: A :class:`carrot.backends.base.BaseMessage` instance,
or ``None`` if there are no messages to be received.
:keyword enable_callbacks: Enable callbacks. The message will be
processed with all registered callbacks. Default is disabled.
:keyword auto_ack: Override the default :attr:`auto_ack` setting.
:keyword no_ack: Override the default :attr:`no_ack` setting.
|
entailment
|
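A polling sketch built on fetch(), assuming a declared Consumer named `consumer`; it drains whatever is currently waiting, acking each message as it goes:
while True:
    message = consumer.fetch(auto_ack=True)
    if message is None:
        break  # queue is empty for now
    print(message.payload)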
def receive(self, message_data, message):
"""This method is called when a new message is received by
running :meth:`wait`, :meth:`process_next` or :meth:`iterqueue`.
When a message is received, it passes the message on to the
callbacks listed in the :attr:`callbacks` attribute.
You can register callbacks using :meth:`register_callback`.
:param message_data: The deserialized message data.
:param message: The :class:`carrot.backends.base.BaseMessage` instance.
        :raises NotImplementedError: If no callbacks have been registered.
"""
if not self.callbacks:
raise NotImplementedError("No consumer callbacks registered")
for callback in self.callbacks:
callback(message_data, message)
|
This method is called when a new message is received by
running :meth:`wait`, :meth:`process_next` or :meth:`iterqueue`.
When a message is received, it passes the message on to the
callbacks listed in the :attr:`callbacks` attribute.
You can register callbacks using :meth:`register_callback`.
:param message_data: The deserialized message data.
:param message: The :class:`carrot.backends.base.BaseMessage` instance.
:raises NotImplementedError: If no callbacks have been registered.
|
entailment
|
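A sketch of the callback path receive() dispatches to, assuming a Consumer named `consumer`; the callback name is a placeholder:
def import_feed(message_data, message):
    # message_data is the deserialized payload; ack once handled.
    print("got: %r" % (message_data, ))
    message.ack()

consumer.register_callback(import_feed)
consumer.wait()  # blocks, feeding every message through receive()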
def discard_all(self, filterfunc=None):
"""Discard all waiting messages.
        :param filterfunc: A filter function; only the messages for which it
            returns a true value are discarded.
:returns: the number of messages discarded.
*WARNING*: All incoming messages will be ignored and not processed.
Example using filter:
>>> def waiting_feeds_only(message):
... try:
... message_data = message.decode()
... except: # Should probably be more specific.
... pass
...
... if message_data.get("type") == "feed":
... return True
... else:
... return False
"""
if not filterfunc:
return self.backend.queue_purge(self.queue)
if self.no_ack or self.auto_ack:
raise Exception("discard_all: Can't use filter with auto/no-ack.")
discarded_count = 0
while True:
message = self.fetch()
if message is None:
return discarded_count
if filterfunc(message):
message.ack()
discarded_count += 1
|
Discard all waiting messages.
:param filterfunc: A filter function; only the messages for which it
returns a true value are discarded.
:returns: the number of messages discarded.
*WARNING*: All incoming messages will be ignored and not processed.
Example using filter:
>>> def waiting_feeds_only(message):
... try:
... message_data = message.decode()
... except: # Should probably be more specific.
... pass
...
... if message_data.get("type") == "feed":
... return True
... else:
... return False
|
entailment
|
def consume(self, no_ack=None):
"""Declare consumer."""
no_ack = no_ack or self.no_ack
self.backend.declare_consumer(queue=self.queue, no_ack=no_ack,
callback=self._receive_callback,
consumer_tag=self.consumer_tag,
nowait=True)
self.channel_open = True
|
Declare consumer.
|
entailment
|
def wait(self, limit=None):
"""Go into consume mode.
Mostly for testing purposes and simple programs, you probably
want :meth:`iterconsume` or :meth:`iterqueue` instead.
This runs an infinite loop, processing all incoming messages
using :meth:`receive` to apply the message to all registered
callbacks.
"""
it = self.iterconsume(limit)
while True:
it.next()
|
Go into consume mode.
Mostly for testing purposes and simple programs, you probably
want :meth:`iterconsume` or :meth:`iterqueue` instead.
This runs an infinite loop, processing all incoming messages
using :meth:`receive` to apply the message to all registered
callbacks.
|
entailment
|
def iterqueue(self, limit=None, infinite=False):
"""Infinite iterator yielding pending messages, by using
synchronous direct access to the queue (``basic_get``).
:meth:`iterqueue` is used where synchronous functionality is more
important than performance. If you can, use :meth:`iterconsume`
instead.
:keyword limit: If set, the iterator stops when it has processed
this number of messages in total.
        :keyword infinite: Don't raise :exc:`StopIteration` if there are no
messages waiting, but return ``None`` instead. If infinite you
obviously shouldn't consume the whole iterator at once without
using a ``limit``.
        :raises StopIteration: If there are no messages waiting, and the
iterator is not infinite.
"""
for items_since_start in count():
item = self.fetch()
if (not infinite and item is None) or \
(limit and items_since_start >= limit):
raise StopIteration
yield item
|
Infinite iterator yielding pending messages, by using
synchronous direct access to the queue (``basic_get``).
:meth:`iterqueue` is used where synchronous functionality is more
important than performance. If you can, use :meth:`iterconsume`
instead.
:keyword limit: If set, the iterator stops when it has processed
this number of messages in total.
:keyword infinite: Don't raise :exc:`StopIteration` if there are no
messages waiting, but return ``None`` instead. If infinite you
obviously shouldn't consume the whole iterator at once without
using a ``limit``.
:raises StopIteration: If there are no messages waiting, and the
iterator is not infinite.
|
entailment
|
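A sketch of iterqueue() in its bounded, non-infinite form, assuming a Consumer named `consumer` with default ack settings; at most ten waiting messages are processed, and the loop simply ends when the queue runs dry:
for message in consumer.iterqueue(limit=10):
    print(message.payload)
    message.ack()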
def cancel(self):
"""Cancel a running :meth:`iterconsume` session."""
if self.channel_open:
try:
self.backend.cancel(self.consumer_tag)
except KeyError:
pass
|
Cancel a running :meth:`iterconsume` session.
|
entailment
|
def close(self):
"""Close the channel to the queue."""
self.cancel()
self.backend.close()
self._closed = True
|
Close the channel to the queue.
|
entailment
|
def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False):
"""Request specific Quality of Service.
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
:param prefetch_size: Prefetch window in octets.
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The ``prefetch_size`` is ignored if the
:attr:`no_ack` option is set.
:param prefetch_count: Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
            ``prefetch_size``; a message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the :attr:`no_ack` option is set.
:keyword apply_global: By default the QoS settings apply to the
current channel only. If this is set, they are applied
to the entire connection.
"""
return self.backend.qos(prefetch_size, prefetch_count, apply_global)
|
Request specific Quality of Service.
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
:param prefetch_size: Prefetch window in octets.
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The ``prefetch_size`` is ignored if the
:attr:`no_ack` option is set.
:param prefetch_count: Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
``prefetch_size``; a message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the :attr:`no_ack` option is set.
:keyword apply_global: By default the QoS settings apply to the
current channel only. If this is set, they are applied
to the entire connection.
|
entailment
|
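A sketch of a typical qos() call, assuming a Consumer named `consumer`; bounding the unacknowledged-message window keeps a slow worker from buffering the whole queue locally.
# Ask the broker for at most ten unacknowledged messages in flight
# on this channel; prefetch_size=0 means no byte-based limit.
consumer.qos(prefetch_size=0, prefetch_count=10)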
def declare(self):
"""Declare the exchange.
Creates the exchange on the broker.
"""
self.backend.exchange_declare(exchange=self.exchange,
type=self.exchange_type,
durable=self.durable,
auto_delete=self.auto_delete)
|
Declare the exchange.
Creates the exchange on the broker.
|
entailment
|
def create_message(self, message_data, delivery_mode=None, priority=None,
content_type=None, content_encoding=None,
serializer=None):
"""With any data, serialize it and encapsulate it in a AMQP
message with the proper headers set."""
delivery_mode = delivery_mode or self.delivery_mode
# No content_type? Then we're serializing the data internally.
if not content_type:
serializer = serializer or self.serializer
(content_type, content_encoding,
message_data) = serialization.encode(message_data,
serializer=serializer)
else:
# If the programmer doesn't want us to serialize,
# make sure content_encoding is set.
if isinstance(message_data, unicode):
if not content_encoding:
content_encoding = 'utf-8'
message_data = message_data.encode(content_encoding)
# If they passed in a string, we can't know anything
# about it. So assume it's binary data.
elif not content_encoding:
content_encoding = 'binary'
return self.backend.prepare_message(message_data, delivery_mode,
priority=priority,
content_type=content_type,
content_encoding=content_encoding)
|
With any data, serialize it and encapsulate it in an AMQP
message with the proper headers set.
|
entailment
|
def send(self, message_data, routing_key=None, delivery_mode=None,
mandatory=False, immediate=False, priority=0, content_type=None,
content_encoding=None, serializer=None, exchange=None):
"""Send a message.
:param message_data: The message data to send. Can be a list,
dictionary or a string.
:keyword routing_key: A custom routing key for the message.
If not set, the default routing key set in the :attr:`routing_key`
attribute is used.
:keyword mandatory: If set, the message has mandatory routing.
By default the message is silently dropped by the server if it
            can't be routed to a queue. However, if the message is mandatory,
an exception will be raised instead.
:keyword immediate: Request immediate delivery.
If the message cannot be routed to a queue consumer immediately,
an exception will be raised. This is instead of the default
behaviour, where the server will accept and queue the message,
but with no guarantee that the message will ever be consumed.
:keyword delivery_mode: Override the default :attr:`delivery_mode`.
:keyword priority: The message priority, ``0`` to ``9``.
        :keyword content_type: The message's content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword serializer: Override the default :attr:`serializer`.
:keyword exchange: Override the exchange to publish to.
Note that this exchange must have been declared.
"""
headers = None
routing_key = routing_key or self.routing_key
if self.exchange_type == "headers":
headers, routing_key = routing_key, ""
exchange = exchange or self.exchange
message = self.create_message(message_data, priority=priority,
delivery_mode=delivery_mode,
content_type=content_type,
content_encoding=content_encoding,
serializer=serializer)
self.backend.publish(message,
exchange=exchange, routing_key=routing_key,
mandatory=mandatory, immediate=immediate,
headers=headers)
|
Send a message.
:param message_data: The message data to send. Can be a list,
dictionary or a string.
:keyword routing_key: A custom routing key for the message.
If not set, the default routing key set in the :attr:`routing_key`
attribute is used.
:keyword mandatory: If set, the message has mandatory routing.
By default the message is silently dropped by the server if it
can't be routed to a queue. However, if the message is mandatory,
an exception will be raised instead.
:keyword immediate: Request immediate delivery.
If the message cannot be routed to a queue consumer immediately,
an exception will be raised. This is instead of the default
behaviour, where the server will accept and queue the message,
but with no guarantee that the message will ever be consumed.
:keyword delivery_mode: Override the default :attr:`delivery_mode`.
:keyword priority: The message priority, ``0`` to ``9``.
:keyword content_type: The message's content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword serializer: Override the default :attr:`serializer`.
:keyword exchange: Override the exchange to publish to.
Note that this exchange must have been declared.
|
entailment
|
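A sketch of the publishing side using carrot's documented Publisher API; the connection and the exchange/key/payload are placeholders, and the dict payload exercises the built-in serialization path (no explicit content_type):
from carrot.messaging import Publisher

publisher = Publisher(connection=connection, exchange="feed",
                      routing_key="importer")
publisher.send({"import_feed": "http://example.com/rss"})
publisher.close()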
def send(self, message_data, delivery_mode=None):
"""See :meth:`Publisher.send`"""
self.publisher.send(message_data, delivery_mode=delivery_mode)
|
See :meth:`Publisher.send`
|
entailment
|
def close(self):
"""Close any open channels."""
self.consumer.close()
self.publisher.close()
self._closed = True
|
Close any open channels.
|
entailment
|
def _receive_callback(self, raw_message):
"""Internal method used when a message is received in consume mode."""
message = self.backend.message_to_python(raw_message)
if self.auto_ack and not message.acknowledged:
message.ack()
try:
decoded = message.decode()
except Exception, exc:
if not self.on_decode_error:
raise
self.on_decode_error(message, exc)
else:
self.receive(decoded, message)
|
Internal method used when a message is received in consume mode.
|
entailment
|
def add_consumer_from_dict(self, queue, **options):
"""Add another consumer from dictionary configuration."""
options.setdefault("routing_key", options.pop("binding_key", None))
consumer = Consumer(self.connection, queue=queue,
backend=self.backend, **options)
self.consumers.append(consumer)
return consumer
|
Add another consumer from dictionary configuration.
|
entailment
|
def add_consumer(self, consumer):
"""Add another consumer from a :class:`Consumer` instance."""
consumer.backend = self.backend
self.consumers.append(consumer)
|
Add another consumer from a :class:`Consumer` instance.
|
entailment
|
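A sketch of extending a consumer set both ways, assuming a carrot ConsumerSet instance named `consumerset` and an existing Consumer named `extra_consumer` (all queue, exchange and key names are placeholders):
# From a dict of options (binding_key is remapped to routing_key):
consumerset.add_consumer_from_dict("logs", exchange="logs",
                                   exchange_type="topic",
                                   binding_key="logs.#")

# Or from an existing Consumer instance, which is rebound to the
# set's backend:
consumerset.add_consumer(extra_consumer)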