code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
|---|---|---|---|---|---|
'''
Abandon all message publications and acks in the current transaction.
Caller can specify a callback to use when the transaction has been
aborted.
'''
# Could call select() but spec 1.9.2.5 says to raise an exception
if not self.enabled:
raise self.TransactionsNotEnabled()
self.send_frame(MethodFrame(self.channel_id, 90, 30))
self._rollback_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_rollback_ok)
|
def rollback(self, cb=None)
|
Abandon all message publications and acks in the current transaction.
Caller can specify a callback to use when the transaction has been
aborted.
| 9.576322
| 5.751054
| 1.665142
|
'''
True if transport is synchronous or the connection has been forced
into synchronous mode, False otherwise.
'''
if self._transport is None:
if self._close_info and len(self._close_info['reply_text']) > 0:
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
raise ConnectionClosed("connection is closed")
return self.transport.synchronous or self._synchronous
|
def synchronous(self)
|
True if transport is synchronous or the connection has been forced
into synchronous mode, False otherwise.
| 4.454195
| 2.943401
| 1.513282
|
'''
Connect to a host and port.
'''
# Clear the connect state immediately since we're no longer connected
# at this point.
self._connected = False
# Only after the socket has connected do we clear this state; closed
# must be False so that writes can be buffered in writePacket(). The
# closed state might have been set to True due to a socket error or a
# redirect.
self._host = "%s:%d" % (host, port)
self._closed = False
self._close_info = {
'reply_code': 0,
'reply_text': 'failed to connect to %s' % (self._host),
'class_id': 0,
'method_id': 0
}
self._transport.connect((host, port))
self._transport.write(PROTOCOL_HEADER)
self._last_octet_time = time.time()
if self._synchronous_connect:
# Have to queue this callback just after connect, it can't go
# into the constructor because the channel needs to be
# "always there" for frame processing, but the synchronous
# callback can't be added until after the protocol header has
# been written. This SHOULD be registered before the protocol
# header is written, in the case where the header bytes are
# written, but this thread/greenlet/context does not return until
# after another thread/greenlet/context has read and processed the
# recv_start frame. Without more re-write to add_sync_cb though,
# it will block on reading responses that will never arrive
# because the protocol header isn't written yet. TBD if needs
# refactoring. Could encapsulate entirely here, wherein
# read_frames exits if protocol header not yet written. Like other
# synchronous behaviors, adding this callback will result in a
# blocking frame read and process loop until _recv_start and any
# subsequent synchronous callbacks have been processed. In the
# event that this is /not/ a synchronous transport, but the
# caller wants the connect to be synchronous so as to ensure that
# the connection is ready, then do a read frame loop here.
self._channels[0].add_synchronous_cb(self._channels[0]._recv_start)
while not self._connected:
self.read_frames()
|
def connect(self, host, port)
|
Connect to a host and port.
| 9.94875
| 9.838934
| 1.011161
|
'''
Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway.
'''
self._connected = False
if self._transport is not None:
try:
self._transport.disconnect()
except Exception:
self.logger.error(
"Failed to disconnect from %s", self._host, exc_info=True)
raise
finally:
self._transport = None
|
def disconnect(self)
|
Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway.
| 8.125326
| 1.765702
| 4.601753
|
msg = 'unknown cause'
self.logger.warning('transport to %s closed : %s' %
(self._host, kwargs.get('msg', msg)))
self._close_info = {
'reply_code': kwargs.get('reply_code', 0),
'reply_text': kwargs.get('msg', msg),
'class_id': kwargs.get('class_id', 0),
'method_id': kwargs.get('method_id', 0)
}
# We're not connected any more, but we're not closed without an
# explicit close call.
self._connected = False
self._transport = None
# Call back to a user-provided close function
self._callback_close()
|
def transport_closed(self, **kwargs)
|
Called by Transports when they close unexpectedly, not as a result of
Connection.disconnect().
TODO: document args
| 4.175374
| 4.115197
| 1.014623
|
'''Return the next possible channel id. Is a circular enumeration.'''
self._channel_counter += 1
if self._channel_counter >= self._channel_max:
self._channel_counter = 1
return self._channel_counter
|
def _next_channel_id(self)
|
Return the next possible channel id. Is a circular enumeration.
| 5.421177
| 2.502099
| 2.166652
|
if channel_id is None:
# adjust for channel 0
if len(self._channels) - 1 >= self._channel_max:
raise Connection.TooManyChannels(
"%d channels already open, max %d",
len(self._channels) - 1,
self._channel_max)
channel_id = self._next_channel_id()
while channel_id in self._channels:
channel_id = self._next_channel_id()
elif channel_id in self._channels:
return self._channels[channel_id]
else:
raise Connection.InvalidChannel(
"%s is not a valid channel id", channel_id)
# Call open() here so that ConnectionChannel doesn't have it called.
# Could also solve this other ways, but it's a HACK regardless.
rval = Channel(
self, channel_id, self._class_map, synchronous=synchronous)
self._channels[channel_id] = rval
rval.add_close_listener(self._channel_closed)
rval.open()
return rval
|
def channel(self, channel_id=None, synchronous=False)
|
Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist. If channel_id is not
None but no channel exists for that id, will raise InvalidChannel. If
there are already too many channels open, will raise TooManyChannels.
If synchronous=True, then the channel will act synchronous in all cases
where a protocol method supports `nowait=False`, or where there is an
implied callback in the protocol.
| 3.724746
| 3.681823
| 1.011658
|
'''
Close this connection.
'''
self._close_info = {
'reply_code': reply_code,
'reply_text': reply_text,
'class_id': class_id,
'method_id': method_id
}
if disconnect:
self._closed = True
self.disconnect()
self._callback_close()
else:
self._channels[0].close()
|
def close(self, reply_code=0, reply_text='', class_id=0, method_id=0,
disconnect=False)
|
Close this connection.
| 2.937835
| 2.871547
| 1.023084
|
'''
Read frames from the transport and process them. Some transports may
choose to do this in the background, in several threads, and so on.
'''
# It's possible in a concurrent environment that our transport handle
# has gone away, so handle that cleanly.
# TODO: Consider moving this block into Translator base class. In many
# ways it belongs there. One of the problems though is that this is
# essentially the read loop. Each Transport has different rules for
# how to kick this off, and in the case of gevent, this is how a
# blocking call to read from the socket is kicked off.
if self._transport is None:
return
# Send a heartbeat (if needed)
self._channels[0].send_heartbeat()
data = self._transport.read(self._heartbeat)
current_time = time.time()
if data is None:
# Wait for 2 heartbeat intervals before giving up. See AMQP 4.2.7:
# "If a peer detects no incoming traffic (i.e. received octets) for two heartbeat intervals or longer,
# it should close the connection"
if self._heartbeat and (current_time-self._last_octet_time > 2*self._heartbeat):
msg = 'Heartbeats not received from %s for %d seconds' % (self._host, 2*self._heartbeat)
self.transport_closed(msg=msg)
raise ConnectionClosed('Connection is closed: ' + msg)
return
self._last_octet_time = current_time
reader = Reader(data)
p_channels = set()
try:
for frame in Frame.read_frames(reader):
if self._debug > 1:
self.logger.debug("READ: %s", frame)
self._frames_read += 1
ch = self.channel(frame.channel_id)
ch.buffer_frame(frame)
p_channels.add(ch)
except Frame.FrameError as e:
# Frame error in the peer, disconnect
self.close(reply_code=501,
reply_text='frame error from %s : %s' % (
self._host, str(e)),
class_id=0, method_id=0, disconnect=True)
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
# NOTE: we process channels after buffering unused data in order to
# preserve the integrity of the input stream in case a channel needs to
# read input, such as when a channel framing error necessitates the use
# of the synchronous channel.close method. See `Channel.process_frames`.
#
# HACK: read the buffer contents and re-buffer. Would prefer to pass
# buffer back, but there's no good way of asking the total size of the
# buffer, comparing to tell(), and then re-buffering. There's also no
# ability to clear the buffer up to the current position. It would be
# awesome if we could free that memory without a new allocation.
if reader.tell() < len(data):
self._transport.buffer(data[reader.tell():])
self._transport.process_channels(p_channels)
|
def read_frames(self)
|
Read frames from the transport and process them. Some transports may
choose to do this in the background, in several threads, and so on.
| 7.02347
| 6.380361
| 1.100795
|
'''
Callback when protocol has been initialized on channel 0 and we're
ready to send out frames to set up any channels that have been
created.
'''
# In the rare case (a bug) where this is called but send_frame thinks
# they should be buffered, don't clobber.
frames = self._output_frame_buffer
self._output_frame_buffer = []
for frame in frames:
self.send_frame(frame)
|
def _flush_buffered_frames(self)
|
Callback when protocol has been initialized on channel 0 and we're
ready to send out frames to set up any channels that have been
created.
| 11.112757
| 4.243769
| 2.618606
|
'''
Send a single frame. If there is no transport or we're not connected
yet, append to the output buffer, else send immediately to the socket.
This is called from within the MethodFrames.
'''
if self._closed:
if self._close_info and len(self._close_info['reply_text']) > 0:
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
raise ConnectionClosed("connection is closed")
if self._transport is None or \
(not self._connected and frame.channel_id != 0):
self._output_frame_buffer.append(frame)
return
if self._debug > 1:
self.logger.debug("WRITE: %s", frame)
buf = bytearray()
frame.write_frame(buf)
if len(buf) > self._frame_max:
self.close(
reply_code=501,
reply_text='attempted to send frame of %d bytes, frame max %d' % (
len(buf), self._frame_max),
class_id=0, method_id=0, disconnect=True)
raise ConnectionClosed(
"connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
self._transport.write(buf)
self._frames_written += 1
|
def send_frame(self, frame)
|
Send a single frame. If there is no transport or we're not connected
yet, append to the output buffer, else send immediately to the socket.
This is called from within the MethodFrames.
| 3.413217
| 2.480149
| 1.376214
|
'''
Override the default dispatch since we don't need the rest of
the stack.
'''
if frame.type() == HeartbeatFrame.type():
self.send_heartbeat()
elif frame.type() == MethodFrame.type():
if frame.class_id == 10:
cb = self._method_map.get(frame.method_id)
if cb:
method = self.clear_synchronous_cb(cb)
method(frame)
else:
raise Channel.InvalidMethod(
"unsupported method %d on channel %d",
frame.method_id, self.channel_id)
else:
raise Channel.InvalidClass(
"class %d is not supported on channel %d",
frame.class_id, self.channel_id)
else:
raise Frame.InvalidFrameType(
"frame type %d is not supported on channel %d",
frame.type(), self.channel_id)
|
def dispatch(self, frame)
|
Override the default dispatch since we don't need the rest of
the stack.
| 3.522742
| 2.986824
| 1.179427
|
'''
Send a heartbeat if needed. Tracks last heartbeat send time.
'''
# Note that this does not take into account the time that we last
# sent a frame. Heartbeats are so small the effect should be quite
# limited. Also note that we're looking for something near to our
# scheduled interval, because if this is exact, then we'll likely
# actually send a heartbeat at twice the period, which could cause
# a broker to kill the connection if the period is large enough. The
# 90% bound is arbitrary but seems a sensible enough default.
if self.connection._heartbeat:
if time.time() >= (self._last_heartbeat_send + 0.9 *
self.connection._heartbeat):
self.send_frame(HeartbeatFrame(self.channel_id))
self._last_heartbeat_send = time.time()
|
def send_heartbeat(self)
|
Send a heartbeat if needed. Tracks last heartbeat send time.
| 9.007919
| 7.490956
| 1.202506
|
'''Send the start_ok message.'''
args = Writer()
args.write_table(self.connection._properties)
args.write_shortstr(self.connection._login_method)
args.write_longstr(self.connection._login_response)
args.write_shortstr(self.connection._locale)
self.send_frame(MethodFrame(self.channel_id, 10, 11, args))
self.add_synchronous_cb(self._recv_tune)
|
def _send_start_ok(self)
|
Send the start_ok message.
| 5.078632
| 5.11046
| 0.993772
|
'''
Cleanup local data.
'''
self._declare_cb = None
self._delete_cb = None
super(ExchangeClass, self)._cleanup()
|
def _cleanup(self)
|
Cleanup local data.
| 16.081564
| 9.383814
| 1.713756
|
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_shortstr(type).\
write_bits(passive, durable, False, False, nowait).\
write_table(arguments or {})
self.send_frame(MethodFrame(self.channel_id, 40, 10, args))
if not nowait:
self._declare_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_declare_ok)
|
def declare(self, exchange, type, passive=False, durable=False,
nowait=True, arguments=None, ticket=None, cb=None)
|
Declare the exchange.
exchange - The name of the exchange to declare
type - One of
| 3.850659
| 4.91465
| 0.783506
|
'''
Delete an exchange.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_bits(if_unused, nowait)
self.send_frame(MethodFrame(self.channel_id, 40, 20, args))
if not nowait:
self._delete_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_delete_ok)
|
def delete(self, exchange, if_unused=False, nowait=True, ticket=None,
cb=None)
|
Delete an exchange.
| 4.920982
| 4.776014
| 1.030353
|
'''Connect assuming a host and port tuple.
:param tuple: A tuple containing host and port for a connection.
:param klass: An implementation of socket.socket.
:raises socket.gaierror: If no address can be resolved.
:raises socket.error: If no connection can be made.
'''
self._host = "%s:%s" % (host, port)
for info in socket.getaddrinfo(host, port, 0, 0, socket.IPPROTO_TCP):
family, socktype, proto, _, sockaddr = info
self._sock = klass(family, socktype, proto)
self._sock.settimeout(self.connection._connect_timeout)
if self.connection._sock_opts:
_sock_opts = self.connection._sock_opts
for (level, optname), value in _sock_opts.iteritems():
self._sock.setsockopt(level, optname, value)
try:
self._sock.connect(sockaddr)
except socket.error:
self.connection.logger.exception(
"Failed to connect to %s:",
sockaddr,
)
continue
# After connecting, switch to full-blocking mode.
self._sock.settimeout(None)
break
else:
raise
|
def connect(self, (host, port), klass=socket.socket)
|
Connect assuming a host and port tuple.
:param tuple: A tuple containing host and port for a connection.
:param klass: An implementation of socket.socket.
:raises socket.gaierror: If no address can be resolved.
:raises socket.error: If no connection can be made.
| 3.329957
| 2.498748
| 1.33265
|
'''
Read from the transport. If timeout>0, will only block for `timeout`
seconds.
'''
e = None
if not hasattr(self, '_sock'):
return None
try:
# Note that we ignore both None and 0, i.e. we either block with a
# timeout or block completely and let gevent sort it out.
if timeout:
self._sock.settimeout(timeout)
else:
self._sock.settimeout(None)
data = self._sock.recv(
self._sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
if len(data):
if self.connection.debug > 1:
self.connection.logger.debug(
'read %d bytes from %s' % (len(data), self._host))
if len(self._buffer):
self._buffer.extend(data)
data = self._buffer
self._buffer = bytearray()
return data
# Note that no data means the socket is closed and we'll mark that
# below
except socket.timeout as e:
# Note that this is implemented differently and though it would be
# caught as an EnvironmentError, it has no errno. Not sure whose
# fault that is.
return None
except EnvironmentError as e:
# thrown if we have a timeout and no data
if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR):
return None
self.connection.logger.exception(
'error reading from %s' % (self._host))
self.connection.transport_closed(
msg='error reading from %s' % (self._host))
if e:
raise
|
def read(self, timeout=None)
|
Read from the transport. If timeout>0, will only block for `timeout`
seconds.
| 4.59308
| 4.192784
| 1.095473
|
'''
Buffer unused bytes from the input stream.
'''
if not hasattr(self, '_sock'):
return None
# data will always be a byte array
if len(self._buffer):
self._buffer.extend(data)
else:
self._buffer = bytearray(data)
|
def buffer(self, data)
|
Buffer unused bytes from the input stream.
| 5.427904
| 4.427286
| 1.226012
|
'''
Write some bytes to the transport.
'''
if not hasattr(self, '_sock'):
return None
try:
self._sock.sendall(data)
if self.connection.debug > 1:
self.connection.logger.debug(
'sent %d bytes to %s' % (len(data), self._host))
return
except EnvironmentError:
# sockets raise this type of error, and since if sendall() fails
# we're left in an indeterminate state, assume that any error we
# catch means that the connection is dead. Note that this
# assumption requires this to be a blocking socket; if we ever
# support non-blocking in this class then this whole method has
# to change a lot.
self.connection.logger.exception(
'error writing to %s' % (self._host))
self.connection.transport_closed(
msg='error writing to %s' % (self._host))
|
def write(self, data)
|
Write some bytes to the transport.
| 6.109616
| 5.854407
| 1.043593
|
'''
Cleanup all the local data.
'''
self._pending_consumers = None
self._consumer_cb = None
self._get_cb = None
self._recover_cb = None
self._cancel_cb = None
self._return_listener = None
super(BasicClass, self)._cleanup()
|
def _cleanup(self)
|
Cleanup all the local data.
| 8.455869
| 6.037729
| 1.400505
|
'''
Set a callback for basic.return listening. Will be called with a single
Message argument.
The return_info attribute of the Message will have the following
properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
RabbitMQ NOTE: if the channel was in confirmation mode when the message
was published, then basic.return will still be followed by basic.ack
later.
:param cb: callable cb(Message); pass None to reset
'''
if cb is not None and not callable(cb):
raise ValueError('return_listener callback must either be None or '
'a callable, but got: %r' % (cb,))
self._return_listener = cb
|
def set_return_listener(self, cb)
|
Set a callback for basic.return listening. Will be called with a single
Message argument.
The return_info attribute of the Message will have the following
properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
RabbitMQ NOTE: if the channel was in confirmation mode when the message
was published, then basic.return will still be followed by basic.ack
later.
:param cb: callable cb(Message); pass None to reset
| 6.471955
| 1.668473
| 3.878968
|
'''
Set QoS on this channel.
'''
args = Writer()
args.write_long(prefetch_size).\
write_short(prefetch_count).\
write_bit(is_global)
self.send_frame(MethodFrame(self.channel_id, 60, 10, args))
self.channel.add_synchronous_cb(self._recv_qos_ok)
|
def qos(self, prefetch_size=0, prefetch_count=0, is_global=False)
|
Set QoS on this channel.
| 6.420998
| 5.957142
| 1.077865
|
'''
Start a queue consumer. If `cb` is supplied, will be called when
broker confirms that consumer is registered.
'''
nowait = nowait and self.allow_nowait() and not cb
if nowait and consumer_tag == '':
consumer_tag = self._generate_consumer_tag()
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(consumer_tag).\
write_bits(no_local, no_ack, exclusive, nowait).\
write_table({}) # unused according to spec
self.send_frame(MethodFrame(self.channel_id, 60, 20, args))
if not nowait:
self._pending_consumers.append((consumer, cb))
self.channel.add_synchronous_cb(self._recv_consume_ok)
else:
self._consumer_cb[consumer_tag] = consumer
|
def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None)
|
Start a queue consumer. If `cb` is supplied, will be called when
broker confirms that consumer is registered.
| 4.454838
| 3.559598
| 1.2515
|
'''
Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel.
'''
if consumer:
tag = self._lookup_consumer_tag_by_consumer(consumer)
if tag:
consumer_tag = tag
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_shortstr(consumer_tag).\
write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 60, 30, args))
if not nowait:
self._cancel_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_cancel_ok)
else:
self._purge_consumer_by_tag(consumer_tag)
|
def cancel(self, consumer_tag='', nowait=True, consumer=None, cb=None)
|
Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel.
| 5.592693
| 3.313572
| 1.687814
|
'''Look up consumer tag given its consumer function
NOTE: this protected method may be called by derived classes
:param callable consumer: consumer function
:returns: matching consumer tag or None
:rtype: str or None
'''
for (tag, func) in self._consumer_cb.iteritems():
if func == consumer:
return tag
|
def _lookup_consumer_tag_by_consumer(self, consumer)
|
Look up consumer tag given its consumer function
NOTE: this protected method may be called by derived classes
:param callable consumer: consumer function
:returns: matching consumer tag or None
:rtype: str or None
| 6.581962
| 2.488221
| 2.645248
|
'''Purge consumer entry from this basic instance
NOTE: this protected method may be called by derived classes
:param str consumer_tag:
'''
try:
del self._consumer_cb[consumer_tag]
except KeyError:
self.logger.warning(
'no callback registered for consumer tag " %s "', consumer_tag)
else:
self.logger.info('purged consumer with tag " %s "', consumer_tag)
|
def _purge_consumer_by_tag(self, consumer_tag)
|
Purge consumer entry from this basic instance
NOTE: this protected method may be called by derived classes
:param str consumer_tag:
| 5.890861
| 2.983911
| 1.974208
|
'''
Publish a message.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_bits(mandatory, immediate)
self.send_frame(MethodFrame(self.channel_id, 60, 40, args))
self.send_frame(
HeaderFrame(self.channel_id, 60, 0, len(msg), msg.properties))
f_max = self.channel.connection.frame_max
for f in ContentFrame.create_frames(self.channel_id, msg.body, f_max):
self.send_frame(f)
|
def publish(self, msg, exchange, routing_key, mandatory=False,
immediate=False, ticket=None)
|
Publish a message.
| 4.218167
| 4.13621
| 1.019814
|
'''
Return a failed message. Not named "return" because python interpreter
can't deal with that.
'''
args = Writer()
args.write_short(reply_code).\
write_shortstr(reply_text).\
write_shortstr(exchange).\
write_shortstr(routing_key)
self.send_frame(MethodFrame(self.channel_id, 60, 50, args))
|
def return_msg(self, reply_code, reply_text, exchange, routing_key)
|
Return a failed message. Not named "return" because python interpreter
can't deal with that.
| 5.573202
| 2.801154
| 1.98961
|
'''
Handle basic.return method. If we have a complete message, will call the
user's return listener callback (if any). If there are not enough
frames, will re-queue current frames and raise a FrameUnderflow
NOTE: if the channel was in confirmation mode when the message was
published, then this will still be followed by basic.ack later
'''
msg = self._read_returned_msg(method_frame)
if callable(self._return_listener):
self._return_listener(msg)
else:
self.logger.error(
"Published message returned by broker: info=%s, properties=%s",
msg.return_info, msg.properties)
|
def _recv_return(self, method_frame)
|
Handle basic.return method. If we have a complete message, will call the
user's return listener callback (if any). If there are not enough
frames, will re-queue current frames and raise a FrameUnderflow
NOTE: if the channel was in confirmation mode when the message was
published, then this will still be followed by basic.ack later
| 11.839987
| 2.580518
| 4.588222
|
'''
Ask to fetch a single message from a queue. If a consumer is supplied,
the consumer will be called with either a Message argument, or None if
there is no message in queue. If a synchronous transport, Message or
None is returned.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_bit(no_ack)
self._get_cb.append(consumer)
self.send_frame(MethodFrame(self.channel_id, 60, 70, args))
return self.channel.add_synchronous_cb(self._recv_get_response)
|
def get(self, queue, consumer=None, no_ack=True, ticket=None)
|
Ask to fetch a single message from a queue. If a consumer is supplied,
the consumer will be called with either a Message argument, or None if
there is no message in queue. If a synchronous transport, Message or
None is returned.
| 6.921235
| 3.50443
| 1.974996
|
'''
Handle either get_ok or get_empty. This is a hack because the
synchronous callback stack is expecting one method to satisfy the
expectation. To keep that loop as tight as possible, work within
those constraints. Use of get is not recommended anyway.
'''
if method_frame.method_id == 71:
return self._recv_get_ok(method_frame)
elif method_frame.method_id == 72:
return self._recv_get_empty(method_frame)
|
def _recv_get_response(self, method_frame)
|
Handle either get_ok or get_empty. This is a hack because the
synchronous callback stack is expecting one method to satisfy the
expectation. To keep that loop as tight as possible, work within
those constraints. Use of get is not recommended anyway.
| 9.405009
| 1.62799
| 5.77707
|
'''
Acknowledge delivery of a message. If multiple=True, acknowledge up to
and including delivery_tag.
'''
args = Writer()
args.write_longlong(delivery_tag).\
write_bit(multiple)
self.send_frame(MethodFrame(self.channel_id, 60, 80, args))
|
def ack(self, delivery_tag, multiple=False)
|
Acknowledge delivery of a message. If multiple=True, acknowledge up to
and including delivery_tag.
| 6.036528
| 4.19162
| 1.440142
|
'''
Reject a message.
'''
args = Writer()
args.write_longlong(delivery_tag).\
write_bit(requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 90, args))
|
def reject(self, delivery_tag, requeue=False)
|
Reject a message.
| 6.304851
| 6.374498
| 0.989074
|
'''
Redeliver all unacknowledged messages on this channel.
This method is deprecated in favour of the synchronous
recover/recover-ok
'''
args = Writer()
args.write_bit(requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 100, args))
|
def recover_async(self, requeue=False)
|
Redeliver all unacknowledged messages on this channel.
This method is deprecated in favour of the synchronous
recover/recover-ok
| 7.102159
| 4.064595
| 1.747323
|
'''
Ask server to redeliver all unacknowledged messages.
'''
args = Writer()
args.write_bit(requeue)
# The XML spec is incorrect; this method is always synchronous
# http://lists.rabbitmq.com/pipermail/rabbitmq-discuss/2011-January/010738.html
self._recover_cb.append(cb)
self.send_frame(MethodFrame(self.channel_id, 60, 110, args))
self.channel.add_synchronous_cb(self._recv_recover_ok)
|
def recover(self, requeue=False, cb=None)
|
Ask server to redeliver all unacknowledged messages.
| 6.629932
| 6.200655
| 1.069231
|
'''
Support method to read a Message from the current frame buffer.
Will return a Message, or re-queue current frames and raise a
FrameUnderflow. Takes an optional argument on whether to read the
consumer tag so it can be used for both deliver and get-ok.
'''
header_frame, body = self._reap_msg_frames(method_frame)
if with_consumer_tag:
consumer_tag = method_frame.args.read_shortstr()
delivery_tag = method_frame.args.read_longlong()
redelivered = method_frame.args.read_bit()
exchange = method_frame.args.read_shortstr()
routing_key = method_frame.args.read_shortstr()
if with_message_count:
message_count = method_frame.args.read_long()
delivery_info = {
'channel': self.channel,
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
}
if with_consumer_tag:
delivery_info['consumer_tag'] = consumer_tag
if with_message_count:
delivery_info['message_count'] = message_count
return Message(body=body, delivery_info=delivery_info,
**header_frame.properties)
|
def _read_msg(self, method_frame, with_consumer_tag=False,
with_message_count=False)
|
Support method to read a Message from the current frame buffer.
Will return a Message, or re-queue current frames and raise a
FrameUnderflow. Takes an optional argument on whether to read the
consumer tag so it can be used for both deliver and get-ok.
| 2.931603
| 1.779614
| 1.647325
|
'''
Support method to read a returned (basic.return) Message from the
current frame buffer. Will return a Message with return_info, or
re-queue current frames and raise a FrameUnderflow.
:returns: Message with the return_info attribute set, where return_info
is a dict with the following properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
'''
header_frame, body = self._reap_msg_frames(method_frame)
return_info = {
'channel': self.channel,
'reply_code': method_frame.args.read_short(),
'reply_text': method_frame.args.read_shortstr(),
'exchange': method_frame.args.read_shortstr(),
'routing_key': method_frame.args.read_shortstr()
}
return Message(body=body, return_info=return_info,
**header_frame.properties)
|
def _read_returned_msg(self, method_frame)
|
Support method to read a returned (basic.return) Message from the
current frame buffer. Will return a Message with return_info, or
re-queue current frames and raise a FrameUnderflow.
:returns: Message with the return_info attribute set, where return_info
is a dict with the following properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
| 4.395488
| 1.731553
| 2.538466
|
'''
Support method to reap header frame and body from current frame buffer.
Used in processing of basic.return, basic.deliver, and basic.get_ok.
Will return a pair (<header frame>, <body>), or re-queue current frames
and raise a FrameUnderflow.
:returns: pair (<header frame>, <body>)
:rtype: tuple of (HeaderFrame, bytearray)
'''
# No need to assert that is instance of Header or Content frames
# because failure to access as such will result in exception that
# channel will pick up and handle accordingly.
header_frame = self.channel.next_frame()
if header_frame:
size = header_frame.size
body = bytearray()
rbuf_frames = deque([header_frame, method_frame])
while len(body) < size:
content_frame = self.channel.next_frame()
if content_frame:
rbuf_frames.appendleft(content_frame)
body.extend(content_frame.payload.buffer())
else:
self.channel.requeue_frames(rbuf_frames)
raise self.FrameUnderflow()
else:
self.channel.requeue_frames([method_frame])
raise self.FrameUnderflow()
return (header_frame, body)
|
def _reap_msg_frames(self, method_frame)
|
Support method to reap header frame and body from current frame buffer.
Used in processing of basic.return, basic.deliver, and basic.get_ok.
Will return a pair (<header frame>, <body>), or re-queue current frames
and raise a FrameUnderflow.
:returns: pair (<header frame>, <body>)
:rtype: tuple of (HeaderFrame, bytearray)
| 5.355059
| 2.828643
| 1.893155
|
'''
Publish a message. Caller can supply an optional callback which will
be fired when the transaction is committed. Tries very hard to avoid
closed and inactive channels, but a ChannelError or ConnectionError
may still be raised.
'''
user_cb = kwargs.pop('cb', None)
# If the first channel we grab is inactive, continue fetching until
# we get an active channel, then put the inactive channels back in
# the pool. Try to keep the overhead to a minimum.
channel = self._get_channel()
if channel and not channel.active:
inactive_channels = set()
while channel and not channel.active:
inactive_channels.add(channel)
channel = self._get_channel()
self._free_channels.update(inactive_channels)
# When the transaction is committed, add the channel back to the pool
# and call any user-defined callbacks. If there is anything in queue,
# pop it and call back to publish(). Only do so if the channel is
# still active though, because otherwise the message will end up at
# the back of the queue, breaking the original order.
def committed():
self._free_channels.add(channel)
if channel.active and not channel.closed:
self._process_queue()
if user_cb is not None:
user_cb()
if channel:
channel.publish_synchronous(*args, cb=committed, **kwargs)
else:
kwargs['cb'] = user_cb
self._queue.append((args, kwargs))
|
def publish(self, *args, **kwargs)
|
Publish a message. Caller can supply an optional callback which will
be fired when the transaction is committed. Tries very hard to avoid
closed and inactive channels, but a ChannelError or ConnectionError
may still be raised.
| 5.327984
| 3.782934
| 1.408426
|
'''
If there are any message in the queue, process one of them.
'''
if len(self._queue):
args, kwargs = self._queue.popleft()
self.publish(*args, **kwargs)
|
def _process_queue(self)
|
If there are any messages in the queue, process one of them.
| 5.9488
| 3.154197
| 1.885995
|
'''
Fetch a channel from the pool. Will return a new one if necessary. If
a channel in the free pool is closed, will remove it. Will return None
if we hit the cap. Will clean up any channels that were published to
but closed due to error.
'''
while len(self._free_channels):
rval = self._free_channels.pop()
if not rval.closed:
return rval
# don't adjust _channels value because the callback will do that
# and we don't want to double count it.
if not self._size or self._channels < self._size:
rval = self._connection.channel()
self._channels += 1
rval.add_close_listener(self._channel_closed_cb)
return rval
|
def _get_channel(self)
|
Fetch a channel from the pool. Will return a new one if necessary. If
a channel in the free pool is closed, will remove it. Will return None
if we hit the cap. Will clean up any channels that were published to
but closed due to error.
| 6.921582
| 2.964341
| 2.334948
|
'''
A generator which will create frames from a buffer given a max
frame size.
'''
size = frame_max - 8 # 8 bytes overhead for frame header and footer
offset = 0
while True:
payload = buf[offset:(offset + size)]
if len(payload) == 0:
break
offset += size
yield ContentFrame(channel_id, payload)
if offset >= len(buf):
break
|
def create_frames(self, channel_id, buf, frame_max)
|
A generator which will create frames from a buffer given a max
frame size.
| 4.740733
| 3.407645
| 1.391205
|
'''
Write the frame into an existing buffer.
'''
writer = Writer(buf)
writer.write_octet(self.type()).\
write_short(self.channel_id).\
write_long(len(self._payload)).\
write(self._payload).\
write_octet(0xce)
|
def write_frame(self, buf)
|
Write the frame into an existing buffer.
| 6.803553
| 5.580836
| 1.219092
|
'''
Connect assuming a host and port tuple. Implemented as non-blocking,
and will close the transport if there's an error
'''
self._host = "%s:%s" % (host, port)
self._sock = EventSocket(
read_cb=self._sock_read_cb,
close_cb=self._sock_close_cb,
error_cb=self._sock_error_cb,
debug=self.connection.debug,
logger=self.connection.logger)
if self.connection._sock_opts:
for k, v in self.connection._sock_opts.iteritems():
family, type = k
self._sock.setsockopt(family, type, v)
self._sock.setblocking(False)
self._sock.connect(
(host, port), timeout=self.connection._connect_timeout)
self._heartbeat_timeout = None
|
def connect(self, (host, port))
|
Connect assuming a host and port tuple. Implemented as non-blocking,
and will close the transport if there's an error
| 3.873752
| 2.814014
| 1.376593
|
'''
Read from the transport. If no data is available, should return None.
The timeout is ignored as this returns only data that has already
been buffered locally.
'''
# NOTE: copying over this comment from Connection, because there is
# knowledge captured here, even if the details are stale
# Because of the timer callback to dataRead when we re-buffered,
# there's a chance that in between we've lost the socket. If that's
# the case, just silently return as some code elsewhere would have
# already notified us. That bug could be fixed by improving the
# message reading so that we consume all possible messages and ensure
# that only a partial message was rebuffered, so that we can rely on
# the next read event to read the subsequent message.
if not hasattr(self, '_sock'):
return None
# This is sort of a hack because we're faking that data is ready, but
# it works for purposes of supporting timeouts
if timeout:
if self._heartbeat_timeout:
self._heartbeat_timeout.delete()
self._heartbeat_timeout = \
event.timeout(timeout, self._sock_read_cb, self._sock)
elif self._heartbeat_timeout:
self._heartbeat_timeout.delete()
self._heartbeat_timeout = None
return self._sock.read()
|
def read(self, timeout=None)
|
Read from the transport. If no data is available, should return None.
The timeout is ignored as this returns only data that has already
been buffered locally.
| 11.479639
| 8.837406
| 1.298983
|
'''
Disconnect from the transport. Typically socket.close(). This call is
welcome to raise exceptions, which the Connection will catch.
The transport is encouraged to allow for any pending writes to complete
before closing the socket.
'''
if not hasattr(self, '_sock'):
return
# TODO: If there are bytes left on the output, queue the close for
# later.
self._sock.close_cb = None
self._sock.close()
|
def disconnect(self)
|
Disconnect from the transport. Typically socket.close(). This call is
welcome to raise exceptions, which the Connection will catch.
The transport is encouraged to allow for any pending writes to complete
before closing the socket.
| 11.234509
| 3.233162
| 3.474774
|
'''
Parse a header frame for a channel given a Reader payload.
'''
class_id = payload.read_short()
weight = payload.read_short()
size = payload.read_longlong()
properties = {}
# The AMQP spec is overly-complex when it comes to handling header
# frames. The spec says that in addition to the first 16bit field,
# additional ones can follow which /may/ then be in the property list
# (because bit flags aren't in the list). Properly implementing custom
# values requires the ability change the properties and their types,
# which someone is welcome to do, but seriously, what's the point?
# Because the complexity of parsing and writing this frame directly
# impacts the speed at which messages can be processed, there are two
# branches for both a fast parse which assumes no changes to the
# properties and a slow parse. For now it's up to someone using custom
# headers to flip the flag.
if self.DEFAULT_PROPERTIES:
flag_bits = payload.read_short()
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if flag_bits & mask:
properties[key] = rfunc(payload)
else:
flags = []
while True:
flag_bits = payload.read_short()
flags.append(flag_bits)
if flag_bits & 1 == 0:
break
shift = 0
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if shift == 0:
if not flags:
break
flag_bits, flags = flags[0], flags[1:]
shift = 15
if flag_bits & (1 << shift):
properties[key] = rfunc(payload)
shift -= 1
return HeaderFrame(channel_id, class_id, weight, size, properties)
|
def parse(self, channel_id, payload)
|
Parse a header frame for a channel given a Reader payload.
| 6.437378
| 5.790168
| 1.111778
|
'''
Write the frame into an existing buffer.
'''
writer = Writer(buf)
writer.write_octet(self.type())
writer.write_short(self.channel_id)
# Track the position where we're going to write the total length
# of the frame arguments.
stream_args_len_pos = len(buf)
writer.write_long(0)
stream_method_pos = len(buf)
writer.write_short(self._class_id)
writer.write_short(self._weight)
writer.write_longlong(self._size)
# Like frame parsing, branch to faster code for default properties
if self.DEFAULT_PROPERTIES:
# Track the position where we're going to write the flags.
flags_pos = len(buf)
writer.write_short(0)
flag_bits = 0
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
val = self._properties.get(key, None)
if val is not None:
flag_bits |= mask
wfunc(writer, val)
writer.write_short_at(flag_bits, flags_pos)
else:
shift = 15
flag_bits = 0
flags = []
stack = deque()
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
val = self._properties.get(key, None)
if val is not None:
if shift == 0:
flags.append(flag_bits)
flag_bits = 0
shift = 15
flag_bits |= (1 << shift)
stack.append((wfunc, val))
shift -= 1
flags.append(flag_bits)
for flag_bits in flags:
writer.write_short(flag_bits)
for method, val in stack:
method(writer, val)
# Write the total length back at the beginning of the frame
stream_len = len(buf) - stream_method_pos
writer.write_long_at(stream_len, stream_args_len_pos)
writer.write_octet(0xce)
|
def write_frame(self, buf)
|
Write the frame into an existing buffer.
| 3.483538
| 3.390383
| 1.027476
|
'''
Read one or more frames from an IO stream. Buffer must support file
object interface.
After reading, caller will need to check if there are bytes remaining
in the stream. If there are, then that implies that there is one or
more incomplete frames and more data needs to be read. The position
of the cursor in the frame stream will mark the point at which the
last good frame was read. If the caller is expecting a sequence of
frames and only received a part of that sequence, they are responsible
for buffering those frames until the rest of the frames in the sequence
have arrived.
'''
rval = deque()
while True:
frame_start_pos = reader.tell()
try:
frame = Frame._read_frame(reader)
except Reader.BufferUnderflow:
# No more data in the stream
frame = None
except Reader.ReaderError as e:
# Some other format error
raise Frame.FormatError, str(e), sys.exc_info()[-1]
except struct.error as e:
raise Frame.FormatError, str(e), sys.exc_info()[-1]
if frame is None:
reader.seek(frame_start_pos)
break
rval.append(frame)
return rval
|
def read_frames(cls, reader)
|
Read one or more frames from an IO stream. Buffer must support file
object interface.
After reading, caller will need to check if there are bytes remaining
in the stream. If there are, then that implies that there is one or
more incomplete frames and more data needs to be read. The position
of the cursor in the frame stream will mark the point at which the
last good frame was read. If the caller is expecting a sequence of
frames and only received a part of that sequence, they are responsible
for buffering those frames until the rest of the frames in the sequence
have arrived.
| 5.247185
| 1.868489
| 2.808251
|
'''
Read a single frame from a Reader. Will return None if there is an
incomplete frame in the stream.
Raise MissingFooter if there's a problem reading the footer byte.
'''
frame_type = reader.read_octet()
channel_id = reader.read_short()
size = reader.read_long()
payload = Reader(reader, reader.tell(), size)
# Seek to end of payload
reader.seek(size, 1)
ch = reader.read_octet() # footer
if ch != 0xce:
raise Frame.FormatError(
'Framing error, unexpected byte: %x. frame type %x. channel %d, payload size %d',
ch, frame_type, channel_id, size)
frame_class = cls._frame_type_map.get(frame_type)
if not frame_class:
raise Frame.InvalidFrameType("Unknown frame type %x", frame_type)
return frame_class.parse(channel_id, payload)
|
def _read_frame(cls, reader)
|
Read a single frame from a Reader. Will return None if there is an
incomplete frame in the stream.
Raise MissingFooter if there's a problem reading the footer byte.
| 4.667657
| 3.28526
| 1.420788
|
'''
Unbind an exchange from another.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_shortstr(source).\
write_shortstr(routing_key).\
write_bit(nowait).\
write_table(arguments or {})
self.send_frame(MethodFrame(self.channel_id, 40, 40, args))
if not nowait:
self._unbind_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_unbind_ok)
|
def unbind(self, exchange, source, routing_key='', nowait=True,
arguments={}, ticket=None, cb=None)
|
Unbind an exchange from another.
| 3.804767
| 3.579444
| 1.062949
|
'''
Cleanup all the local data.
'''
self._ack_listener = None
self._nack_listener = None
self._broker_cancel_cb_map = None
super(RabbitBasicClass, self)._cleanup()
|
def _cleanup(self)
|
Cleanup all the local data.
| 14.503791
| 9.34201
| 1.552534
|
'''
Publish a message. Will return the id of the message if publisher
confirmations are enabled, else will return 0.
'''
if self.channel.confirm._enabled:
self._msg_id += 1
super(RabbitBasicClass, self).publish(*args, **kwargs)
return self._msg_id
|
def publish(self, *args, **kwargs)
|
Publish a message. Will return the id of the message if publisher
confirmations are enabled, else will return 0.
| 8.646436
| 4.089672
| 2.114213
|
'''Receive an ack from the broker.'''
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._ack_listener(self._last_ack_id)
else:
self._last_ack_id = delivery_tag
self._ack_listener(self._last_ack_id)
|
def _recv_ack(self, method_frame)
|
Receive an ack from the broker.
| 2.940122
| 2.977216
| 0.987541
|
'''Send a nack to the broker.'''
args = Writer()
args.write_longlong(delivery_tag).\
write_bits(multiple, requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 120, args))
|
def nack(self, delivery_tag, multiple=False, requeue=False)
|
Send a nack to the broker.
| 6.550632
| 6.646092
| 0.985637
|
'''Receive a nack from the broker.'''
if self._nack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple, requeue = method_frame.args.read_bits(2)
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._nack_listener(self._last_ack_id, requeue)
else:
self._last_ack_id = delivery_tag
self._nack_listener(self._last_ack_id, requeue)
|
def _recv_nack(self, method_frame)
|
Receive a nack from the broker.
| 2.966125
| 2.965705
| 1.000142
|
'''Start a queue consumer.
Accepts the following optional arg in addition to those of
`BasicClass.consume()`:
:param cancel_cb: a callable to be called when the broker cancels the
consumer; e.g., when the consumer's queue is deleted. See
www.rabbitmq.com/consumer-cancel.html.
:type cancel_cb: None or callable with signature cancel_cb(consumer_tag)
'''
# Register the consumer's broker-cancel callback entry
if cancel_cb is not None:
if not callable(cancel_cb):
raise ValueError('cancel_cb is not callable: %r' % (cancel_cb,))
if not consumer_tag:
consumer_tag = self._generate_consumer_tag()
self._broker_cancel_cb_map[consumer_tag] = cancel_cb
# Start consumer
super(RabbitBasicClass, self).consume(queue, consumer, consumer_tag,
no_local, no_ack, exclusive,
nowait, ticket, cb)
|
def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None, cancel_cb=None)
|
Start a queue consumer.
Accepts the following optional arg in addition to those of
`BasicClass.consume()`:
:param cancel_cb: a callable to be called when the broker cancels the
consumer; e.g., when the consumer's queue is deleted. See
www.rabbitmq.com/consumer-cancel.html.
:type cancel_cb: None or callable with signature cancel_cb(consumer_tag)
| 4.049529
| 2.131214
| 1.900105
|
'''
Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel.
'''
# Remove the consumer's broker-cancel callback entry
if consumer:
tag = self._lookup_consumer_tag_by_consumer(consumer)
if tag:
consumer_tag = tag
try:
del self._broker_cancel_cb_map[consumer_tag]
except KeyError:
self.logger.warning(
'cancel: no broker-cancel-cb entry for consumer tag %r '
'(consumer %r)', consumer_tag, consumer)
# Cancel consumer
super(RabbitBasicClass, self).cancel(consumer_tag, nowait, consumer, cb)
|
def cancel(self, consumer_tag='', nowait=True, consumer=None, cb=None)
|
Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel.
| 6.652439
| 3.604068
| 1.845814
|
'''Handle Basic.Cancel from broker
:param MethodFrame method_frame: Basic.Cancel method frame from broker
'''
self.logger.warning("consumer cancelled by broker: %r", method_frame)
consumer_tag = method_frame.args.read_shortstr()
# NOTE: per RabbitMQ spec, no-wait is always true in Basic.Cancel from
# broker
# Remove consumer from this basic instance
try:
cancel_cb = self._broker_cancel_cb_map.pop(consumer_tag)
except KeyError:
# Must be a race condition between user's cancel and broker's cancel
self.logger.warning(
'_recv_cancel: no broker-cancel-cb entry for consumer tag %r',
consumer_tag)
else:
if callable(cancel_cb):
# Purge from base class only when user supplies cancel_cb
self._purge_consumer_by_tag(consumer_tag)
# Notify user
cancel_cb(consumer_tag)
|
def _recv_cancel(self, method_frame)
|
Handle Basic.Cancel from broker
:param MethodFrame method_frame: Basic.Cancel method frame from broker
| 5.987456
| 5.332416
| 1.122841
|
'''
Set this channel to use publisher confirmations.
'''
nowait = nowait and self.allow_nowait() and not cb
if not self._enabled:
self._enabled = True
self.channel.basic._msg_id = 0
self.channel.basic._last_ack_id = 0
args = Writer()
args.write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 85, 10, args))
if not nowait:
self._select_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_select_ok)
|
def select(self, nowait=True, cb=None)
|
Set this channel to use publisher confirmations.
| 6.145918
| 5.268094
| 1.16663
|
'''
Close this channel. Routes to channel.close.
'''
# In the off chance that we call this twice. A good example is if
# there's an error in close listeners and so we're still inside a
# single call to process_frames, which will try to close this channel
# if there's an exception.
if hasattr(self, 'channel'):
self.channel.close(reply_code, reply_text, class_id, method_id)
|
def close(self, reply_code=0, reply_text='', class_id=0, method_id=0)
|
Close this channel. Routes to channel.close.
| 9.634088
| 8.100328
| 1.189345
|
'''
Helper for publishing a message using transactions. If 'cb' keyword
arg is supplied, will be called when the transaction is committed.
'''
cb = kwargs.pop('cb', None)
self.tx.select()
self.basic.publish(*args, **kwargs)
self.tx.commit(cb=cb)
|
def publish_synchronous(self, *args, **kwargs)
|
Helper for publishing a message using transactions. If 'cb' keyword
arg is supplied, will be called when the transaction is committed.
| 6.565114
| 2.620167
| 2.505609
|
'''
Dispatch a method.
'''
klass = self._class_map.get(method_frame.class_id)
if klass:
klass.dispatch(method_frame)
else:
raise Channel.InvalidClass(
"class %d is not supported on channel %d",
method_frame.class_id, self.channel_id)
|
def dispatch(self, method_frame)
|
Dispatch a method.
| 4.075304
| 3.867203
| 1.053812
|
'''
Process the input buffer.
'''
while len(self._frame_buffer):
# It would make sense to call next_frame, but it's
# technically faster to repeat the code here.
frame = self._frame_buffer.popleft()
if self._emergency_close_pending:
# Implement stability rule from AMQP 0.9.1 section 1.5.2.5.
# Method channel.close: "After sending this method, any
# received methods except Close and Close-OK MUST be discarded."
#
# NOTE: presently, we limit our implementation of the rule to
# the "emergency close" scenario to avoid potential adverse
# side-effect during normal user-initiated close
if (not isinstance(frame, MethodFrame) or
frame.class_id != self.channel.CLASS_ID or
frame.method_id not in (self.channel.CLOSE_METHOD_ID,
self.channel.CLOSE_OK_METHOD_ID)):
self.logger.warn("Emergency channel close: dropping input "
"frame %.255s", frame)
continue
try:
self.dispatch(frame)
except ProtocolClass.FrameUnderflow:
return
except (ConnectionClosed, ChannelClosed):
# Immediately raise if connection or channel is closed
raise
except Exception:
self.logger.exception(
"Closing on failed dispatch of frame %.255s", frame)
# Spec says that channel should be closed if there's a framing
# error. Unsure if we can send close if the current exception
# is transport level (e.g. gevent.GreenletExit)
self._emergency_close_pending = True
# Preserve the original exception and traceback during cleanup,
# only allowing system-exiting exceptions (e.g., SystemExit,
# KeyboardInterrupt) to override it
try:
raise
finally:
try:
self.close(500, "Failed to dispatch %s" % (str(frame)))
except Exception:
# Suppress secondary non-system-exiting exception in
# favor of the original exception
self.logger.exception("Channel close failed")
pass
|
def process_frames(self)
|
Process the input buffer.
| 7.766389
| 7.483155
| 1.03785
|
'''
Queue a frame for sending. Will send immediately if there are no
pending synchronous transactions on this connection.
'''
if self.closed:
if self.close_info and len(self.close_info['reply_text']) > 0:
raise ChannelClosed(
"channel %d is closed: %s : %s",
self.channel_id,
self.close_info['reply_code'],
self.close_info['reply_text'])
raise ChannelClosed()
# If there's any pending event at all, then it means that when the
# current dispatch loop started, all possible frames were flushed
# and the remaining item(s) starts with a sync callback. After careful
# consideration, it seems that it's safe to assume the len>0 means to
# buffer the frame. The other advantage here is
if not len(self._pending_events):
if not self._active and \
isinstance(frame, (ContentFrame, HeaderFrame)):
raise Channel.Inactive(
"Channel %d flow control activated", self.channel_id)
self._connection.send_frame(frame)
else:
self._pending_events.append(frame)
|
def send_frame(self, frame)
|
Queue a frame for sending. Will send immediately if there are no
pending synchronous transactions on this connection.
| 7.94731
| 6.613481
| 1.201683
|
'''
Add an expectation of a callback to release a synchronous transaction.
'''
if self.connection.synchronous or self._synchronous:
wrapper = SyncWrapper(cb)
self._pending_events.append(wrapper)
while wrapper._read:
# Don't check that the channel has been closed until after
# reading frames, in the case that this is processing a clean
# channel closed. If there's a protocol error during
# read_frames, this will loop back around and result in a
# channel closed exception.
if self.closed:
if self.close_info and \
len(self.close_info['reply_text']) > 0:
raise ChannelClosed(
"channel %d is closed: %s : %s",
self.channel_id,
self.close_info['reply_code'],
self.close_info['reply_text'])
raise ChannelClosed()
self.connection.read_frames()
return wrapper._result
else:
self._pending_events.append(cb)
|
def add_synchronous_cb(self, cb)
|
Add an expectation of a callback to release a synchronous transaction.
| 6.417976
| 5.471066
| 1.173076
|
'''
If the callback is the current expected callback, will clear it off the
stack. Else will raise in exception if there's an expectation but this
doesn't satisfy it.
'''
if len(self._pending_events):
ev = self._pending_events[0]
# We can't have a strict check using this simple mechanism,
# because we could be waiting for a synch response while messages
# are being published. So for now, if it's not in the list, do a
# check to see if the callback is in the pending list, and if so,
# then raise, because it means we received stuff out of order.
# Else just pass it through. Note that this situation could happen
# on any broker-initiated message.
if ev == cb:
self._pending_events.popleft()
self._flush_pending_events()
return ev
elif cb in self._pending_events:
raise ChannelError(
"Expected synchronous callback %s, got %s", ev, cb)
# Return the passed-in callback by default
return cb
|
def clear_synchronous_cb(self, cb)
|
If the callback is the current expected callback, will clear it off the
stack. Else will raise an exception if there's an expectation but this
doesn't satisfy it.
| 9.271934
| 6.033352
| 1.53678
|
'''
Send pending frames that are in the event queue.
'''
while len(self._pending_events) and \
isinstance(self._pending_events[0], Frame):
self._connection.send_frame(self._pending_events.popleft())
|
def _flush_pending_events(self)
|
Send pending frames that are in the event queue.
| 4.965882
| 2.846639
| 1.744472
|
'''
"Private" callback from the ChannelClass when a channel is closed. Only
called after broker initiated close, or we receive a close_ok. Caller
has the option to send a final frame, to be used to bypass any
synchronous or otherwise-pending frames so that the channel can be
cleanly closed.
'''
# delete all pending data and send final frame if thre is one. note
# that it bypasses send_frame so that even if the closed state is set,
# the frame is published.
if final_frame:
self._connection.send_frame(final_frame)
try:
self._notify_close_listeners()
finally:
self._pending_events = deque()
self._frame_buffer = deque()
# clear out other references for faster cleanup
for protocol_class in self._class_map.values():
protocol_class._cleanup()
delattr(self, protocol_class.name)
self._connection = None
self._class_map = None
self._close_listeners = set()
|
def _closed_cb(self, final_frame=None)
|
"Private" callback from the ChannelClass when a channel is closed. Only
called after broker initiated close, or we receive a close_ok. Caller
has the option to send a final frame, to be used to bypass any
synchronous or otherwise-pending frames so that the channel can be
cleanly closed.
| 9.828916
| 4.255165
| 2.309879
|
'''
Connect using a host,port tuple
'''
super(GeventTransport, self).connect((host, port), klass=socket.socket)
|
def connect(self, (host, port))
|
Connect using a host,port tuple
| 13.090016
| 8.631221
| 1.516589
|
'''
Read from the transport. If no data is available, should return None.
If timeout>0, will only block for `timeout` seconds.
'''
# If currently locked, another greenlet is trying to read, so yield
# control and then return none. Required if a Connection is configured
# to be synchronous, a sync callback is trying to read, and there's
# another read loop running read_frames. Without it, the run loop will
# release the lock but then immediately acquire it again. Yielding
# control in the reading thread after bytes are read won't fix
# anything, because it's quite possible the bytes read resulted in a
# frame that satisfied the synchronous callback, and so this needs to
# return immediately to first check the current status of synchronous
# callbacks before attempting to read again.
if self._read_lock.locked():
self._read_wait.wait(timeout)
return None
self._read_lock.acquire()
try:
return super(GeventTransport, self).read(timeout=timeout)
finally:
self._read_lock.release()
self._read_wait.set()
self._read_wait.clear()
|
def read(self, timeout=None)
|
Read from the transport. If no data is available, should return None.
If timeout>0, will only block for `timeout` seconds.
| 9.107909
| 7.422815
| 1.227016
|
'''
Buffer unused bytes from the input stream.
'''
self._read_lock.acquire()
try:
return super(GeventTransport, self).buffer(data)
finally:
self._read_lock.release()
|
def buffer(self, data)
|
Buffer unused bytes from the input stream.
| 4.921143
| 3.799305
| 1.295274
|
'''
Write some bytes to the transport.
'''
# MUST use a lock here else gevent could raise an exception if 2
# greenlets try to write at the same time. I was hoping that
# sendall() would do that blocking for me, but I guess not. May
# require an eventsocket-like buffer to speed up under high load.
self._write_lock.acquire()
try:
return super(GeventTransport, self).write(data)
finally:
self._write_lock.release()
|
def write(self, data)
|
Write some bytes to the transport.
| 9.686846
| 8.898329
| 1.088614
|
'''
Process a set of channels by calling Channel.process_frames() on each.
Some transports may choose to do this in unique ways, such as through
a pool of threads.
The default implementation will simply iterate over them and call
process_frames() on each.
'''
for channel in channels:
self._pool.spawn(channel.process_frames)
|
def process_channels(self, channels)
|
Process a set of channels by calling Channel.process_frames() on each.
Some transports may choose to do this in unique ways, such as through
a pool of threads.
The default implementation will simply iterate over them and call
process_frames() on each.
| 8.295458
| 1.978615
| 4.192558
|
'''
Dispatch a method for this protocol.
'''
method = self.dispatch_map.get(method_frame.method_id)
if method:
callback = self.channel.clear_synchronous_cb(method)
callback(method_frame)
else:
raise self.InvalidMethod(
"no method is registered with id: %d" % method_frame.method_id)
|
def dispatch(self, method_frame)
|
Dispatch a method for this protocol.
| 4.859916
| 4.203024
| 1.15629
|
'''
Simple seek. Follows standard interface.
'''
if whence == 0:
self._pos = self._start_pos + offset
elif whence == 1:
self._pos += offset
else:
self._pos = (self._end_pos - 1) + offset
|
def seek(self, offset, whence=0)
|
Simple seek. Follows standard interface.
| 3.78429
| 2.621255
| 1.443694
|
'''
Raise BufferUnderflow if there's not enough bytes to satisfy
the request.
'''
if self._pos + n > self._end_pos:
raise self.BufferUnderflow()
|
def _check_underflow(self, n)
|
Raise BufferUnderflow if there's not enough bytes to satisfy
the request.
| 6.738605
| 3.320427
| 2.029439
|
'''
Get a copy of the buffer that this is reading from. Returns a
buffer object
'''
return buffer(self._input, self._start_pos,
(self._end_pos - self._start_pos))
|
def buffer(self)
|
Get a copy of the buffer that this is reading from. Returns a
buffer object
| 9.470531
| 3.974603
| 2.382761
|
self._check_underflow(n)
rval = self._input[self._pos:self._pos + n]
self._pos += n
return rval
|
def read(self, n)
|
Read n bytes.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
| 4.141732
| 3.838335
| 1.079044
|
# Perform a faster check on underflow
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
result = ord(self._input[self._pos]) & 1
self._pos += 1
return result
|
def read_bit(self)
|
Read a single boolean value, returns 0 or 1. Convience for single
bit fields.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
| 6.330437
| 5.71573
| 1.107546
|
'''
Read several bits packed into the same field. Will return as a list.
The bit field itself is little-endian, though the order of the
returned array looks big-endian for ease of decomposition.
Reader('\x02').read_bits(2) -> [False,True]
Reader('\x08').read_bits(2) ->
[False,True,False,False,False,False,False,False]
first_field, second_field = Reader('\x02').read_bits(2)
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise ValueError if num < 0 or num > 9
'''
# Perform a faster check on underflow
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
if num < 0 or num >= 9:
raise ValueError("8 bits per field")
field = ord(self._input[self._pos])
result = map(lambda x: field >> x & 1, xrange(num))
self._pos += 1
return result
|
def read_bits(self, num)
|
Read several bits packed into the same field. Will return as a list.
The bit field itself is little-endian, though the order of the
returned array looks big-endian for ease of decomposition.
Reader('\x02').read_bits(2) -> [False,True]
Reader('\x08').read_bits(2) ->
[False,True,False,False,False,False,False,False]
first_field, second_field = Reader('\x02').read_bits(2)
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise ValueError if num < 0 or num > 9
| 6.207512
| 2.000917
| 3.102333
|
# Technically should look at unpacker.size, but skipping that is way
# faster and this method is the most-called of the readers
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
rval = unpacker(self._input, self._pos)[0]
self._pos += size
return rval
|
def read_octet(self, unpacker=Struct('B').unpack_from,
size=Struct('B').size)
|
Read one byte, return as an integer
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise struct.error if the data is malformed
| 8.13466
| 8.173656
| 0.995229
|
self._check_underflow(size)
rval = unpacker(self._input, self._pos)[0]
self._pos += size
return rval
|
def read_short(self, unpacker=Struct('>H').unpack_from,
size=Struct('>H').size)
|
Read an unsigned 16-bit integer
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise struct.error if the data is malformed
| 4.284645
| 4.878577
| 0.878257
|
# Only need to check underflow on the table once
tlen = self.read_long()
self._check_underflow(tlen)
end_pos = self._pos + tlen
result = {}
while self._pos < end_pos:
name = self._field_shortstr()
result[name] = self._read_field()
return result
|
def read_table(self)
|
Read an AMQP table, and return as a Python dictionary.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise UnicodeDecodeError if the text is mal-formed.
Will raise struct.error if the data is malformed
| 5.874676
| 5.226996
| 1.12391
|
'''
Read a single byte for field type, then read the value.
'''
ftype = self._input[self._pos]
self._pos += 1
reader = self.field_type_map.get(ftype)
if reader:
return reader(self)
raise Reader.FieldError('Unknown field type %s', ftype)
|
def _read_field(self)
|
Read a single byte for field type, then read the value.
| 6.133451
| 3.671945
| 1.670355
|
'''
Open the channel for communication.
'''
args = Writer()
args.write_shortstr('')
self.send_frame(MethodFrame(self.channel_id, 20, 10, args))
self.channel.add_synchronous_cb(self._recv_open_ok)
|
def open(self)
|
Open the channel for communication.
| 9.17525
| 7.276706
| 1.260907
|
'''
Send a flow control command.
'''
args = Writer()
args.write_bit(active)
self.send_frame(MethodFrame(self.channel_id, 20, 20, args))
self.channel.add_synchronous_cb(self._recv_flow_ok)
|
def _send_flow(self, active)
|
Send a flow control command.
| 7.453441
| 5.917195
| 1.259624
|
'''
Receive a flow control command from the broker
'''
self.channel._active = method_frame.args.read_bit()
args = Writer()
args.write_bit(self.channel.active)
self.send_frame(MethodFrame(self.channel_id, 20, 21, args))
if self._flow_control_cb is not None:
self._flow_control_cb()
|
def _recv_flow(self, method_frame)
|
Receive a flow control command from the broker
| 6.258631
| 5.307616
| 1.179179
|
'''
Receive a flow control ack from the broker.
'''
self.channel._active = method_frame.args.read_bit()
if self._flow_control_cb is not None:
self._flow_control_cb()
|
def _recv_flow_ok(self, method_frame)
|
Receive a flow control ack from the broker.
| 9.443575
| 6.917616
| 1.365149
|
'''
Close this channel. Caller has the option of specifying the reason for
closure and the class and method ids of the current frame in which an
error occurred. If in the event of an exception, the channel will be
marked as immediately closed. If channel is already closed, call is
ignored.
'''
if not getattr(self, 'channel', None) or self.channel._closed:
return
self.channel._close_info = {
'reply_code': reply_code,
'reply_text': reply_text,
'class_id': class_id,
'method_id': method_id
}
# exceptions here likely due to race condition as connection is closing
# cap the reply_text we send because it may be arbitrarily long
try:
args = Writer()
args.write_short(reply_code)
args.write_shortstr(reply_text[:255])
args.write_short(class_id)
args.write_short(method_id)
self.send_frame(MethodFrame(self.channel_id, 20, 40, args))
self.channel.add_synchronous_cb(self._recv_close_ok)
finally:
# Immediately set the closed flag so no more frames can be sent
# NOTE: in synchronous mode, by the time this is called we will
# have already run self.channel._closed_cb and so the channel
# reference is gone.
if self.channel:
self.channel._closed = True
|
def close(self, reply_code=0, reply_text='', class_id=0, method_id=0)
|
Close this channel. Caller has the option of specifying the reason for
closure and the class and method ids of the current frame in which an
error occurred. If in the event of an exception, the channel will be
marked as immediately closed. If channel is already closed, call is
ignored.
| 4.908768
| 3.377535
| 1.453358
|
'''
Receive a close command from the broker.
'''
self.channel._close_info = {
'reply_code': method_frame.args.read_short(),
'reply_text': method_frame.args.read_shortstr(),
'class_id': method_frame.args.read_short(),
'method_id': method_frame.args.read_short()
}
self.channel._closed = True
self.channel._closed_cb(
final_frame=MethodFrame(self.channel_id, 20, 41))
|
def _recv_close(self, method_frame)
|
Receive a close command from the broker.
| 3.412823
| 2.976516
| 1.146583
|
'''
Receive a close ack from the broker.
'''
self.channel._closed = True
self.channel._closed_cb()
|
def _recv_close_ok(self, method_frame)
|
Receive a close ack from the broker.
| 10.431999
| 5.806342
| 1.796656
|
'''
Cleanup all the local data.
'''
self._declare_cb = None
self._bind_cb = None
self._unbind_cb = None
self._delete_cb = None
self._purge_cb = None
super(QueueClass, self)._cleanup()
|
def _cleanup(self)
|
Cleanup all the local data.
| 6.60109
| 4.518812
| 1.460802
|
'''
bind to a queue.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_bit(nowait).\
write_table(arguments)
self.send_frame(MethodFrame(self.channel_id, 50, 20, args))
if not nowait:
self._bind_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_bind_ok)
|
def bind(self, queue, exchange, routing_key='', nowait=True, arguments={},
ticket=None, cb=None)
|
bind to a queue.
| 3.787053
| 3.704706
| 1.022228
|
'''
Unbind a queue from an exchange. This is always synchronous.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_table(arguments)
self.send_frame(MethodFrame(self.channel_id, 50, 50, args))
self._unbind_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_unbind_ok)
|
def unbind(self, queue, exchange, routing_key='', arguments={},
ticket=None, cb=None)
|
Unbind a queue from an exchange. This is always synchronous.
| 4.163915
| 3.501419
| 1.189208
|
'''
Purge all messages in a queue.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 50, 30, args))
if not nowait:
self._purge_cb.append(cb)
return self.channel.add_synchronous_cb(self._recv_purge_ok)
|
def purge(self, queue, nowait=True, ticket=None, cb=None)
|
Purge all messages in a queue.
| 5.403135
| 5.431325
| 0.99481
|
'''
Write multiple bits in a single byte field. The bits will be written in
little-endian order, but should be supplied in big endian order. Will
raise ValueError when more than 8 arguments are supplied.
write_bits(True, False) => 0x02
'''
# Would be nice to make this a bit smarter
if len(args) > 8:
raise ValueError("Can only write 8 bits at a time")
self._output_buffer.append(chr(
reduce(lambda x, y: xor(x, args[y] << y), xrange(len(args)), 0)))
return self
|
def write_bits(self, *args)
|
Write multiple bits in a single byte field. The bits will be written in
little-endian order, but should be supplied in big endian order. Will
raise ValueError when more than 8 arguments are supplied.
write_bits(True, False) => 0x02
| 6.64763
| 3.016879
| 2.203479
|
'''
Write a single bit. Convenience method for single bit args.
'''
self._output_buffer.append(pack(True if b else False))
return self
|
def write_bit(self, b, pack=Struct('B').pack)
|
Write a single bit. Convenience method for single bit args.
| 11.708279
| 4.494821
| 2.604838
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.