_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q7800
|
NounPhraseChunker._getPOS
|
train
|
def _getPOS( self, token, onlyFirst = True ):
    '''Return the part-of-speech tag(s) of *token*.

    With ``onlyFirst`` True (default), returns the POS tag of the first
    analysis only; otherwise a list with the POS tag of every analysis.
    '''
    analyses = token[ANALYSIS]
    if not onlyFirst:
        return [entry[POSTAG] for entry in analyses]
    return analyses[0][POSTAG]
|
python
|
{
"resource": ""
}
|
q7801
|
divide
|
train
|
def divide(elements, by, translate=False, sep=' '):
    """Distribute the `elements` list into bins defined by the `by` list.

    All elements are grouped into N bins, where N is the number of items
    in the `by` list.

    Parameters
    ----------
    elements: list of dict
        Elements to be grouped into bins.
    by: list of dict
        Elements defining the bins.
    translate: bool (default: False)
        When dividing, also translate start and end positions of elements.
    sep: str (default ' ')
        In case of multispans, the default text separator; required in
        order to tag correct start and end positions of elements.
    """
    bin_spans = [spans(bin_elem) for bin_elem in by]
    return divide_by_spans(elements, bin_spans, translate=translate, sep=sep)
|
python
|
{
"resource": ""
}
|
q7802
|
Disambiguator.__isListOfTexts
|
train
|
def __isListOfTexts(self, docs):
    """ Checks whether *docs* is a list whose members are all strings or Text objects;
    """
    if not isinstance(docs, list):
        return False
    return all(isinstance(item, (basestring, Text)) for item in docs)
|
python
|
{
"resource": ""
}
|
q7803
|
WordnetTagger.tag_text
|
train
|
def tag_text(self, text, **kwargs):
    """Annotates `analysis` entries in `text` with a list of lemmas' synsets and queried WordNet data in a 'wordnet' entry.
    Note
    ----
    Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
    Parameters
    ----------
    text: estnltk.text.Text
        Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
        E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
    pos : boolean, optional
        If True, annotates each synset with a correspnding `pos` (part-of-speech) tag.
    variants : boolean, optional
        If True, annotates each synset with a list of all its variants' (lemmas') literals.
    var_sense : boolean, optional
        If True and `variants` is True, annotates each variant/lemma with its sense number.
    var_definition : boolean, optional
        If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
    var_examples : boolean, optional
        If True and `variants` is True, annotates each variant/lemma with a list of its examples. Examples often missing in WordNet.
    relations : list of str, optional
        Holds interested relations. Legal relations are as follows:
        `antonym`, `be_in_state`, `belongs_to_class`, `causes`, `fuzzynym`, `has_holo_location`, `has_holo_madeof`, `has_holo_member`,
        `has_holo_part`, `has_holo_portion`, `has_holonym`, `has_hyperonym`, `has_hyponym`, `has_instance`, `has_mero_location`,
        `has_mero_madeof`, `has_mero_member`, `has_mero_part`, `has_mero_portion`, `has_meronym`, `has_subevent`, `has_xpos_hyperonym`,
        `has_xpos_hyponym`, `involved`, `involved_agent`, `involved_instrument`, `involved_location`, `involved_patient`,
        `involved_target_direction`, `is_caused_by`, `is_subevent_of`, `near_antonym`, `near_synonym`, `role`, `role_agent`, `role_instrument`,
        `role_location`, `role_patient`, `role_target_direction`, `state_of`, `xpos_fuzzynym`, `xpos_near_antonym`, `xpos_near_synonym`.
        Annotates each synset with related synsets' indices with respect to queried relations.
    Returns
    -------
    estnltk.text.Text
        In-place annotated `text`.
    """
    # Walk every word-level analysis; each one may carry several
    # candidate morphological readings.
    for analysis_match in text.analysis:
        for candidate in analysis_match:
            if candidate['partofspeech'] in PYVABAMORF_TO_WORDNET_POS_MAP:
                # Wordnet contains data about the given lemma and pos combination - will annotate.
                # NOTE(review): `wordnet_obj` is never attached to
                # `candidate` here; presumably tag_synsets() stores the
                # annotation on the candidate itself -- TODO confirm.
                wordnet_obj = {}
                tag_synsets(wordnet_obj, candidate, **kwargs)
    return text
|
python
|
{
"resource": ""
}
|
q7804
|
get_texts_and_labels
|
train
|
def get_texts_and_labels(sentence_chunk):
    """Split a tab-separated sentence chunk into parallel text/label lists.

    Each non-empty line contributes its first tab-column to the texts and
    its last tab-column to the labels; surrounding whitespace is stripped.
    """
    texts, labels = [], []
    for line in sentence_chunk.split('\n'):
        line = line.strip()
        if not line:
            continue
        columns = line.split('\t')
        texts.append(columns[0].strip())
        labels.append(columns[-1].strip())
    return texts, labels
|
python
|
{
"resource": ""
}
|
q7805
|
convert
|
train
|
def convert(document):
    """Convert a document (sentences of (text, label) pairs) to a Text object"""
    pieces = []
    pos = 0
    word_spans = []
    labels = []
    sentence_spans = []
    texts = []
    for sentence in document:
        sent_start = pos
        last_idx = len(sentence) - 1
        for i, (word_text, word_label) in enumerate(sentence):
            pieces.append(word_text)
            texts.append(word_text)
            labels.append(word_label)
            word_spans.append((pos, pos + len(word_text)))
            pos += len(word_text)
            # single space between words, none after the last word
            if i < last_idx:
                pieces.append(' ')
                pos += 1
        sentence_spans.append((sent_start, pos))
        # newline terminates each sentence
        pieces.append('\n')
        pos += 1
    words = []
    for word_text, (s, e), word_label in zip(texts, word_spans, labels):
        words.append({TEXT: word_text, START: s, END: e, LABEL: word_label})
    return {
        TEXT: ''.join(pieces),
        WORDS: words,
        SENTENCES: [{START: s, END: e} for s, e in sentence_spans]
    }
|
python
|
{
"resource": ""
}
|
q7806
|
TransactionClass.select
|
train
|
def select(self, cb=None):
    '''
    Switch this channel into transactional mode. No-op when transactions
    are already enabled.
    '''
    if self._enabled:
        return
    self._enabled = True
    self.send_frame(MethodFrame(self.channel_id, 90, 10))
    self._select_cb.append(cb)
    self.channel.add_synchronous_cb(self._recv_select_ok)
|
python
|
{
"resource": ""
}
|
q7807
|
TransactionClass.commit
|
train
|
def commit(self, cb=None):
    '''
    Commit the current transaction. An optional callback fires once the
    broker confirms the commit.
    '''
    # Could call select() but spec 1.9.2.3 says to raise an exception
    if not self.enabled:
        raise self.TransactionsNotEnabled()
    frame = MethodFrame(self.channel_id, 90, 20)
    self.send_frame(frame)
    self._commit_cb.append(cb)
    self.channel.add_synchronous_cb(self._recv_commit_ok)
|
python
|
{
"resource": ""
}
|
q7808
|
TransactionClass.rollback
|
train
|
def rollback(self, cb=None):
    '''
    Abandon all message publications and acks in the current transaction.
    An optional callback fires once the broker confirms the abort.
    '''
    # Could call select() but spec 1.9.2.5 says to raise an exception
    if not self.enabled:
        raise self.TransactionsNotEnabled()
    frame = MethodFrame(self.channel_id, 90, 30)
    self.send_frame(frame)
    self._rollback_cb.append(cb)
    self.channel.add_synchronous_cb(self._recv_rollback_ok)
|
python
|
{
"resource": ""
}
|
q7809
|
Connection.synchronous
|
train
|
def synchronous(self):
    '''
    True if the transport is synchronous or the connection has been
    forced into synchronous mode, False otherwise.

    Raises ConnectionClosed when the transport has gone away.
    '''
    if self._transport is None:
        info = self._close_info
        if info and len(info['reply_text']) > 0:
            raise ConnectionClosed("connection is closed: %s : %s" %
                                   (info['reply_code'],
                                    info['reply_text']))
        raise ConnectionClosed("connection is closed")
    return self.transport.synchronous or self._synchronous
|
python
|
{
"resource": ""
}
|
q7810
|
Connection.connect
|
train
|
def connect(self, host, port):
    '''
    Connect to a host and port, then begin the AMQP handshake by writing
    the protocol header. When configured for a synchronous connect,
    blocks in a frame-read loop until the handshake marks the connection
    as connected.

    :param host: broker host name or address
    :param port: broker TCP port
    '''
    # Clear the connect state immediately since we're no longer connected
    # at this point.
    self._connected = False
    # Only after the socket has connected do we clear this state; closed
    # must be False so that writes can be buffered in writePacket(). The
    # closed state might have been set to True due to a socket error or a
    # redirect.
    self._host = "%s:%d" % (host, port)
    self._closed = False
    # Pre-seed close info so a failed connect reports a useful message.
    self._close_info = {
        'reply_code': 0,
        'reply_text': 'failed to connect to %s' % (self._host),
        'class_id': 0,
        'method_id': 0
    }
    self._transport.connect((host, port))
    self._transport.write(PROTOCOL_HEADER)
    # Record when we last saw traffic, for heartbeat bookkeeping.
    self._last_octet_time = time.time()
    if self._synchronous_connect:
        # Have to queue this callback just after connect, it can't go
        # into the constructor because the channel needs to be
        # "always there" for frame processing, but the synchronous
        # callback can't be added until after the protocol header has
        # been written. This SHOULD be registered before the protocol
        # header is written, in the case where the header bytes are
        # written, but this thread/greenlet/context does not return until
        # after another thread/greenlet/context has read and processed the
        # recv_start frame. Without more re-write to add_sync_cb though,
        # it will block on reading responses that will never arrive
        # because the protocol header isn't written yet. TBD if needs
        # refactoring. Could encapsulate entirely here, wherein
        # read_frames exits if protocol header not yet written. Like other
        # synchronous behaviors, adding this callback will result in a
        # blocking frame read and process loop until _recv_start and any
        # subsequent synchronous callbacks have been processed. In the
        # event that this is /not/ a synchronous transport, but the
        # caller wants the connect to be synchronous so as to ensure that
        # the connection is ready, then do a read frame loop here.
        self._channels[0].add_synchronous_cb(self._channels[0]._recv_start)
        while not self._connected:
            self.read_frames()
|
python
|
{
"resource": ""
}
|
q7811
|
Connection.disconnect
|
train
|
def disconnect(self):
    '''
    Drop the transport connection without updating the closed state.
    After the transport is disconnected, the closed state will be True if
    this is called after a protocol shutdown, or False if the disconnect
    was in error.
    TODO: do we really need closed vs. connected states? this only adds
    complication and the whole reconnect process has been scrapped anyway.
    '''
    self._connected = False
    transport = self._transport
    if transport is None:
        return
    try:
        transport.disconnect()
    except Exception:
        self.logger.error(
            "Failed to disconnect from %s", self._host, exc_info=True)
        raise
    finally:
        # Drop the handle whether or not the disconnect succeeded.
        self._transport = None
|
python
|
{
"resource": ""
}
|
q7812
|
Connection._next_channel_id
|
train
|
def _next_channel_id(self):
'''Return the next possible channel id. Is a circular enumeration.'''
self._channel_counter += 1
if self._channel_counter >= self._channel_max:
self._channel_counter = 1
return self._channel_counter
|
python
|
{
"resource": ""
}
|
q7813
|
Connection.channel
|
train
|
def channel(self, channel_id=None, synchronous=False):
    """
    Fetch the Channel for a numeric channel_id, or allocate a fresh one
    when channel_id is None. Raises InvalidChannel for an unknown id and
    TooManyChannels when the channel cap is reached. With
    synchronous=True the channel acts synchronously wherever a protocol
    method supports `nowait=False` or implies a callback.
    """
    if channel_id is not None:
        if channel_id in self._channels:
            return self._channels[channel_id]
        raise Connection.InvalidChannel(
            "%s is not a valid channel id", channel_id)
    # len - 1 accounts for the implicit channel 0
    open_count = len(self._channels) - 1
    if open_count >= self._channel_max:
        raise Connection.TooManyChannels(
            "%d channels already open, max %d",
            open_count,
            self._channel_max)
    channel_id = self._next_channel_id()
    while channel_id in self._channels:
        channel_id = self._next_channel_id()
    # Call open() here so that ConnectionChannel doesn't have it called.
    # Could also solve this other ways, but it's a HACK regardless.
    rval = Channel(
        self, channel_id, self._class_map, synchronous=synchronous)
    self._channels[channel_id] = rval
    rval.add_close_listener(self._channel_closed)
    rval.open()
    return rval
|
python
|
{
"resource": ""
}
|
q7814
|
Connection.read_frames
|
train
|
def read_frames(self):
    '''
    Read frames from the transport and process them. Some transports may
    choose to do this in the background, in several threads, and so on.

    Raises ConnectionClosed when no traffic has been seen for two
    heartbeat intervals, or on a framing error from the peer.
    '''
    # It's possible in a concurrent environment that our transport handle
    # has gone away, so handle that cleanly.
    # TODO: Consider moving this block into Translator base class. In many
    # ways it belongs there. One of the problems though is that this is
    # essentially the read loop. Each Transport has different rules for
    # how to kick this off, and in the case of gevent, this is how a
    # blocking call to read from the socket is kicked off.
    if self._transport is None:
        return
    # Send a heartbeat (if needed)
    self._channels[0].send_heartbeat()
    data = self._transport.read(self._heartbeat)
    current_time = time.time()
    if data is None:
        # Wait for 2 heartbeat intervals before giving up. See AMQP 4.2.7:
        # "If a peer detects no incoming traffic (i.e. received octets) for two heartbeat intervals or longer,
        # it should close the connection"
        if self._heartbeat and (current_time-self._last_octet_time > 2*self._heartbeat):
            msg = 'Heartbeats not received from %s for %d seconds' % (self._host, 2*self._heartbeat)
            self.transport_closed(msg=msg)
            raise ConnectionClosed('Connection is closed: ' + msg)
        return
    # Any received octets count as liveness traffic.
    self._last_octet_time = current_time
    reader = Reader(data)
    p_channels = set()
    try:
        for frame in Frame.read_frames(reader):
            if self._debug > 1:
                self.logger.debug("READ: %s", frame)
            self._frames_read += 1
            ch = self.channel(frame.channel_id)
            ch.buffer_frame(frame)
            p_channels.add(ch)
    except Frame.FrameError as e:
        # Frame error in the peer, disconnect
        self.close(reply_code=501,
                   reply_text='frame error from %s : %s' % (
                       self._host, str(e)),
                   class_id=0, method_id=0, disconnect=True)
        raise ConnectionClosed("connection is closed: %s : %s" %
                               (self._close_info['reply_code'],
                                self._close_info['reply_text']))
    # NOTE: we process channels after buffering unused data in order to
    # preserve the integrity of the input stream in case a channel needs to
    # read input, such as when a channel framing error necessitates the use
    # of the synchronous channel.close method. See `Channel.process_frames`.
    #
    # HACK: read the buffer contents and re-buffer. Would prefer to pass
    # buffer back, but there's no good way of asking the total size of the
    # buffer, comparing to tell(), and then re-buffering. There's also no
    # ability to clear the buffer up to the current position. It would be
    # awesome if we could free that memory without a new allocation.
    if reader.tell() < len(data):
        self._transport.buffer(data[reader.tell():])
    self._transport.process_channels(p_channels)
|
python
|
{
"resource": ""
}
|
q7815
|
Connection._flush_buffered_frames
|
train
|
def _flush_buffered_frames(self):
'''
Callback when protocol has been initialized on channel 0 and we're
ready to send out frames to set up any channels that have been
created.
'''
# In the rare case (a bug) where this is called but send_frame thinks
# they should be buffered, don't clobber.
frames = self._output_frame_buffer
self._output_frame_buffer = []
for frame in frames:
self.send_frame(frame)
|
python
|
{
"resource": ""
}
|
q7816
|
Connection.send_frame
|
train
|
def send_frame(self, frame):
    '''
    Send a single frame. Buffers the frame when there is no transport or
    the handshake hasn't completed; otherwise writes it to the socket
    immediately. Raises ConnectionClosed on a closed connection or when
    the serialized frame exceeds the negotiated frame max.
    This is called from within the MethodFrames.
    '''
    if self._closed:
        info = self._close_info
        if info and len(info['reply_text']) > 0:
            raise ConnectionClosed("connection is closed: %s : %s" %
                                   (info['reply_code'],
                                    info['reply_text']))
        raise ConnectionClosed("connection is closed")
    # Until connected, only channel-0 (handshake) frames go out directly.
    not_ready = self._transport is None or (
        not self._connected and frame.channel_id != 0)
    if not_ready:
        self._output_frame_buffer.append(frame)
        return
    if self._debug > 1:
        self.logger.debug("WRITE: %s", frame)
    buf = bytearray()
    frame.write_frame(buf)
    if len(buf) > self._frame_max:
        self.close(
            reply_code=501,
            reply_text='attempted to send frame of %d bytes, frame max %d' % (
                len(buf), self._frame_max),
            class_id=0, method_id=0, disconnect=True)
        raise ConnectionClosed(
            "connection is closed: %s : %s" %
            (self._close_info['reply_code'],
             self._close_info['reply_text']))
    self._transport.write(buf)
    self._frames_written += 1
|
python
|
{
"resource": ""
}
|
q7817
|
ConnectionChannel.dispatch
|
train
|
def dispatch(self, frame):
    '''
    Specialized dispatch for channel 0: only heartbeats and
    connection-class (10) method frames are supported; the rest of the
    stack isn't needed here.
    '''
    frame_type = frame.type()
    if frame_type == HeartbeatFrame.type():
        self.send_heartbeat()
        return
    if frame_type != MethodFrame.type():
        raise Frame.InvalidFrameType(
            "frame type %d is not supported on channel %d",
            frame.type(), self.channel_id)
    if frame.class_id != 10:
        raise Channel.InvalidClass(
            "class %d is not supported on channel %d",
            frame.class_id, self.channel_id)
    cb = self._method_map.get(frame.method_id)
    if not cb:
        raise Channel.InvalidMethod(
            "unsupported method %d on channel %d",
            frame.method_id, self.channel_id)
    method = self.clear_synchronous_cb(cb)
    method(frame)
|
python
|
{
"resource": ""
}
|
q7818
|
ConnectionChannel.send_heartbeat
|
train
|
def send_heartbeat(self):
    '''
    Send a heartbeat frame when the 90%-of-interval deadline has passed,
    and track the last send time.
    '''
    # Note that this does not take into account the time that we last
    # sent a frame. Hearbeats are so small the effect should be quite
    # limited. Firing slightly early (90% of the interval) matters
    # because an exact comparison would tend to send at roughly twice
    # the period, which a broker could treat as a dead connection if the
    # period is large enough. The 90% bound is arbitrary but sensible.
    interval = self.connection._heartbeat
    if not interval:
        return
    deadline = self._last_heartbeat_send + 0.9 * interval
    if time.time() >= deadline:
        self.send_frame(HeartbeatFrame(self.channel_id))
        self._last_heartbeat_send = time.time()
|
python
|
{
"resource": ""
}
|
q7819
|
ConnectionChannel._send_start_ok
|
train
|
def _send_start_ok(self):
    '''Reply to connection.start with start_ok, then wait for tune.'''
    connection = self.connection
    args = Writer()
    args.write_table(connection._properties)
    args.write_shortstr(connection._login_method)
    args.write_longstr(connection._login_response)
    args.write_shortstr(connection._locale)
    self.send_frame(MethodFrame(self.channel_id, 10, 11, args))
    self.add_synchronous_cb(self._recv_tune)
|
python
|
{
"resource": ""
}
|
q7820
|
ExchangeClass._cleanup
|
train
|
def _cleanup(self):
    '''
    Drop local callback lists, then defer to the parent cleanup.
    '''
    self._declare_cb = None
    self._delete_cb = None
    super(ExchangeClass, self)._cleanup()
|
python
|
{
"resource": ""
}
|
q7821
|
ExchangeClass.delete
|
train
|
def delete(self, exchange, if_unused=False, nowait=True, ticket=None,
           cb=None):
    '''
    Delete an exchange. When a callback is supplied, or nowait isn't
    allowed, waits synchronously for delete_ok from the broker.
    '''
    nowait = nowait and self.allow_nowait() and not cb
    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(exchange).\
        write_bits(if_unused, nowait)
    self.send_frame(MethodFrame(self.channel_id, 40, 20, args))
    if nowait:
        return
    self._delete_cb.append(cb)
    self.channel.add_synchronous_cb(self._recv_delete_ok)
|
python
|
{
"resource": ""
}
|
q7822
|
SocketTransport.connect
|
train
|
def connect(self, (host, port), klass=socket.socket):
    '''Connect assuming a host and port tuple.

    NOTE: the tuple parameter in the signature is Python-2-only syntax;
    this module targets Python 2.

    :param tuple: A tuple containing host and port for a connection.
    :param klass: A implementation of socket.socket.
    :raises socket.gaierror: If no address can be resolved.
    :raises socket.error: If no connection can be made.
    '''
    self._host = "%s:%s" % (host, port)
    # Try every resolved address until one connects.
    for info in socket.getaddrinfo(host, port, 0, 0, socket.IPPROTO_TCP):
        family, socktype, proto, _, sockaddr = info
        self._sock = klass(family, socktype, proto)
        # Bound the connect itself with the configured timeout.
        self._sock.settimeout(self.connection._connect_timeout)
        if self.connection._sock_opts:
            _sock_opts = self.connection._sock_opts
            # iteritems() is Python-2-only.
            for (level, optname), value in _sock_opts.iteritems():
                self._sock.setsockopt(level, optname, value)
        try:
            self._sock.connect(sockaddr)
        except socket.error:
            self.connection.logger.exception(
                "Failed to connect to %s:",
                sockaddr,
            )
            continue
        # After connecting, switch to full-blocking mode.
        self._sock.settimeout(None)
        break
    else:
        # NOTE(review): this bare `raise` relies on Python 2 keeping the
        # last socket.error "current" after its except clause; under
        # Python 3 it would raise RuntimeError instead -- confirm the
        # target interpreter before porting.
        raise
|
python
|
{
"resource": ""
}
|
q7823
|
SocketTransport.read
|
train
|
def read(self, timeout=None):
    '''
    Read from the transport. If timeout>0, will only block for `timeout`
    seconds.

    Returns the received data (prefixed by any locally re-buffered
    bytes), or None on timeout / missing socket. Zero bytes received is
    treated as a closed socket.
    '''
    # NOTE(review): the `if e: raise` tail depends on Python 2 except
    # scoping; Python 3 unbinds the except target when the handler block
    # ends, so this pattern is Python-2-only -- confirm before porting.
    e = None
    if not hasattr(self, '_sock'):
        return None
    try:
        # Note that we ignore both None and 0, i.e. we either block with a
        # timeout or block completely and let gevent sort it out.
        if timeout:
            self._sock.settimeout(timeout)
        else:
            self._sock.settimeout(None)
        # Read up to the kernel receive-buffer size in one call.
        data = self._sock.recv(
            self._sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
        if len(data):
            if self.connection.debug > 1:
                self.connection.logger.debug(
                    'read %d bytes from %s' % (len(data), self._host))
            # Prepend any previously re-buffered bytes so the caller sees
            # a contiguous stream.
            if len(self._buffer):
                self._buffer.extend(data)
                data = self._buffer
                self._buffer = bytearray()
            return data
        # Note that no data means the socket is closed and we'll mark that
        # below
    except socket.timeout as e:
        # Note that this is implemented differently and though it would be
        # caught as an EnvironmentError, it has no errno. Not sure whose
        # fault that is.
        return None
    except EnvironmentError as e:
        # thrown if we have a timeout and no data
        if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR):
            return None
        self.connection.logger.exception(
            'error reading from %s' % (self._host))
    # Reaching here means the socket closed (empty read) or an
    # unexpected error occurred: mark the transport closed.
    self.connection.transport_closed(
        msg='error reading from %s' % (self._host))
    if e:
        raise
|
python
|
{
"resource": ""
}
|
q7824
|
BasicClass.set_return_listener
|
train
|
def set_return_listener(self, cb):
    '''
    Register (or clear, with None) a callback for basic.return listening.
    The callback receives a single Message argument whose return_info
    attribute has the following properties:
        'channel': Channel instance
        'reply_code': reply code (int)
        'reply_text': reply text
        'exchange': exchange name
        'routing_key': routing key
    RabbitMQ NOTE: if the channel was in confirmation mode when the
    message was published, then basic.return will still be followed by
    basic.ack later.
    :param cb: callable cb(Message); pass None to reset
    :raises ValueError: when cb is neither None nor callable
    '''
    acceptable = cb is None or callable(cb)
    if not acceptable:
        raise ValueError('return_listener callback must either be None or '
                         'a callable, but got: %r' % (cb,))
    self._return_listener = cb
|
python
|
{
"resource": ""
}
|
q7825
|
BasicClass.qos
|
train
|
def qos(self, prefetch_size=0, prefetch_count=0, is_global=False):
    '''
    Configure quality-of-service (prefetch window/count) for this
    channel and wait synchronously for qos_ok.
    '''
    payload = Writer()
    payload.write_long(prefetch_size).\
        write_short(prefetch_count).\
        write_bit(is_global)
    self.send_frame(MethodFrame(self.channel_id, 60, 10, payload))
    self.channel.add_synchronous_cb(self._recv_qos_ok)
|
python
|
{
"resource": ""
}
|
q7826
|
BasicClass.consume
|
train
|
def consume(self, queue, consumer, consumer_tag='', no_local=False,
            no_ack=True, exclusive=False, nowait=True, ticket=None,
            cb=None):
    '''
    Start a queue consumer. If `cb` is supplied, will be called when
    broker confirms that consumer is registered.
    '''
    nowait = nowait and self.allow_nowait() and not cb
    # With nowait there is no consume_ok carrying a broker-assigned tag,
    # so assign one locally.
    if nowait and consumer_tag == '':
        consumer_tag = self._generate_consumer_tag()
    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(queue).\
        write_shortstr(consumer_tag).\
        write_bits(no_local, no_ack, exclusive, nowait).\
        write_table({})  # unused according to spec
    self.send_frame(MethodFrame(self.channel_id, 60, 20, args))
    if nowait:
        self._consumer_cb[consumer_tag] = consumer
    else:
        self._pending_consumers.append((consumer, cb))
        self.channel.add_synchronous_cb(self._recv_consume_ok)
|
python
|
{
"resource": ""
}
|
q7827
|
BasicClass._lookup_consumer_tag_by_consumer
|
train
|
def _lookup_consumer_tag_by_consumer(self, consumer):
'''Look up consumer tag given its consumer function
NOTE: this protected method may be called by derived classes
:param callable consumer: consumer function
:returns: matching consumer tag or None
:rtype: str or None
'''
for (tag, func) in self._consumer_cb.iteritems():
if func == consumer:
return tag
|
python
|
{
"resource": ""
}
|
q7828
|
BasicClass._purge_consumer_by_tag
|
train
|
def _purge_consumer_by_tag(self, consumer_tag):
'''Purge consumer entry from this basic instance
NOTE: this protected method may be called by derived classes
:param str consumer_tag:
'''
try:
del self._consumer_cb[consumer_tag]
except KeyError:
self.logger.warning(
'no callback registered for consumer tag " %s "', consumer_tag)
else:
self.logger.info('purged consumer with tag " %s "', consumer_tag)
|
python
|
{
"resource": ""
}
|
q7829
|
BasicClass.publish
|
train
|
def publish(self, msg, exchange, routing_key, mandatory=False,
            immediate=False, ticket=None):
    '''
    Publish a message: a method frame, then the header frame, then as
    many content frames as the negotiated frame max requires for the
    body.
    '''
    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(exchange).\
        write_shortstr(routing_key).\
        write_bits(mandatory, immediate)
    self.send_frame(MethodFrame(self.channel_id, 60, 40, args))
    self.send_frame(
        HeaderFrame(self.channel_id, 60, 0, len(msg), msg.properties))
    frame_max = self.channel.connection.frame_max
    for content_frame in ContentFrame.create_frames(
            self.channel_id, msg.body, frame_max):
        self.send_frame(content_frame)
|
python
|
{
"resource": ""
}
|
q7830
|
BasicClass.return_msg
|
train
|
def return_msg(self, reply_code, reply_text, exchange, routing_key):
    '''
    Return a failed message to the broker. Not named "return" because
    python interpreter can't deal with that.
    '''
    payload = Writer()
    payload.write_short(reply_code).\
        write_shortstr(reply_text).\
        write_shortstr(exchange).\
        write_shortstr(routing_key)
    self.send_frame(MethodFrame(self.channel_id, 60, 50, payload))
|
python
|
{
"resource": ""
}
|
q7831
|
BasicClass.get
|
train
|
def get(self, queue, consumer=None, no_ack=True, ticket=None):
    '''
    Fetch a single message from `queue`. If a consumer is supplied, it is
    called with either a Message argument, or None when the queue is
    empty. On a synchronous transport, the Message (or None) is returned.
    '''
    payload = Writer()
    payload.write_short(ticket or self.default_ticket).\
        write_shortstr(queue).\
        write_bit(no_ack)
    self._get_cb.append(consumer)
    self.send_frame(MethodFrame(self.channel_id, 60, 70, payload))
    return self.channel.add_synchronous_cb(self._recv_get_response)
|
python
|
{
"resource": ""
}
|
q7832
|
BasicClass._recv_get_response
|
train
|
def _recv_get_response(self, method_frame):
'''
Handle either get_ok or get_empty. This is a hack because the
synchronous callback stack is expecting one method to satisfy the
expectation. To keep that loop as tight as possible, work within
those constraints. Use of get is not recommended anyway.
'''
if method_frame.method_id == 71:
return self._recv_get_ok(method_frame)
elif method_frame.method_id == 72:
return self._recv_get_empty(method_frame)
|
python
|
{
"resource": ""
}
|
q7833
|
BasicClass.ack
|
train
|
def ack(self, delivery_tag, multiple=False):
    '''
    Acknowledge delivery of a message; with multiple=True, acknowledge
    every delivery up to and including delivery_tag.
    '''
    payload = Writer()
    payload.write_longlong(delivery_tag).write_bit(multiple)
    self.send_frame(MethodFrame(self.channel_id, 60, 80, payload))
|
python
|
{
"resource": ""
}
|
q7834
|
BasicClass.reject
|
train
|
def reject(self, delivery_tag, requeue=False):
    '''
    Reject a single message, optionally asking the broker to requeue it.
    '''
    payload = Writer()
    payload.write_longlong(delivery_tag).write_bit(requeue)
    self.send_frame(MethodFrame(self.channel_id, 60, 90, payload))
|
python
|
{
"resource": ""
}
|
q7835
|
BasicClass.recover_async
|
train
|
def recover_async(self, requeue=False):
    '''
    Redeliver all unacknowledged messages on this channel.
    This method is deprecated in favour of the synchronous
    recover/recover-ok
    '''
    payload = Writer()
    payload.write_bit(requeue)
    self.send_frame(MethodFrame(self.channel_id, 60, 100, payload))
|
python
|
{
"resource": ""
}
|
q7836
|
BasicClass.recover
|
train
|
def recover(self, requeue=False, cb=None):
    '''
    Ask the server to redeliver all unacknowledged messages, waiting for
    recover_ok before firing the optional callback.
    '''
    payload = Writer()
    payload.write_bit(requeue)
    # The XML spec is incorrect; this method is always synchronous
    # http://lists.rabbitmq.com/pipermail/rabbitmq-discuss/2011-January/010738.html
    self._recover_cb.append(cb)
    self.send_frame(MethodFrame(self.channel_id, 60, 110, payload))
    self.channel.add_synchronous_cb(self._recv_recover_ok)
|
python
|
{
"resource": ""
}
|
q7837
|
BasicClass._read_msg
|
train
|
def _read_msg(self, method_frame, with_consumer_tag=False,
              with_message_count=False):
    '''
    Support method to read a Message from the current frame buffer.
    Will return a Message, or re-queue current frames and raise a
    FrameUnderflow. Takes an optional argument on whether to read the
    consumer tag so it can be used for both deliver and get-ok.

    :param method_frame: deliver/get-ok method frame whose args are
        consumed in protocol order
    :param with_consumer_tag: read a leading consumer tag (deliver)
    :param with_message_count: read a trailing message count (get-ok)
    '''
    header_frame, body = self._reap_msg_frames(method_frame)
    # Args must be consumed in protocol order: the consumer tag (when
    # present) precedes the delivery fields, and the message count (when
    # present) follows them.
    if with_consumer_tag:
        consumer_tag = method_frame.args.read_shortstr()
    delivery_tag = method_frame.args.read_longlong()
    redelivered = method_frame.args.read_bit()
    exchange = method_frame.args.read_shortstr()
    routing_key = method_frame.args.read_shortstr()
    if with_message_count:
        message_count = method_frame.args.read_long()
    delivery_info = {
        'channel': self.channel,
        'delivery_tag': delivery_tag,
        'redelivered': redelivered,
        'exchange': exchange,
        'routing_key': routing_key,
    }
    # consumer_tag / message_count are only bound when the corresponding
    # flag was passed, so they must be added conditionally.
    if with_consumer_tag:
        delivery_info['consumer_tag'] = consumer_tag
    if with_message_count:
        delivery_info['message_count'] = message_count
    return Message(body=body, delivery_info=delivery_info,
                   **header_frame.properties)
|
python
|
{
"resource": ""
}
|
q7838
|
ChannelPool.publish
|
train
|
def publish(self, *args, **kwargs):
    '''
    Publish a message. Caller can supply an optional callback which will
    be fired when the transaction is committed. Tries very hard to avoid
    closed and inactive channels, but a ChannelError or ConnectionError
    may still be raised.

    :param args: passed through to Channel.publish_synchronous
    :param kwargs: passed through; the optional `cb` keyword is consumed
        here and fired after the commit completes
    '''
    user_cb = kwargs.pop('cb', None)
    # If the first channel we grab is inactive, continue fetching until
    # we get an active channel, then put the inactive channels back in
    # the pool. Try to keep the overhead to a minimum.
    channel = self._get_channel()
    if channel and not channel.active:
        inactive_channels = set()
        while channel and not channel.active:
            inactive_channels.add(channel)
            channel = self._get_channel()
        self._free_channels.update(inactive_channels)
    # When the transaction is committed, add the channel back to the pool
    # and call any user-defined callbacks. If there is anything in queue,
    # pop it and call back to publish(). Only do so if the channel is
    # still active though, because otherwise the message will end up at
    # the back of the queue, breaking the original order.
    def committed():
        # Closure over `channel` and `user_cb` from the enclosing call.
        self._free_channels.add(channel)
        if channel.active and not channel.closed:
            self._process_queue()
        if user_cb is not None:
            user_cb()
    if channel:
        channel.publish_synchronous(*args, cb=committed, **kwargs)
    else:
        # No usable channel right now (pool at cap): queue the publish,
        # restoring the user callback for when it is re-issued.
        kwargs['cb'] = user_cb
        self._queue.append((args, kwargs))
|
python
|
{
"resource": ""
}
|
q7839
|
ChannelPool._process_queue
|
train
|
def _process_queue(self):
'''
If there are any message in the queue, process one of them.
'''
if len(self._queue):
args, kwargs = self._queue.popleft()
self.publish(*args, **kwargs)
|
python
|
{
"resource": ""
}
|
q7840
|
ChannelPool._get_channel
|
train
|
def _get_channel(self):
'''
Fetch a channel from the pool. Will return a new one if necessary. If
a channel in the free pool is closed, will remove it. Will return None
if we hit the cap. Will clean up any channels that were published to
but closed due to error.
'''
while len(self._free_channels):
rval = self._free_channels.pop()
if not rval.closed:
return rval
# don't adjust _channels value because the callback will do that
# and we don't want to double count it.
if not self._size or self._channels < self._size:
rval = self._connection.channel()
self._channels += 1
rval.add_close_listener(self._channel_closed_cb)
return rval
|
python
|
{
"resource": ""
}
|
q7841
|
ContentFrame.create_frames
|
train
|
def create_frames(self, channel_id, buf, frame_max):
    '''
    A generator yielding ContentFrames that cover `buf`, each carrying
    at most frame_max - 8 bytes of payload (8 bytes of frame
    header/footer overhead).
    '''
    size = frame_max - 8  # 8 bytes overhead for frame header and footer
    offset = 0
    while True:
        chunk = buf[offset:offset + size]
        if not chunk:
            break
        yield ContentFrame(channel_id, chunk)
        offset += size
|
python
|
{
"resource": ""
}
|
q7842
|
EventTransport.connect
|
train
|
# NOTE: uses Python 2 tuple parameter unpacking; not valid Python 3 syntax.
def connect(self, (host, port)):
    '''
    Connect assuming a host and port tuple. Implemented as non-blocking,
    and will close the transport if there's an error.

    :param host: hostname or address to connect to
    :param port: TCP port to connect to
    '''
    self._host = "%s:%s" % (host, port)
    # Wire the EventSocket callbacks back into this transport.
    self._sock = EventSocket(
        read_cb=self._sock_read_cb,
        close_cb=self._sock_close_cb,
        error_cb=self._sock_error_cb,
        debug=self.connection.debug,
        logger=self.connection.logger)
    # Apply any user-supplied socket options, keyed by (family, type).
    if self.connection._sock_opts:
        for k, v in self.connection._sock_opts.iteritems():
            family, type = k
            self._sock.setsockopt(family, type, v)
    self._sock.setblocking(False)
    self._sock.connect(
        (host, port), timeout=self.connection._connect_timeout)
    # No heartbeat timer until a read with a timeout is requested.
    self._heartbeat_timeout = None
|
python
|
{
"resource": ""
}
|
q7843
|
EventTransport.read
|
train
|
def read(self, timeout=None):
    '''
    Read from the transport. If no data is available, should return None.
    The timeout is ignored as this returns only data that has already
    been buffered locally.

    :param timeout: optional seconds before the read callback refires
    :returns: locally buffered bytes, or None if the socket is gone
    '''
    # NOTE: copying over this comment from Connection, because there is
    # knowledge captured here, even if the details are stale
    # Because of the timer callback to dataRead when we re-buffered,
    # there's a chance that in between we've lost the socket. If that's
    # the case, just silently return as some code elsewhere would have
    # already notified us. That bug could be fixed by improving the
    # message reading so that we consume all possible messages and ensure
    # that only a partial message was rebuffered, so that we can rely on
    # the next read event to read the subsequent message.
    if not hasattr(self, '_sock'):
        return None

    # This is sort of a hack because we're faking that data is ready, but
    # it works for purposes of supporting timeouts
    if timeout:
        # Replace any existing timer with a fresh one.
        if self._heartbeat_timeout:
            self._heartbeat_timeout.delete()
        self._heartbeat_timeout = \
            event.timeout(timeout, self._sock_read_cb, self._sock)
    elif self._heartbeat_timeout:
        # No timeout requested; cancel the outstanding timer.
        self._heartbeat_timeout.delete()
        self._heartbeat_timeout = None

    return self._sock.read()
|
python
|
{
"resource": ""
}
|
q7844
|
HeaderFrame.parse
|
train
|
def parse(self, channel_id, payload):
    '''
    Parse a header frame for a channel given a Reader payload.

    :param channel_id: channel the frame arrived on
    :param payload: Reader positioned at the start of the header body
    :returns: a HeaderFrame with class id, weight, size and properties
    '''
    class_id = payload.read_short()
    weight = payload.read_short()
    size = payload.read_longlong()
    properties = {}

    # The AMQP spec is overly-complex when it comes to handling header
    # frames. The spec says that in addition to the first 16bit field,
    # additional ones can follow which /may/ then be in the property list
    # (because bit flags aren't in the list). Properly implementing custom
    # values requires the ability change the properties and their types,
    # which someone is welcome to do, but seriously, what's the point?
    # Because the complexity of parsing and writing this frame directly
    # impacts the speed at which messages can be processed, there are two
    # branches for both a fast parse which assumes no changes to the
    # properties and a slow parse. For now it's up to someone using custom
    # headers to flip the flag.
    if self.DEFAULT_PROPERTIES:
        # Fast path: a single 16-bit flag word selects known properties.
        flag_bits = payload.read_short()
        for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
            if flag_bits & mask:
                properties[key] = rfunc(payload)
    else:
        # Slow path: the low bit of each flag word signals continuation.
        flags = []
        while True:
            flag_bits = payload.read_short()
            flags.append(flag_bits)
            if flag_bits & 1 == 0:
                break
        shift = 0
        for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
            if shift == 0:
                if not flags:
                    break
                flag_bits, flags = flags[0], flags[1:]
                shift = 15
            if flag_bits & (1 << shift):
                properties[key] = rfunc(payload)
            shift -= 1
    return HeaderFrame(channel_id, class_id, weight, size, properties)
|
python
|
{
"resource": ""
}
|
q7845
|
Frame.read_frames
|
train
|
# NOTE: uses Python 2 three-argument raise syntax; not valid Python 3.
def read_frames(cls, reader):
    '''
    Read one or more frames from an IO stream. Buffer must support file
    object interface.

    After reading, caller will need to check if there are bytes remaining
    in the stream. If there are, then that implies that there is one or
    more incomplete frames and more data needs to be read. The position
    of the cursor in the frame stream will mark the point at which the
    last good frame was read. If the caller is expecting a sequence of
    frames and only received a part of that sequence, they are responsible
    for buffering those frames until the rest of the frames in the sequence
    have arrived.

    :param reader: a Reader wrapping the raw byte stream
    :returns: a deque of parsed frames
    :raises Frame.FormatError: on malformed frame data
    '''
    rval = deque()

    while True:
        # Remember where this frame started so the cursor can be rolled
        # back if the frame turns out to be incomplete.
        frame_start_pos = reader.tell()
        try:
            frame = Frame._read_frame(reader)
        except Reader.BufferUnderflow:
            # No more data in the stream
            frame = None
        except Reader.ReaderError as e:
            # Some other format error
            raise Frame.FormatError, str(e), sys.exc_info()[-1]
        except struct.error as e:
            raise Frame.FormatError, str(e), sys.exc_info()[-1]

        if frame is None:
            # Partial frame: rewind so the caller can re-buffer it.
            reader.seek(frame_start_pos)
            break

        rval.append(frame)

    return rval
|
python
|
{
"resource": ""
}
|
q7846
|
Frame._read_frame
|
train
|
def _read_frame(cls, reader):
    '''
    Read a single frame from a Reader. Will return None if there is an
    incomplete frame in the stream.

    :param reader: Reader positioned at the start of a frame
    :returns: a parsed frame instance
    :raises Frame.FormatError: if the frame footer byte is wrong
    :raises Frame.InvalidFrameType: if the frame type is unknown
    '''
    frame_type = reader.read_octet()
    channel_id = reader.read_short()
    size = reader.read_long()

    # The payload is a windowed view over the reader's buffer.
    payload = Reader(reader, reader.tell(), size)

    # Seek to end of payload
    reader.seek(size, 1)
    ch = reader.read_octet()  # footer
    # AMQP frame-end marker is always 0xce.
    if ch != 0xce:
        raise Frame.FormatError(
            'Framing error, unexpected byte: %x.  frame type %x. channel %d, payload size %d',
            ch, frame_type, channel_id, size)

    frame_class = cls._frame_type_map.get(frame_type)
    if not frame_class:
        raise Frame.InvalidFrameType("Unknown frame type %x", frame_type)
    return frame_class.parse(channel_id, payload)
|
python
|
{
"resource": ""
}
|
q7847
|
RabbitExchangeClass.unbind
|
train
|
def unbind(self, exchange, source, routing_key='', nowait=True,
           arguments=None, ticket=None, cb=None):
    '''
    Unbind an exchange from another.

    :param exchange: name of the destination exchange
    :param source: name of the source exchange
    :param routing_key: routing key of the binding to remove
    :param nowait: if True (and no cb), don't wait for unbind-ok
    :param arguments: optional table of binding arguments
    :param ticket: optional access ticket; defaults to self.default_ticket
    :param cb: optional callback fired when the broker confirms the unbind
    '''
    # FIX: `arguments` previously defaulted to a shared mutable dict ({}).
    # Default to None and normalize below to avoid cross-call sharing;
    # the wire behavior is unchanged.
    nowait = nowait and self.allow_nowait() and not cb

    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(exchange).\
        write_shortstr(source).\
        write_shortstr(routing_key).\
        write_bit(nowait).\
        write_table(arguments or {})
    self.send_frame(MethodFrame(self.channel_id, 40, 40, args))

    if not nowait:
        self._unbind_cb.append(cb)
        self.channel.add_synchronous_cb(self._recv_unbind_ok)
|
python
|
{
"resource": ""
}
|
q7848
|
RabbitBasicClass.publish
|
train
|
def publish(self, *args, **kwargs):
    '''
    Publish a message. Will return the id of the message if publisher
    confirmations are enabled, else will return 0.

    :returns: the sequence id assigned to this message (0 when confirms
        are disabled, since _msg_id is never incremented then)
    '''
    # Each confirmed publish gets the next sequence number; the broker's
    # acks/nacks reference these ids.
    if self.channel.confirm._enabled:
        self._msg_id += 1
    super(RabbitBasicClass, self).publish(*args, **kwargs)

    return self._msg_id
|
python
|
{
"resource": ""
}
|
q7849
|
RabbitBasicClass._recv_ack
|
train
|
def _recv_ack(self, method_frame):
'''Receive an ack from the broker.'''
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._ack_listener(self._last_ack_id)
else:
self._last_ack_id = delivery_tag
self._ack_listener(self._last_ack_id)
|
python
|
{
"resource": ""
}
|
q7850
|
RabbitBasicClass.nack
|
train
|
def nack(self, delivery_tag, multiple=False, requeue=False):
    '''Send a nack to the broker for the given delivery tag.'''
    payload = Writer()
    payload.write_longlong(delivery_tag)
    payload.write_bits(multiple, requeue)
    # basic.nack is class 60, method 120
    self.send_frame(MethodFrame(self.channel_id, 60, 120, payload))
|
python
|
{
"resource": ""
}
|
q7851
|
RabbitBasicClass._recv_nack
|
train
|
def _recv_nack(self, method_frame):
'''Receive a nack from the broker.'''
if self._nack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple, requeue = method_frame.args.read_bits(2)
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._nack_listener(self._last_ack_id, requeue)
else:
self._last_ack_id = delivery_tag
self._nack_listener(self._last_ack_id, requeue)
|
python
|
{
"resource": ""
}
|
q7852
|
RabbitBasicClass._recv_cancel
|
train
|
def _recv_cancel(self, method_frame):
    '''Handle Basic.Cancel from broker

    :param MethodFrame method_frame: Basic.Cancel method frame from broker
    '''
    self.logger.warning("consumer cancelled by broker: %r", method_frame)

    consumer_tag = method_frame.args.read_shortstr()

    # NOTE: per RabbitMQ spec, no-wait is always true in Basic.Cancel from
    # broker

    # Remove consumer from this basic instance
    try:
        cancel_cb = self._broker_cancel_cb_map.pop(consumer_tag)
    except KeyError:
        # Must be a race condition between user's cancel and broker's cancel
        self.logger.warning(
            '_recv_cancel: no broker-cancel-cb entry for consumer tag %r',
            consumer_tag)
    else:
        if callable(cancel_cb):
            # Purge from base class only when user supplies cancel_cb
            self._purge_consumer_by_tag(consumer_tag)
            # Notify user
            cancel_cb(consumer_tag)
|
python
|
{
"resource": ""
}
|
q7853
|
RabbitConfirmClass.select
|
train
|
def select(self, nowait=True, cb=None):
    '''
    Set this channel to use publisher confirmations.

    :param nowait: if True (and no cb), don't wait for select-ok
    :param cb: optional callback fired when the broker confirms
    '''
    nowait = nowait and self.allow_nowait() and not cb

    if not self._enabled:
        self._enabled = True
        # Reset the publish/ack sequence counters for confirm mode.
        self.channel.basic._msg_id = 0
        self.channel.basic._last_ack_id = 0
    args = Writer()
    args.write_bit(nowait)
    # confirm.select is class 85, method 10
    self.send_frame(MethodFrame(self.channel_id, 85, 10, args))
    if not nowait:
        self._select_cb.append(cb)
        self.channel.add_synchronous_cb(self._recv_select_ok)
|
python
|
{
"resource": ""
}
|
q7854
|
Channel.close
|
train
|
def close(self, reply_code=0, reply_text='', class_id=0, method_id=0):
    '''
    Close this channel. Routes to channel.close.
    '''
    # Guard against a double close: if cleanup already removed the
    # `channel` protocol-class attribute (e.g. an error inside close
    # listeners during a single process_frames pass), this is a no-op.
    if not hasattr(self, 'channel'):
        return
    self.channel.close(reply_code, reply_text, class_id, method_id)
|
python
|
{
"resource": ""
}
|
q7855
|
Channel.publish_synchronous
|
train
|
def publish_synchronous(self, *args, **kwargs):
    '''
    Publish a message inside a transaction (tx.select / publish /
    tx.commit). An optional 'cb' keyword argument is invoked once the
    commit completes.
    '''
    commit_cb = kwargs.pop('cb', None)
    self.tx.select()
    self.basic.publish(*args, **kwargs)
    self.tx.commit(cb=commit_cb)
|
python
|
{
"resource": ""
}
|
q7856
|
Channel.dispatch
|
train
|
def dispatch(self, method_frame):
    '''
    Dispatch a method frame to the protocol class registered for its
    class id. Raises Channel.InvalidClass for unknown class ids.
    '''
    handler = self._class_map.get(method_frame.class_id)
    if not handler:
        raise Channel.InvalidClass(
            "class %d is not supported on channel %d",
            method_frame.class_id, self.channel_id)
    handler.dispatch(method_frame)
|
python
|
{
"resource": ""
}
|
q7857
|
Channel.process_frames
|
train
|
def process_frames(self):
    '''
    Process the input buffer, dispatching each buffered frame in order.
    On dispatch failure the channel is closed and the original exception
    is re-raised.
    '''
    while len(self._frame_buffer):
        # It would make sense to call next_frame, but it's
        # technically faster to repeat the code here.
        frame = self._frame_buffer.popleft()

        if self._emergency_close_pending:
            # Implement stability rule from AMQP 0.9.1 section 1.5.2.5.
            # Method channel.close: "After sending this method, any
            # received methods except Close and Close-OK MUST be discarded."
            #
            # NOTE: presently, we limit our implementation of the rule to
            # the "emergency close" scenario to avoid potential adverse
            # side-effect during normal user-initiated close
            if (not isinstance(frame, MethodFrame) or
                    frame.class_id != self.channel.CLASS_ID or
                    frame.method_id not in (self.channel.CLOSE_METHOD_ID,
                                            self.channel.CLOSE_OK_METHOD_ID)):
                self.logger.warn("Emergency channel close: dropping input "
                                 "frame %.255s", frame)
                continue

        try:
            self.dispatch(frame)
        except ProtocolClass.FrameUnderflow:
            # Partial content: wait for more data before continuing.
            return
        except (ConnectionClosed, ChannelClosed):
            # Immediately raise if connection or channel is closed
            raise
        except Exception:
            self.logger.exception(
                "Closing on failed dispatch of frame %.255s", frame)
            # Spec says that channel should be closed if there's a framing
            # error. Unsure if we can send close if the current exception
            # is transport level (e.g. gevent.GreenletExit)
            self._emergency_close_pending = True

            # Preserve the original exception and traceback during cleanup,
            # only allowing system-exiting exceptions (e.g., SystemExit,
            # KeyboardInterrupt) to override it
            try:
                raise
            finally:
                try:
                    self.close(500, "Failed to dispatch %s" % (str(frame)))
                except Exception:
                    # Suppress secondary non-system-exiting exception in
                    # favor of the original exception
                    self.logger.exception("Channel close failed")
                    pass
|
python
|
{
"resource": ""
}
|
q7858
|
Channel.send_frame
|
train
|
def send_frame(self, frame):
    '''
    Queue a frame for sending. Will send immediately if there are no
    pending synchronous transactions on this connection.

    :param frame: the frame to send or buffer
    :raises ChannelClosed: if this channel is already closed
    :raises Channel.Inactive: if flow control blocks content frames
    '''
    if self.closed:
        # Include the broker-supplied close reason when we have one.
        if self.close_info and len(self.close_info['reply_text']) > 0:
            raise ChannelClosed(
                "channel %d is closed: %s : %s",
                self.channel_id,
                self.close_info['reply_code'],
                self.close_info['reply_text'])
        raise ChannelClosed()

    # If there's any pending event at all, then it means that when the
    # current dispatch loop started, all possible frames were flushed
    # and the remaining item(s) starts with a sync callback. After careful
    # consideration, it seems that it's safe to assume the len>0 means to
    # buffer the frame. The other advantage here is
    if not len(self._pending_events):
        # Flow control only blocks message content, not method frames.
        if not self._active and \
                isinstance(frame, (ContentFrame, HeaderFrame)):
            raise Channel.Inactive(
                "Channel %d flow control activated", self.channel_id)
        self._connection.send_frame(frame)
    else:
        self._pending_events.append(frame)
|
python
|
{
"resource": ""
}
|
q7859
|
Channel.add_synchronous_cb
|
train
|
def add_synchronous_cb(self, cb):
    '''
    Add an expectation of a callback to release a synchronous transaction.

    In synchronous mode this blocks, reading frames until the wrapped
    callback fires, and returns its result. In asynchronous mode the
    callback is simply queued and None is returned.

    :param cb: the callback expected to complete the transaction
    :raises ChannelClosed: if the channel closes while waiting
    '''
    if self.connection.synchronous or self._synchronous:
        # The wrapper records when it has been invoked and captures the
        # callback's result for the caller.
        wrapper = SyncWrapper(cb)
        self._pending_events.append(wrapper)
        while wrapper._read:
            # Don't check that the channel has been closed until after
            # reading frames, in the case that this is processing a clean
            # channel closed. If there's a protocol error during
            # read_frames, this will loop back around and result in a
            # channel closed exception.
            if self.closed:
                if self.close_info and \
                        len(self.close_info['reply_text']) > 0:
                    raise ChannelClosed(
                        "channel %d is closed: %s : %s",
                        self.channel_id,
                        self.close_info['reply_code'],
                        self.close_info['reply_text'])
                raise ChannelClosed()
            self.connection.read_frames()
        return wrapper._result
    else:
        self._pending_events.append(cb)
|
python
|
{
"resource": ""
}
|
q7860
|
Channel.clear_synchronous_cb
|
train
|
def clear_synchronous_cb(self, cb):
    '''
    If the callback is the current expected callback, pop it off the
    pending queue and flush buffered frames. If a different callback is
    expected but `cb` is queued further back, raise, because responses
    arrived out of order. Otherwise, pass `cb` through unchanged.
    '''
    if self._pending_events:
        head = self._pending_events[0]

        # We can't have a strict check using this simple mechanism,
        # because we could be waiting for a synch response while messages
        # are being published. So for now, if it's not in the list, do a
        # check to see if the callback is in the pending list, and if so,
        # then raise, because it means we received stuff out of order.
        # Else just pass it through. Note that this situation could happen
        # on any broker-initiated message.
        if head == cb:
            self._pending_events.popleft()
            self._flush_pending_events()
            return head
        if cb in self._pending_events:
            raise ChannelError(
                "Expected synchronous callback %s, got %s", head, cb)

    # Return the passed-in callback by default
    return cb
|
python
|
{
"resource": ""
}
|
q7861
|
Channel._flush_pending_events
|
train
|
def _flush_pending_events(self):
'''
Send pending frames that are in the event queue.
'''
while len(self._pending_events) and \
isinstance(self._pending_events[0], Frame):
self._connection.send_frame(self._pending_events.popleft())
|
python
|
{
"resource": ""
}
|
q7862
|
Channel._closed_cb
|
train
|
def _closed_cb(self, final_frame=None):
    '''
    "Private" callback from the ChannelClass when a channel is closed. Only
    called after broker initiated close, or we receive a close_ok. Caller
    has the option to send a final frame, to be used to bypass any
    synchronous or otherwise-pending frames so that the channel can be
    cleanly closed.

    :param final_frame: optional frame sent directly on the connection,
        bypassing this channel's (now closed) send path
    '''
    # delete all pending data and send final frame if thre is one. note
    # that it bypasses send_frame so that even if the closed state is set,
    # the frame is published.
    if final_frame:
        self._connection.send_frame(final_frame)

    try:
        self._notify_close_listeners()
    finally:
        # Drop buffered work even if a close listener raised.
        self._pending_events = deque()
        self._frame_buffer = deque()

        # clear out other references for faster cleanup
        for protocol_class in self._class_map.values():
            protocol_class._cleanup()
            delattr(self, protocol_class.name)
        self._connection = None
        self._class_map = None
        self._close_listeners = set()
|
python
|
{
"resource": ""
}
|
q7863
|
GeventTransport.connect
|
train
|
# NOTE: uses Python 2 tuple parameter unpacking; not valid Python 3 syntax.
def connect(self, (host, port)):
    '''
    Connect using a host,port tuple. Delegates to the base transport with
    the gevent socket class.
    '''
    super(GeventTransport, self).connect((host, port), klass=socket.socket)
|
python
|
{
"resource": ""
}
|
q7864
|
GeventTransport.read
|
train
|
def read(self, timeout=None):
    '''
    Read from the transport. If no data is available, should return None.
    If timeout>0, will only block for `timeout` seconds.

    :param timeout: optional seconds to block waiting for data
    :returns: bytes read, or None when another greenlet holds the lock
    '''
    # If currently locked, another greenlet is trying to read, so yield
    # control and then return none. Required if a Connection is configured
    # to be synchronous, a sync callback is trying to read, and there's
    # another read loop running read_frames. Without it, the run loop will
    # release the lock but then immediately acquire it again. Yielding
    # control in the reading thread after bytes are read won't fix
    # anything, because it's quite possible the bytes read resulted in a
    # frame that satisfied the synchronous callback, and so this needs to
    # return immediately to first check the current status of synchronous
    # callbacks before attempting to read again.
    if self._read_lock.locked():
        self._read_wait.wait(timeout)
        return None

    self._read_lock.acquire()
    try:
        return super(GeventTransport, self).read(timeout=timeout)
    finally:
        self._read_lock.release()
        # Wake any greenlets parked in the wait above, then reset the
        # event for the next read cycle.
        self._read_wait.set()
        self._read_wait.clear()
|
python
|
{
"resource": ""
}
|
q7865
|
ProtocolClass.dispatch
|
train
|
def dispatch(self, method_frame):
    '''
    Dispatch a method frame to the handler registered for its method id.
    The handler is first cleared from the channel's synchronous-callback
    queue, then invoked with the frame.
    '''
    handler = self.dispatch_map.get(method_frame.method_id)
    if not handler:
        raise self.InvalidMethod(
            "no method is registered with id: %d" % method_frame.method_id)
    callback = self.channel.clear_synchronous_cb(handler)
    callback(method_frame)
|
python
|
{
"resource": ""
}
|
q7866
|
Reader.seek
|
train
|
def seek(self, offset, whence=0):
    '''
    Simple seek. Follows standard interface: whence 0 is relative to this
    reader's window start, 1 is relative to the current position, and any
    other value is relative to the last byte of the window (end_pos - 1).
    '''
    if whence == 1:
        self._pos += offset
    elif whence == 0:
        self._pos = self._start_pos + offset
    else:
        self._pos = self._end_pos - 1 + offset
|
python
|
{
"resource": ""
}
|
q7867
|
Reader._check_underflow
|
train
|
def _check_underflow(self, n):
'''
Raise BufferUnderflow if there's not enough bytes to satisfy
the request.
'''
if self._pos + n > self._end_pos:
raise self.BufferUnderflow()
|
python
|
{
"resource": ""
}
|
q7868
|
Reader.buffer
|
train
|
# NOTE: relies on the Python 2 `buffer` builtin (removed in Python 3).
def buffer(self):
    '''
    Get a copy of the buffer that this is reading from. Returns a
    buffer object limited to this reader's window.
    '''
    return buffer(self._input, self._start_pos,
                  (self._end_pos - self._start_pos))
|
python
|
{
"resource": ""
}
|
q7869
|
Reader.read_bit
|
train
|
def read_bit(self):
    """
    Read a single boolean value, returns 0 or 1. Convience for single
    bit fields.

    Will raise BufferUnderflow if there's not enough bytes in the buffer.
    """
    # Inline underflow check: cheaper than _check_underflow for one byte.
    pos = self._pos
    if pos >= self._end_pos:
        raise self.BufferUnderflow()
    self._pos = pos + 1
    return ord(self._input[pos]) & 1
|
python
|
{
"resource": ""
}
|
q7870
|
Reader.read_bits
|
train
|
# NOTE: relies on Python 2 behavior: `map` returning a list and `xrange`.
def read_bits(self, num):
    '''
    Read several bits packed into the same field. Will return as a list.
    The bit field itself is little-endian, though the order of the
    returned array looks big-endian for ease of decomposition.

    Reader('\x02').read_bits(2) -> [False,True]
    Reader('\x08').read_bits(2) ->
        [False,True,False,False,False,False,False,False]
    first_field, second_field = Reader('\x02').read_bits(2)

    Will raise BufferUnderflow if there's not enough bytes in the buffer.
    Will raise ValueError if num < 0 or num >= 9
    '''
    # Perform a faster check on underflow
    if self._pos >= self._end_pos:
        raise self.BufferUnderflow()
    # A single byte holds at most 8 bit fields.
    if num < 0 or num >= 9:
        raise ValueError("8 bits per field")
    field = ord(self._input[self._pos])
    result = map(lambda x: field >> x & 1, xrange(num))
    self._pos += 1
    return result
|
python
|
{
"resource": ""
}
|
q7871
|
Reader._read_field
|
train
|
def _read_field(self):
    '''
    Read a single byte for field type, then read the value.

    :returns: the decoded field value
    :raises Reader.FieldError: when the field type byte is unknown
    '''
    ftype = self._input[self._pos]
    self._pos += 1

    # Each known field type maps to a bound reader function.
    reader = self.field_type_map.get(ftype)
    if reader:
        return reader(self)

    raise Reader.FieldError('Unknown field type %s', ftype)
|
python
|
{
"resource": ""
}
|
q7872
|
ChannelClass.open
|
train
|
def open(self):
    '''
    Open the channel for communication. Sends channel.open (class 20,
    method 10) and waits synchronously for open-ok.
    '''
    args = Writer()
    # The single argument is a reserved (deprecated) out-of-band string.
    args.write_shortstr('')
    self.send_frame(MethodFrame(self.channel_id, 20, 10, args))
    self.channel.add_synchronous_cb(self._recv_open_ok)
|
python
|
{
"resource": ""
}
|
q7873
|
ChannelClass._send_flow
|
train
|
def _send_flow(self, active):
    '''
    Send a flow control command (channel.flow, class 20, method 20) and
    wait synchronously for flow-ok.

    :param active: True to request the peer resume, False to pause
    '''
    args = Writer()
    args.write_bit(active)
    self.send_frame(MethodFrame(self.channel_id, 20, 20, args))
    self.channel.add_synchronous_cb(self._recv_flow_ok)
|
python
|
{
"resource": ""
}
|
q7874
|
ChannelClass._recv_flow
|
train
|
def _recv_flow(self, method_frame):
    '''
    Receive a flow control command from the broker. Updates the channel's
    active state, replies with flow-ok (class 20, method 21), and fires
    the user's flow-control callback if one is registered.
    '''
    self.channel._active = method_frame.args.read_bit()

    # Echo our (new) active state back to the broker.
    args = Writer()
    args.write_bit(self.channel.active)
    self.send_frame(MethodFrame(self.channel_id, 20, 21, args))

    if self._flow_control_cb is not None:
        self._flow_control_cb()
|
python
|
{
"resource": ""
}
|
q7875
|
ChannelClass._recv_flow_ok
|
train
|
def _recv_flow_ok(self, method_frame):
'''
Receive a flow control ack from the broker.
'''
self.channel._active = method_frame.args.read_bit()
if self._flow_control_cb is not None:
self._flow_control_cb()
|
python
|
{
"resource": ""
}
|
q7876
|
ChannelClass.close
|
train
|
def close(self, reply_code=0, reply_text='', class_id=0, method_id=0):
    '''
    Close this channel. Caller has the option of specifying the reason for
    closure and the class and method ids of the current frame in which an
    error occurred. If in the event of an exception, the channel will be
    marked as immediately closed. If channel is already closed, call is
    ignored.

    :param reply_code: AMQP reply code to report
    :param reply_text: human-readable close reason (truncated to 255)
    :param class_id: class id of the frame that caused the error, if any
    :param method_id: method id of the frame that caused the error, if any
    '''
    if not getattr(self, 'channel', None) or self.channel._closed:
        return

    self.channel._close_info = {
        'reply_code': reply_code,
        'reply_text': reply_text,
        'class_id': class_id,
        'method_id': method_id
    }

    # exceptions here likely due to race condition as connection is closing
    # cap the reply_text we send because it may be arbitrarily long
    try:
        args = Writer()
        args.write_short(reply_code)
        args.write_shortstr(reply_text[:255])
        args.write_short(class_id)
        args.write_short(method_id)
        # channel.close is class 20, method 40
        self.send_frame(MethodFrame(self.channel_id, 20, 40, args))

        self.channel.add_synchronous_cb(self._recv_close_ok)
    finally:
        # Immediately set the closed flag so no more frames can be sent
        # NOTE: in synchronous mode, by the time this is called we will
        # have already run self.channel._closed_cb and so the channel
        # reference is gone.
        if self.channel:
            self.channel._closed = True
|
python
|
{
"resource": ""
}
|
q7877
|
ChannelClass._recv_close
|
train
|
def _recv_close(self, method_frame):
    '''
    Receive a close command from the broker. Records the broker-supplied
    close reason, marks the channel closed, and replies with close-ok
    (class 20, method 41) as the channel's final frame.
    '''
    self.channel._close_info = {
        'reply_code': method_frame.args.read_short(),
        'reply_text': method_frame.args.read_shortstr(),
        'class_id': method_frame.args.read_short(),
        'method_id': method_frame.args.read_short()
    }

    # Flag closed before cleanup so no further frames are accepted.
    self.channel._closed = True
    self.channel._closed_cb(
        final_frame=MethodFrame(self.channel_id, 20, 41))
|
python
|
{
"resource": ""
}
|
q7878
|
ChannelClass._recv_close_ok
|
train
|
def _recv_close_ok(self, method_frame):
'''
Receive a close ack from the broker.
'''
self.channel._closed = True
self.channel._closed_cb()
|
python
|
{
"resource": ""
}
|
q7879
|
QueueClass.bind
|
train
|
def bind(self, queue, exchange, routing_key='', nowait=True, arguments=None,
         ticket=None, cb=None):
    '''
    Bind a queue to an exchange.

    :param queue: name of the queue to bind
    :param exchange: name of the exchange to bind to
    :param routing_key: routing key for the binding
    :param nowait: if True (and no cb), don't wait for bind-ok
    :param arguments: optional table of binding arguments
    :param ticket: optional access ticket; defaults to self.default_ticket
    :param cb: optional callback fired when the broker confirms the bind
    '''
    # FIX: `arguments` previously defaulted to a shared mutable dict ({}).
    # Default to None and normalize here to avoid accidental cross-call
    # sharing; the wire behavior is unchanged.
    if arguments is None:
        arguments = {}
    nowait = nowait and self.allow_nowait() and not cb

    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(queue).\
        write_shortstr(exchange).\
        write_shortstr(routing_key).\
        write_bit(nowait).\
        write_table(arguments)
    self.send_frame(MethodFrame(self.channel_id, 50, 20, args))

    if not nowait:
        self._bind_cb.append(cb)
        self.channel.add_synchronous_cb(self._recv_bind_ok)
|
python
|
{
"resource": ""
}
|
q7880
|
QueueClass.unbind
|
train
|
def unbind(self, queue, exchange, routing_key='', arguments=None,
           ticket=None, cb=None):
    '''
    Unbind a queue from an exchange. This is always synchronous.

    :param queue: name of the queue to unbind
    :param exchange: name of the exchange to unbind from
    :param routing_key: routing key of the binding to remove
    :param arguments: optional table of binding arguments
    :param ticket: optional access ticket; defaults to self.default_ticket
    :param cb: optional callback fired when the broker confirms the unbind
    '''
    # FIX: `arguments` previously defaulted to a shared mutable dict ({}).
    # Default to None and normalize here to avoid accidental cross-call
    # sharing; the wire behavior is unchanged.
    if arguments is None:
        arguments = {}

    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(queue).\
        write_shortstr(exchange).\
        write_shortstr(routing_key).\
        write_table(arguments)
    self.send_frame(MethodFrame(self.channel_id, 50, 50, args))

    self._unbind_cb.append(cb)
    self.channel.add_synchronous_cb(self._recv_unbind_ok)
|
python
|
{
"resource": ""
}
|
q7881
|
QueueClass.purge
|
train
|
def purge(self, queue, nowait=True, ticket=None, cb=None):
    '''
    Purge all messages in a queue.

    :param queue: name of the queue to purge
    :param nowait: if True (and no cb), don't wait for purge-ok
    :param ticket: optional access ticket; defaults to self.default_ticket
    :param cb: optional callback fired when the broker confirms the purge
    '''
    nowait = nowait and self.allow_nowait() and not cb

    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(queue).\
        write_bit(nowait)
    # queue.purge is class 50, method 30
    self.send_frame(MethodFrame(self.channel_id, 50, 30, args))

    if not nowait:
        self._purge_cb.append(cb)
        return self.channel.add_synchronous_cb(self._recv_purge_ok)
|
python
|
{
"resource": ""
}
|
q7882
|
Writer.write_bits
|
train
|
# NOTE: relies on Python 2 builtins `reduce`, `xrange`, and str-appending
# `chr` output to the buffer; `xor` is presumably from the operator module.
def write_bits(self, *args):
    '''
    Write multiple bits in a single byte field. The bits will be written in
    little-endian order, but should be supplied in big endian order. Will
    raise ValueError when more than 8 arguments are supplied.

    write_bits(True, False) => 0x02
    '''
    # Would be nice to make this a bit smarter
    if len(args) > 8:
        raise ValueError("Can only write 8 bits at a time")

    # Fold each argument into its bit position within a single byte.
    self._output_buffer.append(chr(
        reduce(lambda x, y: xor(x, args[y] << y), xrange(len(args)), 0)))

    return self
|
python
|
{
"resource": ""
}
|
q7883
|
Writer.write_bit
|
train
|
def write_bit(self, b, pack=Struct('B').pack):
    '''
    Write a single bit. Convenience method for single bit args; the value
    is coerced to a strict boolean before packing.
    '''
    self._output_buffer.append(pack(bool(b)))
    return self
|
python
|
{
"resource": ""
}
|
q7884
|
Writer.write_short_at
|
train
|
def write_short_at(self, n, pos, pack_into=Struct('>H').pack_into):
    '''
    Write an unsigned 16bit value at a specific position in the buffer.
    Used for writing tables and frames. Raises ValueError when the value
    does not fit in 16 bits.
    '''
    if n < 0 or n > 0xFFFF:
        raise ValueError('Short %d out of range 0..0xFFFF', n)
    pack_into(self._output_buffer, pos, n)
    return self
|
python
|
{
"resource": ""
}
|
q7885
|
Writer.write_long
|
train
|
def write_long(self, n, pack=Struct('>I').pack):
    """
    Write an integer as an unsigned 32-bit value. Raises ValueError when
    the value does not fit in 32 bits.
    """
    if not 0 <= n <= 0xFFFFFFFF:
        raise ValueError('Long %d out of range 0..0xFFFFFFFF', n)
    self._output_buffer.extend(pack(n))
    return self
|
python
|
{
"resource": ""
}
|
q7886
|
Writer.write_long_at
|
train
|
def write_long_at(self, n, pos, pack_into=Struct('>I').pack_into):
    '''
    Write an unsigned 32bit value at a specific position in the buffer.
    Used for writing tables and frames. Raises ValueError when the value
    does not fit in 32 bits.
    '''
    if n < 0 or n > 0xFFFFFFFF:
        raise ValueError('Long %d out of range 0..0xFFFFFFFF', n)
    pack_into(self._output_buffer, pos, n)
    return self
|
python
|
{
"resource": ""
}
|
q7887
|
Writer.write_shortstr
|
train
|
# NOTE: relies on the Python 2 `unicode` builtin; not portable to Python 3.
def write_shortstr(self, s):
    """
    Write a string up to 255 bytes long after encoding. If passed
    a unicode string, encode as UTF-8.

    NOTE(review): strings longer than 255 bytes are not rejected here;
    write_octet presumably enforces the length range — confirm.
    """
    if isinstance(s, unicode):
        s = s.encode('utf-8')
    # Length prefix first, then the raw bytes.
    self.write_octet(len(s))
    self.write(s)
    return self
|
python
|
{
"resource": ""
}
|
q7888
|
Writer.write_timestamp
|
train
|
# NOTE: relies on the Python 2 `long` builtin; `timegm` is presumably
# calendar.timegm imported at module level — confirm.
def write_timestamp(self, t, pack=Struct('>Q').pack):
    """
    Write out a Python datetime.datetime object as a 64-bit integer
    representing seconds since the Unix UTC epoch.
    """
    # Double check timestamp, can't imagine why it would be signed
    self._output_buffer.extend(pack(long(timegm(t.timetuple()))))
    return self
|
python
|
{
"resource": ""
}
|
q7889
|
RTMPPacket.body
|
train
|
def body(self):
    """The body of the packet.

    Copies the packet's m_body bytes out of the C struct via a cffi
    buffer view and returns them as a Python byte string.
    """
    view = ffi.buffer(self.packet.m_body, self.packet.m_nBodySize)
    return view[:]
|
python
|
{
"resource": ""
}
|
q7890
|
add_log_callback
|
train
|
def add_log_callback(callback):
    """Adds a log callback.

    :param callback: a callable registered in the module's callback set
    :returns: the callback itself, so this can be used as a decorator
    Raises :exc:`ValueError` when `callback` is not callable.
    """
    if not callable(callback):
        raise ValueError("Callback must be callable")
    # The set is only mutated (never rebound), so no `global` is needed.
    _log_callbacks.add(callback)
    return callback
|
python
|
{
"resource": ""
}
|
q7891
|
RTMPStream.read
|
train
|
def read(self, size):
    """Attempts to read data from the stream.

    :param size: int, The maximum amount of bytes to read.

    :returns: up to `size` bytes read from the stream

    Raises :exc:`IOError` on error.
    """
    # If enabled tell the server that our buffer can fit the whole
    # stream, this often increases throughput alot.
    if self._update_buffer and not self._updated_buffer and self.duration:
        self.update_buffer((self.duration * 1000) + 5000)
        self._updated_buffer = True

    # Reuse the C buffer across calls when the requested size is stable.
    if not self._buf or len(self._buf) != size:
        self._buf = ffi.new("char[]", size)
        self._view = ffi.buffer(self._buf, size)

    res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)

    if res < 0:
        raise IOError("Failed to read data")

    # Only the first `res` bytes of the buffer are valid.
    return self._view[:res]
|
python
|
{
"resource": ""
}
|
q7892
|
RTMPStream.write
|
train
|
def write(self, data):
    """Writes data to the stream.

    :param data: bytes, FLV data to write to the stream

    The data passed can contain multiple FLV tags, but it MUST
    always contain complete tags or undefined behaviour might
    occur.

    :returns: the number of bytes written, as reported by librtmp

    Raises :exc:`IOError` on error.
    """
    # librtmp expects an immutable byte string.
    if isinstance(data, bytearray):
        data = bytes(data)

    if not isinstance(data, byte_types):
        raise ValueError("A bytes argument is required")

    res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))

    if res < 0:
        raise IOError("Failed to write data")

    return res
|
python
|
{
"resource": ""
}
|
q7893
|
RTMPStream.pause
|
train
|
def pause(self):
    """Pause playback of the stream.

    Raises :exc:`RTMPError` if the pause request fails.
    """
    if librtmp.RTMP_Pause(self.client.rtmp, 1) < 1:
        raise RTMPError("Failed to pause")
python
|
{
"resource": ""
}
|
q7894
|
RTMPStream.unpause
|
train
|
def unpause(self):
    """Resume playback of a paused stream.

    Raises :exc:`RTMPError` if the unpause request fails.
    """
    if librtmp.RTMP_Pause(self.client.rtmp, 0) < 1:
        raise RTMPError("Failed to unpause")
|
python
|
{
"resource": ""
}
|
q7895
|
RTMPStream.seek
|
train
|
def seek(self, time):
    """Attempts to seek in the stream.

    :param time: int, Time to seek to in seconds

    Raises :exc:`RTMPError` if the seek request fails.
    """
    if librtmp.RTMP_SendSeek(self.client.rtmp, time) < 1:
        raise RTMPError("Failed to seek")
|
python
|
{
"resource": ""
}
|
q7896
|
RTMP.set_option
|
train
|
def set_option(self, key, value):
    """Sets a option for this session.

    For a detailed list of available options see the librtmp(3) man page.

    :param key: str, A valid option key.
    :param value: A value, anything that can be converted to str is valid.

    Raises :exc:`ValueError` if a invalid option is specified.
    """
    # Options are passed to librtmp as AVal (string) pairs.
    akey = AVal(key)
    aval = AVal(value)
    res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)

    if res < 1:
        raise ValueError("Unable to set option {0}".format(key))

    # Keep the AVal objects alive for the lifetime of the session so the
    # underlying C strings are not garbage collected.
    self._options[akey] = aval
|
python
|
{
"resource": ""
}
|
q7897
|
RTMP.setup_url
|
train
|
def setup_url(self, url):
    r"""Attempt to parse a RTMP URL.

    Additional options may be specified by appending space-separated
    key=value pairs to the URL. Special characters in values may need
    to be escaped to prevent misinterpretation by the option parser.

    The escape encoding uses a backslash followed by two hexadecimal
    digits representing the ASCII value of the character. E.g., spaces
    must be escaped as `\\20` and backslashes must be escaped as `\\5c`.

    :param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`

    Raises :exc:`RTMPError` if URL parsing fails.
    """
    # Keep a reference to the encoded URL: librtmp retains the pointer.
    self.url = bytes(url, "utf8")

    res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
    if res < 1:
        raise RTMPError("Unable to parse URL")
|
python
|
{
"resource": ""
}
|
q7898
|
RTMP.read_packet
|
train
|
def read_packet(self):
    """Reads a RTMP packet from the server.

    Returns a :class:`RTMPPacket`.

    Raises :exc:`RTMPError` on error.
    Raises :exc:`RTMPTimeoutError` on timeout.

    Usage::

      >>> packet = conn.read_packet()
      >>> packet.body
      b'packet body ...'
    """
    packet = ffi.new("RTMPPacket*")
    packet_complete = False

    # A logical packet may span several reads; loop until the body is
    # fully received.
    while not packet_complete:
        res = librtmp.RTMP_ReadPacket(self.rtmp, packet)

        if res < 1:
            if librtmp.RTMP_IsTimedout(self.rtmp):
                raise RTMPTimeoutError("Timed out while reading packet")
            else:
                raise RTMPError("Failed to read packet")

        packet_complete = packet.m_nBytesRead == packet.m_nBodySize

    return RTMPPacket._from_pointer(packet)
|
python
|
{
"resource": ""
}
|
q7899
|
RTMP.send_packet
|
train
|
def send_packet(self, packet, queue=True):
    """Sends a RTMP packet to the server.

    :param packet: RTMPPacket, the packet to send to the server.
    :param queue: bool, If True, queue up the packet in a internal queue
                  rather than sending it right away.
    """
    if not isinstance(packet, RTMPPacket):
        raise ValueError("A RTMPPacket argument is required")
    # librtmp takes the queue flag as an int.
    return librtmp.RTMP_SendPacket(self.rtmp, packet.packet, int(queue))
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.