_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q5500
|
press_check
|
train
|
def press_check(df):
    """
    Remove pressure reversals from the index.

    Any row whose index (pressure) value drops below the most recent local
    maximum is masked with NaN, leaving a monotonically plausible profile.

    Args:
        df (pandas.DataFrame): Frame indexed by pressure values.

    Returns:
        pandas.DataFrame: A copy of ``df`` with reversed rows set to NaN;
        the input frame is not modified.
    """
    new_df = df.copy()
    # The extra .copy() before .index in the original was redundant; the
    # index values are only read here.
    press = new_df.index.values
    ref = press[0]
    # Pad with the final sample so the diff is the same length as press;
    # True marks the sample where a pressure inversion begins.
    inversions = np.diff(np.r_[press, press[-1]]) < 0
    mask = np.zeros_like(inversions)
    for k, p in enumerate(inversions):
        if p:
            ref = press[k]
            # Mask every following sample that is still above (shallower
            # than) the pressure where the inversion started.
            cut = press[k + 1 :] < ref
            mask[k + 1 :][cut] = True
    # np.nan: the np.NaN alias was removed in NumPy 2.0.
    new_df[mask] = np.nan
    return new_df
|
python
|
{
"resource": ""
}
|
q5501
|
Client.put
|
train
|
def put(self,
        body: Body,
        priority: int = DEFAULT_PRIORITY,
        delay: int = DEFAULT_DELAY,
        ttr: int = DEFAULT_TTR) -> int:
    """Inserts a job into the currently used tube and returns its job ID.

    :param body: The data representing the job.
    :param priority: An integer between 0 and 4,294,967,295 where 0 is the
                     most urgent.
    :param delay: The number of seconds to delay the job for.
    :param ttr: The maximum number of seconds the job can be reserved for
                before timing out.
    """
    if isinstance(body, str):
        # Textual bodies must be encoded before hitting the wire.
        encoding = self.encoding
        if encoding is None:
            raise TypeError("Unable to encode string with no encoding set")
        body = body.encode(encoding)
    command = b'put %d %d %d %d\r\n%b' % (priority, delay, ttr, len(body), body)
    return self._int_cmd(command, b'INSERTED')
|
python
|
{
"resource": ""
}
|
q5502
|
Client.use
|
train
|
def use(self, tube: str) -> None:
    """Switch this client to the given tube for subsequent producer commands.

    :param tube: The tube to use.
    """
    command = b'use %b' % tube.encode('ascii')
    self._send_cmd(command, b'USING')
|
python
|
{
"resource": ""
}
|
q5503
|
Client.reserve
|
train
|
def reserve(self, timeout: Optional[int] = None) -> Job:
    """Reserves a job from a tube on the watch list, granting this client
    exclusive access to it for the TTR. Returns the reserved job.

    With no ``timeout`` this blocks until a job is available. With a
    ``timeout`` it raises :class:`TimedOutError <greenstalk.TimedOutError>`
    if no job could be reserved within that many seconds.

    :param timeout: The maximum number of seconds to wait.
    """
    if timeout is not None:
        return self._job_cmd(b'reserve-with-timeout %d' % timeout, b'RESERVED')
    return self._job_cmd(b'reserve', b'RESERVED')
|
python
|
{
"resource": ""
}
|
q5504
|
Client.release
|
train
|
def release(self,
            job: Job,
            priority: int = DEFAULT_PRIORITY,
            delay: int = DEFAULT_DELAY) -> None:
    """Releases a reserved job back to the ready (or delayed) queue.

    :param job: The job to release.
    :param priority: An integer between 0 and 4,294,967,295 where 0 is the
                     most urgent.
    :param delay: The number of seconds to delay the job for.
    """
    command = b'release %d %d %d' % (job.id, priority, delay)
    self._send_cmd(command, b'RELEASED')
|
python
|
{
"resource": ""
}
|
q5505
|
Client.bury
|
train
|
def bury(self, job: Job, priority: int = DEFAULT_PRIORITY) -> None:
    """Buries a reserved job, taking it out of circulation until kicked.

    :param job: The job to bury.
    :param priority: An integer between 0 and 4,294,967,295 where 0 is the
                     most urgent.
    """
    command = b'bury %d %d' % (job.id, priority)
    self._send_cmd(command, b'BURIED')
|
python
|
{
"resource": ""
}
|
q5506
|
Client.touch
|
train
|
def touch(self, job: Job) -> None:
    """Refreshes the TTR of a reserved job, postponing its timeout.

    :param job: The job to touch.
    """
    command = b'touch %d' % job.id
    self._send_cmd(command, b'TOUCHED')
|
python
|
{
"resource": ""
}
|
q5507
|
Client.watch
|
train
|
def watch(self, tube: str) -> int:
    """Adds a tube to the watch list and reports how many tubes this client
    is now watching.

    :param tube: The tube to watch.
    """
    command = b'watch %b' % tube.encode('ascii')
    return self._int_cmd(command, b'WATCHING')
|
python
|
{
"resource": ""
}
|
q5508
|
Client.ignore
|
train
|
def ignore(self, tube: str) -> int:
    """Removes a tube from the watch list and reports how many tubes this
    client is still watching.

    :param tube: The tube to ignore.
    """
    command = b'ignore %b' % tube.encode('ascii')
    return self._int_cmd(command, b'WATCHING')
|
python
|
{
"resource": ""
}
|
q5509
|
Client.kick
|
train
|
def kick(self, bound: int) -> int:
    """Moves delayed and buried jobs into the ready queue, returning the
    number of jobs affected.

    Only jobs from the currently used tube are moved.

    A kick only ever affects jobs in a single state: buried jobs take
    precedence, and delayed jobs are moved only when none are buried.

    :param bound: The maximum number of jobs to kick.
    """
    command = b'kick %d' % bound
    return self._int_cmd(command, b'KICKED')
|
python
|
{
"resource": ""
}
|
q5510
|
Client.kick_job
|
train
|
def kick_job(self, job: JobOrID) -> None:
    """Moves a single delayed or buried job into the ready queue.

    :param job: The job or job ID to kick.
    """
    command = b'kick-job %d' % _to_id(job)
    self._send_cmd(command, b'KICKED')
|
python
|
{
"resource": ""
}
|
q5511
|
Client.stats_job
|
train
|
def stats_job(self, job: JobOrID) -> Stats:
    """Returns statistics about the given job.

    :param job: The job or job ID to return statistics for.
    """
    command = b'stats-job %d' % _to_id(job)
    return self._stats_cmd(command)
|
python
|
{
"resource": ""
}
|
q5512
|
Client.stats_tube
|
train
|
def stats_tube(self, tube: str) -> Stats:
    """Returns statistics about the given tube.

    :param tube: The tube to return statistics for.
    """
    command = b'stats-tube %b' % tube.encode('ascii')
    return self._stats_cmd(command)
|
python
|
{
"resource": ""
}
|
q5513
|
Client.pause_tube
|
train
|
def pause_tube(self, tube: str, delay: int) -> None:
    """Stops jobs in a tube from being reserved for a period of time.

    :param tube: The tube to pause.
    :param delay: The number of seconds to pause the tube for.
    """
    command = b'pause-tube %b %d' % (tube.encode('ascii'), delay)
    self._send_cmd(command, b'PAUSED')
|
python
|
{
"resource": ""
}
|
q5514
|
cli
|
train
|
def cli(conf):
    """The fedora-messaging command line interface.

    Loads the configuration file given on the command line (if any), wires
    Twisted's legacy logging into the standard library logger, and applies
    the configured logging setup.
    """
    if conf:
        if not os.path.isfile(conf):
            raise click.exceptions.BadParameter("{} is not a file".format(conf))
        try:
            config.conf.load_config(config_path=conf)
        except exceptions.ConfigurationException as e:
            raise click.exceptions.BadParameter(str(e))
    # Forward Twisted's log messages to the Python logging module.
    observer = legacy_twisted_log.PythonLoggingObserver()
    observer.start()
    config.conf.setup_logging()
|
python
|
{
"resource": ""
}
|
q5515
|
consume
|
train
|
def consume(exchange, queue_name, routing_key, callback, app_name):
    """Consume messages from an AMQP queue using a Python callback.

    Command-line entry point: overlays any CLI-provided options on top of
    the loaded configuration, imports the callback object, starts a Twisted
    consumer, and blocks in the reactor until the consumer halts.

    Args:
        exchange (str): Exchange name to override the configured bindings with.
        queue_name (str): Queue to consume from instead of the configured one.
        routing_key: Routing key(s) to override the configured bindings with.
        callback (str): Python path ("package.module:callable") to the callback;
            falls back to the configured ``callback`` when empty.
        app_name (str): Value for the AMQP "app" client property.

    Exits the process with the module-level ``_exit_code`` once the reactor
    stops.
    """
    # The configuration validates these are not null and contain all required keys
    # when it is loaded.
    bindings = config.conf["bindings"]
    queues = config.conf["queues"]
    # The CLI and config.DEFAULTS have different defaults for the queue
    # settings at the moment. We should select a universal default in the
    # future and remove this. Unfortunately that will break backwards compatibility.
    if queues == config.DEFAULTS["queues"]:
        queues[config._default_queue_name]["durable"] = True
        queues[config._default_queue_name]["auto_delete"] = False
    # CLI options override whatever came from the configuration file.
    if queue_name:
        queues = {queue_name: config.conf["queues"][config._default_queue_name]}
        for binding in bindings:
            binding["queue"] = queue_name
    if exchange:
        for binding in bindings:
            binding["exchange"] = exchange
    if routing_key:
        for binding in bindings:
            binding["routing_keys"] = routing_key
    callback_path = callback or config.conf["callback"]
    if not callback_path:
        raise click.ClickException(
            "A Python path to a callable object that accepts the message must be provided"
            ' with the "--callback" command line option or in the configuration file'
        )
    # Split "my_package.module:callable_object" into module path and attribute.
    try:
        module, cls = callback_path.strip().split(":")
    except ValueError:
        raise click.ClickException(
            "Unable to parse the callback path ({}); the "
            'expected format is "my_package.module:'
            'callable_object"'.format(callback_path)
        )
    try:
        module = importlib.import_module(module)
    except ImportError as e:
        provider = "--callback argument" if callback else "configuration file"
        raise click.ClickException(
            "Failed to import the callback module ({}) provided in the {}".format(
                str(e), provider
            )
        )
    try:
        callback = getattr(module, cls)
    except AttributeError as e:
        raise click.ClickException(
            "Unable to import {} ({}); is the package installed? The python path should "
            'be in the format "my_package.module:callable_object"'.format(
                callback_path, str(e)
            )
        )
    if app_name:
        config.conf["client_properties"]["app"] = app_name
    _log.info("Starting consumer with %s callback", callback_path)
    try:
        deferred_consumers = api.twisted_consume(
            callback, bindings=bindings, queues=queues
        )
        deferred_consumers.addCallback(_consume_callback)
        deferred_consumers.addErrback(_consume_errback)
    except ValueError as e:
        # click 7.0 added a required option_name argument to BadOptionUsage;
        # support both old and new click APIs.
        click_version = pkg_resources.get_distribution("click").parsed_version
        if click_version < pkg_resources.parse_version("7.0"):
            raise click.exceptions.BadOptionUsage(str(e))
        else:
            raise click.exceptions.BadOptionUsage("callback", str(e))
    # Blocks until reactor.stop() is triggered by the consumer callbacks.
    reactor.run()
    sys.exit(_exit_code)
|
python
|
{
"resource": ""
}
|
q5516
|
_consume_errback
|
train
|
def _consume_errback(failure):
    """Handle any errors that occur during consumer registration.

    Sets the module-level ``_exit_code`` based on the failure type, logs a
    diagnostic message, and stops the reactor.
    """
    global _exit_code
    if failure.check(exceptions.BadDeclaration):
        _exit_code = 10
        _log.error(
            "Unable to declare the %s object on the AMQP broker. The "
            "broker responded with %s. Check permissions for your user.",
            failure.value.obj_type,
            failure.value.reason,
        )
    elif failure.check(exceptions.PermissionException):
        _exit_code = 15
        _log.error(
            "The consumer could not proceed because of a permissions problem: %s",
            str(failure.value),
        )
    elif failure.check(exceptions.ConnectionException):
        _exit_code = 14
        _log.error(failure.value.reason)
    else:
        _exit_code = 11
        _log.exception(
            "An unexpected error (%r) occurred while registering the "
            "consumer, please report this bug.",
            failure.value,
        )
    # The reactor may already be shutting down for another reason.
    try:
        reactor.stop()
    except error.ReactorNotRunning:
        pass
|
python
|
{
"resource": ""
}
|
q5517
|
_consume_callback
|
train
|
def _consume_callback(consumers):
"""
Callback when consumers are successfully registered.
This simply registers callbacks for consumer.result deferred object which
fires when the consumer stops.
Args
consumers (list of fedora_messaging.api.Consumer):
The list of consumers that were successfully created.
"""
for consumer in consumers:
def errback(failure):
global _exit_code
if failure.check(exceptions.HaltConsumer):
_exit_code = failure.value.exit_code
if _exit_code:
_log.error(
"Consumer halted with non-zero exit code (%d): %s",
_exit_code,
str(failure.value.reason),
)
elif failure.check(exceptions.ConsumerCanceled):
_exit_code = 12
_log.error(
"The consumer was canceled server-side, check with system administrators."
)
elif failure.check(exceptions.PermissionException):
_exit_code = 15
_log.error(
"The consumer could not proceed because of a permissions problem: %s",
str(failure.value),
)
else:
_exit_code = 13
_log.error(
"Unexpected error occurred in consumer %r: %r", consumer, failure
)
try:
reactor.stop()
except error.ReactorNotRunning:
pass
def callback(consumer):
_log.info("The %r consumer halted.", consumer)
if all([c.result.called for c in consumers]):
_log.info("All consumers have stopped; shutting down.")
try:
# Last consumer out shuts off the lights
reactor.stop()
except error.ReactorNotRunning:
pass
consumer.result.addCallbacks(callback, errback)
|
python
|
{
"resource": ""
}
|
q5518
|
_add_timeout
|
train
|
def _add_timeout(deferred, timeout):
    """
    Add a timeout to the given deferred.

    Works with both old Twisted and versions that have the ``addTimeout``
    API; this exists exclusively to support EL7.

    On Twisted versions without ``defer.Deferred.addTimeout`` the deferred
    will errback with a :class:`defer.CancelledError` instead of the normal
    ``error.TimeoutError``.
    """
    try:
        deferred.addTimeout(timeout, reactor)
    except AttributeError:
        # Twisted 12.2 (in EL7) predates the addTimeout API; emulate it by
        # scheduling a cancel and unscheduling it if the deferred fires first.
        delayed_cancel = reactor.callLater(timeout, deferred.cancel)

        def _abort_scheduled_cancel(result):
            """Halt the delayed cancel if the deferred fires before the timeout."""
            if not delayed_cancel.called:
                delayed_cancel.cancel()
            return result

        deferred.addBoth(_abort_scheduled_cancel)
|
python
|
{
"resource": ""
}
|
q5519
|
FedoraMessagingProtocolV2._allocate_channel
|
train
|
def _allocate_channel(self):
    """
    Allocate a new AMQP channel.

    NOTE(review): the body yields deferreds and ends with
    ``defer.returnValue``, so this is presumably decorated with
    ``defer.inlineCallbacks`` at the definition site — confirm in the full file.

    Returns:
        The newly created channel (via ``defer.returnValue``).

    Raises:
        NoFreeChannels: If this connection has reached its maximum number of channels.
    """
    try:
        channel = yield self.channel()
    except pika.exceptions.NoFreeChannels:
        raise NoFreeChannels()
    _std_log.debug("Created AMQP channel id %d", channel.channel_number)
    if self._confirms:
        # Publisher confirms were requested for this connection, so enable
        # them on every channel we hand out.
        yield channel.confirm_delivery()
    defer.returnValue(channel)
|
python
|
{
"resource": ""
}
|
q5520
|
FedoraMessagingProtocolV2.declare_exchanges
|
train
|
def declare_exchanges(self, exchanges):
    """
    Declare a number of exchanges at once.

    This simply wraps the :meth:`pika.channel.Channel.exchange_declare`
    method and deals with error handling and channel allocation.

    Args:
        exchanges (list of dict): A list of dictionaries, where each dictionary
            represents an exchange. Each dictionary can have the following keys:

            * exchange (str): The exchange's name
            * exchange_type (str): The type of the exchange ("direct", "topic", etc)
            * passive (bool): If true, this will just assert that the exchange exists,
              but won't create it if it doesn't. Defaults to the configuration value
              :ref:`conf-passive-declares`
            * durable (bool): Whether or not the exchange is durable
            * arguments (dict): Extra arguments for the exchange's creation.

    Raises:
        NoFreeChannels: If there are no available channels on this connection.
            If this occurs, you can either reduce the number of consumers on this
            connection or create an additional connection.
        BadDeclaration: If an exchange could not be declared. This can occur
            if the exchange already exists but its type does not match
            (e.g. it is declared as a "topic" exchange, but exists as a "direct"
            exchange). It can also occur if it does not exist, but the current
            user does not have permissions to create the object.
    """
    # Use a dedicated, short-lived channel: a failed declaration closes the
    # channel server-side, leaving the connection's other channels intact.
    channel = yield self._allocate_channel()
    try:
        for exchange in exchanges:
            # Copy so the caller's dict is not mutated by setdefault.
            args = exchange.copy()
            args.setdefault("passive", config.conf["passive_declares"])
            try:
                yield channel.exchange_declare(**args)
            except pika.exceptions.ChannelClosed as e:
                raise BadDeclaration("exchange", args, e)
    finally:
        try:
            channel.close()
        except pika.exceptions.AMQPError:
            # The channel may already be dead (e.g. closed by a failed declare).
            pass
|
python
|
{
"resource": ""
}
|
q5521
|
FedoraMessagingProtocolV2.declare_queues
|
train
|
def declare_queues(self, queues):
    """
    Declare a list of queues.

    Args:
        queues (list of dict): A list of dictionaries, where each dictionary
            represents a queue. Each dictionary can have the following keys:

            * queue (str): The name of the queue
            * passive (bool): If true, this will just assert that the queue exists,
              but won't create it if it doesn't. Defaults to the configuration value
              :ref:`conf-passive-declares`
            * durable (bool): Whether or not the queue is durable
            * exclusive (bool): Whether or not the queue is exclusive to this connection.
            * auto_delete (bool): Whether or not the queue should be automatically
              deleted once this connection ends.
            * arguments (dict): Additional arguments for the creation of the queue.

    Raises:
        NoFreeChannels: If there are no available channels on this connection.
            If this occurs, you can either reduce the number of consumers on this
            connection or create an additional connection.
        BadDeclaration: If a queue could not be declared. This can occur
            if the queue already exists but its settings do not match
            (e.g. it is declared as a durable queue, but exists as a non-durable
            queue). It can also occur if it does not exist, but the current
            user does not have permissions to create the object.
    """
    # Use a dedicated, short-lived channel: a failed declaration closes the
    # channel server-side, leaving the connection's other channels intact.
    channel = yield self._allocate_channel()
    try:
        for queue in queues:
            # Copy so the caller's dict is not mutated by setdefault.
            args = queue.copy()
            args.setdefault("passive", config.conf["passive_declares"])
            try:
                yield channel.queue_declare(**args)
            except pika.exceptions.ChannelClosed as e:
                raise BadDeclaration("queue", args, e)
    finally:
        try:
            channel.close()
        except pika.exceptions.AMQPError:
            # The channel may already be dead (e.g. closed by a failed declare).
            pass
|
python
|
{
"resource": ""
}
|
q5522
|
FedoraMessagingProtocolV2.bind_queues
|
train
|
def bind_queues(self, bindings):
    """
    Declare a set of bindings between queues and exchanges.

    Args:
        bindings (list of dict): A list of binding definitions. Each dictionary
            must contain the "queue" key whose value is the name of the queue
            to create the binding on, as well as the "exchange" key whose value
            should be the name of the exchange to bind to. Additional acceptable
            keys are any keyword arguments accepted by
            :meth:`pika.channel.Channel.queue_bind`.

    Raises:
        NoFreeChannels: If there are no available channels on this connection.
            If this occurs, you can either reduce the number of consumers on this
            connection or create an additional connection.
        BadDeclaration: If a binding could not be declared. This can occur if the
            queue or exchange don't exist, or if they do, but the current user does
            not have permissions to create bindings.
    """
    # Use a dedicated, short-lived channel: a failed bind closes the channel
    # server-side, leaving the connection's other channels intact.
    channel = yield self._allocate_channel()
    try:
        for binding in bindings:
            try:
                yield channel.queue_bind(**binding)
            except pika.exceptions.ChannelClosed as e:
                raise BadDeclaration("binding", binding, e)
    finally:
        try:
            channel.close()
        except pika.exceptions.AMQPError:
            # The channel may already be dead (e.g. closed by a failed bind).
            pass
|
python
|
{
"resource": ""
}
|
q5523
|
FedoraMessagingProtocolV2.halt
|
train
|
def halt(self):
    """
    Signal to consumers they should stop after finishing any messages
    currently being processed, then close the connection.

    Returns:
        defer.Deferred: fired when all consumers have successfully stopped
        and the connection is closed.
    """
    if self.is_closed:
        # We were asked to stop because the connection is already gone.
        # There's no graceful way to stop because we can't acknowledge
        # messages in the middle of being processed.
        _std_log.info("Disconnect requested, but AMQP connection already gone")
        self._channel = None
        return
    _std_log.info(
        "Waiting for %d consumer(s) to finish processing before halting",
        len(self._consumers),
    )
    # Cancel every consumer concurrently and wait for all cancellations.
    pending_cancels = []
    for c in list(self._consumers.values()):
        pending_cancels.append(c.cancel())
    yield defer.gatherResults(pending_cancels)
    _std_log.info("Finished canceling %d consumers", len(self._consumers))
    try:
        yield self.close()
    except pika.exceptions.ConnectionWrongStateError:
        pass  # Already closing, not a problem since that's what we want.
    self._consumers = {}
    self._channel = None
|
python
|
{
"resource": ""
}
|
q5524
|
FedoraMessagingProtocol.resumeProducing
|
train
|
def resumeProducing(self):
    """
    Starts or resumes the retrieval of messages from the server queue.

    This method starts receiving messages from the server; they will be
    passed to the consumer callback.

    .. note:: This is called automatically when :meth:`.consume` is called,
        so users should not need to call this unless :meth:`.pauseProducing`
        has been called.

    Returns:
        defer.Deferred: fired when the production is ready to start
    """
    # Start consuming
    self._running = True
    for consumer in self._consumers.values():
        queue_object, _ = yield consumer.channel.basic_consume(
            queue=consumer.queue, consumer_tag=consumer.tag
        )
        deferred = self._read(queue_object, consumer)
        # Bug fix: the original passed extra positional/keyword arguments to
        # addErrback while the errback lambda accepted only the failure, so
        # the errback raised a TypeError and the message was never logged.
        # The lambda also has to bind the loop variable as a default to
        # avoid the late-binding closure trap.
        deferred.addErrback(
            lambda f, c=consumer: _legacy_twisted_log.msg(
                "_read failed on consumer {c}",
                c=c,
                logLevel=logging.ERROR,
            )
        )
    _legacy_twisted_log.msg("AMQP connection successfully established")
|
python
|
{
"resource": ""
}
|
q5525
|
FedoraMessagingProtocol.pauseProducing
|
train
|
def pauseProducing(self):
    """
    Pause the reception of messages by canceling all existing consumers.

    This does not disconnect from the server; reception can be resumed
    later with :meth:`resumeProducing`.

    Returns:
        Deferred: fired when the production is paused.
    """
    if not self._running:
        return
    # Exit the read loop and cancel each consumer on the server side.
    self._running = False
    for active_consumer in self._consumers.values():
        yield active_consumer.channel.basic_cancel(consumer_tag=active_consumer.tag)
    _legacy_twisted_log.msg("Paused retrieval of messages for the server queue")
|
python
|
{
"resource": ""
}
|
q5526
|
get_class
|
train
|
def get_class(schema_name):
    """
    Retrieve the message class associated with the schema name.

    If no match is found, the default schema is returned and a warning is logged.

    Args:
        schema_name (six.text_type): The name of the :class:`Message` sub-class;
            this is typically the Python path.

    Returns:
        Message: A sub-class of :class:`Message` to create the message from.
    """
    global _registry_loaded
    if not _registry_loaded:
        # Populate the registry from entry points on first use.
        load_message_classes()
    if schema_name not in _schema_name_to_class:
        _log.warning(
            'The schema "%s" is not in the schema registry! Either install '
            "the package with its schema definition or define a schema. "
            "Falling back to the default schema...",
            schema_name,
        )
        return Message
    return _schema_name_to_class[schema_name]
|
python
|
{
"resource": ""
}
|
q5527
|
get_name
|
train
|
def get_name(cls):
    """
    Retrieve the schema name associated with a message class.

    Returns:
        str: The schema name.

    Raises:
        TypeError: If the message class isn't registered. Check your entry point
            for correctness.
    """
    global _registry_loaded
    if not _registry_loaded:
        # Populate the registry from entry points on first use.
        load_message_classes()
    if cls not in _class_to_schema_name:
        raise TypeError(
            "The class {} is not in the message registry, which indicates it is"
            ' not in the current list of entry points for "fedora_messaging".'
            " Please check that the class has been added to your package's"
            " entry points.".format(repr(cls))
        )
    return _class_to_schema_name[cls]
|
python
|
{
"resource": ""
}
|
q5528
|
load_message_classes
|
train
|
def load_message_classes():
    """Load the 'fedora.messages' entry points and register the message classes."""
    global _registry_loaded
    for entry_point in pkg_resources.iter_entry_points("fedora.messages"):
        message_class = entry_point.load()
        _log.info(
            "Registering the '%s' key as the '%r' class in the Message "
            "class registry",
            entry_point.name,
            message_class,
        )
        # Maintain both lookup directions: name -> class and class -> name.
        _schema_name_to_class[entry_point.name] = message_class
        _class_to_schema_name[message_class] = entry_point.name
    _registry_loaded = True
|
python
|
{
"resource": ""
}
|
q5529
|
get_message
|
train
|
def get_message(routing_key, properties, body):
    """
    Construct a Message instance given the routing key, the properties and the
    body received from the AMQP broker.

    Each missing piece of metadata (headers, schema, severity, content
    encoding) is logged as a misbehaving publisher and replaced with a
    sensible default rather than rejecting the message outright.

    Args:
        routing_key (str): The AMQP routing key (will become the message topic)
        properties (pika.BasicProperties): the AMQP properties
        body (bytes): The encoded message body

    Returns:
        Message: A validated instance of the schema class named in the headers.

    Raises:
        ValidationError: If Message validation failed or message body
            decoding/loading is impossible.
    """
    if properties.headers is None:
        _log.error(
            "Message (body=%r) arrived without headers. " "A publisher is misbehaving!",
            body,
        )
        properties.headers = {}
    try:
        MessageClass = get_class(properties.headers["fedora_messaging_schema"])
    except KeyError:
        _log.error(
            "Message (headers=%r, body=%r) arrived without a schema header."
            " A publisher is misbehaving!",
            properties.headers,
            body,
        )
        MessageClass = Message
    try:
        severity = properties.headers["fedora_messaging_severity"]
    except KeyError:
        _log.error(
            "Message (headers=%r, body=%r) arrived without a severity."
            " A publisher is misbehaving! Defaulting to INFO.",
            properties.headers,
            body,
        )
        severity = INFO
    if properties.content_encoding is None:
        _log.error("Message arrived without a content encoding")
        properties.content_encoding = "utf-8"
    # Decode, then parse: both failures are surfaced as ValidationError.
    try:
        body = body.decode(properties.content_encoding)
    except UnicodeDecodeError as e:
        _log.error(
            "Unable to decode message body %r with %s content encoding",
            body,
            properties.content_encoding,
        )
        raise ValidationError(e)
    try:
        body = json.loads(body)
    except ValueError as e:
        _log.error("Failed to load message body %r, %r", body, e)
        raise ValidationError(e)
    message = MessageClass(
        body=body, topic=routing_key, properties=properties, severity=severity
    )
    try:
        message.validate()
        _log.debug("Successfully validated message %r", message)
    except jsonschema.exceptions.ValidationError as e:
        _log.error("Message validation of %r failed: %r", message, e)
        raise ValidationError(e)
    return message
|
python
|
{
"resource": ""
}
|
q5530
|
dumps
|
train
|
def dumps(messages):
    """
    Serialize messages to a JSON formatted str

    Args:
        messages (list): The list of messages to serialize. Each message in
            the messages is subclass of Messge.

    Returns:
        str: Serialized messages.

    Raises:
        TypeError: If at least one message is not instance of Message class or subclass.
    """
    try:
        serialized_messages = [message._dump() for message in messages]
    except AttributeError:
        # Anything without a _dump() method is not a Message.
        _log.error("Improper object for messages serialization.")
        raise TypeError("Message have to be instance of Message class or subclass.")
    return json.dumps(serialized_messages, sort_keys=True)
|
python
|
{
"resource": ""
}
|
q5531
|
loads
|
train
|
def loads(serialized_messages):
    """
    Deserialize messages from a JSON formatted str

    Args:
        serialized_messages (JSON str): A JSON list where each element is a
            dict with "headers", "body", "id", "topic", and optionally
            "queue" keys, as produced by the matching ``dumps`` function.

    Returns:
        list: Deserialized message objects.

    Raises:
        ValidationError: If deserialized message validation failed.
        KeyError: If serialized_messages aren't properly serialized.
        ValueError: If serialized_messages is not valid JSON
    """
    try:
        messages_dicts = json.loads(serialized_messages)
    except ValueError:
        _log.error("Loading serialized messages failed.")
        raise
    messages = []
    for message_dict in messages_dicts:
        # Every field except "queue" is mandatory; a missing one means the
        # input wasn't produced by dumps(), so the KeyError is re-raised.
        try:
            headers = message_dict["headers"]
        except KeyError:
            _log.error("Message saved without headers.")
            raise
        try:
            MessageClass = get_class(headers["fedora_messaging_schema"])
        except KeyError:
            _log.error("Message (headers=%r) saved without a schema header.", headers)
            raise
        try:
            body = message_dict["body"]
        except KeyError:
            _log.error("Message saved without body.")
            raise
        try:
            id = message_dict["id"]
        except KeyError:
            _log.error("Message saved without id.")
            raise
        try:
            queue = message_dict["queue"]
        except KeyError:
            # "queue" is optional: warn but continue with None.
            _log.warning("Message saved without queue.")
            queue = None
        try:
            topic = message_dict["topic"]
        except KeyError:
            _log.error("Message saved without topic.")
            raise
        try:
            severity = headers["fedora_messaging_severity"]
        except KeyError:
            _log.error("Message saved without a severity.")
            raise
        message = MessageClass(
            body=body, topic=topic, headers=headers, severity=severity
        )
        try:
            message.validate()
            _log.debug("Successfully validated message %r", message)
        except jsonschema.exceptions.ValidationError as e:
            _log.error("Message validation of %r failed: %r", message, e)
            raise ValidationError(e)
        # queue and id are not constructor arguments; attach them afterwards.
        message.queue = queue
        message.id = id
        messages.append(message)
    return messages
|
python
|
{
"resource": ""
}
|
q5532
|
Message._filter_headers
|
train
|
def _filter_headers(self):
"""
Add headers designed for filtering messages based on objects.
Returns:
dict: Filter-related headers to be combined with the existing headers
"""
headers = {}
for user in self.usernames:
headers["fedora_messaging_user_{}".format(user)] = True
for package in self.packages:
headers["fedora_messaging_rpm_{}".format(package)] = True
for container in self.containers:
headers["fedora_messaging_container_{}".format(container)] = True
for module in self.modules:
headers["fedora_messaging_module_{}".format(module)] = True
for flatpak in self.flatpaks:
headers["fedora_messaging_flatpak_{}".format(flatpak)] = True
return headers
|
python
|
{
"resource": ""
}
|
q5533
|
Message._encoded_routing_key
|
train
|
def _encoded_routing_key(self):
    """The encoded routing key used to publish the message on the broker."""
    prefix = config.conf["topic_prefix"]
    if prefix:
        # Strip trailing dots from the prefix so we never emit "prefix..topic".
        full_topic = ".".join((prefix.rstrip("."), self.topic))
    else:
        full_topic = self.topic
    return full_topic.encode("utf-8")
|
python
|
{
"resource": ""
}
|
q5534
|
Message.validate
|
train
|
def validate(self):
    """
    Validate the headers and body with the message schema, if any.

    Every message is checked against both the schema declared by its own
    class and the base :class:`Message` schema, which requires certain
    message headers and that the body be a JSON object.

    .. warning:: This method should not be overridden by sub-classes.

    Raises:
        jsonschema.ValidationError: If either the message headers or the message body
            are invalid.
        jsonschema.SchemaError: If either the message header schema or the message body
            schema are invalid.
    """
    for headers_schema in (self.headers_schema, Message.headers_schema):
        _log.debug(
            'Validating message headers "%r" with schema "%r"',
            self._headers,
            headers_schema,
        )
        jsonschema.validate(self._headers, headers_schema)
    for body_schema in (self.body_schema, Message.body_schema):
        _log.debug(
            'Validating message body "%r" with schema "%r"', self.body, body_schema
        )
        jsonschema.validate(self.body, body_schema)
|
python
|
{
"resource": ""
}
|
q5535
|
Message._dump
|
train
|
def _dump(self):
"""
Dump message attributes.
Returns:
dict: A dictionary of message attributes.
"""
return {
"topic": self.topic,
"headers": self._headers,
"id": self.id,
"body": self.body,
"queue": self.queue,
}
|
python
|
{
"resource": ""
}
|
q5536
|
_check_callback
|
train
|
def _check_callback(callback):
"""
Turns a callback that is potentially a class into a callable object.
Args:
callback (object): An object that might be a class, method, or function.
if the object is a class, this creates an instance of it.
Raises:
ValueError: If an instance can't be created or it isn't a callable object.
TypeError: If the class requires arguments to be instantiated.
Returns:
callable: A callable object suitable for use as the consumer callback.
"""
# If the callback is a class, create an instance of it first
if inspect.isclass(callback):
callback_object = callback()
if not callable(callback_object):
raise ValueError(
"Callback must be a class that implements __call__ or a function."
)
elif callable(callback):
callback_object = callback
else:
raise ValueError(
"Callback must be a class that implements __call__ or a function."
)
return callback_object
|
python
|
{
"resource": ""
}
|
q5537
|
consume
|
train
|
def consume(callback, bindings=None, queues=None):
    """
    Start a message consumer that executes the provided callback when messages are
    received.

    This API is blocking and will not return until the process receives a signal
    from the operating system.

    .. warning:: This API runs the callback in the IO loop thread. If your
        callback could run for a length of time near the heartbeat interval
        (likely on the order of 60 seconds), the broker will kill the TCP
        connection and the message will be re-delivered on start-up. For
        long-running callbacks, use :func:`twisted_consume`, which runs the
        callback in a thread while continuing to handle AMQP events.

    The callback receives a single positional argument, the message:

    >>> from fedora_messaging import api
    >>> def my_callback(message):
    ...     print(message)
    >>> bindings = [{'exchange': 'amq.topic', 'queue': 'demo', 'routing_keys': ['#']}]
    >>> queues = {
    ...     "demo": {"durable": False, "auto_delete": True, "exclusive": True, "arguments": {}}
    ... }
    >>> api.consume(my_callback, bindings=bindings, queues=queues)

    If the bindings and queues arguments are not provided, they are loaded
    from the configuration. For complete documentation on writing consumers,
    see the :ref:`consumers` documentation.

    Args:
        callback (callable): A callable object that accepts one positional argument,
            a :class:`Message`, or a class object that implements the ``__call__``
            method (the class will be instantiated before use).
        bindings (dict or list of dict): Bindings to declare before consuming. This
            should be the same format as the :ref:`conf-bindings` configuration.
        queues (dict): The queue or queues to declare and consume from, in the same
            format as the :ref:`conf-queues` configuration dictionary, where each
            key is a queue name and each value is a dictionary of settings.

    Raises:
        fedora_messaging.exceptions.HaltConsumer: If the consumer requests that
            it be stopped.
        ValueError: If the callback isn't a function or a class implementing
            ``__call__``, if the bindings argument is not a dict or list of
            dicts with the proper keys, or if the queues argument isn't a
            dict with the proper keys.
    """
    # A single binding may be passed as a bare dict.
    if isinstance(bindings, dict):
        bindings = [bindings]
    try:
        if bindings is None:
            bindings = config.conf["bindings"]
        else:
            config.validate_bindings(bindings)
        if queues is None:
            queues = config.conf["queues"]
        else:
            config.validate_queues(queues)
    except exceptions.ConfigurationException as e:
        raise ValueError(e.message)
    _session.ConsumerSession().consume(callback, bindings=bindings, queues=queues)
|
python
|
{
"resource": ""
}
|
q5538
|
FedoraMessagingFactory._on_client_ready
|
train
|
def _on_client_ready(self):
    """
    Called when the client is ready to send and receive messages.

    Declares the configured exchanges, queues, and bindings, re-registers
    every known consumer, and then fires the ``_client_ready`` Deferred.

    NOTE(review): the bare ``yield`` statements suggest this method is
    decorated with ``defer.inlineCallbacks`` outside this view -- confirm.
    """
    _legacy_twisted_log.msg("Successfully connected to the AMQP broker.")
    # Resume message delivery before (re-)declaring AMQP objects.
    yield self.client.resumeProducing()
    yield self.client.declare_exchanges(self.exchanges)
    yield self.client.declare_queues(self.queues)
    yield self.client.bind_queues(self.bindings)
    # Re-attach every registered consumer on this (possibly new) connection.
    for queue, callback in self.consumers.items():
        yield self.client.consume(callback, queue)
    _legacy_twisted_log.msg("Successfully declared all AMQP objects.")
    # Signal to waiters (e.g. when_connected-style callers) that setup is done.
    self._client_ready.callback(None)
|
python
|
{
"resource": ""
}
|
q5539
|
FedoraMessagingFactory.clientConnectionLost
|
train
|
def clientConnectionLost(self, connector, reason):
    """
    Called when the connection to the broker has been lost.

    See the documentation of
    `twisted.internet.protocol.ReconnectingClientFactory` for details.
    """
    normal_shutdown = isinstance(reason.value, error.ConnectionDone)
    if not normal_shutdown:
        # Only unexpected losses are worth warning about.
        _legacy_twisted_log.msg(
            "Lost connection to the AMQP broker ({reason})",
            reason=reason.value,
            logLevel=logging.WARNING,
        )
    if self._client_ready.called:
        # Renew the ready deferred; it will fire once the next
        # connection is ready for use.
        self._client_ready = defer.Deferred()
    protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
|
python
|
{
"resource": ""
}
|
q5540
|
FedoraMessagingFactory.clientConnectionFailed
|
train
|
def clientConnectionFailed(self, connector, reason):
    """
    Called when the client has failed to connect to the broker.

    See the documentation of
    `twisted.internet.protocol.ReconnectingClientFactory` for details.
    """
    # Log the failure, then let the base class schedule the retry.
    _legacy_twisted_log.msg(
        "Connection to the AMQP broker failed ({reason})",
        reason=reason.value,
        logLevel=logging.WARNING,
    )
    base = protocol.ReconnectingClientFactory
    base.clientConnectionFailed(self, connector, reason)
|
python
|
{
"resource": ""
}
|
q5541
|
FedoraMessagingFactory.stopTrying
|
train
|
def stopTrying(self):
    """
    Stop trying to reconnect to the broker.

    See the documentation of
    `twisted.internet.protocol.ReconnectingClientFactory` for details.
    """
    protocol.ReconnectingClientFactory.stopTrying(self)
    if self._client_ready.called:
        return
    # Nobody will ever connect now, so fail any waiters.
    self._client_ready.errback(
        pika.exceptions.AMQPConnectionError(
            u"Could not connect, reconnection cancelled."
        )
    )
|
python
|
{
"resource": ""
}
|
q5542
|
FedoraMessagingFactory.consume
|
train
|
def consume(self, callback, queue):
    """
    Register a new consumer.

    This consumer will be configured for every protocol this factory
    produces so it will be reconfigured on network failures. If a connection
    is already active, the consumer will be added to it.

    Args:
        callback (callable): The callback to invoke when a message arrives.
        queue (str): The name of the queue to consume from.
    """
    self.consumers[queue] = callback
    if not self._client_ready.called:
        # Not connected yet; the consumer will be set up once the
        # client becomes ready.
        return
    return self.client.consume(callback, queue)
|
python
|
{
"resource": ""
}
|
q5543
|
FedoraMessagingFactoryV2.when_connected
|
train
|
def when_connected(self):
    """
    Retrieve the currently-connected Protocol, or the next one to connect.

    Returns:
        defer.Deferred: A Deferred that fires with a connected
            :class:`FedoraMessagingProtocolV2` instance. This is similar to
            the whenConnected method from the Twisted endpoints APIs, which
            is sadly isn't available before 16.1.0, which isn't available
            in EL7.
    """
    client = self._client
    if client and not client.is_closed:
        # A live connection exists; hand it back immediately.
        return defer.succeed(client)
    # Otherwise wait for the next connection to be established.
    return self._client_deferred
|
python
|
{
"resource": ""
}
|
q5544
|
FedoraMessagingFactoryV2.consume
|
train
|
def consume(self, callback, bindings, queues):
    """
    Start a consumer that lasts across individual connections.

    NOTE(review): the bare ``yield`` statements suggest this method is
    decorated with ``defer.inlineCallbacks`` outside this view -- confirm.

    Args:
        callback (callable): A callable object that accepts one positional argument,
            a :class:`Message` or a class object that implements the ``__call__``
            method. The class will be instantiated before use.
        bindings (dict or list of dict): Bindings to declare before consuming. This
            should be the same format as the :ref:`conf-bindings` configuration.
        queues (dict): The queues to declare and consume from. Each key in this
            dictionary is a queue, and each value is its settings as a dictionary.
            These settings dictionaries should have the "durable", "auto_delete",
            "exclusive", and "arguments" keys. Refer to :ref:`conf-queues` for
            details on their meanings.

    Returns:
        defer.Deferred:
            A deferred that fires with the list of one or more
            :class:`fedora_messaging.twisted.consumer.Consumer` objects.
            These can be passed to the
            :meth:`FedoraMessagingFactoryV2.cancel` API to halt them. Each
            consumer object has a ``result`` instance variable that is a
            Deferred that fires or errors when the consumer halts. The
            Deferred may error back with a BadDeclaration if the user does
            not have permissions to consume from the queue.
    """
    # Expand each binding's "routing_keys" list into individual
    # single-key binding dicts, grouped by queue name.
    expanded_bindings = collections.defaultdict(list)
    for binding in bindings:
        for key in binding["routing_keys"]:
            b = binding.copy()
            del b["routing_keys"]
            b["routing_key"] = key
            expanded_bindings[b["queue"]].append(b)
    # Flatten the {name: settings} mapping into per-queue declaration dicts.
    expanded_queues = []
    for name, settings in queues.items():
        q = {"queue": name}
        q.update(settings)
        expanded_queues.append(q)
    protocol = yield self.when_connected()
    consumers = []
    for queue in expanded_queues:
        yield protocol.declare_queues([queue])
        b = expanded_bindings.get(queue["queue"], [])
        yield protocol.bind_queues(b)
        consumer = yield protocol.consume(callback, queue["queue"])
        # Remember the declaration so the consumer can be recreated on
        # a new connection after a network failure.
        self._consumers[queue["queue"]] = (consumer, queue, b)
        consumers.append(consumer)
    defer.returnValue(consumers)
|
python
|
{
"resource": ""
}
|
q5545
|
FedoraMessagingFactoryV2.cancel
|
train
|
def cancel(self, consumers):
    """
    Cancel a consumer that was previously started with consume.

    NOTE(review): the bare ``yield`` statements suggest this method is
    decorated with ``defer.inlineCallbacks`` outside this view -- confirm.

    Args:
        consumers (list of fedora_messaging.api.Consumer): The consumers to cancel.
    """
    for consumer in consumers:
        # Forget the consumer first so it is not re-created on reconnect.
        del self._consumers[consumer.queue]
        protocol = yield self.when_connected()
        yield protocol.cancel(consumer)
|
python
|
{
"resource": ""
}
|
q5546
|
Consumer.cancel
|
train
|
def cancel(self):
    """
    Cancel the consumer and clean up resources associated with it.

    Consumers that are canceled are allowed to finish processing any
    messages before halting.

    NOTE(review): the bare ``yield`` statements suggest this method is
    decorated with ``defer.inlineCallbacks`` outside this view -- confirm.

    Returns:
        defer.Deferred: A deferred that fires when the consumer has finished
        processing any message it was in the middle of and has been successfully
        canceled.
    """
    # Remove it from protocol and factory so it doesn't restart later.
    try:
        del self._protocol._consumers[self.queue]
    except (KeyError, AttributeError):
        pass
    try:
        del self._protocol.factory._consumers[self.queue]
    except (KeyError, AttributeError):
        pass
    # Signal to the _read loop it's time to stop and wait for it to finish
    # with whatever message it might be working on, then wait for the deferred
    # to fire which indicates it is done.
    self._running = False
    yield self._read_loop
    try:
        yield self._channel.basic_cancel(consumer_tag=self._tag)
    except pika.exceptions.AMQPChannelError:
        # Consumers are tied to channels, so if this channel is dead the
        # consumer should already be canceled (and we can't get to it anyway)
        pass
    try:
        yield self._channel.close()
    except pika.exceptions.AMQPChannelError:
        # Best-effort close; a dead channel is already closed.
        pass
    # Fire the result Deferred exactly once so waiters are released.
    if not self.result.called:
        self.result.callback(self)
|
python
|
{
"resource": ""
}
|
q5547
|
validate_bindings
|
train
|
def validate_bindings(bindings):
    """
    Validate the bindings configuration.

    Raises:
        exceptions.ConfigurationException: If the configuration provided is of an
            invalid format.
    """
    if not isinstance(bindings, (list, tuple)):
        raise exceptions.ConfigurationException(
            "bindings must be a list or tuple of dictionaries, but was a {}".format(
                type(bindings)
            )
        )

    required_keys = ("queue", "exchange", "routing_keys")
    for binding in bindings:
        missing_keys = [key for key in required_keys if key not in binding]
        if missing_keys:
            raise exceptions.ConfigurationException(
                "a binding is missing the following keys from its settings "
                "value: {}".format(missing_keys)
            )
        if not isinstance(binding["routing_keys"], (list, tuple)):
            raise exceptions.ConfigurationException(
                "routing_keys must be a list or tuple, but was a {}".format(
                    type(binding["routing_keys"])
                )
            )
|
python
|
{
"resource": ""
}
|
q5548
|
validate_queues
|
train
|
def validate_queues(queues):
    """
    Validate the queues configuration.

    Raises:
        exceptions.ConfigurationException: If the configuration provided is of an
            invalid format.
    """
    if not isinstance(queues, dict):
        raise exceptions.ConfigurationException(
            "'queues' must be a dictionary mapping queue names to settings."
        )

    required_keys = ("durable", "auto_delete", "exclusive", "arguments")
    for queue, settings in queues.items():
        if not isinstance(settings, dict):
            raise exceptions.ConfigurationException(
                "the {} queue in the 'queues' setting has a value of type {}, but it "
                "should be a dictionary of settings.".format(queue, type(settings))
            )
        missing_keys = [key for key in required_keys if key not in settings]
        if missing_keys:
            raise exceptions.ConfigurationException(
                "the {} queue is missing the following keys from its settings "
                "value: {}".format(queue, missing_keys)
            )
|
python
|
{
"resource": ""
}
|
q5549
|
validate_client_properties
|
train
|
def validate_client_properties(props):
    """
    Validate the client properties setting.

    This will add the "version", "information", and "product" keys if they are
    missing. All other keys are application-specific.

    Raises:
        exceptions.ConfigurationException: If any of the basic keys are overridden.
    """
    reserved_defaults = DEFAULTS["client_properties"]
    for key in ("version", "information", "product"):
        # Nested dictionaries are not merged so key can be missing
        props.setdefault(key, reserved_defaults[key])
        # Don't let users override these as they identify this library in AMQP
        if props[key] != reserved_defaults[key]:
            raise exceptions.ConfigurationException(
                '"{}" is a reserved keyword in client_properties'.format(key)
            )
|
python
|
{
"resource": ""
}
|
q5550
|
LazyConfig._validate
|
train
|
def _validate(self):
    """
    Perform checks on the configuration to assert its validity

    Raises:
        ConfigurationException: If the configuration is invalid.
    """
    for key in self:
        if key in DEFAULTS:
            continue
        raise exceptions.ConfigurationException(
            'Unknown configuration key "{}"! Valid configuration keys are'
            " {}".format(key, list(DEFAULTS.keys()))
        )
    # Delegate detailed checks to the per-section validators.
    validate_queues(self["queues"])
    validate_bindings(self["bindings"])
    validate_client_properties(self["client_properties"])
|
python
|
{
"resource": ""
}
|
q5551
|
LazyConfig.load_config
|
train
|
def load_config(self, config_path=None):
    """
    Load application configuration from a file and merge it with the default
    configuration.

    If the ``FEDORA_MESSAGING_CONF`` environment variable is set to a
    filesystem path, the configuration will be loaded from that location.
    Otherwise, the path defaults to ``/etc/fedora-messaging/config.toml``.

    Args:
        config_path (str): Optional explicit path to the TOML file. Takes
            precedence over the environment variable and the default path.

    Returns:
        LazyConfig: This instance, updated with the merged configuration.

    Raises:
        exceptions.ConfigurationException: If the file cannot be parsed as
            TOML or the resulting configuration fails validation.
    """
    self.loaded = True
    # Start from a deep copy so DEFAULTS is never mutated.
    config = copy.deepcopy(DEFAULTS)
    if config_path is None:
        if "FEDORA_MESSAGING_CONF" in os.environ:
            config_path = os.environ["FEDORA_MESSAGING_CONF"]
        else:
            config_path = "/etc/fedora-messaging/config.toml"
    if os.path.exists(config_path):
        _log.info("Loading configuration from {}".format(config_path))
        with open(config_path) as fd:
            try:
                file_config = toml.load(fd)
                # Top-level keys are lower-cased so the file may use any case.
                for key in file_config:
                    config[key.lower()] = file_config[key]
            except toml.TomlDecodeError as e:
                msg = "Failed to parse {}: error at line {}, column {}: {}".format(
                    config_path, e.lineno, e.colno, e.msg
                )
                raise exceptions.ConfigurationException(msg)
    else:
        # A missing file is not an error; defaults are used instead.
        _log.info("The configuration file, {}, does not exist.".format(config_path))
    self.update(config)
    self._validate()
    return self
|
python
|
{
"resource": ""
}
|
q5552
|
_ssl_context_factory
|
train
|
def _ssl_context_factory(parameters):
    """
    Produce a Twisted SSL context object from a pika connection parameter object.

    This is necessary as Twisted manages the connection, not Pika.

    Args:
        parameters (pika.ConnectionParameters): The connection parameters built
            from the fedora_messaging configuration.

    Returns:
        The Twisted client TLS context factory (``optionsForClientTLS`` result,
        or ``CertificateOptions`` on very old Twisted).
    """
    client_cert = None
    ca_cert = None
    key = config.conf["tls"]["keyfile"]
    cert = config.conf["tls"]["certfile"]
    ca_file = config.conf["tls"]["ca_cert"]
    if ca_file:
        with open(ca_file, "rb") as fd:
            # Open it in binary mode since otherwise Twisted will immediately
            # re-encode it as ASCII, which won't work if the cert bundle has
            # comments that can't be encoded with ASCII.
            ca_cert = ssl.Certificate.loadPEM(fd.read())
    if key and cert:
        # Note that _configure_tls_parameters sets the auth mode to EXTERNAL
        # if both key and cert are defined, so we don't need to do that here.
        # The key and certificate are concatenated into one PEM blob because
        # PrivateCertificate.loadPEM expects both in a single string.
        with open(key) as fd:
            client_keypair = fd.read()
        with open(cert) as fd:
            client_keypair += fd.read()
        client_cert = ssl.PrivateCertificate.loadPEM(client_keypair)
    hostname = parameters.host
    if not isinstance(hostname, six.text_type):
        # Twisted requires the hostname as decoded text, which it isn't in Python 2
        # Decode with the system encoding since this came from the config file. Die,
        # Python 2, die.
        hostname = hostname.decode(locale.getdefaultlocale()[1])
    try:
        context_factory = ssl.optionsForClientTLS(
            hostname,
            trustRoot=ca_cert or ssl.platformTrust(),
            clientCertificate=client_cert,
            extraCertificateOptions={"raiseMinimumTo": ssl.TLSVersion.TLSv1_2},
        )
    except AttributeError:
        # Twisted 12.2 path for EL7 :(
        # NOTE(review): ``[ca_cert.original] or ssl.platformTrust()`` is
        # suspect -- a one-element list is always truthy, so platformTrust()
        # can never be selected, and if ca_cert is None the attribute access
        # raises AttributeError. Confirm whether ca_cert is required on this
        # legacy path.
        context_factory = ssl.CertificateOptions(
            certificate=client_cert.original,
            privateKey=client_cert.privateKey.original,
            caCerts=[ca_cert.original] or ssl.platformTrust(),
            verify=True,
            requireCertificate=True,
            verifyOnce=False,
            enableSessions=False,
        )
    return context_factory
|
python
|
{
"resource": ""
}
|
q5553
|
FedoraMessagingServiceV2.stopService
|
train
|
def stopService(self):
    """
    Gracefully stop the service.

    Stops reconnection attempts, shuts the AMQP factory down, and then
    stops the child services.

    NOTE(review): the bare ``yield`` statements suggest this method is
    decorated with ``defer.inlineCallbacks`` outside this view -- confirm.

    Returns:
        defer.Deferred: a Deferred which is triggered when the service has
        finished shutting down.
    """
    self._service.factory.stopTrying()
    yield self._service.factory.stopFactory()
    yield service.MultiService.stopService(self)
|
python
|
{
"resource": ""
}
|
q5554
|
_configure_tls_parameters
|
train
|
def _configure_tls_parameters(parameters):
    """
    Configure the pika connection parameters for TLS based on the configuration.

    This modifies the object provided to it. This accounts for whether or not
    the new API based on the standard library's SSLContext is available for
    pika.

    Args:
        parameters (pika.ConnectionParameters): The connection parameters to apply
            TLS connection settings to.

    Raises:
        ConfigurationException: If the CA certificate or the client key/cert
            pair cannot be loaded.
    """
    cert = config.conf["tls"]["certfile"]
    key = config.conf["tls"]["keyfile"]
    if cert and key:
        _log.info(
            "Authenticating with server using x509 (certfile: %s, keyfile: %s)",
            cert,
            key,
        )
        # Client certificate auth: switch to EXTERNAL (certificate-based)
        # credentials instead of username/password.
        parameters.credentials = pika.credentials.ExternalCredentials()
    else:
        # Only a full key+cert pair is usable; discard a partial configuration.
        cert, key = None, None

    # SSLOptions is presumably None when the installed pika predates the
    # SSLOptions API -- confirm at the import site.
    if SSLOptions is None:
        # Legacy pika API: plain dict of ssl options.
        parameters.ssl = True
        parameters.ssl_options = {
            "keyfile": key,
            "certfile": cert,
            "ca_certs": config.conf["tls"]["ca_cert"],
            "cert_reqs": ssl.CERT_REQUIRED,
            "ssl_version": ssl.PROTOCOL_TLSv1_2,
        }
    else:
        ssl_context = ssl.create_default_context()
        if config.conf["tls"]["ca_cert"]:
            try:
                ssl_context.load_verify_locations(cafile=config.conf["tls"]["ca_cert"])
            except ssl.SSLError as e:
                raise ConfigurationException(
                    'The "ca_cert" setting in the "tls" section is invalid ({})'.format(
                        e
                    )
                )
        # Disable all protocol versions below TLS 1.2.
        ssl_context.options |= ssl.OP_NO_SSLv2
        ssl_context.options |= ssl.OP_NO_SSLv3
        ssl_context.options |= ssl.OP_NO_TLSv1
        ssl_context.options |= ssl.OP_NO_TLSv1_1
        ssl_context.verify_mode = ssl.CERT_REQUIRED
        ssl_context.check_hostname = True
        if cert and key:
            try:
                ssl_context.load_cert_chain(cert, key)
            except ssl.SSLError as e:
                raise ConfigurationException(
                    'The "keyfile" setting in the "tls" section is invalid ({})'.format(
                        e
                    )
                )
        parameters.ssl_options = SSLOptions(
            ssl_context, server_hostname=parameters.host
        )
|
python
|
{
"resource": ""
}
|
q5555
|
ConsumerSession._shutdown
|
train
|
def _shutdown(self):
    """Gracefully shut down the consumer and exit."""
    if self._channel:
        _log.info("Halting %r consumer sessions", self._channel.consumer_tags)
    self._running = False
    connection = self._connection
    if connection and connection.is_open:
        connection.close()
    # Restore the default signal handlers now that we're exiting.
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
|
python
|
{
"resource": ""
}
|
q5556
|
ConsumerSession._on_cancelok
|
train
|
def _on_cancelok(self, cancel_frame):
    """
    Called when the server acknowledges a cancel request.

    Args:
        cancel_frame (pika.spec.Basic.CancelOk): The cancelok frame from
            the server.
    """
    _log.info("Consumer canceled; returning all unprocessed messages to the queue")
    # delivery_tag=0 with multiple=True nacks every outstanding unacked
    # message on this channel; requeue=True puts them back on the queue.
    self._channel.basic_nack(delivery_tag=0, multiple=True, requeue=True)
|
python
|
{
"resource": ""
}
|
q5557
|
ConsumerSession._on_channel_open
|
train
|
def _on_channel_open(self, channel):
    """
    Callback used when a channel is opened.

    This registers all the channel callbacks.

    Args:
        channel (pika.channel.Channel): The channel that successfully opened.
    """
    channel.add_on_close_callback(self._on_channel_close)
    channel.add_on_cancel_callback(self._on_cancel)
    # Apply the configured quality-of-service (prefetch) settings; the
    # declaration chain continues in _on_qosok.
    channel.basic_qos(callback=self._on_qosok, **config.conf["qos"])
|
python
|
{
"resource": ""
}
|
q5558
|
ConsumerSession._on_qosok
|
train
|
def _on_qosok(self, qosok_frame):
    """
    Callback invoked when the server acknowledges the QoS settings.

    Asserts or creates the exchanges and queues exist.

    Args:
        qosok_frame (pika.spec.Basic.Qos): The frame send from the server.
    """
    # With passive_declares enabled the declares only assert the objects
    # already exist instead of creating them -- presumably for brokers
    # where the client lacks configure permissions; confirm.
    for name, args in self._exchanges.items():
        self._channel.exchange_declare(
            exchange=name,
            exchange_type=args["type"],
            durable=args["durable"],
            auto_delete=args["auto_delete"],
            arguments=args["arguments"],
            passive=config.conf["passive_declares"],
            callback=self._on_exchange_declareok,
        )
    for name, args in self._queues.items():
        self._channel.queue_declare(
            queue=name,
            durable=args["durable"],
            auto_delete=args["auto_delete"],
            exclusive=args["exclusive"],
            arguments=args["arguments"],
            passive=config.conf["passive_declares"],
            callback=self._on_queue_declareok,
        )
|
python
|
{
"resource": ""
}
|
q5559
|
ConsumerSession._on_channel_close
|
train
|
def _on_channel_close(self, channel, reply_code_or_reason, reply_text=None):
    """
    Callback invoked when the channel is closed.

    Args:
        channel (pika.channel.Channel): The channel that got closed.
        reply_code_or_reason (int|Exception): The reason why the channel
            was closed. In older versions of pika, this is the AMQP code.
        reply_text (str): The human-readable reason for the channel's
            closure (only in older versions of pika).
    """
    reason = reply_code_or_reason
    if isinstance(reason, pika_errs.ChannelClosed):
        # Newer pika hands us an exception carrying both code and text.
        reply_code = reason.reply_code
        reply_text = reason.reply_text
    elif isinstance(reason, int):
        # Older pika passes the AMQP code and the text separately.
        reply_code = reason
    else:
        reply_code = 0
        reply_text = str(reason)
    _log.info("Channel %r closed (%d): %s", channel, reply_code, reply_text)
    self._channel = None
|
python
|
{
"resource": ""
}
|
q5560
|
ConsumerSession._on_connection_open
|
train
|
def _on_connection_open(self, connection):
    """
    Callback invoked when the connection is successfully established.

    Opens a channel; the setup chain continues in _on_channel_open.

    Args:
        connection (pika.connection.SelectConnection): The newly-estabilished
            connection.
    """
    _log.info("Successfully opened connection to %s", connection.params.host)
    self._channel = connection.channel(on_open_callback=self._on_channel_open)
|
python
|
{
"resource": ""
}
|
q5561
|
ConsumerSession._on_connection_close
|
train
|
def _on_connection_close(self, connection, reply_code_or_reason, reply_text=None):
    """
    Callback invoked when a previously-opened connection is closed.

    Args:
        connection (pika.connection.SelectConnection): The connection that
            was just closed.
        reply_code_or_reason (int|Exception): The reason why the channel
            was closed. In older versions of pika, this is the AMQP code.
        reply_text (str): The human-readable reason the connection was
            closed (only in older versions of pika)
    """
    self._channel = None

    reason = reply_code_or_reason
    if isinstance(reason, pika_errs.ConnectionClosed):
        # Newer pika hands us an exception carrying both code and text.
        reply_code = reason.reply_code
        reply_text = reason.reply_text
    elif isinstance(reason, int):
        reply_code = reason
    else:
        reply_code = 0
        reply_text = str(reason)

    if reply_code != 200:
        _log.warning(
            "Connection to %s closed unexpectedly (%d): %s",
            connection.params.host,
            reply_code,
            reply_text,
        )
        self.call_later(1, self.reconnect)
    else:
        # Code 200 is a normal shutdown; stop the loop and exit the consumer.
        _log.info("Server connection closed (%s), shutting down", reply_text)
        connection.ioloop.stop()
|
python
|
{
"resource": ""
}
|
q5562
|
ConsumerSession._on_connection_error
|
train
|
def _on_connection_error(self, connection, error_message):
    """
    Callback invoked when the connection failed to be established.

    Logs the failure and schedules a reconnection attempt in one second.

    Args:
        connection (pika.connection.SelectConnection): The connection that
            failed to open.
        error_message (str): The reason the connection couldn't be opened.
    """
    self._channel = None
    if isinstance(error_message, pika_errs.AMQPConnectionError):
        # Some pika versions wrap the message in an exception; unwrap it.
        error_message = repr(error_message.args[0])
    _log.error(error_message)
    self.call_later(1, self.reconnect)
|
python
|
{
"resource": ""
}
|
q5563
|
ConsumerSession._on_queue_declareok
|
train
|
def _on_queue_declareok(self, frame):
    """
    Callback invoked when a queue is successfully declared.

    Binds the queue to its configured exchanges and starts a consumer on it.

    Args:
        frame (pika.frame.Method): The message sent from the server.
    """
    queue_name = frame.method.queue
    _log.info("Successfully declared the %s queue", queue_name)
    for binding in self._bindings:
        if binding["queue"] == queue_name:
            for key in binding["routing_keys"]:
                _log.info(
                    "Asserting %s is bound to %s with the %s key",
                    binding["queue"],
                    binding["exchange"],
                    key,
                )
                self._channel.queue_bind(
                    callback=None,
                    queue=binding["queue"],
                    exchange=binding["exchange"],
                    routing_key=key,
                )
    bc_args = dict(queue=queue_name)
    # The consumer callback keyword argument was renamed in pika 1.0.
    if _pika_version < pkg_resources.parse_version("1.0.0b1"):
        bc_args["consumer_callback"] = self._on_message
    else:
        bc_args["on_message_callback"] = self._on_message
    tag = self._channel.basic_consume(**bc_args)
    # Record which queue this consumer tag belongs to. Use the queue from the
    # frame rather than the ``binding`` loop variable: that variable is unbound
    # when there are no bindings, and otherwise refers to the *last* binding
    # iterated, which may be for a different queue.
    self._consumers[tag] = queue_name
|
python
|
{
"resource": ""
}
|
q5564
|
ConsumerSession.call_later
|
train
|
def call_later(self, delay, callback):
    """Schedule a one-shot timeout given delay seconds.

    This method is only useful for compatibility with older versions of pika.

    Args:
        delay (float): Non-negative number of seconds from now until
            expiration
        callback (method): The callback method, having the signature
            `callback()`
    """
    ioloop = self._connection.ioloop
    scheduler = getattr(ioloop, "call_later", None)
    if scheduler is None:
        # Older pika ioloops only offer add_timeout.
        ioloop.add_timeout(delay, callback)
    else:
        scheduler(delay, callback)
|
python
|
{
"resource": ""
}
|
q5565
|
ConsumerSession.reconnect
|
train
|
def reconnect(self):
    """Will be invoked by the IOLoop timer if the connection is
    closed. See the _on_connection_close method.
    """
    # Stop the ioloop belonging to the old, dead connection.
    self._connection.ioloop.stop()
    if not self._running:
        return
    # Build a fresh connection and drive it with its own ioloop.
    self.connect()
    self._connection.ioloop.start()
|
python
|
{
"resource": ""
}
|
q5566
|
ConsumerSession.consume
|
train
|
def consume(self, callback, bindings=None, queues=None, exchanges=None):
    """
    Consume messages from a message queue.

    Simply define a callable to be used as the callback when messages are
    delivered and specify the queue bindings. This call blocks. The callback
    signature should accept a single positional argument which is an
    instance of a :class:`Message` (or a sub-class of it).

    Args:
        callback (callable): The callable to pass the message to when one
            arrives.
        bindings (list of dict): A list of dictionaries describing bindings
            for queues. Refer to the :ref:`conf-bindings` configuration
            documentation for the format.
        queues (dict): A dictionary of queues to ensure exist. Refer to the
            :ref:`conf-queues` configuration documentation for the format.
        exchanges (dict): A dictionary of exchanges to ensure exist. Refer
            to the :ref:`conf-exchanges` configuration documentation for the
            format.

    Raises:
        HaltConsumer: Raised when the consumer halts.
        ValueError: If the callback isn't a callable object or a class with
            __call__ defined.
    """
    # Fall back to the application configuration for anything not given.
    self._bindings = bindings or config.conf["bindings"]
    self._queues = queues or config.conf["queues"]
    self._exchanges = exchanges or config.conf["exchanges"]

    error_message = (
        "Callback must be a class that implements __call__ or a function."
    )
    # If the callback is a class, create an instance of it first
    if inspect.isclass(callback):
        callback = callback()
    if not callable(callback):
        raise ValueError(error_message)
    self._consumer_callback = callback

    self._running = True
    self.connect()
    # Blocks until the ioloop is stopped by a shutdown or failure.
    self._connection.ioloop.start()
|
python
|
{
"resource": ""
}
|
q5567
|
get_avatar
|
train
|
def get_avatar(from_header, size=64, default="retro"):
    """Get the avatar URL from the email's From header.

    Args:
        from_header (str): The email's From header. May contain the sender's full name.
        size (int): The avatar size in pixels (it's a square).
        default (str): The fallback avatar style when none is registered.

    Returns:
        str: The URL to that sender's avatar.
    """
    # Only the address part of the header is hashed, not the display name.
    address = email.utils.parseaddr(from_header)[1]
    digest = sha256(address.encode("utf-8")).hexdigest()
    # OrderedDict keeps the query-string parameter order deterministic.
    query = parse.urlencode(OrderedDict([("s", size), ("d", default)]))
    return "https://seccdn.libravatar.org/avatar/{}?{}".format(digest, query)
|
python
|
{
"resource": ""
}
|
q5568
|
BaseMessage.url
|
train
|
def url(self):
    """An URL to the email in HyperKitty

    Returns:
        str or None: A relevant URL.
    """
    location = self._get_archived_at()
    if not location:
        return None
    # Strip a single pair of surrounding angle brackets, if present.
    if location.startswith("<"):
        location = location[1:]
    if location.endswith(">"):
        location = location[:-1]
    if location.startswith("http"):
        # Already an absolute URL.
        return location
    if location:
        # A path relative to the HyperKitty archive root.
        return "https://lists.fedoraproject.org/archives" + location
    return None
|
python
|
{
"resource": ""
}
|
q5569
|
user_avatar_url
|
train
|
def user_avatar_url(username, size=64, default="retro"):
    """Get the avatar URL of the provided Fedora username.

    The URL is returned from the Libravatar service.

    Args:
        username (str): The username to get the avatar of.
        size (int): Size of the avatar in pixels (it's a square).
        default (str): Default avatar to return if not found.

    Returns:
        str: The URL to the avatar image.
    """
    # Fedora usernames map to a well-known OpenID identity URL.
    openid = "http://%s.id.fedoraproject.org/" % username
    return libravatar_url(openid=openid, size=size, default=default)
|
python
|
{
"resource": ""
}
|
q5570
|
libravatar_url
|
train
|
def libravatar_url(email=None, openid=None, size=64, default="retro"):
    """Get the URL to an avatar from libravatar.

    Either the user's email or openid must be provided.

    If you want to use Libravatar federation (through DNS), you should install
    and use the ``libravatar`` library instead. Check out the
    ``libravatar.libravatar_url()`` function.

    Args:
        email (str): The user's email
        openid (str): The user's OpenID
        size (int): Size of the avatar in pixels (it's a square).
        default (str): Default avatar to return if not found.

    Returns:
        str: The URL to the avatar image.

    Raises:
        ValueError: If neither email nor openid are provided.
    """
    # The email takes precedence when both identifiers are supplied.
    value = email or openid
    if not value:
        raise ValueError("You must provide either the email or the openid.")
    idhash = sha256(value.encode("utf-8")).hexdigest()
    # We use an OrderedDict here to make testing easier (URL strings become
    # predictable).
    query = parse.urlencode(collections.OrderedDict([("s", size), ("d", default)]))
    return "https://seccdn.libravatar.org/avatar/%s?%s" % (idhash, query)
|
python
|
{
"resource": ""
}
|
q5571
|
get_all_fields
|
train
|
def get_all_fields(obj):
    """Returns a list of all field names on the instance.

    Each entry is a dict with ``label`` (the field's verbose name), ``name``
    and ``value``. Fields that are not editable or whose value is falsy are
    skipped; list values are joined into a comma-separated string.
    """
    collected = []
    for field in obj._meta.fields:
        display_method = getattr(obj, "get_%s_display" % field.name, None)
        if display_method is not None:
            # Choice fields expose a get_<name>_display accessor.
            value = display_method()
        else:
            try:
                value = getattr(obj, field.name)
            except Exception:
                # Unreadable attributes are treated as empty.
                value = None
        if isinstance(value, list):
            value = ",".join(str(item) for item in value)
        if field.editable and value and field.name:
            collected.append(
                {"label": field.verbose_name, "name": field.name, "value": value}
            )
    return collected
|
python
|
{
"resource": ""
}
|
q5572
|
query_string
|
train
|
def query_string(context, **kwargs):
    """Add param to the given query string"""
    # Work on a copy so the request's own GET data is untouched.
    params = context["request"].GET.copy()
    for name, value in kwargs.items():
        # Item assignment replaces any existing value for the key.
        params[name] = value
    return "?" + params.urlencode()
|
python
|
{
"resource": ""
}
|
q5573
|
arctic_url
|
train
|
def arctic_url(context, link, *args, **kwargs):
    """
    Resolves links into urls with optional
    arguments set in self.urls. please check get_urls method in View.
    We could tie this to check_url_access() to check for permissions,
    including object-level.

    Args:
        context: The template context; expected to hold "urls" and, for
            list rows, "item" and "v".
        link: Either a URL name (str) or a (name, arg1, ...) tuple/list;
            the tuple form also registers its args in context["urls"].
        *args: Positional URL arguments used when the link has no entry
            in context["urls"].
    """
    def reverse_mutable_url_args(url_args):
        # Resolve "mutable" arguments against the current list row.
        mutated_url_args = []
        for arg in url_args:
            # listview item, and argument is a string
            if "item" in context and type(arg) == str:
                # try to get attribute of this object
                try:
                    arg = getattr(context["v"], arg.split(".")[-1])
                # if not found fallback to row pk, which is always first column
                except Exception:
                    arg = context["item"][0]
            mutated_url_args.append(arg)
        return reverse(link, args=mutated_url_args, kwargs=None)
    url_args = args
    # set arguments defined in urls if provided
    if type(link) in (tuple, list):
        # NOTE: this mutates context["urls"], registering the args for
        # later lookups of the same link name.
        context["urls"][link[0]] = list(link[1:])
        link = link[0]
    if link in context["urls"]:
        # for where the params directly given. e.g. ('article-detail',
        # (self.object.pk,))
        url_args = context["urls"][link]
        # list given, which means it's mutable!
        if isinstance(url_args, list):
            return reverse_mutable_url_args(url_args)
    return reverse(link, args=url_args, kwargs=None)
|
python
|
{
"resource": ""
}
|
q5574
|
get_role_model
|
train
|
def get_role_model():
    """
    Returns the Role model that is active in this project.

    The model is named by the ``ARCTIC_ROLE_MODEL`` setting
    ("app_label.model_name"), defaulting to "arctic.Role".

    Raises:
        ImproperlyConfigured: If the setting is malformed or names a model
            that is not installed.
    """
    app_model = getattr(settings, "ARCTIC_ROLE_MODEL", "arctic.Role")
    try:
        return django_apps.get_model(app_model)
    except ValueError:
        # get_model raises ValueError for a string without a dot.
        raise ImproperlyConfigured(
            "ARCTIC_ROLE_MODEL must be of the form 'app_label.model_name'"
        )
    except LookupError:
        raise ImproperlyConfigured(
            "ARCTIC_ROLE_MODEL refers to model '%s' that has not been "
            "installed" % settings.ARCTIC_ROLE_MODEL
        )
|
python
|
{
"resource": ""
}
|
q5575
|
get_user_role_model
|
train
|
def get_user_role_model():
    """
    Returns the UserRole model that is active in this project.

    The model is named by the ``ARCTIC_USER_ROLE_MODEL`` setting
    ("app_label.model_name"), defaulting to "arctic.UserRole".

    Raises:
        ImproperlyConfigured: If the setting is malformed or names a model
            that is not installed.
    """
    app_model = getattr(settings, "ARCTIC_USER_ROLE_MODEL", "arctic.UserRole")
    try:
        return django_apps.get_model(app_model)
    except ValueError:
        # get_model raises ValueError for a string without a dot.
        raise ImproperlyConfigured(
            "ARCTIC_USER_ROLE_MODEL must be of the form 'app_label.model_name'"
        )
    except LookupError:
        raise ImproperlyConfigured(
            "ARCTIC_USER_ROLE_MODEL refers to model '%s' that has not been "
            "installed" % settings.ARCTIC_USER_ROLE_MODEL
        )
|
python
|
{
"resource": ""
}
|
q5576
|
View.dispatch
|
train
|
def dispatch(self, request, *args, **kwargs):
    """
    Most views in a CMS require a login, so this is the default setup.

    If a login is not required then the requires_login property
    can be set to False to disable this.
    """
    if not self.requires_login:
        return super(View, self).dispatch(request, *args, **kwargs)

    if settings.LOGIN_URL is None or settings.LOGOUT_URL is None:
        raise ImproperlyConfigured(
            "LOGIN_URL and LOGOUT_URL has to be defined if requires_login is True"
        )
    if not request.user.is_authenticated:
        # Send anonymous users to the login page, preserving the target URL.
        login_url = resolve_url(settings.LOGIN_URL)
        next_url = quote(request.get_full_path())
        return redirect("%s?next=%s" % (login_url, next_url))
    return super(View, self).dispatch(request, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q5577
|
View.media
|
train
|
def media(self):
    """
    Return all media required to render this view, including forms.
    """
    return (
        self._get_common_media()
        + self._get_view_media()
        + self.get_media_assets()
    )
|
python
|
{
"resource": ""
}
|
q5578
|
View._get_view_media
|
train
|
def _get_view_media(self):
    """
    Gather view-level media assets

    Reads the optional inner ``Media`` class; missing attributes fall back
    to empty css/js collections.
    """
    media_cls = getattr(self, "Media", None)
    css = getattr(media_cls, "css", {})
    js = getattr(media_cls, "js", [])
    return Media(css=css, js=js)
|
python
|
{
"resource": ""
}
|
q5579
|
DataListView.get_paginator
|
train
|
def get_paginator(
    self,
    dataset,
    per_page,
    orphans=0,
    allow_empty_first_page=True,
    **kwargs
):
    """Return an instance of the paginator for this view.

    Args:
        dataset: The object list to paginate.
        per_page (int): Number of items per page.
        orphans (int): Minimum number of items allowed on the last page.
        allow_empty_first_page (bool): Whether an empty first page is allowed.
        **kwargs: Extra keyword arguments forwarded to the paginator.

    Returns:
        IndefinitePaginator: presumably a paginator that works without
        knowing the dataset's total count -- TODO confirm.
    """
    return IndefinitePaginator(
        dataset,
        per_page,
        orphans=orphans,
        allow_empty_first_page=allow_empty_first_page,
        **kwargs
    )
|
python
|
{
"resource": ""
}
|
q5580
|
DeleteView.get
|
train
|
def get(self, request, *args, **kwargs):
        """
        Render the delete confirmation page, or delete immediately.

        Runs Django's deletion Collector first so PROTECT'ed relations are
        reported to the user instead of raising a server error.
        """
        self.object = self.get_object()
        ctx = {
            "object": self.object,
            "can_delete": True,
            "collector_message": None,
            "protected_objects": [],
        }
        try:
            Collector(using="default").collect([self.object])
        except ProtectedError as exc:
            ctx["can_delete"] = False
            ctx["collector_message"] = (
                "Cannot delete %s because it has relations "
                "that depends on it." % self.object
            )
            ctx["protected_objects"] = exc.protected_objects
        if ctx["can_delete"] and self.redirect:
            messages.success(request, self.get_success_message(self.object))
            return self.delete(request, *args, **kwargs)
        return self.render_to_response(self.get_context_data(**ctx))
|
python
|
{
"resource": ""
}
|
q5581
|
ModalMixin.get_modal_link
|
train
|
def get_modal_link(self, url, obj=None):
        """
        Return the modal-link metadata registered for ``url``, or None.

        For ``confirm`` links the ``message`` and ``title`` templates are
        formatted with the row object's fields; ``iframe`` links get a
        default ``size`` of "medium".

        :param url: key into ``self.modal_links``.
        :param obj: row object (or dict of field values) used to fill the
            ``{field}`` placeholders; defaults to no substitutions.
        :raises ImproperlyConfigured: when a link definition misses a
            required attribute or uses an unsupported type.
        """
        if url not in self.modal_links:
            return None
        if obj is None:
            # fresh dict per call -- the old mutable ``obj={}`` default was
            # shared between calls
            obj = {}
        try:
            if type(obj) != dict:
                obj.obj = str(obj)
                obj = vars(obj)
            # work on a copy so the templates stored in self.modal_links are
            # not overwritten with the first row's formatted values
            link = dict(self.modal_links[url])
            if link["type"] == "confirm":
                link["message"] = link["message"].format(**obj)
                link["title"] = link["title"].format(**obj)
                link["ok"]  # triggers a KeyError exception if not existent
                link["cancel"]
            elif link["type"] == "iframe":
                link.setdefault("size", "medium")
            else:
                raise ImproperlyConfigured(
                    "modal_links type: " + link["type"] + " is unsupported"
                )
            return link
        except KeyError as e:
            raise ImproperlyConfigured(
                "modal_links misses the following attribute: " + str(e)
            )
        except AttributeError:
            return None
|
python
|
{
"resource": ""
}
|
q5582
|
FormMixin.get_success_url
|
train
|
def get_success_url(self):
        """Return the URL to redirect to after processing a valid form."""
        if self.success_url:
            return self.in_modal(str(self.success_url))
        if self.request.GET.get("inmodal"):
            # modal forms bounce back to the parent window instead
            return reverse("arctic:redirect_to_parent")
        raise ImproperlyConfigured(
            "No URL to redirect to. Provide a success_url."
        )
|
python
|
{
"resource": ""
}
|
q5583
|
FormMixin._set_has_no_columns
|
train
|
def _set_has_no_columns(self, has_no_column, col_avg, col_last, fields):
"""
Regenerate has_no_column by adding the amount of columns at the end
"""
for index, field in has_no_column.items():
if index == len(has_no_column):
field_name = "{field}|{col_last}".format(
field=field, col_last=col_last
)
has_no_column[index] = self._return_field(field_name, fields)
else:
field_name = "{field}|{col_avg}".format(
field=field, col_avg=col_avg
)
has_no_column[index] = self._return_field(field_name, fields)
return has_no_column
|
python
|
{
"resource": ""
}
|
q5584
|
FormMixin._return_fieldset
|
train
|
def _return_fieldset(self, fieldset):
"""
This function became a bit messy, since it needs to deal with two
cases.
1) No fieldset, which is represented as an integer
2) A fieldset
"""
collapsible = None
description = None
try:
# Make sure strings with numbers work as well, do this
int(str(fieldset))
title = None
except ValueError:
if fieldset.count("|") > 1:
raise ImproperlyConfigured(
"The fieldset name does not "
"support more than one | sign. "
"It's meant to separate a "
"fieldset from its description."
)
title = fieldset
if "|" in fieldset:
title, description = fieldset.split("|")
if fieldset and (fieldset[0] in "-+"):
if fieldset[0] == "-":
collapsible = "closed"
else:
collapsible = "open"
title = title[1:]
return {
"title": title,
"description": description,
"collapsible": collapsible,
}
|
python
|
{
"resource": ""
}
|
q5585
|
ListMixin.ordering_url
|
train
|
def ordering_url(self, field_name):
        """
        Creates a url link for sorting the given field.

        The direction of sorting will be either ascending, if the field is
        not yet sorted, or the opposite of the current sorting if sorted.

        :param field_name: display field name, mapped to a model lookup via
            ``_get_ordering_field_lookup``.
        :return: ``(url, direction)`` tuple; ``direction`` reflects the
            field's current sort state ("", "asc" or "desc").
        """
        path = self.request.path
        direction = ""
        query_params = self.request.GET.copy()
        # current ordering as requested by the client, e.g. "-name,id"
        ordering = self.request.GET.get("order", "").split(",")
        field = self._get_ordering_field_lookup(field_name)
        # NOTE(review): "".split(",") returns [""], which is truthy, so this
        # fallback to the default ordering can never trigger -- verify
        # whether that is intended.
        if not ordering:
            ordering = self.get_default_ordering()
        merged_ordering = list(ordering)  # copy the list
        # append every orderable field that is not already part of the
        # current ordering, so each field is considered exactly once below
        for ordering_field in self.get_ordering_fields_lookups():
            if (ordering_field.lstrip("-") not in ordering) and (
                ("-" + ordering_field.lstrip("-")) not in ordering
            ):
                merged_ordering.append(ordering_field)
        new_ordering = []
        for item in merged_ordering:
            if item.lstrip("-") == field.lstrip("-"):
                # toggle the clicked field: descending (or unsorted)
                # becomes an ascending link, ascending becomes descending
                if (item[0] == "-") or not (item in ordering):
                    if item in ordering:
                        direction = "desc"
                    new_ordering.insert(0, item.lstrip("-"))
                else:
                    direction = "asc"
                    new_ordering.insert(0, "-" + item)
        query_params["order"] = ",".join(new_ordering)
        return (path + "?" + query_params.urlencode(safe=","), direction)
|
python
|
{
"resource": ""
}
|
q5586
|
ListMixin.get_fields
|
train
|
def get_fields(self, strip_labels=False):
        """
        Hook to dynamically change the fields that will be displayed.

        With ``strip_labels`` every ``(name, label)`` pair collapses to
        just the field name.
        """
        if not strip_labels:
            return self.fields
        stripped = []
        for entry in self.fields:
            if type(entry) in (tuple, list):
                stripped.append(entry[0])
            else:
                stripped.append(entry)
        return stripped
|
python
|
{
"resource": ""
}
|
q5587
|
ListMixin.get_ordering_fields_lookups
|
train
|
def get_ordering_fields_lookups(self):
        """
        Map every orderable field name to its real model-field lookup.
        """
        return [
            self._get_ordering_field_lookup(name)
            for name in self.get_ordering_fields()
        ]
|
python
|
{
"resource": ""
}
|
q5588
|
ListMixin._get_ordering_field_lookup
|
train
|
def _get_ordering_field_lookup(self, field_name):
"""
get real model field to order by
"""
field = field_name
get_field = getattr(self, "get_%s_ordering_field" % field_name, None)
if get_field:
field = get_field()
return field
|
python
|
{
"resource": ""
}
|
q5589
|
ListMixin.get_advanced_search_form
|
train
|
def get_advanced_search_form(self, data):
        """
        Hook to dynamically change the advanced search form.

        Instantiates (and caches) the form only when a form class is
        configured; otherwise the previously cached form is returned.
        """
        form_class = self.get_advanced_search_form_class()
        if form_class:
            # call the getter again, mirroring the original double lookup
            self._advanced_search_form = self.get_advanced_search_form_class()(
                data=data
            )
        return self._advanced_search_form
|
python
|
{
"resource": ""
}
|
q5590
|
RoleAuthentication.sync
|
train
|
def sync(cls):
        """
        Save all the roles defined in the settings that are not yet in the db
        this is needed to create a foreign key relation between a user and a
        role. Roles that are no longer specified in settings are set as
        inactive.
        """
        # NOTE(review): presumably invoked as a classmethod (decorator not
        # visible here) -- confirm @classmethod is applied at the class.
        try:
            settings_roles = set(settings.ARCTIC_ROLES.keys())
        except AttributeError:
            # settings without ARCTIC_ROLES: treat as "no roles configured"
            settings_roles = set()
        saved_roles = set(Role.objects.values_list("name", flat=True))
        # roles only in settings must be created; roles only in the db
        # (except the reserved admin role) must be deactivated
        unsaved_roles = settings_roles - saved_roles
        unused_roles = saved_roles - settings_roles - set([cls.ADMIN])
        # ensure that admin is not defined in settings
        if cls.ADMIN in settings_roles:
            raise ImproperlyConfigured(
                '"' + cls.ADMIN + '" role is reserved '
                "and cannot be defined in settings"
            )
        # ensure that admin exists in the database
        if cls.ADMIN not in saved_roles:
            Role(name=cls.ADMIN, is_active=True).save()
        # check if the role defined in settings already exists in the database
        # and if it does ensure it is enabled.
        for role in saved_roles:
            if role in settings_roles:
                saved_role = Role.objects.get(name=role)
                if not saved_role.is_active:
                    saved_role.is_active = True
                    saved_role.save()
        for role in unsaved_roles:
            Role(name=role).save()
        for role in unused_roles:
            unused_role = Role.objects.get(name=role)
            unused_role.is_active = False
            unused_role.save()
|
python
|
{
"resource": ""
}
|
q5591
|
RoleAuthentication.get_permission_required
|
train
|
def get_permission_required(cls):
        """
        Return the permissions required to access this view, as a tuple.

        ``permission_required`` may be a single permission name (an empty
        string means "no permissions needed") or an iterable of names;
        ``None`` is a configuration error.

        :raises ImproperlyConfigured: when ``permission_required`` is unset.
        """
        if cls.permission_required is None:
            raise ImproperlyConfigured(
                "{0} is missing the permission_required attribute. "
                "Define {0}.permission_required, or override "
                "{0}.get_permission_required().".format(cls.__name__)
            )
        # isinstance(..., str) replaces six.string_types: this codebase is
        # Python 3 only (see str_to_bool), so the two are equivalent.
        if isinstance(cls.permission_required, str):
            if cls.permission_required != "":
                return (cls.permission_required,)
            return ()
        return cls.permission_required
|
python
|
{
"resource": ""
}
|
q5592
|
RoleAuthentication.has_permission
|
train
|
def has_permission(cls, user):
        """
        Permission check based on Arctic roles instead of Django perms.

        Anonymous access is allowed when no login is required; admins and
        views with an empty permission list always pass; otherwise at
        least one required permission must be granted to the user's role.
        """
        # no login is needed, so its always fine
        if not cls.requires_login:
            return True
        # if user is somehow not logged in
        if not user.is_authenticated:
            return False
        perms = cls.get_permission_required()
        # if perms are defined and empty, we skip checking
        if not perms:
            return True
        # get role of user, skip admin role
        role = user.urole.role.name
        if role == cls.ADMIN:
            return True
        # at least one permission must be granted
        for permission in perms:
            if cls.check_permission(role, permission):
                return True
        return False
|
python
|
{
"resource": ""
}
|
q5593
|
RoleAuthentication.check_permission
|
train
|
def check_permission(cls, role, permission):
        """
        Check whether ``role`` grants ``permission``.

        When the role lists the permission and the class defines a method
        named after it, that method is called for an object-level check;
        otherwise the plain membership result decides.
        """
        granted = permission in settings.ARCTIC_ROLES[role]
        if not granted:
            return granted
        # a method named after the permission enables object-level checks
        try:
            return getattr(cls, permission)(role)
        except AttributeError:
            # no such method: fall back to the membership result
            return granted
|
python
|
{
"resource": ""
}
|
q5594
|
str_to_bool
|
train
|
def str_to_bool(val):
    """
    Interpret truthy string values ("true"/"on"/"yes", case-insensitive)
    and boolean True as True; anything else is False.
    """
    normalized = val.lower() if isinstance(val, str) else val
    return normalized in ("true", "on", "yes", True)
|
python
|
{
"resource": ""
}
|
q5595
|
menu
|
train
|
def menu(menu_config=None, **kwargs):
    """
    Transform a menu definition into an OrderedDict that is friendlier to
    parse in a template, recursing into submenus.
    """
    request = kwargs.pop("request", None)
    user = kwargs.pop("user", None)
    url_full_name = ":".join(
        [request.resolver_match.namespace, request.resolver_match.url_name]
    )
    if not menu_config:
        menu_config = settings.ARCTIC_MENU
    menu_dict = OrderedDict()
    for menu_entry in menu_config:
        if type(menu_entry) not in (list, tuple):
            continue
        # permission check based on the entry's named url
        path = None
        if menu_entry[1]:
            if not view_from_url(menu_entry[1]).has_permission(user):
                continue
            path = reverse(menu_entry[1])
        # icons and collapse are optional
        icon = None
        if (len(menu_entry) >= 3) and (
            type(menu_entry[2]) not in (list, tuple)
        ):
            icon = menu_entry[2]
        menu_dict[menu_entry[0]] = {
            "url": menu_entry[1],
            "icon": icon,
            "submenu": None,
            "active": is_active(menu_entry, url_full_name),
            "active_weight": len(path) if path else 0,
        }
        # the last item of an entry may be a submenu definition
        submenu = _get_submenu(menu_entry)
        if submenu:
            menu_dict[menu_entry[0]]["submenu"] = menu(
                submenu, user=user, request=request
            )
    return menu_clean(menu_dict)
|
python
|
{
"resource": ""
}
|
q5596
|
view_from_url
|
train
|
def view_from_url(named_url): # noqa
    """
    Finds and returns the view class from a named url.

    Accepts "namespace:...:url_name" strings (or a list/tuple whose first
    element is such a string). URLs starting with "#" are in-page links
    and get a dummy view whose has_permission() always returns True.
    """
    # code below is `stolen` from django's reverse method.
    resolver = get_resolver(get_urlconf())
    if type(named_url) in (list, tuple):
        named_url = named_url[0]
    # split "ns1:ns2:view" into the view name and its namespace chain
    parts = named_url.split(":")
    parts.reverse()
    view = parts[0]
    path = parts[1:]
    current_path = None
    resolved_path = []
    ns_pattern = ""
    ns_converters = {}
    # if it's a local url permission already given, so we just return true
    if named_url.startswith("#"):
        class LocalUrlDummyView:
            @staticmethod
            def has_permission(user):
                return True
        return LocalUrlDummyView
    while path:
        ns = path.pop()
        current_ns = current_path.pop() if current_path else None
        # Lookup the name to see if it could be an app identifier
        try:
            app_list = resolver.app_dict[ns]
            # Yes! Path part matches an app in the current Resolver
            if current_ns and current_ns in app_list:
                # If we are reversing for a particular app,
                # use that namespace
                ns = current_ns
            elif ns not in app_list:
                # The name isn't shared by one of the instances
                # (i.e., the default) so just pick the first instance
                # as the default.
                ns = app_list[0]
        except KeyError:
            pass
        if ns != current_ns:
            current_path = None
        try:
            extra, resolver = resolver.namespace_dict[ns]
            resolved_path.append(ns)
            ns_pattern = ns_pattern + extra
            # Django 2.0+ exposes path converters on the pattern; older
            # versions do not, hence the broad except
            try:
                ns_converters.update(resolver.pattern.converters)
            except Exception:
                pass
        except KeyError as key:
            if resolved_path:
                raise NoReverseMatch(
                    "%s is not a registered namespace inside '%s'"
                    % (key, ":".join(resolved_path))
                )
            else:
                raise NoReverseMatch("%s is not a registered namespace" % key)
    if ns_pattern:
        # newer Django needs the converters argument; fall back for older
        # signatures
        try:
            resolver = get_ns_resolver(
                ns_pattern, resolver, tuple(ns_converters.items())
            )
        except Exception:
            resolver = get_ns_resolver(ns_pattern, resolver)
    # custom code, get view from reverse_dict
    # NOTE(review): raises KeyError when `view` is not in reverse_dict, and
    # implicitly returns None when no *other* key maps to the same URL
    # object -- confirm both cases are impossible for the urls used here.
    reverse_dict = resolver.reverse_dict.dict()
    for key, url_obj in reverse_dict.items():
        if url_obj == reverse_dict[view] and key != view:
            module = importlib.import_module(key.__module__)
            return getattr(module, key.__name__)
|
python
|
{
"resource": ""
}
|
q5597
|
find_field_meta
|
train
|
def find_field_meta(obj, value):
    """
    Resolve a (possibly related) field on a model.

    ``value`` may chain related models with double underscores
    ("author__name"); the chain is followed recursively and the meta of
    the final field is returned.

    :param obj: model class (anything exposing ``_meta.get_field``).
    :param value: field name or "__"-separated field path.
    """
    if "__" in value:
        first, rest = value.split("__", 1)
        field = obj._meta.get_field(first)
        # Django 2.0 removed ``field.rel``; ``field.remote_field.model``
        # is the modern spelling. Support both for older installs.
        remote = getattr(field, "remote_field", None) or getattr(
            field, "rel", None
        )
        child_obj = remote.model if hasattr(remote, "model") else remote.to
        return find_field_meta(child_obj, rest)
    return obj._meta.get_field(value)
|
python
|
{
"resource": ""
}
|
q5598
|
get_field_class
|
train
|
def get_field_class(qs, field_name):
    """
    Return the class name of a model field on the queryset's model, or
    None when the field does not exist (e.g. an annotated field).
    """
    try:
        field = qs.model._meta.get_field(field_name)
    except FieldDoesNotExist:
        # annotations are not real model fields
        return None
    return field.__class__.__name__
|
python
|
{
"resource": ""
}
|
q5599
|
arctic_setting
|
train
|
def arctic_setting(setting_name, valid_options=None):
    """
    Tries to get a setting from the django settings, if not available
    defaults to the one defined in defaults.py.

    When ``valid_options`` is given, a configured value outside that set
    raises ImproperlyConfigured.
    """
    try:
        value = getattr(settings, setting_name)
    except AttributeError:
        pass
    else:
        if valid_options and value not in valid_options:
            error_message = "Invalid value for {}, must be one of: {}".format(
                setting_name, str(valid_options)
            )
            raise ImproperlyConfigured(error_message)
    return getattr(settings, setting_name, getattr(defaults, setting_name))
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.