| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q16400
|
DeviceServer.request_restart
|
train
|
def request_restart(self, req, msg):
"""Restart the device server.
Returns
-------
success : {'ok', 'fail'}
Whether scheduling the restart succeeded.
Examples
--------
::
?restart
!restart ok
"""
if self._restart_queue is None:
raise FailReply("No restart queue registered -- cannot restart.")
f = tornado_Future()
@gen.coroutine
def _restart():
# .put should never block because queue should have no size limit
self._restart_queue.put_nowait(self)
req.reply('ok')
raise AsyncReply
self.ioloop.add_callback(lambda: chain_future(_restart(), f))
return f
|
python
|
{
"resource": ""
}
|
q16401
|
DeviceServer.request_client_list
|
train
|
def request_client_list(self, req, msg):
"""Request the list of connected clients.
The list of clients is sent as a sequence of #client-list informs.
Informs
-------
addr : str
The address of the client as host:port with host in dotted quad
notation. If the address of the client could not be determined
(because, for example, the client disconnected suddenly) then
a unique string representing the client is sent instead.
Returns
-------
success : {'ok', 'fail'}
Whether sending the client list succeeded.
informs : int
Number of #client-list inform messages sent.
Examples
--------
::
?client-list
#client-list 127.0.0.1:53600
!client-list ok 1
"""
# TODO Get list of ClientConnection* instances and implement a standard
# 'address-print' method in the ClientConnection class
clients = self._client_conns
num_clients = len(clients)
for conn in clients:
addr = conn.address
req.inform(addr)
return req.make_reply('ok', str(num_clients))
|
python
|
{
"resource": ""
}
|
q16402
|
DeviceServer.request_version_list
|
train
|
def request_version_list(self, req, msg):
"""Request the list of versions of roles and subcomponents.
Informs
-------
name : str
Name of the role or component.
version : str
A string identifying the version of the component. Individual
components may define the structure of this argument as they
choose. In the absence of other information clients should
treat it as an opaque string.
build_state_or_serial_number : str
A unique identifier for a particular instance of a component.
This should change whenever the component is replaced or updated.
Returns
-------
success : {'ok', 'fail'}
Whether sending the version list succeeded.
informs : int
Number of #version-list inform messages sent.
Examples
--------
::
?version-list
#version-list katcp-protocol 5.0-MI
#version-list katcp-library katcp-python-0.4 katcp-python-0.4.1-py2
#version-list katcp-device foodevice-1.0 foodevice-1.0.0rc1
!version-list ok 3
"""
versions = [
("katcp-protocol", (self.PROTOCOL_INFO, None)),
("katcp-library", ("katcp-python-%s" % katcp.__version__, katcp.__version__)),
("katcp-device", (self.version(), self.build_state())),
]
extra_versions = sorted(self.extra_versions.items())
for name, (version, build_state) in versions + extra_versions:
if build_state is None:
inform_args = (name, version)
else:
inform_args = (name, version, build_state)
req.inform(*inform_args)
num_versions = len(versions) + len(extra_versions)
return req.make_reply("ok", str(num_versions))
|
python
|
{
"resource": ""
}
|
q16403
|
DeviceServer.request_sensor_list
|
train
|
def request_sensor_list(self, req, msg):
"""Request the list of sensors.
The list of sensors is sent as a sequence of #sensor-list informs.
Parameters
----------
name : str, optional
Name of the sensor to list (the default is to list all sensors).
If name starts and ends with '/' it is treated as a regular
expression and all sensors whose names contain the regular
expression are returned.
Informs
-------
name : str
The name of the sensor being described.
description : str
Description of the named sensor.
units : str
Units for the value of the named sensor.
type : str
Type of the named sensor.
params : list of str, optional
Additional sensor parameters (type dependent). For integer and
float sensors the additional parameters are the minimum and maximum
sensor value. For discrete sensors the additional parameters are
the allowed values. For all other types no additional parameters
are sent.
Returns
-------
success : {'ok', 'fail'}
Whether sending the sensor list succeeded.
informs : int
Number of #sensor-list inform messages sent.
Examples
--------
::
?sensor-list
#sensor-list psu.voltage PSU\_voltage. V float 0.0 5.0
#sensor-list cpu.status CPU\_status. \@ discrete on off error
...
!sensor-list ok 5
?sensor-list cpu.power.on
#sensor-list cpu.power.on Whether\_CPU\_has\_power. \@ boolean
!sensor-list ok 1
?sensor-list /voltage/
#sensor-list psu.voltage PSU\_voltage. V float 0.0 5.0
#sensor-list cpu.voltage CPU\_voltage. V float 0.0 3.0
!sensor-list ok 2
"""
exact, name_filter = construct_name_filter(msg.arguments[0]
if msg.arguments else None)
sensors = [(name, sensor) for name, sensor in
sorted(self._sensors.iteritems()) if name_filter(name)]
if exact and not sensors:
return req.make_reply("fail", "Unknown sensor name.")
self._send_sensor_value_informs(req, sensors)
return req.make_reply("ok", str(len(sensors)))
|
python
|
{
"resource": ""
}
|
q16404
|
DeviceServer.request_sensor_value
|
train
|
def request_sensor_value(self, req, msg):
"""Request the value of a sensor or sensors.
The list of sensor values is sent as a sequence of #sensor-value informs.
Parameters
----------
name : str, optional
Name of the sensor to poll (the default is to send values for all
sensors). If name starts and ends with '/' it is treated as a
regular expression and all sensors whose names contain the regular
expression are returned.
Informs
-------
timestamp : float
Timestamp of the sensor reading in seconds since the Unix
epoch, or milliseconds for katcp versions <= 4.
count : {1}
Number of sensors described in this #sensor-value inform. Will
always be one. It exists to keep this inform compatible with
#sensor-status.
name : str
Name of the sensor whose value is being reported.
value : object
Value of the named sensor. Type depends on the type of the sensor.
Returns
-------
success : {'ok', 'fail'}
Whether sending the list of values succeeded.
informs : int
Number of #sensor-value inform messages sent.
Examples
--------
::
?sensor-value
#sensor-value 1244631611.415231 1 psu.voltage 4.5
#sensor-value 1244631611.415200 1 cpu.status off
...
!sensor-value ok 5
?sensor-value cpu.power.on
#sensor-value 1244631611.415231 1 cpu.power.on 0
!sensor-value ok 1
"""
exact, name_filter = construct_name_filter(msg.arguments[0]
if msg.arguments else None)
sensors = [(name, sensor) for name, sensor in
sorted(self._sensors.iteritems()) if name_filter(name)]
if exact and not sensors:
return req.make_reply("fail", "Unknown sensor name.")
katcp_version = self.PROTOCOL_INFO.major
for name, sensor in sensors:
timestamp, status, value = sensor.read_formatted(katcp_version)
req.inform(timestamp, "1", name, status, value)
return req.make_reply("ok", str(len(sensors)))
|
python
|
{
"resource": ""
}
|
q16405
|
DeviceServer.request_sensor_sampling
|
train
|
def request_sensor_sampling(self, req, msg):
"""Configure or query the way a sensor is sampled.
Sampled values are reported asynchronously using the #sensor-status
message.
Parameters
----------
name : str
Name of the sensor whose sampling strategy to query or configure.
strategy : {'none', 'auto', 'event', 'differential', \
'period', 'event-rate'}, optional
Type of strategy to use to report the sensor value. The
differential strategy type may only be used with integer or float
sensors. If this parameter is supplied, it sets the new strategy.
params : list of str, optional
Additional strategy parameters (dependent on the strategy type).
For the differential strategy, the parameter is an integer or float
giving the amount by which the sensor value may change before an
updated value is sent.
For the period strategy, the parameter is the sampling period
in float seconds.
The event strategy has no parameters. Note that this has changed
from KATCPv4.
For the event-rate strategy, a minimum period between updates and
a maximum period between updates (both in float seconds) must be
given. If the event occurs more than once within the minimum period,
only one update will occur. Whether or not the event occurs, the
sensor value will be updated at least once per maximum period.
The differential-rate strategy is not supported in this release.
Returns
-------
success : {'ok', 'fail'}
Whether the sensor-sampling request succeeded.
name : str
Name of the sensor queried or configured.
strategy : {'none', 'auto', 'event', 'differential', 'period'}
Name of the new or current sampling strategy for the sensor.
params : list of str
Additional strategy parameters (see description under Parameters).
Examples
--------
::
?sensor-sampling cpu.power.on
!sensor-sampling ok cpu.power.on none
?sensor-sampling cpu.power.on period 500
!sensor-sampling ok cpu.power.on period 500
"""
f = Future()
self.ioloop.add_callback(lambda: chain_future(
self._handle_sensor_sampling(req, msg), f))
return f
|
python
|
{
"resource": ""
}
|
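The strategy names and parameters described above map directly onto the wire format. A minimal client-side sketch of building the corresponding ?sensor-sampling requests with the Message.request helper defined later in this table; the sensor name and parameter values are taken from the docstring examples.

```python
from katcp import Message

# Query the current sampling strategy for a sensor (no strategy argument).
query = Message.request('sensor-sampling', 'cpu.power.on')

# Configure a periodic strategy, reusing the parameters from the docstring
# example above (?sensor-sampling cpu.power.on period 500).
set_period = Message.request('sensor-sampling', 'cpu.power.on', 'period', '500')

# Configure an event-rate strategy: at most one update per 0.1 s and at
# least one update every 10 s (both periods in float seconds).
set_event_rate = Message.request(
    'sensor-sampling', 'cpu.power.on', 'event-rate', '0.1', '10')

print(str(query))       # roughly: ?sensor-sampling cpu.power.on
print(str(set_period))  # roughly: ?sensor-sampling cpu.power.on period 500
```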
q16406
|
DeviceServer.request_sensor_sampling_clear
|
train
|
def request_sensor_sampling_clear(self, req):
"""Set all sampling strategies for this client to none.
Returns
-------
success : {'ok', 'fail'}
Whether sending the list of devices succeeded.
Examples
--------
?sensor-sampling-clear
!sensor-sampling-clear ok
"""
f = Future()
@gen.coroutine
def _clear_strategies():
self.clear_strategies(req.client_connection)
raise gen.Return(('ok',))
self.ioloop.add_callback(lambda: chain_future(_clear_strategies(), f))
return f
|
python
|
{
"resource": ""
}
|
q16407
|
DeviceLogger.level_name
|
train
|
def level_name(self, level=None):
"""Return the name of the given level value.
If level is None, return the name of the current level.
Parameters
----------
level : logging level constant
The logging level constant whose name to retrieve.
Returns
-------
level_name : str
The name of the logging level.
"""
if level is None:
level = self._log_level
return self.LEVELS[level]
|
python
|
{
"resource": ""
}
|
q16408
|
DeviceLogger.level_from_name
|
train
|
def level_from_name(self, level_name):
"""Return the level constant for a given name.
If the *level_name* is not known, raise a ValueError.
Parameters
----------
level_name : str
The logging level name whose logging level constant
to retrieve.
Returns
-------
level : logging level constant
The logging level constant associated with the name.
"""
try:
return self.LEVELS.index(level_name)
except ValueError:
raise ValueError("Unknown logging level name '%s'" % (level_name,))
|
python
|
{
"resource": ""
}
|
q16409
|
DeviceLogger.set_log_level
|
train
|
def set_log_level(self, level):
"""Set the logging level.
Parameters
----------
level : logging level constant
The value to set the logging level to.
"""
self._log_level = level
if self._python_logger:
try:
level = self.PYTHON_LEVEL.get(level)
except ValueError as err:
raise FailReply("Unknown logging level '%s'" % (level))
self._python_logger.setLevel(level)
|
python
|
{
"resource": ""
}
|
q16410
|
DeviceLogger.log
|
train
|
def log(self, level, msg, *args, **kwargs):
"""Log a message and inform all clients.
Parameters
----------
level : logging level constant
The level to log the message at.
msg : str
The text format for the log message.
args : list of objects
Arguments to pass to log format string. Final message text is
created using: msg % args.
kwargs : additional keyword parameters
Allowed keywords are 'name' and 'timestamp'. The name is the name
of the logger to log the message to. If not given the name defaults
to the root logger. The timestamp is a float in seconds. If not
given the timestamp defaults to the current time.
"""
timestamp = kwargs.get("timestamp")
python_msg = msg
if self._python_logger is not None:
if timestamp is not None:
python_msg = ' '.join((
'katcp timestamp: %r' % timestamp,
python_msg))
self._python_logger.log(self.PYTHON_LEVEL[level], python_msg, *args)
if level >= self._log_level:
name = kwargs.get("name")
if name is None:
name = self._root_logger_name
try:
inform_msg = msg % args
except TypeError:
# Catch the "not enough arguments for format string" exception.
inform_msg = "{} {}".format(
msg,
args if args else '').strip()
self._device_server.mass_inform(
self._device_server.create_log_inform(
self.level_name(level),
inform_msg,
name,
timestamp=timestamp))
|
python
|
{
"resource": ""
}
|
q16411
|
DeviceLogger.warn
|
train
|
def warn(self, msg, *args, **kwargs):
"""Log an warning message."""
self.log(self.WARN, msg, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q16412
|
DeviceLogger.log_to_python
|
train
|
def log_to_python(cls, logger, msg):
"""Log a KATCP logging message to a Python logger.
Parameters
----------
logger : logging.Logger object
The Python logger to log the given message to.
msg : Message object
The #log message to create a log entry from.
"""
(level, timestamp, name, message) = tuple(msg.arguments)
log_string = "%s %s: %s" % (timestamp, name, message)
logger.log({"trace": 0,
"debug": logging.DEBUG,
"info": logging.INFO,
"warn": logging.WARN,
"error": logging.ERROR,
"fatal": logging.FATAL}[level], log_string)
|
python
|
{
"resource": ""
}
|
q16413
|
DeviceExampleServer.request_echo
|
train
|
def request_echo(self, sock, msg):
"""Echo the arguments of the message sent."""
return katcp.Message.reply(msg.name, "ok", *msg.arguments)
|
python
|
{
"resource": ""
}
|
q16414
|
log_future_exceptions
|
train
|
def log_future_exceptions(logger, f, ignore=()):
"""Log any exceptions set to a future
Parameters
----------
logger : logging.Logger instance
logger.exception(...) is called if the future resolves with an exception
f : Future object
Future to be monitored for exceptions
ignore : Exception or tuple of Exception
Expected exception(s) to ignore, i.e. they will not be logged.
Notes
-----
This is useful when an async task is started for its side effects without waiting for
the result. The problem is that if the future's resolution is not checked for
exceptions, unhandled exceptions in the async task will be silently ignored.
"""
def log_cb(f):
try:
f.result()
except ignore:
pass
except Exception:
logger.exception('Unhandled exception returned by future')
f.add_done_callback(log_cb)
|
python
|
{
"resource": ""
}
|
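A usage sketch for the helper above: start a coroutine purely for its side effects and attach the logger so failures are not silently dropped. The import path is an assumption; adjust it to wherever log_future_exceptions actually lives.

```python
import logging

import tornado.gen
import tornado.ioloop

from katcp.core import log_future_exceptions  # assumed location of the helper above

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('example')

@tornado.gen.coroutine
def flaky_background_task():
    yield tornado.gen.moment
    raise RuntimeError('background task failed')

@tornado.gen.coroutine
def main():
    # Start the task for its side effects only; without the helper its
    # exception would be silently dropped since nothing yields on it.
    f = flaky_background_task()
    log_future_exceptions(logger, f, ignore=(KeyError,))
    yield tornado.gen.sleep(0.1)  # give the task a chance to run and fail

tornado.ioloop.IOLoop.current().run_sync(main)
```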
q16415
|
steal_docstring_from
|
train
|
def steal_docstring_from(obj):
"""Decorator that lets you steal a docstring from another object
Example
-------
::
@steal_docstring_from(superclass.meth)
def meth(self, arg):
"Extra subclass documentation"
pass
In this case the docstring of the new 'meth' will be copied from superclass.meth, and
if an additional docstring was defined for meth it will be appended to the superclass
docstring with two newlines in between.
"""
def deco(fn):
docs = [obj.__doc__]
if fn.__doc__:
docs.append(fn.__doc__)
fn.__doc__ = '\n\n'.join(docs)
return fn
return deco
|
python
|
{
"resource": ""
}
|
q16416
|
hashable_identity
|
train
|
def hashable_identity(obj):
"""Generate a hashable ID that is stable for methods etc
Approach borrowed from blinker. Why it matters: see e.g.
http://stackoverflow.com/questions/13348031/python-bound-and-unbound-method-object
"""
if hasattr(obj, '__func__'):
return (id(obj.__func__), id(obj.__self__))
elif hasattr(obj, 'im_func'):
return (id(obj.im_func), id(obj.im_self))
elif isinstance(obj, (basestring, unicode)):
return obj
else:
return id(obj)
|
python
|
{
"resource": ""
}
|
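A quick illustration of why the helper above exists: each attribute access on an instance creates a new bound-method object, so plain identity checks are unreliable, while hashable_identity stays stable. The import path is an assumption.

```python
from katcp.core import hashable_identity  # assumed location of the helper above

class Widget(object):
    def refresh(self):
        pass

w = Widget()

# Each attribute access creates a fresh bound-method object, so identity
# comparisons on bound methods are unreliable...
print(w.refresh is w.refresh)  # False in CPython

# ...whereas hashable_identity keys on (id(function), id(instance)) and is
# therefore stable, making it usable as a dict or set key.
print(hashable_identity(w.refresh) == hashable_identity(w.refresh))  # True
```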
q16417
|
until_any
|
train
|
def until_any(*futures, **kwargs):
"""Return a future that resolves when any of the passed futures resolves.
Resolves with the value yielded by the first future to resolve.
Note, this will only work with tornado futures.
"""
timeout = kwargs.get('timeout', None)
ioloop = kwargs.get('ioloop', None) or tornado.ioloop.IOLoop.current()
any_future = tornado_Future()
def handle_done(done_future):
if not any_future.done():
try:
any_future.set_result(done_future.result())
except Exception:
any_future.set_exc_info(done_future.exc_info())
# (NM) Nasty hack to remove handle_done from the callback list to prevent a
# memory leak where one of the futures resolves quickly, particularly when
# used together with AsyncState.until_state(). Also addresses Jira issue
# CM-593
for f in futures:
if f._callbacks:
try:
f._callbacks.remove(handle_done)
except ValueError:
pass
for f in futures:
f.add_done_callback(handle_done)
if any_future.done():
break
if timeout:
return with_timeout(ioloop.time() + timeout, any_future, ioloop)
else:
return any_future
|
python
|
{
"resource": ""
}
|
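A sketch of using until_any above from a coroutine: wait for whichever of two tornado futures resolves first, with an overall timeout. The import path is an assumption.

```python
import tornado.gen
import tornado.ioloop

from katcp.core import until_any  # assumed location of the helper above

@tornado.gen.coroutine
def fast():
    yield tornado.gen.sleep(0.1)
    raise tornado.gen.Return('fast result')

@tornado.gen.coroutine
def first_of_two():
    slow = tornado.gen.sleep(5)  # a future that only resolves after 5 s
    # Resolves with the value of whichever future finishes first, or raises
    # tornado.gen.TimeoutError if neither resolves within 2 s.
    result = yield until_any(fast(), slow, timeout=2)
    raise tornado.gen.Return(result)

print(tornado.ioloop.IOLoop.current().run_sync(first_of_two))  # 'fast result'
```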
q16418
|
future_timeout_manager
|
train
|
def future_timeout_manager(timeout=None, ioloop=None):
"""Create Helper function for yielding with a cumulative timeout if required
Keeps track of time over multiple timeout calls so that a single timeout can
be placed over multiple operations.
Parameters
----------
timeout : int or None
Timeout, or None for no timeout
ioloop : IOLoop instance or None
tornado IOloop instance to use, or None for IOLoop.current()
Return value
------------
maybe_timeout : func
Accepts a future, and wraps it in
:func:tornado.gen.with_timeout. maybe_timeout raises
:class:`tornado.gen.TimeoutError` if the timeout expires
Has a function attribute `remaining()` that returns the remaining
timeout or None if timeout == None
Example
-------
::
@tornado.gen.coroutine
def multi_op(timeout):
maybe_timeout = future_timeout_manager(timeout)
result1 = yield maybe_timeout(op1())
result2 = yield maybe_timeout(op2())
# If the cumulative time of op1 and op2 exceeds timeout,
# :class:`tornado.gen.TimeoutError` is raised
"""
ioloop = ioloop or tornado.ioloop.IOLoop.current()
t0 = ioloop.time()
def _remaining():
return timeout - (ioloop.time() - t0) if timeout else None
def maybe_timeout(f):
"""Applies timeout if timeout is not None"""
if not timeout:
return f
else:
remaining = _remaining()
deadline = ioloop.time() + remaining
return with_timeout(deadline, f, ioloop)
maybe_timeout.remaining = _remaining
return maybe_timeout
|
python
|
{
"resource": ""
}
|
q16419
|
until_some
|
train
|
def until_some(*args, **kwargs):
"""Return a future that resolves when some of the passed futures resolve.
The futures can be passed as either a sequence of *args* or a dict of
*kwargs* (but not both). Some additional keyword arguments are supported,
as described below. Once the specified number of underlying futures have
resolved, the returned future resolves as well; if a timeout is given and
expires first, a :class:`tornado.gen.TimeoutError` is raised.
Parameters
----------
done_at_least : None or int
Number of futures that need to resolve before this resolves or None
to wait for all (default None)
timeout : None or float
Timeout in seconds, or None for no timeout (the default)
Returns
-------
This command returns a tornado Future that resolves with a list of
(index, value) tuples containing the results of all futures that resolved,
with corresponding indices (numbers for *args* futures or keys for *kwargs*
futures).
Raises
------
:class:`tornado.gen.TimeoutError`
If operation times out before the requisite number of futures resolve
"""
done_at_least = kwargs.pop('done_at_least', None)
timeout = kwargs.pop('timeout', None)
# At this point args and kwargs are either empty or contain futures only
if done_at_least is None:
done_at_least = len(args) + len(kwargs)
wait_iterator = tornado.gen.WaitIterator(*args, **kwargs)
maybe_timeout = future_timeout_manager(timeout)
results = []
while not wait_iterator.done():
result = yield maybe_timeout(wait_iterator.next())
results.append((wait_iterator.current_index, result))
if len(results) >= done_at_least:
break
raise tornado.gen.Return(results)
|
python
|
{
"resource": ""
}
|
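A usage sketch for until_some above: resolve as soon as two of three futures are done. until_some is itself a coroutine, so it is yielded from another coroutine. The import path is an assumption.

```python
import tornado.gen
import tornado.ioloop

from katcp.core import until_some  # assumed location of the helper above

@tornado.gen.coroutine
def delayed(value, delay):
    yield tornado.gen.sleep(delay)
    raise tornado.gen.Return(value)

@tornado.gen.coroutine
def main():
    # Resolve as soon as any two of the three futures have a result.
    results = yield until_some(delayed('a', 0.01),
                               delayed('b', 0.02),
                               delayed('c', 5),
                               done_at_least=2,
                               timeout=1)
    # results is a list of (index, value) tuples, e.g. [(0, 'a'), (1, 'b')]
    raise tornado.gen.Return(results)

print(tornado.ioloop.IOLoop.current().run_sync(main))
```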
q16420
|
Message.format_argument
|
train
|
def format_argument(self, arg):
"""Format a Message argument to a string"""
if isinstance(arg, float):
return repr(arg)
elif isinstance(arg, bool):
return str(int(arg))
else:
try:
return str(arg)
except UnicodeEncodeError:
# unicode characters will break the str cast, so
# try to encode to ascii and replace the offending characters
# with a '?' character
logger.error("Error casting message argument to str! "
"Trying to encode argument to ascii.")
if not isinstance(arg, unicode):
arg = arg.decode('utf-8')
return arg.encode('ascii', 'replace')
|
python
|
{
"resource": ""
}
|
q16421
|
Message.reply_ok
|
train
|
def reply_ok(self):
"""Return True if this is a reply and its first argument is 'ok'."""
return (self.mtype == self.REPLY and self.arguments and
self.arguments[0] == self.OK)
|
python
|
{
"resource": ""
}
|
q16422
|
Message.request
|
train
|
def request(cls, name, *args, **kwargs):
"""Helper method for creating request messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID
"""
mid = kwargs.pop('mid', None)
if len(kwargs) > 0:
raise TypeError('Invalid keyword argument(s): %r' % kwargs)
return cls(cls.REQUEST, name, args, mid)
|
python
|
{
"resource": ""
}
|
q16423
|
Message.reply
|
train
|
def reply(cls, name, *args, **kwargs):
"""Helper method for creating reply messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword Arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID
"""
mid = kwargs.pop('mid', None)
if len(kwargs) > 0:
raise TypeError('Invalid keyword argument(s): %r' % kwargs)
return cls(cls.REPLY, name, args, mid)
|
python
|
{
"resource": ""
}
|
q16424
|
Message.reply_to_request
|
train
|
def reply_to_request(cls, req_msg, *args):
"""Helper method for creating reply messages to a specific request.
Copies the message name and message identifier from request message.
Parameters
----------
req_msg : katcp.core.Message instance
The request message that this reply is in reply to
args : list of strings
The message arguments.
"""
return cls(cls.REPLY, req_msg.name, args, req_msg.mid)
|
python
|
{
"resource": ""
}
|
q16425
|
Message.inform
|
train
|
def inform(cls, name, *args, **kwargs):
"""Helper method for creating inform messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
"""
mid = kwargs.pop('mid', None)
if len(kwargs) > 0:
raise TypeError('Invalid keyword argument(s): %r' % kwargs)
return cls(cls.INFORM, name, args, mid)
|
python
|
{
"resource": ""
}
|
q16426
|
Message.reply_inform
|
train
|
def reply_inform(cls, req_msg, *args):
"""Helper method for creating inform messages in reply to a request.
Copies the message name and message identifier from request message.
Parameters
----------
req_msg : katcp.core.Message instance
The request message that this inform is in reply to
args : list of strings
The message arguments except name
"""
return cls(cls.INFORM, req_msg.name, args, req_msg.mid)
|
python
|
{
"resource": ""
}
|
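Taken together, the request/reply/inform helpers above cover a full reply cycle. A short sketch, assuming katcp.Message exposes these classmethods exactly as shown; the serialized forms in the comments are approximate.

```python
from katcp import Message

# A client builds a request, optionally with a message ID.
req = Message.request('sensor-value', 'cpu.power.on', mid='7')

# The server answers that specific request: the name and message ID are
# copied from the request into the inform and the reply.
inform = Message.reply_inform(req, '1244631611.4', '1', 'cpu.power.on', 'nominal', '1')
reply = Message.reply_to_request(req, 'ok', '1')

print(str(req))     # roughly: ?sensor-value[7] cpu.power.on
print(str(inform))  # roughly: #sensor-value[7] 1244631611.4 1 cpu.power.on nominal 1
print(str(reply))   # roughly: !sensor-value[7] ok 1
```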
q16427
|
MessageParser._unescape_match
|
train
|
def _unescape_match(self, match):
"""Given an re.Match, unescape the escape code it represents."""
char = match.group(1)
if char in self.ESCAPE_LOOKUP:
return self.ESCAPE_LOOKUP[char]
elif not char:
raise KatcpSyntaxError("Escape slash at end of argument.")
else:
raise KatcpSyntaxError("Invalid escape character %r." % (char,))
|
python
|
{
"resource": ""
}
|
q16428
|
MessageParser._parse_arg
|
train
|
def _parse_arg(self, arg):
"""Parse an argument."""
match = self.SPECIAL_RE.search(arg)
if match:
raise KatcpSyntaxError("Unescaped special %r." % (match.group(),))
return self.UNESCAPE_RE.sub(self._unescape_match, arg)
|
python
|
{
"resource": ""
}
|
q16429
|
MessageParser.parse
|
train
|
def parse(self, line):
"""Parse a line, return a Message.
Parameters
----------
line : str
The line to parse (should not contain the terminating newline
or carriage return).
Returns
-------
msg : Message object
The resulting Message.
"""
# find command type and check validity
if not line:
raise KatcpSyntaxError("Empty message received.")
type_char = line[0]
if type_char not in self.TYPE_SYMBOL_LOOKUP:
raise KatcpSyntaxError("Bad type character %r." % (type_char,))
mtype = self.TYPE_SYMBOL_LOOKUP[type_char]
# find command and arguments name
# (removing possible empty argument resulting from whitespace at end
# of command)
parts = self.WHITESPACE_RE.split(line)
if not parts[-1]:
del parts[-1]
name = parts[0][1:]
arguments = [self._parse_arg(x) for x in parts[1:]]
# split out message id
match = self.NAME_RE.match(name)
if match:
name = match.group('name')
mid = match.group('id')
else:
raise KatcpSyntaxError("Bad message name (and possibly id) %r." %
(name,))
return Message(mtype, name, arguments, mid)
|
python
|
{
"resource": ""
}
|
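A sketch of feeding a wire line through the parser above and inspecting the result; it assumes MessageParser is importable from the katcp package alongside Message.

```python
from katcp import Message, MessageParser  # MessageParser assumed exported here

parser = MessageParser()

msg = parser.parse('?sensor-sampling[3] cpu.power.on period 500')
print(msg.mtype == Message.REQUEST)  # True
print(msg.name)                      # sensor-sampling
print(msg.mid)                       # 3
print(msg.arguments)                 # ['cpu.power.on', 'period', '500']

# Escaped arguments are unescaped on parse: \_ becomes a space and \@ an
# empty argument, mirroring the #sensor-list examples further up.
inform = parser.parse('#sensor-list cpu.status CPU\\_status. \\@ discrete on off error')
print(inform.arguments[1])           # CPU status.
```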
q16430
|
DeviceServerMetaclass.check_protocol
|
train
|
def check_protocol(mcs, handler):
"""
True if the current server's protocol flags satisfy handler requirements
"""
protocol_info = mcs.PROTOCOL_INFO
protocol_version = (protocol_info.major, protocol_info.minor)
protocol_flags = protocol_info.flags
# Check if minimum protocol version requirement is met
min_protocol_version = getattr(handler, '_minimum_katcp_version', None)
protocol_version_ok = (min_protocol_version is None or
protocol_version >= min_protocol_version)
# Check if required optional protocol flags are present
required_katcp_protocol_flags = getattr(
handler, '_has_katcp_protocol_flags', None)
protocol_flags_ok = (
required_katcp_protocol_flags is None or
all(flag in protocol_flags
for flag in required_katcp_protocol_flags))
return protocol_version_ok and protocol_flags_ok
|
python
|
{
"resource": ""
}
|
q16431
|
Sensor.integer
|
train
|
def integer(cls, name, description=None, unit='', params=None,
default=None, initial_status=None):
"""Instantiate a new integer sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
params : list
[min, max] -- minimum and maximum values of the sensor
default : int
An initial value for the sensor. Defaults to 0.
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.INTEGER, name, description, unit, params,
default, initial_status)
|
python
|
{
"resource": ""
}
|
q16432
|
Sensor.float
|
train
|
def float(cls, name, description=None, unit='', params=None,
default=None, initial_status=None):
"""Instantiate a new float sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
params : list
[min, max] -- minimum and maximum values of the sensor
default : float
An initial value for the sensor. Defaults to 0.0.
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.FLOAT, name, description, unit, params,
default, initial_status)
|
python
|
{
"resource": ""
}
|
q16433
|
Sensor.boolean
|
train
|
def boolean(cls, name, description=None, unit='',
default=None, initial_status=None):
"""Instantiate a new boolean sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
default : bool
An initial value for the sensor. Defaults to False.
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.BOOLEAN, name, description, unit, None,
default, initial_status)
|
python
|
{
"resource": ""
}
|
q16434
|
Sensor.lru
|
train
|
def lru(cls, name, description=None, unit='',
default=None, initial_status=None):
"""Instantiate a new lru sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
default : enum, Sensor.LRU_*
An initial value for the sensor. Defaults to self.LRU_NOMINAL
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.LRU, name, description, unit, None,
default, initial_status)
|
python
|
{
"resource": ""
}
|
q16435
|
Sensor.string
|
train
|
def string(cls, name, description=None, unit='',
default=None, initial_status=None):
"""Instantiate a new string sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
default : string
An initial value for the sensor. Defaults to the empty string.
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.STRING, name, description, unit, None,
default, initial_status)
|
python
|
{
"resource": ""
}
|
q16436
|
Sensor.discrete
|
train
|
def discrete(cls, name, description=None, unit='', params=None,
default=None, initial_status=None):
"""Instantiate a new discrete sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
params : [str]
Sequence of all allowable discrete sensor states
default : str
An initial value for the sensor. Defaults to the first item
of params
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.DISCRETE, name, description, unit, params,
default, initial_status)
|
python
|
{
"resource": ""
}
|
q16437
|
Sensor.timestamp
|
train
|
def timestamp(cls, name, description=None, unit='',
default=None, initial_status=None):
"""Instantiate a new timestamp sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. For a timestamp sensor this may only be
the empty string.
default : string
An initial value for the sensor in seconds since the Unix Epoch.
Defaults to 0.
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.TIMESTAMP, name, description, unit, None,
default, initial_status)
|
python
|
{
"resource": ""
}
|
q16438
|
Sensor.address
|
train
|
def address(cls, name, description=None, unit='',
default=None, initial_status=None):
"""Instantiate a new IP address sensor object.
Parameters
----------
name : str
The name of the sensor.
description : str
A short description of the sensor.
units : str
The units of the sensor value. May be the empty string
if there are no applicable units.
default : (string, int)
An initial value for the sensor. Tuple containing (host, port).
Default is ("0.0.0.0", None).
initial_status : int enum or None
An initial status for the sensor. If None, defaults to
Sensor.UNKNOWN. `initial_status` must be one of the keys in
Sensor.STATUSES
"""
return cls(cls.ADDRESS, name, description, unit, None,
default, initial_status)
|
python
|
{
"resource": ""
}
|
q16439
|
Sensor.notify
|
train
|
def notify(self, reading):
"""Notify all observers of changes to this sensor."""
# copy list before iterating in case new observers arrive
for o in list(self._observers):
o.update(self, reading)
|
python
|
{
"resource": ""
}
|
q16440
|
Sensor.set_value
|
train
|
def set_value(self, value, status=NOMINAL, timestamp=None,
major=DEFAULT_KATCP_MAJOR):
"""Check and then set the value of the sensor.
Parameters
----------
value : object
Value of the appropriate type for the sensor.
status : Sensor status constant
Whether the value represents an error condition or not.
timestamp : float in seconds or None
The time at which the sensor value was determined.
Uses current time if None.
major : int
Major version of KATCP to use when interpreting types.
Defaults to latest implemented KATCP version.
"""
self._kattype.check(value, major)
if timestamp is None:
timestamp = time.time()
self.set(timestamp, status, value)
|
python
|
{
"resource": ""
}
|
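A sketch showing how the factory classmethods and set_value above fit together, assuming the standard Sensor status constants (NOMINAL, ERROR) exist as used in set_value's defaults.

```python
import time

from katcp import Sensor

# An integer sensor with [min, max] params, as described above.
temp = Sensor.integer('cpu.temperature',
                      description='CPU temperature',
                      unit='degC',
                      params=[0, 100],
                      default=25)

# A discrete sensor whose params list the allowed values.
status = Sensor.discrete('cpu.status',
                         description='CPU status',
                         params=['on', 'off', 'error'],
                         default='off')

# Update readings; any observers attached to the sensors are notified.
temp.set_value(41, status=Sensor.NOMINAL, timestamp=time.time())
status.set_value('error', status=Sensor.ERROR)
```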
q16441
|
Sensor.parse_type
|
train
|
def parse_type(cls, type_string):
"""Parse KATCP formatted type code into Sensor type constant.
Parameters
----------
type_string : str
KATCP formatted type code.
Returns
-------
sensor_type : Sensor type constant
The corresponding Sensor type constant.
"""
if type_string in cls.SENSOR_TYPE_LOOKUP:
return cls.SENSOR_TYPE_LOOKUP[type_string]
else:
raise KatcpSyntaxError("Invalid sensor type string %s" %
type_string)
|
python
|
{
"resource": ""
}
|
q16442
|
Sensor.parse_params
|
train
|
def parse_params(cls, sensor_type, formatted_params,
major=DEFAULT_KATCP_MAJOR):
"""Parse KATCP formatted parameters into Python values.
Parameters
----------
sensor_type : Sensor type constant
The type of sensor the parameters are for.
formatted_params : list of strings
The formatted parameters that should be parsed.
major : int
Major version of KATCP to use when interpreting types.
Defaults to latest implemented KATCP version.
Returns
-------
params : list of objects
The parsed parameters.
"""
typeclass, _value = cls.SENSOR_TYPES[sensor_type]
if sensor_type == cls.DISCRETE:
kattype = typeclass([])
else:
kattype = typeclass()
return [kattype.decode(x, major) for x in formatted_params]
|
python
|
{
"resource": ""
}
|
q16443
|
AsyncEvent.wait_with_ioloop
|
train
|
def wait_with_ioloop(self, ioloop, timeout=None):
"""Do blocking wait until condition is event is set.
Parameters
----------
ioloop : tornado.ioloop.IOLoop instance
MUST be the same ioloop that set() / clear() is called from
timeout : float, int or None
If not None, only wait up to `timeout` seconds for event to be set.
Return Value
------------
flag : True if event was set within timeout, otherwise False.
Notes
-----
This will deadlock if called in the ioloop!
"""
f = Future()
def cb():
return gen.chain_future(self.until_set(), f)
ioloop.add_callback(cb)
try:
f.result(timeout)
return True
except TimeoutError:
return self._flag
|
python
|
{
"resource": ""
}
|
q16444
|
AsyncState.until_state
|
train
|
def until_state(self, state, timeout=None):
"""Return a tornado Future that will resolve when the requested state is set"""
if state not in self._valid_states:
raise ValueError('State must be one of {0}, not {1}'
.format(self._valid_states, state))
if state != self._state:
if timeout:
return with_timeout(self._ioloop.time() + timeout,
self._waiting_futures[state],
self._ioloop)
else:
return self._waiting_futures[state]
else:
f = tornado_Future()
f.set_result(True)
return f
|
python
|
{
"resource": ""
}
|
q16445
|
AsyncState.until_state_in
|
train
|
def until_state_in(self, *states, **kwargs):
"""Return a tornado Future, resolves when any of the requested states is set"""
timeout = kwargs.get('timeout', None)
state_futures = (self.until_state(s, timeout=timeout) for s in states)
return until_any(*state_futures)
|
python
|
{
"resource": ""
}
|
q16446
|
LatencyTimer.check_future
|
train
|
def check_future(self, fut):
"""Call with each future that is to be yielded on"""
done = self.done = fut.done()
if done and not self.prev_done:
self.done_since = self.ioloop.time()
self.prev_done = done
|
python
|
{
"resource": ""
}
|
q16447
|
request_check
|
train
|
def request_check(client, exception, *msg_parms, **kwargs):
"""Make blocking request to client and raise exception if reply is not ok.
Parameters
----------
client : DeviceClient instance
exception: Exception class to raise
*msg_parms : Message parameters sent to the Message.request() call
**kwargs : Keyword arguments
Forwards kwargs['timeout'] to client.blocking_request().
Forwards kwargs['mid'] to Message.request().
Returns
-------
reply, informs : as returned by client.blocking_request
Raises
------
*exception* passed as parameter is raised if reply.reply_ok() is False
Notes
-----
A typical use-case for this function is to use functools.partial() to bind
a particular client and exception. The resulting function can then be used
instead of direct client.blocking_request() calls to automate error
handling.
"""
timeout = kwargs.get('timeout', None)
req_msg = Message.request(*msg_parms)
if timeout is not None:
reply, informs = client.blocking_request(req_msg, timeout=timeout)
else:
reply, informs = client.blocking_request(req_msg)
if not reply.reply_ok():
raise exception('Unexpected failure reply "{2}"\n'
' with device at {0}, request \n"{1}"'
.format(client.bind_address_string, req_msg, reply))
return reply, informs
|
python
|
{
"resource": ""
}
|
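As the Notes above suggest, request_check is usually specialised with functools.partial. A sketch, with the import path, exception class, and client all treated as assumptions or placeholders.

```python
import functools

from katcp.client import request_check  # assumed location of the helper above

class DeviceError(Exception):
    """Hypothetical exception raised when the device replies with a failure."""

def make_checked_request(client):
    """Bind a particular client and exception class once, as suggested above."""
    return functools.partial(request_check, client, DeviceError)

# Usage sketch (client is an already connected DeviceClient-style instance):
#
#   checked = make_checked_request(client)
#   reply, informs = checked('sensor-sampling', 'cpu.power.on', 'period', '500',
#                            timeout=5)
#
# A non-ok reply raises DeviceError with the request and reply in its message.
```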
q16448
|
DeviceClient.convert_seconds
|
train
|
def convert_seconds(self, time_seconds):
"""Convert a time in seconds to the device timestamp units.
KATCP v4 and earlier specified all timestamps in milliseconds. Since
KATCP v5, all timestamps are in seconds. If the device KATCP version
has been detected, this method converts a value in seconds to the
appropriate (seconds or milliseconds) quantity. For versions smaller
than V4, the time value will be truncated to the nearest millisecond.
"""
if self.protocol_flags.major >= SEC_TS_KATCP_MAJOR:
return time_seconds
else:
device_time = time_seconds * SEC_TO_MS_FAC
if self.protocol_flags.major < FLOAT_TS_KATCP_MAJOR:
device_time = int(device_time)
return device_time
|
python
|
{
"resource": ""
}
|
q16449
|
DeviceClient._next_id
|
train
|
def _next_id(self):
"""Return the next available message id."""
assert get_thread_ident() == self.ioloop_thread_id
self._last_msg_id += 1
return str(self._last_msg_id)
|
python
|
{
"resource": ""
}
|
q16450
|
DeviceClient._get_mid_and_update_msg
|
train
|
def _get_mid_and_update_msg(self, msg, use_mid):
"""Get message ID for current request and assign to msg.mid if needed.
Parameters
----------
msg : katcp.Message ?request message
use_mid : bool or None
If msg.mid is None, a new message ID will be created. msg.mid will be
filled with this ID if use_mid is True or if use_mid is None and the
server supports message ids. If msg.mid is already assigned, it will
not be touched, and will be used as the active message ID.
Return value
------------
The active message ID
"""
if use_mid is None:
use_mid = self._server_supports_ids
if msg.mid is None:
mid = self._next_id()
if use_mid:
msg.mid = mid
# An internal mid may be needed for the request/inform/response
# machinery to work, so we return it
return mid
else:
return msg.mid
|
python
|
{
"resource": ""
}
|
q16451
|
DeviceClient.request
|
train
|
def request(self, msg, use_mid=None):
"""Send a request message, with automatic message ID assignment.
Parameters
----------
msg : katcp.Message request message
use_mid : bool or None, default=None
Returns
-------
mid : string or None
The message id, or None if no msg id is used
If use_mid is None and the server supports msg ids, or if use_mid is
True, a message ID will automatically be assigned if msg.mid is None.
If msg.mid has a value and the server supports msg ids, that value
will be used. If the server does not support msg ids, KatcpVersionError
will be raised.
"""
mid = self._get_mid_and_update_msg(msg, use_mid)
self.send_request(msg)
return mid
|
python
|
{
"resource": ""
}
|
q16452
|
DeviceClient.send_message
|
train
|
def send_message(self, msg):
"""Send any kind of message.
Parameters
----------
msg : Message object
The message to send.
"""
assert get_thread_ident() == self.ioloop_thread_id
data = str(msg) + "\n"
# Log all sent messages here so no one else has to.
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug("Sending to {}: {}"
.format(self.bind_address_string, repr(data)))
if not self._connected.isSet():
raise KatcpClientDisconnected('Not connected to device {0}'.format(
self.bind_address_string))
try:
return self._stream.write(data)
except Exception:
self._logger.warn('Could not send message {0!r} to {1!r}'
.format(str(msg), self._bindaddr), exc_info=True)
self._disconnect(exc_info=True)
|
python
|
{
"resource": ""
}
|
q16453
|
DeviceClient._disconnect
|
train
|
def _disconnect(self, exc_info=False):
"""Disconnect and cleanup."""
if self._stream:
self._stream.close(exc_info=exc_info)
|
python
|
{
"resource": ""
}
|
q16454
|
DeviceClient.handle_message
|
train
|
def handle_message(self, msg):
"""Handle a message from the server.
Parameters
----------
msg : Message object
The Message to dispatch to the handler methods.
"""
# log messages received so that no one else has to
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug(
"received from {}: {}"
.format(self.bind_address_string, repr(str(msg))))
if msg.mtype == Message.INFORM:
return self.handle_inform(msg)
elif msg.mtype == Message.REPLY:
return self.handle_reply(msg)
elif msg.mtype == Message.REQUEST:
return self.handle_request(msg)
else:
self._logger.error("Unexpected message type from server ['%s']."
% (msg,))
|
python
|
{
"resource": ""
}
|
q16455
|
DeviceClient.handle_reply
|
train
|
def handle_reply(self, msg):
"""Dispatch a reply message to the appropriate method.
Parameters
----------
msg : Message object
The reply message to dispatch.
"""
method = self.__class__.unhandled_reply
if msg.name in self._reply_handlers:
method = self._reply_handlers[msg.name]
try:
return method(self, msg)
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Reply %s FAIL: %s" % (msg.name, reason))
|
python
|
{
"resource": ""
}
|
q16456
|
DeviceClient.set_ioloop
|
train
|
def set_ioloop(self, ioloop=None):
"""Set the tornado.ioloop.IOLoop instance to use.
This defaults to IOLoop.current(). If set_ioloop() is never called the
IOLoop is managed: started in a new thread, and will be stopped if
self.stop() is called.
Notes
-----
Must be called before start() is called
"""
self._ioloop_manager.set_ioloop(ioloop, managed=False)
self.ioloop = ioloop
|
python
|
{
"resource": ""
}
|
q16457
|
DeviceClient.enable_thread_safety
|
train
|
def enable_thread_safety(self):
"""Enable thread-safety features.
Must be called before start().
"""
if self.threadsafe:
return # Already done!
if self._running.isSet():
raise RuntimeError('Cannot enable thread safety after start')
def _getattr(obj, name):
# use 'is True' so mock objects don't return true for everything
return getattr(obj, name, False) is True
for name in dir(self):
try:
meth = getattr(self, name)
except AttributeError:
# Subclasses may have computed attributes that don't work
# before they are started, so let's ignore those
pass
if not callable(meth):
continue
make_threadsafe = _getattr(meth, 'make_threadsafe')
make_threadsafe_blocking = _getattr(meth, 'make_threadsafe_blocking')
if make_threadsafe:
assert not make_threadsafe_blocking
meth = self._make_threadsafe(meth)
setattr(self, name, meth)
elif make_threadsafe_blocking:
meth = self._make_threadsafe_blocking(meth)
setattr(self, name, meth)
self._threadsafe = True
|
python
|
{
"resource": ""
}
|
q16458
|
DeviceClient.start
|
train
|
def start(self, timeout=None):
"""Start the client in a new thread.
Parameters
----------
timeout : float in seconds
Seconds to wait for client thread to start. Do not specify a
timeout if start() is being called from the same ioloop that this
client will be installed on, since it will block the ioloop without
progressing.
"""
if self._running.isSet():
raise RuntimeError("Device client already started.")
# Make sure we have an ioloop
self.ioloop = self._ioloop_manager.get_ioloop()
if timeout:
t0 = self.ioloop.time()
self._ioloop_manager.start(timeout)
self.ioloop.add_callback(self._install)
if timeout:
remaining_timeout = timeout - (self.ioloop.time() - t0)
self.wait_running(remaining_timeout)
|
python
|
{
"resource": ""
}
|
q16459
|
DeviceClient.wait_running
|
train
|
def wait_running(self, timeout=None):
"""Wait until the client is running.
Parameters
----------
timeout : float in seconds
Seconds to wait for the client to start running.
Returns
-------
running : bool
Whether the client is running
Notes
-----
Do not call this from the ioloop, use until_running().
"""
ioloop = getattr(self, 'ioloop', None)
if not ioloop:
raise RuntimeError('Call start() before wait_running()')
return self._running.wait_with_ioloop(ioloop, timeout)
|
python
|
{
"resource": ""
}
|
q16460
|
DeviceClient.until_connected
|
train
|
def until_connected(self, timeout=None):
"""Return future that resolves when the client is connected."""
t0 = self.ioloop.time()
yield self.until_running(timeout=timeout)
t1 = self.ioloop.time()
if timeout:
timedelta = timeout - (t1 - t0)
else:
timedelta = None
assert get_thread_ident() == self.ioloop_thread_id
yield self._connected.until_set(timeout=timedelta)
|
python
|
{
"resource": ""
}
|
q16461
|
DeviceClient.until_protocol
|
train
|
def until_protocol(self, timeout=None):
"""Return future that resolves after receipt of katcp protocol info.
If the returned future resolves, the server's protocol information is
available in the ProtocolFlags instance self.protocol_flags.
"""
t0 = self.ioloop.time()
yield self.until_running(timeout=timeout)
t1 = self.ioloop.time()
if timeout:
timedelta = timeout - (t1 - t0)
else:
timedelta = None
assert get_thread_ident() == self.ioloop_thread_id
yield self._received_protocol_info.until_set(timeout=timedelta)
|
python
|
{
"resource": ""
}
|
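Both waits above are coroutines that must run on the client's ioloop. A sketch of driving them from another coroutine, with the host and port as placeholders.

```python
import tornado.gen
import tornado.ioloop

from katcp import DeviceClient

@tornado.gen.coroutine
def connect_and_get_flags(host='localhost', port=10000):  # placeholder address
    client = DeviceClient(host, port)
    client.set_ioloop(tornado.ioloop.IOLoop.current())
    client.start()
    yield client.until_connected(timeout=5)
    # Once until_protocol resolves, the negotiated flags are available.
    yield client.until_protocol(timeout=5)
    raise tornado.gen.Return(client.protocol_flags)

# e.g. flags = tornado.ioloop.IOLoop.current().run_sync(connect_and_get_flags)
```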
q16462
|
AsyncClient._pop_async_request
|
train
|
def _pop_async_request(self, msg_id, msg_name):
"""Pop the set of callbacks for a request.
Return tuple of Nones if callbacks already popped (or don't exist).
"""
assert get_thread_ident() == self.ioloop_thread_id
if msg_id is None:
msg_id = self._msg_id_for_name(msg_name)
if msg_id in self._async_queue:
callback_tuple = self._async_queue[msg_id]
del self._async_queue[msg_id]
self._async_id_stack[callback_tuple[0].name].remove(msg_id)
return callback_tuple
else:
return None, None, None, None, None
|
python
|
{
"resource": ""
}
|
q16463
|
AsyncClient._peek_async_request
|
train
|
def _peek_async_request(self, msg_id, msg_name):
"""Peek at the set of callbacks for a request.
Return tuple of Nones if callbacks don't exist.
"""
assert get_thread_ident() == self.ioloop_thread_id
if msg_id is None:
msg_id = self._msg_id_for_name(msg_name)
if msg_id in self._async_queue:
return self._async_queue[msg_id]
else:
return None, None, None, None, None
|
python
|
{
"resource": ""
}
|
q16464
|
AsyncClient._msg_id_for_name
|
train
|
def _msg_id_for_name(self, msg_name):
"""Find the msg_id for a given request name.
Return None if no message id exists.
"""
if msg_name in self._async_id_stack and self._async_id_stack[msg_name]:
return self._async_id_stack[msg_name][0]
|
python
|
{
"resource": ""
}
|
q16465
|
AsyncClient.blocking_request
|
train
|
def blocking_request(self, msg, timeout=None, use_mid=None):
"""Send a request messsage and wait for its reply.
Parameters
----------
msg : Message object
The request Message to send.
timeout : float in seconds
How long to wait for a reply. The default is the
timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them.
Returns
-------
reply : Message object
The reply message received.
informs : list of Message objects
A list of the inform messages received.
"""
assert (get_thread_ident() != self.ioloop_thread_id), (
'Cannot call blocking_request() in ioloop')
if timeout is None:
timeout = self._request_timeout
f = Future() # for thread safety
tf = [None] # Placeholder for tornado Future for exception tracebacks
def blocking_request_callback():
try:
tf[0] = frf = self.future_request(msg, timeout=timeout,
use_mid=use_mid)
except Exception:
tf[0] = frf = tornado_Future()
frf.set_exc_info(sys.exc_info())
gen.chain_future(frf, f)
self.ioloop.add_callback(blocking_request_callback)
# We wait on the future result that should be set by the reply
# handler callback. If this does not occur within the
# timeout it means something unexpected went wrong. We give it
# an extra second to deal with (unlikely?) slowness in the
# rest of the code.
extra_wait = 1
wait_timeout = timeout
if wait_timeout is not None:
wait_timeout = wait_timeout + extra_wait
try:
return f.result(timeout=wait_timeout)
except TimeoutError:
raise RuntimeError('Unexpected error: Async request handler did '
'not call reply handler within timeout period')
except Exception:
# Use the tornado future to give us a usable traceback
tf[0].result()
assert False
|
python
|
{
"resource": ""
}
|
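A sketch of calling blocking_request above from a non-ioloop thread, assuming client is an already started and connected AsyncClient-style instance; the inform layout follows request_sensor_value earlier in this table.

```python
from katcp import Message

def read_sensor(client, sensor_name):
    """Blocking helper: ask an already connected client for one sensor value.

    `client` is assumed to be a started AsyncClient-style instance; this must
    be called from a thread other than the client's ioloop thread.
    """
    reply, informs = client.blocking_request(
        Message.request('sensor-value', sensor_name), timeout=5)
    if not reply.reply_ok():
        raise RuntimeError('sensor-value failed: %s' % (reply,))
    # Each #sensor-value inform carries: timestamp, count, name, status, value
    # (see request_sensor_value earlier in this table).
    timestamp, _count, _name, status, value = informs[0].arguments
    return timestamp, status, value
```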
q16466
|
AsyncClient.handle_inform
|
train
|
def handle_inform(self, msg):
"""Handle inform messages related to any current requests.
Inform messages not related to the current request go up
to the base class method.
Parameters
----------
msg : Message object
The inform message to dispatch.
"""
# this may also result in inform_cb being None if no
# inform_cb was passed to the request method.
if msg.mid is not None:
_request, _reply_cb, inform_cb, user_data, _timeout_handle = \
self._peek_async_request(msg.mid, None)
else:
request, _reply_cb, inform_cb, user_data, _timeout_handle = \
self._peek_async_request(None, msg.name)
if request is not None and request.mid is not None:
# we sent a mid but this inform doesn't have one
inform_cb, user_data = None, None
if inform_cb is None:
inform_cb = super(AsyncClient, self).handle_inform
# override user_data since handle_inform takes no user_data
user_data = None
try:
if user_data is None:
inform_cb(msg)
else:
inform_cb(msg, *user_data)
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Callback inform %s FAIL: %s" %
(msg.name, reason))
|
python
|
{
"resource": ""
}
|
q16467
|
AsyncClient._do_fail_callback
|
train
|
def _do_fail_callback(
self, reason, msg, reply_cb, inform_cb, user_data, timeout_handle):
"""Do callback for a failed request."""
# this may also result in reply_cb being None if no
# reply_cb was passed to the request method
if reply_cb is None:
# this happens if no reply_cb was passed in to the request
return
reason_msg = Message.reply(msg.name, "fail", reason, mid=msg.mid)
try:
if user_data is None:
reply_cb(reason_msg)
else:
reply_cb(reason_msg, *user_data)
except Exception:
e_type, e_value, trace = sys.exc_info()
exc_reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Callback reply during failure %s, %s FAIL: %s" %
(reason, msg.name, exc_reason))
|
python
|
{
"resource": ""
}
|
q16468
|
AsyncClient._handle_timeout
|
train
|
def _handle_timeout(self, msg_id, start_time):
"""Handle a timed-out callback request.
Parameters
----------
msg_id : uuid.UUID for message
The message ID of the request whose reply was expected.
"""
msg, reply_cb, inform_cb, user_data, timeout_handle = \
self._pop_async_request(msg_id, None)
# We may have been racing with the actual reply handler if the reply
# arrived close to the timeout expiry,
# which means the self._pop_async_request() call gave us None's.
# In this case, just bail.
#
# NM 2014-09-17 Not sure if this is true after porting to tornado,
# but I'm too afraid to remove this code :-/
if timeout_handle is None:
return
reason = "Request {0.name} timed out after {1:f} seconds.".format(
msg, self.ioloop.time() - start_time)
self._do_fail_callback(
reason, msg, reply_cb, inform_cb, user_data, timeout_handle)
|
python
|
{
"resource": ""
}
|
q16469
|
AsyncClient.handle_reply
|
train
|
def handle_reply(self, msg):
"""Handle a reply message related to the current request.
Reply messages not related to the current request go up
to the base class method.
Parameters
----------
msg : Message object
The reply message to dispatch.
"""
# this may also result in reply_cb being None if no
# reply_cb was passed to the request method
if msg.mid is not None:
_request, reply_cb, _inform_cb, user_data, timeout_handle = \
self._pop_async_request(msg.mid, None)
else:
request, _reply_cb, _inform_cb, _user_data, timeout_handle = \
self._peek_async_request(None, msg.name)
if request is not None and request.mid is None:
# we didn't send a mid so this is the request we want
_request, reply_cb, _inform_cb, user_data, timeout_handle = \
self._pop_async_request(None, msg.name)
else:
reply_cb, user_data = None, None
if timeout_handle is not None:
self.ioloop.remove_timeout(timeout_handle)
if reply_cb is None:
reply_cb = super(AsyncClient, self).handle_reply
# override user_data since handle_reply takes no user_data
user_data = None
try:
if user_data is None:
reply_cb(msg)
else:
reply_cb(msg, *user_data)
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Callback reply %s FAIL: %s" %
(msg.name, reason))
|
python
|
{
"resource": ""
}
|
q16470
|
SubmissionSerializer.validate_answer
|
train
|
def validate_answer(self, value):
"""
Check that the answer is JSON-serializable and not too long.
"""
# Check that the answer is JSON-serializable
try:
serialized = json.dumps(value)
except (ValueError, TypeError):
raise serializers.ValidationError("Answer value must be JSON-serializable")
# Check the length of the serialized representation
if len(serialized) > Submission.MAXSIZE:
raise serializers.ValidationError("Maximum answer size exceeded.")
return value
|
python
|
{
"resource": ""
}
|
q16471
|
ScoreSerializer.get_annotations
|
train
|
def get_annotations(self, obj):
"""
Inspect ScoreAnnotations to attach all relevant annotations.
"""
annotations = ScoreAnnotation.objects.filter(score_id=obj.id)
return [
ScoreAnnotationSerializer(instance=annotation).data
for annotation in annotations
]
|
python
|
{
"resource": ""
}
|
q16472
|
ScoreSummary.update_score_summary
|
train
|
def update_score_summary(sender, **kwargs):
"""
Listen for new Scores and update the relevant ScoreSummary.
Args:
sender: not used
Kwargs:
instance (Score): The score model whose save triggered this receiver.
"""
score = kwargs['instance']
try:
score_summary = ScoreSummary.objects.get(
student_item=score.student_item
)
score_summary.latest = score
# A score with the "reset" flag set will always replace the current highest score
if score.reset:
score_summary.highest = score
# The conversion to a float may return None if points possible is zero
# In Python, None is always less than an integer, so any score
# with non-null points possible will take precedence.
elif score.to_float() > score_summary.highest.to_float():
score_summary.highest = score
score_summary.save()
except ScoreSummary.DoesNotExist:
ScoreSummary.objects.create(
student_item=score.student_item,
highest=score,
latest=score,
)
except DatabaseError as err:
logger.exception(
u"Error while updating score summary for student item {}"
.format(score.student_item)
)
|
python
|
{
"resource": ""
}
|
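update_score_summary above reads kwargs['instance'] like a Django post_save receiver. One possible wiring sketch; the module path and the assumption that it is registered via post_save.connect are mine, and the actual project may use the @receiver decorator instead.

```python
from django.db.models.signals import post_save

# Assumed module path for the models referenced in the receiver above.
from submissions.models import Score, ScoreSummary

# Run the receiver whenever a Score is saved so the matching ScoreSummary
# stays up to date (e.g. wired up in the app's AppConfig.ready()).
post_save.connect(
    ScoreSummary.update_score_summary,
    sender=Score,
    dispatch_uid='score_summary_update',  # guard against duplicate registration
)
```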
q16473
|
Command.handle
|
train
|
def handle(self, *args, **options):
"""
By default, we're going to do this in chunks. This way, if there ends up being an error,
we can check log messages and continue from that point after fixing the issue.
"""
# Note that by taking last_id here, we're going to miss any submissions created *during* the command execution.
# But that's okay! All new entries have already been created using the new style, no action needed there
last_id = Submission._objects.all().aggregate(Max('id'))['id__max']
log.info("Beginning uuid update")
current = options['start']
while current < last_id:
end_chunk = current + options['chunk'] if last_id - options['chunk'] >= current else last_id
log.info("Updating entries in range [{}, {}]".format(current, end_chunk))
with transaction.atomic():
for submission in Submission._objects.filter(id__gte=current, id__lte=end_chunk).iterator():
submission.save(update_fields=['uuid'])
time.sleep(options['wait'])
current = end_chunk + 1
|
python
|
{
"resource": ""
}
|
q16474
|
get_submissions_for_student_item
|
train
|
def get_submissions_for_student_item(request, course_id, student_id, item_id):
"""Retrieve all submissions associated with the given student item.
Developer utility for accessing all the submissions associated with a
student item. The student item is specified by the unique combination of
course, student, and item.
Args:
request (dict): The request.
course_id (str): The course id for this student item.
student_id (str): The student id for this student item.
item_id (str): The item id for this student item.
Returns:
HttpResponse: The response object for this request. Renders a simple
development page with all the submissions related to the specified
student item.
"""
student_item_dict = dict(
course_id=course_id,
student_id=student_id,
item_id=item_id,
)
context = dict(**student_item_dict)
try:
submissions = get_submissions(student_item_dict)
context["submissions"] = submissions
except SubmissionRequestError:
context["error"] = "The specified student item was not found."
return render_to_response('submissions.html', context)
|
python
|
{
"resource": ""
}
|
q16475
|
create_submission
|
train
|
def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None):
"""Creates a submission for assessment.
Generic means by which to submit an answer for assessment.
Args:
student_item_dict (dict): The student_item this
submission is associated with. This is used to determine which
course, student, and location this submission belongs to.
answer (JSON-serializable): The answer given by the student to be assessed.
submitted_at (datetime): The date in which this submission was submitted.
If not specified, defaults to the current date.
attempt_number (int): A student may be able to submit multiple attempts
per question. This allows the designated attempt to be overridden.
If the attempt is not specified, it will take the most recent
submission, as specified by the submitted_at time, and use its
attempt_number plus one.
Returns:
dict: A representation of the created Submission. The submission
contains five attributes: student_item, attempt_number, submitted_at,
created_at, and answer. 'student_item' is the ID of the related student
item for the submission. 'attempt_number' is the attempt this submission
represents for this question. 'submitted_at' represents the time this
submission was submitted, which can be configured, versus the
'created_at' date, which is when the submission is first created.
Raises:
SubmissionRequestError: Raised when there are validation errors for the
student item or submission. This can be caused by the student item
missing required values, the submission being too long, the
attempt_number being negative, or the given submitted_at time being invalid.
SubmissionInternalError: Raised when submission access causes an
internal error.
Examples:
>>> student_item_dict = dict(
>>> student_id="Tim",
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one"
>>> )
>>> create_submission(student_item_dict, "The answer is 42.", datetime.utcnow(), 1)
{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
}
"""
student_item_model = _get_or_create_student_item(student_item_dict)
if attempt_number is None:
try:
submissions = Submission.objects.filter(
student_item=student_item_model)[:1]
except DatabaseError:
error_message = u"An error occurred while filtering submissions for student item: {}".format(
student_item_dict)
logger.exception(error_message)
raise SubmissionInternalError(error_message)
attempt_number = submissions[0].attempt_number + 1 if submissions else 1
model_kwargs = {
"student_item": student_item_model.pk,
"answer": answer,
"attempt_number": attempt_number,
}
if submitted_at:
model_kwargs["submitted_at"] = submitted_at
try:
submission_serializer = SubmissionSerializer(data=model_kwargs)
if not submission_serializer.is_valid():
raise SubmissionRequestError(field_errors=submission_serializer.errors)
submission_serializer.save()
sub_data = submission_serializer.data
_log_submission(sub_data, student_item_dict)
return sub_data
except DatabaseError:
error_message = u"An error occurred while creating submission {} for student item: {}".format(
model_kwargs,
student_item_dict
)
logger.exception(error_message)
raise SubmissionInternalError(error_message)
|
python
|
{
"resource": ""
}
|
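A brief usage sketch for create_submission, assuming this API module is importable as submissions.api (as in the edx-submissions package); the student item values are placeholders:
from submissions.api import SubmissionRequestError, create_submission

student_item = {
    "student_id": "anonymous-id-123",      # placeholder ids
    "course_id": "course-v1:Demo+T1+2024",
    "item_id": "block-abc",
    "item_type": "openassessment",
}

try:
    # attempt_number and submitted_at fall back to their documented defaults.
    submission = create_submission(student_item, {"text": "The answer is 42."})
    print(submission["attempt_number"], submission["answer"])
except SubmissionRequestError as err:
    # Raised when the student item or answer fails validation.
    print("invalid submission:", err)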
q16476
|
_get_submission_model
|
train
|
def _get_submission_model(uuid, read_replica=False):
"""
Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes
EDUCATOR-1090, because uuids are stored both with and without hyphens.
"""
submission_qs = Submission.objects
if read_replica:
submission_qs = _use_read_replica(submission_qs)
try:
submission = submission_qs.get(uuid=uuid)
except Submission.DoesNotExist:
try:
hyphenated_value = six.text_type(UUID(uuid))
query = """
SELECT
`submissions_submission`.`id`,
`submissions_submission`.`uuid`,
`submissions_submission`.`student_item_id`,
`submissions_submission`.`attempt_number`,
`submissions_submission`.`submitted_at`,
`submissions_submission`.`created_at`,
`submissions_submission`.`raw_answer`,
`submissions_submission`.`status`
FROM
`submissions_submission`
WHERE (
NOT (`submissions_submission`.`status` = 'D')
AND `submissions_submission`.`uuid` = '{}'
)
"""
query = query.replace("{}", hyphenated_value)
# We can use Submission.objects instead of the SoftDeletedManager because the soft-delete filter is applied manually in the query above
submission = Submission.objects.raw(query)[0]
except IndexError:
raise Submission.DoesNotExist()
# Avoid the extra hit next time
submission.save(update_fields=['uuid'])
return submission
|
python
|
{
"resource": ""
}
|
q16477
|
get_submission
|
train
|
def get_submission(submission_uuid, read_replica=False):
"""Retrieves a single submission by uuid.
Args:
submission_uuid (str): Identifier for the submission.
Kwargs:
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Raises:
SubmissionNotFoundError: Raised if the submission does not exist.
SubmissionRequestError: Raised if the search parameter is not a string.
SubmissionInternalError: Raised for unknown errors.
Examples:
>>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
}
"""
if not isinstance(submission_uuid, six.string_types):
if isinstance(submission_uuid, UUID):
submission_uuid = six.text_type(submission_uuid)
else:
raise SubmissionRequestError(
msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
)
cache_key = Submission.get_cache_key(submission_uuid)
try:
cached_submission_data = cache.get(cache_key)
except Exception:
# The cache backend could raise an exception
# (for example, memcache keys that contain spaces)
logger.exception("Error occurred while retrieving submission from the cache")
cached_submission_data = None
if cached_submission_data:
logger.info("Get submission {} (cached)".format(submission_uuid))
return cached_submission_data
try:
submission = _get_submission_model(submission_uuid, read_replica)
submission_data = SubmissionSerializer(submission).data
cache.set(cache_key, submission_data)
except Submission.DoesNotExist:
logger.error("Submission {} not found.".format(submission_uuid))
raise SubmissionNotFoundError(
u"No submission matching uuid {}".format(submission_uuid)
)
except Exception as exc:
# Something very unexpected has just happened (like DB misconfig)
err_msg = "Could not get submission due to error: {}".format(exc)
logger.exception(err_msg)
raise SubmissionInternalError(err_msg)
logger.info("Get submission {}".format(submission_uuid))
return submission_data
|
python
|
{
"resource": ""
}
|
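A short usage sketch for get_submission (same assumed import path) showing the read-replica flag and the not-found case; the uuid is a placeholder:
from submissions.api import SubmissionNotFoundError, get_submission

try:
    data = get_submission("a778b933-9fb3-11e3-9c0f-040ccee02800", read_replica=True)
    print(data["answer"], data["submitted_at"])
except SubmissionNotFoundError:
    print("no submission with that uuid")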
q16478
|
get_submission_and_student
|
train
|
def get_submission_and_student(uuid, read_replica=False):
"""
Retrieve a submission by its unique identifier, including the associated student item.
Args:
uuid (str): the unique identifier of the submission.
Kwargs:
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Returns:
Serialized Submission model (dict) containing a serialized StudentItem model
Raises:
SubmissionNotFoundError: Raised if the submission does not exist.
SubmissionRequestError: Raised if the search parameter is not a string.
SubmissionInternalError: Raised for unknown errors.
"""
# This may raise API exceptions
submission = get_submission(uuid, read_replica=read_replica)
# Retrieve the student item from the cache
cache_key = "submissions.student_item.{}".format(submission['student_item'])
try:
cached_student_item = cache.get(cache_key)
except Exception:
# The cache backend could raise an exception
# (for example, memcache keys that contain spaces)
logger.exception("Error occurred while retrieving student item from the cache")
cached_student_item = None
if cached_student_item is not None:
submission['student_item'] = cached_student_item
else:
# There is probably a more idiomatic way to do this using the Django REST framework
try:
student_item_qs = StudentItem.objects
if read_replica:
student_item_qs = _use_read_replica(student_item_qs)
student_item = student_item_qs.get(id=submission['student_item'])
submission['student_item'] = StudentItemSerializer(student_item).data
cache.set(cache_key, submission['student_item'])
except Exception as ex:
err_msg = "Could not get submission due to error: {}".format(ex)
logger.exception(err_msg)
raise SubmissionInternalError(err_msg)
return submission
|
python
|
{
"resource": ""
}
|
q16479
|
get_submissions
|
train
|
def get_submissions(student_item_dict, limit=None):
"""Retrieves the submissions for the specified student item,
ordered by most recent submitted date.
Returns the submissions relative to the specified student item. Exception
thrown if no submission is found relative to this location.
Args:
student_item_dict (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
limit (int): Optional parameter for limiting the returned number of
submissions associated with this student item. If not specified, all
associated submissions are returned.
Returns:
List dict: A list of dicts for the associated student item. The submission
contains five attributes: student_item, attempt_number, submitted_at,
created_at, and answer. 'student_item' is the ID of the related student
item for the submission. 'attempt_number' is the attempt this submission
represents for this question. 'submitted_at' represents the time this
submission was submitted, which can be configured, versus the
'created_at' date, which is when the submission is first created.
Raises:
SubmissionRequestError: Raised when the associated student item fails
validation.
SubmissionNotFoundError: Raised when a submission cannot be found for
the associated student item.
Examples:
>>> student_item_dict = dict(
>>> student_id="Tim",
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one"
>>> )
>>> get_submissions(student_item_dict, 3)
[{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
}]
"""
student_item_model = _get_or_create_student_item(student_item_dict)
try:
submission_models = Submission.objects.filter(
student_item=student_item_model)
except DatabaseError:
error_message = (
u"Error getting submission request for student item {}"
.format(student_item_dict)
)
logger.exception(error_message)
raise SubmissionNotFoundError(error_message)
if limit:
submission_models = submission_models[:limit]
return SubmissionSerializer(submission_models, many=True).data
|
python
|
{
"resource": ""
}
|
q16480
|
get_all_submissions
|
train
|
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
"""For the given item, get the most recent submission for every student who has submitted.
This may return a very large result set! It is implemented as a generator for efficiency.
Args:
course_id, item_id, item_type (string): The values of the respective student_item fields
to filter the submissions by.
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Yields:
Dicts representing the submissions with the following fields:
student_item
student_id
attempt_number
submitted_at
created_at
answer
Raises:
Cannot fail unless there's a database error, but may return an empty iterable.
"""
submission_qs = Submission.objects
if read_replica:
submission_qs = _use_read_replica(submission_qs)
# We cannot use SELECT DISTINCT ON because it's PostgreSQL only, so unfortunately
# our results will contain every entry of each student, not just the most recent.
# We sort by student_id and primary key, so the results will be grouped by
# student, with the most recent submission being the first one in each group.
query = submission_qs.select_related('student_item').filter(
student_item__course_id=course_id,
student_item__item_id=item_id,
student_item__item_type=item_type,
).order_by('student_item__student_id', '-submitted_at', '-id').iterator()
for unused_student_id, row_iter in itertools.groupby(query, operator.attrgetter('student_item.student_id')):
submission = next(row_iter)
data = SubmissionSerializer(submission).data
data['student_id'] = submission.student_item.student_id
yield data
|
python
|
{
"resource": ""
}
|
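Because get_all_submissions is a generator yielding one dict per student, callers would normally stream over it rather than build a list; a hedged sketch with placeholder identifiers and the same assumed import path:
from submissions.api import get_all_submissions

for row in get_all_submissions("course-v1:Demo+T1+2024", "block-abc", "openassessment"):
    # Each row is the most recent submission for one student.
    print(row["student_id"], row["attempt_number"], row["submitted_at"])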
q16481
|
get_all_course_submission_information
|
train
|
def get_all_course_submission_information(course_id, item_type, read_replica=True):
""" For the given course, get all student items of the given item type, all the submissions for those itemes,
and the latest scores for each item. If a submission was given a score that is not the latest score for the
relevant student item, it will still be included but without score.
Args:
course_id (str): The course that we are getting submissions from.
item_type (str): The type of items that we are getting submissions for.
read_replica (bool): Try to use the database's read replica if it's available.
Yields:
A tuple of three dictionaries representing:
(1) a student item with the following fields:
student_id
course_id
student_item
item_type
(2) a submission with the following fields:
student_item
attempt_number
submitted_at
created_at
answer
(3) a score with the following fields, if one exists and it is the latest score:
(if both conditions are not met, an empty dict is returned here)
student_item
submission
points_earned
points_possible
created_at
submission_uuid
"""
submission_qs = Submission.objects
if read_replica:
submission_qs = _use_read_replica(submission_qs)
query = submission_qs.select_related('student_item__scoresummary__latest__submission').filter(
student_item__course_id=course_id,
student_item__item_type=item_type,
).iterator()
for submission in query:
student_item = submission.student_item
serialized_score = {}
if hasattr(student_item, 'scoresummary'):
latest_score = student_item.scoresummary.latest
# Only include the score if it is not a reset score (is_hidden), and if the current submission is the same
# as the student_item's latest score's submission. This matches the behavior of the API's get_score method.
if (not latest_score.is_hidden()) and latest_score.submission.uuid == submission.uuid:
serialized_score = ScoreSerializer(latest_score).data
yield (
StudentItemSerializer(student_item).data,
SubmissionSerializer(submission).data,
serialized_score
)
|
python
|
{
"resource": ""
}
|
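A hedged sketch of consuming the three-part tuples yielded above (placeholder identifiers; same assumed import path):
from submissions.api import get_all_course_submission_information

rows = get_all_course_submission_information("course-v1:Demo+T1+2024", "openassessment")
for student_item, submission, score in rows:
    # `score` is an empty dict unless it is the latest, non-hidden score
    # for this particular submission.
    print(student_item["student_id"],
          submission["attempt_number"],
          score.get("points_earned"))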
q16482
|
get_top_submissions
|
train
|
def get_top_submissions(course_id, item_id, item_type, number_of_top_scores, use_cache=True, read_replica=True):
"""Get a number of top scores for an assessment based on a particular student item
This function will return top scores for the piece of assessment.
It considers only each student item's latest score, and only when that score is greater than 0.
A score is only calculated for a student item if it has completed the workflow for
a particular assessment module.
In general, users of top submissions can tolerate some latency
in the search results, so by default this call uses
a cache and the read replica (if available).
Args:
course_id (str): The course to retrieve for the top scores
item_id (str): The item within the course to retrieve for the top scores
item_type (str): The type of item to retrieve
number_of_top_scores (int): The number of scores to return, greater than 0 and no
more than 100.
Kwargs:
use_cache (bool): If true, check the cache before querying the database.
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Returns:
topscores (list): The top scores for the assessment for the student item.
An empty list if there are no scores or all scores are 0.
Raises:
SubmissionNotFoundError: Raised when a submission cannot be found for
the associated student item.
SubmissionRequestError: Raised when the number of top scores is higher than the
MAX_TOP_SUBMISSIONS constant.
Examples:
>>> course_id = "TestCourse"
>>> item_id = "u_67"
>>> item_type = "openassessment"
>>> number_of_top_scores = 10
>>>
>>> get_top_submissions(course_id, item_id, item_type, number_of_top_scores)
[{
'score': 20,
'content': "Platypus"
},{
'score': 16,
'content': "Frog"
}]
"""
if number_of_top_scores < 1 or number_of_top_scores > MAX_TOP_SUBMISSIONS:
error_msg = (
u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
)
logger.exception(error_msg)
raise SubmissionRequestError(msg=error_msg)
# First check the cache (unless caching is disabled)
cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
course=course_id,
item=item_id,
type=item_type,
number=number_of_top_scores
)
top_submissions = cache.get(cache_key) if use_cache else None
# If we can't find it in the cache (or caching is disabled), check the database
# By default, prefer the read-replica.
if top_submissions is None:
try:
query = ScoreSummary.objects.filter(
student_item__course_id=course_id,
student_item__item_id=item_id,
student_item__item_type=item_type,
latest__points_earned__gt=0
).select_related('latest', 'latest__submission').order_by("-latest__points_earned")
if read_replica:
query = _use_read_replica(query)
score_summaries = query[:number_of_top_scores]
except DatabaseError:
msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
course_id, item_id, item_type
)
logger.exception(msg)
raise SubmissionInternalError(msg)
# Retrieve the submission content for each top score
top_submissions = [
{
"score": score_summary.latest.points_earned,
"content": SubmissionSerializer(score_summary.latest.submission).data['answer']
}
for score_summary in score_summaries
]
# Always store the retrieved list in the cache
cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)
return top_submissions
|
python
|
{
"resource": ""
}
|
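A usage sketch showing how the cache and read-replica flags might be combined (placeholder identifiers; same assumed import path):
from submissions.api import get_top_submissions

top = get_top_submissions(
    "course-v1:Demo+T1+2024", "block-abc", "openassessment", 10,
    use_cache=False,    # skip the cache and hit the database directly
    read_replica=True,  # but still prefer the read replica if configured
)
for entry in top:
    print(entry["score"], entry["content"])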
q16483
|
get_score
|
train
|
def get_score(student_item):
"""Get the score for a particular student item
Each student item should have a unique score. This function will return the
score if it is available. A score is only calculated for a student item if
it has completed the workflow for a particular assessment module.
Args:
student_item (dict): The dictionary representation of a student item.
Function returns the score related to this student item.
Returns:
score (dict): The score associated with this student item. None if there
is no score found.
Raises:
SubmissionInternalError: Raised if a score cannot be retrieved because
of an internal server error.
Examples:
>>> student_item = {
>>> "student_id":"Tim",
>>> "course_id":"TestCourse",
>>> "item_id":"u_67",
>>> "item_type":"openassessment"
>>> }
>>>
>>> get_score(student_item)
[{
'student_item': 2,
'submission': 2,
'points_earned': 8,
'points_possible': 20,
'created_at': datetime.datetime(2014, 2, 7, 18, 30, 1, 807911, tzinfo=<UTC>)
}]
"""
try:
student_item_model = StudentItem.objects.get(**student_item)
score = ScoreSummary.objects.get(student_item=student_item_model).latest
except (ScoreSummary.DoesNotExist, StudentItem.DoesNotExist):
return None
# By convention, scores are hidden if "points possible" is set to 0.
# This can occur when an instructor has reset scores for a student.
if score.is_hidden():
return None
else:
return ScoreSerializer(score).data
|
python
|
{
"resource": ""
}
|
q16484
|
get_scores
|
train
|
def get_scores(course_id, student_id):
"""Return a dict mapping item_ids to scores.
Scores are represented by serialized Score objects in JSON-like dict
format.
This method would be used by an LMS to find all the scores for a given
student in a given course.
Scores that are "hidden" (because they have points possible set to zero)
are excluded from the results.
Args:
course_id (str): Course ID, used to do a lookup on the `StudentItem`.
student_id (str): Student ID, used to do a lookup on the `StudentItem`.
Returns:
dict: The keys are `item_id`s (`str`) and the values are serialized Score
dicts containing `points_earned` and `points_possible`. All points are integer values and
represent the raw, unweighted scores. Submissions does not have any
concept of weights. If there are no entries matching the `course_id` or
`student_id`, we simply return an empty dictionary. This is not
considered an error because there might be many queries for the progress
page of a person who has never submitted anything.
Raises:
SubmissionInternalError: An unexpected error occurred while resetting scores.
"""
try:
score_summaries = ScoreSummary.objects.filter(
student_item__course_id=course_id,
student_item__student_id=student_id,
).select_related('latest', 'latest__submission', 'student_item')
except DatabaseError:
msg = u"Could not fetch scores for course {}, student {}".format(
course_id, student_id
)
logger.exception(msg)
raise SubmissionInternalError(msg)
scores = {
summary.student_item.item_id: UnannotatedScoreSerializer(summary.latest).data
for summary in score_summaries if not summary.latest.is_hidden()
}
return scores
|
python
|
{
"resource": ""
}
|
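A sketch of reading the per-item score dicts returned above (placeholder identifiers; same assumed import path):
from submissions.api import get_scores

scores = get_scores("course-v1:Demo+T1+2024", "anonymous-id-123")
for item_id, score in scores.items():
    print(item_id, score["points_earned"], "/", score["points_possible"])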
q16485
|
reset_score
|
train
|
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
"""
Reset scores for a specific student on a specific problem.
Note: this does *not* delete `Score` models from the database,
since these are immutable. It simply creates a new score with
the "reset" flag set to True.
Args:
student_id (unicode): The ID of the student for whom to reset scores.
course_id (unicode): The ID of the course containing the item to reset.
item_id (unicode): The ID of the item for which to reset scores.
clear_state (bool): If True, soft-delete any submissions associated with the specified StudentItem (they are hidden from queries, not removed from the database)
Returns:
None
Raises:
SubmissionInternalError: An unexpected error occurred while resetting scores.
"""
# Retrieve the student item
try:
student_item = StudentItem.objects.get(
student_id=student_id, course_id=course_id, item_id=item_id
)
except StudentItem.DoesNotExist:
# If there is no student item, then there is no score to reset,
# so we can return immediately.
return
# Create a "reset" score
try:
score = Score.create_reset_score(student_item)
if emit_signal:
# Send a signal out to any listeners who are waiting for scoring events.
score_reset.send(
sender=None,
anonymous_user_id=student_id,
course_id=course_id,
item_id=item_id,
created_at=score.created_at,
)
if clear_state:
for sub in student_item.submission_set.all():
# soft-delete the Submission
sub.status = Submission.DELETED
sub.save(update_fields=["status"])
# Also clear out cached values
cache_key = Submission.get_cache_key(sub.uuid)
cache.delete(cache_key)
except DatabaseError:
msg = (
u"Error occurred while reseting scores for"
u" item {item_id} in course {course_id} for student {student_id}"
).format(item_id=item_id, course_id=course_id, student_id=student_id)
logger.exception(msg)
raise SubmissionInternalError(msg)
else:
msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
item_id=item_id, course_id=course_id, student_id=student_id
)
logger.info(msg)
|
python
|
{
"resource": ""
}
|
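A usage sketch for reset_score; clear_state=True also soft-deletes the learner's submissions as described above (placeholder identifiers; same assumed import path):
from submissions.api import reset_score

reset_score(
    student_id="anonymous-id-123",
    course_id="course-v1:Demo+T1+2024",
    item_id="block-abc",
    clear_state=True,   # also hide existing submissions for this item
)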
q16486
|
set_score
|
train
|
def set_score(submission_uuid, points_earned, points_possible,
annotation_creator=None, annotation_type=None, annotation_reason=None):
"""Set a score for a particular submission.
Sets the score for a particular submission. This score is calculated
externally to the API.
Args:
submission_uuid (str): UUID for the submission (must exist).
points_earned (int): The earned points for this submission.
points_possible (int): The total points possible for this particular student item.
annotation_creator (str): An optional field for recording who gave this particular score
annotation_type (str): An optional field for recording what type of annotation should be created,
e.g. "staff_override".
annotation_reason (str): An optional field for recording why this score was set to its value.
Returns:
None
Raises:
SubmissionInternalError: Thrown if there was an internal error while
attempting to save the score.
SubmissionRequestError: Thrown if the given student item or submission
are not found.
Examples:
>>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
{
'student_item': 2,
'submission': 1,
'points_earned': 11,
'points_possible': 12,
'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156, tzinfo=<UTC>)
}
"""
try:
submission_model = _get_submission_model(submission_uuid)
except Submission.DoesNotExist:
raise SubmissionNotFoundError(
u"No submission matching uuid {}".format(submission_uuid)
)
except DatabaseError:
error_msg = u"Could not retrieve submission {}.".format(
submission_uuid
)
logger.exception(error_msg)
raise SubmissionRequestError(msg=error_msg)
score = ScoreSerializer(
data={
"student_item": submission_model.student_item.pk,
"submission": submission_model.pk,
"points_earned": points_earned,
"points_possible": points_possible,
}
)
if not score.is_valid():
logger.exception(score.errors)
raise SubmissionInternalError(score.errors)
# When we save the score, a score summary will be created if
# it does not already exist.
# When the database's isolation level is set to repeatable-read,
# it's possible for a score summary to exist for this student item,
# even though we cannot retrieve it.
# In this case, we assume that someone else has already created
# a score summary and ignore the error.
# TODO: once we're using Django 1.8, use transactions to ensure that these
# two models are saved at the same time.
try:
score_model = score.save()
_log_score(score_model)
if annotation_creator is not None:
score_annotation = ScoreAnnotation(
score=score_model,
creator=annotation_creator,
annotation_type=annotation_type,
reason=annotation_reason
)
score_annotation.save()
# Send a signal out to any listeners who are waiting for scoring events.
score_set.send(
sender=None,
points_possible=points_possible,
points_earned=points_earned,
anonymous_user_id=submission_model.student_item.student_id,
course_id=submission_model.student_item.course_id,
item_id=submission_model.student_item.item_id,
created_at=score_model.created_at,
)
except IntegrityError:
pass
|
python
|
{
"resource": ""
}
|
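A usage sketch for set_score with the optional annotation fields (placeholder uuid and values; same assumed import path):
from submissions.api import set_score

set_score(
    "a778b933-9fb3-11e3-9c0f-040ccee02800",
    points_earned=11,
    points_possible=12,
    annotation_creator="staff-user-1",       # optional audit trail fields
    annotation_type="staff_override",
    annotation_reason="Regrade after rubric change",
)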
q16487
|
_log_submission
|
train
|
def _log_submission(submission, student_item):
"""
Log the creation of a submission.
Args:
submission (dict): The serialized submission model.
student_item (dict): The serialized student item model.
Returns:
None
"""
logger.info(
u"Created submission uuid={submission_uuid} for "
u"(course_id={course_id}, item_id={item_id}, "
u"anonymous_student_id={anonymous_student_id})"
.format(
submission_uuid=submission["uuid"],
course_id=student_item["course_id"],
item_id=student_item["item_id"],
anonymous_student_id=student_item["student_id"]
)
)
|
python
|
{
"resource": ""
}
|
q16488
|
_log_score
|
train
|
def _log_score(score):
"""
Log the creation of a score.
Args:
score (Score): The score model.
Returns:
None
"""
logger.info(
"Score of ({}/{}) set for submission {}"
.format(score.points_earned, score.points_possible, score.submission.uuid)
)
|
python
|
{
"resource": ""
}
|
q16489
|
_get_or_create_student_item
|
train
|
def _get_or_create_student_item(student_item_dict):
"""Gets or creates a Student Item that matches the values specified.
Attempts to get the specified Student Item. If it does not exist, the
specified parameters are validated, and a new Student Item is created.
Args:
student_item_dict (dict): The dict containing the student_id, item_id,
course_id, and item_type that uniquely defines a student item.
Returns:
StudentItem: The student item that was retrieved or created.
Raises:
SubmissionInternalError: Thrown if there was an internal error while
attempting to create or retrieve the specified student item.
SubmissionRequestError: Thrown if the given student item parameters fail
validation.
Examples:
>>> student_item_dict = dict(
>>> student_id="Tim",
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one"
>>> )
>>> _get_or_create_student_item(student_item_dict)
{'item_id': 'item_1', 'item_type': 'type_one', 'course_id': 'course_1', 'student_id': 'Tim'}
"""
try:
try:
return StudentItem.objects.get(**student_item_dict)
except StudentItem.DoesNotExist:
student_item_serializer = StudentItemSerializer(
data=student_item_dict
)
if not student_item_serializer.is_valid():
logger.error(
u"Invalid StudentItemSerializer: errors:{} data:{}".format(
student_item_serializer.errors,
student_item_dict
)
)
raise SubmissionRequestError(field_errors=student_item_serializer.errors)
return student_item_serializer.save()
except DatabaseError:
error_message = u"An error occurred creating student item: {}".format(
student_item_dict
)
logger.exception(error_message)
raise SubmissionInternalError(error_message)
|
python
|
{
"resource": ""
}
|
q16490
|
RawMantaClient._request
|
train
|
def _request(self,
path,
method="GET",
query=None,
body=None,
headers=None):
"""Make a Manta request
...
@returns (res, content)
"""
assert path.startswith('/'), "bogus path: %r" % path
# Presuming utf-8 encoding here for requests. Not sure if that is
# technically correct.
if not isinstance(path, bytes):
spath = path.encode('utf-8')
else:
spath = path
qpath = urlquote(spath)
if query:
qpath += '?' + urlencode(query)
url = self.url + qpath
http = self._get_http()
ubody = body
if body is not None and isinstance(body, dict):
ubody = urlencode(body)
if headers is None:
headers = {}
headers["User-Agent"] = self.user_agent
if self.signer:
# Signature auth.
if "Date" not in headers:
headers["Date"] = http_date()
sigstr = 'date: ' + headers["Date"]
algorithm, fingerprint, signature = self.signer.sign(sigstr.encode(
'utf-8'))
auth = 'Signature keyId="/%s/keys/%s",algorithm="%s",signature="%s"'\
% ('/'.join(filter(None, [self.account, self.subuser])),
fingerprint, algorithm, signature.decode('utf-8'))
headers["Authorization"] = auth
if self.role:
headers['Role'] = self.role
# python 3
try:
url = url.decode('utf-8') # encoding='utf-8'
except (AttributeError, UnicodeDecodeError):  # already text (Py3) or undecodable; use as-is
pass
return http.request(url, method, ubody, headers)
|
python
|
{
"resource": ""
}
|
q16491
|
MantaClient.ls
|
train
|
def ls(self, mdir, limit=None, marker=None):
"""List a directory.
Dev Notes:
- If `limit` and `marker` are *not* specified, this handles paging
through a directory with more entries than Manta will return in
one request (1000).
- This returns a dict mapping name to dirent as a convenience.
Note that that makes this inappropriate for streaming a huge
listing. A streaming-appropriate `ls` will be a separate method
if/when that is added.
@param mdir {str} A manta directory, e.g. '/trent/stor/a-dir'.
@returns {dict} A mapping of names to their directory entry (dirent).
"""
assert limit is None and marker is None, "not yet implemented"
dirents = {}
if limit or marker:
entries = self.list_directory(mdir, limit=limit, marker=marker)
for entry in entries:
dirents[entry["name"]] = entry
else:
marker = None
while True:
res, entries = self.list_directory2(mdir, marker=marker)
if marker:
entries.pop(0) # first one is a repeat (the marker)
if not entries:
# Only the marker was there, we've got them all.
break
for entry in entries:
if "id" in entry: # GET /:account/jobs
dirents[entry["id"]] = entry
else:
dirents[entry["name"]] = entry
if marker is None:
# See if got all results in one go (quick out).
result_set_size = int(res.get("result-set-size", 0))
if len(entries) == result_set_size:
break
if "id" in entries[-1]:
marker = entries[-1]["id"] # jobs
else:
marker = entries[-1]["name"]
return dirents
|
python
|
{
"resource": ""
}
|
q16492
|
MantaClient.stat
|
train
|
def stat(self, mpath):
"""Return available dirent info for the given Manta path."""
parts = mpath.split('/')
if len(parts) == 0:
raise errors.MantaError("cannot stat empty manta path: %r" % mpath)
elif len(parts) <= 3:
raise errors.MantaError("cannot stat special manta path: %r" %
mpath)
mparent = udirname(mpath)
name = ubasename(mpath)
dirents = self.ls(mparent)
if name in dirents:
return dirents[name]
else:
raise errors.MantaResourceNotFoundError(
"%s: no such object or directory" % mpath)
|
python
|
{
"resource": ""
}
|
q16493
|
MantaClient.type
|
train
|
def type(self, mpath):
"""Return the manta type for the given manta path.
@param mpath {str} The manta path for which to get the type.
@returns {str|None} The manta type, e.g. "object" or "directory",
or None if the path doesn't exist.
"""
try:
return self.stat(mpath)["type"]
except errors.MantaResourceNotFoundError:
return None
except errors.MantaAPIError:
_, ex, _ = sys.exc_info()
if ex.code in ('ResourceNotFound', 'DirectoryDoesNotExist'):
return None
else:
raise
|
python
|
{
"resource": ""
}
|
q16494
|
option
|
train
|
def option(*args, **kwargs):
"""Decorator to add an option to the optparser argument of a Cmdln
subcommand.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#...
"""
#XXX Is there a possible optimization for many options to not have a
# large stack depth here?
def decorate(f):
if not hasattr(f, "optparser"):
f.optparser = SubCmdOptionParser()
f.optparser.add_option(*args, **kwargs)
return f
return decorate
|
python
|
{
"resource": ""
}
|
q16495
|
man_sections_from_cmdln
|
train
|
def man_sections_from_cmdln(inst, summary=None, description=None, author=None):
"""Return man page sections appropriate for the given Cmdln instance.
Join these sections for man page content.
The man page sections generated are:
NAME
SYNOPSIS
DESCRIPTION (if `description` is given)
OPTIONS
COMMANDS
HELP TOPICS (if any)
@param inst {Cmdln} Instance of Cmdln subclass for which to generate
man page content.
@param summary {str} A one-liner summary of the command.
@param description {str} A description of the command. If given,
it will be used for a "DESCRIPTION" section.
@param author {str} The author name and email for the AUTHOR section
of the man page.
@raises {ValueError} if man page content cannot be generated for the
given class.
"""
if not inst.__class__.name:
raise ValueError("cannot generate man page content: `name` is not "
"set on class %r" % inst.__class__)
data = {
"name": inst.name,
"ucname": inst.name.upper(),
"date": datetime.date.today().strftime("%b %Y"),
"cmdln_version": __version__,
"version_str": inst.version and " %s" % inst.version or "",
"summary_str": summary and r" \- %s" % summary or "",
}
sections = []
sections.append(
'.\\" Automatically generated by cmdln %(cmdln_version)s\n'
'.TH %(ucname)s "1" "%(date)s" "%(name)s%(version_str)s" "User Commands"\n'
% data)
sections.append(".SH NAME\n%(name)s%(summary_str)s\n" % data)
sections.append(_dedent(r"""
.SH SYNOPSIS
.B %(name)s
[\fIGLOBALOPTS\fR] \fISUBCOMMAND \fR[\fIOPTS\fR] [\fIARGS\fR...]
.br
.B %(name)s
\fIhelp SUBCOMMAND\fR
""") % data)
if description:
sections.append(".SH DESCRIPTION\n%s\n" % description)
section = ".SH OPTIONS\n"
if not hasattr(inst, "optparser"):
#HACK: In case `.main()` hasn't been run.
inst.optparser = inst.get_optparser()
lines = inst._help_preprocess("${option_list}", None).splitlines(False)
for line in lines[1:]:
line = line.lstrip()
if not line:
continue
section += ".TP\n"
opts, desc = line.split(' ', 1)
section += ".B %s\n" % opts
section += "%s\n" % _dedent(desc.lstrip(), skip_first_line=True)
sections.append(section)
section = ".SH COMMANDS\n"
cmds = inst._get_cmds_data()
for cmdstr, doc in cmds:
cmdname = cmdstr.split(' ')[0] # e.g. "commit (ci)" -> "commit"
doc = inst._help_reindent(doc, indent="")
doc = inst._help_preprocess(doc, cmdname)
doc = doc.rstrip() + "\n" # trim down trailing space
section += '.PP\n.SS %s\n%s\n' % (cmdstr, doc)
sections.append(section)
help_names = inst._get_help_names()
if help_names:
section = ".SH HELP TOPICS\n"
for help_name, help_meth in sorted(help_names.items()):
help = help_meth(inst)
help = inst._help_reindent(help, indent="")
section += '.PP\n.SS %s\n%s\n' % (help_name, help)
sections.append(section)
if author:
sections.append(".SH AUTHOR\n%s\n" % author)
return sections
|
python
|
{
"resource": ""
}
|
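A minimal sketch of generating man page text from a Cmdln subclass; MyShell and the metadata strings are hypothetical, and the function is assumed to live in the same cmdln module as the Cmdln base class:
import cmdln

class MyShell(cmdln.Cmdln):
    name = "myshell"

    def do_status(self, subcmd, opts):
        """Show current status."""
        pass

sections = cmdln.man_sections_from_cmdln(
    MyShell(),
    summary="example shell",
    description="A demo of generating man page content.",
    author="Jane Doe <jane@example.com>",
)
print("".join(sections))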
q16496
|
_format_linedata
|
train
|
def _format_linedata(linedata, indent, indent_width):
"""Format specific linedata into a pleasant layout.
"linedata" is a list of 2-tuples of the form:
(<item-display-string>, <item-docstring>)
"indent" is a string to use for one level of indentation
"indent_width" is a number of columns by which the
formatted data will be indented when printed.
The <item-display-string> column is held to 30 columns.
"""
lines = []
WIDTH = 78 - indent_width
SPACING = 2
NAME_WIDTH_LOWER_BOUND = 13
NAME_WIDTH_UPPER_BOUND = 30
NAME_WIDTH = max([len(s) for s, d in linedata])
if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
elif NAME_WIDTH > NAME_WIDTH_UPPER_BOUND:
NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
for namestr, doc in linedata:
line = indent + namestr
if len(namestr) <= NAME_WIDTH:
line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
else:
lines.append(line)
line = indent + ' ' * (NAME_WIDTH + SPACING)
line += _summarize_doc(doc, DOC_WIDTH)
lines.append(line.rstrip())
return lines
|
python
|
{
"resource": ""
}
|
q16497
|
_summarize_doc
|
train
|
def _summarize_doc(doc, length=60):
r"""Parse out a short one line summary from the given doclines.
"doc" is the doc string to summarize.
"length" is the max length for the summary
>>> _summarize_doc("this function does this")
'this function does this'
>>> _summarize_doc("this function does this", 10)
'this fu...'
>>> _summarize_doc("this function does this\nand that")
'this function does this and that'
>>> _summarize_doc("this function does this\n\nand that")
'this function does this'
"""
import re
if doc is None:
return ""
assert length > 3, "length <= 3 is absurdly short for a doc summary"
doclines = doc.strip().splitlines(0)
if not doclines:
return ""
summlines = []
for i, line in enumerate(doclines):
stripped = line.strip()
if not stripped:
break
summlines.append(stripped)
if len(''.join(summlines)) >= length:
break
summary = ' '.join(summlines)
if len(summary) > length:
summary = summary[:length - 3] + "..."
return summary
|
python
|
{
"resource": ""
}
|
q16498
|
line2argv
|
train
|
def line2argv(line):
r"""Parse the given line into an argument vector.
"line" is the line of input to parse.
This may get niggly when dealing with quoting and escaping. The
current state of this parsing may not be completely thorough/correct
in this respect.
>>> from cmdln import line2argv
>>> line2argv("foo")
['foo']
>>> line2argv("foo bar")
['foo', 'bar']
>>> line2argv("foo bar ")
['foo', 'bar']
>>> line2argv(" foo bar")
['foo', 'bar']
Quote handling:
>>> line2argv("'foo bar'")
['foo bar']
>>> line2argv('"foo bar"')
['foo bar']
>>> line2argv(r'"foo\"bar"')
['foo"bar']
>>> line2argv("'foo bar' spam")
['foo bar', 'spam']
>>> line2argv("'foo 'bar spam")
['foo bar', 'spam']
>>> line2argv('some\tsimple\ttests')
['some', 'simple', 'tests']
>>> line2argv('a "more complex" test')
['a', 'more complex', 'test']
>>> line2argv('a more="complex test of " quotes')
['a', 'more=complex test of ', 'quotes']
>>> line2argv('a more" complex test of " quotes')
['a', 'more complex test of ', 'quotes']
>>> line2argv('an "embedded \\"quote\\""')
['an', 'embedded "quote"']
# Komodo bug 48027
>>> line2argv('foo bar C:\\')
['foo', 'bar', 'C:\\']
# Komodo change 127581
>>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
['\\test\\slash', 'foo bar', 'foo"bar']
# Komodo change 127629
>>> if sys.platform == "win32":
... line2argv(r'\foo\bar') == ['\\foo\\bar']
... line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
... line2argv('"foo') == ['foo']
... else:
... line2argv(r'\foo\bar') == ['foobar']
... line2argv(r'\\foo\\bar') == ['\\foo\\bar']
... try:
... line2argv('"foo')
... except ValueError as ex:
... "not terminated" in str(ex)
True
True
True
"""
line = line.strip()
argv = []
state = "default"
arg = None # the current argument being parsed
i = -1
WHITESPACE = '\t\n\x0b\x0c\r ' # don't use string.whitespace (bug 81316)
while 1:
i += 1
if i >= len(line): break
ch = line[i]
if ch == "\\" and i + 1 < len(line):
# escaped char always added to arg, regardless of state
if arg is None: arg = ""
if (sys.platform == "win32" or
state in ("double-quoted", "single-quoted")
) and line[i + 1] not in tuple('"\''):
arg += ch
i += 1
arg += line[i]
continue
if state == "single-quoted":
if ch == "'":
state = "default"
else:
arg += ch
elif state == "double-quoted":
if ch == '"':
state = "default"
else:
arg += ch
elif state == "default":
if ch == '"':
if arg is None: arg = ""
state = "double-quoted"
elif ch == "'":
if arg is None: arg = ""
state = "single-quoted"
elif ch in WHITESPACE:
if arg is not None:
argv.append(arg)
arg = None
else:
if arg is None: arg = ""
arg += ch
if arg is not None:
argv.append(arg)
if not sys.platform == "win32" and state != "default":
raise ValueError("command line is not terminated: unfinished %s "
"segment" % state)
return argv
|
python
|
{
"resource": ""
}
|
q16499
|
argv2line
|
train
|
def argv2line(argv):
r"""Put together the given argument vector into a command line.
"argv" is the argument vector to process.
>>> from cmdln import argv2line
>>> argv2line(['foo'])
'foo'
>>> argv2line(['foo', 'bar'])
'foo bar'
>>> argv2line(['foo', 'bar baz'])
'foo "bar baz"'
>>> argv2line(['foo"bar'])
'foo"bar'
>>> print(argv2line(['foo" bar']))
'foo" bar'
>>> print(argv2line(["foo' bar"]))
"foo' bar"
>>> argv2line(["foo'bar"])
"foo'bar"
"""
escapedArgs = []
for arg in argv:
if ' ' in arg and '"' not in arg:
arg = '"' + arg + '"'
elif ' ' in arg and "'" not in arg:
arg = "'" + arg + "'"
elif ' ' in arg:
arg = arg.replace('"', r'\"')
arg = '"' + arg + '"'
escapedArgs.append(arg)
return ' '.join(escapedArgs)
|
python
|
{
"resource": ""
}
|