code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if self._stream:
self._stream.close(exc_info=exc_info) | def _disconnect(self, exc_info=False) | Disconnect and cleanup. | 4.466345 | 3.678238 | 1.214262 |
# log messages received so that no one else has to
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug(
"received from {}: {}"
.format(self.bind_address_string, repr(str(msg))))
if msg.mtype == Message.INFORM:
return self.handle_inform(msg)
elif msg.mtype == Message.REPLY:
return self.handle_reply(msg)
elif msg.mtype == Message.REQUEST:
return self.handle_request(msg)
else:
self._logger.error("Unexpected message type from server ['%s']."
% (msg,)) | def handle_message(self, msg) | Handle a message from the server.
Parameters
----------
msg : Message object
The Message to dispatch to the handler methods. | 3.810136 | 3.828982 | 0.995078 |
method = self._inform_handlers.get(
msg.name, self.__class__.unhandled_inform)
try:
return method(self, msg)
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Inform %s FAIL: %s" % (msg.name, reason)) | def handle_inform(self, msg) | Dispatch an inform message to the appropriate method.
Parameters
----------
msg : Message object
The inform message to dispatch. | 3.432372 | 3.914669 | 0.876797 |
method = self.__class__.unhandled_reply
if msg.name in self._reply_handlers:
method = self._reply_handlers[msg.name]
try:
return method(self, msg)
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Reply %s FAIL: %s" % (msg.name, reason)) | def handle_reply(self, msg) | Dispatch a reply message to the appropriate method.
Parameters
----------
msg : Message object
The reply message to dispatch. | 3.245814 | 3.480141 | 0.932668 |
method = self.__class__.unhandled_request
if msg.name in self._request_handlers:
method = self._request_handlers[msg.name]
try:
reply = method(self, msg)
if isinstance(reply, Message):
# If it is a message object, assume it is a reply.
reply.mid = msg.mid
assert (reply.mtype == Message.REPLY)
assert (reply.name == msg.name)
self._logger.info("%s OK" % (msg.name,))
self.send_message(reply)
else:
# Just pass on what is, potentially, a future. The implementor
# of the request handler method must arrange for a reply to be
# sent. Since clients have no business dealing with requests in
# any case we don't do much to help them.
return reply
# We do want to catch everything that inherits from Exception
# pylint: disable-msg = W0703
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Request %s FAIL: %s" % (msg.name, reason)) | def handle_request(self, msg) | Dispatch a request message to the appropriate method.
Parameters
----------
msg : Message object
The request message to dispatch. | 4.9653 | 5.034571 | 0.986241 |
self._ioloop_manager.set_ioloop(ioloop, managed=False)
self.ioloop = ioloop | def set_ioloop(self, ioloop=None) | Set the tornado.ioloop.IOLoop instance to use.
This defaults to IOLoop.current(). If set_ioloop() is never called the
IOLoop is managed: started in a new thread, and will be stopped if
self.stop() is called.
Notes
-----
Must be called before start() is called | 5.852895 | 7.718707 | 0.758274 |
if self.threadsafe:
return # Already done!
if self._running.isSet():
raise RuntimeError('Cannot enable thread safety after start')
def _getattr(obj, name):
# use 'is True' so mock objects don't return true for everything
return getattr(obj, name, False) is True
for name in dir(self):
try:
meth = getattr(self, name)
except AttributeError:
# Subclasses may have computed attributes that don't work
# before they are started, so let's ignore those
pass
if not callable(meth):
continue
make_threadsafe = _getattr(meth, 'make_threadsafe')
make_threadsafe_blocking = _getattr(meth, 'make_threadsafe_blocking')
if make_threadsafe:
assert not make_threadsafe_blocking
meth = self._make_threadsafe(meth)
setattr(self, name, meth)
elif make_threadsafe_blocking:
meth = self._make_threadsafe_blocking(meth)
setattr(self, name, meth)
self._threadsafe = True | def enable_thread_safety(self) | Enable thread-safety features.
Must be called before start(). | 3.939833 | 3.943156 | 0.999157 |
if self._running.isSet():
raise RuntimeError("Device client already started.")
# Make sure we have an ioloop
self.ioloop = self._ioloop_manager.get_ioloop()
if timeout:
t0 = self.ioloop.time()
self._ioloop_manager.start(timeout)
self.ioloop.add_callback(self._install)
if timeout:
remaining_timeout = timeout - (self.ioloop.time() - t0)
self.wait_running(remaining_timeout) | def start(self, timeout=None) | Start the client in a new thread.
Parameters
----------
timeout : float in seconds
Seconds to wait for client thread to start. Do not specify a
timeout if start() is being called from the same ioloop that this
client will be installed on, since it will block the ioloop without
progressing. | 3.918715 | 3.600568 | 1.08836 |
ioloop = getattr(self, 'ioloop', None)
if not ioloop:
raise RuntimeError('Call start() before stop()')
if timeout:
if get_thread_ident() == self.ioloop_thread_id:
raise RuntimeError('Cannot block inside ioloop')
self._running.wait_with_ioloop(self.ioloop, timeout)
def _cleanup():
self._running.clear()
self._disconnect()
self._ioloop_manager.stop(timeout=timeout, callback=_cleanup) | def stop(self, timeout=None) | Stop a running client (from another thread).
Parameters
----------
timeout : float in seconds
Seconds to wait for client thread to have *started*. | 4.715682 | 4.854605 | 0.971383 |
ioloop = getattr(self, 'ioloop', None)
if not ioloop:
raise RuntimeError('Call start() before wait_running()')
return self._running.wait_with_ioloop(ioloop, timeout) | def wait_running(self, timeout=None) | Wait until the client is running.
Parameters
----------
timeout : float in seconds
Seconds to wait for the client to start running.
Returns
-------
running : bool
Whether the client is running
Notes
-----
Do not call this from the ioloop, use until_running(). | 4.584349 | 4.509638 | 1.016567 |
t0 = self.ioloop.time()
yield self.until_running(timeout=timeout)
t1 = self.ioloop.time()
if timeout:
timedelta = timeout - (t1 - t0)
else:
timedelta = None
assert get_thread_ident() == self.ioloop_thread_id
yield self._connected.until_set(timeout=timedelta) | def until_connected(self, timeout=None) | Return future that resolves when the client is connected. | 4.191644 | 3.918966 | 1.069579 |
t0 = self.ioloop.time()
yield self.until_running(timeout=timeout)
t1 = self.ioloop.time()
if timeout:
timedelta = timeout - (t1 - t0)
else:
timedelta = None
assert get_thread_ident() == self.ioloop_thread_id
yield self._received_protocol_info.until_set(timeout=timedelta) | def until_protocol(self, timeout=None) | Return future that resolves after receipt of katcp protocol info.
If the returned future resolves, the server's protocol information is
available in the ProtocolFlags instance self.protocol_flags. | 4.914876 | 4.165586 | 1.179876 |
assert get_thread_ident() == self.ioloop_thread_id
self._async_queue[msg_id] = (
request, reply_cb, inform_cb, user_data, timeout_handle)
if request.name in self._async_id_stack:
self._async_id_stack[request.name].append(msg_id)
else:
self._async_id_stack[request.name] = [msg_id] | def _push_async_request(self, msg_id, request, reply_cb, inform_cb,
user_data, timeout_handle) | Store reply / inform callbacks for request we've sent. | 2.508891 | 2.511576 | 0.998931 |
assert get_thread_ident() == self.ioloop_thread_id
if msg_id is None:
msg_id = self._msg_id_for_name(msg_name)
if msg_id in self._async_queue:
callback_tuple = self._async_queue[msg_id]
del self._async_queue[msg_id]
self._async_id_stack[callback_tuple[0].name].remove(msg_id)
return callback_tuple
else:
return None, None, None, None, None | def _pop_async_request(self, msg_id, msg_name) | Pop the set of callbacks for a request.
Return tuple of Nones if callbacks already popped (or don't exist). | 3.250213 | 3.013012 | 1.078726 |
assert get_thread_ident() == self.ioloop_thread_id
if msg_id is None:
msg_id = self._msg_id_for_name(msg_name)
if msg_id in self._async_queue:
return self._async_queue[msg_id]
else:
return None, None, None, None, None | def _peek_async_request(self, msg_id, msg_name) | Peek at the set of callbacks for a request.
Return tuple of Nones if callbacks don't exist. | 3.280914 | 3.015845 | 1.087892 |
if msg_name in self._async_id_stack and self._async_id_stack[msg_name]:
return self._async_id_stack[msg_name][0] | def _msg_id_for_name(self, msg_name) | Find the msg_id for a given request name.
Return None if no message id exists. | 3.355343 | 3.536595 | 0.948749 |
if timeout is None:
timeout = self._request_timeout
mid = self._get_mid_and_update_msg(msg, use_mid)
if timeout is None: # deal with 'no timeout', i.e. None
timeout_handle = None
else:
timeout_handle = self.ioloop.call_later(
timeout, partial(self._handle_timeout, mid, self.ioloop.time()))
self._push_async_request(
mid, msg, reply_cb, inform_cb, user_data, timeout_handle)
try:
self.send_request(msg)
except KatcpClientError, e:
error_reply = Message.request(msg.name, "fail", str(e))
error_reply.mid = mid
self.handle_reply(error_reply) | def callback_request(self, msg, reply_cb=None, inform_cb=None,
user_data=None, timeout=None, use_mid=None) | Send a request messsage.
Parameters
----------
msg : Message object
The request message to send.
reply_cb : function
The reply callback with signature reply_cb(msg)
or reply_cb(msg, \*user_data)
inform_cb : function
The inform callback with signature inform_cb(msg)
or inform_cb(msg, \*user_data)
user_data : tuple
Optional user data to send to the reply and inform
callbacks.
timeout : float in seconds
How long to wait for a reply. The default is the
the timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them. | 3.801404 | 3.855025 | 0.986091 |
if timeout is None:
timeout = self._request_timeout
f = tornado_Future()
informs = []
def reply_cb(msg):
f.set_result((msg, informs))
def inform_cb(msg):
informs.append(msg)
try:
self.callback_request(msg, reply_cb=reply_cb, inform_cb=inform_cb,
timeout=timeout, use_mid=use_mid)
except Exception:
f.set_exc_info(sys.exc_info())
return f | def future_request(self, msg, timeout=None, use_mid=None) | Send a request messsage, with future replies.
Parameters
----------
msg : Message object
The request Message to send.
timeout : float in seconds
How long to wait for a reply. The default is the
the timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them.
Returns
-------
A tornado.concurrent.Future that resolves with:
reply : Message object
The reply message received.
informs : list of Message objects
A list of the inform messages received. | 2.738564 | 2.657076 | 1.030668 |
assert (get_thread_ident() != self.ioloop_thread_id), (
'Cannot call blocking_request() in ioloop')
if timeout is None:
timeout = self._request_timeout
f = Future() # for thread safety
tf = [None] # Placeholder for tornado Future for exception tracebacks
def blocking_request_callback():
try:
tf[0] = frf = self.future_request(msg, timeout=timeout,
use_mid=use_mid)
except Exception:
tf[0] = frf = tornado_Future()
frf.set_exc_info(sys.exc_info())
gen.chain_future(frf, f)
self.ioloop.add_callback(blocking_request_callback)
# We wait on the future result that should be set by the reply
# handler callback. If this does not occur within the
# timeout it means something unexpected went wrong. We give it
# an extra second to deal with (unlikely?) slowness in the
# rest of the code.
extra_wait = 1
wait_timeout = timeout
if wait_timeout is not None:
wait_timeout = wait_timeout + extra_wait
try:
return f.result(timeout=wait_timeout)
except TimeoutError:
raise RuntimeError('Unexpected error: Async request handler did '
'not call reply handler within timeout period')
except Exception:
# Use the tornado future to give us a usable traceback
tf[0].result()
assert False | def blocking_request(self, msg, timeout=None, use_mid=None) | Send a request messsage and wait for its reply.
Parameters
----------
msg : Message object
The request Message to send.
timeout : float in seconds
How long to wait for a reply. The default is the
the timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them.
Returns
-------
reply : Message object
The reply message received.
informs : list of Message objects
A list of the inform messages received. | 5.454749 | 5.660831 | 0.963595 |
# this may also result in inform_cb being None if no
# inform_cb was passed to the request method.
if msg.mid is not None:
_request, _reply_cb, inform_cb, user_data, _timeout_handle = \
self._peek_async_request(msg.mid, None)
else:
request, _reply_cb, inform_cb, user_data, _timeout_handle = \
self._peek_async_request(None, msg.name)
if request is not None and request.mid is not None:
# we sent a mid but this inform doesn't have one
inform_cb, user_data = None, None
if inform_cb is None:
inform_cb = super(AsyncClient, self).handle_inform
# override user_data since handle_inform takes no user_data
user_data = None
try:
if user_data is None:
inform_cb(msg)
else:
inform_cb(msg, *user_data)
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Callback inform %s FAIL: %s" %
(msg.name, reason)) | def handle_inform(self, msg) | Handle inform messages related to any current requests.
Inform messages not related to the current request go up
to the base class method.
Parameters
----------
msg : Message object
The inform message to dispatch. | 3.777195 | 3.92575 | 0.962159 |
# this may also result in reply_cb being None if no
# reply_cb was passed to the request method
if reply_cb is None:
# this happens if no reply_cb was passed in to the request
return
reason_msg = Message.reply(msg.name, "fail", reason, mid=msg.mid)
try:
if user_data is None:
reply_cb(reason_msg)
else:
reply_cb(reason_msg, *user_data)
except Exception:
e_type, e_value, trace = sys.exc_info()
exc_reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Callback reply during failure %s, %s FAIL: %s" %
(reason, msg.name, exc_reason)) | def _do_fail_callback(
self, reason, msg, reply_cb, inform_cb, user_data, timeout_handle) | Do callback for a failed request. | 4.162935 | 4.22267 | 0.985854 |
msg, reply_cb, inform_cb, user_data, timeout_handle = \
self._pop_async_request(msg_id, None)
# We may have been racing with the actual reply handler if the reply
# arrived close to the timeout expiry,
# which means the self._pop_async_request() call gave us None's.
# In this case, just bail.
#
# NM 2014-09-17 Not sure if this is true after porting to tornado,
# but I'm too afraid to remove this code :-/
if timeout_handle is None:
return
reason = "Request {0.name} timed out after {1:f} seconds.".format(
msg, self.ioloop.time() - start_time)
self._do_fail_callback(
reason, msg, reply_cb, inform_cb, user_data, timeout_handle) | def _handle_timeout(self, msg_id, start_time) | Handle a timed-out callback request.
Parameters
----------
msg_id : uuid.UUID for message
The name of the reply which was expected. | 6.995345 | 6.979931 | 1.002208 |
# this may also result in reply_cb being None if no
# reply_cb was passed to the request method
if msg.mid is not None:
_request, reply_cb, _inform_cb, user_data, timeout_handle = \
self._pop_async_request(msg.mid, None)
else:
request, _reply_cb, _inform_cb, _user_data, timeout_handle = \
self._peek_async_request(None, msg.name)
if request is not None and request.mid is None:
# we didn't send a mid so this is the request we want
_request, reply_cb, _inform_cb, user_data, timeout_handle = \
self._pop_async_request(None, msg.name)
else:
reply_cb, user_data = None, None
if timeout_handle is not None:
self.ioloop.remove_timeout(timeout_handle)
if reply_cb is None:
reply_cb = super(AsyncClient, self).handle_reply
# override user_data since handle_reply takes no user_data
user_data = None
try:
if user_data is None:
reply_cb(msg)
else:
reply_cb(msg, *user_data)
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Callback reply %s FAIL: %s" %
(msg.name, reason)) | def handle_reply(self, msg) | Handle a reply message related to the current request.
Reply messages not related to the current request go up
to the base class method.
Parameters
----------
msg : Message object
The reply message to dispatch. | 3.26226 | 3.358852 | 0.971242 |
# Check that the answer is JSON-serializable
try:
serialized = json.dumps(value)
except (ValueError, TypeError):
raise serializers.ValidationError("Answer value must be JSON-serializable")
# Check the length of the serialized representation
if len(serialized) > Submission.MAXSIZE:
raise serializers.ValidationError("Maximum answer size exceeded.")
return value | def validate_answer(self, value) | Check that the answer is JSON-serializable and not too long. | 4.351177 | 3.582193 | 1.214668 |
annotations = ScoreAnnotation.objects.filter(score_id=obj.id)
return [
ScoreAnnotationSerializer(instance=annotation).data
for annotation in annotations
] | def get_annotations(self, obj) | Inspect ScoreAnnotations to attach all relevant annotations. | 4.403314 | 3.284679 | 1.340561 |
# By setting the "reset" flag, we ensure that the "highest"
# score in the score summary will point to this score.
# By setting points earned and points possible to 0,
# we ensure that this score will be hidden from the user.
return cls.objects.create(
student_item=student_item,
submission=None,
points_earned=0,
points_possible=0,
reset=True,
) | def create_reset_score(cls, student_item) | Create a "reset" score (a score with a null submission).
Only scores created after the most recent "reset" score
should be used to determine a student's effective score.
Args:
student_item (StudentItem): The student item model.
Returns:
Score: The newly created "reset" score.
Raises:
DatabaseError: An error occurred while creating the score | 5.330619 | 5.333438 | 0.999472 |
score = kwargs['instance']
try:
score_summary = ScoreSummary.objects.get(
student_item=score.student_item
)
score_summary.latest = score
# A score with the "reset" flag set will always replace the current highest score
if score.reset:
score_summary.highest = score
# The conversion to a float may return None if points possible is zero
# In Python, None is always less than an integer, so any score
# with non-null points possible will take precedence.
elif score.to_float() > score_summary.highest.to_float():
score_summary.highest = score
score_summary.save()
except ScoreSummary.DoesNotExist:
ScoreSummary.objects.create(
student_item=score.student_item,
highest=score,
latest=score,
)
except DatabaseError as err:
logger.exception(
u"Error while updating score summary for student item {}"
.format(score.student_item)
) | def update_score_summary(sender, **kwargs) | Listen for new Scores and update the relevant ScoreSummary.
Args:
sender: not used
Kwargs:
instance (Score): The score model whose save triggered this receiver. | 4.317906 | 4.534445 | 0.952246 |
parser.add_argument(
'--start', '-s',
default=0,
type=int,
help=u"The Submission.id at which to begin updating rows. 0 by default."
)
parser.add_argument(
'--chunk', '-c',
default=1000,
type=int,
help=u"Batch size, how many rows to update in a given transaction. Default 1000.",
)
parser.add_argument(
'--wait', '-w',
default=2,
type=int,
help=u"Wait time between transactions, in seconds. Default 2.",
) | def add_arguments(self, parser) | Add arguments to the command parser.
Uses argparse syntax. See documentation at
https://docs.python.org/3/library/argparse.html. | 3.010754 | 3.021558 | 0.996424 |
# Note that by taking last_id here, we're going to miss any submissions created *during* the command execution
# But that's okay! All new entries have already been created using the new style, no acion needed there
last_id = Submission._objects.all().aggregate(Max('id'))['id__max']
log.info("Beginning uuid update")
current = options['start']
while current < last_id:
end_chunk = current + options['chunk'] if last_id - options['chunk'] >= current else last_id
log.info("Updating entries in range [{}, {}]".format(current, end_chunk))
with transaction.atomic():
for submission in Submission._objects.filter(id__gte=current, id__lte=end_chunk).iterator():
submission.save(update_fields=['uuid'])
time.sleep(options['wait'])
current = end_chunk + 1 | def handle(self, *args, **options) | By default, we're going to do this in chunks. This way, if there ends up being an error,
we can check log messages and continue from that point after fixing the issue. | 5.690969 | 5.376636 | 1.058463 |
student_item_dict = dict(
course_id=course_id,
student_id=student_id,
item_id=item_id,
)
context = dict(**student_item_dict)
try:
submissions = get_submissions(student_item_dict)
context["submissions"] = submissions
except SubmissionRequestError:
context["error"] = "The specified student item was not found."
return render_to_response('submissions.html', context) | def get_submissions_for_student_item(request, course_id, student_id, item_id) | Retrieve all submissions associated with the given student item.
Developer utility for accessing all the submissions associated with a
student item. The student item is specified by the unique combination of
course, student, and item.
Args:
request (dict): The request.
course_id (str): The course id for this student item.
student_id (str): The student id for this student item.
item_id (str): The item id for this student item.
Returns:
HttpResponse: The response object for this request. Renders a simple
development page with all the submissions related to the specified
student item. | 2.721406 | 2.798118 | 0.972585 |
student_item_model = _get_or_create_student_item(student_item_dict)
if attempt_number is None:
try:
submissions = Submission.objects.filter(
student_item=student_item_model)[:1]
except DatabaseError:
error_message = u"An error occurred while filtering submissions for student item: {}".format(
student_item_dict)
logger.exception(error_message)
raise SubmissionInternalError(error_message)
attempt_number = submissions[0].attempt_number + 1 if submissions else 1
model_kwargs = {
"student_item": student_item_model.pk,
"answer": answer,
"attempt_number": attempt_number,
}
if submitted_at:
model_kwargs["submitted_at"] = submitted_at
try:
submission_serializer = SubmissionSerializer(data=model_kwargs)
if not submission_serializer.is_valid():
raise SubmissionRequestError(field_errors=submission_serializer.errors)
submission_serializer.save()
sub_data = submission_serializer.data
_log_submission(sub_data, student_item_dict)
return sub_data
except DatabaseError:
error_message = u"An error occurred while creating submission {} for student item: {}".format(
model_kwargs,
student_item_dict
)
logger.exception(error_message)
raise SubmissionInternalError(error_message) | def create_submission(student_item_dict, answer, submitted_at=None, attempt_number=None) | Creates a submission for assessment.
Generic means by which to submit an answer for assessment.
Args:
student_item_dict (dict): The student_item this
submission is associated with. This is used to determine which
course, student, and location this submission belongs to.
answer (JSON-serializable): The answer given by the student to be assessed.
submitted_at (datetime): The date in which this submission was submitted.
If not specified, defaults to the current date.
attempt_number (int): A student may be able to submit multiple attempts
per question. This allows the designated attempt to be overridden.
If the attempt is not specified, it will take the most recent
submission, as specified by the submitted_at time, and use its
attempt_number plus one.
Returns:
dict: A representation of the created Submission. The submission
contains five attributes: student_item, attempt_number, submitted_at,
created_at, and answer. 'student_item' is the ID of the related student
item for the submission. 'attempt_number' is the attempt this submission
represents for this question. 'submitted_at' represents the time this
submission was submitted, which can be configured, versus the
'created_at' date, which is when the submission is first created.
Raises:
SubmissionRequestError: Raised when there are validation errors for the
student item or submission. This can be caused by the student item
missing required values, the submission being too long, the
attempt_number is negative, or the given submitted_at time is invalid.
SubmissionInternalError: Raised when submission access causes an
internal error.
Examples:
>>> student_item_dict = dict(
>>> student_id="Tim",
>>> item_id="item_1",
>>> course_id="course_1",
>>> item_type="type_one"
>>> )
>>> create_submission(student_item_dict, "The answer is 42.", datetime.utcnow, 1)
{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
} | 2.227977 | 2.186312 | 1.019057 |
submission_qs = Submission.objects
if read_replica:
submission_qs = _use_read_replica(submission_qs)
try:
submission = submission_qs.get(uuid=uuid)
except Submission.DoesNotExist:
try:
hyphenated_value = six.text_type(UUID(uuid))
query =
query = query.replace("{}", hyphenated_value)
# We can use Submission.objects instead of the SoftDeletedManager, we'll include that logic manually
submission = Submission.objects.raw(query)[0]
except IndexError:
raise Submission.DoesNotExist()
# Avoid the extra hit next time
submission.save(update_fields=['uuid'])
return submission | def _get_submission_model(uuid, read_replica=False) | Helper to retrieve a given Submission object from the database. Helper is needed to centralize logic that fixes
EDUCATOR-1090, because uuids are stored both with and without hyphens. | 6.565886 | 6.435908 | 1.020196 |
if not isinstance(submission_uuid, six.string_types):
if isinstance(submission_uuid, UUID):
submission_uuid = six.text_type(submission_uuid)
else:
raise SubmissionRequestError(
msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
)
cache_key = Submission.get_cache_key(submission_uuid)
try:
cached_submission_data = cache.get(cache_key)
except Exception:
# The cache backend could raise an exception
# (for example, memcache keys that contain spaces)
logger.exception("Error occurred while retrieving submission from the cache")
cached_submission_data = None
if cached_submission_data:
logger.info("Get submission {} (cached)".format(submission_uuid))
return cached_submission_data
try:
submission = _get_submission_model(submission_uuid, read_replica)
submission_data = SubmissionSerializer(submission).data
cache.set(cache_key, submission_data)
except Submission.DoesNotExist:
logger.error("Submission {} not found.".format(submission_uuid))
raise SubmissionNotFoundError(
u"No submission matching uuid {}".format(submission_uuid)
)
except Exception as exc:
# Something very unexpected has just happened (like DB misconfig)
err_msg = "Could not get submission due to error: {}".format(exc)
logger.exception(err_msg)
raise SubmissionInternalError(err_msg)
logger.info("Get submission {}".format(submission_uuid))
return submission_data | def get_submission(submission_uuid, read_replica=False) | Retrieves a single submission by uuid.
Args:
submission_uuid (str): Identifier for the submission.
Kwargs:
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Raises:
SubmissionNotFoundError: Raised if the submission does not exist.
SubmissionRequestError: Raised if the search parameter is not a string.
SubmissionInternalError: Raised for unknown errors.
Examples:
>>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
} | 2.949316 | 2.972838 | 0.992088 |
# This may raise API exceptions
submission = get_submission(uuid, read_replica=read_replica)
# Retrieve the student item from the cache
cache_key = "submissions.student_item.{}".format(submission['student_item'])
try:
cached_student_item = cache.get(cache_key)
except Exception:
# The cache backend could raise an exception
# (for example, memcache keys that contain spaces)
logger.exception("Error occurred while retrieving student item from the cache")
cached_student_item = None
if cached_student_item is not None:
submission['student_item'] = cached_student_item
else:
# There is probably a more idiomatic way to do this using the Django REST framework
try:
student_item_qs = StudentItem.objects
if read_replica:
student_item_qs = _use_read_replica(student_item_qs)
student_item = student_item_qs.get(id=submission['student_item'])
submission['student_item'] = StudentItemSerializer(student_item).data
cache.set(cache_key, submission['student_item'])
except Exception as ex:
err_msg = "Could not get submission due to error: {}".format(ex)
logger.exception(err_msg)
raise SubmissionInternalError(err_msg)
return submission | def get_submission_and_student(uuid, read_replica=False) | Retrieve a submission by its unique identifier, including the associated student item.
Args:
uuid (str): the unique identifier of the submission.
Kwargs:
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Returns:
Serialized Submission model (dict) containing a serialized StudentItem model
Raises:
SubmissionNotFoundError: Raised if the submission does not exist.
SubmissionRequestError: Raised if the search parameter is not a string.
SubmissionInternalError: Raised for unknown errors. | 3.046789 | 3.006885 | 1.013271 |
def get_submissions(student_item_dict, limit=None):
    """Retrieve the submissions for the specified student item.

    Looks up (or creates) the student item and returns its serialized
    submissions, ordered by most recent submitted date.

    Args:
        student_item_dict (dict): The location of the problem this submission
            is associated with, as defined by a course, student, and item.
        limit (int): Optional cap on the number of submissions returned.
            If omitted, all associated submissions are returned.

    Returns:
        list of dict: Serialized submissions for the student item. Each dict
        contains student_item, attempt_number, submitted_at, created_at,
        and answer.

    Raises:
        SubmissionRequestError: Raised when the associated student item
            fails validation.
        SubmissionNotFoundError: Raised when the submission query fails.

    Examples:
        >>> student_item_dict = dict(
        >>>     student_id="Tim",
        >>>     item_id="item_1",
        >>>     course_id="course_1",
        >>>     item_type="type_one"
        >>> )
        >>> get_submissions(student_item_dict, 3)
        [{
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }]
    """
    student_item_model = _get_or_create_student_item(student_item_dict)
    try:
        queryset = Submission.objects.filter(student_item=student_item_model)
    except DatabaseError:
        error_message = (
            u"Error getting submission request for student item {}"
            .format(student_item_dict)
        )
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)

    if limit:
        queryset = queryset[:limit]

    return SubmissionSerializer(queryset, many=True).data
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
    """Yield the most recent submission of every student for the given item.

    This may return a very large result set! It is implemented as a
    generator for efficiency.

    Args:
        course_id, item_id, item_type (string): The values of the respective
            student_item fields to filter the submissions by.
        read_replica (bool): If true, attempt to use the read replica
            database. If no read replica is available, use the default
            database.

    Yields:
        dict: Serialized submission augmented with 'student_id'. Fields:
        student_item, student_id, attempt_number, submitted_at, created_at,
        answer.

    Raises:
        Cannot fail unless there's a database error, but may yield nothing.
    """
    queryset = Submission.objects
    if read_replica:
        queryset = _use_read_replica(queryset)

    # SELECT DISTINCT ON is PostgreSQL-only, so this query necessarily
    # returns every submission of every student, not just the latest one.
    # Sorting by student_id, then recency, groups each student's rows
    # together with the newest submission first in its group.
    ordered_rows = queryset.select_related('student_item').filter(
        student_item__course_id=course_id,
        student_item__item_id=item_id,
        student_item__item_type=item_type,
    ).order_by('student_item__student_id', '-submitted_at', '-id').iterator()

    by_student = itertools.groupby(
        ordered_rows, operator.attrgetter('student_item.student_id'))
    for _student_id, rows in by_student:
        newest = next(rows)
        serialized = SubmissionSerializer(newest).data
        serialized['student_id'] = newest.student_item.student_id
        yield serialized
def get_all_course_submission_information(course_id, item_type, read_replica=True):
    """Yield (student item, submission, score) triples for a whole course.

    For the given course, get all student items of the given item type, all
    the submissions for those items, and the latest scores for each item. If
    a submission was given a score that is not the latest score for the
    relevant student item, it will still be included but without score.

    Args:
        course_id (str): The course that we are getting submissions from.
        item_type (str): The type of items that we are getting submissions for.
        read_replica (bool): Try to use the database's read replica if it's
            available.

    Yields:
        A tuple of three dictionaries representing:
        (1) a student item (student_id, course_id, student_item, item_type),
        (2) a submission (student_item, attempt_number, submitted_at,
            created_at, answer),
        (3) a score (student_item, submission, points_earned,
            points_possible, created_at, submission_uuid) -- or an empty
            dict when there is no latest, visible score for the submission.
    """
    queryset = Submission.objects
    if read_replica:
        queryset = _use_read_replica(queryset)

    submissions = queryset.select_related(
        'student_item__scoresummary__latest__submission'
    ).filter(
        student_item__course_id=course_id,
        student_item__item_type=item_type,
    ).iterator()

    for submission in submissions:
        student_item = submission.student_item
        serialized_score = {}
        if hasattr(student_item, 'scoresummary'):
            latest_score = student_item.scoresummary.latest
            # Only expose the score when it is not a reset score (is_hidden)
            # and it actually belongs to this submission; this mirrors the
            # behavior of the API's get_score method.
            is_current = latest_score.submission.uuid == submission.uuid
            if is_current and not latest_score.is_hidden():
                serialized_score = ScoreSerializer(latest_score).data
        yield (
            StudentItemSerializer(student_item).data,
            SubmissionSerializer(submission).data,
            serialized_score,
        )
def get_top_submissions(course_id, item_id, item_type, number_of_top_scores,
                        use_cache=True, read_replica=True):
    """Get a number of top scores for an assessment based on a particular student item.

    Considers only the latest, greater-than-zero score for a piece of
    assessment. Users of top submissions can usually tolerate some latency,
    so by default this call uses a cache and the read replica (if available).

    Args:
        course_id (str): The course to retrieve for the top scores
        item_id (str): The item within the course to retrieve for the top scores
        item_type (str): The type of item to retrieve
        number_of_top_scores (int): The number of scores to return, greater
            than 0 and no more than MAX_TOP_SUBMISSIONS.

    Kwargs:
        use_cache (bool): If true, check the cache before querying the database.
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Returns:
        list of dict: [{'score': int, 'content': answer}, ...], highest
        score first. Empty if there are no scores or all scores are 0.

    Raises:
        SubmissionRequestError: Raised when number_of_top_scores is out of range.
        SubmissionInternalError: Raised when the database query fails.
    """
    if number_of_top_scores < 1 or number_of_top_scores > MAX_TOP_SUBMISSIONS:
        error_msg = (
            u"Number of top scores must be a number between 1 and {}.".format(MAX_TOP_SUBMISSIONS)
        )
        # BUG FIX: use logger.error, not logger.exception -- there is no
        # active exception here, so exception() would log a spurious
        # "NoneType: None" traceback.
        logger.error(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # First check the cache (unless caching is disabled)
    cache_key = "submissions.top_submissions.{course}.{item}.{type}.{number}".format(
        course=course_id,
        item=item_id,
        type=item_type,
        number=number_of_top_scores
    )
    top_submissions = cache.get(cache_key) if use_cache else None

    # If we can't find it in the cache (or caching is disabled), check the
    # database. By default, prefer the read-replica.
    if top_submissions is None:
        try:
            query = ScoreSummary.objects.filter(
                student_item__course_id=course_id,
                student_item__item_id=item_id,
                student_item__item_type=item_type,
                latest__points_earned__gt=0
            ).select_related('latest', 'latest__submission').order_by("-latest__points_earned")

            if read_replica:
                query = _use_read_replica(query)

            # BUG FIX: Django querysets are lazy -- slicing alone does not
            # hit the database. Force evaluation with list() *inside* the
            # try block so a DatabaseError raised when the query actually
            # runs is caught here instead of escaping from the
            # serialization loop below.
            score_summaries = list(query[:number_of_top_scores])
        except DatabaseError:
            msg = u"Could not fetch top score summaries for course {}, item {} of type {}".format(
                course_id, item_id, item_type
            )
            logger.exception(msg)
            raise SubmissionInternalError(msg)

        # Retrieve the submission content for each top score
        top_submissions = [
            {
                "score": score_summary.latest.points_earned,
                "content": SubmissionSerializer(score_summary.latest.submission).data['answer']
            }
            for score_summary in score_summaries
        ]

        # Always store the retrieved list in the cache
        cache.set(cache_key, top_submissions, TOP_SUBMISSIONS_CACHE_TIMEOUT)

    return top_submissions
def get_score(student_item):
    """Get the score for a particular student item.

    Each student item should have a unique score. A score is only calculated
    for a student item once it has completed the workflow for a particular
    assessment module.

    Args:
        student_item (dict): The dictionary representation of a student item.

    Returns:
        dict or None: The serialized score associated with this student item,
        or None if there is no (visible) score.

    Examples:
        >>> student_item = {
        >>>     "student_id": "Tim",
        >>>     "course_id": "TestCourse",
        >>>     "item_id": "u_67",
        >>>     "item_type": "openassessment"
        >>> }
        >>> get_score(student_item)
        [{
            'student_item': 2,
            'submission': 2,
            'points_earned': 8,
            'points_possible': 20,
            'created_at': datetime.datetime(2014, 2, 7, 18, 30, 1, 807911, tzinfo=<UTC>)
        }]
    """
    try:
        item_model = StudentItem.objects.get(**student_item)
        latest = ScoreSummary.objects.get(student_item=item_model).latest
    except (ScoreSummary.DoesNotExist, StudentItem.DoesNotExist):
        return None

    # By convention, scores with "points possible" of 0 are hidden; this
    # happens when an instructor has reset scores for a student.
    if latest.is_hidden():
        return None
    return ScoreSerializer(latest).data
def get_scores(course_id, student_id):
    """Return a dict mapping item_ids to serialized scores.

    Used by an LMS to find all the scores for a given student in a given
    course. Scores that are "hidden" (points possible set to zero, e.g.
    after a reset) are excluded from the results.

    Args:
        course_id (str): Course ID, used to do a lookup on the `StudentItem`.
        student_id (str): Student ID, used to do a lookup on the `StudentItem`.

    Returns:
        dict: Maps `item_id` (str) to a serialized, unannotated score dict.
        Empty when nothing matches -- that is not an error, since many
        students never submit anything.

    Raises:
        SubmissionInternalError: An unexpected database error occurred.
    """
    try:
        score_summaries = ScoreSummary.objects.filter(
            student_item__course_id=course_id,
            student_item__student_id=student_id,
        ).select_related('latest', 'latest__submission', 'student_item')
        # BUG FIX: Django querysets are lazy -- the database is only hit
        # when the queryset is iterated. Build the result dict *inside*
        # the try block so a DatabaseError raised during iteration is
        # caught and translated, instead of escaping to the caller.
        scores = {
            summary.student_item.item_id:
                UnannotatedScoreSerializer(summary.latest).data
            for summary in score_summaries if not summary.latest.is_hidden()
        }
    except DatabaseError:
        msg = u"Could not fetch scores for course {}, student {}".format(
            course_id, student_id
        )
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    return scores
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """Reset scores for a specific student on a specific problem.

    Note: this does *not* delete `Score` models from the database, since
    these are immutable. It simply creates a new score with the "reset"
    flag set to True.

    Args:
        student_id (unicode): The ID of the student for whom to reset scores.
        course_id (unicode): The ID of the course containing the item to reset.
        item_id (unicode): The ID of the item for which to reset scores.
        clear_state (bool): If True, will appear to delete any submissions
            associated with the specified StudentItem.
        emit_signal (bool): If True, send the `score_reset` signal.

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.
    """
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id, course_id=course_id, item_id=item_id
        )
    except StudentItem.DoesNotExist:
        # No student item means there is no score to reset; nothing to do.
        return

    try:
        reset_score_model = Score.create_reset_score(student_item)
        if emit_signal:
            # Notify any listeners waiting for scoring events.
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=reset_score_model.created_at,
            )

        if clear_state:
            for submission in student_item.submission_set.all():
                # Soft-delete the Submission and drop its cached copy.
                submission.status = Submission.DELETED
                submission.save(update_fields=["status"])
                cache.delete(Submission.get_cache_key(submission.uuid))
    except DatabaseError:
        msg = (
            u"Error occurred while reseting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        logger.info(
            u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
                item_id=item_id, course_id=course_id, student_id=student_id
            )
        )
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None, annotation_reason=None):
    """Set a score for a particular submission.

    The score is calculated externally to the API.

    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): The total points possible for this particular
            student item.
        annotation_creator (str): Optional; records who gave this score.
        annotation_type (str): Optional; what type of annotation should be
            created, e.g. "staff_override".
        annotation_reason (str): Optional; why this score was set to its value.

    Returns:
        None

    Raises:
        SubmissionNotFoundError: The submission does not exist.
        SubmissionRequestError: The submission could not be retrieved.
        SubmissionInternalError: The score failed validation or could not
            be saved.

    Examples:
        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
    """
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(
            submission_uuid
        )
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    score = ScoreSerializer(
        data={
            "student_item": submission_model.student_item.pk,
            "submission": submission_model.pk,
            "points_earned": points_earned,
            "points_possible": points_possible,
        }
    )
    if not score.is_valid():
        # BUG FIX: use logger.error, not logger.exception -- no exception
        # is being handled here, so exception() would append a spurious
        # "NoneType: None" traceback to the log entry.
        logger.error(score.errors)
        raise SubmissionInternalError(score.errors)

    # When we save the score, a score summary will be created if
    # it does not already exist.
    # When the database's isolation level is set to repeatable-read,
    # it's possible for a score summary to exist for this student item,
    # even though we cannot retrieve it.
    # In this case, we assume that someone else has already created
    # a score summary and ignore the error.
    # TODO: once we're using Django 1.8, use transactions to ensure that these
    # two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        if annotation_creator is not None:
            score_annotation = ScoreAnnotation(
                score=score_model,
                creator=annotation_creator,
                annotation_type=annotation_type,
                reason=annotation_reason
            )
            score_annotation.save()
        # Send a signal out to any listeners who are waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        pass
def _log_submission(submission, student_item):
    """Log the creation of a submission.

    Args:
        submission (dict): The serialized submission model.
        student_item (dict): The serialized student item model.

    Returns:
        None
    """
    details = {
        "submission_uuid": submission["uuid"],
        "course_id": student_item["course_id"],
        "item_id": student_item["item_id"],
        "anonymous_student_id": student_item["student_id"],
    }
    logger.info(
        u"Created submission uuid={submission_uuid} for "
        u"(course_id={course_id}, item_id={item_id}, "
        u"anonymous_student_id={anonymous_student_id})".format(**details)
    )
def _log_score(score):
    """Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    logger.info(
        "Score of ({}/{}) set for submission {}".format(
            score.points_earned, score.points_possible, score.submission.uuid
        )
    )
def _get_or_create_student_item(student_item_dict):
    """Get or create a StudentItem matching the given values.

    Attempts to fetch the specified StudentItem. If it does not exist, the
    parameters are validated and a new StudentItem is created.

    Args:
        student_item_dict (dict): Contains the student_id, item_id,
            course_id, and item_type that uniquely define a student item.

    Returns:
        StudentItem: The student item that was retrieved or created.

    Raises:
        SubmissionInternalError: An internal error occurred while creating
            or retrieving the student item.
        SubmissionRequestError: The given student item parameters failed
            validation.

    Examples:
        >>> student_item_dict = dict(
        >>>     student_id="Tim",
        >>>     item_id="item_1",
        >>>     course_id="course_1",
        >>>     item_type="type_one"
        >>> )
        >>> _get_or_create_student_item(student_item_dict)
        {'item_id': 'item_1', 'item_type': 'type_one', 'course_id': 'course_1', 'student_id': 'Tim'}
    """
    try:
        try:
            return StudentItem.objects.get(**student_item_dict)
        except StudentItem.DoesNotExist:
            serializer = StudentItemSerializer(data=student_item_dict)
            if not serializer.is_valid():
                logger.error(
                    u"Invalid StudentItemSerializer: errors:{} data:{}".format(
                        serializer.errors,
                        student_item_dict
                    )
                )
                raise SubmissionRequestError(field_errors=serializer.errors)
            return serializer.save()
    except DatabaseError:
        error_message = u"An error occurred creating student item: {}".format(
            student_item_dict
        )
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def _request(self, path, method="GET", query=None, body=None, headers=None):
    """Make a Manta request.

    Args:
        path (str): URL path; must start with '/'.
        method (str): HTTP method, default "GET".
        query (dict): Optional query parameters.
        body (str|bytes|dict): Optional request body; a dict is form-encoded.
        headers (dict): Optional extra request headers.

    @returns (res, content) -- the httplib2 response object and body.
    """
    assert path.startswith('/'), "bogus path: %r" % path
    # Presuming utf-8 encoding here for requests. Not sure if that is
    # technically correct.
    if not isinstance(path, bytes):
        spath = path.encode('utf-8')
    else:
        spath = path
    qpath = urlquote(spath)
    if query:
        qpath += '?' + urlencode(query)
    url = self.url + qpath

    http = self._get_http()
    ubody = body
    # (The `is not None` guard is implied: isinstance(None, dict) is False.)
    if isinstance(body, dict):
        ubody = urlencode(body)
    if headers is None:
        headers = {}
    headers["User-Agent"] = self.user_agent

    if self.signer:
        # Signature auth.
        if "Date" not in headers:
            headers["Date"] = http_date()
        sigstr = 'date: ' + headers["Date"]
        algorithm, fingerprint, signature = self.signer.sign(
            sigstr.encode('utf-8'))
        auth = 'Signature keyId="/%s/keys/%s",algorithm="%s",signature="%s"'\
            % ('/'.join(filter(None, [self.account, self.subuser])),
               fingerprint, algorithm, signature.decode('utf-8'))
        headers["Authorization"] = auth

    if self.role:
        headers['Role'] = self.role

    # BUG FIX: the original used a bare `try: url.decode(...)/except: pass`
    # for Python 2/3 compatibility, which silently swallowed *any* error.
    # Decode explicitly only when the URL is still bytes.
    if isinstance(url, bytes):
        url = url.decode('utf-8')
    return http.request(url, method, ubody, headers)
def put_directory(self, mdir):
    """PutDirectory
    https://apidocs.joyent.com/manta/api.html#PutDirectory

    @param mdir {str} A manta path, e.g. '/trent/stor/mydir'.
    """
    log.debug('PutDirectory %r', mdir)
    res, content = self._request(
        mdir, "PUT",
        headers={"Content-Type": "application/json; type=directory"})
    if res["status"] != "204":
        raise errors.MantaAPIError(res, content)
def list_directory(self, mdir, limit=None, marker=None):
    """ListDirectory
    https://apidocs.joyent.com/manta/api.html#ListDirectory

    @param mdir {str} A manta path, e.g. '/trent/stor/mydir'.
    @param limit {int} Limits the number of records to come back (default
        and max is 1000).
    @param marker {str} Key name at which to start the next listing.
    @returns Directory entries (dirents). E.g.:
        [{u'mtime': u'2012-12-11T01:54:07Z', u'name': u'play', u'type': u'directory'},
         ...]
    """
    # Thin wrapper: discard the response object, keep the entries.
    _res, dirents = self.list_directory2(mdir, limit=limit, marker=marker)
    return dirents
def list_directory2(self, mdir, limit=None, marker=None):
    """A lower-level version of `list_directory` that returns the
    response object (which includes the headers).

    @returns (res, dirents) {2-tuple}
    """
    log.debug('ListDirectory %r', mdir)

    query = {}
    if limit:
        query["limit"] = limit
    if marker:
        query["marker"] = marker

    res, content = self._request(mdir, "GET", query=query)
    if res["status"] != "200":
        raise errors.MantaAPIError(res, content)

    # The body is newline-separated JSON objects, one dirent per line.
    dirents = []
    for raw_line in content.splitlines(False):
        if not raw_line.strip():
            continue
        try:
            entry = json.loads(raw_line.decode("utf-8"))
        except ValueError:
            raise errors.MantaError('invalid directory entry: %r' % raw_line)
        dirents.append(entry)
    return res, dirents
def head_directory(self, mdir):
    """HEAD method on ListDirectory
    https://apidocs.joyent.com/manta/api.html#ListDirectory

    This is not strictly a documented Manta API call. However it is
    provided to allow access to the useful 'result-set-size' header.

    @param mdir {str} A manta path, e.g. '/trent/stor/mydir'.
    @returns The response object, which acts as a dict with the headers.
    """
    log.debug('HEAD ListDirectory %r', mdir)
    res, content = self._request(mdir, "HEAD")
    if res["status"] == "200":
        return res
    raise errors.MantaAPIError(res, content)
def put_object(self, mpath, content=None, path=None, file=None,
               content_length=None, content_type="application/octet-stream",
               durability_level=None):
    """PutObject
    https://apidocs.joyent.com/manta/api.html#PutObject

    Examples:
        client.put_object('/trent/stor/foo', 'foo\\nbar\\nbaz')
        client.put_object('/trent/stor/foo', path='path/to/foo.txt')
        client.put_object('/trent/stor/foo', file=open('path/to/foo.txt'),
                          size=11)

    One of `content`, `path` or `file` is required.

    @param mpath {str} Required. A manta path, e.g. '/trent/stor/myobj'.
    @param content {bytes}
    @param path {str}
    @param file {file-like object}
    @param content_length {int} Not currently used. Expect this to be used
        when streaming support is added.
    @param content_type {string} Optional, but suggested. Default is
        'application/octet-stream'.
    @param durability_level {int} Optional. Default is 2. This tells
        Manta the number of copies to keep.
    """
    log.debug('PutObject %r', mpath)
    headers = {"Content-Type": content_type, }
    if durability_level:
        headers["x-durability-level"] = durability_level

    methods = [m for m in [content, path, file] if m is not None]
    if len(methods) != 1:
        raise errors.MantaError("exactly one of 'content', 'path' or "
                                "'file' must be provided")
    if content is not None:
        pass
    elif path:
        # `with` guarantees the handle is closed (the original used
        # try/finally to the same effect).
        with io.open(path, 'rb') as f:
            content = f.read()
    else:
        content = file.read()

    # Normalize to bytes for length/digest computation. The original used
    # a bare try/except around bytes(content, encoding='utf-8').
    if isinstance(content, bytes):
        content_bytes = content
    else:
        content_bytes = content.encode('utf-8')

    # BUG FIX: Content-Length and Content-MD5 must be computed from the
    # *encoded* bytes. len(content) on a text string counts characters,
    # not bytes, which is wrong for any non-ASCII content.
    headers["Content-Length"] = str(len(content_bytes))
    md5 = hashlib.md5(content_bytes)
    headers["Content-MD5"] = base64.b64encode(md5.digest())
    res, content = self._request(mpath,
                                 "PUT",
                                 body=content,
                                 headers=headers)
    if res["status"] != "204":
        raise errors.MantaAPIError(res, content)
def get_object(self, mpath, path=None, accept="*/*"):
    """GetObject
    https://apidocs.joyent.com/manta/api.html#GetObject

    @param mpath {str} Required. A manta path, e.g. '/trent/stor/myobj'.
    @param path {str} Optional. If given, the retrieved object will be
        written to the given file path instead of the content being
        returned.
    @param accept {str} Optional. Default is '*/*'. The Accept header
        for content negotiation.
    @returns {str|None} None if `path` is provided, else the object
        content.
    """
    _res, content = self.get_object2(mpath, path=path, accept=accept)
    if not isinstance(content, bytes):
        return content
    # Best-effort decode of a bytes payload; fall back to raw bytes.
    try:
        return content.decode(sys.stdout.encoding)
    except UnicodeDecodeError:
        return content
def get_object2(self, mpath, path=None, accept="*/*"):
    """A lower-level version of `get_object` that returns the
    response object (which includes the headers).

    @returns (res, content) {2-tuple} `content` is None if `path` was
        provided
    """
    log.debug('GetObject %r', mpath)
    headers = {"Accept": accept}
    res, content = self._request(mpath, "GET", headers=headers)
    if res["status"] not in ("200", "304"):
        raise errors.MantaAPIError(res, content)
    if len(content) != int(res["content-length"]):
        # BUG FIX: the header value is a string, so the original
        # `"expected %d" % res["content-length"]` raised TypeError in this
        # error path; it also dumped the entire body as "got %s".
        # Report both values as lengths.
        raise errors.MantaError("content-length mismatch: expected %d, "
                                "got %d" %
                                (int(res["content-length"]), len(content)))
    if res.get("content-md5"):
        md5 = hashlib.md5(content)
        content_md5 = base64.b64encode(md5.digest()).decode("utf-8")
        if content_md5 != res["content-md5"]:
            raise errors.MantaError("content-md5 mismatch: expected %s, "
                                    "got %s" %
                                    (res["content-md5"], content_md5))
    if path is not None:
        # `with` guarantees the handle is closed.
        with io.open(path, 'wb') as f:
            f.write(content)
        return (res, None)
    else:
        return (res, content)
def delete_object(self, mpath):
    """DeleteObject
    https://apidocs.joyent.com/manta/api.html#DeleteObject

    @param mpath {str} Required. A manta path, e.g. '/trent/stor/myobj'.
    """
    log.debug('DeleteObject %r', mpath)
    res, content = self._request(mpath, "DELETE")
    if res["status"] == "204":
        return res
    raise errors.MantaAPIError(res, content)
def put_snaplink(self, link_path, object_path):
    """PutSnapLink
    https://mo.joyent.com/docs/muskie/master/api.html#putsnaplink

    @param link_path {str} Required. A manta path, e.g.
        '/trent/stor/mylink'.
    @param object_path {str} Required. The manta path to an existing target
        manta object.
    """
    log.debug('PutLink %r -> %r', link_path, object_path)
    link_headers = {
        "Content-Type": "application/json; type=link",
        #"Content-Length": "0", #XXX Needed?
        "Location": object_path
    }
    res, content = self._request(link_path, "PUT", headers=link_headers)
    if res["status"] != "204":
        raise errors.MantaAPIError(res, content)
def create_job(self, phases, name=None, input=None):
    """CreateJob
    https://apidocs.joyent.com/manta/api.html#CreateJob

    @param phases {list} Required. The job phase definitions.
    @param name {str} Optional. A name for the job.
    @param input Optional. Job input specification.
    @returns {str} The new job's id (last segment of the Location header).
    """
    log.debug('CreateJob')
    path = '/%s/jobs' % self.account
    body = {"phases": phases}
    if name:
        body["name"] = name
    if input:
        body["input"] = input
    headers = {"Content-Type": "application/json"}
    res, content = self._request(path,
                                 "POST",
                                 body=json.dumps(body),
                                 headers=headers)
    if res["status"] != '201':
        raise errors.MantaAPIError(res, content)
    # CLEANUP: the original assigned `location` but never used it, and
    # asserted res["location"] only after already indexing it. Look up the
    # header once and validate before use.
    location = res["location"]
    assert location
    return location.rsplit('/', 1)[-1]
def add_job_inputs(self, job_id, keys):
    """AddJobInputs
    https://apidocs.joyent.com/manta/api.html#AddJobInputs

    @param job_id {str} Required. The job to add inputs to.
    @param keys {list} Required. Manta object keys to feed to the job.
    """
    log.debug("AddJobInputs %r", job_id)
    path = "/%s/jobs/%s/live/in" % (self.account, job_id)
    # The API expects one key per CRLF-terminated line.
    payload = '\r\n'.join(keys) + '\r\n'
    res, content = self._request(
        path, "POST", body=payload,
        headers={
            "Content-Type": "text/plain",
            "Content-Length": str(len(payload))
        })
    if res["status"] != '204':
        raise errors.MantaAPIError(res, content)
def end_job_input(self, job_id):
    """EndJobInput
    https://mo.joyent.com/docs/muskie/master/api.html#EndJobInput

    @param job_id {str} Required. The job whose input should be closed.
    """
    log.debug("EndJobInput %r", job_id)
    path = "/%s/jobs/%s/live/in/end" % (self.account, job_id)
    # "Content-Length": "0" #XXX needed?
    res, content = self._request(path, "POST", headers={})
    if res["status"] != '202':
        raise errors.MantaAPIError(res, content)
def list_jobs(self, state=None, limit=None, marker=None):
    """ListJobs
    https://apidocs.joyent.com/manta/api.html#ListJobs

    Limitation: at this time `list_jobs` doesn't support paging through
    more than the default response num results. (TODO)

    @param state {str} Only return jobs in the given state, e.g.
        "running", "done", etc.
    @param limit TODO
    @param marker TODO
    @returns jobs {list}
    """
    log.debug('ListJobs')
    path = "/%s/jobs" % self.account
    # Only forward the query params that were actually given.
    query = {
        key: value
        for key, value in (("state", state), ("limit", limit),
                           ("marker", marker))
        if value
    }
    res, content = self._request(path, "GET", query=query)
    if res["status"] != "200":
        raise errors.MantaAPIError(res, content)

    # The body is newline-separated JSON objects, one job per line.
    jobs = []
    for raw_line in content.splitlines(False):
        if not raw_line.strip():
            continue
        try:
            jobs.append(json.loads(raw_line))
        except ValueError:
            raise errors.MantaError('invalid job entry: %r' % raw_line)
    return jobs
def get_job(self, job_id):
    """GetJob
    https://apidocs.joyent.com/manta/api.html#GetJob

    @param job_id {str} Required. The job to fetch.
    @returns {dict} The parsed job status document.
    """
    log.debug("GetJob %r", job_id)
    path = "/%s/jobs/%s/live/status" % (self.account, job_id)
    res, content = self._request(path, "GET")
    # CONSISTENCY: every other method in this client checks the string
    # status via res["status"]; this one used the integer `res.status`
    # attribute. Both work on an httplib2 Response, but use one style.
    if res["status"] != "200":
        raise errors.MantaAPIError(res, content)
    try:
        return json.loads(content)
    except ValueError:
        raise errors.MantaError('invalid job data: %r' % content)
def get_job_output(self, job_id):
    """GetJobOutput
    https://apidocs.joyent.com/manta/api.html#GetJobOutput

    @param job_id {str} Required. The job whose output keys to fetch.
    @returns {list} Output keys, one per line of the response body.
    """
    log.debug("GetJobOutput %r", job_id)
    path = "/%s/jobs/%s/live/out" % (self.account, job_id)
    res, content = self._request(path, "GET")
    if res["status"] != "200":
        raise errors.MantaAPIError(res, content)
    return content.splitlines(False)
def get_job_errors(self, job_id):
    """GetJobErrors
    https://apidocs.joyent.com/manta/api.html#GetJobErrors

    @param job_id {str} Required. The job whose errors to fetch.
    @returns The parsed job errors (see `_job_errors_from_content`).
    """
    log.debug("GetJobErrors %r", job_id)
    path = "/%s/jobs/%s/live/err" % (self.account, job_id)
    res, content = self._request(path, "GET")
    if res["status"] == "200":
        return self._job_errors_from_content(content)
    raise errors.MantaAPIError(res, content)
def walk(self, mtop, topdown=True):
    """`os.walk(path)` for a directory in Manta.

    A somewhat limited form in that some of the optional args to
    `os.walk` are not supported. Also, instead of dir *names* and file
    *names*, the dirents for those are returned. E.g.:

        >>> for dirpath, dirents, objents in client.walk('/trent/stor/test'):
        ...     pprint((dirpath, dirents, objents))
        ('/trent/stor/test',
         [{u'mtime': u'2012-12-12T05:40:23Z',
           u'name': u'__pycache__',
           u'type': u'directory'}],
         [{u'etag': u'a5ab3753-c691-4645-9c14-db6653d4f064',
           u'mtime': u'2012-12-12T05:40:22Z',
           u'name': u'test.py',
           u'size': 5627,
           u'type': u'object'},
          ...])

    @param mtop {Manta dir}
    @param topdown {bool} Yield a dir before (True) or after (False) its
        subtree.
    """
    entries = sorted(self.ls(mtop).values(), key=itemgetter("name"))
    subdirs = [e for e in entries if e["type"] == "directory"]
    nondirs = [e for e in entries if e["type"] != "directory"]

    if topdown:
        yield mtop, subdirs, nondirs
    for subdir in subdirs:
        child = ujoin(mtop, subdir["name"])
        for item in self.walk(child, topdown):
            yield item
    if not topdown:
        yield mtop, subdirs, nondirs
def ls(self, mdir, limit=None, marker=None):
    """List a directory.

    Dev Notes:
    - If `limit` and `marker` are *not* specified, this handles paging
      through a directory with more entries than Manta will return in
      one request (1000).
    - This returns a dict mapping name to dirent as a convenience.
      Note that that makes this inappropriate for streaming a huge
      listing. A streaming-appropriate `ls` will be a separate method
      if/when that is added.

    @param mdir {str} A manta directory, e.g. '/trent/stor/a-dir'.
    @param limit {int} Optional. Single-request listing limit.
    @param marker {str} Optional. Key name at which to start the listing.
    @returns {dict} A mapping of names to their directory entry (dirent).
    """
    # BUG FIX: the original began with
    #     assert limit is None and marker is None, "not yet implemented"
    # which made the (implemented!) limit/marker branch below unreachable
    # except under `python -O`. The assert is dropped so the branch works.
    # NOTE(review): the single-request branch keys only on "name";
    # presumably limit/marker are not used for jobs listings -- verify.
    dirents = {}
    if limit or marker:
        entries = self.list_directory(mdir, limit=limit, marker=marker)
        for entry in entries:
            dirents[entry["name"]] = entry
    else:
        marker = None
        while True:
            res, entries = self.list_directory2(mdir, marker=marker)
            if marker:
                entries.pop(0)  # first one is a repeat (the marker)
            if not entries:
                # Only the marker was there, we've got them all.
                break
            for entry in entries:
                if "id" in entry:  # GET /:account/jobs
                    dirents[entry["id"]] = entry
                else:
                    dirents[entry["name"]] = entry
            if marker is None:
                # See if got all results in one go (quick out).
                result_set_size = int(res.get("result-set-size", 0))
                if len(entries) == result_set_size:
                    break
            if "id" in entries[-1]:
                marker = entries[-1]["id"]  # jobs
            else:
                marker = entries[-1]["name"]
    return dirents
# Validate the path: must be absolute and below the top-level
# (/:account/:area) namespace.
assert mdir.startswith('/'), "%s: invalid manta path" % mdir
parts = mdir.split('/')
assert len(parts) > 3, "%s: cannot create top-level dirs" % mdir
if not parents:
    self.put_directory(mdir)
else:
    # Find the first non-existent dir: binary search. Because
    # PutDirectory doesn't error on 'mkdir .../already-exists' we
    # don't have a way to detect a miss on `start`. So basically we
    # keep doing the binary search until we hit and close the `start`
    # to `end` gap.
    # Example:
    # - mdir: /trent/stor/builds/a/b/c (need to mk a/b/c)
    #   parts: ['', 'trent', 'stor', 'builds', 'a', 'b', 'c']
    #   start: 4
    #   end: 8
    # - idx: 6
    #   d: /trent/stor/builds/a/b (put_directory fails)
    #   end: 6
    # - idx: 5
    #   d: /trent/stor/builds/a (put_directory succeeds)
    #   start: 5
    #   (break out of loop)
    # - for i in range(6, 8):
    #   i=6 -> d: /trent/stor/builds/a/b
    #   i=7 -> d: /trent/stor/builds/a/b/c
    end = len(parts) + 1
    start = 3  # Index of the first possible dir to create.
    while start < end - 1:
        # Probe the midpoint: success raises `start`, a
        # DirectoryDoesNotExist failure lowers `end`.
        idx = int((end - start) // 2 + start)
        d = '/'.join(parts[:idx])
        try:
            self.put_directory(d)
        except errors.MantaAPIError:
            _, ex, _ = sys.exc_info()
            if ex.code == 'DirectoryDoesNotExist':
                end = idx
            else:
                raise
        else:
            start = idx
    # Now need to create from (end-1, len(parts)].
    for i in range(end, len(parts) + 1):
        d = '/'.join(parts[:i])
        self.put_directory(d)
Note that this will not error out if the directory already exists
(that is how the PutDirectory Manta API behaves).
@param mdir {str} A manta path, e.g. '/trent/stor/mydir'.
@param parents {bool} Optional. Default false. Like 'mkdir -p', this
will create parent dirs as necessary. | 4.326394 | 3.948779 | 1.095628 |
parts = mpath.split('/')
if len(parts) == 0:
raise errors.MantaError("cannot stat empty manta path: %r" % mpath)
elif len(parts) <= 3:
raise errors.MantaError("cannot stat special manta path: %r" %
mpath)
mparent = udirname(mpath)
name = ubasename(mpath)
dirents = self.ls(mparent)
if name in dirents:
return dirents[name]
else:
raise errors.MantaResourceNotFoundError(
"%s: no such object or directory" % mpath) | def stat(self, mpath) | Return available dirent info for the given Manta path. | 4.069718 | 3.557447 | 1.144 |
try:
return self.stat(mpath)["type"]
except errors.MantaResourceNotFoundError:
return None
except errors.MantaAPIError:
_, ex, _ = sys.exc_info()
if ex.code in ('ResourceNotFound', 'DirectoryDoesNotExist'):
return None
else:
raise | def type(self, mpath) | Return the manta type for the given manta path.
@param mpath {str} The manta path for which to get the type.
@returns {str|None} The manta type, e.g. "object" or "directory",
or None if the path doesn't exist. | 5.439245 | 4.166792 | 1.305379 |
try:
return RawMantaClient.get_job(self, job_id)
except errors.MantaAPIError as ex:
if ex.res.status != 404:
raise
# Job was archived, try to retrieve the archived data.
mpath = "/%s/jobs/%s/job.json" % (self.account, job_id)
content = self.get_object(mpath, accept='application/json')
try:
return json.loads(content)
except ValueError:
raise errors.MantaError('invalid job data: %r' % content) | def get_job(self, job_id) | GetJob
https://apidocs.joyent.com/manta/api.html#GetJob
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival | 4.057437 | 3.501682 | 1.158711 |
try:
return RawMantaClient.get_job_input(self, job_id)
except errors.MantaAPIError as ex:
if ex.res.status != 404:
raise
# Job was archived, try to retrieve the archived data.
mpath = "/%s/jobs/%s/in.txt" % (self.account, job_id)
content = self.get_object(mpath)
keys = content.splitlines(False)
return keys | def get_job_input(self, job_id) | GetJobInput
https://apidocs.joyent.com/manta/api.html#GetJobInput
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival | 5.174983 | 4.266703 | 1.212876 |
try:
return RawMantaClient.get_job_errors(self, job_id)
except errors.MantaAPIError as ex:
if ex.res.status != 404:
raise
# Job was archived, try to retrieve the archived data.
mpath = "/%s/jobs/%s/err.txt" % (self.account, job_id)
content = self.get_object(mpath)
return self._job_errors_from_content(content) | def get_job_errors(self, job_id) | GetJobErrors
https://apidocs.joyent.com/manta/api.html#GetJobErrors
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival | 4.619711 | 3.915706 | 1.17979 |
def decorate(f):
if not hasattr(f, "aliases"):
f.aliases = []
f.aliases += aliases
return f
return decorate | def alias(*aliases) | Decorator to add aliases for Cmdln.do_* command handlers.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.alias("!", "sh")
def do_shell(self, argv):
#...implement 'shell' command | 3.233848 | 3.599401 | 0.898441 |
#XXX Is there a possible optimization for many options to not have a
# large stack depth here?
def decorate(f):
if not hasattr(f, "optparser"):
f.optparser = SubCmdOptionParser()
f.optparser.add_option(*args, **kwargs)
return f
return decorate | def option(*args, **kwargs) | Decorator to add an option to the optparser argument of a Cmdln
subcommand.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#... | 7.08042 | 6.788545 | 1.042995 |
if not inst.__class__.name:
raise ValueError("cannot generate man page content: `name` is not "
"set on class %r" % inst.__class__)
data = {
"name": inst.name,
"ucname": inst.name.upper(),
"date": datetime.date.today().strftime("%b %Y"),
"cmdln_version": __version__,
"version_str": inst.version and " %s" % inst.version or "",
"summary_str": summary and r" \- %s" % summary or "",
}
sections = []
sections.append(
'.\\" Automatically generated by cmdln %(cmdln_version)s\n'
'.TH %(ucname)s "1" "%(date)s" "%(name)s%(version_str)s" "User Commands"\n'
% data)
sections.append(".SH NAME\n%(name)s%(summary_str)s\n" % data)
sections.append(_dedent(r) % data)
if description:
sections.append(".SH DESCRIPTION\n%s\n" % description)
section = ".SH OPTIONS\n"
if not hasattr(inst, "optparser") is None:
#HACK: In case `.main()` hasn't been run.
inst.optparser = inst.get_optparser()
lines = inst._help_preprocess("${option_list}", None).splitlines(False)
for line in lines[1:]:
line = line.lstrip()
if not line:
continue
section += ".TP\n"
opts, desc = line.split(' ', 1)
section += ".B %s\n" % opts
section += "%s\n" % _dedent(desc.lstrip(), skip_first_line=True)
sections.append(section)
section = ".SH COMMANDS\n"
cmds = inst._get_cmds_data()
for cmdstr, doc in cmds:
cmdname = cmdstr.split(' ')[0] # e.g. "commit (ci)" -> "commit"
doc = inst._help_reindent(doc, indent="")
doc = inst._help_preprocess(doc, cmdname)
doc = doc.rstrip() + "\n" # trim down trailing space
section += '.PP\n.SS %s\n%s\n' % (cmdstr, doc)
sections.append(section)
help_names = inst._get_help_names()
if help_names:
section = ".SH HELP TOPICS\n"
for help_name, help_meth in sorted(help_names.items()):
help = help_meth(inst)
help = inst._help_reindent(help, indent="")
section += '.PP\n.SS %s\n%s\n' % (help_name, help)
sections.append(section)
if author:
sections.append(".SH AUTHOR\n%s\n" % author)
return sections | def man_sections_from_cmdln(inst, summary=None, description=None, author=None) | Return man page sections appropriate for the given Cmdln instance.
Join these sections for man page content.
The man page sections generated are:
NAME
SYNOPSIS
DESCRIPTION (if `description` is given)
OPTIONS
COMMANDS
HELP TOPICS (if any)
@param inst {Cmdln} Instance of Cmdln subclass for which to generate
man page content.
@param summary {str} A one-liner summary of the command.
@param description {str} A description of the command. If given,
it will be used for a "DESCRIPTION" section.
@param author {str} The author name and email for the AUTHOR section
of the man page.
@raises {ValueError} if man page content cannot be generated for the
given class. | 3.809133 | 3.623455 | 1.051243 |
lines = []
# Layout budget: 78 columns minus the caller's indent; the name
# column is clamped to [13, 30] and separated from the doc column by
# two spaces.
WIDTH = 78 - indent_width
SPACING = 2
NAME_WIDTH_LOWER_BOUND = 13
NAME_WIDTH_UPPER_BOUND = 30
NAME_WIDTH = max([len(s) for s, d in linedata])
if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
    NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
elif NAME_WIDTH > NAME_WIDTH_UPPER_BOUND:
    NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
for namestr, doc in linedata:
    line = indent + namestr
    if len(namestr) <= NAME_WIDTH:
        # Pad the name out to the doc column.
        line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
    else:
        # Name overflows its column: emit it alone, then start the
        # doc summary on a fresh, fully indented line.
        lines.append(line)
        line = indent + ' ' * (NAME_WIDTH + SPACING)
    line += _summarize_doc(doc, DOC_WIDTH)
    lines.append(line.rstrip())
return lines
"linedata" is a list of 2-tuples of the form:
(<item-display-string>, <item-docstring>)
"indent" is a string to use for one level of indentation
"indent_width" is a number of columns by which the
formatted data will be indented when printed.
The <item-display-string> column is held to 30 columns. | 2.500284 | 2.437969 | 1.02556 |
r
import re
if doc is None:
return ""
assert length > 3, "length <= 3 is absurdly short for a doc summary"
doclines = doc.strip().splitlines(0)
if not doclines:
return ""
summlines = []
for i, line in enumerate(doclines):
stripped = line.strip()
if not stripped:
break
summlines.append(stripped)
if len(''.join(summlines)) >= length:
break
summary = ' '.join(summlines)
if len(summary) > length:
summary = summary[:length - 3] + "..."
return summary | def _summarize_doc(doc, length=60) | r"""Parse out a short one line summary from the given doclines.
"doc" is the doc string to summarize.
"length" is the max length for the summary
>>> _summarize_doc("this function does this")
'this function does this'
>>> _summarize_doc("this function does this", 10)
'this fu...'
>>> _summarize_doc("this function does this\nand that")
'this function does this and that'
>>> _summarize_doc("this function does this\n\nand that")
'this function does this' | 3.305906 | 3.152346 | 1.048713 |
r
# Three-state tokenizer: "default", "single-quoted", "double-quoted".
line = line.strip()
argv = []
state = "default"
arg = None  # the current argument being parsed
i = -1
WHITESPACE = '\t\n\x0b\x0c\r '  # don't use string.whitespace (bug 81316)
while 1:
    i += 1
    if i >= len(line): break
    ch = line[i]
    if ch == "\\" and i + 1 < len(line):
        # escaped char always added to arg, regardless of state
        if arg is None: arg = ""
        # On Windows, or inside quotes, a backslash not followed by a
        # quote char is kept literally (so paths like C:\ survive).
        if (sys.platform == "win32" or
            state in ("double-quoted", "single-quoted")
           ) and line[i + 1] not in tuple('"\''):
            arg += ch
        i += 1
        arg += line[i]
        continue
    if state == "single-quoted":
        if ch == "'":
            state = "default"
        else:
            arg += ch
    elif state == "double-quoted":
        if ch == '"':
            state = "default"
        else:
            arg += ch
    elif state == "default":
        if ch == '"':
            if arg is None: arg = ""
            state = "double-quoted"
        elif ch == "'":
            if arg is None: arg = ""
            state = "single-quoted"
        elif ch in WHITESPACE:
            # Whitespace ends the current argument (if any).
            if arg is not None:
                argv.append(arg)
            arg = None
        else:
            if arg is None: arg = ""
            arg += ch
# Flush the trailing argument.
if arg is not None:
    argv.append(arg)
# Unterminated quotes are an error everywhere except Windows.
if not sys.platform == "win32" and state != "default":
    raise ValueError("command line is not terminated: unfinished %s "
                     "segment" % state)
return argv
"line" is the line of input to parse.
This may get niggly when dealing with quoting and escaping. The
current state of this parsing may not be completely thorough/correct
in this respect.
>>> from cmdln import line2argv
>>> line2argv("foo")
['foo']
>>> line2argv("foo bar")
['foo', 'bar']
>>> line2argv("foo bar ")
['foo', 'bar']
>>> line2argv(" foo bar")
['foo', 'bar']
Quote handling:
>>> line2argv("'foo bar'")
['foo bar']
>>> line2argv('"foo bar"')
['foo bar']
>>> line2argv(r'"foo\"bar"')
['foo"bar']
>>> line2argv("'foo bar' spam")
['foo bar', 'spam']
>>> line2argv("'foo 'bar spam")
['foo bar', 'spam']
>>> line2argv('some\tsimple\ttests')
['some', 'simple', 'tests']
>>> line2argv('a "more complex" test')
['a', 'more complex', 'test']
>>> line2argv('a more="complex test of " quotes')
['a', 'more=complex test of ', 'quotes']
>>> line2argv('a more" complex test of " quotes')
['a', 'more complex test of ', 'quotes']
>>> line2argv('an "embedded \\"quote\\""')
['an', 'embedded "quote"']
# Komodo bug 48027
>>> line2argv('foo bar C:\\')
['foo', 'bar', 'C:\\']
# Komodo change 127581
>>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
['\\test\\slash', 'foo bar', 'foo"bar']
# Komodo change 127629
>>> if sys.platform == "win32":
... line2argv(r'\foo\bar') == ['\\foo\\bar']
... line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
... line2argv('"foo') == ['foo']
... else:
... line2argv(r'\foo\bar') == ['foobar']
... line2argv(r'\\foo\\bar') == ['\\foo\\bar']
... try:
... line2argv('"foo')
... except ValueError as ex:
... "not terminated" in str(ex)
True
True
True | 2.64629 | 2.617883 | 1.010851 |
r
escapedArgs = []
for arg in argv:
if ' ' in arg and '"' not in arg:
arg = '"' + arg + '"'
elif ' ' in arg and "'" not in arg:
arg = "'" + arg + "'"
elif ' ' in arg:
arg = arg.replace('"', r'\"')
arg = '"' + arg + '"'
escapedArgs.append(arg)
return ' '.join(escapedArgs) | def argv2line(argv) | r"""Put together the given argument vector into a command line.
"argv" is the argument vector to process.
>>> from cmdln import argv2line
>>> argv2line(['foo'])
'foo'
>>> argv2line(['foo', 'bar'])
'foo bar'
>>> argv2line(['foo', 'bar baz'])
'foo "bar baz"'
>>> argv2line(['foo"bar'])
'foo"bar'
>>> print(argv2line(['foo" bar']))
'foo" bar'
>>> print(argv2line(["foo' bar"]))
"foo' bar"
>>> argv2line(["foo'bar"])
"foo'bar" | 2.58137 | 2.916585 | 0.885066 |
# Figure out how much the marker is indented.
INDENT_CHARS = tuple(' \t')
start = s.index(marker)
i = start
while i > 0:
if s[i - 1] not in INDENT_CHARS:
break
i -= 1
indent = s[i:start]
indent_width = 0
for ch in indent:
if ch == ' ':
indent_width += 1
elif ch == '\t':
indent_width += tab_width - (indent_width % tab_width)
return indent, indent_width | def _get_indent(marker, s, tab_width=8) | _get_indent(marker, s, tab_width=8) ->
(<indentation-of-'marker'>, <indentation-width>) | 2.309063 | 2.229156 | 1.035847 |
suffix = ''
start = s.index(marker) + len(marker)
i = start
while i < len(s):
if s[i] in ' \t':
suffix += s[i]
elif s[i] in '\r\n':
suffix += s[i]
if s[i] == '\r' and i + 1 < len(s) and s[i + 1] == '\n':
suffix += s[i + 1]
break
else:
break
i += 1
return suffix | def _get_trailing_whitespace(marker, s) | Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline. | 1.871858 | 1.869483 | 1.00127 |
version = (self.version is not None and
"%s %s" % (self._name_str, self.version) or None)
return CmdlnOptionParser(self, version=version) | def get_optparser(self) | Hook for subclasses to set the option parser for the
top-level command/shell.
This option parser is used retrieved and used by `.main()' to
handle top-level options.
The default implements a single '-h|--help' option. Sub-classes
can return None to have no options at the top-level. Typically
an instance of CmdlnOptionParser should be returned. | 6.841435 | 5.63682 | 1.213705 |
if argv is None:
    import sys
    argv = sys.argv
else:
    argv = argv[:]  # don't modify caller's list
# Top-level option processing. A None optparser means "no top-level
# options".
self.optparser = self.get_optparser()
if self.optparser:  # i.e. optparser=None means don't process for opts
    try:
        self.options, args = self.optparser.parse_args(argv[1:])
    except CmdlnUserError as ex:
        msg = "%s: %s\nTry '%s help' for info.\n" % (self.name, ex,
                                                     self.name)
        self.stderr.write(self._str(msg))
        self.stderr.flush()
        return 1
    except StopOptionProcessing as ex:
        # The parser already handled the request (e.g. --version).
        return 0
else:
    self.options, args = None, argv[1:]
# Post-option-parsing hook; a truthy return value aborts with that
# value as the exit status.
retval = self.postoptparse()
if retval:
    return retval
# Dispatch per the requested loop mode.
if loop == LOOP_ALWAYS:
    if args:
        self.cmdqueue.append(args)
    return self.cmdloop()
elif loop == LOOP_NEVER:
    if args:
        return self.cmd(args)
    else:
        return self.emptyline()
elif loop == LOOP_IF_EMPTY:
    if args:
        return self.cmd(args)
    else:
        return self.cmdloop()
import cmdln
class MyCmd(cmdln.Cmdln):
name = "mycmd"
...
if __name__ == "__main__":
MyCmd().main()
By default this will use sys.argv to issue a single command to
'MyCmd', then exit. The 'loop' argument can be use to control
interactive shell behaviour.
Arguments:
"argv" (optional, default sys.argv) is the command to run.
It must be a sequence, where the first element is the
command name and subsequent elements the args for that
command.
"loop" (optional, default LOOP_NEVER) is a constant
indicating if a command loop should be started (i.e. an
interactive shell). Valid values (constants on this module):
LOOP_ALWAYS start loop and run "argv", if any
LOOP_NEVER run "argv" (or .emptyline()) and exit
LOOP_IF_EMPTY run "argv", if given, and exit;
otherwise, start loop | 3.0986 | 2.920784 | 1.06088 |
assert isinstance(argv, (list, tuple)), \
    "'argv' is not a sequence: %r" % argv
retval = None
try:
    # Standard dispatch pipeline: precmd -> onecmd -> postcmd.
    argv = self.precmd(argv)
    retval = self.onecmd(argv)
    self.postcmd(argv)
except:
    # Bare except so cmdexc() gets a shot at every exception type; it
    # returns true if it handled the error, otherwise we re-raise.
    if not self.cmdexc(argv):
        raise
    retval = 1
return retval
"argv" is the arglist for the command to run. argv[0] is the
command to run. If argv is an empty list then the
'emptyline' handler is run.
Returns the return value from the command handler. | 3.129758 | 3.355546 | 0.932712 |
self.cmdlooping = True
self.preloop()
# Install readline tab-completion for the duration of the loop
# (restored in the finally block below).
if self.use_rawinput and self.completekey:
    try:
        import readline
        self.old_completer = readline.get_completer()
        readline.set_completer(self.complete)
        if sys.platform == "darwin":
            readline.parse_and_bind("bind ^I rl_complete")
        else:
            readline.parse_and_bind(self.completekey + ": complete")
    except ImportError:
        pass
try:
    if intro is None:
        intro = self.intro
    if intro:
        intro_str = self._str(intro)
        self.stdout.write(intro_str + '\n')
    self.stop = False
    retval = None
    while not self.stop:
        # Prefer queued commands; otherwise read a line from the
        # user (EOF/Ctrl-C become pseudo-commands).
        if self.cmdqueue:
            argv = self.cmdqueue.pop(0)
            assert isinstance(argv, (list, tuple)), \
                "item on 'cmdqueue' is not a sequence: %r" % argv
        else:
            if self.use_rawinput:
                try:
                    line = input(self._str(self._prompt_str))
                except EOFError:
                    line = 'EOF'
                except KeyboardInterrupt:
                    line = 'KeyboardInterrupt'
            else:
                self.stdout.write(self._str(self._prompt_str))
                self.stdout.flush()
                line = self.stdin.readline()
                if not len(line):
                    line = 'EOF'
                else:
                    line = line[:-1]  # chop '\n'
            argv = line2argv(line)
        try:
            # Same dispatch pipeline as .cmd().
            argv = self.precmd(argv)
            retval = self.onecmd(argv)
            self.postcmd(argv)
        except:
            # Let cmdexc() handle it; re-raise if it doesn't.
            if not self.cmdexc(argv):
                raise
            retval = 1
        self.lastretval = retval
    self.postloop()
finally:
    # Restore whatever completer was installed before the loop.
    if self.use_rawinput and self.completekey:
        try:
            import readline
            readline.set_completer(self.old_completer)
        except ImportError:
            pass
self.cmdlooping = False
return retval
dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
the argv. In other words, start a shell.
"intro" (optional) is a introductory message to print when
starting the command loop. This overrides the class
"intro" attribute, if any. | 2.066205 | 2.01706 | 1.024365 |
import sys
type, exc, traceback = sys.exc_info()
if isinstance(exc, CmdlnUserError):
msg = "%s %s: %s\nTry '%s help %s' for info.\n"\
% (self.name, argv[0], exc, self.name, argv[0])
self.stderr.write(self._str(msg))
self.stderr.flush()
return True | def cmdexc(self, argv) | Called if an exception is raised in any of precmd(), onecmd(),
or postcmd(). If True is returned, the exception is deemed to have
been dealt with. Otherwise, the exception is re-raised.
The default implementation handles CmdlnUserError's, which
typically correspond to user error in calling commands (as
opposed to programmer error in the design of the script using
cmdln.py). | 4.570648 | 3.467302 | 1.318215 |
errmsg = self._str(self.unknowncmd % (argv[0], ))
if self.cmdlooping:
self.stderr.write(errmsg + "\n")
else:
self.stderr.write("%s: %s\nTry '%s help' for info.\n" %
(self._name_str, errmsg, self._name_str))
self.stderr.flush()
return 1 | def default(self, argv) | Hook called to handle a command for which there is no handler.
"argv" is the command and arguments to run.
The default implementation writes an error message to stderr
and returns an error exit status.
Returns a numeric command exit status. | 4.684796 | 4.866671 | 0.962629 |
if known:
msg = self._str(self.nohelp % (cmd, ))
if self.cmdlooping:
self.stderr.write(msg + '\n')
else:
self.stderr.write("%s: %s\n" % (self.name, msg))
else:
msg = self.unknowncmd % (cmd, )
if self.cmdlooping:
self.stderr.write(msg + '\n')
else:
self.stderr.write("%s: %s\n"
"Try '%s help' for info.\n" %
(self.name, msg, self.name))
self.stderr.flush()
return 1 | def helpdefault(self, cmd, known) | Hook called to handle help on a command for which there is no
help handler.
"cmd" is the command name on which help was requested.
"known" is a boolean indicating if this command is known
(i.e. if there is a handler for it).
Returns a return code. | 2.686472 | 2.829505 | 0.94945 |
if len(argv) > 1:  # asking for help on a particular command
    doc = None
    cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
    if not cmdname:
        return self.helpdefault(argv[1], False)
    else:
        # Prefer an explicit help_FOO() method; fall back to the
        # do_FOO handler's docstring.
        helpfunc = getattr(self, "help_" + cmdname, None)
        if helpfunc:
            doc = helpfunc()
        else:
            handler = self._get_cmd_handler(cmdname)
            if handler:
                doc = handler.__doc__
            if doc is None:
                return self.helpdefault(argv[1], handler != None)
else:  # bare "help" command
    doc = self.__class__.__doc__  # try class docstring
    if doc is None:
        # Try to provide some reasonable useful default help.
        if self.cmdlooping: prefix = ""
        else:               prefix = self.name + ' '
        # NOTE(review): the template literal that belongs on the next
        # line has been lost; `doc = % (...)` is not valid Python as
        # written -- restore the default-help template string.
        doc = % (prefix, prefix)
    cmdname = None
if doc:  # *do* have help content, massage and print that
    doc = self._help_reindent(doc)
    doc = self._help_preprocess(doc, cmdname)
    doc = doc.rstrip() + '\n'  # trim down trailing space
    self.stdout.write(self._str(doc))
    self.stdout.flush()
Usage:
${name} help [COMMAND] | 4.823689 | 5.070009 | 0.951416 |
if indent is None:
indent = self.helpindent
lines = help.splitlines(0)
_dedentlines(lines, skip_first_line=True)
lines = [(indent + line).rstrip() for line in lines]
return '\n'.join(lines) | def _help_reindent(self, help, indent=None) | Hook to re-indent help strings before writing to stdout.
"help" is the help content to re-indent
"indent" is a string with which to indent each line of the
help content after normalizing. If unspecified or None
then the default is use: the 'self.helpindent' class
attribute. By default this is the empty string, i.e.
no indentation.
By default, all common leading whitespace is removed and then
the lot is indented by 'self.helpindent'. When calculating the
common leading whitespace the first line is ignored -- hence
help content for Conan can be written as follows and have the
expected indentation:
def do_crush(self, ...):
'''${cmd_name}: crush your enemies, see them driven before you...
c.f. Conan the Barbarian''' | 3.739919 | 3.311198 | 1.129476 |
preprocessors = {
"${name}": self._help_preprocess_name,
"${option_list}": self._help_preprocess_option_list,
"${command_list}": self._help_preprocess_command_list,
"${help_list}": self._help_preprocess_help_list,
"${cmd_name}": self._help_preprocess_cmd_name,
"${cmd_usage}": self._help_preprocess_cmd_usage,
"${cmd_option_list}": self._help_preprocess_cmd_option_list,
}
for marker, preprocessor in preprocessors.items():
if marker in help:
help = preprocessor(help, cmdname)
return help | def _help_preprocess(self, help, cmdname) | Hook to preprocess a help string before writing to stdout.
"help" is the help string to process.
"cmdname" is the canonical sub-command name for which help
is being given, or None if the help is not specific to a
command.
By default the following template variables are interpolated in
help content. (Note: these are similar to Python 2.4's
string.Template interpolation but not quite.)
${name}
The tool's/shell's name, i.e. 'self.name'.
${option_list}
A formatted table of options for this shell/tool.
${command_list}
A formatted table of available sub-commands.
${help_list}
A formatted table of additional help topics (i.e. 'help_*'
methods with no matching 'do_*' method).
${cmd_name}
The name (and aliases) for this sub-command formatted as:
"NAME (ALIAS1, ALIAS2, ...)".
${cmd_usage}
A formatted usage block inferred from the command function
signature.
${cmd_option_list}
A formatted table of options for this sub-command. (This is
only available for commands using the optparse integration,
i.e. using @cmdln.option decorators or manually setting the
'optparser' attribute on the 'do_*' method.)
Returns the processed help. | 2.039444 | 1.647648 | 1.237792 |
# Determine the additional help topics, if any.
help_names = {}
token2cmdname = self._get_canonical_map()
for attrname, attr in self._gen_names_and_attrs():
if not attrname.startswith("help_"): continue
help_name = attrname[5:]
if help_name not in token2cmdname:
help_names[help_name] = attr
return help_names | def _get_help_names(self) | Return a mapping of help topic name to `.help_*()` method. | 5.14517 | 4.348584 | 1.183183 |
# Result is computed once and cached on the instance.
cacheattr = "_token2canonical"
if not hasattr(self, cacheattr):
    # Get the list of commands and their aliases, if any.
    token2canonical = {}
    cmd2funcname = {}  # use a dict to strip duplicates
    for attr in self.get_names():
        # Both "do_foo" and "_do_foo" name a command handler.
        if attr.startswith("do_"): cmdname = attr[3:]
        elif attr.startswith("_do_"): cmdname = attr[4:]
        else:
            continue
        cmd2funcname[cmdname] = attr
        token2canonical[cmdname] = cmdname
    for cmdname, funcname in cmd2funcname.items():  # add aliases
        func = getattr(self, funcname)
        aliases = getattr(func, "aliases", [])
        for alias in aliases:
            # An alias may not shadow a real command: warn and skip.
            if alias in cmd2funcname:
                import warnings
                warnings.warn("'%s' alias for '%s' command conflicts "
                              "with '%s' handler" %
                              (alias, cmdname, cmd2funcname[alias]))
                continue
            token2canonical[alias] = cmdname
    setattr(self, cacheattr, token2canonical)
return getattr(self, cacheattr)
their canonical command name. | 3.060229 | 2.84989 | 1.073806 |
# Stash the Cmdln instance and sub-command name; per the docstring,
# print_help() needs these.
self.cmdln = cmdln
self.subcmd = subcmd
for print_help(). | 2.916499 | 2.830394 | 1.030422 |
# Dispatch on handler arity: 2 params => raw "argv" signature,
# 3+ params => optparse-integrated "(self, subcmd, opts, ...)".
co_argcount = handler.__func__.__code__.co_argcount
if co_argcount == 2:  # handler ::= do_foo(self, argv)
    return handler(argv)
elif co_argcount >= 3:  # handler ::= do_foo(self, subcmd, opts, ...)
    # Lazily attach a parser if the @option decorator never ran.
    try:
        optparser = handler.optparser
    except AttributeError:
        optparser = handler.__func__.optparser = SubCmdOptionParser()
    assert isinstance(optparser, SubCmdOptionParser)
    optparser.set_cmdln_info(self, argv[0])
    try:
        opts, args = optparser.parse_args(argv[1:])
    except StopOptionProcessing:
        #TODO: this doesn't really fly for a replacement of
        # optparse.py behaviour, does it?
        return 0  # Normal command termination
    try:
        return handler(argv[0], opts, *args)
    except TypeError as ex:
        # Some TypeError's are user errors because of incorrect number
        # of arguments. Raise CmdlnUserError for these with a suitably
        # massaged error message.
        import sys
        tb = sys.exc_info()[2]  # the traceback object
        if tb.tb_next is not None:
            # If the traceback is more than one level deep, then the
            # TypeError did *not* happen on the "handler(...)" call
            # above. In that case we don't want to handle it specially
            # here: it would falsely mask deeper code errors.
            raise
        msg = ex.args[0]
        userErr = self._userErrFromNumArgsErrmsg(msg)
        if userErr:
            raise userErr
        else:
            raise
else:
    raise CmdlnError("incorrect argcount for %s(): takes %d, must "
                     "take 2 for 'argv' signature or 3+ for 'opts' "
                     "signature" % (handler.__name__, co_argcount))
dispatch the command. The raw handler provided by the base
'RawCmdln' class is still supported:
def do_foo(self, argv):
# 'argv' is the vector of command line args, argv[0] is
# the command name itself (i.e. "foo" or an alias)
pass
In addition, if the handler has more than 2 arguments option
processing is automatically done (using optparse):
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar(self, subcmd, opts, *args):
# subcmd = <"bar" or an alias>
# opts = <an optparse.Values instance>
if opts.verbose:
print "lots of debugging output..."
# args = <tuple of arguments>
for arg in args:
bar(arg)
TODO: explain that "*args" can be other signatures as well.
The `cmdln.option` decorator corresponds to an `add_option()`
method call on an `optparse.OptionParser` instance.
You can declare a specific number of arguments:
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar2(self, subcmd, opts, bar_one, bar_two):
#...
and an appropriate error message will be raised/printed if the
command is called with a different number of args. | 6.726731 | 6.12161 | 1.09885 |
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exeName)[1].lower() != '.exe':
exeName += '.exe'
import _winreg
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\
exeName
value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except _winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
return registered | def _getRegisteredExecutable(exeName) | Windows allow application paths to be registered in the registry. | 2.690922 | 2.669794 | 1.007914 |
# `matches` accumulates hits already yielded; `_cull` consults it
# (presumably to drop duplicates -- verify against `_cull`'s definition).
matches = []
if path is None:
    # No explicit path given: search the PATH environment variable.
    usingGivenPath = 0
    path = os.environ.get("PATH", "").split(os.pathsep)
    if sys.platform.startswith("win"):
        path.insert(0, os.curdir)  # implied by Windows shell
else:
    usingGivenPath = 1
# Windows has the concept of a list of extensions (PATHEXT env var).
if sys.platform.startswith("win"):
    if exts is None:
        exts = os.environ.get("PATHEXT", "").split(os.pathsep)
        # If '.exe' is not in exts then obviously this is Win9x and
        # or a bogus PATHEXT, then use a reasonable default.
        for ext in exts:
            if ext.lower() == ".exe":
                break
        else:
            # for/else: no ".exe" found in PATHEXT.
            exts = ['.COM', '.EXE', '.BAT']
    elif not isinstance(exts, list):
        raise TypeError("'exts' argument must be a list or None")
else:
    # 'exts' is a Windows-only feature.
    if exts is not None:
        raise WhichError("'exts' argument is not supported on "\
                         "platform '%s'" % sys.platform)
    exts = []
# File name cannot have path separators because PATH lookup does not
# work that way.
if os.sep in command or os.altsep and os.altsep in command:
    # Explicit (relative or absolute) path given: only check that file.
    if os.path.exists(command):
        match = _cull((command, "explicit path given"), matches, verbose)
        if verbose:
            yield match
        else:
            yield match[0]
else:
    # Walk each path directory, trying the bare name plus each extension.
    for i in range(len(path)):
        dirName = path[i]
        # On windows the dirName *could* be quoted, drop the quotes
        if sys.platform.startswith("win") and len(dirName) >= 2\
           and dirName[0] == '"' and dirName[-1] == '"':
            dirName = dirName[1:-1]
        for ext in ['']+exts:
            absName = os.path.abspath(
                os.path.normpath(os.path.join(dirName, command+ext)))
            if os.path.isfile(absName):
                # Describe where the match came from.  On Windows with an
                # implicit path, index 0 is the inserted current directory,
                # so real PATH elements are reported as (i-1).
                if usingGivenPath:
                    fromWhere = "from given path element %d" % i
                elif not sys.platform.startswith("win"):
                    fromWhere = "from PATH element %d" % i
                elif i == 0:
                    fromWhere = "from current directory"
                else:
                    fromWhere = "from PATH element %d" % (i-1)
                match = _cull((absName, fromWhere), matches, verbose)
                if match:
                    if verbose:
                        yield match
                    else:
                        yield match[0]
    # Finally, consult the Windows registry's App Paths (no-op elsewhere).
    match = _getRegisteredExecutable(command)
    if match is not None:
        match = _cull(match, matches, verbose)
        if match:
            if verbose:
                yield match
            else:
                yield match[0]
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
This method returns a generator which yields either full paths to
the given command or, if verbose, tuples of the form (<path to
command>, <where path found>). | 3.278504 | 3.008708 | 1.089672 |
return list( whichgen(command, path, verbose, exts) ) | def whichall(command, path=None, verbose=0, exts=None) | Return a list of full paths to all matches of the given command
on the path.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows. | 8.530068 | 13.161571 | 0.648104 |
_globals = {}
_locals = {}
exec(
compile(
open(TOP + "/manta/version.py").read(), TOP + "/manta/version.py",
'exec'), _globals, _locals)
return _locals["__version__"] | def get_version() | Get the python-manta version without having to import the manta package,
which requires deps to already be installed. | 3.887907 | 3.431093 | 1.133139 |
data = data.strip()
# Let's accept either:
# - just the base64 encoded data part, e.g.
# 'AAAAB3NzaC1yc2EAAAABIwAA...2l24uq9Lfw=='
# - the full ssh pub key file content, e.g.:
# 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAA...2l24uq9Lfw== my comment'
if (re.search(r'^ssh-(?:rsa|dss) ', data) or
re.search(r'^ecdsa-sha2-nistp(?:[0-9]+)', data)):
data = data.split(None, 2)[1]
key = base64.b64decode(data)
fp_plain = hashlib.md5(key).hexdigest()
return ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])) | def fingerprint_from_ssh_pub_key(data) | Calculate the fingerprint of SSH public key data.
>>> data = "ssh-rsa AAAAB3NzaC1y...4IEAA1Z4wIWCuk8F9Tzw== my key comment"
>>> fingerprint_from_ssh_pub_key(data)
'54:c7:4c:93:cf:ff:e3:32:68:bc:89:6e:5e:22:b5:9c'
Adapted from <http://stackoverflow.com/questions/6682815/>
and imgapi.js#fingerprintFromSshpubkey. | 3.3461 | 3.15282 | 1.061304 |
fp_plain = hashlib.md5(key).hexdigest()
return ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])) | def fingerprint_from_raw_ssh_pub_key(key) | Encode a raw SSH key (string of bytes, as from
`str(paramiko.AgentKey)`) to a fingerprint in the typical
'54:c7:4c:93:cf:ff:e3:32:68:bc:89:6e:5e:22:b5:9c' form. | 2.66899 | 2.617873 | 1.019526 |
digest = hashlib.sha256(raw_key).digest()
h = base64.b64encode(digest).decode('utf-8')
h = h.rstrip().rstrip('=') # drop newline and possible base64 padding
return 'SHA256:' + h | def sha256_fingerprint_from_raw_ssh_pub_key(raw_key) | Encode a raw SSH key (string of bytes, as from
`str(paramiko.AgentKey)`) to a fingerprint in the SHA256 form:
SHA256:j2WoSeOWhFy69BQ39fuafFAySp9qCZTSCEyT2vRKcL+s | 3.648773 | 3.535101 | 1.032155 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.